
TOMOYO Linux Cross Reference
Linux/mm/page_alloc.c


  1 // SPDX-License-Identifier: GPL-2.0-only
  2 /*
  3  *  linux/mm/page_alloc.c
  4  *
  5  *  Manages the free list; the system allocates free pages here.
  6  *  Note that kmalloc() lives in slab.c
  7  *
  8  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
  9  *  Swap reorganised 29.12.95, Stephen Tweedie
 10  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 11  *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 12  *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 13  *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 14  *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 15  *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 16  */
 17 
 18 #include <linux/stddef.h>
 19 #include <linux/mm.h>
 20 #include <linux/highmem.h>
 21 #include <linux/swap.h>
 22 #include <linux/interrupt.h>
 23 #include <linux/pagemap.h>
 24 #include <linux/jiffies.h>
 25 #include <linux/memblock.h>
 26 #include <linux/compiler.h>
 27 #include <linux/kernel.h>
 28 #include <linux/kasan.h>
 29 #include <linux/module.h>
 30 #include <linux/suspend.h>
 31 #include <linux/pagevec.h>
 32 #include <linux/blkdev.h>
 33 #include <linux/slab.h>
 34 #include <linux/ratelimit.h>
 35 #include <linux/oom.h>
 36 #include <linux/topology.h>
 37 #include <linux/sysctl.h>
 38 #include <linux/cpu.h>
 39 #include <linux/cpuset.h>
 40 #include <linux/memory_hotplug.h>
 41 #include <linux/nodemask.h>
 42 #include <linux/vmalloc.h>
 43 #include <linux/vmstat.h>
 44 #include <linux/mempolicy.h>
 45 #include <linux/memremap.h>
 46 #include <linux/stop_machine.h>
 47 #include <linux/random.h>
 48 #include <linux/sort.h>
 49 #include <linux/pfn.h>
 50 #include <linux/backing-dev.h>
 51 #include <linux/fault-inject.h>
 52 #include <linux/page-isolation.h>
 53 #include <linux/debugobjects.h>
 54 #include <linux/kmemleak.h>
 55 #include <linux/compaction.h>
 56 #include <trace/events/kmem.h>
 57 #include <trace/events/oom.h>
 58 #include <linux/prefetch.h>
 59 #include <linux/mm_inline.h>
 60 #include <linux/mmu_notifier.h>
 61 #include <linux/migrate.h>
 62 #include <linux/hugetlb.h>
 63 #include <linux/sched/rt.h>
 64 #include <linux/sched/mm.h>
 65 #include <linux/page_owner.h>
 66 #include <linux/kthread.h>
 67 #include <linux/memcontrol.h>
 68 #include <linux/ftrace.h>
 69 #include <linux/lockdep.h>
 70 #include <linux/nmi.h>
 71 #include <linux/psi.h>
 72 #include <linux/padata.h>
 73 #include <linux/khugepaged.h>
 74 #include <linux/buffer_head.h>
 75 #include <asm/sections.h>
 76 #include <asm/tlbflush.h>
 77 #include <asm/div64.h>
 78 #include "internal.h"
 79 #include "shuffle.h"
 80 #include "page_reporting.h"
 81 
 82 /* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
 83 typedef int __bitwise fpi_t;
 84 
 85 /* No special request */
 86 #define FPI_NONE                ((__force fpi_t)0)
 87 
 88 /*
 89  * Skip free page reporting notification for the (possibly merged) page.
 90  * This does not hinder free page reporting from grabbing the page,
 91  * reporting it and marking it "reported" -  it only skips notifying
 92  * the free page reporting infrastructure about a newly freed page. For
 93  * example, used when temporarily pulling a page from a freelist and
 94  * putting it back unmodified.
 95  */
 96 #define FPI_SKIP_REPORT_NOTIFY  ((__force fpi_t)BIT(0))
 97 
 98 /*
 99  * Place the (possibly merged) page to the tail of the freelist. Will ignore
100  * page shuffling (relevant code - e.g., memory onlining - is expected to
101  * shuffle the whole zone).
102  *
103  * Note: No code should rely on this flag for correctness - it's purely
104  *       to allow for optimizations when handing back either fresh pages
105  *       (memory onlining) or untouched pages (page isolation, free page
106  *       reporting).
107  */
108 #define FPI_TO_TAIL             ((__force fpi_t)BIT(1))
109 
110 /*
111  * Don't poison memory with KASAN (only for the tag-based modes).
112  * During boot, all non-reserved memblock memory is exposed to page_alloc.
113  * Poisoning all that memory lengthens boot time, especially on systems with
114  * large amount of RAM. This flag is used to skip that poisoning.
115  * This is only done for the tag-based KASAN modes, as those are able to
116  * detect memory corruptions with the memory tags assigned by default.
117  * All memory allocated normally after boot gets poisoned as usual.
118  */
119 #define FPI_SKIP_KASAN_POISON   ((__force fpi_t)BIT(2))
120 
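/*
 * Editor's note, illustrative only (not part of the original file): a caller
 * that temporarily pulled a page off a freelist and is handing it back
 * untouched would typically combine these flags, e.g.
 *
 *	__free_one_page(page, page_to_pfn(page), zone, order, mt,
 *			FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL);
 *
 * so that the page is neither re-reported nor handed out again immediately.
 */
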
121 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
122 static DEFINE_MUTEX(pcp_batch_high_lock);
123 #define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8)
124 
125 struct pagesets {
126         local_lock_t lock;
127 };
128 static DEFINE_PER_CPU(struct pagesets, pagesets) = {
129         .lock = INIT_LOCAL_LOCK(lock),
130 };
131 
132 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
133 DEFINE_PER_CPU(int, numa_node);
134 EXPORT_PER_CPU_SYMBOL(numa_node);
135 #endif
136 
137 DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);
138 
139 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
140 /*
141  * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
142  * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
143  * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
144  * defined in <linux/topology.h>.
145  */
146 DEFINE_PER_CPU(int, _numa_mem_);                /* Kernel "local memory" node */
147 EXPORT_PER_CPU_SYMBOL(_numa_mem_);
148 #endif
149 
150 /* work_structs for global per-cpu drains */
151 struct pcpu_drain {
152         struct zone *zone;
153         struct work_struct work;
154 };
155 static DEFINE_MUTEX(pcpu_drain_mutex);
156 static DEFINE_PER_CPU(struct pcpu_drain, pcpu_drain);
157 
158 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
159 volatile unsigned long latent_entropy __latent_entropy;
160 EXPORT_SYMBOL(latent_entropy);
161 #endif
162 
163 /*
164  * Array of node states.
165  */
166 nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
167         [N_POSSIBLE] = NODE_MASK_ALL,
168         [N_ONLINE] = { { [0] = 1UL } },
169 #ifndef CONFIG_NUMA
170         [N_NORMAL_MEMORY] = { { [0] = 1UL } },
171 #ifdef CONFIG_HIGHMEM
172         [N_HIGH_MEMORY] = { { [0] = 1UL } },
173 #endif
174         [N_MEMORY] = { { [0] = 1UL } },
175         [N_CPU] = { { [0] = 1UL } },
176 #endif  /* NUMA */
177 };
178 EXPORT_SYMBOL(node_states);
179 
180 atomic_long_t _totalram_pages __read_mostly;
181 EXPORT_SYMBOL(_totalram_pages);
182 unsigned long totalreserve_pages __read_mostly;
183 unsigned long totalcma_pages __read_mostly;
184 
185 int percpu_pagelist_high_fraction;
186 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
187 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
188 EXPORT_SYMBOL(init_on_alloc);
189 
190 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
191 EXPORT_SYMBOL(init_on_free);
192 
193 static bool _init_on_alloc_enabled_early __read_mostly
194                                 = IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
195 static int __init early_init_on_alloc(char *buf)
196 {
197 
198         return kstrtobool(buf, &_init_on_alloc_enabled_early);
199 }
200 early_param("init_on_alloc", early_init_on_alloc);
201 
202 static bool _init_on_free_enabled_early __read_mostly
203                                 = IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON);
204 static int __init early_init_on_free(char *buf)
205 {
206         return kstrtobool(buf, &_init_on_free_enabled_early);
207 }
208 early_param("init_on_free", early_init_on_free);
209 
210 /*
211  * A cached value of the page's pageblock's migratetype, used when the page is
212  * put on a pcplist. Used to avoid the pageblock migratetype lookup when
213  * freeing from pcplists in most cases, at the cost of possibly becoming stale.
214  * Also the migratetype set in the page does not necessarily match the pcplist
215  * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
216  * other index - this ensures that it will be put on the correct CMA freelist.
217  */
218 static inline int get_pcppage_migratetype(struct page *page)
219 {
220         return page->index;
221 }
222 
223 static inline void set_pcppage_migratetype(struct page *page, int migratetype)
224 {
225         page->index = migratetype;
226 }
227 
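/*
 * Editor's note, illustrative only (not part of the original file): the
 * intended pairing is to cache the pageblock migratetype when a page enters
 * a pcplist and to read the cached value back when the page finally reaches
 * the buddy lists, roughly:
 *
 *	set_pcppage_migratetype(page, get_pfnblock_migratetype(page, pfn));
 *	...					// page sits on a pcplist
 *	mt = get_pcppage_migratetype(page);	// may be stale, see above
 */
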
228 #ifdef CONFIG_PM_SLEEP
229 /*
230  * The following functions are used by the suspend/hibernate code to temporarily
231  * change gfp_allowed_mask in order to avoid using I/O during memory allocations
232  * while devices are suspended.  To avoid races with the suspend/hibernate code,
233  * they should always be called with system_transition_mutex held
234  * (gfp_allowed_mask also should only be modified with system_transition_mutex
235  * held, unless the suspend/hibernate code is guaranteed not to run in parallel
236  * with that modification).
237  */
238 
239 static gfp_t saved_gfp_mask;
240 
241 void pm_restore_gfp_mask(void)
242 {
243         WARN_ON(!mutex_is_locked(&system_transition_mutex));
244         if (saved_gfp_mask) {
245                 gfp_allowed_mask = saved_gfp_mask;
246                 saved_gfp_mask = 0;
247         }
248 }
249 
250 void pm_restrict_gfp_mask(void)
251 {
252         WARN_ON(!mutex_is_locked(&system_transition_mutex));
253         WARN_ON(saved_gfp_mask);
254         saved_gfp_mask = gfp_allowed_mask;
255         gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
256 }
257 
258 bool pm_suspended_storage(void)
259 {
260         if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
261                 return false;
262         return true;
263 }
264 #endif /* CONFIG_PM_SLEEP */
265 
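/*
 * Editor's note, illustrative sketch (not part of the original file): the
 * suspend/hibernate core is expected to bracket device suspension roughly
 * like this, holding system_transition_mutex as required above:
 *
 *	mutex_lock(&system_transition_mutex);
 *	pm_restrict_gfp_mask();		// no __GFP_IO / __GFP_FS from here on
 *	... suspend devices, enter the sleep state, resume devices ...
 *	pm_restore_gfp_mask();
 *	mutex_unlock(&system_transition_mutex);
 */
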
266 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
267 unsigned int pageblock_order __read_mostly;
268 #endif
269 
270 static void __free_pages_ok(struct page *page, unsigned int order,
271                             fpi_t fpi_flags);
272 
273 /*
274  * results with 256, 32 in the lowmem_reserve sysctl:
275  *      1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
276  *      1G machine -> (16M dma, 784M normal, 224M high)
277  *      NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
278  *      HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
279  *      HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
280  *
281  * TBD: should special case ZONE_DMA32 machines here - in those we normally
282  * don't need any ZONE_NORMAL reservation
283  */
284 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
285 #ifdef CONFIG_ZONE_DMA
286         [ZONE_DMA] = 256,
287 #endif
288 #ifdef CONFIG_ZONE_DMA32
289         [ZONE_DMA32] = 256,
290 #endif
291         [ZONE_NORMAL] = 32,
292 #ifdef CONFIG_HIGHMEM
293         [ZONE_HIGHMEM] = 0,
294 #endif
295         [ZONE_MOVABLE] = 0,
296 };
297 
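/*
 * Editor's note, worked example using the figures from the comment above
 * (not part of the original file): on the 1G machine, a NORMAL allocation
 * leaves 784M/256 ~= 3.1M of ZONE_DMA untouchable; a HIGHMEM allocation
 * leaves 224M/32 = 7M of ZONE_NORMAL and (224M+784M)/256 ~= 3.9M of
 * ZONE_DMA untouchable. The divisor is the ratio of the zone being
 * protected; the numerator is the memory in the zones above it.
 */
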
298 static char * const zone_names[MAX_NR_ZONES] = {
299 #ifdef CONFIG_ZONE_DMA
300          "DMA",
301 #endif
302 #ifdef CONFIG_ZONE_DMA32
303          "DMA32",
304 #endif
305          "Normal",
306 #ifdef CONFIG_HIGHMEM
307          "HighMem",
308 #endif
309          "Movable",
310 #ifdef CONFIG_ZONE_DEVICE
311          "Device",
312 #endif
313 };
314 
315 const char * const migratetype_names[MIGRATE_TYPES] = {
316         "Unmovable",
317         "Movable",
318         "Reclaimable",
319         "HighAtomic",
320 #ifdef CONFIG_CMA
321         "CMA",
322 #endif
323 #ifdef CONFIG_MEMORY_ISOLATION
324         "Isolate",
325 #endif
326 };
327 
328 compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = {
329         [NULL_COMPOUND_DTOR] = NULL,
330         [COMPOUND_PAGE_DTOR] = free_compound_page,
331 #ifdef CONFIG_HUGETLB_PAGE
332         [HUGETLB_PAGE_DTOR] = free_huge_page,
333 #endif
334 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
335         [TRANSHUGE_PAGE_DTOR] = free_transhuge_page,
336 #endif
337 };
338 
339 int min_free_kbytes = 1024;
340 int user_min_free_kbytes = -1;
341 int watermark_boost_factor __read_mostly = 15000;
342 int watermark_scale_factor = 10;
343 
344 static unsigned long nr_kernel_pages __initdata;
345 static unsigned long nr_all_pages __initdata;
346 static unsigned long dma_reserve __initdata;
347 
348 static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
349 static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
350 static unsigned long required_kernelcore __initdata;
351 static unsigned long required_kernelcore_percent __initdata;
352 static unsigned long required_movablecore __initdata;
353 static unsigned long required_movablecore_percent __initdata;
354 static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;
355 static bool mirrored_kernelcore __meminitdata;
356 
357 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
358 int movable_zone;
359 EXPORT_SYMBOL(movable_zone);
360 
361 #if MAX_NUMNODES > 1
362 unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
363 unsigned int nr_online_nodes __read_mostly = 1;
364 EXPORT_SYMBOL(nr_node_ids);
365 EXPORT_SYMBOL(nr_online_nodes);
366 #endif
367 
368 int page_group_by_mobility_disabled __read_mostly;
369 
370 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
371 /*
372  * During boot we initialize deferred pages on-demand, as needed, but once
373  * page_alloc_init_late() has finished, the deferred pages are all initialized,
374  * and we can permanently disable that path.
375  */
376 static DEFINE_STATIC_KEY_TRUE(deferred_pages);
377 
378 /*
 379  * Call kasan_poison_pages() only after deferred memory initialization
 380  * has completed. Poisoning pages during deferred memory init would greatly
 381  * lengthen the process and cause problems on large-memory systems, as the
 382  * deferred page initialization is done with interrupts disabled.
383  *
384  * Assuming that there will be no reference to those newly initialized
385  * pages before they are ever allocated, this should have no effect on
386  * KASAN memory tracking as the poison will be properly inserted at page
387  * allocation time. The only corner case is when pages are allocated by
388  * on-demand allocation and then freed again before the deferred pages
389  * initialization is done, but this is not likely to happen.
390  */
391 static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
392 {
393         return static_branch_unlikely(&deferred_pages) ||
394                (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
395                 (fpi_flags & FPI_SKIP_KASAN_POISON)) ||
396                PageSkipKASanPoison(page);
397 }
398 
399 /* Returns true if the struct page for the pfn is uninitialised */
400 static inline bool __meminit early_page_uninitialised(unsigned long pfn)
401 {
402         int nid = early_pfn_to_nid(pfn);
403 
404         if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
405                 return true;
406 
407         return false;
408 }
409 
410 /*
411  * Returns true when the remaining initialisation should be deferred until
412  * later in the boot cycle when it can be parallelised.
413  */
414 static bool __meminit
415 defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
416 {
417         static unsigned long prev_end_pfn, nr_initialised;
418 
419         /*
 420          * The static prev_end_pfn contains the end of the previous zone.
 421          * No need to protect it: we are called very early in boot, before smp_init().
422          */
423         if (prev_end_pfn != end_pfn) {
424                 prev_end_pfn = end_pfn;
425                 nr_initialised = 0;
426         }
427 
428         /* Always populate low zones for address-constrained allocations */
429         if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
430                 return false;
431 
432         if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
433                 return true;
434         /*
435          * We start only with one section of pages, more pages are added as
436          * needed until the rest of deferred pages are initialized.
437          */
438         nr_initialised++;
439         if ((nr_initialised > PAGES_PER_SECTION) &&
440             (pfn & (PAGES_PER_SECTION - 1)) == 0) {
441                 NODE_DATA(nid)->first_deferred_pfn = pfn;
442                 return true;
443         }
444         return false;
445 }
446 #else
447 static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
448 {
449         return (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
450                 (fpi_flags & FPI_SKIP_KASAN_POISON)) ||
451                PageSkipKASanPoison(page);
452 }
453 
454 static inline bool early_page_uninitialised(unsigned long pfn)
455 {
456         return false;
457 }
458 
459 static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
460 {
461         return false;
462 }
463 #endif
464 
465 /* Return a pointer to the bitmap storing bits affecting a block of pages */
466 static inline unsigned long *get_pageblock_bitmap(const struct page *page,
467                                                         unsigned long pfn)
468 {
469 #ifdef CONFIG_SPARSEMEM
470         return section_to_usemap(__pfn_to_section(pfn));
471 #else
472         return page_zone(page)->pageblock_flags;
473 #endif /* CONFIG_SPARSEMEM */
474 }
475 
476 static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
477 {
478 #ifdef CONFIG_SPARSEMEM
479         pfn &= (PAGES_PER_SECTION-1);
480 #else
481         pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
482 #endif /* CONFIG_SPARSEMEM */
483         return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
484 }
485 
486 static __always_inline
487 unsigned long __get_pfnblock_flags_mask(const struct page *page,
488                                         unsigned long pfn,
489                                         unsigned long mask)
490 {
491         unsigned long *bitmap;
492         unsigned long bitidx, word_bitidx;
493         unsigned long word;
494 
495         bitmap = get_pageblock_bitmap(page, pfn);
496         bitidx = pfn_to_bitidx(page, pfn);
497         word_bitidx = bitidx / BITS_PER_LONG;
498         bitidx &= (BITS_PER_LONG-1);
499 
500         word = bitmap[word_bitidx];
501         return (word >> bitidx) & mask;
502 }
503 
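/*
 * Editor's note, worked example (not part of the original file), assuming
 * SPARSEMEM with PAGES_PER_SECTION == 32768, pageblock_order == 9, 64-bit
 * longs and NR_PAGEBLOCK_BITS == 4: for pfn 0x12345, pfn_to_bitidx() takes
 * the section offset 0x2345 (9029), finds pageblock 9029 >> 9 == 17 and
 * returns bitidx 17 * 4 == 68; __get_pfnblock_flags_mask() then reads word
 * 68 / 64 == 1 of the usemap and extracts the requested flags from bits 4..7.
 */
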
504 /**
505  * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
506  * @page: The page within the block of interest
507  * @pfn: The target page frame number
508  * @mask: mask of bits that the caller is interested in
509  *
510  * Return: pageblock_bits flags
511  */
512 unsigned long get_pfnblock_flags_mask(const struct page *page,
513                                         unsigned long pfn, unsigned long mask)
514 {
515         return __get_pfnblock_flags_mask(page, pfn, mask);
516 }
517 
518 static __always_inline int get_pfnblock_migratetype(const struct page *page,
519                                         unsigned long pfn)
520 {
521         return __get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
522 }
523 
524 /**
525  * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
526  * @page: The page within the block of interest
527  * @flags: The flags to set
528  * @pfn: The target page frame number
529  * @mask: mask of bits that the caller is interested in
530  */
531 void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
532                                         unsigned long pfn,
533                                         unsigned long mask)
534 {
535         unsigned long *bitmap;
536         unsigned long bitidx, word_bitidx;
537         unsigned long old_word, word;
538 
539         BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
540         BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));
541 
542         bitmap = get_pageblock_bitmap(page, pfn);
543         bitidx = pfn_to_bitidx(page, pfn);
544         word_bitidx = bitidx / BITS_PER_LONG;
545         bitidx &= (BITS_PER_LONG-1);
546 
547         VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);
548 
549         mask <<= bitidx;
550         flags <<= bitidx;
551 
552         word = READ_ONCE(bitmap[word_bitidx]);
553         for (;;) {
554                 old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
555                 if (word == old_word)
556                         break;
557                 word = old_word;
558         }
559 }
560 
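/*
 * Editor's note (not part of the original file): the loop above is the usual
 * lock-free read-modify-write idiom. In generic form:
 *
 *	old = READ_ONCE(*ptr);
 *	for (;;) {
 *		new = (old & ~mask) | flags;
 *		cur = cmpxchg(ptr, old, new);
 *		if (cur == old)
 *			break;		// our update was installed
 *		old = cur;		// lost a race; retry with the fresh value
 *	}
 */
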
561 void set_pageblock_migratetype(struct page *page, int migratetype)
562 {
563         if (unlikely(page_group_by_mobility_disabled &&
564                      migratetype < MIGRATE_PCPTYPES))
565                 migratetype = MIGRATE_UNMOVABLE;
566 
567         set_pfnblock_flags_mask(page, (unsigned long)migratetype,
568                                 page_to_pfn(page), MIGRATETYPE_MASK);
569 }
570 
571 #ifdef CONFIG_DEBUG_VM
572 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
573 {
574         int ret = 0;
575         unsigned seq;
576         unsigned long pfn = page_to_pfn(page);
577         unsigned long sp, start_pfn;
578 
579         do {
580                 seq = zone_span_seqbegin(zone);
581                 start_pfn = zone->zone_start_pfn;
582                 sp = zone->spanned_pages;
583                 if (!zone_spans_pfn(zone, pfn))
584                         ret = 1;
585         } while (zone_span_seqretry(zone, seq));
586 
587         if (ret)
588                 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
589                         pfn, zone_to_nid(zone), zone->name,
590                         start_pfn, start_pfn + sp);
591 
592         return ret;
593 }
594 
595 static int page_is_consistent(struct zone *zone, struct page *page)
596 {
597         if (zone != page_zone(page))
598                 return 0;
599 
600         return 1;
601 }
602 /*
603  * Temporary debugging check for pages not lying within a given zone.
604  */
605 static int __maybe_unused bad_range(struct zone *zone, struct page *page)
606 {
607         if (page_outside_zone_boundaries(zone, page))
608                 return 1;
609         if (!page_is_consistent(zone, page))
610                 return 1;
611 
612         return 0;
613 }
614 #else
615 static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
616 {
617         return 0;
618 }
619 #endif
620 
621 static void bad_page(struct page *page, const char *reason)
622 {
623         static unsigned long resume;
624         static unsigned long nr_shown;
625         static unsigned long nr_unshown;
626 
627         /*
628          * Allow a burst of 60 reports, then keep quiet for that minute;
629          * or allow a steady drip of one report per second.
630          */
631         if (nr_shown == 60) {
632                 if (time_before(jiffies, resume)) {
633                         nr_unshown++;
634                         goto out;
635                 }
636                 if (nr_unshown) {
637                         pr_alert(
638                               "BUG: Bad page state: %lu messages suppressed\n",
639                                 nr_unshown);
640                         nr_unshown = 0;
641                 }
642                 nr_shown = 0;
643         }
644         if (nr_shown++ == 0)
645                 resume = jiffies + 60 * HZ;
646 
647         pr_alert("BUG: Bad page state in process %s  pfn:%05lx\n",
648                 current->comm, page_to_pfn(page));
649         dump_page(page, reason);
650 
651         print_modules();
652         dump_stack();
653 out:
654         /* Leave bad fields for debug, except PageBuddy could make trouble */
655         page_mapcount_reset(page); /* remove PageBuddy */
656         add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
657 }
658 
659 static inline unsigned int order_to_pindex(int migratetype, int order)
660 {
661         int base = order;
662 
663 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
664         if (order > PAGE_ALLOC_COSTLY_ORDER) {
665                 VM_BUG_ON(order != pageblock_order);
666                 base = PAGE_ALLOC_COSTLY_ORDER + 1;
667         }
668 #else
669         VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
670 #endif
671 
672         return (MIGRATE_PCPTYPES * base) + migratetype;
673 }
674 
675 static inline int pindex_to_order(unsigned int pindex)
676 {
677         int order = pindex / MIGRATE_PCPTYPES;
678 
679 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
680         if (order > PAGE_ALLOC_COSTLY_ORDER)
681                 order = pageblock_order;
682 #else
683         VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
684 #endif
685 
686         return order;
687 }
688 
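/*
 * Editor's note, worked example (not part of the original file), assuming
 * MIGRATE_PCPTYPES == 3 and PAGE_ALLOC_COSTLY_ORDER == 3: an order-2
 * MIGRATE_MOVABLE (1) page maps to pindex 3 * 2 + 1 == 7, and
 * pindex_to_order(7) == 7 / 3 == 2 recovers the order. With THP enabled, a
 * pageblock_order page uses base PAGE_ALLOC_COSTLY_ORDER + 1 == 4, giving
 * pindex 12..14 depending on migratetype.
 */
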
689 static inline bool pcp_allowed_order(unsigned int order)
690 {
691         if (order <= PAGE_ALLOC_COSTLY_ORDER)
692                 return true;
693 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
694         if (order == pageblock_order)
695                 return true;
696 #endif
697         return false;
698 }
699 
700 static inline void free_the_page(struct page *page, unsigned int order)
701 {
702         if (pcp_allowed_order(order))           /* Via pcp? */
703                 free_unref_page(page, order);
704         else
705                 __free_pages_ok(page, order, FPI_NONE);
706 }
707 
708 /*
709  * Higher-order pages are called "compound pages".  They are structured thusly:
710  *
 711  * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 712  *
 713  * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 714  * in bit 0 of page->compound_head. The rest of the bits form a pointer to the head page.
 715  *
 716  * The first tail page's ->compound_dtor holds the offset in the array of compound
717  * page destructors. See compound_page_dtors.
718  *
719  * The first tail page's ->compound_order holds the order of allocation.
720  * This usage means that zero-order pages may not be compound.
721  */
722 
723 void free_compound_page(struct page *page)
724 {
725         mem_cgroup_uncharge(page_folio(page));
726         free_the_page(page, compound_order(page));
727 }
728 
729 void prep_compound_page(struct page *page, unsigned int order)
730 {
731         int i;
732         int nr_pages = 1 << order;
733 
734         __SetPageHead(page);
735         for (i = 1; i < nr_pages; i++) {
736                 struct page *p = page + i;
737                 p->mapping = TAIL_MAPPING;
738                 set_compound_head(p, page);
739         }
740 
741         set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
742         set_compound_order(page, order);
743         atomic_set(compound_mapcount_ptr(page), -1);
744         if (hpage_pincount_available(page))
745                 atomic_set(compound_pincount_ptr(page), 0);
746 }
747 
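/*
 * Editor's note, illustrative layout (not part of the original file): after
 * prep_compound_page(page, 2) the four struct pages look roughly like:
 *
 *	page + 0:    PG_head set
 *	page + 1..3: mapping == TAIL_MAPPING, compound_head ==
 *		     (unsigned long)page | 1	(bit 0 marks PageTail)
 *	page + 1:    additionally holds compound_dtor (COMPOUND_PAGE_DTOR),
 *		     compound_order (2) and compound_mapcount (-1)
 */
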
748 #ifdef CONFIG_DEBUG_PAGEALLOC
749 unsigned int _debug_guardpage_minorder;
750 
751 bool _debug_pagealloc_enabled_early __read_mostly
752                         = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
753 EXPORT_SYMBOL(_debug_pagealloc_enabled_early);
754 DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
755 EXPORT_SYMBOL(_debug_pagealloc_enabled);
756 
757 DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
758 
759 static int __init early_debug_pagealloc(char *buf)
760 {
761         return kstrtobool(buf, &_debug_pagealloc_enabled_early);
762 }
763 early_param("debug_pagealloc", early_debug_pagealloc);
764 
765 static int __init debug_guardpage_minorder_setup(char *buf)
766 {
767         unsigned long res;
768 
769         if (kstrtoul(buf, 10, &res) < 0 ||  res > MAX_ORDER / 2) {
770                 pr_err("Bad debug_guardpage_minorder value\n");
771                 return 0;
772         }
773         _debug_guardpage_minorder = res;
774         pr_info("Setting debug_guardpage_minorder to %lu\n", res);
775         return 0;
776 }
777 early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);
778 
779 static inline bool set_page_guard(struct zone *zone, struct page *page,
780                                 unsigned int order, int migratetype)
781 {
782         if (!debug_guardpage_enabled())
783                 return false;
784 
785         if (order >= debug_guardpage_minorder())
786                 return false;
787 
788         __SetPageGuard(page);
789         INIT_LIST_HEAD(&page->lru);
790         set_page_private(page, order);
791         /* Guard pages are not available for any usage */
792         __mod_zone_freepage_state(zone, -(1 << order), migratetype);
793 
794         return true;
795 }
796 
797 static inline void clear_page_guard(struct zone *zone, struct page *page,
798                                 unsigned int order, int migratetype)
799 {
800         if (!debug_guardpage_enabled())
801                 return;
802 
803         __ClearPageGuard(page);
804 
805         set_page_private(page, 0);
806         if (!is_migrate_isolate(migratetype))
807                 __mod_zone_freepage_state(zone, (1 << order), migratetype);
808 }
809 #else
810 static inline bool set_page_guard(struct zone *zone, struct page *page,
811                         unsigned int order, int migratetype) { return false; }
812 static inline void clear_page_guard(struct zone *zone, struct page *page,
813                                 unsigned int order, int migratetype) {}
814 #endif
815 
816 /*
817  * Enable static keys related to various memory debugging and hardening options.
818  * Some override others, and depend on early params that are evaluated in the
819  * order of appearance. So we need to first gather the full picture of what was
820  * enabled, and then make decisions.
821  */
822 void init_mem_debugging_and_hardening(void)
823 {
824         bool page_poisoning_requested = false;
825 
826 #ifdef CONFIG_PAGE_POISONING
827         /*
 828          * Page poisoning serves as debug page alloc for some arches. If
 829          * either of those options is enabled, enable poisoning.
830          */
831         if (page_poisoning_enabled() ||
832              (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
833               debug_pagealloc_enabled())) {
834                 static_branch_enable(&_page_poisoning_enabled);
835                 page_poisoning_requested = true;
836         }
837 #endif
838 
839         if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
840             page_poisoning_requested) {
841                 pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
842                         "will take precedence over init_on_alloc and init_on_free\n");
843                 _init_on_alloc_enabled_early = false;
844                 _init_on_free_enabled_early = false;
845         }
846 
847         if (_init_on_alloc_enabled_early)
848                 static_branch_enable(&init_on_alloc);
849         else
850                 static_branch_disable(&init_on_alloc);
851 
852         if (_init_on_free_enabled_early)
853                 static_branch_enable(&init_on_free);
854         else
855                 static_branch_disable(&init_on_free);
856 
857 #ifdef CONFIG_DEBUG_PAGEALLOC
858         if (!debug_pagealloc_enabled())
859                 return;
860 
861         static_branch_enable(&_debug_pagealloc_enabled);
862 
863         if (!debug_guardpage_minorder())
864                 return;
865 
866         static_branch_enable(&_debug_guardpage_enabled);
867 #endif
868 }
869 
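/*
 * Editor's note, illustrative example (not part of the original file):
 * booting with "init_on_alloc=1 page_poison=1" (the latter parsed in
 * mm/page_poison.c) hits the branch above: the pr_info() is printed and only
 * poisoning stays enabled, since poisoning already overwrites freed memory
 * and would make the extra zero-init redundant.
 */
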
870 static inline void set_buddy_order(struct page *page, unsigned int order)
871 {
872         set_page_private(page, order);
873         __SetPageBuddy(page);
874 }
875 
876 /*
 877  * This function checks whether a page is free && is the buddy of the
 878  * page being merged. We can coalesce a page and its buddy if
879  * (a) the buddy is not in a hole (check before calling!) &&
880  * (b) the buddy is in the buddy system &&
881  * (c) a page and its buddy have the same order &&
882  * (d) a page and its buddy are in the same zone.
883  *
884  * For recording whether a page is in the buddy system, we set PageBuddy.
885  * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
886  *
887  * For recording page's order, we use page_private(page).
888  */
889 static inline bool page_is_buddy(struct page *page, struct page *buddy,
890                                                         unsigned int order)
891 {
892         if (!page_is_guard(buddy) && !PageBuddy(buddy))
893                 return false;
894 
895         if (buddy_order(buddy) != order)
896                 return false;
897 
898         /*
899          * zone check is done late to avoid uselessly calculating
900          * zone/node ids for pages that could never merge.
901          */
902         if (page_zone_id(page) != page_zone_id(buddy))
903                 return false;
904 
905         VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
906 
907         return true;
908 }
909 
910 #ifdef CONFIG_COMPACTION
911 static inline struct capture_control *task_capc(struct zone *zone)
912 {
913         struct capture_control *capc = current->capture_control;
914 
915         return unlikely(capc) &&
916                 !(current->flags & PF_KTHREAD) &&
917                 !capc->page &&
918                 capc->cc->zone == zone ? capc : NULL;
919 }
920 
921 static inline bool
922 compaction_capture(struct capture_control *capc, struct page *page,
923                    int order, int migratetype)
924 {
925         if (!capc || order != capc->cc->order)
926                 return false;
927 
 928         /* Do not accidentally pollute CMA or isolated regions */
929         if (is_migrate_cma(migratetype) ||
930             is_migrate_isolate(migratetype))
931                 return false;
932 
933         /*
934          * Do not let lower order allocations pollute a movable pageblock.
935          * This might let an unmovable request use a reclaimable pageblock
936          * and vice-versa but no more than normal fallback logic which can
937          * have trouble finding a high-order free page.
938          */
939         if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
940                 return false;
941 
942         capc->page = page;
943         return true;
944 }
945 
946 #else
947 static inline struct capture_control *task_capc(struct zone *zone)
948 {
949         return NULL;
950 }
951 
952 static inline bool
953 compaction_capture(struct capture_control *capc, struct page *page,
954                    int order, int migratetype)
955 {
956         return false;
957 }
958 #endif /* CONFIG_COMPACTION */
959 
960 /* Used for pages not on another list */
961 static inline void add_to_free_list(struct page *page, struct zone *zone,
962                                     unsigned int order, int migratetype)
963 {
964         struct free_area *area = &zone->free_area[order];
965 
966         list_add(&page->lru, &area->free_list[migratetype]);
967         area->nr_free++;
968 }
969 
970 /* Used for pages not on another list */
971 static inline void add_to_free_list_tail(struct page *page, struct zone *zone,
972                                          unsigned int order, int migratetype)
973 {
974         struct free_area *area = &zone->free_area[order];
975 
976         list_add_tail(&page->lru, &area->free_list[migratetype]);
977         area->nr_free++;
978 }
979 
980 /*
981  * Used for pages which are on another list. Move the pages to the tail
982  * of the list - so the moved pages won't immediately be considered for
983  * allocation again (e.g., optimization for memory onlining).
984  */
985 static inline void move_to_free_list(struct page *page, struct zone *zone,
986                                      unsigned int order, int migratetype)
987 {
988         struct free_area *area = &zone->free_area[order];
989 
990         list_move_tail(&page->lru, &area->free_list[migratetype]);
991 }
992 
993 static inline void del_page_from_free_list(struct page *page, struct zone *zone,
994                                            unsigned int order)
995 {
996         /* clear reported state and update reported page count */
997         if (page_reported(page))
998                 __ClearPageReported(page);
999 
1000         list_del(&page->lru);
1001         __ClearPageBuddy(page);
1002         set_page_private(page, 0);
1003         zone->free_area[order].nr_free--;
1004 }
1005 
1006 /*
1007  * If this is not the largest possible page, check if the buddy
1008  * of the next-highest order is free. If it is, it's possible
1009  * that pages are being freed that will coalesce soon. If that is
1010  * happening, add the free page to the tail of the list
1011  * so it's less likely to be used soon and more likely to be merged
1012  * as a higher-order page.
1013  */
1014 static inline bool
1015 buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
1016                    struct page *page, unsigned int order)
1017 {
1018         struct page *higher_page, *higher_buddy;
1019         unsigned long combined_pfn;
1020 
1021         if (order >= MAX_ORDER - 2)
1022                 return false;
1023 
1024         combined_pfn = buddy_pfn & pfn;
1025         higher_page = page + (combined_pfn - pfn);
1026         buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
1027         higher_buddy = higher_page + (buddy_pfn - combined_pfn);
1028 
1029         return page_is_buddy(higher_page, higher_buddy, order + 1);
1030 }
1031 
1032 /*
1033  * Freeing function for a buddy system allocator.
1034  *
1035  * The concept of a buddy system is to maintain a direct-mapped table
1036  * (containing bit values) for memory blocks of various "orders".
1037  * The bottom level table contains the map for the smallest allocatable
1038  * units of memory (here, pages), and each level above it describes
1039  * pairs of units from the levels below, hence, "buddies".
1040  * At a high level, all that happens here is marking the table entry
1041  * at the bottom level available, and propagating the changes upward
1042  * as necessary, plus some accounting needed to play nicely with other
1043  * parts of the VM system.
1044  * At each level, we keep a list of pages, which are heads of contiguous
1045  * runs of free pages of length (1 << order) and marked with PageBuddy.
1046  * A page's order is recorded in the page_private(page) field.
1047  * So when we are allocating or freeing one, we can derive the state of the
1048  * other.  That is, if we allocate a small block, and both were
1049  * free, the remainder of the region must be split into blocks.
1050  * If a block is freed, and its buddy is also free, then this
1051  * triggers coalescing into a block of larger size.
1052  *
1053  * -- nyc
1054  */
1055 
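/*
 * Editor's note, worked example (not part of the original file): a block's
 * buddy is found by flipping one pfn bit, __find_buddy_pfn(pfn, order) ==
 * pfn ^ (1 << order), and the merged block starts at buddy_pfn & pfn.
 * E.g. for pfn 0x1234 at order 2 the buddy is 0x1230 and the combined
 * order-3 block starts at 0x1230.
 */
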
1056 static inline void __free_one_page(struct page *page,
1057                 unsigned long pfn,
1058                 struct zone *zone, unsigned int order,
1059                 int migratetype, fpi_t fpi_flags)
1060 {
1061         struct capture_control *capc = task_capc(zone);
1062         unsigned long buddy_pfn;
1063         unsigned long combined_pfn;
1064         unsigned int max_order;
1065         struct page *buddy;
1066         bool to_tail;
1067 
1068         max_order = min_t(unsigned int, MAX_ORDER - 1, pageblock_order);
1069 
1070         VM_BUG_ON(!zone_is_initialized(zone));
1071         VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
1072 
1073         VM_BUG_ON(migratetype == -1);
1074         if (likely(!is_migrate_isolate(migratetype)))
1075                 __mod_zone_freepage_state(zone, 1 << order, migratetype);
1076 
1077         VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
1078         VM_BUG_ON_PAGE(bad_range(zone, page), page);
1079 
1080 continue_merging:
1081         while (order < max_order) {
1082                 if (compaction_capture(capc, page, order, migratetype)) {
1083                         __mod_zone_freepage_state(zone, -(1 << order),
1084                                                                 migratetype);
1085                         return;
1086                 }
1087                 buddy_pfn = __find_buddy_pfn(pfn, order);
1088                 buddy = page + (buddy_pfn - pfn);
1089 
1090                 if (!page_is_buddy(page, buddy, order))
1091                         goto done_merging;
1092                 /*
1093                  * Our buddy is free or it is a CONFIG_DEBUG_PAGEALLOC guard page;
1094                  * merge with it and move up one order.
1095                  */
1096                 if (page_is_guard(buddy))
1097                         clear_page_guard(zone, buddy, order, migratetype);
1098                 else
1099                         del_page_from_free_list(buddy, zone, order);
1100                 combined_pfn = buddy_pfn & pfn;
1101                 page = page + (combined_pfn - pfn);
1102                 pfn = combined_pfn;
1103                 order++;
1104         }
1105         if (order < MAX_ORDER - 1) {
1106                 /* If we are here, it means order is >= pageblock_order.
1107                  * We want to prevent merge between freepages on isolate
1108                  * pageblock and normal pageblock. Without this, pageblock
1109                  * isolation could cause incorrect freepage or CMA accounting.
1110                  *
1111                  * We don't want to hit this code for the more frequent
1112                  * low-order merging.
1113                  */
1114                 if (unlikely(has_isolate_pageblock(zone))) {
1115                         int buddy_mt;
1116 
1117                         buddy_pfn = __find_buddy_pfn(pfn, order);
1118                         buddy = page + (buddy_pfn - pfn);
1119                         buddy_mt = get_pageblock_migratetype(buddy);
1120 
1121                         if (migratetype != buddy_mt
1122                                         && (is_migrate_isolate(migratetype) ||
1123                                                 is_migrate_isolate(buddy_mt)))
1124                                 goto done_merging;
1125                 }
1126                 max_order = order + 1;
1127                 goto continue_merging;
1128         }
1129 
1130 done_merging:
1131         set_buddy_order(page, order);
1132 
1133         if (fpi_flags & FPI_TO_TAIL)
1134                 to_tail = true;
1135         else if (is_shuffle_order(order))
1136                 to_tail = shuffle_pick_tail();
1137         else
1138                 to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);
1139 
1140         if (to_tail)
1141                 add_to_free_list_tail(page, zone, order, migratetype);
1142         else
1143                 add_to_free_list(page, zone, order, migratetype);
1144 
1145         /* Notify page reporting subsystem of freed page */
1146         if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY))
1147                 page_reporting_notify_free(order);
1148 }
1149 
1150 /*
1151  * A bad page could be due to a number of fields. Instead of multiple branches,
1152  * try and check multiple fields with one check. The caller must do a detailed
1153  * check if necessary.
1154  */
1155 static inline bool page_expected_state(struct page *page,
1156                                         unsigned long check_flags)
1157 {
1158         if (unlikely(atomic_read(&page->_mapcount) != -1))
1159                 return false;
1160 
1161         if (unlikely((unsigned long)page->mapping |
1162                         page_ref_count(page) |
1163 #ifdef CONFIG_MEMCG
1164                         page->memcg_data |
1165 #endif
1166                         (page->flags & check_flags)))
1167                 return false;
1168 
1169         return true;
1170 }
1171 
1172 static const char *page_bad_reason(struct page *page, unsigned long flags)
1173 {
1174         const char *bad_reason = NULL;
1175 
1176         if (unlikely(atomic_read(&page->_mapcount) != -1))
1177                 bad_reason = "nonzero mapcount";
1178         if (unlikely(page->mapping != NULL))
1179                 bad_reason = "non-NULL mapping";
1180         if (unlikely(page_ref_count(page) != 0))
1181                 bad_reason = "nonzero _refcount";
1182         if (unlikely(page->flags & flags)) {
1183                 if (flags == PAGE_FLAGS_CHECK_AT_PREP)
1184                         bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
1185                 else
1186                         bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
1187         }
1188 #ifdef CONFIG_MEMCG
1189         if (unlikely(page->memcg_data))
1190                 bad_reason = "page still charged to cgroup";
1191 #endif
1192         return bad_reason;
1193 }
1194 
1195 static void check_free_page_bad(struct page *page)
1196 {
1197         bad_page(page,
1198                  page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE));
1199 }
1200 
1201 static inline int check_free_page(struct page *page)
1202 {
1203         if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
1204                 return 0;
1205 
1206         /* Something has gone sideways, find it */
1207         check_free_page_bad(page);
1208         return 1;
1209 }
1210 
1211 static int free_tail_pages_check(struct page *head_page, struct page *page)
1212 {
1213         int ret = 1;
1214 
1215         /*
1216          * We rely on page->lru.next never having bit 0 set, unless the page
1217          * is PageTail(). Let's make sure that's true even for poisoned ->lru.
1218          */
1219         BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
1220 
1221         if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
1222                 ret = 0;
1223                 goto out;
1224         }
1225         switch (page - head_page) {
1226         case 1:
1227                 /* the first tail page: ->mapping may be compound_mapcount() */
1228                 if (unlikely(compound_mapcount(page))) {
1229                         bad_page(page, "nonzero compound_mapcount");
1230                         goto out;
1231                 }
1232                 break;
1233         case 2:
1234                 /*
1235                  * the second tail page: ->mapping is
1236                  * deferred_list.next -- ignore value.
1237                  */
1238                 break;
1239         default:
1240                 if (page->mapping != TAIL_MAPPING) {
1241                         bad_page(page, "corrupted mapping in tail page");
1242                         goto out;
1243                 }
1244                 break;
1245         }
1246         if (unlikely(!PageTail(page))) {
1247                 bad_page(page, "PageTail not set");
1248                 goto out;
1249         }
1250         if (unlikely(compound_head(page) != head_page)) {
1251                 bad_page(page, "compound_head not consistent");
1252                 goto out;
1253         }
1254         ret = 0;
1255 out:
1256         page->mapping = NULL;
1257         clear_compound_head(page);
1258         return ret;
1259 }
1260 
1261 static void kernel_init_free_pages(struct page *page, int numpages, bool zero_tags)
1262 {
1263         int i;
1264 
1265         if (zero_tags) {
1266                 for (i = 0; i < numpages; i++)
1267                         tag_clear_highpage(page + i);
1268                 return;
1269         }
1270 
1271         /* s390's use of memset() could override KASAN redzones. */
1272         kasan_disable_current();
1273         for (i = 0; i < numpages; i++) {
1274                 u8 tag = page_kasan_tag(page + i);
1275                 page_kasan_tag_reset(page + i);
1276                 clear_highpage(page + i);
1277                 page_kasan_tag_set(page + i, tag);
1278         }
1279         kasan_enable_current();
1280 }
1281 
1282 static __always_inline bool free_pages_prepare(struct page *page,
1283                         unsigned int order, bool check_free, fpi_t fpi_flags)
1284 {
1285         int bad = 0;
1286         bool skip_kasan_poison = should_skip_kasan_poison(page, fpi_flags);
1287 
1288         VM_BUG_ON_PAGE(PageTail(page), page);
1289 
1290         trace_mm_page_free(page, order);
1291 
1292         if (unlikely(PageHWPoison(page)) && !order) {
1293                 /*
1294                  * Do not let hwpoison pages hit pcplists/buddy
1295                  * Untie memcg state and reset page's owner
1296                  */
1297                 if (memcg_kmem_enabled() && PageMemcgKmem(page))
1298                         __memcg_kmem_uncharge_page(page, order);
1299                 reset_page_owner(page, order);
1300                 return false;
1301         }
1302 
1303         /*
1304          * Check tail pages before head page information is cleared to
1305          * avoid checking PageCompound for order-0 pages.
1306          */
1307         if (unlikely(order)) {
1308                 bool compound = PageCompound(page);
1309                 int i;
1310 
1311                 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
1312 
1313                 if (compound) {
1314                         ClearPageDoubleMap(page);
1315                         ClearPageHasHWPoisoned(page);
1316                 }
1317                 for (i = 1; i < (1 << order); i++) {
1318                         if (compound)
1319                                 bad += free_tail_pages_check(page, page + i);
1320                         if (unlikely(check_free_page(page + i))) {
1321                                 bad++;
1322                                 continue;
1323                         }
1324                         (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1325                 }
1326         }
1327         if (PageMappingFlags(page))
1328                 page->mapping = NULL;
1329         if (memcg_kmem_enabled() && PageMemcgKmem(page))
1330                 __memcg_kmem_uncharge_page(page, order);
1331         if (check_free)
1332                 bad += check_free_page(page);
1333         if (bad)
1334                 return false;
1335 
1336         page_cpupid_reset_last(page);
1337         page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1338         reset_page_owner(page, order);
1339 
1340         if (!PageHighMem(page)) {
1341                 debug_check_no_locks_freed(page_address(page),
1342                                            PAGE_SIZE << order);
1343                 debug_check_no_obj_freed(page_address(page),
1344                                            PAGE_SIZE << order);
1345         }
1346 
1347         kernel_poison_pages(page, 1 << order);
1348 
1349         /*
1350          * As memory initialization might be integrated into KASAN,
1351          * kasan_free_pages and kernel_init_free_pages must be
1352          * kept together to avoid discrepancies in behavior.
1353          *
1354          * With hardware tag-based KASAN, memory tags must be set before the
1355          * page becomes unavailable via debug_pagealloc or arch_free_page.
1356          */
1357         if (kasan_has_integrated_init()) {
1358                 if (!skip_kasan_poison)
1359                         kasan_free_pages(page, order);
1360         } else {
1361                 bool init = want_init_on_free();
1362 
1363                 if (init)
1364                         kernel_init_free_pages(page, 1 << order, false);
1365                 if (!skip_kasan_poison)
1366                         kasan_poison_pages(page, order, init);
1367         }
1368 
1369         /*
1370          * arch_free_page() can make the page's contents inaccessible.  s390
1371          * does this.  So nothing which can access the page's contents should
1372          * happen after this.
1373          */
1374         arch_free_page(page, order);
1375 
1376         debug_pagealloc_unmap_pages(page, 1 << order);
1377 
1378         return true;
1379 }
1380 
1381 #ifdef CONFIG_DEBUG_VM
1382 /*
1383  * With DEBUG_VM enabled, order-0 pages are checked immediately when being freed
1384  * to pcp lists. With debug_pagealloc also enabled, they are also rechecked when
1385  * moved from pcp lists to free lists.
1386  */
1387 static bool free_pcp_prepare(struct page *page, unsigned int order)
1388 {
1389         return free_pages_prepare(page, order, true, FPI_NONE);
1390 }
1391 
1392 static bool bulkfree_pcp_prepare(struct page *page)
1393 {
1394         if (debug_pagealloc_enabled_static())
1395                 return check_free_page(page);
1396         else
1397                 return false;
1398 }
1399 #else
1400 /*
1401  * With DEBUG_VM disabled, order-0 pages being freed are checked only when
1402  * moving from pcp lists to free list in order to reduce overhead. With
1403  * debug_pagealloc enabled, they are checked also immediately when being freed
1404  * to the pcp lists.
1405  */
1406 static bool free_pcp_prepare(struct page *page, unsigned int order)
1407 {
1408         if (debug_pagealloc_enabled_static())
1409                 return free_pages_prepare(page, order, true, FPI_NONE);
1410         else
1411                 return free_pages_prepare(page, order, false, FPI_NONE);
1412 }
1413 
1414 static bool bulkfree_pcp_prepare(struct page *page)
1415 {
1416         return check_free_page(page);
1417 }
1418 #endif /* CONFIG_DEBUG_VM */
1419 
1420 static inline void prefetch_buddy(struct page *page)
1421 {
1422         unsigned long pfn = page_to_pfn(page);
1423         unsigned long buddy_pfn = __find_buddy_pfn(pfn, 0);
1424         struct page *buddy = page + (buddy_pfn - pfn);
1425 
1426         prefetch(buddy);
1427 }
1428 
1429 /*
1430  * Frees a number of pages from the PCP lists
1431  * Assumes all pages on list are in same zone.
1432  * count is the number of pages to free.
1433  */
1434 static void free_pcppages_bulk(struct zone *zone, int count,
1435                                         struct per_cpu_pages *pcp)
1436 {
1437         int pindex = 0;
1438         int batch_free = 0;
1439         int nr_freed = 0;
1440         unsigned int order;
1441         int prefetch_nr = READ_ONCE(pcp->batch);
1442         bool isolated_pageblocks;
1443         struct page *page, *tmp;
1444         LIST_HEAD(head);
1445 
1446         /*
1447          * Ensure a proper count is passed; otherwise we would get stuck in
1448          * the while (list_empty(list)) loop below.
1449          */
1450         count = min(pcp->count, count);
1451         while (count > 0) {
1452                 struct list_head *list;
1453 
1454                 /*
1455                  * Remove pages from lists in a round-robin fashion. A
1456                  * batch_free count is maintained that is incremented when an
1457                  * empty list is encountered.  This is so more pages are freed
1458                  * off fuller lists instead of spinning excessively around empty
1459                  * lists
1460                  */
1461                 do {
1462                         batch_free++;
1463                         if (++pindex == NR_PCP_LISTS)
1464                                 pindex = 0;
1465                         list = &pcp->lists[pindex];
1466                 } while (list_empty(list));
1467 
1468                 /* This is the only non-empty list. Free them all. */
1469                 if (batch_free == NR_PCP_LISTS)
1470                         batch_free = count;
1471 
1472                 order = pindex_to_order(pindex);
1473                 BUILD_BUG_ON(MAX_ORDER >= (1<<NR_PCP_ORDER_WIDTH));
1474                 do {
1475                         page = list_last_entry(list, struct page, lru);
1476                         /* must delete to avoid corrupting pcp list */
1477                         list_del(&page->lru);
1478                         nr_freed += 1 << order;
1479                         count -= 1 << order;
1480 
1481                         if (bulkfree_pcp_prepare(page))
1482                                 continue;
1483 
1484                         /* Encode order with the migratetype */
1485                         page->index <<= NR_PCP_ORDER_WIDTH;
1486                         page->index |= order;
1487 
1488                         list_add_tail(&page->lru, &head);
1489 
1490                         /*
1491                          * We are going to put the page back to the global
1492                          * pool, prefetch its buddy to speed up later access
1493                          * under zone->lock. It is believed the overhead of
1494                          * an additional test and calculating buddy_pfn here
1495                          * can be offset by reduced memory latency later. To
1496                          * avoid excessive prefetching due to large count, only
1497                          * prefetch buddy for the first pcp->batch nr of pages.
1498                          */
1499                         if (prefetch_nr) {
1500                                 prefetch_buddy(page);
1501                                 prefetch_nr--;
1502                         }
1503                 } while (count > 0 && --batch_free && !list_empty(list));
1504         }
1505         pcp->count -= nr_freed;
1506 
1507         /*
1508          * local_lock_irq held so equivalent to spin_lock_irqsave for
1509          * both PREEMPT_RT and non-PREEMPT_RT configurations.
1510          */
1511         spin_lock(&zone->lock);
1512         isolated_pageblocks = has_isolate_pageblock(zone);
1513 
1514         /*
1515          * Use the safe version since, after __free_one_page(),
1516          * page->lru.next will no longer point to the original list.
1517          */
1518         list_for_each_entry_safe(page, tmp, &head, lru) {
1519                 int mt = get_pcppage_migratetype(page);
1520 
1521                 /* mt has been encoded with the order (see above) */
1522                 order = mt & NR_PCP_ORDER_MASK;
1523                 mt >>= NR_PCP_ORDER_WIDTH;
1524 
1525                 /* MIGRATE_ISOLATE page should not go to pcplists */
1526                 VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
1527                 /* Pageblock could have been isolated meanwhile */
1528                 if (unlikely(isolated_pageblocks))
1529                         mt = get_pageblock_migratetype(page);
1530 
1531                 __free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE);
1532                 trace_mm_page_pcpu_drain(page, order, mt);
1533         }
1534         spin_unlock(&zone->lock);
1535 }
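
/*
 * Illustrative sketch (not part of page_alloc.c): while pages sit on the
 * temporary "head" list above, free_pcppages_bulk() packs the order into the
 * low NR_PCP_ORDER_WIDTH bits of page->index underneath the pcp migratetype,
 * and unpacks both again under zone->lock.  A standalone model of that
 * packing; the 4-bit width and the migratetype value are demo assumptions:
 */
#include <assert.h>

#define DEMO_PCP_ORDER_WIDTH	4
#define DEMO_PCP_ORDER_MASK	((1 << DEMO_PCP_ORDER_WIDTH) - 1)

static unsigned long demo_pcp_pack(int mt, unsigned int order)
{
	return ((unsigned long)mt << DEMO_PCP_ORDER_WIDTH) | order;
}

static void demo_pcp_unpack(unsigned long idx, int *mt, unsigned int *order)
{
	*order = idx & DEMO_PCP_ORDER_MASK;	/* low bits: order */
	*mt = idx >> DEMO_PCP_ORDER_WIDTH;	/* high bits: migratetype */
}

static void demo_pcp_encoding(void)
{
	int mt;
	unsigned int order;

	demo_pcp_unpack(demo_pcp_pack(1, 3), &mt, &order);
	assert(mt == 1 && order == 3);		/* round-trips losslessly */
}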
1536 
1537 static void free_one_page(struct zone *zone,
1538                                 struct page *page, unsigned long pfn,
1539                                 unsigned int order,
1540                                 int migratetype, fpi_t fpi_flags)
1541 {
1542         unsigned long flags;
1543 
1544         spin_lock_irqsave(&zone->lock, flags);
1545         if (unlikely(has_isolate_pageblock(zone) ||
1546                 is_migrate_isolate(migratetype))) {
1547                 migratetype = get_pfnblock_migratetype(page, pfn);
1548         }
1549         __free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
1550         spin_unlock_irqrestore(&zone->lock, flags);
1551 }
1552 
1553 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
1554                                 unsigned long zone, int nid)
1555 {
1556         mm_zero_struct_page(page);
1557         set_page_links(page, zone, nid, pfn);
1558         init_page_count(page);
1559         page_mapcount_reset(page);
1560         page_cpupid_reset_last(page);
1561         page_kasan_tag_reset(page);
1562 
1563         INIT_LIST_HEAD(&page->lru);
1564 #ifdef WANT_PAGE_VIRTUAL
1565         /* The shift won't overflow because ZONE_NORMAL is below 4G. */
1566         if (!is_highmem_idx(zone))
1567                 set_page_address(page, __va(pfn << PAGE_SHIFT));
1568 #endif
1569 }
1570 
1571 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1572 static void __meminit init_reserved_page(unsigned long pfn)
1573 {
1574         pg_data_t *pgdat;
1575         int nid, zid;
1576 
1577         if (!early_page_uninitialised(pfn))
1578                 return;
1579 
1580         nid = early_pfn_to_nid(pfn);
1581         pgdat = NODE_DATA(nid);
1582 
1583         for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1584                 struct zone *zone = &pgdat->node_zones[zid];
1585 
1586                 if (zone_spans_pfn(zone, pfn))
1587                         break;
1588         }
1589         __init_single_page(pfn_to_page(pfn), pfn, zid, nid);
1590 }
1591 #else
1592 static inline void init_reserved_page(unsigned long pfn)
1593 {
1594 }
1595 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1596 
1597 /*
1598  * Initialised pages do not have PageReserved set. This function is
1599  * called for each range allocated by the bootmem allocator and
1600  * marks the pages PageReserved. The remaining valid pages are later
1601  * sent to the buddy page allocator.
1602  */
1603 void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
1604 {
1605         unsigned long start_pfn = PFN_DOWN(start);
1606         unsigned long end_pfn = PFN_UP(end);
1607 
1608         for (; start_pfn < end_pfn; start_pfn++) {
1609                 if (pfn_valid(start_pfn)) {
1610                         struct page *page = pfn_to_page(start_pfn);
1611 
1612                         init_reserved_page(start_pfn);
1613 
1614                         /* Avoid false-positive PageTail() */
1615                         INIT_LIST_HEAD(&page->lru);
1616 
1617                         /*
1618                          * no need for atomic set_bit because the struct
1619                          * page is not visible yet so nobody should
1620                          * access it yet.
1621                          */
1622                         __SetPageReserved(page);
1623                 }
1624         }
1625 }
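
/*
 * Illustrative sketch (not part of page_alloc.c): PFN_DOWN()/PFN_UP() round a
 * byte address down/up to a page frame number, so the loop above marks every
 * page that the [start, end) byte range touches, even partially, as reserved.
 * A minimal standalone model assuming a 4 KiB page size for the demo:
 */
#include <assert.h>

#define DEMO_PAGE_SHIFT	12
#define DEMO_PAGE_SIZE	(1UL << DEMO_PAGE_SHIFT)
#define DEMO_PFN_DOWN(x)	((x) >> DEMO_PAGE_SHIFT)
#define DEMO_PFN_UP(x)		(((x) + DEMO_PAGE_SIZE - 1) >> DEMO_PAGE_SHIFT)

static void demo_bootmem_pfn_range(void)
{
	/* A byte range [0x1800, 0x2800) straddles page frames 1 and 2 ... */
	assert(DEMO_PFN_DOWN(0x1800) == 1);
	assert(DEMO_PFN_UP(0x2800) == 3);
	/* ... so the loop would mark pfns 1 and 2 PageReserved. */
}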
1626 
1627 static void __free_pages_ok(struct page *page, unsigned int order,
1628                             fpi_t fpi_flags)
1629 {
1630         unsigned long flags;
1631         int migratetype;
1632         unsigned long pfn = page_to_pfn(page);
1633         struct zone *zone = page_zone(page);
1634 
1635         if (!free_pages_prepare(page, order, true, fpi_flags))
1636                 return;
1637 
1638         migratetype = get_pfnblock_migratetype(page, pfn);
1639 
1640         spin_lock_irqsave(&zone->lock, flags);
1641         if (unlikely(has_isolate_pageblock(zone) ||
1642                 is_migrate_isolate(migratetype))) {
1643                 migratetype = get_pfnblock_migratetype(page, pfn);
1644         }
1645         __free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
1646         spin_unlock_irqrestore(&zone->lock, flags);
1647 
1648         __count_vm_events(PGFREE, 1 << order);
1649 }
1650 
1651 void __free_pages_core(struct page *page, unsigned int order)
1652 {
1653         unsigned int nr_pages = 1 << order;
1654         struct page *p = page;
1655         unsigned int loop;
1656 
1657         /*
1658          * When initializing the memmap, __init_single_page() sets the refcount
1659          * of all pages to 1 ("allocated"/"not free"). We have to set the
1660          * refcount of all involved pages to 0.
1661          */
1662         prefetchw(p);
1663         for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
1664                 prefetchw(p + 1);
1665                 __ClearPageReserved(p);
1666                 set_page_count(p, 0);
1667         }
1668         __ClearPageReserved(p);
1669         set_page_count(p, 0);
1670 
1671         atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
1672 
1673         /*
1674          * Bypass PCP and place fresh pages right to the tail, primarily
1675          * relevant for memory onlining.
1676          */
1677         __free_pages_ok(page, order, FPI_TO_TAIL | FPI_SKIP_KASAN_POISON);
1678 }
1679 
1680 #ifdef CONFIG_NUMA
1681 
1682 /*
1683  * During memory init memblocks map pfns to nids. The search is expensive and
1684  * this caches recent lookups. The implementation of __early_pfn_to_nid
1685  * treats start/end as pfns.
1686  */
1687 struct mminit_pfnnid_cache {
1688         unsigned long last_start;
1689         unsigned long last_end;
1690         int last_nid;
1691 };
1692 
1693 static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
1694 
1695 /*
1696  * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
1697  */
1698 static int __meminit __early_pfn_to_nid(unsigned long pfn,
1699                                         struct mminit_pfnnid_cache *state)
1700 {
1701         unsigned long start_pfn, end_pfn;
1702         int nid;
1703 
1704         if (state->last_start <= pfn && pfn < state->last_end)
1705                 return state->last_nid;
1706 
1707         nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
1708         if (nid != NUMA_NO_NODE) {
1709                 state->last_start = start_pfn;
1710                 state->last_end = end_pfn;
1711                 state->last_nid = nid;
1712         }
1713 
1714         return nid;
1715 }
1716 
1717 int __meminit early_pfn_to_nid(unsigned long pfn)
1718 {
1719         static DEFINE_SPINLOCK(early_pfn_lock);
1720         int nid;
1721 
1722         spin_lock(&early_pfn_lock);
1723         nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
1724         if (nid < 0)
1725                 nid = first_online_node;
1726         spin_unlock(&early_pfn_lock);
1727 
1728         return nid;
1729 }
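
/*
 * Illustrative sketch (not part of page_alloc.c): the pfnnid cache above only
 * remembers the last [start, end) -> nid mapping reported by memblock, so
 * consecutive lookups within the same range skip the expensive search.  A
 * minimal standalone model of the hit test (demo_* names are made up):
 */
#include <assert.h>
#include <stdbool.h>

struct demo_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

static bool demo_cache_hit(const struct demo_pfnnid_cache *c,
			   unsigned long pfn, int *nid)
{
	if (c->last_start <= pfn && pfn < c->last_end) {
		*nid = c->last_nid;	/* cheap path: no memblock search */
		return true;
	}
	return false;			/* caller must search and refill */
}

static void demo_pfnnid_lookup(void)
{
	struct demo_pfnnid_cache c = {
		.last_start = 0x100, .last_end = 0x200, .last_nid = 1,
	};
	int nid;

	assert(demo_cache_hit(&c, 0x180, &nid) && nid == 1);
	assert(!demo_cache_hit(&c, 0x200, &nid));	/* end is exclusive */
}
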
1730 #endif /* CONFIG_NUMA */
1731 
1732 void __init memblock_free_pages(struct page *page, unsigned long pfn,
1733                                                         unsigned int order)
1734 {
1735         if (early_page_uninitialised(pfn))
1736                 return;
1737         __free_pages_core(page, order);
1738 }
1739 
1740 /*
1741  * Check that the whole (or subset of) a pageblock given by the interval of
1742  * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1743  * with the migration or free compaction scanner.
1744  *
1745  * Return struct page pointer of start_pfn, or NULL if checks were not passed.
1746  *
1747  * It's possible on some configurations to have a setup like node0 node1 node0
1748  * i.e. it's possible that not all pages within a zone's range of pages
1749  * belong to a single zone. We assume that a border between node0 and node1
1750  * can occur within a single pageblock, but not a node0 node1 node0
1751  * interleaving within a single pageblock. It is therefore sufficient to check
1752  * the first and last page of a pageblock and avoid checking each individual
1753  * page in a pageblock.
1754  */
1755 struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1756                                      unsigned long end_pfn, struct zone *zone)
1757 {
1758         struct page *start_page;
1759         struct page *end_page;
1760 
1761         /* end_pfn is one past the range we are checking */
1762         end_pfn--;
1763 
1764         if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
1765                 return NULL;
1766 
1767         start_page = pfn_to_online_page(start_pfn);
1768         if (!start_page)
1769                 return NULL;
1770 
1771         if (page_zone(start_page) != zone)
1772                 return NULL;
1773 
1774         end_page = pfn_to_page(end_pfn);
1775 
1776         /* This gives shorter code than deriving page_zone(end_page) */
1777         if (page_zone_id(start_page) != page_zone_id(end_page))
1778                 return NULL;
1779 
1780         return start_page;
1781 }
1782 
1783 void set_zone_contiguous(struct zone *zone)
1784 {
1785         unsigned long block_start_pfn = zone->zone_start_pfn;
1786         unsigned long block_end_pfn;
1787 
1788         block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
1789         for (; block_start_pfn < zone_end_pfn(zone);
1790                         block_start_pfn = block_end_pfn,
1791                          block_end_pfn += pageblock_nr_pages) {
1792 
1793                 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
1794 
1795                 if (!__pageblock_pfn_to_page(block_start_pfn,
1796                                              block_end_pfn, zone))
1797                         return;
1798                 cond_resched();
1799         }
1800 
1801         /* We confirm that there is no hole */
1802         zone->contiguous = true;
1803 }
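
/*
 * Illustrative sketch (not part of page_alloc.c): the contiguity walk above
 * rounds the (possibly unaligned) zone start up to the next pageblock
 * boundary with ALIGN(), advances one pageblock per iteration and clamps the
 * last block with min().  A standalone model that just counts the blocks a
 * walk would visit; the pageblock size and zone bounds are demo values:
 */
#include <assert.h>

#define DEMO_ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static void demo_pageblock_walk(void)
{
	unsigned long pageblock_nr = 512;
	unsigned long zone_start = 1000, zone_end = 2600;
	unsigned long start = zone_start;
	unsigned long end = DEMO_ALIGN(start + 1, pageblock_nr);
	unsigned long blocks = 0;

	for (; start < zone_end; start = end, end += pageblock_nr) {
		if (end > zone_end)
			end = zone_end;		/* min(end, zone_end(zone)) */
		blocks++;			/* the block would be checked here */
	}
	/* Blocks: [1000,1024) [1024,1536) [1536,2048) [2048,2560) [2560,2600) */
	assert(blocks == 5);
}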
1804 
1805 void clear_zone_contiguous(struct zone *zone)
1806 {
1807         zone->contiguous = false;
1808 }
1809 
1810 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1811 static void __init deferred_free_range(unsigned long pfn,
1812                                        unsigned long nr_pages)
1813 {
1814         struct page *page;
1815         unsigned long i;
1816 
1817         if (!nr_pages)
1818                 return;
1819 
1820         page = pfn_to_page(pfn);
1821 
1822         /* Free a large naturally-aligned chunk if possible */
1823         if (nr_pages == pageblock_nr_pages &&
1824             (pfn & (pageblock_nr_pages - 1)) == 0) {
1825                 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1826                 __free_pages_core(page, pageblock_order);
1827                 return;
1828         }
1829 
1830         for (i = 0; i < nr_pages; i++, page++, pfn++) {
1831                 if ((pfn & (pageblock_nr_pages - 1)) == 0)
1832                         set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1833                 __free_pages_core(page, 0);
1834         }
1835 }
1836 
1837 /* Completion tracking for deferred_init_memmap() threads */
1838 static atomic_t pgdat_init_n_undone __initdata;
1839 static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1840 
1841 static inline void __init pgdat_init_report_one_done(void)
1842 {
1843         if (atomic_dec_and_test(&pgdat_init_n_undone))
1844                 complete(&pgdat_init_all_done_comp);
1845 }
1846 
1847 /*
1848  * Returns true if page needs to be initialized or freed to buddy allocator.
1849  *
1850  * First we check if pfn is valid on architectures where it is possible to have
1851  * holes within pageblock_nr_pages. On systems where it is not possible, this
1852  * function is optimized out.
1853  *
1854  * Then, we check if the current large page is valid by checking only the
1855  * validity of its head pfn.
1856  */
1857 static inline bool __init deferred_pfn_valid(unsigned long pfn)
1858 {
1859         if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn))
1860                 return false;
1861         return true;
1862 }
1863 
1864 /*
1865  * Free pages to buddy allocator. Try to free aligned pages in
1866  * pageblock_nr_pages sizes.
1867  */
1868 static void __init deferred_free_pages(unsigned long pfn,
1869                                        unsigned long end_pfn)
1870 {
1871         unsigned long nr_pgmask = pageblock_nr_pages - 1;
1872         unsigned long nr_free = 0;
1873 
1874         for (; pfn < end_pfn; pfn++) {
1875                 if (!deferred_pfn_valid(pfn)) {
1876                         deferred_free_range(pfn - nr_free, nr_free);
1877                         nr_free = 0;
1878                 } else if (!(pfn & nr_pgmask)) {
1879                         deferred_free_range(pfn - nr_free, nr_free);
1880                         nr_free = 1;
1881                 } else {
1882                         nr_free++;
1883                 }
1884         }
1885         /* Free the last block of pages to allocator */
1886         deferred_free_range(pfn - nr_free, nr_free);
1887 }
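
/*
 * Illustrative sketch (not part of page_alloc.c): deferred_free_pages() above
 * batches consecutive valid pfns in nr_free and flushes the run whenever it
 * meets an invalid pfn or a pageblock boundary, so naturally aligned
 * pageblocks can later be freed as one high-order chunk.  A standalone model
 * that records the flushed (start, length) runs; the pageblock size of 8 and
 * the "pfn 11 is a hole" rule are demo assumptions only:
 */
#include <assert.h>
#include <stdbool.h>

#define DEMO_PAGEBLOCK	8UL

static unsigned long demo_runs[8][2];
static int demo_nr_runs;

static bool demo_pfn_valid(unsigned long pfn)
{
	return pfn != 11;			/* pretend pfn 11 is a hole */
}

static void demo_flush_run(unsigned long pfn, unsigned long nr)
{
	if (nr) {
		demo_runs[demo_nr_runs][0] = pfn;
		demo_runs[demo_nr_runs][1] = nr;
		demo_nr_runs++;
	}
}

static void demo_deferred_free(unsigned long pfn, unsigned long end_pfn)
{
	unsigned long mask = DEMO_PAGEBLOCK - 1;
	unsigned long nr_free = 0;

	for (; pfn < end_pfn; pfn++) {
		if (!demo_pfn_valid(pfn)) {
			demo_flush_run(pfn - nr_free, nr_free);
			nr_free = 0;
		} else if (!(pfn & mask)) {	/* pageblock boundary */
			demo_flush_run(pfn - nr_free, nr_free);
			nr_free = 1;
		} else {
			nr_free++;
		}
	}
	demo_flush_run(pfn - nr_free, nr_free);	/* trailing run */
}

static void demo_deferred_free_example(void)
{
	demo_deferred_free(6, 20);
	/* Runs flushed: {6,2} {8,3} {12,4} {16,4}. */
	assert(demo_nr_runs == 4);
	assert(demo_runs[1][0] == 8 && demo_runs[1][1] == 3);
}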
1888 
1889 /*
1890  * Initialize struct pages.  We minimize pfn page lookups and scheduler checks
1891  * by performing it only once every pageblock_nr_pages.
1892  * Return number of pages initialized.
1893  */
1894 static unsigned long  __init deferred_init_pages(struct zone *zone,
1895                                                  unsigned long pfn,
1896                                                  unsigned long end_pfn)
1897 {
1898         unsigned long nr_pgmask = pageblock_nr_pages - 1;
1899         int nid = zone_to_nid(zone);
1900         unsigned long nr_pages = 0;
1901         int zid = zone_idx(zone);
1902         struct page *page = NULL;
1903 
1904         for (; pfn < end_pfn; pfn++) {
1905                 if (!deferred_pfn_valid(pfn)) {
1906                         page = NULL;
1907                         continue;
1908                 } else if (!page || !(pfn & nr_pgmask)) {
1909                         page = pfn_to_page(pfn);
1910                 } else {
1911                         page++;
1912                 }
1913                 __init_single_page(page, pfn, zid, nid);
1914                 nr_pages++;
1915         }
1916         return nr_pages;
1917 }
1918 
1919 /*
1920  * This function is meant to pre-load the iterator for the zone init.
1921  * Specifically it walks through the ranges until we are caught up to the
1922  * first_init_pfn value and exits there. If we never encounter the value we
1923  * return false indicating there are no valid ranges left.
1924  */
1925 static bool __init
1926 deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
1927                                     unsigned long *spfn, unsigned long *epfn,
1928                                     unsigned long first_init_pfn)
1929 {
1930         u64 j;
1931 
1932         /*
1933          * Start out by walking through the ranges in this zone that have
1934          * already been initialized. We don't need to do anything with them
1935          * so we just need to flush them out of the system.
1936          */
1937         for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) {
1938                 if (*epfn <= first_init_pfn)
1939                         continue;
1940                 if (*spfn < first_init_pfn)
1941                         *spfn = first_init_pfn;
1942                 *i = j;
1943                 return true;
1944         }
1945 
1946         return false;
1947 }
1948 
1949 /*
1950  * Initialize and free pages. We do it in two loops: first we initialize
1951  * struct page, then free to buddy allocator, because while we are
1952  * freeing pages we can access pages that are ahead (computing buddy
1953  * page in __free_one_page()).
1954  *
1955  * In order to try and keep some memory in the cache we have the loop
1956  * broken along max page order boundaries. This way we will not cause
1957  * any issues with the buddy page computation.
1958  */
1959 static unsigned long __init
1960 deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
1961                        unsigned long *end_pfn)
1962 {
1963         unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);
1964         unsigned long spfn = *start_pfn, epfn = *end_pfn;
1965         unsigned long nr_pages = 0;
1966         u64 j = *i;
1967 
1968         /* First we loop through and initialize the page values */
1969         for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) {
1970                 unsigned long t;
1971 
1972                 if (mo_pfn <= *start_pfn)
1973                         break;
1974 
1975                 t = min(mo_pfn, *end_pfn);
1976                 nr_pages += deferred_init_pages(zone, *start_pfn, t);
1977 
1978                 if (mo_pfn < *end_pfn) {
1979                         *start_pfn = mo_pfn;
1980                         break;
1981                 }
1982         }
1983 
1984         /* Reset values and now loop through freeing pages as needed */
1985         swap(j, *i);
1986 
1987         for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) {
1988                 unsigned long t;
1989 
1990                 if (mo_pfn <= spfn)
1991                         break;
1992 
1993                 t = min(mo_pfn, epfn);
1994                 deferred_free_pages(spfn, t);
1995 
1996                 if (mo_pfn <= epfn)
1997                         break;
1998         }
1999 
2000         return nr_pages;
2001 }
2002 
2003 static void __init
2004 deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
2005                            void *arg)
2006 {
2007         unsigned long spfn, epfn;
2008         struct zone *zone = arg;
2009         u64 i;
2010 
2011         deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);
2012 
2013         /*
2014          * Initialize and free pages in MAX_ORDER sized increments so that we
2015          * can avoid introducing any issues with the buddy allocator.
2016          */
2017         while (spfn < end_pfn) {
2018                 deferred_init_maxorder(&i, zone, &spfn, &epfn);
2019                 cond_resched();
2020         }
2021 }
2022 
2023 /* An arch may override for more concurrency. */
2024 __weak int __init
2025 deferred_page_init_max_threads(const struct cpumask *node_cpumask)
2026 {
2027         return 1;
2028 }
2029 
2030 /* Initialise remaining memory on a node */
2031 static int __init deferred_init_memmap(void *data)
2032 {
2033         pg_data_t *pgdat = data;
2034         const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
2035         unsigned long spfn = 0, epfn = 0;
2036         unsigned long first_init_pfn, flags;
2037         unsigned long start = jiffies;
2038         struct zone *zone;
2039         int zid, max_threads;
2040         u64 i;
2041 
2042         /* Bind memory initialisation thread to a local node if possible */
2043         if (!cpumask_empty(cpumask))
2044                 set_cpus_allowed_ptr(current, cpumask);
2045 
2046         pgdat_resize_lock(pgdat, &flags);
2047         first_init_pfn = pgdat->first_deferred_pfn;
2048         if (first_init_pfn == ULONG_MAX) {
2049                 pgdat_resize_unlock(pgdat, &flags);
2050                 pgdat_init_report_one_done();
2051                 return 0;
2052         }
2053 
2054         /* Sanity check boundaries */
2055         BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
2056         BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
2057         pgdat->first_deferred_pfn = ULONG_MAX;
2058 
2059         /*
2060          * Once we unlock here, the zone cannot be grown anymore, thus if an
2061          * interrupt thread must allocate this early in boot, zone must be
2062          * pre-grown prior to start of deferred page initialization.
2063          */
2064         pgdat_resize_unlock(pgdat, &flags);
2065 
2066         /* Only the highest zone is deferred so find it */
2067         for (zid = 0; zid < MAX_NR_ZONES; zid++) {
2068                 zone = pgdat->node_zones + zid;
2069                 if (first_init_pfn < zone_end_pfn(zone))
2070                         break;
2071         }
2072 
2073         /* If the zone is empty somebody else may have cleared out the zone */
2074         if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2075                                                  first_init_pfn))
2076                 goto zone_empty;
2077 
2078         max_threads = deferred_page_init_max_threads(cpumask);
2079 
2080         while (spfn < epfn) {
2081                 unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION);
2082                 struct padata_mt_job job = {
2083                         .thread_fn   = deferred_init_memmap_chunk,
2084                         .fn_arg      = zone,
2085                         .start       = spfn,
2086                         .size        = epfn_align - spfn,
2087                         .align       = PAGES_PER_SECTION,
2088                         .min_chunk   = PAGES_PER_SECTION,
2089                         .max_threads = max_threads,
2090                 };
2091 
2092                 padata_do_multithreaded(&job);
2093                 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2094                                                     epfn_align);
2095         }
2096 zone_empty:
2097         /* Sanity check that the next zone really is unpopulated */
2098         WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
2099 
2100         pr_info("node %d deferred pages initialised in %ums\n",
2101                 pgdat->node_id, jiffies_to_msecs(jiffies - start));
2102 
2103         pgdat_init_report_one_done();
2104         return 0;
2105 }
2106 
2107 /*
2108  * If this zone has deferred pages, try to grow it by initializing enough
2109  * deferred pages to satisfy the allocation specified by order, rounded up to
2110  * the nearest PAGES_PER_SECTION boundary.  So we're adding memory in increments
2111  * of SECTION_SIZE bytes by initializing struct pages in increments of
2112  * PAGES_PER_SECTION * sizeof(struct page) bytes.
2113  *
2114  * Return true when zone was grown, otherwise return false. We return true even
2115  * when we grow less than requested, to let the caller decide if there are
2116  * enough pages to satisfy the allocation.
2117  *
2118  * Note: We use noinline because this function is needed only during boot, and
2119  * it is called from a __ref function _deferred_grow_zone. This way we are
2120  * making sure that it is not inlined into permanent text section.
2121  */
2122 static noinline bool __init
2123 deferred_grow_zone(struct zone *zone, unsigned int order)
2124 {
2125         unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
2126         pg_data_t *pgdat = zone->zone_pgdat;
2127         unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
2128         unsigned long spfn, epfn, flags;
2129         unsigned long nr_pages = 0;
2130         u64 i;
2131 
2132         /* Only the last zone may have deferred pages */
2133         if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
2134                 return false;
2135 
2136         pgdat_resize_lock(pgdat, &flags);
2137 
2138         /*
2139          * If someone grew this zone while we were waiting for spinlock, return
2140          * true, as there might be enough pages already.
2141          */
2142         if (first_deferred_pfn != pgdat->first_deferred_pfn) {
2143                 pgdat_resize_unlock(pgdat, &flags);
2144                 return true;
2145         }
2146 
2147         /* If the zone is empty somebody else may have cleared out the zone */
2148         if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2149                                                  first_deferred_pfn)) {
2150                 pgdat->first_deferred_pfn = ULONG_MAX;
2151                 pgdat_resize_unlock(pgdat, &flags);
2152                 /* Retry only once. */
2153                 return first_deferred_pfn != ULONG_MAX;
2154         }
2155 
2156         /*
2157          * Initialize and free pages in MAX_ORDER sized increments so
2158          * that we can avoid introducing any issues with the buddy
2159          * allocator.
2160          */
2161         while (spfn < epfn) {
2162                 /* update our first deferred PFN for this section */
2163                 first_deferred_pfn = spfn;
2164 
2165                 nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
2166                 touch_nmi_watchdog();
2167 
2168                 /* We should only stop along section boundaries */
2169                 if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION)
2170                         continue;
2171 
2172                 /* If our quota has been met we can stop here */
2173                 if (nr_pages >= nr_pages_needed)
2174                         break;
2175         }
2176 
2177         pgdat->first_deferred_pfn = spfn;
2178         pgdat_resize_unlock(pgdat, &flags);
2179 
2180         return nr_pages > 0;
2181 }
2182 
2183 /*
2184  * deferred_grow_zone() is __init, but it is called from
2185  * get_page_from_freelist() during early boot until deferred_pages permanently
2186  * disables this call. This is why we have the __ref wrapper, to avoid a
2187  * section mismatch warning and to ensure that the function body gets unloaded.
2188  */
2189 static bool __ref
2190 _deferred_grow_zone(struct zone *zone, unsigned int order)
2191 {
2192         return deferred_grow_zone(zone, order);
2193 }
2194 
2195 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
2196 
2197 void __init page_alloc_init_late(void)
2198 {
2199         struct zone *zone;
2200         int nid;
2201 
2202 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
2203 
2204         /* There will be num_node_state(N_MEMORY) threads */
2205         atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
2206         for_each_node_state(nid, N_MEMORY) {
2207                 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
2208         }
2209 
2210         /* Block until all are initialised */
2211         wait_for_completion(&pgdat_init_all_done_comp);
2212 
2213         /*
2214          * We initialized the rest of the deferred pages.  Permanently disable
2215          * on-demand struct page initialization.
2216          */
2217         static_branch_disable(&deferred_pages);
2218 
2219         /* Reinit limits that are based on free pages after the kernel is up */
2220         files_maxfiles_init();
2221 #endif
2222 
2223         buffer_init();
2224 
2225         /* Discard memblock private memory */
2226         memblock_discard();
2227 
2228         for_each_node_state(nid, N_MEMORY)
2229                 shuffle_free_memory(NODE_DATA(nid));
2230 
2231         for_each_populated_zone(zone)
2232                 set_zone_contiguous(zone);
2233 }
2234 
2235 #ifdef CONFIG_CMA
2236 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
2237 void __init init_cma_reserved_pageblock(struct page *page)
2238 {
2239         unsigned i = pageblock_nr_pages;
2240         struct page *p = page;
2241 
2242         do {
2243                 __ClearPageReserved(p);
2244                 set_page_count(p, 0);
2245         } while (++p, --i);
2246 
2247         set_pageblock_migratetype(page, MIGRATE_CMA);
2248 
2249         if (pageblock_order >= MAX_ORDER) {
2250                 i = pageblock_nr_pages;
2251                 p = page;
2252                 do {
2253                         set_page_refcounted(p);
2254                         __free_pages(p, MAX_ORDER - 1);
2255                         p += MAX_ORDER_NR_PAGES;
2256                 } while (i -= MAX_ORDER_NR_PAGES);
2257         } else {
2258                 set_page_refcounted(page);
2259                 __free_pages(page, pageblock_order);
2260         }
2261 
2262         adjust_managed_page_count(page, pageblock_nr_pages);
2263         page_zone(page)->cma_pages += pageblock_nr_pages;
2264 }
2265 #endif
2266 
2267 /*
2268  * The order of subdivision here is critical for the IO subsystem.
2269  * Please do not alter this order without good reasons and regression
2270  * testing. Specifically, as large blocks of memory are subdivided,
2271  * the order in which smaller blocks are delivered depends on the order
2272  * they're subdivided in this function. This is the primary factor
2273  * influencing the order in which pages are delivered to the IO
2274  * subsystem according to empirical testing, and this is also justified
2275  * by considering the behavior of a buddy system containing a single
2276  * large block of memory acted on by a series of small allocations.
2277  * This behavior is a critical factor in sglist merging's success.
2278  *
2279  * -- nyc
2280  */
2281 static inline void expand(struct zone *zone, struct page *page,
2282         int low, int high, int migratetype)
2283 {
2284         unsigned long size = 1 << high;
2285 
2286         while (high > low) {
2287                 high--;
2288                 size >>= 1;
2289                 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
2290 
2291                 /*
2292                  * Mark as guard page(s); this allows them to be merged
2293                  * back into the allocator when the buddy is freed.
2294                  * Corresponding page table entries will not be touched;
2295                  * the pages will stay not present in the virtual address space.
2296                  */
2297                 if (set_page_guard(zone, &page[size], high, migratetype))
2298                         continue;
2299 
2300                 add_to_free_list(&page[size], zone, high, migratetype);
2301                 set_buddy_order(&page[size], high);
2302         }
2303 }
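
/*
 * Illustrative sketch (not part of page_alloc.c): expand() above repeatedly
 * halves an order-'high' block until only the requested order-'low' head
 * remains allocated, parking each split-off upper half on the free list of
 * its own order.  A standalone model recording which (offset, order) chunks
 * would be returned to the free lists (demo_* names are made up):
 */
#include <assert.h>

static void demo_expand(unsigned int low, unsigned int high,
			unsigned long offsets[], unsigned int orders[], int *nr)
{
	unsigned long size = 1UL << high;

	*nr = 0;
	while (high > low) {
		high--;
		size >>= 1;
		offsets[*nr] = size;	/* upper half starts 'size' pages in */
		orders[*nr] = high;	/* ... and is an order-'high' buddy */
		(*nr)++;
	}
}

static void demo_expand_order2_from_order5(void)
{
	unsigned long off[8];
	unsigned int ord[8];
	int nr;

	demo_expand(2, 5, off, ord, &nr);
	/* Freed splits: order-4 at +16, order-3 at +8, order-2 at +4 pages. */
	assert(nr == 3);
	assert(ord[0] == 4 && off[0] == 16);
	assert(ord[2] == 2 && off[2] == 4);
}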
2304 
2305 static void check_new_page_bad(struct page *page)
2306 {
2307         if (unlikely(page->flags & __PG_HWPOISON)) {
2308                 /* Don't complain about hwpoisoned pages */
2309                 page_mapcount_reset(page); /* remove PageBuddy */
2310                 return;
2311         }
2312 
2313         bad_page(page,
2314                  page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP));
2315 }
2316 
2317 /*
2318  * This page is about to be returned from the page allocator
2319  */
2320 static inline int check_new_page(struct page *page)
2321 {
2322         if (likely(page_expected_state(page,
2323                                 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
2324                 return 0;
2325 
2326         check_new_page_bad(page);
2327         return 1;
2328 }
2329 
2330 #ifdef CONFIG_DEBUG_VM
2331 /*
2332  * With DEBUG_VM enabled, order-0 pages are checked for expected state when
2333  * being allocated from pcp lists. With debug_pagealloc also enabled, they are
2334  * also checked when pcp lists are refilled from the free lists.
2335  */
2336 static inline bool check_pcp_refill(struct page *page)
2337 {
2338         if (debug_pagealloc_enabled_static())
2339                 return check_new_page(page);
2340         else
2341                 return false;
2342 }
2343 
2344 static inline bool check_new_pcp(struct page *page)
2345 {
2346         return check_new_page(page);
2347 }
2348 #else
2349 /*
2350  * With DEBUG_VM disabled, free order-0 pages are checked for expected state
2351  * when pcp lists are being refilled from the free lists. With debug_pagealloc
2352  * enabled, they are also checked when being allocated from the pcp lists.
2353  */
2354 static inline bool check_pcp_refill(struct page *page)
2355 {
2356         return check_new_page(page);
2357 }
2358 static inline bool check_new_pcp(struct page *page)
2359 {
2360         if (debug_pagealloc_enabled_static())
2361                 return check_new_page(page);
2362         else
2363                 return false;
2364 }
2365 #endif /* CONFIG_DEBUG_VM */
2366 
2367 static bool check_new_pages(struct page *page, unsigned int order)
2368 {
2369         int i;
2370         for (i = 0; i < (1 << order); i++) {
2371                 struct page *p = page + i;
2372 
2373                 if (unlikely(check_new_page(p)))
2374                         return true;
2375         }
2376 
2377         return false;
2378 }
2379 
2380 inline void post_alloc_hook(struct page *page, unsigned int order,
2381                                 gfp_t gfp_flags)
2382 {
2383         set_page_private(page, 0);
2384         set_page_refcounted(page);
2385 
2386         arch_alloc_page(page, order);
2387         debug_pagealloc_map_pages(page, 1 << order);
2388 
2389         /*
2390          * Page unpoisoning must happen before memory initialization.
2391          * Otherwise, the poison pattern will be overwritten for __GFP_ZERO
2392          * allocations and the page unpoisoning code will complain.
2393          */
2394         kernel_unpoison_pages(page, 1 << order);
2395 
2396         /*
2397          * As memory initialization might be integrated into KASAN,
2398          * kasan_alloc_pages and kernel_init_free_pages must be
2399          * kept together to avoid discrepancies in behavior.
2400          */
2401         if (kasan_has_integrated_init()) {
2402                 kasan_alloc_pages(page, order, gfp_flags);
2403         } else {
2404                 bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
2405 
2406                 kasan_unpoison_pages(page, order, init);
2407                 if (init)
2408                         kernel_init_free_pages(page, 1 << order,
2409                                                gfp_flags & __GFP_ZEROTAGS);
2410         }
2411 
2412         set_page_owner(page, order, gfp_flags);
2413 }
2414 
2415 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
2416                                                         unsigned int alloc_flags)
2417 {
2418         post_alloc_hook(page, order, gfp_flags);
2419 
2420         if (order && (gfp_flags & __GFP_COMP))
2421                 prep_compound_page(page, order);
2422 
2423         /*
2424          * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
2425          * allocate the page. The expectation is that the caller is taking
2426          * steps that will free more memory. The caller should avoid the page
2427          * being used for !PFMEMALLOC purposes.
2428          */
2429         if (alloc_flags & ALLOC_NO_WATERMARKS)
2430                 set_page_pfmemalloc(page);
2431         else
2432                 clear_page_pfmemalloc(page);
2433 }
2434 
2435 /*
2436  * Go through the free lists for the given migratetype and remove
2437  * the smallest available page from the freelists
2438  */
2439 static __always_inline
2440 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
2441                                                 int migratetype)
2442 {
2443         unsigned int current_order;
2444         struct free_area *area;
2445         struct page *page;
2446 
2447         /* Find a page of the appropriate size in the preferred list */
2448         for (current_order = order; current_order < MAX_ORDER; ++current_order) {
2449                 area = &(zone->free_area[current_order]);
2450                 page = get_page_from_free_area(area, migratetype);
2451                 if (!page)
2452                         continue;
2453                 del_page_from_free_list(page, zone, current_order);
2454                 expand(zone, page, order, current_order, migratetype);
2455                 set_pcppage_migratetype(page, migratetype);
2456                 return page;
2457         }
2458 
2459         return NULL;
2460 }
2461 
2462 
2463 /*
2464  * This array describes the order in which free lists are fallen back on
2465  * when the free lists for the desired migratetype are depleted.
2466  */
2467 static int fallbacks[MIGRATE_TYPES][3] = {
2468         [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_TYPES },
2469         [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
2470         [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_TYPES },
2471 #ifdef CONFIG_CMA
2472         [MIGRATE_CMA]         = { MIGRATE_TYPES }, /* Never used */
2473 #endif
2474 #ifdef CONFIG_MEMORY_ISOLATION
2475         [MIGRATE_ISOLATE]     = { MIGRATE_TYPES }, /* Never used */
2476 #endif
2477 };
2478 
2479 #ifdef CONFIG_CMA
2480 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
2481                                         unsigned int order)
2482 {
2483         return __rmqueue_smallest(zone, order, MIGRATE_CMA);
2484 }
2485 #else
2486 static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
2487                                         unsigned int order) { return NULL; }
2488 #endif
2489 
2490 /*
2491  * Move the free pages in a range to the freelist tail of the requested type.
2492  * Note that start_pfn and end_pfn are not necessarily aligned on a pageblock
2493  * boundary. If alignment is required, use move_freepages_block()
2494  */
2495 static int move_freepages(struct zone *zone,
2496                           unsigned long start_pfn, unsigned long end_pfn,
2497                           int migratetype, int *num_movable)
2498 {
2499         struct page *page;
2500         unsigned long pfn;
2501         unsigned int order;
2502         int pages_moved = 0;
2503 
2504         for (pfn = start_pfn; pfn <= end_pfn;) {
2505                 page = pfn_to_page(pfn);
2506                 if (!PageBuddy(page)) {
2507                         /*
2508                          * We assume that pages that could be isolated for
2509                          * migration are movable. But we don't actually try
2510                          * isolating, as that would be expensive.
2511                          */
2512                         if (num_movable &&
2513                                         (PageLRU(page) || __PageMovable(page)))
2514                                 (*num_movable)++;
2515                         pfn++;
2516                         continue;
2517                 }
2518 
2519                 /* Make sure we are not inadvertently changing nodes */
2520                 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
2521                 VM_BUG_ON_PAGE(page_zone(page) != zone, page);
2522 
2523                 order = buddy_order(page);
2524                 move_to_free_list(page, zone, order, migratetype);
2525                 pfn += 1 << order;
2526                 pages_moved += 1 << order;
2527         }
2528 
2529         return pages_moved;
2530 }
2531 
2532 int move_freepages_block(struct zone *zone, struct page *page,
2533                                 int migratetype, int *num_movable)
2534 {
2535         unsigned long start_pfn, end_pfn, pfn;
2536 
2537         if (num_movable)
2538                 *num_movable = 0;
2539 
2540         pfn = page_to_pfn(page);
2541         start_pfn = pfn & ~(pageblock_nr_pages - 1);
2542         end_pfn = start_pfn + pageblock_nr_pages - 1;
2543 
2544         /* Do not cross zone boundaries */
2545         if (!zone_spans_pfn(zone, start_pfn))
2546                 start_pfn = pfn;
2547         if (!zone_spans_pfn(zone, end_pfn))
2548                 return 0;
2549 
2550         return move_freepages(zone, start_pfn, end_pfn, migratetype,
2551                                                                 num_movable);
2552 }
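
/*
 * Illustrative sketch (not part of page_alloc.c): move_freepages_block()
 * derives the enclosing pageblock from an arbitrary pfn by masking off the
 * low bits, then (after clamping to the zone) hands the inclusive
 * [start_pfn, end_pfn] range to move_freepages().  A standalone model of the
 * boundary math with a 512-page pageblock assumed for the demo:
 */
#include <assert.h>

#define DEMO_PAGEBLOCK_NR	512UL

static void demo_pageblock_bounds(unsigned long pfn,
				  unsigned long *start, unsigned long *end)
{
	*start = pfn & ~(DEMO_PAGEBLOCK_NR - 1);	/* round down */
	*end = *start + DEMO_PAGEBLOCK_NR - 1;		/* inclusive last pfn */
}

static void demo_move_freepages_block_bounds(void)
{
	unsigned long start, end;

	demo_pageblock_bounds(1234, &start, &end);
	assert(start == 1024 && end == 1535);
}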
2553 
2554 static void change_pageblock_range(struct page *pageblock_page,
2555                                         int start_order, int migratetype)
2556 {
2557         int nr_pageblocks = 1 << (start_order - pageblock_order);
2558 
2559         while (nr_pageblocks--) {
2560                 set_pageblock_migratetype(pageblock_page, migratetype);
2561                 pageblock_page += pageblock_nr_pages;
2562         }
2563 }
2564 
2565 /*
2566  * When we are falling back to another migratetype during allocation, try to
2567  * steal extra free pages from the same pageblocks to satisfy further
2568  * allocations, instead of polluting multiple pageblocks.
2569  *
2570  * If we are stealing a relatively large buddy page, it is likely there will
2571  * be more free pages in the pageblock, so try to steal them all. For
2572  * reclaimable and unmovable allocations, we steal regardless of page size,
2573  * as fragmentation caused by those allocations polluting movable pageblocks
2574  * is worse than movable allocations stealing from unmovable and reclaimable
2575  * pageblocks.
2576  */
2577 static bool can_steal_fallback(unsigned int order, int start_mt)
2578 {
2579         /*
2580          * This order check is intentional even though a more relaxed
2581          * order check follows below. The reason is that we can actually
2582          * steal the whole pageblock if this condition is met, whereas the
2583          * check below doesn't guarantee it and is just a heuristic that
2584          * could be changed at any time.
2585          */
2586         if (order >= pageblock_order)
2587                 return true;
2588 
2589         if (order >= pageblock_order / 2 ||
2590                 start_mt == MIGRATE_RECLAIMABLE ||
2591                 start_mt == MIGRATE_UNMOVABLE ||
2592                 page_group_by_mobility_disabled)
2593                 return true;
2594 
2595         return false;
2596 }
2597 
2598 static inline bool boost_watermark(struct zone *zone)
2599 {
2600         unsigned long max_boost;
2601 
2602         if (!watermark_boost_factor)
2603                 return false;
2604         /*
2605          * Don't bother in zones that are unlikely to produce results.
2606          * On small machines, including kdump capture kernels running
2607          * in a small area, boosting the watermark can cause an out of
2608          * memory situation immediately.
2609          */
2610         if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
2611                 return false;
2612 
2613         max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
2614                         watermark_boost_factor, 10000);
2615 
2616         /*
2617          * high watermark may be uninitialised if fragmentation occurs
2618          * very early in boot so do not boost. We do not fall
2619          * through and boost by pageblock_nr_pages as failing
2620          * allocations that early means that reclaim is not going
2621          * to help and it may even be impossible to reclaim the
2622          * boosted watermark resulting in a hang.
2623          */
2624         if (!max_boost)
2625                 return false;
2626 
2627         max_boost = max(pageblock_nr_pages, max_boost);
2628 
2629         zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
2630                 max_boost);
2631 
2632         return true;
2633 }
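
/*
 * Illustrative sketch (not part of page_alloc.c): the ceiling computed above
 * is high_wmark * watermark_boost_factor / 10000, and each fallback event
 * nudges the boost up by one pageblock without exceeding that ceiling.  A
 * standalone model with demo values (4000-page high watermark, a factor of
 * 15000, i.e. a ceiling of 150% of the high watermark, 512-page pageblocks):
 */
#include <assert.h>

static unsigned long demo_max_boost(unsigned long high_wmark,
				    unsigned long factor)
{
	return high_wmark * factor / 10000;	/* mult_frac()-style scaling */
}

static void demo_watermark_boost(void)
{
	unsigned long max_boost = demo_max_boost(4000, 15000);
	unsigned long pageblock_nr = 512, boost = 0;

	assert(max_boost == 6000);
	/* One fallback: boost grows by a pageblock, capped at the ceiling. */
	boost = boost + pageblock_nr;
	if (boost > max_boost)
		boost = max_boost;
	assert(boost == 512);
}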
2634 
2635 /*
2636  * This function implements actual steal behaviour. If order is large enough,
2637  * we can steal whole pageblock. If not, we first move freepages in this
2638  * pageblock to our migratetype and determine how many already-allocated pages
2639  * are there in the pageblock with a compatible migratetype. If at least half
2640  * of pages are free or compatible, we can change migratetype of the pageblock
2641  * itself, so pages freed in the future will be put on the correct free list.
2642  */
2643 static void steal_suitable_fallback(struct zone *zone, struct page *page,
2644                 unsigned int alloc_flags, int start_type, bool whole_block)
2645 {
2646         unsigned int current_order = buddy_order(page);
2647         int free_pages, movable_pages, alike_pages;
2648         int old_block_type;
2649 
2650         old_block_type = get_pageblock_migratetype(page);
2651 
2652         /*
2653          * This can happen due to races and we want to prevent broken
2654          * highatomic accounting.
2655          */
2656         if (is_migrate_highatomic(old_block_type))
2657                 goto single_page;
2658 
2659         /* Take ownership for orders >= pageblock_order */
2660         if (current_order >= pageblock_order) {
2661                 change_pageblock_range(page, current_order, start_type);
2662                 goto single_page;
2663         }
2664 
2665         /*
2666          * Boost watermarks to increase reclaim pressure to reduce the
2667          * likelihood of future fallbacks. Wake kswapd now as the node
2668          * may be balanced overall and kswapd will not wake naturally.
2669          */
2670         if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))
2671                 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
2672 
2673         /* We are not allowed to try stealing from the whole block */
2674         if (!whole_block)
2675                 goto single_page;
2676 
2677         free_pages = move_freepages_block(zone, page, start_type,
2678                                                 &movable_pages);
2679         /*
2680          * Determine how many pages are compatible with our allocation.
2681          * For movable allocation, it's the number of movable pages which
2682          * we just obtained. For other types it's a bit more tricky.
2683          */
2684         if (start_type == MIGRATE_MOVABLE) {
2685                 alike_pages = movable_pages;
2686         } else {
2687                 /*
2688                  * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
2689                  * to MOVABLE pageblock, consider all non-movable pages as
2690                  * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
2691                  * vice versa, be conservative since we can't distinguish the
2692                  * exact migratetype of non-movable pages.
2693                  */
2694                 if (old_block_type == MIGRATE_MOVABLE)
2695                         alike_pages = pageblock_nr_pages
2696                                                 - (free_pages + movable_pages);
2697                 else
2698                         alike_pages = 0;
2699         }
2700 
2701         /* moving whole block can fail due to zone boundary conditions */
2702         if (!free_pages)
2703                 goto single_page;
2704 
2705         /*
2706          * If a sufficient number of pages in the block are either free or of
2707          * comparable migratability as our allocation, claim the whole block.
2708          */
2709         if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
2710                         page_group_by_mobility_disabled)
2711                 set_pageblock_migratetype(page, start_type);
2712 
2713         return;
2714 
2715 single_page:
2716         move_to_free_list(page, zone, current_order, start_type);
2717 }
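
/*
 * Illustrative sketch (not part of page_alloc.c): the claim test above keeps
 * the whole pageblock only when free pages plus compatible ("alike") pages
 * cover at least half of it (or mobility grouping is disabled).  A standalone
 * model of the threshold with a demo pageblock order of 9, i.e. 512 pages:
 */
#include <assert.h>
#include <stdbool.h>

static bool demo_claim_block(int free_pages, int alike_pages,
			     unsigned int pageblock_order)
{
	return free_pages + alike_pages >= (1 << (pageblock_order - 1));
}

static void demo_steal_threshold(void)
{
	/* 200 free + 60 compatible pages: at least half of 512 -> claim it. */
	assert(demo_claim_block(200, 60, 9));
	/* 200 free + 50 compatible pages: below 256 -> steal only the buddy. */
	assert(!demo_claim_block(200, 50, 9));
}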
2718 
2719 /*
2720  * Check whether there is a suitable fallback freepage with requested order.
2721  * If only_stealable is true, this function returns fallback_mt only if
2722  * we can steal other freepages all together. This would help to reduce
2723  * fragmentation due to mixed migratetype pages in one pageblock.
2724  */
2725 int find_suitable_fallback(struct free_area *area, unsigned int order,
2726                         int migratetype, bool only_stealable, bool *can_steal)
2727 {
2728         int i;
2729         int fallback_mt;
2730 
2731         if (area->nr_free == 0)
2732                 return -1;
2733 
2734         *can_steal = false;
2735         for (i = 0;; i++) {
2736                 fallback_mt = fallbacks[migratetype][i];
2737                 if (fallback_mt == MIGRATE_TYPES)
2738                         break;
2739 
2740                 if (free_area_empty(area, fallback_mt))
2741                         continue;
2742 
2743                 if (can_steal_fallback(order, migratetype))
2744                         *can_steal = true;
2745 
2746                 if (!only_stealable)
2747                         return fallback_mt;
2748 
2749                 if (*can_steal)
2750                         return fallback_mt;
2751         }
2752 
2753         return -1;
2754 }
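
/*
 * Illustrative sketch (not part of page_alloc.c): find_suitable_fallback()
 * simply walks one row of the fallbacks[] table above until the MIGRATE_TYPES
 * sentinel, returning the first fallback type whose free list is not empty
 * (the can_steal refinement is ignored here).  A standalone model with demo
 * types and the same fallback relationships:
 */
#include <assert.h>
#include <stdbool.h>

enum { DEMO_UNMOVABLE, DEMO_MOVABLE, DEMO_RECLAIMABLE, DEMO_TYPES };

static const int demo_fallbacks[DEMO_TYPES][3] = {
	[DEMO_UNMOVABLE]   = { DEMO_RECLAIMABLE, DEMO_MOVABLE,   DEMO_TYPES },
	[DEMO_MOVABLE]     = { DEMO_RECLAIMABLE, DEMO_UNMOVABLE, DEMO_TYPES },
	[DEMO_RECLAIMABLE] = { DEMO_UNMOVABLE,   DEMO_MOVABLE,   DEMO_TYPES },
};

static int demo_find_fallback(int start_mt, const bool empty[DEMO_TYPES])
{
	for (int i = 0; ; i++) {
		int mt = demo_fallbacks[start_mt][i];

		if (mt == DEMO_TYPES)
			return -1;	/* no fallback has free pages */
		if (!empty[mt])
			return mt;
	}
}

static void demo_fallback_lookup(void)
{
	/* Reclaimable lists empty, movable not: unmovable falls back to movable. */
	bool empty[DEMO_TYPES] = { [DEMO_RECLAIMABLE] = true };

	assert(demo_find_fallback(DEMO_UNMOVABLE, empty) == DEMO_MOVABLE);
}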
2755 
2756 /*
2757  * Reserve a pageblock for exclusive use of high-order atomic allocations if
2758  * there are no empty page blocks that contain a page with a suitable order
2759  */
2760 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
2761                                 unsigned int alloc_order)
2762 {
2763         int mt;
2764         unsigned long max_managed, flags;
2765 
2766         /*
2767          * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
2768          * Check is race-prone but harmless.
2769          */
2770         max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages;
2771         if (zone->nr_reserved_highatomic >= max_managed)
2772                 return;
2773 
2774         spin_lock_irqsave(&zone->lock, flags);
2775 
2776         /* Recheck the nr_reserved_highatomic limit under the lock */
2777         if (zone->nr_reserved_highatomic >= max_managed)
2778                 goto out_unlock;
2779 
2780         /* Yoink! */
2781         mt = get_pageblock_migratetype(page);
2782         if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt)
2783             && !is_migrate_cma(mt)) {
2784                 zone->nr_reserved_highatomic += pageblock_nr_pages;
2785                 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
2786                 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
2787         }
2788 
2789 out_unlock:
2790         spin_unlock_irqrestore(&zone->lock, flags);
2791 }
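
/*
 * Illustrative sketch (not part of page_alloc.c): the reservation cap checked
 * above is roughly 1% of the zone's managed pages, padded by one pageblock so
 * that even a tiny zone can reserve a single block.  A standalone model with
 * demo numbers (a 1,000,000-page zone and 512-page pageblocks):
 */
#include <assert.h>

static unsigned long demo_highatomic_cap(unsigned long managed_pages,
					 unsigned long pageblock_nr)
{
	return managed_pages / 100 + pageblock_nr;
}

static void demo_highatomic_limit(void)
{
	assert(demo_highatomic_cap(1000000, 512) == 10512);	/* ~1% of zone */
	assert(demo_highatomic_cap(100, 512) == 513);		/* tiny zone */
}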
2792 
2793 /*
2794  * Used when an allocation is about to fail under memory pressure. This
2795  * potentially hurts the reliability of high-order allocations when under
2796  * intense memory pressure but failed atomic allocations should be easier
2797  * to recover from than an OOM.
2798  *
2799  * If @force is true, try to unreserve a pageblock even though highatomic
2800  * pageblock is exhausted.
2801  */
2802 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
2803                                                 bool force)
2804 {
2805         struct zonelist *zonelist = ac->zonelist;
2806         unsigned long flags;
2807         struct zoneref *z;
2808         struct zone *zone;
2809         struct page *page;
2810         int order;
2811         bool ret;
2812 
2813         for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
2814                                                                 ac->nodemask) {
2815                 /*
2816                  * Preserve at least one pageblock unless memory pressure
2817                  * is really high.
2818                  */
2819                 if (!force && zone->nr_reserved_highatomic <=
2820                                         pageblock_nr_pages)
2821                         continue;
2822 
2823                 spin_lock_irqsave(&zone->lock, flags);
2824                 for (order = 0; order < MAX_ORDER; order++) {
2825                         struct free_area *area = &(zone->free_area[order]);
2826 
2827                         page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
2828                         if (!page)
2829                                 continue;
2830 
2831                         /*
2832                          * In the page freeing path, migratetype changes are racy, so
2833                          * we can encounter several free pages in a pageblock
2834                          * in this loop although we changed the pageblock type
2835                          * from highatomic to ac->migratetype. So we should
2836                          * adjust the count once.
2837                          */
2838                         if (is_migrate_highatomic_page(page)) {
2839                                 /*
2840                                  * It should never happen but changes to
2841                                  * locking could inadvertently allow a per-cpu
2842                                  * drain to add pages to MIGRATE_HIGHATOMIC
2843                                  * while unreserving so be safe and watch for
2844                                  * underflows.
2845                                  */
2846                                 zone->nr_reserved_highatomic -= min(
2847                                                 pageblock_nr_pages,
2848                                                 zone->nr_reserved_highatomic);
2849                         }
2850 
2851                         /*
2852                          * Convert to ac->migratetype and avoid the normal
2853                          * pageblock stealing heuristics. Minimally, the caller
2854                          * is doing the work and needs the pages. More
2855                          * importantly, if the block was always converted to
2856                          * MIGRATE_UNMOVABLE or another type then the number
2857                          * of pageblocks that cannot be completely freed
2858                          * may increase.
2859                          */
2860                         set_pageblock_migratetype(page, ac->migratetype);
2861                         ret = move_freepages_block(zone, page, ac->migratetype,
2862                                                                         NULL);
2863                         if (ret) {
2864                                 spin_unlock_irqrestore(&zone->lock, flags);
2865                                 return ret;
2866                         }
2867                 }
2868                 spin_unlock_irqrestore(&zone->lock, flags);
2869         }
2870 
2871         return false;
2872 }
2873 
2874 /*
2875  * Try finding a free buddy page on the fallback list and put it on the free
2876  * list of requested migratetype, possibly along with other pages from the same
2877  * block, depending on fragmentation avoidance heuristics. Returns true if
2878  * fallback was found so that __rmqueue_smallest() can grab it.
2879  *
2880  * The use of signed ints for order and current_order is a deliberate
2881  * deviation from the rest of this file, to make the for loop
2882  * condition simpler.
2883  */
2884 static __always_inline bool
2885 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
2886                                                 unsigned int alloc_flags)
2887 {
2888         struct free_area *area;
2889         int current_order;
2890         int min_order = order;
2891         struct page *page;
2892         int fallback_mt;
2893         bool can_steal;
2894 
2895         /*
2896          * Do not steal pages from freelists belonging to other pageblocks
2897          * i.e. orders < pageblock_order. If there are no local zones free,
2898          * the zonelists will be reiterated without ALLOC_NOFRAGMENT.
2899          */
2900         if (alloc_flags & ALLOC_NOFRAGMENT)
2901                 min_order = pageblock_order;
2902 
2903         /*
2904          * Find the largest available free page in the other list. This roughly
2905          * approximates finding the pageblock with the most free pages, which
2906          * would be too costly to do exactly.
2907          */
2908         for (current_order = MAX_ORDER - 1; current_order >= min_order;
2909                                 --current_order) {
2910                 area = &(zone->free_area[current_order]);
2911                 fallback_mt = find_suitable_fallback(area, current_order,
2912                                 start_migratetype, false, &can_steal);
2913                 if (fallback_mt == -1)
2914                         continue;
2915 
2916                 /*
2917                  * We cannot steal all free pages from the pageblock and the
2918                  * requested migratetype is movable. In that case it's better to
2919                  * steal and split the smallest available page instead of the
2920                  * largest available page, because even if the next movable
2921                  * allocation falls back into a different pageblock than this
2922                  * one, it won't cause permanent fragmentation.
2923                  */
2924                 if (!can_steal && start_migratetype == MIGRATE_MOVABLE
2925                                         && current_order > order)
2926                         goto find_smallest;
2927 
2928                 goto do_steal;
2929         }
2930 
2931         return false;
2932 
2933 find_smallest:
2934         for (current_order = order; current_order < MAX_ORDER;
2935                                                         current_order++) {
2936                 area = &(zone->free_area[current_order]);
2937                 fallback_mt = find_suitable_fallback(area, current_order,
2938                                 start_migratetype, false, &can_steal);
2939                 if (fallback_mt != -1)
2940                         break;
2941         }
2942 
2943         /*
2944          * This should not happen - we already found a suitable fallback
2945          * when looking for the largest page.
2946          */
2947         VM_BUG_ON(current_order == MAX_ORDER);
2948 
2949 do_steal:
2950         page = get_page_from_free_area(area, fallback_mt);
2951 
2952         steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
2953                                                                 can_steal);
2954 
2955         trace_mm_page_alloc_extfrag(page, order, current_order,
2956                 start_migratetype, fallback_mt);
2957 
2958         return true;
2959 
2960 }
2961 
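As a reading aid for the stealing decision above, the following standalone sketch (hypothetical names, not part of the kernel) models when __rmqueue_fallback() falls through to the find_smallest path instead of stealing from the largest fallback page:

#include <stdbool.h>

/*
 * Illustrative model only: mirror the condition in __rmqueue_fallback().
 * When the whole pageblock cannot be stolen and the request is movable,
 * splitting the smallest suitable page limits long-term fragmentation.
 */
static bool should_find_smallest(bool can_steal, bool request_is_movable,
				 int current_order, int requested_order)
{
	return !can_steal && request_is_movable &&
	       current_order > requested_order;
}
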
2962 /*
2963  * Do the hard work of removing an element from the buddy allocator.
2964  * Call me with the zone->lock already held.
2965  */
2966 static __always_inline struct page *
2967 __rmqueue(struct zone *zone, unsigned int order, int migratetype,
2968                                                 unsigned int alloc_flags)
2969 {
2970         struct page *page;
2971 
2972         if (IS_ENABLED(CONFIG_CMA)) {
2973                 /*
2974                  * Balance movable allocations between regular and CMA areas by
2975                  * allocating from CMA when over half of the zone's free memory
2976                  * is in the CMA area.
2977                  */
2978                 if (alloc_flags & ALLOC_CMA &&
2979                     zone_page_state(zone, NR_FREE_CMA_PAGES) >
2980                     zone_page_state(zone, NR_FREE_PAGES) / 2) {
2981                         page = __rmqueue_cma_fallback(zone, order);
2982                         if (page)
2983                                 goto out;
2984                 }
2985         }
2986 retry:
2987         page = __rmqueue_smallest(zone, order, migratetype);
2988         if (unlikely(!page)) {
2989                 if (alloc_flags & ALLOC_CMA)
2990                         page = __rmqueue_cma_fallback(zone, order);
2991 
2992                 if (!page && __rmqueue_fallback(zone, order, migratetype,
2993                                                                 alloc_flags))
2994                         goto retry;
2995         }
2996 out:
2997         if (page)
2998                 trace_mm_page_alloc_zone_locked(page, order, migratetype);
2999         return page;
3000 }
3001 
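The CMA balancing check at the top of __rmqueue() reduces to a simple predicate. A minimal standalone sketch of that decision (hypothetical helper, not kernel code):

#include <stdbool.h>

/*
 * Illustrative model: steer a movable allocation into the CMA area first
 * once free CMA pages make up more than half of the zone's free pages,
 * mirroring the zone_page_state() comparison in __rmqueue().
 */
static bool prefer_cma_first(unsigned long free_cma_pages,
			     unsigned long free_pages)
{
	return free_cma_pages > free_pages / 2;
}
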
3002 /*
3003  * Obtain a specified number of elements from the buddy allocator, all under
3004  * a single hold of the lock, for efficiency.  Add them to the supplied list.
3005  * Returns the number of new pages which were placed at *list.
3006  */
3007 static int rmqueue_bulk(struct zone *zone, unsigned int order,
3008                         unsigned long count, struct list_head *list,
3009                         int migratetype, unsigned int alloc_flags)
3010 {
3011         int i, allocated = 0;
3012 
3013         /*
3014          * local_lock_irq held so equivalent to spin_lock_irqsave for
3015          * both PREEMPT_RT and non-PREEMPT_RT configurations.
3016          */
3017         spin_lock(&zone->lock);
3018         for (i = 0; i < count; ++i) {
3019                 struct page *page = __rmqueue(zone, order, migratetype,
3020                                                                 alloc_flags);
3021                 if (unlikely(page == NULL))
3022                         break;
3023 
3024                 if (unlikely(check_pcp_refill(page)))
3025                         continue;
3026 
3027                 /*
3028                  * Split buddy pages returned by expand() are received here in
3029                  * physical page order. Each page is added to the tail of the
3030                  * caller's list, so from the caller's perspective the linked
3031                  * list is ordered by page number under some conditions. This
3032                  * is useful for IO devices that process requests from the
3033                  * head of the list onward, and thus in physical page order,
3034                  * and for devices that can merge IO requests when the
3035                  * physical pages are ordered properly.
3036                  */
3037                 list_add_tail(&page->lru, list);
3038                 allocated++;
3039                 if (is_migrate_cma(get_pcppage_migratetype(page)))
3040                         __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
3041                                               -(1 << order));
3042         }
3043 
3044         /*
3045          * i pages were removed from the buddy list even if some leak due
3046          * to check_pcp_refill failing so adjust NR_FREE_PAGES based
3047          * on i. Do not confuse with 'allocated' which is the number of
3048          * pages added to the pcp list.
3049          */
3050         __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
3051         spin_unlock(&zone->lock);
3052         return allocated;
3053 }
3054 
3055 #ifdef CONFIG_NUMA
3056 /*
3057  * Called from the vmstat counter updater to drain pagesets of this
3058  * currently executing processor on remote nodes after they have
3059  * expired.
3060  *
3061  * Note that this function must be called with the thread pinned to
3062  * a single processor.
3063  */
3064 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
3065 {
3066         unsigned long flags;
3067         int to_drain, batch;
3068 
3069         local_lock_irqsave(&pagesets.lock, flags);
3070         batch = READ_ONCE(pcp->batch);
3071         to_drain = min(pcp->count, batch);
3072         if (to_drain > 0)
3073                 free_pcppages_bulk(zone, to_drain, pcp);
3074         local_unlock_irqrestore(&pagesets.lock, flags);
3075 }
3076 #endif
3077 
3078 /*
3079  * Drain pcplists of the indicated processor and zone.
3080  *
3081  * The processor must either be the current processor and the
3082  * thread pinned to the current processor or a processor that
3083  * is not online.
3084  */
3085 static void drain_pages_zone(unsigned int cpu, struct zone *zone)
3086 {
3087         unsigned long flags;
3088         struct per_cpu_pages *pcp;
3089 
3090         local_lock_irqsave(&pagesets.lock, flags);
3091 
3092         pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
3093         if (pcp->count)
3094                 free_pcppages_bulk(zone, pcp->count, pcp);
3095 
3096         local_unlock_irqrestore(&pagesets.lock, flags);
3097 }
3098 
3099 /*
3100  * Drain pcplists of all zones on the indicated processor.
3101  *
3102  * The processor must either be the current processor and the
3103  * thread pinned to the current processor or a processor that
3104  * is not online.
3105  */
3106 static void drain_pages(unsigned int cpu)
3107 {
3108         struct zone *zone;
3109 
3110         for_each_populated_zone(zone) {
3111                 drain_pages_zone(cpu, zone);
3112         }
3113 }
3114 
3115 /*
3116  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
3117  *
3118  * The CPU has to be pinned. When zone parameter is non-NULL, spill just
3119  * the single zone's pages.
3120  */
3121 void drain_local_pages(struct zone *zone)
3122 {
3123         int cpu = smp_processor_id();
3124 
3125         if (zone)
3126                 drain_pages_zone(cpu, zone);
3127         else
3128                 drain_pages(cpu);
3129 }
3130 
3131 static void drain_local_pages_wq(struct work_struct *work)
3132 {
3133         struct pcpu_drain *drain;
3134 
3135         drain = container_of(work, struct pcpu_drain, work);
3136 
3137         /*
3138          * drain_all_pages doesn't use proper cpu hotplug protection so
3139          * we can race with cpu offline when the WQ can move this from
3140          * a cpu pinned worker to an unbound one. We can operate on a different
3141          * cpu which is alright but we also have to make sure to not move to
3142          * a different one.
3143          */
3144         migrate_disable();
3145         drain_local_pages(drain->zone);
3146         migrate_enable();
3147 }
3148 
3149 /*
3150  * The implementation of drain_all_pages(), exposing an extra parameter to
3151  * drain on all cpus.
3152  *
3153  * drain_all_pages() is optimized to only execute on cpus where pcplists are
3154  * not empty. The check for non-emptiness can however race with a free to
3155  * pcplist that has not yet increased the pcp->count from 0 to 1. Callers
3156  * that need the guarantee that every CPU has drained can disable the
3157  * optimizing racy check.
3158  */
3159 static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
3160 {
3161         int cpu;
3162 
3163         /*
3164          * Allocate in the BSS so we won't require allocation in
3165          * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
3166          */
3167         static cpumask_t cpus_with_pcps;
3168 
3169         /*
3170          * Make sure nobody triggers this path before mm_percpu_wq is fully
3171          * initialized.
3172          */
3173         if (WARN_ON_ONCE(!mm_percpu_wq))
3174                 return;
3175 
3176         /*
3177          * Do not drain if one is already in progress unless it's specific to
3178          * a zone. Such callers are primarily CMA and memory hotplug and need
3179          * the drain to be complete when the call returns.
3180          */
3181         if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
3182                 if (!zone)
3183                         return;
3184                 mutex_lock(&pcpu_drain_mutex);
3185         }
3186 
3187         /*
3188          * We don't care about racing with CPU hotplug event
3189          * as offline notification will cause the notified
3190          * cpu to drain that CPU pcps and on_each_cpu_mask
3191          * disables preemption as part of its processing
3192          */
3193         for_each_online_cpu(cpu) {
3194                 struct per_cpu_pages *pcp;
3195                 struct zone *z;
3196                 bool has_pcps = false;
3197 
3198                 if (force_all_cpus) {
3199                         /*
3200                          * The pcp.count check is racy, some callers need a
3201                          * guarantee that no cpu is missed.
3202                          */
3203                         has_pcps = true;
3204                 } else if (zone) {
3205                         pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
3206                         if (pcp->count)
3207                                 has_pcps = true;
3208                 } else {
3209                         for_each_populated_zone(z) {
3210                                 pcp = per_cpu_ptr(z->per_cpu_pageset, cpu);
3211                                 if (pcp->count) {
3212                                         has_pcps = true;
3213                                         break;
3214                                 }
3215                         }
3216                 }
3217 
3218                 if (has_pcps)
3219                         cpumask_set_cpu(cpu, &cpus_with_pcps);
3220                 else
3221                         cpumask_clear_cpu(cpu, &cpus_with_pcps);
3222         }
3223 
3224         for_each_cpu(cpu, &cpus_with_pcps) {
3225                 struct pcpu_drain *drain = per_cpu_ptr(&pcpu_drain, cpu);
3226 
3227                 drain->zone = zone;
3228                 INIT_WORK(&drain->work, drain_local_pages_wq);
3229                 queue_work_on(cpu, mm_percpu_wq, &drain->work);
3230         }
3231         for_each_cpu(cpu, &cpus_with_pcps)
3232                 flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work);
3233 
3234         mutex_unlock(&pcpu_drain_mutex);
3235 }
3236 
3237 /*
3238  * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
3239  *
3240  * When zone parameter is non-NULL, spill just the single zone's pages.
3241  *
3242  * Note that this can be extremely slow as the draining happens in a workqueue.
3243  */
3244 void drain_all_pages(struct zone *zone)
3245 {
3246         __drain_all_pages(zone, false);
3247 }
3248 
3249 #ifdef CONFIG_HIBERNATION
3250 
3251 /*
3252  * Touch the watchdog for every WD_PAGE_COUNT pages.
3253  */
3254 #define WD_PAGE_COUNT   (128*1024)
3255 
3256 void mark_free_pages(struct zone *zone)
3257 {
3258         unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
3259         unsigned long flags;
3260         unsigned int order, t;
3261         struct page *page;
3262 
3263         if (zone_is_empty(zone))
3264                 return;
3265 
3266         spin_lock_irqsave(&zone->lock, flags);
3267 
3268         max_zone_pfn = zone_end_pfn(zone);
3269         for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
3270                 if (pfn_valid(pfn)) {
3271                         page = pfn_to_page(pfn);
3272 
3273                         if (!--page_count) {
3274                                 touch_nmi_watchdog();
3275                                 page_count = WD_PAGE_COUNT;
3276                         }
3277 
3278                         if (page_zone(page) != zone)
3279                                 continue;
3280 
3281                         if (!swsusp_page_is_forbidden(page))
3282                                 swsusp_unset_page_free(page);
3283                 }
3284 
3285         for_each_migratetype_order(order, t) {
3286                 list_for_each_entry(page,
3287                                 &zone->free_area[order].free_list[t], lru) {
3288                         unsigned long i;
3289 
3290                         pfn = page_to_pfn(page);
3291                         for (i = 0; i < (1UL << order); i++) {
3292                                 if (!--page_count) {
3293                                         touch_nmi_watchdog();
3294                                         page_count = WD_PAGE_COUNT;
3295                                 }
3296                                 swsusp_set_page_free(pfn_to_page(pfn + i));
3297                         }
3298                 }
3299         }
3300         spin_unlock_irqrestore(&zone->lock, flags);
3301 }
3302 #endif /* CONFIG_HIBERNATION */
3303 
3304 static bool free_unref_page_prepare(struct page *page, unsigned long pfn,
3305                                                         unsigned int order)
3306 {
3307         int migratetype;
3308 
3309         if (!free_pcp_prepare(page, order))
3310                 return false;
3311 
3312         migratetype = get_pfnblock_migratetype(page, pfn);
3313         set_pcppage_migratetype(page, migratetype);
3314         return true;
3315 }
3316 
3317 static int nr_pcp_free(struct per_cpu_pages *pcp, int high, int batch)
3318 {
3319         int min_nr_free, max_nr_free;
3320 
3321         /* Check for PCP disabled or boot pageset */
3322         if (unlikely(high < batch))
3323                 return 1;
3324 
3325         /* Leave at least pcp->batch pages on the list */
3326         min_nr_free = batch;
3327         max_nr_free = high - batch;
3328 
3329         /*
3330          * Double the number of pages freed each time there is subsequent
3331          * freeing of pages without any allocation.
3332          */
3333         batch <<= pcp->free_factor;
3334         if (batch < max_nr_free)
3335                 pcp->free_factor++;
3336         batch = clamp(batch, min_nr_free, max_nr_free);
3337 
3338         return batch;
3339 }
3340 
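The sizing logic in nr_pcp_free() is easiest to follow as plain arithmetic. A minimal standalone model under the same assumptions (hypothetical code, not the kernel implementation):

/*
 * Illustrative model of nr_pcp_free(): consecutive frees without an
 * intervening allocation double the effective batch via free_factor,
 * clamped so at least 'batch' pages remain on the list and no more than
 * 'high - batch' pages are freed in one go.
 */
static int model_nr_pcp_free(int high, int batch, int *free_factor)
{
	int min_nr_free = batch;
	int max_nr_free = high - batch;
	int nr;

	if (high < batch)
		return 1;		/* PCP disabled or boot pageset */

	nr = batch << *free_factor;
	if (nr < max_nr_free)
		(*free_factor)++;

	if (nr < min_nr_free)
		nr = min_nr_free;
	else if (nr > max_nr_free)
		nr = max_nr_free;

	return nr;
}
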
3341 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone)
3342 {
3343         int high = READ_ONCE(pcp->high);
3344 
3345         if (unlikely(!high))
3346                 return 0;
3347 
3348         if (!test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags))
3349                 return high;
3350 
3351         /*
3352          * If reclaim is active, limit the number of pages that can be
3353          * stored on pcp lists
3354          */
3355         return min(READ_ONCE(pcp->batch) << 2, high);
3356 }
3357 
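Similarly, nr_pcp_high() can be read as a one-line cap. A tiny sketch under the same assumptions (hypothetical, not kernel code):

/*
 * Illustrative model: while reclaim is active in the zone, cap the PCP
 * high mark at four batches so freed pages reach the buddy lists sooner.
 */
static int model_nr_pcp_high(int high, int batch, int reclaim_active)
{
	if (!high)
		return 0;
	if (!reclaim_active)
		return high;
	return (batch << 2) < high ? (batch << 2) : high;
}
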
3358 static void free_unref_page_commit(struct page *page, unsigned long pfn,
3359                                    int migratetype, unsigned int order)
3360 {
3361         struct zone *zone = page_zone(page);
3362         struct per_cpu_pages *pcp;
3363         int high;
3364         int pindex;
3365 
3366         __count_vm_event(PGFREE);
3367         pcp = this_cpu_ptr(zone->per_cpu_pageset);
3368         pindex = order_to_pindex(migratetype, order);
3369         list_add(&page->lru, &pcp->lists[pindex]);
3370         pcp->count += 1 << order;
3371         high = nr_pcp_high(pcp, zone);
3372         if (pcp->count >= high) {
3373                 int batch = READ_ONCE(pcp->batch);
3374 
3375                 free_pcppages_bulk(zone, nr_pcp_free(pcp, high, batch), pcp);
3376         }
3377 }
3378 
3379 /*
3380  * Free a pcp page
3381  */
3382 void free_unref_page(struct page *page, unsigned int order)
3383 {
3384         unsigned long flags;
3385         unsigned long pfn = page_to_pfn(page);
3386         int migratetype;
3387 
3388         if (!free_unref_page_prepare(page, pfn, order))
3389                 return;
3390 
3391         /*
3392          * We only track unmovable, reclaimable and movable on pcp lists.
3393          * Place ISOLATE pages on the isolated list because they are being
3394          * offlined but treat HIGHATOMIC as movable pages so we can get those
3395          * areas back if necessary. Otherwise, we may have to free
3396          * excessively into the page allocator.
3397          */
3398         migratetype = get_pcppage_migratetype(page);
3399         if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
3400                 if (unlikely(is_migrate_isolate(migratetype))) {
3401                         free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE);
3402                         return;
3403                 }
3404                 migratetype = MIGRATE_MOVABLE;
3405         }
3406 
3407         local_lock_irqsave(&pagesets.lock, flags);
3408         free_unref_page_commit(page, pfn, migratetype, order);
3409         local_unlock_irqrestore(&pagesets.lock, flags);
3410 }
3411 
3412 /*
3413  * Free a list of 0-order pages
3414  */
3415 void free_unref_page_list(struct list_head *list)
3416 {
3417         struct page *page, *next;
3418         unsigned long flags, pfn;
3419         int batch_count = 0;
3420         int migratetype;
3421 
3422         /* Prepare pages for freeing */
3423         list_for_each_entry_safe(page, next, list, lru) {
3424                 pfn = page_to_pfn(page);
3425                 if (!free_unref_page_prepare(page, pfn, 0)) {
3426                         list_del(&page->lru);
3427                         continue;
3428                 }
3429 
3430                 /*
3431                  * Free isolated pages directly to the allocator, see
3432                  * comment in free_unref_page.
3433                  */
3434                 migratetype = get_pcppage_migratetype(page);
3435                 if (unlikely(is_migrate_isolate(migratetype))) {
3436                         list_del(&page->lru);
3437                         free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE);
3438                         continue;
3439                 }
3440 
3441                 set_page_private(page, pfn);
3442         }
3443 
3444         local_lock_irqsave(&pagesets.lock, flags);
3445         list_for_each_entry_safe(page, next, list, lru) {
3446                 pfn = page_private(page);
3447                 set_page_private(page, 0);
3448 
3449                 /*
3450                  * Non-isolated types over MIGRATE_PCPTYPES get added
3451                  * to the MIGRATE_MOVABLE pcp list.
3452                  */
3453                 migratetype = get_pcppage_migratetype(page);
3454                 if (unlikely(migratetype >= MIGRATE_PCPTYPES))
3455                         migratetype = MIGRATE_MOVABLE;
3456 
3457                 trace_mm_page_free_batched(page);
3458                 free_unref_page_commit(page, pfn, migratetype, 0);
3459 
3460                 /*
3461                  * Guard against excessive IRQ disabled times when we get
3462                  * a large list of pages to free.
3463                  */
3464                 if (++batch_count == SWAP_CLUSTER_MAX) {
3465                         local_unlock_irqrestore(&pagesets.lock, flags);
3466                         batch_count = 0;
3467                         local_lock_irqsave(&pagesets.lock, flags);
3468                 }
3469         }
3470         local_unlock_irqrestore(&pagesets.lock, flags);
3471 }
3472 
3473 /*
3474  * split_page takes a non-compound higher-order page, and splits it into
3475  * n (1<<order) sub-pages: page[0..n-1]
3476  * Each sub-page must be freed individually.
3477  *
3478  * Note: this is probably too low level an operation for use in drivers.
3479  * Please consult with lkml before using this in your driver.
3480  */
3481 void split_page(struct page *page, unsigned int order)
3482 {
3483         int i;
3484 
3485         VM_BUG_ON_PAGE(PageCompound(page), page);
3486         VM_BUG_ON_PAGE(!page_count(page), page);
3487 
3488         for (i = 1; i < (1 << order); i++)
3489                 set_page_refcounted(page + i);
3490         split_page_owner(page, 1 << order);
3491         split_page_memcg(page, 1 << order);
3492 }
3493 EXPORT_SYMBOL_GPL(split_page);
3494 
3495 int __isolate_free_page(struct page *page, unsigned int order)
3496 {
3497         unsigned long watermark;
3498         struct zone *zone;
3499         int mt;
3500 
3501         BUG_ON(!PageBuddy(page));
3502 
3503         zone = page_zone(page);
3504         mt = get_pageblock_migratetype(page);
3505 
3506         if (!is_migrate_isolate(mt)) {
3507                 /*
3508                  * Obey watermarks as if the page was being allocated. We can
3509                  * emulate a high-order watermark check with a raised order-0
3510                  * watermark, because we already know our high-order page
3511                  * exists.
3512                  */
3513                 watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
3514                 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
3515                         return 0;
3516 
3517                 __mod_zone_freepage_state(zone, -(1UL << order), mt);
3518         }
3519 
3520         /* Remove page from free list */
3521 
3522         del_page_from_free_list(page, zone, order);
3523 
3524         /*
3525          * Set the pageblock migratetype if the isolated page is at least
3526          * half of a pageblock.
3527          */
3528         if (order >= pageblock_order - 1) {
3529                 struct page *endpage = page + (1 << order) - 1;
3530                 for (; page < endpage; page += pageblock_nr_pages) {
3531                         int mt = get_pageblock_migratetype(page);
3532                         if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
3533                             && !is_migrate_highatomic(mt))
3534                                 set_pageblock_migratetype(page,
3535                                                           MIGRATE_MOVABLE);
3536                 }
3537         }
3538 
3539 
3540         return 1UL << order;
3541 }
3542 
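The watermark emulation used in __isolate_free_page() can be illustrated in isolation. A minimal sketch, assuming a simplified stand-in for zone_watermark_ok() that just compares free pages against the mark (hypothetical code, not the kernel helper):

#include <stdbool.h>

/*
 * Illustrative model: a high-order isolation is approved by an order-0
 * check against a min watermark raised by the size of the page being
 * isolated, because that high-order page is already known to exist.
 */
static bool model_may_isolate(unsigned long wmark_min, unsigned int order,
			      unsigned long free_pages)
{
	unsigned long watermark = wmark_min + (1UL << order);

	return free_pages > watermark;	/* stand-in for zone_watermark_ok() */
}
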
3543 /**
3544  * __putback_isolated_page - Return a now-isolated page back where we got it
3545  * @page: Page that was isolated
3546  * @order: Order of the isolated page
3547  * @mt: The page's pageblock's migratetype
3548  *
3549  * This function is meant to return a page pulled from the free lists via
3550  * __isolate_free_page back to the free lists they were pulled from.
3551  */
3552 void __putback_isolated_page(struct page *page, unsigned int order, int mt)
3553 {
3554         struct zone *zone = page_zone(page);
3555 
3556         /* zone lock should be held when this function is called */
3557         lockdep_assert_held(&zone->lock);
3558 
3559         /* Return isolated page to tail of freelist. */
3560         __free_one_page(page, page_to_pfn(page), zone, order, mt,
3561                         FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL);
3562 }
3563 
3564 /*
3565  * Update NUMA hit/miss statistics
3566  *
3567  * Must be called with interrupts disabled.
3568  */
3569 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
3570                                    long nr_account)
3571 {
3572 #ifdef CONFIG_NUMA
3573         enum numa_stat_item local_stat = NUMA_LOCAL;
3574 
3575         /* skip numa counters update if numa stats is disabled */
3576         if (!static_branch_likely(&vm_numa_stat_key))
3577                 return;
3578 
3579         if (zone_to_nid(z) != numa_node_id())
3580                 local_stat = NUMA_OTHER;
3581 
3582         if (zone_to_nid(z) == zone_to_nid(preferred_zone))
3583                 __count_numa_events(z, NUMA_HIT, nr_account);
3584         else {
3585                 __count_numa_events(z, NUMA_MISS, nr_account);
3586                 __count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account);
3587         }
3588         __count_numa_events(z, local_stat, nr_account);
3589 #endif
3590 }
3591 
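The NUMA accounting above maps onto a few counters keyed by node IDs. A small standalone model (hypothetical structure and names, not kernel code):

struct numa_counts { long hit, miss, foreign, local, other; };

/*
 * Illustrative model of zone_statistics(): an allocation is a HIT when the
 * zone belongs to the preferred node, otherwise a MISS for the allocating
 * zone plus a FOREIGN for the preferred one; LOCAL/OTHER records whether
 * the zone's node is the one the CPU is running on.
 */
static void model_zone_statistics(int zone_nid, int preferred_nid,
				  int running_nid, long nr,
				  struct numa_counts *z,
				  struct numa_counts *preferred)
{
	if (zone_nid == preferred_nid)
		z->hit += nr;
	else {
		z->miss += nr;
		preferred->foreign += nr;
	}

	if (zone_nid == running_nid)
		z->local += nr;
	else
		z->other += nr;
}
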
3592 /* Remove page from the per-cpu list, caller must protect the list */
3593 static inline
3594 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
3595                         int migratetype,
3596                         unsigned int alloc_flags,
3597                         struct per_cpu_pages *pcp,
3598                         struct list_head *list)
3599 {
3600         struct page *page;
3601 
3602         do {
3603                 if (list_empty(list)) {
3604                         int batch = READ_ONCE(pcp->batch);
3605                         int alloced;
3606 
3607                         /*
3608                          * Scale batch relative to order if batch implies
3609                          * free pages can be stored on the PCP. Batch can
3610                          * be 1 for small zones or for boot pagesets which
3611                          * should never store free pages as the pages may
3612                          * belong to arbitrary zones.
3613                          */
3614                         if (batch > 1)
3615                                 batch = max(batch >> order, 2);
3616                         alloced = rmqueue_bulk(zone, order,
3617                                         batch, list,
3618                                         migratetype, alloc_flags);
3619 
3620                         pcp->count += alloced << order;
3621                         if (unlikely(list_empty(list)))
3622                                 return NULL;
3623                 }
3624 
3625                 page = list_first_entry(list, struct page, lru);
3626                 list_del(&page->lru);
3627                 pcp->count -= 1 << order;
3628         } while (check_new_pcp(page));
3629 
3630         return page;
3631 }
3632 
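The refill sizing in __rmqueue_pcplist() scales the configured batch with the allocation order. A small standalone sketch of just that calculation (hypothetical, not kernel code):

/*
 * Illustrative model: scale the refill batch down by the order so a
 * higher-order refill does not overfill the PCP, but never pull fewer
 * than two pages unless batch is 1 (boot pagesets and tiny zones, which
 * must not cache pages at all).
 */
static int model_pcp_refill_batch(int batch, unsigned int order)
{
	if (batch > 1) {
		batch >>= order;
		if (batch < 2)
			batch = 2;
	}
	return batch;
}
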
3633 /* Lock and remove page from the per-cpu list */
3634 static struct page *rmqueue_pcplist(struct zone *preferred_zone,
3635                         struct zone *zone, unsigned int order,
3636                         gfp_t gfp_flags, int migratetype,
3637                         unsigned int alloc_flags)
3638 {
3639         struct per_cpu_pages *pcp;
3640         struct list_head *list;
3641         struct page *page;
3642         unsigned long flags;
3643 
3644         local_lock_irqsave(&pagesets.lock, flags);
3645 
3646         /*
3647          * On allocation, reduce the number of pages that are batch freed.
3648          * See nr_pcp_free() where free_factor is increased for subsequent
3649          * frees.
3650          */
3651         pcp = this_cpu_ptr(zone->per_cpu_pageset);
3652         pcp->free_factor >>= 1;
3653         list = &pcp->lists[order_to_pindex(migratetype, order)];
3654         page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
3655         local_unlock_irqrestore(&pagesets.lock, flags);
3656         if (page) {
3657                 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
3658                 zone_statistics(preferred_zone, zone, 1);
3659         }
3660         return page;
3661 }
3662 
3663 /*
3664  * Allocate a page from the given zone. Use pcplists for order-0 allocations.
3665  */
3666 static inline
3667 struct page *rmqueue(struct zone *preferred_zone,
3668                         struct zone *zone, unsigned int order,
3669                         gfp_t gfp_flags, unsigned int alloc_flags,
3670                         int migratetype)
3671 {
3672         unsigned long flags;
3673         struct page *page;
3674 
3675         if (likely(pcp_allowed_order(order))) {
3676                 /*
3677                  * MIGRATE_MOVABLE pcplist could have the pages on CMA area and
3678                  * we need to skip it when CMA area isn't allowed.
3679                  */
3680                 if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||
3681                                 migratetype != MIGRATE_MOVABLE) {
3682                         page = rmqueue_pcplist(preferred_zone, zone, order,
3683                                         gfp_flags, migratetype, alloc_flags);
3684                         goto out;
3685                 }
3686         }
3687 
3688         /*
3689          * We most definitely don't want callers attempting to
3690          * allocate greater than order-1 page units with __GFP_NOFAIL.
3691          */
3692         WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
3693         spin_lock_irqsave(&zone->lock, flags);
3694 
3695         do {
3696                 page = NULL;
3697                 /*
3698                  * order-0 request can reach here when the pcplist is skipped
3699                  * due to non-CMA allocation context. HIGHATOMIC area is
3700                  * reserved for high-order atomic allocation, so order-0
3701                  * request should skip it.
3702                  */
3703                 if (order > 0 && alloc_flags & ALLOC_HARDER) {
3704                         page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
3705                         if (page)
3706                                 trace_mm_page_alloc_zone_locked(page, order, migratetype);
3707                 }
3708                 if (!page)
3709                         page = __rmqueue(zone, order, migratetype, alloc_flags);
3710         } while (page && check_new_pages(page, order));
3711         if (!page)
3712                 goto failed;
3713 
3714         __mod_zone_freepage_state(zone, -(1 << order),
3715                                   get_pcppage_migratetype(page));
3716         spin_unlock_irqrestore(&zone->lock, flags);
3717 
3718         __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
3719         zone_statistics(preferred_zone, zone, 1);
3720 
3721 out:
3722         /* Separate test+clear to avoid unnecessary atomics */
3723         if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) {
3724                 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
3725                 wakeup_kswapd(zone, 0, 0, zone_idx(zone));
3726         }
3727 
3728         VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
3729         return page;
3730 
3731 failed:
3732         spin_unlock_irqrestore(&zone->lock, flags);
3733         return NULL;
3734 }
3735 
3736 #ifdef CONFIG_FAIL_PAGE_ALLOC
3737 
3738 static struct {
3739         struct fault_attr attr;
3740 
3741         bool ignore_gfp_highmem;
3742         bool ignore_gfp_reclaim;
3743         u32 min_order;
3744 } fail_page_alloc = {
3745         .attr = FAULT_ATTR_INITIALIZER,
3746         .ignore_gfp_reclaim = true,
3747         .ignore_gfp_highmem = true,
3748         .min_order = 1,
3749 };
3750 
3751 static int __init setup_fail_page_alloc(char *str)
3752 {
3753         return setup_fault_attr(&fail_page_alloc.attr, str);
3754 }
3755 __setup("fail_page_alloc=", setup_fail_page_alloc);
3756 
3757 static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3758 {
3759         if (order < fail_page_alloc.min_order)
3760                 return false;
3761         if (gfp_mask & __GFP_NOFAIL)
3762                 return false;
3763         if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
3764                 return false;
3765         if (fail_page_alloc.ignore_gfp_reclaim &&
3766                         (gfp_mask & __GFP_DIRECT_RECLAIM))
3767                 return false;
3768 
3769         return should_fail(&fail_page_alloc.attr, 1 << order);
3770 }
3771 
3772 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
3773 
3774 static int __init fail_page_alloc_debugfs(void)
3775 {
3776         umode_t mode = S_IFREG | 0600;
3777         struct dentry *dir;
3778 
3779         dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
3780                                         &fail_page_alloc.attr);
3781 
3782         debugfs_create_bool("ignore-gfp-wait", mode, dir,
3783                             &fail_page_alloc.ignore_gfp_reclaim);
3784         debugfs_create_bool("ignore-gfp-highmem", mode, dir,
3785                             &fail_page_alloc.ignore_gfp_highmem);
3786         debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order);
3787 
3788         return 0;
3789 }
3790 
3791 late_initcall(fail_page_alloc_debugfs);
3792 
3793 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
3794 
3795 #else /* CONFIG_FAIL_PAGE_ALLOC */
3796 
3797 static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3798 {
3799         return false;
3800 }
3801 
3802 #endif /* CONFIG_FAIL_PAGE_ALLOC */
3803 
3804 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3805 {
3806         return __should_fail_alloc_page(gfp_mask, order);
3807 }
3808 ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE);
3809 
3810 static inline long __zone_watermark_unusable_free(struct zone *z,
3811                                 unsigned int order, unsigned int alloc_flags)
3812 {
3813         const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
3814         long unusable_free = (1 << order) - 1;
3815 
3816         /*
3817          * If the caller does not have rights to ALLOC_HARDER then subtract
3818          * the high-atomic reserves. This will over-estimate the size of the
3819          * atomic reserve but it avoids a search.
3820          */
3821         if (likely(!alloc_harder))
3822                 unusable_free += z->nr_reserved_highatomic;
3823 
3824 #ifdef CONFIG_CMA
3825         /* If allocation can't use CMA areas don't use free CMA pages */
3826         if (!(alloc_flags & ALLOC_CMA))
3827                 unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
3828 #endif
3829 
3830         return unusable_free;
3831 }
3832 
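The reserves subtracted by __zone_watermark_unusable_free() can be summed up directly. A minimal standalone model (hypothetical names; the CONFIG_CMA handling is folded into a boolean):

#include <stdbool.h>

/*
 * Illustrative model: pages that cannot satisfy this request are an
 * allowance of (1 << order) - 1 pages for the request itself, plus the
 * high-atomic reserve when the caller lacks ALLOC_HARDER/ALLOC_OOM rights,
 * plus free CMA pages when ALLOC_CMA is not set.
 */
static long model_unusable_free(unsigned int order, bool alloc_harder,
				bool alloc_cma, long nr_reserved_highatomic,
				long nr_free_cma)
{
	long unusable_free = (1L << order) - 1;

	if (!alloc_harder)
		unusable_free += nr_reserved_highatomic;
	if (!alloc_cma)
		unusable_free += nr_free_cma;

	return unusable_free;
}
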
3833 /*
3834  * Return true if free base pages are above 'mark'. For high-order checks it
3835  * will return true if the order-0 watermark is reached and there is at least
3836  * one free page of a suitable size. Checking now avoids taking the zone lock
3837  * to check in the allocation paths if no pages are free.
3838  */
3839 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3840                          int highest_zoneidx, unsigned int alloc_flags,
3841                          long free_pages)
3842 {
3843         long min = mark;
3844         int o;
3845         const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
3846 
3847         /* free_pages may go negative - that's OK */
3848         free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags);
3849 
3850         if (alloc_flags & ALLOC_HIGH)
3851                 min -= min / 2;
3852 
3853         if (unlikely(alloc_harder)) {
3854                 /*
3855                  * OOM victims can try even harder than normal ALLOC_HARDER
3856                  * users on the grounds that it's definitely going to be in
3857                  * the exit path shortly and free memory. Any allocation it
3858                  * makes during the free path will be small and short-lived.
3859                  */
3860                 if (alloc_flags & ALLOC_OOM)
3861                         min -= min / 2;
3862                 else
3863                         min -= min / 4;
3864         }
3865 
3866         /*
3867          * Check watermarks for an order-0 allocation request. If these
3868          * are not met, then a high-order request also cannot go ahead
3869          * even if a suitable page happened to be free.
3870          */
3871         if (free_pages <= min + z->lowmem_reserve[highest_zoneidx])
3872                 return false;
3873 
3874         /* If this is an order-0 request then the watermark is fine */
3875         if (!order)
3876                 return true;
3877 
3878         /* For a high-order request, check that at least one suitable page is free */
3879         for (o = order; o < MAX_ORDER; o++) {
3880                 struct free_area *area = &z->free_area[o];
3881                 int mt;
3882 
3883                 if (!area->nr_free)
3884                         continue;
3885 
3886                 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
3887                         if (!free_area_empty(area, mt))
3888                                 return true;
3889                 }
3890 
3891 #ifdef CONFIG_CMA
3892                 if ((alloc_flags & ALLOC_CMA) &&
3893                     !free_area_empty(area, MIGRATE_CMA)) {
3894                         return true;
3895                 }
3896 #endif
3897                 if (alloc_harder && !free_area_empty(area, MIGRATE_HIGHATOMIC))
3898                         return true;
3899         }
3900         return false;
3901 }
3902 
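The watermark relaxations at the top of __zone_watermark_ok() compose as simple fractions of the mark. A short sketch of just that arithmetic (hypothetical, standalone):

/*
 * Illustrative model: ALLOC_HIGH halves the mark; on top of that an OOM
 * victim gets another half off, while other ALLOC_HARDER users get a
 * quarter off, mirroring the adjustments in __zone_watermark_ok().
 */
static long model_effective_min(long mark, int alloc_high, int alloc_oom,
				int alloc_harder)
{
	long min = mark;

	if (alloc_high)
		min -= min / 2;

	if (alloc_oom)
		min -= min / 2;
	else if (alloc_harder)
		min -= min / 4;

	return min;
}
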
3903 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3904                       int highest_zoneidx, unsigned int alloc_flags)
3905 {
3906         return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
3907                                         zone_page_state(z, NR_FREE_PAGES));
3908 }
3909 
3910 static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
3911                                 unsigned long mark, int highest_zoneidx,
3912                                 unsigned int alloc_flags, gfp_t gfp_mask)
3913 {
3914         long free_pages;
3915 
3916         free_pages = zone_page_state(z, NR_FREE_PAGES);
3917 
3918         /*
3919          * Fast check for order-0 only. If this fails then the reserves
3920          * need to be calculated.
3921          */
3922         if (!order) {
3923                 long fast_free;
3924 
3925                 fast_free = free_pages;
3926                 fast_free -= __zone_watermark_unusable_free(z, 0, alloc_flags);
3927                 if (fast_free > mark + z->lowmem_reserve[highest_zoneidx])
3928                         return true;
3929         }
3930 
3931         if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
3932                                         free_pages))
3933                 return true;
3934         /*
3935          * Ignore watermark boosting for GFP_ATOMIC order-0 allocations
3936          * when checking the min watermark. The min watermark is the
3937          * point where boosting is ignored so that kswapd is woken up
3938          * when below the low watermark.
3939          */
3940         if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost
3941                 && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) {
3942                 mark = z->_watermark[WMARK_MIN];
3943                 return __zone_watermark_ok(z, order, mark, highest_zoneidx,
3944                                         alloc_flags, free_pages);
3945         }
3946 
3947         return false;
3948 }
3949 
3950 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
3951                         unsigned long mark, int highest_zoneidx)
3952 {
3953         long free_pages = zone_page_state(z, NR_FREE_PAGES);
3954 
3955         if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
3956                 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
3957 
3958         return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0,
3959                                                                 free_pages);
3960 }
3961 
3962 #ifdef CONFIG_NUMA
3963 int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE;
3964 
3965 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3966 {
3967         return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
3968                                 node_reclaim_distance;
3969 }
3970 #else   /* CONFIG_NUMA */
3971 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3972 {
3973         return true;
3974 }
3975 #endif  /* CONFIG_NUMA */
3976 
3977 /*
3978  * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
3979  * fragmentation is subtle. If the preferred zone was HIGHMEM then
3980  * premature use of a lower zone may cause lowmem pressure problems that
3981  * are worse than fragmentation. If the next zone is ZONE_DMA then it is
3982  * probably too small. It only makes sense to spread allocations to avoid
3983  * fragmentation between the Normal and DMA32 zones.
3984  */
3985 static inline unsigned int
3986 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
3987 {
3988         unsigned int alloc_flags;
3989 
3990         /*
3991          * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
3992          * to save a branch.
3993          */
3994         alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
3995 
3996 #ifdef CONFIG_ZONE_DMA32
3997         if (!zone)
3998                 return alloc_flags;
3999 
4000         if (zone_idx(zone) != ZONE_NORMAL)
4001                 return alloc_flags;
4002 
4003         /*
4004          * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
4005          * the pointer is within zone->zone_pgdat->node_zones[]. Also assume
4006          * on UMA that if Normal is populated then so is DMA32.
4007          */
4008         BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
4009         if (nr_online_nodes > 1 && !populated_zone(--zone))
4010                 return alloc_flags;
4011 
4012         alloc_flags |= ALLOC_NOFRAGMENT;
4013 #endif /* CONFIG_ZONE_DMA32 */
4014         return alloc_flags;
4015 }
4016 
4017 /* Must be called after current_gfp_context() which can change gfp_mask */
4018 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
4019                                                   unsigned int alloc_flags)
4020 {
4021 #ifdef CONFIG_CMA
4022         if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
4023                 alloc_flags |= ALLOC_CMA;
4024 #endif
4025         return alloc_flags;
4026 }
4027 
4028 /*
4029  * get_page_from_freelist goes through the zonelist trying to allocate
4030  * a page.
4031  */
4032 static struct page *
4033 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
4034                                                 const struct alloc_context *ac)
4035 {
4036         struct zoneref *z;
4037         struct zone *zone;
4038         struct pglist_data *last_pgdat_dirty_limit = NULL;
4039         bool no_fallback;
4040 
4041 retry:
4042         /*
4043          * Scan zonelist, looking for a zone with enough free.
4044          * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
4045          */
4046         no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
4047         z = ac->preferred_zoneref;
4048         for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx,
4049                                         ac->nodemask) {
4050                 struct page *page;
4051                 unsigned long mark;
4052 
4053                 if (cpusets_enabled() &&
4054                         (alloc_flags & ALLOC_CPUSET) &&
4055                         !__cpuset_zone_allowed(zone, gfp_mask))
4056                                 continue;
4057                 /*
4058                  * When allocating a page cache page for writing, we
4059                  * want to get it from a node that is within its dirty
4060                  * limit, such that no single node holds more than its
4061                  * proportional share of globally allowed dirty pages.
4062                  * The dirty limits take into account the node's
4063                  * lowmem reserves and high watermark so that kswapd
4064                  * should be able to balance it without having to
4065                  * write pages from its LRU list.
4066                  *
4067                  * XXX: For now, allow allocations to potentially
4068                  * exceed the per-node dirty limit in the slowpath
4069                  * (spread_dirty_pages unset) before going into reclaim,
4070                  * which is important when on a NUMA setup the allowed
4071                  * nodes are together not big enough to reach the
4072                  * global limit.  The proper fix for these situations
4073                  * will require awareness of nodes in the
4074                  * dirty-throttling and the flusher threads.
4075                  */
4076                 if (ac->spread_dirty_pages) {
4077                         if (last_pgdat_dirty_limit == zone->zone_pgdat)
4078                                 continue;
4079 
4080                         if (!node_dirty_ok(zone->zone_pgdat)) {
4081                                 last_pgdat_dirty_limit = zone->zone_pgdat;
4082                                 continue;
4083                         }
4084                 }
4085 
4086                 if (no_fallback && nr_online_nodes > 1 &&
4087                     zone != ac->preferred_zoneref->zone) {
4088                         int local_nid;
4089 
4090                         /*
4091                          * If moving to a remote node, retry but allow
4092                          * fragmenting fallbacks. Locality is more important
4093                          * than fragmentation avoidance.
4094                          */
4095                         local_nid = zone_to_nid(ac->preferred_zoneref->zone);
4096                         if (zone_to_nid(zone) != local_nid) {
4097                                 alloc_flags &= ~ALLOC_NOFRAGMENT;
4098                                 goto retry;
4099                         }
4100                 }
4101 
4102                 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
4103                 if (!zone_watermark_fast(zone, order, mark,
4104                                        ac->highest_zoneidx, alloc_flags,
4105                                        gfp_mask)) {
4106                         int ret;
4107 
4108 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
4109                         /*
4110                          * Watermark failed for this zone, but see if we can
4111                          * grow this zone if it contains deferred pages.
4112                          */
4113                         if (static_branch_unlikely(&deferred_pages)) {
4114                                 if (_deferred_grow_zone(zone, order))
4115                                         goto try_this_zone;
4116                         }
4117 #endif
4118                         /* Checked here to keep the fast path fast */
4119                         BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
4120                         if (alloc_flags & ALLOC_NO_WATERMARKS)
4121                                 goto try_this_zone;
4122 
4123                         if (!node_reclaim_enabled() ||
4124                             !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
4125                                 continue;
4126 
4127                         ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
4128                         switch (ret) {
4129                         case NODE_RECLAIM_NOSCAN:
4130                                 /* did not scan */
4131                                 continue;
4132                         case NODE_RECLAIM_FULL:
4133                                 /* scanned but unreclaimable */
4134                                 continue;
4135                         default:
4136                                 /* did we reclaim enough */
4137                                 if (zone_watermark_ok(zone, order, mark,
4138                                         ac->highest_zoneidx, alloc_flags))
4139                                         goto try_this_zone;
4140 
4141                                 continue;
4142                         }
4143                 }
4144 
4145 try_this_zone:
4146                 page = rmqueue(ac->preferred_zoneref->zone, zone, order,
4147                                 gfp_mask, alloc_flags, ac->migratetype);
4148                 if (page) {
4149                         prep_new_page(page, order, gfp_mask, alloc_flags);
4150 
4151                         /*
4152                          * If this is a high-order atomic allocation then check
4153                          * if the pageblock should be reserved for the future
4154                          */
4155                         if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
4156                                 reserve_highatomic_pageblock(page, zone, order);
4157 
4158                         return page;
4159                 } else {
4160 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
4161                         /* Try again if zone has deferred pages */
4162                         if (static_branch_unlikely(&deferred_pages)) {
4163                                 if (_deferred_grow_zone(zone, order))
4164                                         goto try_this_zone;
4165                         }
4166 #endif
4167                 }
4168         }
4169 
4170         /*
4171          * It's possible on a UMA machine to get through all zones that are
4172          * fragmented. If avoiding fragmentation, reset and try again.
4173          */
4174         if (no_fallback) {
4175                 alloc_flags &= ~ALLOC_NOFRAGMENT;
4176                 goto retry;
4177         }
4178 
4179         return NULL;
4180 }
4181 
4182 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
4183 {
4184         unsigned int filter = SHOW_MEM_FILTER_NODES;
4185 
4186         /*
4187          * This documents exceptions given to allocations in certain
4188          * contexts that are allowed to allocate outside current's set
4189          * of allowed nodes.
4190          */
4191         if (!(gfp_mask & __GFP_NOMEMALLOC))
4192                 if (tsk_is_oom_victim(current) ||
4193                     (current->flags & (PF_MEMALLOC | PF_EXITING)))
4194                         filter &= ~SHOW_MEM_FILTER_NODES;
4195         if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
4196                 filter &= ~SHOW_MEM_FILTER_NODES;
4197 
4198         show_mem(filter, nodemask);
4199 }
4200 
4201 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
4202 {
4203         struct va_format vaf;
4204         va_list args;
4205         static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1);
4206 
4207         if ((gfp_mask & __GFP_NOWARN) ||
4208              !__ratelimit(&nopage_rs) ||
4209              ((gfp_mask & __GFP_DMA) && !has_managed_dma()))
4210                 return;
4211 
4212         va_start(args, fmt);
4213         vaf.fmt = fmt;
4214         vaf.va = &args;
4215         pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl",
4216                         current->comm, &vaf, gfp_mask, &gfp_mask,
4217                         nodemask_pr_args(nodemask));
4218         va_end(args);
4219 
4220         cpuset_print_current_mems_allowed();
4221         pr_cont("\n");
4222         dump_stack();
4223         warn_alloc_show_mem(gfp_mask, nodemask);
4224 }
4225 
4226 static inline struct page *
4227 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
4228                               unsigned int alloc_flags,
4229                               const struct alloc_context *ac)
4230 {
4231         struct page *page;
4232 
4233         page = get_page_from_freelist(gfp_mask, order,
4234                         alloc_flags|ALLOC_CPUSET, ac);
4235         /*
4236          * fallback to ignore cpuset restriction if our nodes
4237          * are depleted
4238          */
4239         if (!page)
4240                 page = get_page_from_freelist(gfp_mask, order,
4241                                 alloc_flags, ac);
4242 
4243         return page;
4244 }
4245 
4246 static inline struct page *
4247 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
4248         const struct alloc_context *ac, unsigned long *did_some_progress)
4249 {
4250         struct oom_control oc = {
4251                 .zonelist = ac->zonelist,
4252                 .nodemask = ac->nodemask,
4253                 .memcg = NULL,
4254                 .gfp_mask = gfp_mask,
4255                 .order = order,
4256         };
4257         struct page *page;
4258 
4259         *did_some_progress = 0;
4260 
4261         /*
4262          * Acquire the oom lock.  If that fails, somebody else is
4263          * making progress for us.
4264          */
4265         if (!mutex_trylock(&oom_lock)) {
4266                 *did_some_progress = 1;
4267                 schedule_timeout_uninterruptible(1);
4268                 return NULL;
4269         }
4270 
4271         /*
4272          * Go through the zonelist yet one more time, keeping a very high
4273          * watermark here; this is only to catch a parallel oom killing, and we
4274          * must fail if we're still under heavy pressure. But make sure that
4275          * this reclaim attempt does not depend on a __GFP_DIRECT_RECLAIM &&
4276          * !__GFP_NORETRY allocation, which would never fail with oom_lock held.
4277          */
4278         page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
4279                                       ~__GFP_DIRECT_RECLAIM, order,
4280                                       ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
4281         if (page)
4282                 goto out;
4283 
4284         /* Coredumps can quickly deplete all memory reserves */
4285         if (current->flags & PF_DUMPCORE)
4286                 goto out;
4287         /* The OOM killer will not help higher order allocs */
4288         if (order > PAGE_ALLOC_COSTLY_ORDER)
4289                 goto out;
4290         /*
4291          * We have already exhausted all our reclaim opportunities without any
4292          * success so it is time to admit defeat. We will skip the OOM killer
4293          * because it is very likely that the caller has a more reasonable
4294          * fallback than shooting a random task.
4295          *
4296          * The OOM killer may not free memory on a specific node.
4297          */
4298         if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE))
4299                 goto out;
4300         /* The OOM killer does not needlessly kill tasks for lowmem */
4301         if (ac->highest_zoneidx < ZONE_NORMAL)
4302                 goto out;
4303         if (pm_suspended_storage())
4304                 goto out;
4305         /*
4306          * XXX: GFP_NOFS allocations should rather fail than rely on
4307          * other requests to make forward progress.
4308          * We are in an unfortunate situation where out_of_memory cannot
4309          * do much for this context, but let's try it to at least get
4310          * access to memory reserves if the current task is killed (see
4311          * out_of_memory). Once filesystems are ready to handle allocation
4312          * failures more gracefully we should just bail out here.
4313          */
4314 
4315         /* Exhausted what can be done so it's blame time */
4316         if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
4317                 *did_some_progress = 1;
4318 
4319                 /*
4320                  * Help non-failing allocations by giving them access to memory
4321                  * reserves
4322                  */
4323                 if (gfp_mask & __GFP_NOFAIL)
4324                         page = __alloc_pages_cpuset_fallback(gfp_mask, order,
4325                                         ALLOC_NO_WATERMARKS, ac);
4326         }
4327 out:
4328         mutex_unlock(&oom_lock);
4329         return page;
4330 }
4331 
4332 /*
4333  * Maximum number of compaction retries with progress before the OOM
4334  * killer is considered the only way to move forward.
4335  */
4336 #define MAX_COMPACT_RETRIES 16
4337 
4338 #ifdef CONFIG_COMPACTION
4339 /* Try memory compaction for high-order allocations before reclaim */
4340 static struct page *
4341 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4342                 unsigned int alloc_flags, const struct alloc_context *ac,
4343                 enum compact_priority prio, enum compact_result *compact_result)
4344 {
4345         struct page *page = NULL;
4346         unsigned long pflags;
4347         unsigned int noreclaim_flag;
4348 
4349         if (!order)
4350                 return NULL;
4351 
4352         psi_memstall_enter(&pflags);
4353         noreclaim_flag = memalloc_noreclaim_save();
4354 
4355         *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
4356                                                                 prio, &page);
4357 
4358         memalloc_noreclaim_restore(noreclaim_flag);
4359         psi_memstall_leave(&pflags);
4360 
4361         if (*compact_result == COMPACT_SKIPPED)
4362                 return NULL;
4363         /*
4364          * At least in one zone compaction wasn't deferred or skipped, so let's
4365          * count a compaction stall
4366          */
4367         count_vm_event(COMPACTSTALL);
4368 
4369         /* Prep a captured page if available */
4370         if (page)
4371                 prep_new_page(page, order, gfp_mask, alloc_flags);
4372 
4373         /* Try to get a page from the freelist if available */
4374         if (!page)
4375                 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4376 
4377         if (page) {
4378                 struct zone *zone = page_zone(page);
4379 
4380                 zone->compact_blockskip_flush = false;
4381                 compaction_defer_reset(zone, order, true);
4382                 count_vm_event(COMPACTSUCCESS);
4383                 return page;
4384         }
4385 
4386         /*
4387          * It's bad if a compaction run occurs and fails. The most likely reason
4388          * is that pages exist, but not enough to satisfy watermarks.
4389          */
4390         count_vm_event(COMPACTFAIL);
4391 
4392         cond_resched();
4393 
4394         return NULL;
4395 }
4396 
4397 static inline bool
4398 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
4399                      enum compact_result compact_result,
4400                      enum compact_priority *compact_priority,
4401                      int *compaction_retries)
4402 {
4403         int max_retries = MAX_COMPACT_RETRIES;
4404         int min_priority;
4405         bool ret = false;
4406         int retries = *compaction_retries;
4407         enum compact_priority priority = *compact_priority;
4408 
4409         if (!order)
4410                 return false;
4411 
4412         if (fatal_signal_pending(current))
4413                 return false;
4414 
4415         if (compaction_made_progress(compact_result))
4416                 (*compaction_retries)++;
4417 
4418         /*
4419          * compaction considers all the zones as desperately out of memory,
4420          * so it doesn't really make much sense to retry except when the
4421          * failure could be caused by insufficient priority
4422          */
4423         if (compaction_failed(compact_result))
4424                 goto check_priority;
4425 
4426         /*
4427          * compaction was skipped because there are not enough order-0 pages
4428          * to work with, so we retry only if it looks like reclaim can help.
4429          */
4430         if (compaction_needs_reclaim(compact_result)) {
4431                 ret = compaction_zonelist_suitable(ac, order, alloc_flags);
4432                 goto out;
4433         }
4434 
4435         /*
4436          * Make sure the compaction wasn't deferred or didn't bail out early
4437          * due to lock contention before we declare that we should give up.
4438          * But the next retry should use a higher priority if allowed, so
4439          * we don't just keep bailing out endlessly.
4440          */
4441         if (compaction_withdrawn(compact_result)) {
4442                 goto check_priority;
4443         }
4444 
4445         /*
4446          * !costly requests are much more important than __GFP_RETRY_MAYFAIL
4447          * costly ones because they are de facto nofail and invoke the OOM
4448          * killer to move on, while costly ones can fail and users are ready
4449          * to cope with that. 1/4 of the retries is rather arbitrary but we
4450          * would need much more detailed feedback from compaction to
4451          * make a better decision.
4452          */
4453         if (order > PAGE_ALLOC_COSTLY_ORDER)
4454                 max_retries /= 4;
4455         if (*compaction_retries <= max_retries) {
4456                 ret = true;
4457                 goto out;
4458         }
4459 
4460         /*
4461          * Make sure there are attempts at the highest priority if we exhausted
4462          * all retries or failed at the lower priorities.
4463          */
4464 check_priority:
4465         min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
4466                         MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
4467 
4468         if (*compact_priority > min_priority) {
4469                 (*compact_priority)--;
4470                 *compaction_retries = 0;
4471                 ret = true;
4472         }
4473 out:
4474         trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
4475         return ret;
4476 }
4477 #else
4478 static inline struct page *
4479 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4480                 unsigned int alloc_flags, const struct alloc_context *ac,
4481                 enum compact_priority prio, enum compact_result *compact_result)
4482 {
4483         *compact_result = COMPACT_SKIPPED;
4484         return NULL;
4485 }
4486 
4487 static inline bool
4488 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
4489                      enum compact_result compact_result,
4490                      enum compact_priority *compact_priority,
4491                      int *compaction_retries)
4492 {
4493         struct zone *zone;
4494         struct zoneref *z;
4495 
4496         if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
4497                 return false;
4498 
4499         /*
4500          * There are setups with compaction disabled which would prefer to loop
4501          * inside the allocator rather than hit the oom killer prematurely.
4502          * Let's give them a good hope and keep retrying while the order-0
4503          * watermarks are OK.
4504          */
4505         for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4506                                 ac->highest_zoneidx, ac->nodemask) {
4507                 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
4508                                         ac->highest_zoneidx, alloc_flags))
4509                         return true;
4510         }
4511         return false;
4512 }
4513 #endif /* CONFIG_COMPACTION */
4514 
4515 #ifdef CONFIG_LOCKDEP
4516 static struct lockdep_map __fs_reclaim_map =
4517         STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);
4518 
4519 static bool __need_reclaim(gfp_t gfp_mask)
4520 {
4521         /* no reclaim without waiting on it */
4522         if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
4523                 return false;
4524 
4525         /* this guy won't enter reclaim */
4526         if (current->flags & PF_MEMALLOC)
4527                 return false;
4528 
4529         if (gfp_mask & __GFP_NOLOCKDEP)
4530                 return false;
4531 
4532         return true;
4533 }
4534 
4535 void __fs_reclaim_acquire(unsigned long ip)
4536 {
4537         lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip);
4538 }
4539 
4540 void __fs_reclaim_release(unsigned long ip)
4541 {
4542         lock_release(&__fs_reclaim_map, ip);
4543 }
4544 
4545 void fs_reclaim_acquire(gfp_t gfp_mask)
4546 {
4547         gfp_mask = current_gfp_context(gfp_mask);
4548 
4549         if (__need_reclaim(gfp_mask)) {
4550                 if (gfp_mask & __GFP_FS)
4551                         __fs_reclaim_acquire(_RET_IP_);
4552 
4553 #ifdef CONFIG_MMU_NOTIFIER
4554                 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
4555                 lock_map_release(&__mmu_notifier_invalidate_range_start_map);
4556 #endif
4557 
4558         }
4559 }
4560 EXPORT_SYMBOL_GPL(fs_reclaim_acquire);
4561 
4562 void fs_reclaim_release(gfp_t gfp_mask)
4563 {
4564         gfp_mask = current_gfp_context(gfp_mask);
4565 
4566         if (__need_reclaim(gfp_mask)) {
4567                 if (gfp_mask & __GFP_FS)
4568                         __fs_reclaim_release(_RET_IP_);
4569         }
4570 }
4571 EXPORT_SYMBOL_GPL(fs_reclaim_release);
4572 #endif
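/*
 * Illustrative sketch (editor's example, not part of page_alloc.c): code that
 * can also run from direct reclaim may prime lockdep with the fs_reclaim map
 * so that taking one of its locks under a later GFP_KERNEL allocation gets
 * flagged as a potential reclaim deadlock. The function name is hypothetical.
 */
static void example_mark_reclaim_recursion(void)
{
	/* Pretend we are in reclaim, record lock dependencies, then back out. */
	fs_reclaim_acquire(GFP_KERNEL);
	fs_reclaim_release(GFP_KERNEL);
}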
4573 
4574 /* Perform direct synchronous page reclaim */
4575 static unsigned long
4576 __perform_reclaim(gfp_t gfp_mask, unsigned int order,
4577                                         const struct alloc_context *ac)
4578 {
4579         unsigned int noreclaim_flag;
4580         unsigned long pflags, progress;
4581 
4582         cond_resched();
4583 
4584         /* We now go into synchronous reclaim */
4585         cpuset_memory_pressure_bump();
4586         psi_memstall_enter(&pflags);
4587         fs_reclaim_acquire(gfp_mask);
4588         noreclaim_flag = memalloc_noreclaim_save();
4589 
4590         progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
4591                                                                 ac->nodemask);
4592 
4593         memalloc_noreclaim_restore(noreclaim_flag);
4594         fs_reclaim_release(gfp_mask);
4595         psi_memstall_leave(&pflags);
4596 
4597         cond_resched();
4598 
4599         return progress;
4600 }
4601 
4602 /* The really slow allocator path where we enter direct reclaim */
4603 static inline struct page *
4604 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
4605                 unsigned int alloc_flags, const struct alloc_context *ac,
4606                 unsigned long *did_some_progress)
4607 {
4608         struct page *page = NULL;
4609         bool drained = false;
4610 
4611         *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
4612         if (unlikely(!(*did_some_progress)))
4613                 return NULL;
4614 
4615 retry:
4616         page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4617 
4618         /*
4619          * If an allocation failed after direct reclaim, it could be because
4620          * pages are pinned on the per-cpu lists or in high alloc reserves.
4621          * Shrink them and try again
4622          */
4623         if (!page && !drained) {
4624                 unreserve_highatomic_pageblock(ac, false);
4625                 drain_all_pages(NULL);
4626                 drained = true;
4627                 goto retry;
4628         }
4629 
4630         return page;
4631 }
4632 
4633 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
4634                              const struct alloc_context *ac)
4635 {
4636         struct zoneref *z;
4637         struct zone *zone;
4638         pg_data_t *last_pgdat = NULL;
4639         enum zone_type highest_zoneidx = ac->highest_zoneidx;
4640 
4641         for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx,
4642                                         ac->nodemask) {
4643                 if (last_pgdat != zone->zone_pgdat)
4644                         wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
4645                 last_pgdat = zone->zone_pgdat;
4646         }
4647 }
4648 
4649 static inline unsigned int
4650 gfp_to_alloc_flags(gfp_t gfp_mask)
4651 {
4652         unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
4653 
4654         /*
4655          * __GFP_HIGH is assumed to be the same as ALLOC_HIGH
4656          * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
4657          * to save two branches.
4658          */
4659         BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
4660         BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD);
4661 
4662         /*
4663          * The caller may dip into page reserves a bit more if the caller
4664          * cannot run direct reclaim, or if the caller has realtime scheduling
4665          * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
4666          * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
4667          */
4668         alloc_flags |= (__force int)
4669                 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
4670 
4671         if (gfp_mask & __GFP_ATOMIC) {
4672                 /*
4673                  * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
4674                  * if it can't schedule.
4675                  */
4676                 if (!(gfp_mask & __GFP_NOMEMALLOC))
4677                         alloc_flags |= ALLOC_HARDER;
4678                 /*
4679                  * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
4680                  * comment for __cpuset_node_allowed().
4681                  */
4682                 alloc_flags &= ~ALLOC_CPUSET;
4683         } else if (unlikely(rt_task(current)) && in_task())
4684                 alloc_flags |= ALLOC_HARDER;
4685 
4686         alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);
4687 
4688         return alloc_flags;
4689 }
4690 
4691 static bool oom_reserves_allowed(struct task_struct *tsk)
4692 {
4693         if (!tsk_is_oom_victim(tsk))
4694                 return false;
4695 
4696         /*
4697          * !MMU doesn't have the oom reaper, so give access to memory reserves
4698          * only to the thread with TIF_MEMDIE set
4699          */
4700         if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
4701                 return false;
4702 
4703         return true;
4704 }
4705 
4706 /*
4707  * Distinguish requests which really need access to full memory
4708  * reserves from oom victims which can live with a portion of it
4709  */
4710 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
4711 {
4712         if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
4713                 return 0;
4714         if (gfp_mask & __GFP_MEMALLOC)
4715                 return ALLOC_NO_WATERMARKS;
4716         if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
4717                 return ALLOC_NO_WATERMARKS;
4718         if (!in_interrupt()) {
4719                 if (current->flags & PF_MEMALLOC)
4720                         return ALLOC_NO_WATERMARKS;
4721                 else if (oom_reserves_allowed(current))
4722                         return ALLOC_OOM;
4723         }
4724 
4725         return 0;
4726 }
4727 
4728 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
4729 {
4730         return !!__gfp_pfmemalloc_flags(gfp_mask);
4731 }
4732 
4733 /*
4734  * Checks whether it makes sense to retry the reclaim to make a forward progress
4735  * for the given allocation request.
4736  *
4737  * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
4738  * without success, or when we couldn't even meet the watermark if we
4739  * reclaimed all remaining pages on the LRU lists.
4740  *
4741  * Returns true if a retry is viable or false to enter the oom path.
4742  */
4743 static inline bool
4744 should_reclaim_retry(gfp_t gfp_mask, unsigned order,
4745                      struct alloc_context *ac, int alloc_flags,
4746                      bool did_some_progress, int *no_progress_loops)
4747 {
4748         struct zone *zone;
4749         struct zoneref *z;
4750         bool ret = false;
4751 
4752         /*
4753          * Costly allocations might have made progress, but due to high
4754          * fragmentation this doesn't mean their order will become available,
4755          * so always increment the no-progress counter for them.
4756          */
4757         if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
4758                 *no_progress_loops = 0;
4759         else
4760                 (*no_progress_loops)++;
4761 
4762         /*
4763          * Make sure we converge to OOM if we cannot make any progress
4764          * several times in a row.
4765          */
4766         if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
4767                 /* Before OOM, exhaust highatomic_reserve */
4768                 return unreserve_highatomic_pageblock(ac, true);
4769         }
4770 
4771         /*
4772          * Keep reclaiming pages while there is a chance this will lead
4773          * somewhere.  If none of the target zones can satisfy our allocation
4774          * request even if all reclaimable pages are considered then we are
4775          * screwed and have to go OOM.
4776          */
4777         for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4778                                 ac->highest_zoneidx, ac->nodemask) {
4779                 unsigned long available;
4780                 unsigned long reclaimable;
4781                 unsigned long min_wmark = min_wmark_pages(zone);
4782                 bool wmark;
4783 
4784                 available = reclaimable = zone_reclaimable_pages(zone);
4785                 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
4786 
4787                 /*
4788                  * Would the allocation succeed if we reclaimed all
4789                  * reclaimable pages?
4790                  */
4791                 wmark = __zone_watermark_ok(zone, order, min_wmark,
4792                                 ac->highest_zoneidx, alloc_flags, available);
4793                 trace_reclaim_retry_zone(z, order, reclaimable,
4794                                 available, min_wmark, *no_progress_loops, wmark);
4795                 if (wmark) {
4796                         ret = true;
4797                         break;
4798                 }
4799         }
4800 
4801         /*
4802          * Memory allocation/reclaim might be called from a WQ context and the
4803          * current implementation of the WQ concurrency control doesn't
4804          * recognize that a particular WQ is congested if the worker thread is
4805          * looping without ever sleeping. Therefore we have to do a short sleep
4806          * here rather than calling cond_resched().
4807          */
4808         if (current->flags & PF_WQ_WORKER)
4809                 schedule_timeout_uninterruptible(1);
4810         else
4811                 cond_resched();
4812         return ret;
4813 }
4814 
4815 static inline bool
4816 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
4817 {
4818         /*
4819          * It's possible that cpuset's mems_allowed and the nodemask from
4820          * mempolicy don't intersect. This should be normally dealt with by
4821          * policy_nodemask(), but it's possible to race with cpuset update in
4822          * such a way the check therein was true, and then it became false
4823          * before we got our cpuset_mems_cookie here.
4824          * This assumes that for all allocations, ac->nodemask can come only
4825          * from MPOL_BIND mempolicy (whose documented semantics is to be ignored
4826          * when it does not intersect with the cpuset restrictions) or the
4827          * caller can deal with a violated nodemask.
4828          */
4829         if (cpusets_enabled() && ac->nodemask &&
4830                         !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
4831                 ac->nodemask = NULL;
4832                 return true;
4833         }
4834 
4835         /*
4836          * When updating a task's mems_allowed or mempolicy nodemask, it is
4837          * possible to race with parallel threads in such a way that our
4838          * allocation can fail while the mask is being updated. If we are about
4839          * to fail, check if the cpuset changed during allocation and if so,
4840          * retry.
4841          */
4842         if (read_mems_allowed_retry(cpuset_mems_cookie))
4843                 return true;
4844 
4845         return false;
4846 }
4847 
4848 static inline struct page *
4849 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
4850                                                 struct alloc_context *ac)
4851 {
4852         bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
4853         const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
4854         struct page *page = NULL;
4855         unsigned int alloc_flags;
4856         unsigned long did_some_progress;
4857         enum compact_priority compact_priority;
4858         enum compact_result compact_result;
4859         int compaction_retries;
4860         int no_progress_loops;
4861         unsigned int cpuset_mems_cookie;
4862         int reserve_flags;
4863 
4864         /*
4865          * We also sanity check to catch abuse of atomic reserves being used by
4866          * callers that are not in atomic context.
4867          */
4868         if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
4869                                 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
4870                 gfp_mask &= ~__GFP_ATOMIC;
4871 
4872 retry_cpuset:
4873         compaction_retries = 0;
4874         no_progress_loops = 0;
4875         compact_priority = DEF_COMPACT_PRIORITY;
4876         cpuset_mems_cookie = read_mems_allowed_begin();
4877 
4878         /*
4879          * The fast path uses conservative alloc_flags to succeed only until
4880          * kswapd needs to be woken up, and to avoid the cost of setting up
4881          * alloc_flags precisely. So we do that now.
4882          */
4883         alloc_flags = gfp_to_alloc_flags(gfp_mask);
4884 
4885         /*
4886          * We need to recalculate the starting point for the zonelist iterator
4887          * because we might have used different nodemask in the fast path, or
4888          * there was a cpuset modification and we are retrying - otherwise we
4889          * could end up iterating over non-eligible zones endlessly.
4890          */
4891         ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4892                                         ac->highest_zoneidx, ac->nodemask);
4893         if (!ac->preferred_zoneref->zone)
4894                 goto nopage;
4895 
4896         /*
4897          * Check for insane configurations where the cpuset doesn't contain
4898          * any suitable zone to satisfy the request - e.g. non-movable
4899          * GFP_HIGHUSER allocations from MOVABLE nodes only.
4900          */
4901         if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) {
4902                 struct zoneref *z = first_zones_zonelist(ac->zonelist,
4903                                         ac->highest_zoneidx,
4904                                         &cpuset_current_mems_allowed);
4905                 if (!z->zone)
4906                         goto nopage;
4907         }
4908 
4909         if (alloc_flags & ALLOC_KSWAPD)
4910                 wake_all_kswapds(order, gfp_mask, ac);
4911 
4912         /*
4913          * The adjusted alloc_flags might result in immediate success, so try
4914          * that first
4915          */
4916         page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4917         if (page)
4918                 goto got_pg;
4919 
4920         /*
4921          * For costly allocations, try direct compaction first, as it's likely
4922          * that we have enough base pages and don't need to reclaim. For non-
4923          * movable high-order allocations, do that as well, as compaction will
4924          * try to prevent permanent fragmentation by migrating from blocks of the
4925          * same migratetype.
4926          * Don't try this for allocations that are allowed to ignore
4927          * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
4928          */
4929         if (can_direct_reclaim &&
4930                         (costly_order ||
4931                            (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
4932                         && !gfp_pfmemalloc_allowed(gfp_mask)) {
4933                 page = __alloc_pages_direct_compact(gfp_mask, order,
4934                                                 alloc_flags, ac,
4935                                                 INIT_COMPACT_PRIORITY,
4936                                                 &compact_result);
4937                 if (page)
4938                         goto got_pg;
4939 
4940                 /*
4941                  * Checks for costly allocations with __GFP_NORETRY, which
4942                  * includes some THP page fault allocations
4943                  */
4944                 if (costly_order && (gfp_mask & __GFP_NORETRY)) {
4945                         /*
4946                          * If allocating entire pageblock(s) and compaction
4947                          * failed because all zones are below low watermarks
4948                          * or is prohibited because it recently failed at this
4949                          * order, fail immediately unless the allocator has
4950                          * requested compaction and reclaim retry.
4951                          *
4952                          * Reclaim is
4953                          *  - potentially very expensive because zones are far
4954                          *    below their low watermarks or this is part of very
4955                          *    bursty high order allocations,
4956                          *  - not guaranteed to help because isolate_freepages()
4957                          *    may not iterate over freed pages as part of its
4958                          *    linear scan, and
4959                          *  - unlikely to make entire pageblocks free on its
4960                          *    own.
4961                          */
4962                         if (compact_result == COMPACT_SKIPPED ||
4963                             compact_result == COMPACT_DEFERRED)
4964                                 goto nopage;
4965 
4966                         /*
4967                          * Looks like reclaim/compaction is worth trying, but
4968                          * sync compaction could be very expensive, so keep
4969                          * using async compaction.
4970                          */
4971                         compact_priority = INIT_COMPACT_PRIORITY;
4972                 }
4973         }
4974 
4975 retry:
4976         /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
4977         if (alloc_flags & ALLOC_KSWAPD)
4978                 wake_all_kswapds(order, gfp_mask, ac);
4979 
4980         reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
4981         if (reserve_flags)
4982                 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags);
4983 
4984         /*
4985          * Reset the nodemask and zonelist iterators if memory policies can be
4986          * ignored. These allocations are high priority and system rather than
4987          * user oriented.
4988          */
4989         if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
4990                 ac->nodemask = NULL;
4991                 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4992                                         ac->highest_zoneidx, ac->nodemask);
4993         }
4994 
4995         /* Attempt with potentially adjusted zonelist and alloc_flags */
4996         page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4997         if (page)
4998                 goto got_pg;
4999 
5000         /* Caller is not willing to reclaim, we can't balance anything */
5001         if (!can_direct_reclaim)
5002                 goto nopage;
5003 
5004         /* Avoid recursion of direct reclaim */
5005         if (current->flags & PF_MEMALLOC)
5006                 goto nopage;
5007 
5008         /* Try direct reclaim and then allocating */
5009         page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
5010                                                         &did_some_progress);
5011         if (page)
5012                 goto got_pg;
5013 
5014         /* Try direct compaction and then allocating */
5015         page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
5016                                         compact_priority, &compact_result);
5017         if (page)
5018                 goto got_pg;
5019 
5020         /* Do not loop if specifically requested */
5021         if (gfp_mask & __GFP_NORETRY)
5022                 goto nopage;
5023 
5024         /*
5025          * Do not retry costly high order allocations unless they are
5026          * __GFP_RETRY_MAYFAIL
5027          */
5028         if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
5029                 goto nopage;
5030 
5031         if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
5032                                  did_some_progress > 0, &no_progress_loops))
5033                 goto retry;
5034 
5035         /*
5036          * It doesn't make any sense to retry the compaction if the order-0
5037          * reclaim is not able to make any progress because the current
5038          * implementation of the compaction depends on a sufficient amount
5039          * of free memory (see __compaction_suitable)
5040          */
5041         if (did_some_progress > 0 &&
5042                         should_compact_retry(ac, order, alloc_flags,
5043                                 compact_result, &compact_priority,
5044                                 &compaction_retries))
5045                 goto retry;
5046 
5047 
5048         /* Deal with possible cpuset update races before we start OOM killing */
5049         if (check_retry_cpuset(cpuset_mems_cookie, ac))
5050                 goto retry_cpuset;
5051 
5052         /* Reclaim has failed us, start killing things */
5053         page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
5054         if (page)
5055                 goto got_pg;
5056 
5057         /* Avoid allocations with no watermarks from looping endlessly */
5058         if (tsk_is_oom_victim(current) &&
5059             (alloc_flags & ALLOC_OOM ||
5060              (gfp_mask & __GFP_NOMEMALLOC)))
5061                 goto nopage;
5062 
5063         /* Retry as long as the OOM killer is making progress */
5064         if (did_some_progress) {
5065                 no_progress_loops = 0;
5066                 goto retry;
5067         }
5068 
5069 nopage:
5070         /* Deal with possible cpuset update races before we fail */
5071         if (check_retry_cpuset(cpuset_mems_cookie, ac))
5072                 goto retry_cpuset;
5073 
5074         /*
5075          * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
5076          * we always retry
5077          */
5078         if (gfp_mask & __GFP_NOFAIL) {
5079                 /*
5080                  * All existing users of __GFP_NOFAIL are blockable, so warn
5081                  * of any new users that actually require GFP_NOWAIT
5082                  */
5083                 if (WARN_ON_ONCE(!can_direct_reclaim))
5084                         goto fail;
5085 
5086                 /*
5087                  * A PF_MEMALLOC request from this context is rather bizarre
5088                  * because we cannot reclaim anything ourselves and can only
5089                  * loop waiting for somebody else to do the work for us.
5090                  */
5091                 WARN_ON_ONCE(current->flags & PF_MEMALLOC);
5092 
5093                 /*
5094                  * Non-failing costly orders are a hard requirement which we
5095                  * are not really prepared for, so let's warn about these users
5096                  * so that we can identify them and convert them to something
5097                  * else.
5098                  */
5099                 WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
5100 
5101                 /*
5102                  * Help non-failing allocations by giving them access to memory
5103                  * reserves but do not use ALLOC_NO_WATERMARKS because this
5104                  * could deplete whole memory reserves which would just make
5105                  * the situation worse
5106                  */
5107                 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
5108                 if (page)
5109                         goto got_pg;
5110 
5111                 cond_resched();
5112                 goto retry;
5113         }
5114 fail:
5115         warn_alloc(gfp_mask, ac->nodemask,
5116                         "page allocation failure: order:%u", order);
5117 got_pg:
5118         return page;
5119 }
5120 
5121 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
5122                 int preferred_nid, nodemask_t *nodemask,
5123                 struct alloc_context *ac, gfp_t *alloc_gfp,
5124                 unsigned int *alloc_flags)
5125 {
5126         ac->highest_zoneidx = gfp_zone(gfp_mask);
5127         ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
5128         ac->nodemask = nodemask;
5129         ac->migratetype = gfp_migratetype(gfp_mask);
5130 
5131         if (cpusets_enabled()) {
5132                 *alloc_gfp |= __GFP_HARDWALL;
5133                 /*
5134                  * When we are in interrupt context, the allocation is unrelated
5135                  * to the current task's cpuset, so any node is ok.
5136                  */
5137                 if (in_task() && !ac->nodemask)
5138                         ac->nodemask = &cpuset_current_mems_allowed;
5139                 else
5140                         *alloc_flags |= ALLOC_CPUSET;
5141         }
5142 
5143         fs_reclaim_acquire(gfp_mask);
5144         fs_reclaim_release(gfp_mask);
5145 
5146         might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
5147 
5148         if (should_fail_alloc_page(gfp_mask, order))
5149                 return false;
5150 
5151         *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
5152 
5153         /* Dirty zone balancing only done in the fast path */
5154         ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
5155 
5156         /*
5157          * The preferred zone is used for statistics but crucially it is
5158          * also used as the starting point for the zonelist iterator. It
5159          * may get reset for allocations that ignore memory policies.
5160          */
5161         ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
5162                                         ac->highest_zoneidx, ac->nodemask);
5163 
5164         return true;
5165 }
5166 
5167 /*
5168  * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array
5169  * @gfp: GFP flags for the allocation
5170  * @preferred_nid: The preferred NUMA node ID to allocate from
5171  * @nodemask: Set of nodes to allocate from, may be NULL
5172  * @nr_pages: The number of pages desired on the list or array
5173  * @page_list: Optional list to store the allocated pages
5174  * @page_array: Optional array to store the pages
5175  *
5176  * This is a batched version of the page allocator that attempts to
5177  * allocate nr_pages quickly. Pages are added to page_list if page_list
5178  * is not NULL, otherwise it is assumed that the page_array is valid.
5179  *
5180  * For lists, nr_pages is the number of pages that should be allocated.
5181  *
5182  * For arrays, only NULL elements are populated with pages and nr_pages
5183  * is the maximum number of pages that will be stored in the array.
5184  *
5185  * Returns the number of pages on the list or array.
5186  */
5187 unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
5188                         nodemask_t *nodemask, int nr_pages,
5189                         struct list_head *page_list,
5190                         struct page **page_array)
5191 {
5192         struct page *page;
5193         unsigned long flags;
5194         struct zone *zone;
5195         struct zoneref *z;
5196         struct per_cpu_pages *pcp;
5197         struct list_head *pcp_list;
5198         struct alloc_context ac;
5199         gfp_t alloc_gfp;
5200         unsigned int alloc_flags = ALLOC_WMARK_LOW;
5201         int nr_populated = 0, nr_account = 0;
5202 
5203         /*
5204          * Skip populated array elements to determine if any pages need
5205          * to be allocated before disabling IRQs.
5206          */
5207         while (page_array && nr_populated < nr_pages && page_array[nr_populated])
5208                 nr_populated++;
5209 
5210         /* No pages requested? */
5211         if (unlikely(nr_pages <= 0))
5212                 goto out;
5213 
5214         /* Already populated array? */
5215         if (unlikely(page_array && nr_pages - nr_populated == 0))
5216                 goto out;
5217 
5218         /* Bulk allocator does not support memcg accounting. */
5219         if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT))
5220                 goto failed;
5221 
5222         /* Use the single page allocator for one page. */
5223         if (nr_pages - nr_populated == 1)
5224                 goto failed;
5225 
5226 #ifdef CONFIG_PAGE_OWNER
5227         /*
5228          * PAGE_OWNER may recurse into the allocator to allocate space to
5229          * save the stack with pagesets.lock held. Releasing/reacquiring
5230          * removes much of the performance benefit of bulk allocation, so
5231          * force the caller to allocate one page at a time, as that has
5232          * performance similar to adding the complexity to the bulk allocator.
5233          */
5234         if (static_branch_unlikely(&page_owner_inited))
5235                 goto failed;
5236 #endif
5237 
5238         /* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
5239         gfp &= gfp_allowed_mask;
5240         alloc_gfp = gfp;
5241         if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags))
5242                 goto out;
5243         gfp = alloc_gfp;
5244 
5245         /* Find an allowed local zone that meets the low watermark. */
5246         for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) {
5247                 unsigned long mark;
5248 
5249                 if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
5250                     !__cpuset_zone_allowed(zone, gfp)) {
5251                         continue;
5252                 }
5253 
5254                 if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone &&
5255                     zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) {
5256                         goto failed;
5257                 }
5258 
5259                 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages;
5260                 if (zone_watermark_fast(zone, 0,  mark,
5261                                 zonelist_zone_idx(ac.preferred_zoneref),
5262                                 alloc_flags, gfp)) {
5263                         break;
5264                 }
5265         }
5266 
5267         /*
5268          * If there are no allowed local zones that meet the watermarks then
5269          * try to allocate a single page and reclaim if necessary.
5270          */
5271         if (unlikely(!zone))
5272                 goto failed;
5273 
5274         /* Attempt the batch allocation */
5275         local_lock_irqsave(&pagesets.lock, flags);
5276         pcp = this_cpu_ptr(zone->per_cpu_pageset);
5277         pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)];
5278 
5279         while (nr_populated < nr_pages) {
5280 
5281                 /* Skip existing pages */
5282                 if (page_array && page_array[nr_populated]) {
5283                         nr_populated++;
5284                         continue;
5285                 }
5286 
5287                 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
5288                                                                 pcp, pcp_list);
5289                 if (unlikely(!page)) {
5290                         /* Try and get at least one page */
5291                         if (!nr_populated)
5292                                 goto failed_irq;
5293                         break;
5294                 }
5295                 nr_account++;
5296 
5297                 prep_new_page(page, 0, gfp, 0);
5298                 if (page_list)
5299                         list_add(&page->lru, page_list);
5300                 else
5301                         page_array[nr_populated] = page;
5302                 nr_populated++;
5303         }
5304 
5305         local_unlock_irqrestore(&pagesets.lock, flags);
5306 
5307         __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
5308         zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
5309 
5310 out:
5311         return nr_populated;
5312 
5313 failed_irq:
5314         local_unlock_irqrestore(&pagesets.lock, flags);
5315 
5316 failed:
5317         page = __alloc_pages(gfp, 0, preferred_nid, nodemask);
5318         if (page) {
5319                 if (page_list)
5320                         list_add(&page->lru, page_list);
5321                 else
5322                         page_array[nr_populated] = page;
5323                 nr_populated++;
5324         }
5325 
5326         goto out;
5327 }
5328 EXPORT_SYMBOL_GPL(__alloc_pages_bulk);
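/*
 * Illustrative sketch (editor's example, not part of page_alloc.c): filling a
 * caller-provided array of order-0 pages. It assumes the alloc_pages_bulk_array()
 * wrapper from gfp.h, which calls __alloc_pages_bulk() with a page array and a
 * NULL list; only NULL slots are populated and the return value is the total
 * number of populated slots.
 */
static int example_fill_page_array(struct page **pages, unsigned long nr)
{
	unsigned long filled = alloc_pages_bulk_array(GFP_KERNEL, nr, pages);

	return filled == nr ? 0 : -ENOMEM;
}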
5329 
5330 /*
5331  * This is the 'heart' of the zoned buddy allocator.
5332  */
5333 struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
5334                                                         nodemask_t *nodemask)
5335 {
5336         struct page *page;
5337         unsigned int alloc_flags = ALLOC_WMARK_LOW;
5338         gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */
5339         struct alloc_context ac = { };
5340 
5341         /*
5342          * There are several places where we assume that the order value is sane
5343          * so bail out early if the request is out of bounds.
5344          */
5345         if (unlikely(order >= MAX_ORDER)) {
5346                 WARN_ON_ONCE(!(gfp & __GFP_NOWARN));
5347                 return NULL;
5348         }
5349 
5350         gfp &= gfp_allowed_mask;
5351         /*
5352          * Apply scoped allocation constraints. This is mainly about GFP_NOFS
5353          * resp. GFP_NOIO which has to be inherited for all allocation requests
5354          * from a particular context which has been marked by
5355          * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures
5356          * movable zones are not used during allocation.
5357          */
5358         gfp = current_gfp_context(gfp);
5359         alloc_gfp = gfp;
5360         if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac,
5361                         &alloc_gfp, &alloc_flags))
5362                 return NULL;
5363 
5364         /*
5365          * Forbid the first pass from falling back to types that fragment
5366          * memory until all local zones are considered.
5367          */
5368         alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);
5369 
5370         /* First allocation attempt */
5371         page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
5372         if (likely(page))
5373                 goto out;
5374 
5375         alloc_gfp = gfp;
5376         ac.spread_dirty_pages = false;
5377 
5378         /*
5379          * Restore the original nodemask if it was potentially replaced with
5380          * &cpuset_current_mems_allowed to optimize the fast-path attempt.
5381          */
5382         ac.nodemask = nodemask;
5383 
5384         page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
5385 
5386 out:
5387         if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) && page &&
5388             unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
5389                 __free_pages(page, order);
5390                 page = NULL;
5391         }
5392 
5393         trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
5394 
5395         return page;
5396 }
5397 EXPORT_SYMBOL(__alloc_pages);
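/*
 * Illustrative sketch (editor's example, not part of page_alloc.c): a typical
 * caller goes through the alloc_pages() wrapper, which ends up here. An
 * order-2 GFP_KERNEL request returns four contiguous, zeroed lowmem pages
 * that the caller later releases with __free_pages(page, 2). The function
 * name is hypothetical.
 */
static void *example_alloc_contig_buffer(void)
{
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 2);

	return page ? page_address(page) : NULL;
}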
5398 
5399 struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
5400                 nodemask_t *nodemask)
5401 {
5402         struct page *page = __alloc_pages(gfp | __GFP_COMP, order,
5403                         preferred_nid, nodemask);
5404 
5405         if (page && order > 1)
5406                 prep_transhuge_page(page);
5407         return (struct folio *)page;
5408 }
5409 EXPORT_SYMBOL(__folio_alloc);
5410 
5411 /*
5412  * Common helper functions. Never use with __GFP_HIGHMEM because the returned
5413  * address cannot represent highmem pages. Use alloc_pages and then kmap if
5414  * you need to access high mem.
5415  */
5416 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
5417 {
5418         struct page *page;
5419 
5420         page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
5421         if (!page)
5422                 return 0;
5423         return (unsigned long) page_address(page);
5424 }
5425 EXPORT_SYMBOL(__get_free_pages);
5426 
5427 unsigned long get_zeroed_page(gfp_t gfp_mask)
5428 {
5429         return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
5430 }
5431 EXPORT_SYMBOL(get_zeroed_page);
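/*
 * Illustrative sketch (editor's example, not part of page_alloc.c): the
 * address-based helpers above pair with free_page()/free_pages(), not with
 * __free_pages(). The function name is hypothetical.
 */
static int example_use_zeroed_page(void)
{
	unsigned long addr = get_zeroed_page(GFP_KERNEL);

	if (!addr)
		return -ENOMEM;
	/* ... use the zeroed page at 'addr' ... */
	free_page(addr);	/* same as free_pages(addr, 0) */
	return 0;
}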
5432 
5433 /**
5434  * __free_pages - Free pages allocated with alloc_pages().
5435  * @page: The page pointer returned from alloc_pages().
5436  * @order: The order of the allocation.
5437  *
5438  * This function can free multi-page allocations that are not compound
5439  * pages.  It does not check that the @order passed in matches that of
5440  * the allocation, so it is easy to leak memory.  Freeing more memory
5441  * than was allocated will probably emit a warning.
5442  *
5443  * If the last reference to this page is speculative, it will be released
5444  * by put_page() which only frees the first page of a non-compound
5445  * allocation.  To prevent the remaining pages from being leaked, we free
5446  * the subsequent pages here.  If you want to use the page's reference
5447  * count to decide when to free the allocation, you should allocate a
5448  * compound page, and use put_page() instead of __free_pages().
5449  *
5450  * Context: May be called in interrupt context or while holding a normal
5451  * spinlock, but not in NMI context or while holding a raw spinlock.
5452  */
5453 void __free_pages(struct page *page, unsigned int order)
5454 {
5455         if (put_page_testzero(page))
5456                 free_the_page(page, order);
5457         else if (!PageHead(page))
5458                 while (order-- > 0)
5459                         free_the_page(page + (1 << order), order);
5460 }
5461 EXPORT_SYMBOL(__free_pages);
5462 
5463 void free_pages(unsigned long addr, unsigned int order)
5464 {
5465         if (addr != 0) {
5466                 VM_BUG_ON(!virt_addr_valid((void *)addr));
5467                 __free_pages(virt_to_page((void *)addr), order);
5468         }
5469 }
5470 
5471 EXPORT_SYMBOL(free_pages);
5472 
5473 /*
5474  * Page Fragment:
5475  *  An arbitrary-length arbitrary-offset area of memory which resides
5476  *  within a 0 or higher order page.  Multiple fragments within that page
5477  *  are individually refcounted, in the page's reference counter.
5478  *
5479  * The page_frag functions below provide a simple allocation framework for
5480  * page fragments.  This is used by the network stack and network device
5481  * drivers to provide a backing region of memory for use as either an
5482  * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
5483  */
5484 static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
5485                                              gfp_t gfp_mask)
5486 {
5487         struct page *page = NULL;
5488         gfp_t gfp = gfp_mask;
5489 
5490 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5491         gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
5492                     __GFP_NOMEMALLOC;
5493         page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
5494                                 PAGE_FRAG_CACHE_MAX_ORDER);
5495         nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
5496 #endif
5497         if (unlikely(!page))
5498                 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
5499 
5500         nc->va = page ? page_address(page) : NULL;
5501 
5502         return page;
5503 }
5504 
5505 void __page_frag_cache_drain(struct page *page, unsigned int count)
5506 {
5507         VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
5508 
5509         if (page_ref_sub_and_test(page, count))
5510                 free_the_page(page, compound_order(page));
5511 }
5512 EXPORT_SYMBOL(__page_frag_cache_drain);
5513 
5514 void *page_frag_alloc_align(struct page_frag_cache *nc,
5515                       unsigned int fragsz, gfp_t gfp_mask,
5516                       unsigned int align_mask)
5517 {
5518         unsigned int size = PAGE_SIZE;
5519         struct page *page;
5520         int offset;
5521 
5522         if (unlikely(!nc->va)) {
5523 refill:
5524                 page = __page_frag_cache_refill(nc, gfp_mask);
5525                 if (!page)
5526                         return NULL;
5527 
5528 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5529                 /* if size can vary use size else just use PAGE_SIZE */
5530                 size = nc->size;
5531 #endif
5532                 /* Even if we own the page, we do not use atomic_set().
5533                  * This would break get_page_unless_zero() users.
5534                  */
5535                 page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
5536 
5537                 /* reset page count bias and offset to start of new frag */
5538                 nc->pfmemalloc = page_is_pfmemalloc(page);
5539                 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
5540                 nc->offset = size;
5541         }
5542 
5543         offset = nc->offset - fragsz;
5544         if (unlikely(offset < 0)) {
5545                 page = virt_to_page(nc->va);
5546 
5547                 if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
5548                         goto refill;
5549 
5550                 if (unlikely(nc->pfmemalloc)) {
5551                         free_the_page(page, compound_order(page));
5552                         goto refill;
5553                 }
5554 
5555 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5556                 /* if size can vary use size else just use PAGE_SIZE */
5557                 size = nc->size;
5558 #endif
5559                 /* OK, page count is 0, we can safely set it */
5560                 set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
5561 
5562                 /* reset page count bias and offset to start of new frag */
5563                 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
5564                 offset = size - fragsz;
5565         }
5566 
5567         nc->pagecnt_bias--;
5568         offset &= align_mask;
5569         nc->offset = offset;
5570 
5571         return nc->va + offset;
5572 }
5573 EXPORT_SYMBOL(page_frag_alloc_align);
5574 
5575 /*
5576  * Frees a page fragment allocated out of either a compound or order 0 page.
5577  */
5578 void page_frag_free(void *addr)
5579 {
5580         struct page *page = virt_to_head_page(addr);
5581 
5582         if (unlikely(put_page_testzero(page)))
5583                 free_the_page(page, compound_order(page));
5584 }
5585 EXPORT_SYMBOL(page_frag_free);
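/*
 * Illustrative sketch (editor's example, not part of page_alloc.c): a driver
 * keeping a private page_frag_cache and carving fixed-size fragments out of
 * it. It assumes the page_frag_alloc() wrapper from gfp.h, which calls
 * page_frag_alloc_align() with no extra alignment; each fragment is later
 * released individually with page_frag_free().
 */
static void *example_frag_get(struct page_frag_cache *nc, unsigned int len)
{
	/* Refills the cache from the page allocator when it runs out. */
	return page_frag_alloc(nc, len, GFP_ATOMIC);
}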
5586 
5587 static void *make_alloc_exact(unsigned long addr, unsigned int order,
5588                 size_t size)
5589 {
5590         if (addr) {
5591                 unsigned long alloc_end = addr + (PAGE_SIZE << order);
5592                 unsigned long used = addr + PAGE_ALIGN(size);
5593 
5594                 split_page(virt_to_page((void *)addr), order);
5595                 while (used < alloc_end) {
5596                         free_page(used);
5597                         used += PAGE_SIZE;
5598                 }
5599         }
5600         return (void *)addr;
5601 }
5602 
5603 /**
5604  * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
5605  * @size: the number of bytes to allocate
5606  * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
5607  *
5608  * This function is similar to alloc_pages(), except that it allocates the
5609  * minimum number of pages to satisfy the request.  alloc_pages() can only
5610  * allocate memory in power-of-two pages.
5611  *
5612  * This function is also limited by MAX_ORDER.
5613  *
5614  * Memory allocated by this function must be released by free_pages_exact().
5615  *
5616  * Return: pointer to the allocated area or %NULL in case of error.
5617  */
5618 void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
5619 {
5620         unsigned int order = get_order(size);
5621         unsigned long addr;
5622 
5623         if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
5624                 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
5625 
5626         addr = __get_free_pages(gfp_mask, order);
5627         return make_alloc_exact(addr, order, size);
5628 }
5629 EXPORT_SYMBOL(alloc_pages_exact);
5630 
5631 /**
5632  * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
5633  *                         pages on a node.
5634  * @nid: the preferred node ID where memory should be allocated
5635  * @size: the number of bytes to allocate
5636  * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
5637  *
5638  * Like alloc_pages_exact(), but try to allocate on node nid first before falling
5639  * back.
5640  *
5641  * Return: pointer to the allocated area or %NULL in case of error.
5642  */
5643 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
5644 {
5645         unsigned int order = get_order(size);
5646         struct page *p;
5647 
5648         if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
5649                 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
5650 
5651         p = alloc_pages_node(nid, gfp_mask, order);
5652         if (!p)
5653                 return NULL;
5654         return make_alloc_exact((unsigned long)page_address(p), order, size);
5655 }
5656 
5657 /**
5658  * free_pages_exact - release memory allocated via alloc_pages_exact()
5659  * @virt: the value returned by alloc_pages_exact().
5660  * @size: size of allocation, same value as passed to alloc_pages_exact().
5661  *
5662  * Release the memory allocated by a previous call to alloc_pages_exact().
5663  */
5664 void free_pages_exact(void *virt, size_t size)
5665 {
5666         unsigned long addr = (unsigned long)virt;
5667         unsigned long end = addr + PAGE_ALIGN(size);
5668 
5669         while (addr < end) {
5670                 free_page(addr);
5671                 addr += PAGE_SIZE;
5672         }
5673 }
5674 EXPORT_SYMBOL(free_pages_exact);
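
/*
 * Illustrative sketch (not part of the upstream file): pairing
 * alloc_pages_exact() with free_pages_exact() for a physically contiguous
 * buffer whose size is not a power-of-two number of pages.  The buffer
 * name and size are hypothetical.
 */
#if 0	/* example only */
static void *example_buf;
#define EXAMPLE_BUF_SIZE	(5 * PAGE_SIZE)

static int example_buf_init(void)
{
	/* Pins exactly 5 pages, not the 8 an order-3 alloc_pages() would. */
	example_buf = alloc_pages_exact(EXAMPLE_BUF_SIZE, GFP_KERNEL | __GFP_ZERO);
	return example_buf ? 0 : -ENOMEM;
}

static void example_buf_exit(void)
{
	/* Size must match the value passed to alloc_pages_exact(). */
	free_pages_exact(example_buf, EXAMPLE_BUF_SIZE);
}
#endif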
5675 
5676 /**
5677  * nr_free_zone_pages - count number of pages beyond high watermark
5678  * @offset: The zone index of the highest zone
5679  *
5680  * nr_free_zone_pages() counts the number of pages which are beyond the
5681  * high watermark within all zones at or below a given zone index.  For each
5682  * zone, the number of pages is calculated as:
5683  *
5684  *     nr_free_zone_pages = managed_pages - high_pages
5685  *
5686  * Return: number of pages beyond high watermark.
5687  */
5688 static unsigned long nr_free_zone_pages(int offset)
5689 {
5690         struct zoneref *z;
5691         struct zone *zone;
5692 
5693         /* Just pick one node, since fallback list is circular */
5694         unsigned long sum = 0;
5695 
5696         struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
5697 
5698         for_each_zone_zonelist(zone, z, zonelist, offset) {
5699                 unsigned long size = zone_managed_pages(zone);
5700                 unsigned long high = high_wmark_pages(zone);
5701                 if (size > high)
5702                         sum += size - high;
5703         }
5704 
5705         return sum;
5706 }
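
/*
 * Worked example (hypothetical numbers): a zone with 1,000,000 managed
 * pages and a high watermark of 20,000 pages contributes 980,000 pages to
 * the sum; a zone whose high watermark meets or exceeds its managed pages
 * contributes nothing.
 */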
5707 
5708 /**
5709  * nr_free_buffer_pages - count number of pages beyond high watermark
5710  *
5711  * nr_free_buffer_pages() counts the number of pages which are beyond the high
5712  * watermark within ZONE_DMA and ZONE_NORMAL.
5713  *
5714  * Return: number of pages beyond high watermark within ZONE_DMA and
5715  * ZONE_NORMAL.
5716  */
5717 unsigned long nr_free_buffer_pages(void)
5718 {
5719         return nr_free_zone_pages(gfp_zone(GFP_USER));
5720 }
5721 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
5722 
5723 static inline void show_node(struct zone *zone)
5724 {
5725         if (IS_ENABLED(CONFIG_NUMA))
5726                 printk("Node %d ", zone_to_nid(zone));
5727 }
5728 
5729 long si_mem_available(void)
5730 {
5731         long available;
5732         unsigned long pagecache;
5733         unsigned long wmark_low = 0;
5734         unsigned long pages[NR_LRU_LISTS];
5735         unsigned long reclaimable;
5736         struct zone *zone;
5737         int lru;
5738 
5739         for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
5740                 pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
5741 
5742         for_each_zone(zone)
5743                 wmark_low += low_wmark_pages(zone);
5744 
5745         /*
5746          * Estimate the amount of memory available for userspace allocations,
5747          * without causing swapping.
5748          */
5749         available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;
5750 
5751         /*
5752          * Not all the page cache can be freed, otherwise the system will
5753          * start swapping. Assume at least half of the page cache, or the
5754          * low watermark worth of cache, needs to stay.
5755          */
5756         pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
5757         pagecache -= min(pagecache / 2, wmark_low);
5758         available += pagecache;
5759 
5760         /*
5761          * Part of the reclaimable slab and other kernel memory consists of
5762          * items that are in use, and cannot be freed. Cap this estimate at the
5763          * low watermark.
5764          */
5765         reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) +
5766                 global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);
5767         available += reclaimable - min(reclaimable / 2, wmark_low);
5768 
5769         if (available < 0)
5770                 available = 0;
5771         return available;
5772 }
5773 EXPORT_SYMBOL_GPL(si_mem_available);
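
/*
 * Worked example (hypothetical numbers): with NR_FREE_PAGES = 100,000,
 * totalreserve_pages = 10,000, file LRU pages = 50,000, reclaimable
 * slab/misc = 8,000 and a summed low watermark of 5,000 pages, the
 * estimate is (100,000 - 10,000) + (50,000 - 5,000) + (8,000 - 4,000)
 * = 139,000 available pages.
 */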
5774 
5775 void si_meminfo(struct sysinfo *val)
5776 {
5777         val->totalram = totalram_pages();
5778         val->sharedram = global_node_page_state(NR_SHMEM);
5779         val->freeram = global_zone_page_state(NR_FREE_PAGES);
5780         val->bufferram = nr_blockdev_pages();
5781         val->totalhigh = totalhigh_pages();
5782         val->freehigh = nr_free_highpages();
5783         val->mem_unit = PAGE_SIZE;
5784 }
5785 
5786 EXPORT_SYMBOL(si_meminfo);
5787 
5788 #ifdef CONFIG_NUMA
5789 void si_meminfo_node(struct sysinfo *val, int nid)
5790 {
5791         int zone_type;          /* needs to be signed */
5792         unsigned long managed_pages = 0;
5793         unsigned long managed_highpages = 0;
5794         unsigned long free_highpages = 0;
5795         pg_data_t *pgdat = NODE_DATA(nid);
5796 
5797         for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
5798                 managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]);
5799         val->totalram = managed_pages;
5800         val->sharedram = node_page_state(pgdat, NR_SHMEM);
5801         val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
5802 #ifdef CONFIG_HIGHMEM
5803         for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
5804                 struct zone *zone = &pgdat->node_zones[zone_type];
5805 
5806                 if (is_highmem(zone)) {
5807                         managed_highpages += zone_managed_pages(zone);
5808                         free_highpages += zone_page_state(zone, NR_FREE_PAGES);
5809                 }
5810         }
5811         val->totalhigh = managed_highpages;
5812         val->freehigh = free_highpages;
5813 #else
5814         val->totalhigh = managed_highpages;
5815         val->freehigh = free_highpages;
5816 #endif
5817         val->mem_unit = PAGE_SIZE;
5818 }
5819 #endif
5820 
5821 /*
5822  * Determine whether the node should be displayed or not, depending on whether
5823  * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
5824  */
5825 static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
5826 {
5827         if (!(flags & SHOW_MEM_FILTER_NODES))
5828                 return false;
5829 
5830         /*
5831          * No node mask - aka implicit memory NUMA policy. Do not bother with
5832          * the synchronization - read_mems_allowed_begin - because we do not
5833          * have to be precise here.
5834          */
5835         if (!nodemask)
5836                 nodemask = &cpuset_current_mems_allowed;
5837 
5838         return !node_isset(nid, *nodemask);
5839 }
5840 
5841 #define K(x) ((x) << (PAGE_SHIFT-10))
5842 
5843 static void show_migration_types(unsigned char type)
5844 {
5845         static const char types[MIGRATE_TYPES] = {
5846                 [MIGRATE_UNMOVABLE]     = 'U',
5847                 [MIGRATE_MOVABLE]       = 'M',
5848                 [MIGRATE_RECLAIMABLE]   = 'E',
5849                 [MIGRATE_HIGHATOMIC]    = 'H',
5850 #ifdef CONFIG_CMA
5851                 [MIGRATE_CMA]           = 'C',
5852 #endif
5853 #ifdef CONFIG_MEMORY_ISOLATION
5854                 [MIGRATE_ISOLATE]       = 'I',
5855 #endif
5856         };
5857         char tmp[MIGRATE_TYPES + 1];
5858         char *p = tmp;
5859         int i;
5860 
5861         for (i = 0; i < MIGRATE_TYPES; i++) {
5862                 if (type & (1 << i))
5863                         *p++ = types[i];
5864         }
5865 
5866         *p = '\0';
5867         printk(KERN_CONT "(%s) ", tmp);
5868 }
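
/*
 * Example: a free list containing unmovable and reclaimable pageblocks is
 * passed type = (1 << MIGRATE_UNMOVABLE) | (1 << MIGRATE_RECLAIMABLE) and
 * printed as "(UE) ".
 */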
5869 
5870 /*
5871  * Show free area list (used inside shift_scroll-lock stuff)
5872  * We also calculate the percentage fragmentation. We do this by counting the
5873  * memory on each free list with the exception of the first item on the list.
5874  *
5875  * Bits in @filter:
5876  * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
5877  *   cpuset.
5878  */
5879 void show_free_areas(unsigned int filter, nodemask_t *nodemask)
5880 {
5881         unsigned long free_pcp = 0;
5882         int cpu;
5883         struct zone *zone;
5884         pg_data_t *pgdat;
5885 
5886         for_each_populated_zone(zone) {
5887                 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
5888                         continue;
5889 
5890                 for_each_online_cpu(cpu)
5891                         free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;
5892         }
5893 
5894         printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
5895                 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
5896                 " unevictable:%lu dirty:%lu writeback:%lu\n"
5897                 " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
5898                 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
5899                 " kernel_misc_reclaimable:%lu\n"
5900                 " free:%lu free_pcp:%lu free_cma:%lu\n",
5901                 global_node_page_state(NR_ACTIVE_ANON),
5902                 global_node_page_state(NR_INACTIVE_ANON),
5903                 global_node_page_state(NR_ISOLATED_ANON),
5904                 global_node_page_state(NR_ACTIVE_FILE),
5905                 global_node_page_state(NR_INACTIVE_FILE),
5906                 global_node_page_state(NR_ISOLATED_FILE),
5907                 global_node_page_state(NR_UNEVICTABLE),
5908                 global_node_page_state(NR_FILE_DIRTY),
5909                 global_node_page_state(NR_WRITEBACK),
5910                 global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B),
5911                 global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B),
5912                 global_node_page_state(NR_FILE_MAPPED),
5913                 global_node_page_state(NR_SHMEM),
5914                 global_node_page_state(NR_PAGETABLE),
5915                 global_zone_page_state(NR_BOUNCE),
5916                 global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE),
5917                 global_zone_page_state(NR_FREE_PAGES),
5918                 free_pcp,
5919                 global_zone_page_state(NR_FREE_CMA_PAGES));
5920 
5921         for_each_online_pgdat(pgdat) {
5922                 if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
5923                         continue;
5924 
5925                 printk("Node %d"
5926                         " active_anon:%lukB"
5927                         " inactive_anon:%lukB"
5928                         " active_file:%lukB"
5929                         " inactive_file:%lukB"
5930                         " unevictable:%lukB"
5931                         " isolated(anon):%lukB"
5932                         " isolated(file):%lukB"
5933                         " mapped:%lukB"
5934                         " dirty:%lukB"
5935                         " writeback:%lukB"
5936                         " shmem:%lukB"
5937 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5938                         " shmem_thp: %lukB"
5939                         " shmem_pmdmapped: %lukB"
5940                         " anon_thp: %lukB"
5941 #endif
5942                         " writeback_tmp:%lukB"
5943                         " kernel_stack:%lukB"
5944 #ifdef CONFIG_SHADOW_CALL_STACK
5945                         " shadow_call_stack:%lukB"
5946 #endif
5947                         " pagetables:%lukB"
5948                         " all_unreclaimable? %s"
5949                         "\n",
5950                         pgdat->node_id,
5951                         K(node_page_state(pgdat, NR_ACTIVE_ANON)),
5952                         K(node_page_state(pgdat, NR_INACTIVE_ANON)),
5953                         K(node_page_state(pgdat, NR_ACTIVE_FILE)),
5954                         K(node_page_state(pgdat, NR_INACTIVE_FILE)),
5955                         K(node_page_state(pgdat, NR_UNEVICTABLE)),
5956                         K(node_page_state(pgdat, NR_ISOLATED_ANON)),
5957                         K(node_page_state(pgdat, NR_ISOLATED_FILE)),
5958                         K(node_page_state(pgdat, NR_FILE_MAPPED)),
5959                         K(node_page_state(pgdat, NR_FILE_DIRTY)),
5960                         K(node_page_state(pgdat, NR_WRITEBACK)),
5961                         K(node_page_state(pgdat, NR_SHMEM)),
5962 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5963                         K(node_page_state(pgdat, NR_SHMEM_THPS)),
5964                         K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)),
5965                         K(node_page_state(pgdat, NR_ANON_THPS)),
5966 #endif
5967                         K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
5968                         node_page_state(pgdat, NR_KERNEL_STACK_KB),
5969 #ifdef CONFIG_SHADOW_CALL_STACK
5970                         node_page_state(pgdat, NR_KERNEL_SCS_KB),
5971 #endif
5972                         K(node_page_state(pgdat, NR_PAGETABLE)),
5973                         pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
5974                                 "yes" : "no");
5975         }
5976 
5977         for_each_populated_zone(zone) {
5978                 int i;
5979 
5980                 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
5981                         continue;
5982 
5983                 free_pcp = 0;
5984                 for_each_online_cpu(cpu)
5985                         free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;
5986 
5987                 show_node(zone);
5988                 printk(KERN_CONT
5989                         "%s"
5990                         " free:%lukB"
5991                         " boost:%lukB"
5992                         " min:%lukB"
5993                         " low:%lukB"
5994                         " high:%lukB"
5995                         " reserved_highatomic:%lukB"
5996                         " active_anon:%lukB"
5997                         " inactive_anon:%lukB"
5998                         " active_file:%lukB"
5999                         " inactive_file:%lukB"
6000                         " unevictable:%lukB"
6001                         " writepending:%lukB"
6002                         " present:%lukB"
6003                         " managed:%lukB"
6004                         " mlocked:%lukB"
6005                         " bounce:%lukB"
6006                         " free_pcp:%lukB"
6007                         " local_pcp:%ukB"
6008                         " free_cma:%lukB"
6009                         "\n",
6010                         zone->name,
6011                         K(zone_page_state(zone, NR_FREE_PAGES)),
6012                         K(zone->watermark_boost),
6013                         K(min_wmark_pages(zone)),
6014                         K(low_wmark_pages(zone)),
6015                         K(high_wmark_pages(zone)),
6016                         K(zone->nr_reserved_highatomic),
6017                         K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
6018                         K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
6019                         K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
6020                         K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
6021                         K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
6022                         K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
6023                         K(zone->present_pages),
6024                         K(zone_managed_pages(zone)),
6025                         K(zone_page_state(zone, NR_MLOCK)),
6026                         K(zone_page_state(zone, NR_BOUNCE)),
6027                         K(free_pcp),
6028                         K(this_cpu_read(zone->per_cpu_pageset->count)),
6029                         K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
6030                 printk("lowmem_reserve[]:");
6031                 for (i = 0; i < MAX_NR_ZONES; i++)
6032                         printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
6033                 printk(KERN_CONT "\n");
6034         }
6035 
6036         for_each_populated_zone(zone) {
6037                 unsigned int order;
6038                 unsigned long nr[MAX_ORDER], flags, total = 0;
6039                 unsigned char types[MAX_ORDER];
6040 
6041                 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
6042                         continue;
6043                 show_node(zone);
6044                 printk(KERN_CONT "%s: ", zone->name);
6045 
6046                 spin_lock_irqsave(&zone->lock, flags);
6047                 for (order = 0; order < MAX_ORDER; order++) {
6048                         struct free_area *area = &zone->free_area[order];
6049                         int type;
6050 
6051                         nr[order] = area->nr_free;
6052                         total += nr[order] << order;
6053 
6054                         types[order] = 0;
6055                         for (type = 0; type < MIGRATE_TYPES; type++) {
6056                                 if (!free_area_empty(area, type))
6057                                         types[order] |= 1 << type;
6058                         }
6059                 }
6060                 spin_unlock_irqrestore(&zone->lock, flags);
6061                 for (order = 0; order < MAX_ORDER; order++) {
6062                         printk(KERN_CONT "%lu*%lukB ",
6063                                nr[order], K(1UL) << order);
6064                         if (nr[order])
6065                                 show_migration_types(types[order]);
6066                 }
6067                 printk(KERN_CONT "= %lukB\n", K(total));
6068         }
6069 
6070         hugetlb_show_meminfo();
6071 
6072         printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));
6073 
6074         show_swap_cache_info();
6075 }
6076 
6077 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
6078 {
6079         zoneref->zone = zone;
6080         zoneref->zone_idx = zone_idx(zone);
6081 }
6082 
6083 /*
6084  * Builds allocation fallback zone lists.
6085  *
6086  * Add all populated zones of a node to the zonelist.
6087  */
6088 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
6089 {
6090         struct zone *zone;
6091         enum zone_type zone_type = MAX_NR_ZONES;
6092         int nr_zones = 0;
6093 
6094         do {
6095                 zone_type--;
6096                 zone = pgdat->node_zones + zone_type;
6097                 if (managed_zone(zone)) {
6098                         zoneref_set_zone(zone, &zonerefs[nr_zones++]);
6099                         check_highest_zone(zone_type);
6100                 }
6101         } while (zone_type);
6102 
6103         return nr_zones;
6104 }
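
/*
 * Example: on a node where ZONE_DMA, ZONE_DMA32 and ZONE_NORMAL all have
 * managed pages, the zonerefs are filled highest zone first, i.e. Normal,
 * then DMA32, then DMA, so allocations fall back toward the more
 * constrained low zones only when the higher ones are exhausted.
 */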
6105 
6106 #ifdef CONFIG_NUMA
6107 
6108 static int __parse_numa_zonelist_order(char *s)
6109 {
6110         /*
6111          * We used to support different zonelist modes, but they turned
6112          * out to be just not useful. Let's keep the warning in place
6113          * if somebody still uses the command line parameter so that we
6114          * do not fail it silently.
6115          */
6116         if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
6117                 pr_warn("Ignoring unsupported numa_zonelist_order value:  %s\n", s);
6118                 return -EINVAL;
6119         }
6120         return 0;
6121 }
6122 
6123 char numa_zonelist_order[] = "Node";
6124 
6125 /*
6126  * sysctl handler for numa_zonelist_order
6127  */
6128 int numa_zonelist_order_handler(struct ctl_table *table, int write,
6129                 void *buffer, size_t *length, loff_t *ppos)
6130 {
6131         if (write)
6132                 return __parse_numa_zonelist_order(buffer);
6133         return proc_dostring(table, write, buffer, length, ppos);
6134 }
6135 
6136 
6137 #define MAX_NODE_LOAD (nr_online_nodes)
6138 static int node_load[MAX_NUMNODES];
6139 
6140 /**
6141  * find_next_best_node - find the next node that should appear in a given node's fallback list
6142  * @node: node whose fallback list we're appending
6143  * @used_node_mask: nodemask_t of already used nodes
6144  *
6145  * We use a number of factors to determine which is the next node that should
6146  * appear on a given node's fallback list.  The node should not have appeared
6147  * already in @node's fallback list, and it should be the next closest node
6148  * according to the distance array (which contains arbitrary distance values
6149  * from each node to each node in the system), and should also prefer nodes
6150  * with no CPUs, since presumably they'll have very little allocation pressure
6151  * on them otherwise.
6152  *
6153  * Return: node id of the found node or %NUMA_NO_NODE if no node is found.
6154  */
6155 int find_next_best_node(int node, nodemask_t *used_node_mask)
6156 {
6157         int n, val;
6158         int min_val = INT_MAX;
6159         int best_node = NUMA_NO_NODE;
6160 
6161         /* Use the local node if we haven't already */
6162         if (!node_isset(node, *used_node_mask)) {
6163                 node_set(node, *used_node_mask);
6164                 return node;
6165         }
6166 
6167         for_each_node_state(n, N_MEMORY) {
6168 
6169                 /* Don't want a node to appear more than once */
6170                 if (node_isset(n, *used_node_mask))
6171                         continue;
6172 
6173                 /* Use the distance array to find the distance */
6174                 val = node_distance(node, n);
6175 
6176                 /* Penalize nodes under us ("prefer the next node") */
6177                 val += (n < node);
6178 
6179                 /* Give preference to headless and unused nodes */
6180                 if (!cpumask_empty(cpumask_of_node(n)))
6181                         val += PENALTY_FOR_NODE_WITH_CPUS;
6182 
6183                 /* Slight preference for less loaded node */
6184                 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
6185                 val += node_load[n];
6186 
6187                 if (val < min_val) {
6188                         min_val = val;
6189                         best_node = n;
6190                 }
6191         }
6192 
6193         if (best_node >= 0)
6194                 node_set(best_node, *used_node_mask);
6195 
6196         return best_node;
6197 }
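
/*
 * Worked example (hypothetical topology): when node 0 builds its fallback
 * list and nodes 1 and 2 both sit at distance 20, but node 2 has no CPUs,
 * node 2 scores lower (it avoids PENALTY_FOR_NODE_WITH_CPUS) and is chosen
 * first.  Because the distance/penalty terms are scaled by
 * MAX_NODE_LOAD * MAX_NUMNODES, node_load[] mostly acts as a tie-breaker
 * within a distance class.
 */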
6198 
6199 
6200 /*
6201  * Build zonelists ordered by node and zones within node.
6202  * This results in maximum locality--normal zone overflows into local
6203  * DMA zone, if any--but risks exhausting DMA zone.
6204  */
6205 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,
6206                 unsigned nr_nodes)
6207 {
6208         struct zoneref *zonerefs;
6209         int i;
6210 
6211         zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
6212 
6213         for (i = 0; i < nr_nodes; i++) {
6214                 int nr_zones;
6215 
6216                 pg_data_t *node = NODE_DATA(node_order[i]);
6217 
6218                 nr_zones = build_zonerefs_node(node, zonerefs);
6219                 zonerefs += nr_zones;
6220         }
6221         zonerefs->zone = NULL;
6222         zonerefs->zone_idx = 0;
6223 }
6224 
6225 /*
6226  * Build gfp_thisnode zonelists
6227  */
6228 static void build_thisnode_zonelists(pg_data_t *pgdat)
6229 {
6230         struct zoneref *zonerefs;
6231         int nr_zones;
6232 
6233         zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
6234         nr_zones = build_zonerefs_node(pgdat, zonerefs);
6235         zonerefs += nr_zones;
6236         zonerefs->zone = NULL;
6237         zonerefs->zone_idx = 0;
6238 }
6239 
6240 /*
6241  * Build zonelists ordered by zone and nodes within zones.
6242  * This results in conserving DMA zone[s] until all Normal memory is
6243  * exhausted, but results in overflowing to remote node while memory
6244  * may still exist in local DMA zone.
6245  */
6246 
6247 static void build_zonelists(pg_data_t *pgdat)
6248 {
6249         static int node_order[MAX_NUMNODES];
6250         int node, load, nr_nodes = 0;
6251         nodemask_t used_mask = NODE_MASK_NONE;
6252         int local_node, prev_node;
6253 
6254         /* NUMA-aware ordering of nodes */
6255         local_node = pgdat->node_id;
6256         load = nr_online_nodes;
6257         prev_node = local_node;
6258 
6259         memset(node_order, 0, sizeof(node_order));
6260         while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
6261                 /*
6262                  * We don't want to pressure a particular node.
6263                  * So add a penalty to the first node in the same
6264                  * distance group to make it round-robin.
6265                  */
6266                 if (node_distance(local_node, node) !=
6267                     node_distance(local_node, prev_node))
6268                         node_load[node] += load;
6269 
6270                 node_order[nr_nodes++] = node;
6271                 prev_node = node;
6272                 load--;
6273         }
6274 
6275         build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
6276         build_thisnode_zonelists(pgdat);
6277         pr_info("Fallback order for Node %d: ", local_node);
6278         for (node = 0; node < nr_nodes; node++)
6279                 pr_cont("%d ", node_order[node]);
6280         pr_cont("\n");
6281 }
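
/*
 * Example (hypothetical topology): on a 4-node machine where node 1's
 * nearest remaining neighbours are found in the order 0, 2, 3, the loop
 * above logs "Fallback order for Node 1: 1 0 2 3".
 */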
6282 
6283 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
6284 /*
6285  * Return node id of node used for "local" allocations.
6286  * I.e., first node id of first zone in arg node's generic zonelist.
6287  * Used for initializing percpu 'numa_mem', which is used primarily
6288  * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
6289  */
6290 int local_memory_node(int node)
6291 {
6292         struct zoneref *z;
6293 
6294         z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
6295                                    gfp_zone(GFP_KERNEL),
6296                                    NULL);
6297         return zone_to_nid(z->zone);
6298 }
6299 #endif
6300 
6301 static void setup_min_unmapped_ratio(void);
6302 static void setup_min_slab_ratio(void);
6303 #else   /* CONFIG_NUMA */
6304 
6305 static void build_zonelists(pg_data_t *pgdat)
6306 {
6307         int node, local_node;
6308         struct zoneref *zonerefs;
6309         int nr_zones;
6310 
6311         local_node = pgdat->node_id;
6312 
6313         zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
6314         nr_zones = build_zonerefs_node(pgdat, zonerefs);
6315         zonerefs += nr_zones;
6316 
6317         /*
6318          * Now we build the zonelist so that it contains the zones
6319          * of all the other nodes.
6320          * We don't want to pressure a particular node, so when
6321          * building the zones for node N, we make sure that the
6322          * zones coming right after the local ones are those from
6323          * node N+1 (modulo the number of nodes).
6324          */
6325         for (node = local_node + 1; node < MAX_NUMNODES; node++) {
6326                 if (!node_online(node))
6327                         continue;
6328                 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
6329                 zonerefs += nr_zones;
6330         }
6331         for (node = 0; node < local_node; node++) {
6332                 if (!node_online(node))
6333                         continue;
6334                 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
6335                 zonerefs += nr_zones;
6336         }
6337 
6338         zonerefs->zone = NULL;
6339         zonerefs->zone_idx = 0;
6340 }
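
/*
 * Example (hypothetical layout): with four online nodes and local_node = 2,
 * the fallback zonelist visits the nodes in the order 2, 3, 0, 1 - the
 * local node first, then the remaining nodes wrapping around upward.
 */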
6341 
6342 #endif  /* CONFIG_NUMA */
6343 
6344 /*
6345  * Boot pageset table. One per cpu which is going to be used for all
6346  * zones and all nodes. The parameters will be set in such a way
6347  * that an item put on a list will immediately be handed over to
6348  * the buddy list. This is safe since pageset manipulation is done
6349  * with interrupts disabled.
6350  *
6351  * The boot_pagesets must be kept even after bootup is complete for
6352  * unused processors and/or zones. They do play a role for bootstrapping
6353  * hotplugged processors.
6354  *
6355  * zoneinfo_show() and maybe other functions do
6356  * not check if the processor is online before following the pageset pointer.
6357  * Other parts of the kernel may not check if the zone is available.
6358  */
6359 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats);
6360 /* These effectively disable the pcplists in the boot pageset completely */
6361 #define BOOT_PAGESET_HIGH       0
6362 #define BOOT_PAGESET_BATCH      1
6363 static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset);
6364 static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats);
6365 static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
6366 
6367 static void __build_all_zonelists(void *data)
6368 {
6369         int nid;
6370         int __maybe_unused cpu;
6371         pg_data_t *self = data;
6372         static DEFINE_SPINLOCK(lock);
6373 
6374         spin_lock(&lock);
6375 
6376 #ifdef CONFIG_NUMA
6377         memset(node_load, 0, sizeof(node_load));
6378 #endif
6379 
6380         /*
6381          * This node is hot-added and no memory is yet present. So just
6382          * building zonelists is fine - no need to touch other nodes.
6383          */
6384         if (self && !node_online(self->node_id)) {
6385                 build_zonelists(self);
6386         } else {
6387                 for_each_online_node(nid) {
6388                         pg_data_t *pgdat = NODE_DATA(nid);
6389 
6390                         build_zonelists(pgdat);
6391                 }
6392 
6393 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
6394                 /*
6395                  * We now know the "local memory node" for each node--
6396                  * i.e., the node of the first zone in the generic zonelist.
6397                  * Set up numa_mem percpu variable for on-line cpus.  During
6398                  * boot, only the boot cpu should be on-line;  we'll init the
6399                  * secondary cpus' numa_mem as they come on-line.  During
6400                  * node/memory hotplug, we'll fixup all on-line cpus.
6401                  */
6402                 for_each_online_cpu(cpu)
6403                         set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
6404 #endif
6405         }
6406 
6407         spin_unlock(&lock);