TOMOYO Linux Cross Reference
Linux/mm/page_alloc.c

  1 /*
  2  *  linux/mm/page_alloc.c
  3  *
  4  *  Manages the free list; the system allocates free pages here.
  5  *  Note that kmalloc() lives in slab.c
  6  *
  7  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
  8  *  Swap reorganised 29.12.95, Stephen Tweedie
  9  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 10  *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 11  *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 12  *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 13  *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 14  *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 15  */
 16 
 17 #include <linux/stddef.h>
 18 #include <linux/mm.h>
 19 #include <linux/swap.h>
 20 #include <linux/interrupt.h>
 21 #include <linux/pagemap.h>
 22 #include <linux/jiffies.h>
 23 #include <linux/bootmem.h>
 24 #include <linux/memblock.h>
 25 #include <linux/compiler.h>
 26 #include <linux/kernel.h>
 27 #include <linux/kmemcheck.h>
 28 #include <linux/kasan.h>
 29 #include <linux/module.h>
 30 #include <linux/suspend.h>
 31 #include <linux/pagevec.h>
 32 #include <linux/blkdev.h>
 33 #include <linux/slab.h>
 34 #include <linux/ratelimit.h>
 35 #include <linux/oom.h>
 36 #include <linux/notifier.h>
 37 #include <linux/topology.h>
 38 #include <linux/sysctl.h>
 39 #include <linux/cpu.h>
 40 #include <linux/cpuset.h>
 41 #include <linux/memory_hotplug.h>
 42 #include <linux/nodemask.h>
 43 #include <linux/vmalloc.h>
 44 #include <linux/vmstat.h>
 45 #include <linux/mempolicy.h>
 46 #include <linux/stop_machine.h>
 47 #include <linux/sort.h>
 48 #include <linux/pfn.h>
 49 #include <linux/backing-dev.h>
 50 #include <linux/fault-inject.h>
 51 #include <linux/page-isolation.h>
 52 #include <linux/page_ext.h>
 53 #include <linux/debugobjects.h>
 54 #include <linux/kmemleak.h>
 55 #include <linux/compaction.h>
 56 #include <trace/events/kmem.h>
 57 #include <linux/prefetch.h>
 58 #include <linux/mm_inline.h>
 59 #include <linux/migrate.h>
 60 #include <linux/page_ext.h>
 61 #include <linux/hugetlb.h>
 62 #include <linux/sched/rt.h>
 63 #include <linux/page_owner.h>
 64 
 65 #include <asm/sections.h>
 66 #include <asm/tlbflush.h>
 67 #include <asm/div64.h>
 68 #include "internal.h"
 69 
 70 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
 71 static DEFINE_MUTEX(pcp_batch_high_lock);
 72 #define MIN_PERCPU_PAGELIST_FRACTION    (8)
 73 
 74 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
 75 DEFINE_PER_CPU(int, numa_node);
 76 EXPORT_PER_CPU_SYMBOL(numa_node);
 77 #endif
 78 
 79 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
 80 /*
 81  * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 82  * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 83  * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 84  * defined in <linux/topology.h>.
 85  */
 86 DEFINE_PER_CPU(int, _numa_mem_);                /* Kernel "local memory" node */
 87 EXPORT_PER_CPU_SYMBOL(_numa_mem_);
 88 int _node_numa_mem_[MAX_NUMNODES];
 89 #endif
 90 
 91 /*
 92  * Array of node states.
 93  */
 94 nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
 95         [N_POSSIBLE] = NODE_MASK_ALL,
 96         [N_ONLINE] = { { [0] = 1UL } },
 97 #ifndef CONFIG_NUMA
 98         [N_NORMAL_MEMORY] = { { [0] = 1UL } },
 99 #ifdef CONFIG_HIGHMEM
100         [N_HIGH_MEMORY] = { { [0] = 1UL } },
101 #endif
102 #ifdef CONFIG_MOVABLE_NODE
103         [N_MEMORY] = { { [0] = 1UL } },
104 #endif
105         [N_CPU] = { { [0] = 1UL } },
106 #endif  /* NUMA */
107 };
108 EXPORT_SYMBOL(node_states);
109 
110 /* Protect totalram_pages and zone->managed_pages */
111 static DEFINE_SPINLOCK(managed_page_count_lock);
112 
113 unsigned long totalram_pages __read_mostly;
114 unsigned long totalreserve_pages __read_mostly;
115 unsigned long totalcma_pages __read_mostly;
116 /*
117  * When calculating the number of globally allowed dirty pages, there
118  * is a certain number of per-zone reserves that should not be
119  * considered dirtyable memory.  This is the sum of those reserves
120  * over all existing zones that contribute dirtyable memory.
121  */
122 unsigned long dirty_balance_reserve __read_mostly;
123 
124 int percpu_pagelist_fraction;
125 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
126 
127 #ifdef CONFIG_PM_SLEEP
128 /*
129  * The following functions are used by the suspend/hibernate code to temporarily
130  * change gfp_allowed_mask in order to avoid using I/O during memory allocations
131  * while devices are suspended.  To avoid races with the suspend/hibernate code,
132  * they should always be called with pm_mutex held (gfp_allowed_mask also should
133  * only be modified with pm_mutex held, unless the suspend/hibernate code is
134  * guaranteed not to run in parallel with that modification).
135  */
136 
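    /*
     * Sketch of the intended calling sequence (with pm_mutex held, as the
     * comment above requires):
     *
     *	pm_restrict_gfp_mask();     /* mask out GFP_IOFS (__GFP_IO|__GFP_FS) */
     *	... suspend devices / write the hibernation image ...
     *	pm_restore_gfp_mask();      /* allow I/O in allocations again */
     */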
137 static gfp_t saved_gfp_mask;
138 
139 void pm_restore_gfp_mask(void)
140 {
141         WARN_ON(!mutex_is_locked(&pm_mutex));
142         if (saved_gfp_mask) {
143                 gfp_allowed_mask = saved_gfp_mask;
144                 saved_gfp_mask = 0;
145         }
146 }
147 
148 void pm_restrict_gfp_mask(void)
149 {
150         WARN_ON(!mutex_is_locked(&pm_mutex));
151         WARN_ON(saved_gfp_mask);
152         saved_gfp_mask = gfp_allowed_mask;
153         gfp_allowed_mask &= ~GFP_IOFS;
154 }
155 
156 bool pm_suspended_storage(void)
157 {
158         if ((gfp_allowed_mask & GFP_IOFS) == GFP_IOFS)
159                 return false;
160         return true;
161 }
162 #endif /* CONFIG_PM_SLEEP */
163 
164 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
165 unsigned int pageblock_order __read_mostly;
166 #endif
167 
168 static void __free_pages_ok(struct page *page, unsigned int order);
169 
170 /*
171  * results with 256, 32 in the lowmem_reserve sysctl:
172  *      1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
173  *      1G machine -> (16M dma, 784M normal, 224M high)
174  *      NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
175  *      HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
176  *      HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
177  *
178  * TBD: should special case ZONE_DMA32 machines here - in those we normally
179  * don't need any ZONE_NORMAL reservation
180  */
181 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
182 #ifdef CONFIG_ZONE_DMA
183          256,
184 #endif
185 #ifdef CONFIG_ZONE_DMA32
186          256,
187 #endif
188 #ifdef CONFIG_HIGHMEM
189          32,
190 #endif
191          32,
192 };
193 
194 EXPORT_SYMBOL(totalram_pages);
195 
196 static char * const zone_names[MAX_NR_ZONES] = {
197 #ifdef CONFIG_ZONE_DMA
198          "DMA",
199 #endif
200 #ifdef CONFIG_ZONE_DMA32
201          "DMA32",
202 #endif
203          "Normal",
204 #ifdef CONFIG_HIGHMEM
205          "HighMem",
206 #endif
207          "Movable",
208 };
209 
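    /*
     * min_free_kbytes is the floor used when the per-zone watermarks are
     * computed; it is tunable at runtime via /proc/sys/vm/min_free_kbytes.
     * user_min_free_kbytes remembers a value explicitly written by the user
     * (-1 means never set), so automatic recalculation does not silently
     * replace a user-chosen value with a smaller one.
     */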
210 int min_free_kbytes = 1024;
211 int user_min_free_kbytes = -1;
212 
213 static unsigned long __meminitdata nr_kernel_pages;
214 static unsigned long __meminitdata nr_all_pages;
215 static unsigned long __meminitdata dma_reserve;
216 
217 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
218 static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
219 static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
220 static unsigned long __initdata required_kernelcore;
221 static unsigned long __initdata required_movablecore;
222 static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
223 
224 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
225 int movable_zone;
226 EXPORT_SYMBOL(movable_zone);
227 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
228 
229 #if MAX_NUMNODES > 1
230 int nr_node_ids __read_mostly = MAX_NUMNODES;
231 int nr_online_nodes __read_mostly = 1;
232 EXPORT_SYMBOL(nr_node_ids);
233 EXPORT_SYMBOL(nr_online_nodes);
234 #endif
235 
236 int page_group_by_mobility_disabled __read_mostly;
237 
238 void set_pageblock_migratetype(struct page *page, int migratetype)
239 {
240         if (unlikely(page_group_by_mobility_disabled &&
241                      migratetype < MIGRATE_PCPTYPES))
242                 migratetype = MIGRATE_UNMOVABLE;
243 
244         set_pageblock_flags_group(page, (unsigned long)migratetype,
245                                         PB_migrate, PB_migrate_end);
246 }
247 
248 #ifdef CONFIG_DEBUG_VM
249 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
250 {
251         int ret = 0;
252         unsigned seq;
253         unsigned long pfn = page_to_pfn(page);
254         unsigned long sp, start_pfn;
255 
256         do {
257                 seq = zone_span_seqbegin(zone);
258                 start_pfn = zone->zone_start_pfn;
259                 sp = zone->spanned_pages;
260                 if (!zone_spans_pfn(zone, pfn))
261                         ret = 1;
262         } while (zone_span_seqretry(zone, seq));
263 
264         if (ret)
265                 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
266                         pfn, zone_to_nid(zone), zone->name,
267                         start_pfn, start_pfn + sp);
268 
269         return ret;
270 }
271 
272 static int page_is_consistent(struct zone *zone, struct page *page)
273 {
274         if (!pfn_valid_within(page_to_pfn(page)))
275                 return 0;
276         if (zone != page_zone(page))
277                 return 0;
278 
279         return 1;
280 }
281 /*
282  * Temporary debugging check for pages not lying within a given zone.
283  */
284 static int bad_range(struct zone *zone, struct page *page)
285 {
286         if (page_outside_zone_boundaries(zone, page))
287                 return 1;
288         if (!page_is_consistent(zone, page))
289                 return 1;
290 
291         return 0;
292 }
293 #else
294 static inline int bad_range(struct zone *zone, struct page *page)
295 {
296         return 0;
297 }
298 #endif
299 
300 static void bad_page(struct page *page, const char *reason,
301                 unsigned long bad_flags)
302 {
303         static unsigned long resume;
304         static unsigned long nr_shown;
305         static unsigned long nr_unshown;
306 
307         /* Don't complain about poisoned pages */
308         if (PageHWPoison(page)) {
309                 page_mapcount_reset(page); /* remove PageBuddy */
310                 return;
311         }
312 
313         /*
314          * Allow a burst of 60 reports, then keep quiet for that minute;
315          * or allow a steady drip of one report per second.
316          */
317         if (nr_shown == 60) {
318                 if (time_before(jiffies, resume)) {
319                         nr_unshown++;
320                         goto out;
321                 }
322                 if (nr_unshown) {
323                         printk(KERN_ALERT
324                               "BUG: Bad page state: %lu messages suppressed\n",
325                                 nr_unshown);
326                         nr_unshown = 0;
327                 }
328                 nr_shown = 0;
329         }
330         if (nr_shown++ == 0)
331                 resume = jiffies + 60 * HZ;
332 
333         printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
334                 current->comm, page_to_pfn(page));
335         dump_page_badflags(page, reason, bad_flags);
336 
337         print_modules();
338         dump_stack();
339 out:
340         /* Leave bad fields for debug, except PageBuddy could make trouble */
341         page_mapcount_reset(page); /* remove PageBuddy */
342         add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
343 }
344 
345 /*
346  * Higher-order pages are called "compound pages".  They are structured thusly:
347  *
348  * The first PAGE_SIZE page is called the "head page".
349  *
350  * The remaining PAGE_SIZE pages are called "tail pages".
351  *
352  * All pages have PG_compound set.  All tail pages have their ->first_page
353  * pointing at the head page.
354  *
355  * The first tail page's ->lru.next holds the address of the compound page's
356  * put_page() function.  Its ->lru.prev holds the order of allocation.
357  * This usage means that zero-order pages may not be compound.
358  */
359 
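    /*
     * Concrete example (following the layout described above): for an
     * order-2 __GFP_COMP allocation, page[0] becomes the head page and
     * page[1]..page[3] become tail pages with ->first_page == &page[0];
     * per the comment above, the first tail page's ->lru.next holds
     * free_compound_page() and its ->lru.prev holds the order (2), which
     * is what prep_compound_page() below sets up.
     */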
360 static void free_compound_page(struct page *page)
361 {
362         __free_pages_ok(page, compound_order(page));
363 }
364 
365 void prep_compound_page(struct page *page, unsigned int order)
366 {
367         int i;
368         int nr_pages = 1 << order;
369 
370         set_compound_page_dtor(page, free_compound_page);
371         set_compound_order(page, order);
372         __SetPageHead(page);
373         for (i = 1; i < nr_pages; i++) {
374                 struct page *p = page + i;
375                 set_page_count(p, 0);
376                 p->first_page = page;
377                 /* Make sure p->first_page is always valid for PageTail() */
378                 smp_wmb();
379                 __SetPageTail(p);
380         }
381 }
382 
383 static inline void prep_zero_page(struct page *page, unsigned int order,
384                                                         gfp_t gfp_flags)
385 {
386         int i;
387 
388         /*
389          * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
390          * and __GFP_HIGHMEM from hard or soft interrupt context.
391          */
392         VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
393         for (i = 0; i < (1 << order); i++)
394                 clear_highpage(page + i);
395 }
396 
397 #ifdef CONFIG_DEBUG_PAGEALLOC
398 unsigned int _debug_guardpage_minorder;
399 bool _debug_pagealloc_enabled __read_mostly;
400 bool _debug_guardpage_enabled __read_mostly;
401 
402 static int __init early_debug_pagealloc(char *buf)
403 {
404         if (!buf)
405                 return -EINVAL;
406 
407         if (strcmp(buf, "on") == 0)
408                 _debug_pagealloc_enabled = true;
409 
410         return 0;
411 }
412 early_param("debug_pagealloc", early_debug_pagealloc);
413 
414 static bool need_debug_guardpage(void)
415 {
416         /* If we don't use debug_pagealloc, we don't need guard page */
417         if (!debug_pagealloc_enabled())
418                 return false;
419 
420         return true;
421 }
422 
423 static void init_debug_guardpage(void)
424 {
425         if (!debug_pagealloc_enabled())
426                 return;
427 
428         _debug_guardpage_enabled = true;
429 }
430 
431 struct page_ext_operations debug_guardpage_ops = {
432         .need = need_debug_guardpage,
433         .init = init_debug_guardpage,
434 };
435 
436 static int __init debug_guardpage_minorder_setup(char *buf)
437 {
438         unsigned long res;
439 
440         if (kstrtoul(buf, 10, &res) < 0 ||  res > MAX_ORDER / 2) {
441                 printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
442                 return 0;
443         }
444         _debug_guardpage_minorder = res;
445         printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
446         return 0;
447 }
448 __setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
449 
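    /*
     * Usage sketch (assuming CONFIG_DEBUG_PAGEALLOC=y): booting with
     * "debug_pagealloc=on debug_guardpage_minorder=1" enables the hooks
     * above; expand() later turns split-off buddies whose order is below
     * the minorder into guard pages instead of returning them to the free
     * lists.
     */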
450 static inline void set_page_guard(struct zone *zone, struct page *page,
451                                 unsigned int order, int migratetype)
452 {
453         struct page_ext *page_ext;
454 
455         if (!debug_guardpage_enabled())
456                 return;
457 
458         page_ext = lookup_page_ext(page);
459         __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
460 
461         INIT_LIST_HEAD(&page->lru);
462         set_page_private(page, order);
463         /* Guard pages are not available for any usage */
464         __mod_zone_freepage_state(zone, -(1 << order), migratetype);
465 }
466 
467 static inline void clear_page_guard(struct zone *zone, struct page *page,
468                                 unsigned int order, int migratetype)
469 {
470         struct page_ext *page_ext;
471 
472         if (!debug_guardpage_enabled())
473                 return;
474 
475         page_ext = lookup_page_ext(page);
476         __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
477 
478         set_page_private(page, 0);
479         if (!is_migrate_isolate(migratetype))
480                 __mod_zone_freepage_state(zone, (1 << order), migratetype);
481 }
482 #else
483 struct page_ext_operations debug_guardpage_ops = { NULL, };
484 static inline void set_page_guard(struct zone *zone, struct page *page,
485                                 unsigned int order, int migratetype) {}
486 static inline void clear_page_guard(struct zone *zone, struct page *page,
487                                 unsigned int order, int migratetype) {}
488 #endif
489 
490 static inline void set_page_order(struct page *page, unsigned int order)
491 {
492         set_page_private(page, order);
493         __SetPageBuddy(page);
494 }
495 
496 static inline void rmv_page_order(struct page *page)
497 {
498         __ClearPageBuddy(page);
499         set_page_private(page, 0);
500 }
501 
502 /*
503  * This function checks whether a page is free && is the buddy.
504  * We can coalesce a page and its buddy if
505  * (a) the buddy is not in a hole &&
506  * (b) the buddy is in the buddy system &&
507  * (c) a page and its buddy have the same order &&
508  * (d) a page and its buddy are in the same zone.
509  *
510  * For recording whether a page is in the buddy system, we set ->_mapcount
511  * PAGE_BUDDY_MAPCOUNT_VALUE.
512  * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is
513  * serialized by zone->lock.
514  *
515  * For recording page's order, we use page_private(page).
516  */
517 static inline int page_is_buddy(struct page *page, struct page *buddy,
518                                                         unsigned int order)
519 {
520         if (!pfn_valid_within(page_to_pfn(buddy)))
521                 return 0;
522 
523         if (page_is_guard(buddy) && page_order(buddy) == order) {
524                 if (page_zone_id(page) != page_zone_id(buddy))
525                         return 0;
526 
527                 VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
528 
529                 return 1;
530         }
531 
532         if (PageBuddy(buddy) && page_order(buddy) == order) {
533                 /*
534                  * zone check is done late to avoid uselessly
535                  * calculating zone/node ids for pages that could
536                  * never merge.
537                  */
538                 if (page_zone_id(page) != page_zone_id(buddy))
539                         return 0;
540 
541                 VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
542 
543                 return 1;
544         }
545         return 0;
546 }
547 
548 /*
549  * Freeing function for a buddy system allocator.
550  *
551  * The concept of a buddy system is to maintain direct-mapped table
552  * (containing bit values) for memory blocks of various "orders".
553  * The bottom level table contains the map for the smallest allocatable
554  * units of memory (here, pages), and each level above it describes
555  * pairs of units from the levels below, hence, "buddies".
556  * At a high level, all that happens here is marking the table entry
557  * at the bottom level available, and propagating the changes upward
558  * as necessary, plus some accounting needed to play nicely with other
559  * parts of the VM system.
560  * At each level, we keep a list of pages, which are heads of contiguous
561  * runs of free pages of length (1 << order), marked with _mapcount
562  * PAGE_BUDDY_MAPCOUNT_VALUE. A page's order is recorded in the
563  * page_private(page) field.
564  * So when we are allocating or freeing one, we can derive the state of the
565  * other.  That is, if we allocate a small block, and both were
566  * free, the remainder of the region must be split into blocks.
567  * If a block is freed, and its buddy is also free, then this
568  * triggers coalescing into a block of larger size.
569  *
570  * -- nyc
571  */
572 
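    /*
     * Worked example: with __find_buddy_index(page_idx, order) being
     * page_idx ^ (1 << order) (see mm/internal.h), a free order-2 block at
     * page_idx 8 has its buddy at 8 ^ 4 = 12; if that buddy is free too,
     * the pair merges into the order-3 block starting at
     * combined_idx = buddy_idx & page_idx = 8.
     */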
573 static inline void __free_one_page(struct page *page,
574                 unsigned long pfn,
575                 struct zone *zone, unsigned int order,
576                 int migratetype)
577 {
578         unsigned long page_idx;
579         unsigned long combined_idx;
580         unsigned long uninitialized_var(buddy_idx);
581         struct page *buddy;
582         unsigned int max_order;
583 
584         max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
585 
586         VM_BUG_ON(!zone_is_initialized(zone));
587         VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
588 
589         VM_BUG_ON(migratetype == -1);
590         if (likely(!is_migrate_isolate(migratetype)))
591                 __mod_zone_freepage_state(zone, 1 << order, migratetype);
592 
593         page_idx = pfn & ((1 << MAX_ORDER) - 1);
594 
595         VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
596         VM_BUG_ON_PAGE(bad_range(zone, page), page);
597 
598 continue_merging:
599         while (order < max_order - 1) {
600                 buddy_idx = __find_buddy_index(page_idx, order);
601                 buddy = page + (buddy_idx - page_idx);
602                 if (!page_is_buddy(page, buddy, order))
603                         goto done_merging;
604                 /*
605                  * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
606                  * merge with it and move up one order.
607                  */
608                 if (page_is_guard(buddy)) {
609                         clear_page_guard(zone, buddy, order, migratetype);
610                 } else {
611                         list_del(&buddy->lru);
612                         zone->free_area[order].nr_free--;
613                         rmv_page_order(buddy);
614                 }
615                 combined_idx = buddy_idx & page_idx;
616                 page = page + (combined_idx - page_idx);
617                 page_idx = combined_idx;
618                 order++;
619         }
620         if (max_order < MAX_ORDER) {
621                 /* If we are here, it means order is >= pageblock_order.
622                  * We want to prevent merge between freepages on isolate
623                  * pageblock and normal pageblock. Without this, pageblock
624                  * isolation could cause incorrect freepage or CMA accounting.
625                  *
626                  * We don't want to hit this code for the more frequent
627                  * low-order merging.
628                  */
629                 if (unlikely(has_isolate_pageblock(zone))) {
630                         int buddy_mt;
631 
632                         buddy_idx = __find_buddy_index(page_idx, order);
633                         buddy = page + (buddy_idx - page_idx);
634                         buddy_mt = get_pageblock_migratetype(buddy);
635 
636                         if (migratetype != buddy_mt
637                                         && (is_migrate_isolate(migratetype) ||
638                                                 is_migrate_isolate(buddy_mt)))
639                                 goto done_merging;
640                 }
641                 max_order++;
642                 goto continue_merging;
643         }
644 
645 done_merging:
646         set_page_order(page, order);
647 
648         /*
649          * If this is not the largest possible page, check if the buddy
650          * of the next-highest order is free. If it is, it's possible
651          * that pages are being freed that will coalesce soon. In case that
652          * is happening, add the free page to the tail of the list so it's
653          * less likely to be used soon and more likely to be merged into a
654          * higher-order page.
655          */
656         if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
657                 struct page *higher_page, *higher_buddy;
658                 combined_idx = buddy_idx & page_idx;
659                 higher_page = page + (combined_idx - page_idx);
660                 buddy_idx = __find_buddy_index(combined_idx, order + 1);
661                 higher_buddy = higher_page + (buddy_idx - combined_idx);
662                 if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
663                         list_add_tail(&page->lru,
664                                 &zone->free_area[order].free_list[migratetype]);
665                         goto out;
666                 }
667         }
668 
669         list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
670 out:
671         zone->free_area[order].nr_free++;
672 }
673 
674 static inline int free_pages_check(struct page *page)
675 {
676         const char *bad_reason = NULL;
677         unsigned long bad_flags = 0;
678 
679         if (unlikely(page_mapcount(page)))
680                 bad_reason = "nonzero mapcount";
681         if (unlikely(page->mapping != NULL))
682                 bad_reason = "non-NULL mapping";
683         if (unlikely(atomic_read(&page->_count) != 0))
684                 bad_reason = "nonzero _count";
685         if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
686                 bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
687                 bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
688         }
689 #ifdef CONFIG_MEMCG
690         if (unlikely(page->mem_cgroup))
691                 bad_reason = "page still charged to cgroup";
692 #endif
693         if (unlikely(bad_reason)) {
694                 bad_page(page, bad_reason, bad_flags);
695                 return 1;
696         }
697         page_cpupid_reset_last(page);
698         if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
699                 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
700         return 0;
701 }
702 
703 /*
704  * Frees a number of pages from the PCP lists
705  * Assumes all pages on list are in same zone, and of same order.
706  * count is the number of pages to free.
707  *
708  * If the zone was previously in an "all pages pinned" state then look to
709  * see if this freeing clears that state.
710  *
711  * And clear the zone's pages_scanned counter, to hold off the "all pages are
712  * pinned" detection logic.
713  */
714 static void free_pcppages_bulk(struct zone *zone, int count,
715                                         struct per_cpu_pages *pcp)
716 {
717         int migratetype = 0;
718         int batch_free = 0;
719         int to_free = count;
720         unsigned long nr_scanned;
721 
722         spin_lock(&zone->lock);
723         nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
724         if (nr_scanned)
725                 __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
726 
727         while (to_free) {
728                 struct page *page;
729                 struct list_head *list;
730 
731                 /*
732                  * Remove pages from lists in a round-robin fashion. A
733                  * batch_free count is maintained that is incremented when an
734                  * empty list is encountered.  This is so more pages are freed
735                  * off fuller lists instead of spinning excessively around empty
736                  * lists
737                  */
738                 do {
739                         batch_free++;
740                         if (++migratetype == MIGRATE_PCPTYPES)
741                                 migratetype = 0;
742                         list = &pcp->lists[migratetype];
743                 } while (list_empty(list));
744 
745                 /* This is the only non-empty list. Free them all. */
746                 if (batch_free == MIGRATE_PCPTYPES)
747                         batch_free = to_free;
748 
749                 do {
750                         int mt; /* migratetype of the to-be-freed page */
751 
752                         page = list_entry(list->prev, struct page, lru);
753                         /* must delete as __free_one_page list manipulates */
754                         list_del(&page->lru);
755                         mt = get_freepage_migratetype(page);
756                         if (unlikely(has_isolate_pageblock(zone)))
757                                 mt = get_pageblock_migratetype(page);
758 
759                         /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
760                         __free_one_page(page, page_to_pfn(page), zone, 0, mt);
761                         trace_mm_page_pcpu_drain(page, 0, mt);
762                 } while (--to_free && --batch_free && !list_empty(list));
763         }
764         spin_unlock(&zone->lock);
765 }
766 
767 static void free_one_page(struct zone *zone,
768                                 struct page *page, unsigned long pfn,
769                                 unsigned int order,
770                                 int migratetype)
771 {
772         unsigned long nr_scanned;
773         spin_lock(&zone->lock);
774         nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
775         if (nr_scanned)
776                 __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
777 
778         if (unlikely(has_isolate_pageblock(zone) ||
779                 is_migrate_isolate(migratetype))) {
780                 migratetype = get_pfnblock_migratetype(page, pfn);
781         }
782         __free_one_page(page, pfn, zone, order, migratetype);
783         spin_unlock(&zone->lock);
784 }
785 
786 static int free_tail_pages_check(struct page *head_page, struct page *page)
787 {
788         if (!IS_ENABLED(CONFIG_DEBUG_VM))
789                 return 0;
790         if (unlikely(!PageTail(page))) {
791                 bad_page(page, "PageTail not set", 0);
792                 return 1;
793         }
794         if (unlikely(page->first_page != head_page)) {
795                 bad_page(page, "first_page not consistent", 0);
796                 return 1;
797         }
798         return 0;
799 }
800 
801 static bool free_pages_prepare(struct page *page, unsigned int order)
802 {
803         bool compound = PageCompound(page);
804         int i, bad = 0;
805 
806         VM_BUG_ON_PAGE(PageTail(page), page);
807         VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
808 
809         trace_mm_page_free(page, order);
810         kmemcheck_free_shadow(page, order);
811         kasan_free_pages(page, order);
812 
813         if (PageAnon(page))
814                 page->mapping = NULL;
815         bad += free_pages_check(page);
816         for (i = 1; i < (1 << order); i++) {
817                 if (compound)
818                         bad += free_tail_pages_check(page, page + i);
819                 bad += free_pages_check(page + i);
820         }
821         if (bad)
822                 return false;
823 
824         reset_page_owner(page, order);
825 
826         if (!PageHighMem(page)) {
827                 debug_check_no_locks_freed(page_address(page),
828                                            PAGE_SIZE << order);
829                 debug_check_no_obj_freed(page_address(page),
830                                            PAGE_SIZE << order);
831         }
832         arch_free_page(page, order);
833         kernel_map_pages(page, 1 << order, 0);
834 
835         return true;
836 }
837 
838 static void __free_pages_ok(struct page *page, unsigned int order)
839 {
840         unsigned long flags;
841         int migratetype;
842         unsigned long pfn = page_to_pfn(page);
843 
844         if (!free_pages_prepare(page, order))
845                 return;
846 
847         migratetype = get_pfnblock_migratetype(page, pfn);
848         local_irq_save(flags);
849         __count_vm_events(PGFREE, 1 << order);
850         set_freepage_migratetype(page, migratetype);
851         free_one_page(page_zone(page), page, pfn, order, migratetype);
852         local_irq_restore(flags);
853 }
854 
855 void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
856                                                         unsigned int order)
857 {
858         unsigned int nr_pages = 1 << order;
859         struct page *p = page;
860         unsigned int loop;
861 
862         prefetchw(p);
863         for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
864                 prefetchw(p + 1);
865                 __ClearPageReserved(p);
866                 set_page_count(p, 0);
867         }
868         __ClearPageReserved(p);
869         set_page_count(p, 0);
870 
871         page_zone(page)->managed_pages += nr_pages;
872         set_page_refcounted(page);
873         __free_pages(page, order);
874 }
875 
876 #ifdef CONFIG_CMA
877 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
878 void __init init_cma_reserved_pageblock(struct page *page)
879 {
880         unsigned i = pageblock_nr_pages;
881         struct page *p = page;
882 
883         do {
884                 __ClearPageReserved(p);
885                 set_page_count(p, 0);
886         } while (++p, --i);
887 
888         set_pageblock_migratetype(page, MIGRATE_CMA);
889 
890         if (pageblock_order >= MAX_ORDER) {
891                 i = pageblock_nr_pages;
892                 p = page;
893                 do {
894                         set_page_refcounted(p);
895                         __free_pages(p, MAX_ORDER - 1);
896                         p += MAX_ORDER_NR_PAGES;
897                 } while (i -= MAX_ORDER_NR_PAGES);
898         } else {
899                 set_page_refcounted(page);
900                 __free_pages(page, pageblock_order);
901         }
902 
903         adjust_managed_page_count(page, pageblock_nr_pages);
904 }
905 #endif
906 
907 /*
908  * The order of subdivision here is critical for the IO subsystem.
909  * Please do not alter this order without good reasons and regression
910  * testing. Specifically, as large blocks of memory are subdivided,
911  * the order in which smaller blocks are delivered depends on the order
912  * they're subdivided in this function. This is the primary factor
913  * influencing the order in which pages are delivered to the IO
914  * subsystem according to empirical testing, and this is also justified
915  * by considering the behavior of a buddy system containing a single
916  * large block of memory acted on by a series of small allocations.
917  * This behavior is a critical factor in sglist merging's success.
918  *
919  * -- nyc
920  */
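    /*
     * Worked example: expand(zone, page, 0, 3, area, mt) satisfies an
     * order-0 request from an order-3 block by putting page[4] on the
     * order-2 free list, page[2] on the order-1 list and page[1] on the
     * order-0 list (or marking them as guard pages when debug_pagealloc
     * requests it), leaving page[0] for the caller.
     */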
921 static inline void expand(struct zone *zone, struct page *page,
922         int low, int high, struct free_area *area,
923         int migratetype)
924 {
925         unsigned long size = 1 << high;
926 
927         while (high > low) {
928                 area--;
929                 high--;
930                 size >>= 1;
931                 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
932 
933                 if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
934                         debug_guardpage_enabled() &&
935                         high < debug_guardpage_minorder()) {
936                         /*
937                          * Mark as guard pages (or page); these can be merged
938                          * back into the allocator when the buddy is freed.
939                          * The corresponding page table entries are not touched;
940                          * the pages stay not present in the virtual address space.
941                          */
942                         set_page_guard(zone, &page[size], high, migratetype);
943                         continue;
944                 }
945                 list_add(&page[size].lru, &area->free_list[migratetype]);
946                 area->nr_free++;
947                 set_page_order(&page[size], high);
948         }
949 }
950 
951 /*
952  * This page is about to be returned from the page allocator
953  */
954 static inline int check_new_page(struct page *page)
955 {
956         const char *bad_reason = NULL;
957         unsigned long bad_flags = 0;
958 
959         if (unlikely(page_mapcount(page)))
960                 bad_reason = "nonzero mapcount";
961         if (unlikely(page->mapping != NULL))
962                 bad_reason = "non-NULL mapping";
963         if (unlikely(atomic_read(&page->_count) != 0))
964                 bad_reason = "nonzero _count";
965         if (unlikely(page->flags & __PG_HWPOISON)) {
966                 bad_reason = "HWPoisoned (hardware-corrupted)";
967                 bad_flags = __PG_HWPOISON;
968         }
969         if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
970                 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
971                 bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
972         }
973 #ifdef CONFIG_MEMCG
974         if (unlikely(page->mem_cgroup))
975                 bad_reason = "page still charged to cgroup";
976 #endif
977         if (unlikely(bad_reason)) {
978                 bad_page(page, bad_reason, bad_flags);
979                 return 1;
980         }
981         return 0;
982 }
983 
984 static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
985                                                                 int alloc_flags)
986 {
987         int i;
988 
989         for (i = 0; i < (1 << order); i++) {
990                 struct page *p = page + i;
991                 if (unlikely(check_new_page(p)))
992                         return 1;
993         }
994 
995         set_page_private(page, 0);
996         set_page_refcounted(page);
997 
998         arch_alloc_page(page, order);
999         kernel_map_pages(page, 1 << order, 1);
1000         kasan_alloc_pages(page, order);
1001 
1002         if (gfp_flags & __GFP_ZERO)
1003                 prep_zero_page(page, order, gfp_flags);
1004 
1005         if (order && (gfp_flags & __GFP_COMP))
1006                 prep_compound_page(page, order);
1007 
1008         set_page_owner(page, order, gfp_flags);
1009 
1010         /*
1011          * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
1012          * allocate the page. The expectation is that the caller is taking
1013          * steps that will free more memory. The caller should avoid the page
1014          * being used for !PFMEMALLOC purposes.
1015          */
1016         if (alloc_flags & ALLOC_NO_WATERMARKS)
1017                 set_page_pfmemalloc(page);
1018         else
1019                 clear_page_pfmemalloc(page);
1020 
1021         return 0;
1022 }
1023 
1024 /*
1025  * Go through the free lists for the given migratetype and remove
1026  * the smallest available page from the freelists
1027  */
1028 static inline
1029 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
1030                                                 int migratetype)
1031 {
1032         unsigned int current_order;
1033         struct free_area *area;
1034         struct page *page;
1035 
1036         /* Find a page of the appropriate size in the preferred list */
1037         for (current_order = order; current_order < MAX_ORDER; ++current_order) {
1038                 area = &(zone->free_area[current_order]);
1039                 if (list_empty(&area->free_list[migratetype]))
1040                         continue;
1041 
1042                 page = list_entry(area->free_list[migratetype].next,
1043                                                         struct page, lru);
1044                 list_del(&page->lru);
1045                 rmv_page_order(page);
1046                 area->nr_free--;
1047                 expand(zone, page, order, current_order, area, migratetype);
1048                 set_freepage_migratetype(page, migratetype);
1049                 return page;
1050         }
1051 
1052         return NULL;
1053 }
1054 
1055 
1056 /*
1057  * This array describes the order lists are fallen back to when
1058  * the free lists for the desirable migrate type are depleted
1059  */
1060 static int fallbacks[MIGRATE_TYPES][4] = {
1061         [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,     MIGRATE_RESERVE },
1062         [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,     MIGRATE_RESERVE },
1063         [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,   MIGRATE_RESERVE },
1064 #ifdef CONFIG_CMA
1065         [MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* Never used */
1066 #endif
1067         [MIGRATE_RESERVE]     = { MIGRATE_RESERVE }, /* Never used */
1068 #ifdef CONFIG_MEMORY_ISOLATION
1069         [MIGRATE_ISOLATE]     = { MIGRATE_RESERVE }, /* Never used */
1070 #endif
1071 };
1072 
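    /*
     * Example read of the table above: a MIGRATE_UNMOVABLE request whose
     * own free lists are empty falls back to MIGRATE_RECLAIMABLE, then
     * MIGRATE_MOVABLE, and finally MIGRATE_RESERVE before giving up.
     */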
1073 #ifdef CONFIG_CMA
1074 static struct page *__rmqueue_cma_fallback(struct zone *zone,
1075                                         unsigned int order)
1076 {
1077         return __rmqueue_smallest(zone, order, MIGRATE_CMA);
1078 }
1079 #else
1080 static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
1081                                         unsigned int order) { return NULL; }
1082 #endif
1083 
1084 /*
1085  * Move the free pages in a range to the free lists of the requested type.
1086  * Note that start_page and end_page are not aligned on a pageblock
1087  * boundary. If alignment is required, use move_freepages_block()
1088  */
1089 int move_freepages(struct zone *zone,
1090                           struct page *start_page, struct page *end_page,
1091                           int migratetype)
1092 {
1093         struct page *page;
1094         unsigned int order;
1095         int pages_moved = 0;
1096 
1097 #ifndef CONFIG_HOLES_IN_ZONE
1098         /*
1099          * page_zone is not safe to call in this context when
1100          * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
1101          * anyway as we check zone boundaries in move_freepages_block().
1102          * Remove at a later date when no bug reports exist related to
1103          * grouping pages by mobility
1104          */
1105         VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
1106 #endif
1107 
1108         for (page = start_page; page <= end_page;) {
1109                 if (!pfn_valid_within(page_to_pfn(page))) {
1110                         page++;
1111                         continue;
1112                 }
1113 
1114                 /* Make sure we are not inadvertently changing nodes */
1115                 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
1116 
1117                 if (!PageBuddy(page)) {
1118                         page++;
1119                         continue;
1120                 }
1121 
1122                 order = page_order(page);
1123                 list_move(&page->lru,
1124                           &zone->free_area[order].free_list[migratetype]);
1125                 set_freepage_migratetype(page, migratetype);
1126                 page += 1 << order;
1127                 pages_moved += 1 << order;
1128         }
1129 
1130         return pages_moved;
1131 }
1132 
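    /*
     * Worked example (assuming pageblock_order == 9, i.e. 512 pages per
     * pageblock): for a page at pfn 1000, move_freepages_block() below
     * rounds down to start_pfn = 1000 & ~511 = 512 and moves the free
     * pages in the block spanning pfns 512..1023 to the requested
     * migratetype's free lists.
     */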
1133 int move_freepages_block(struct zone *zone, struct page *page,
1134                                 int migratetype)
1135 {
1136         unsigned long start_pfn, end_pfn;
1137         struct page *start_page, *end_page;
1138 
1139         start_pfn = page_to_pfn(page);
1140         start_pfn = start_pfn & ~(pageblock_nr_pages-1);
1141         start_page = pfn_to_page(start_pfn);
1142         end_page = start_page + pageblock_nr_pages - 1;
1143         end_pfn = start_pfn + pageblock_nr_pages - 1;
1144 
1145         /* Do not cross zone boundaries */
1146         if (!zone_spans_pfn(zone, start_pfn))
1147                 start_page = page;
1148         if (!zone_spans_pfn(zone, end_pfn))
1149                 return 0;
1150 
1151         return move_freepages(zone, start_page, end_page, migratetype);
1152 }
1153 
1154 static void change_pageblock_range(struct page *pageblock_page,
1155                                         int start_order, int migratetype)
1156 {
1157         int nr_pageblocks = 1 << (start_order - pageblock_order);
1158 
1159         while (nr_pageblocks--) {
1160                 set_pageblock_migratetype(pageblock_page, migratetype);
1161                 pageblock_page += pageblock_nr_pages;
1162         }
1163 }
1164 
1165 /*
1166  * When we are falling back to another migratetype during allocation, try to
1167  * steal extra free pages from the same pageblocks to satisfy further
1168  * allocations, instead of polluting multiple pageblocks.
1169  *
1170  * If we are stealing a relatively large buddy page, it is likely there will
1171  * be more free pages in the pageblock, so try to steal them all. For
1172  * reclaimable and unmovable allocations, we steal regardless of page size,
1173  * as fragmentation caused by those allocations polluting movable pageblocks
1174  * is worse than movable allocations stealing from unmovable and reclaimable
1175  * pageblocks.
1176  */
1177 static bool can_steal_fallback(unsigned int order, int start_mt)
1178 {
1179         /*
1180          * This order check is intentional, even though the check below
1181          * uses a more relaxed one. The reason is that we can actually
1182          * steal the whole pageblock if this condition is met, but the
1183          * check below doesn't guarantee it and is just a heuristic, so
1184          * it could be changed at any time.
1185          */
1186         if (order >= pageblock_order)
1187                 return true;
1188 
1189         if (order >= pageblock_order / 2 ||
1190                 start_mt == MIGRATE_RECLAIMABLE ||
1191                 start_mt == MIGRATE_UNMOVABLE ||
1192                 page_group_by_mobility_disabled)
1193                 return true;
1194 
1195         return false;
1196 }
1197 
1198 /*
1199  * This function implements the actual steal behaviour. If the order is large
1200  * enough, we can steal the whole pageblock. If not, we first move the free
1201  * pages in this pageblock and check whether at least half of the pages were
1202  * moved. If so, we can change the migratetype of the pageblock and permanently
1203  * use its pages as the requested migratetype in the future.
1204  */
1205 static void steal_suitable_fallback(struct zone *zone, struct page *page,
1206                                                           int start_type)
1207 {
1208         unsigned int current_order = page_order(page);
1209         int pages;
1210 
1211         /* Take ownership for orders >= pageblock_order */
1212         if (current_order >= pageblock_order) {
1213                 change_pageblock_range(page, current_order, start_type);
1214                 return;
1215         }
1216 
1217         pages = move_freepages_block(zone, page, start_type);
1218 
1219         /* Claim the whole block if over half of it is free */
1220         if (pages >= (1 << (pageblock_order-1)) ||
1221                         page_group_by_mobility_disabled)
1222                 set_pageblock_migratetype(page, start_type);
1223 }
1224 
1225 /*
1226  * Check whether there is a suitable fallback freepage with requested order.
1227  * If only_stealable is true, this function returns fallback_mt only if
1228  * we can steal other freepages all together. This would help to reduce
1229  * fragmentation due to mixed migratetype pages in one pageblock.
1230  */
1231 int find_suitable_fallback(struct free_area *area, unsigned int order,
1232                         int migratetype, bool only_stealable, bool *can_steal)
1233 {
1234         int i;
1235         int fallback_mt;
1236 
1237         if (area->nr_free == 0)
1238                 return -1;
1239 
1240         *can_steal = false;
1241         for (i = 0;; i++) {
1242                 fallback_mt = fallbacks[migratetype][i];
1243                 if (fallback_mt == MIGRATE_RESERVE)
1244                         break;
1245 
1246                 if (list_empty(&area->free_list[fallback_mt]))
1247                         continue;
1248 
1249                 if (can_steal_fallback(order, migratetype))
1250                         *can_steal = true;
1251 
1252                 if (!only_stealable)
1253                         return fallback_mt;
1254 
1255                 if (*can_steal)
1256                         return fallback_mt;
1257         }
1258 
1259         return -1;
1260 }
1261 
1262 /* Remove an element from the buddy allocator from the fallback list */
1263 static inline struct page *
1264 __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
1265 {
1266         struct free_area *area;
1267         unsigned int current_order;
1268         struct page *page;
1269         int fallback_mt;
1270         bool can_steal;
1271 
1272         /* Find the largest possible block of pages in the other list */
1273         for (current_order = MAX_ORDER-1;
1274                                 current_order >= order && current_order <= MAX_ORDER-1;
1275                                 --current_order) {
1276                 area = &(zone->free_area[current_order]);
1277                 fallback_mt = find_suitable_fallback(area, current_order,
1278                                 start_migratetype, false, &can_steal);
1279                 if (fallback_mt == -1)
1280                         continue;
1281 
1282                 page = list_entry(area->free_list[fallback_mt].next,
1283                                                 struct page, lru);
1284                 if (can_steal)
1285                         steal_suitable_fallback(zone, page, start_migratetype);
1286 
1287                 /* Remove the page from the freelists */
1288                 area->nr_free--;
1289                 list_del(&page->lru);
1290                 rmv_page_order(page);
1291 
1292                 expand(zone, page, order, current_order, area,
1293                                         start_migratetype);
1294                 /*
1295                  * The freepage_migratetype may differ from pageblock's
1296                  * migratetype depending on the decisions in
1297                  * steal_suitable_fallback(). This is OK as long as it
1298                  * does not differ for MIGRATE_CMA pageblocks. For CMA
1299                  * we need to make sure unallocated pages flushed from
1300                  * pcp lists are returned to the correct freelist.
1301                  */
1302                 set_freepage_migratetype(page, start_migratetype);
1303 
1304                 trace_mm_page_alloc_extfrag(page, order, current_order,
1305                         start_migratetype, fallback_mt);
1306 
1307                 return page;
1308         }
1309 
1310         return NULL;
1311 }
1312 
1313 /*
1314  * Do the hard work of removing an element from the buddy allocator.
1315  * Call me with the zone->lock already held.
1316  */
1317 static struct page *__rmqueue(struct zone *zone, unsigned int order,
1318                                                 int migratetype)
1319 {
1320         struct page *page;
1321 
1322 retry_reserve:
1323         page = __rmqueue_smallest(zone, order, migratetype);
1324 
1325         if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
1326                 if (migratetype == MIGRATE_MOVABLE)
1327                         page = __rmqueue_cma_fallback(zone, order);
1328 
1329                 if (!page)
1330                         page = __rmqueue_fallback(zone, order, migratetype);
1331 
1332                 /*
1333                  * Use MIGRATE_RESERVE rather than fail an allocation. goto
1334                  * is used because __rmqueue_smallest is an inline function
1335                  * and we want just one call site
1336                  */
1337                 if (!page) {
1338                         migratetype = MIGRATE_RESERVE;
1339                         goto retry_reserve;
1340                 }
1341         }
1342 
1343         trace_mm_page_alloc_zone_locked(page, order, migratetype);
1344         return page;
1345 }
1346 
1347 /*
1348  * Obtain a specified number of elements from the buddy allocator, all under
1349  * a single hold of the lock, for efficiency.  Add them to the supplied list.
1350  * Returns the number of new pages which were placed at *list.
1351  */
1352 static int rmqueue_bulk(struct zone *zone, unsigned int order,
1353                         unsigned long count, struct list_head *list,
1354                         int migratetype, bool cold)
1355 {
1356         int i;
1357 
1358         spin_lock(&zone->lock);
1359         for (i = 0; i < count; ++i) {
1360                 struct page *page = __rmqueue(zone, order, migratetype);
1361                 if (unlikely(page == NULL))
1362                         break;
1363 
1364                 /*
1365                  * Split buddy pages returned by expand() are received here
1366                  * in physical page order. The page is added to the caller's
1367                  * list and the list head then moves forward. From the caller's
1368                  * perspective, the linked list is ordered by page number in
1369                  * some conditions. This is useful for IO devices that can
1370                  * merge IO requests if the physical pages are ordered
1371                  * properly.
1372                  */
1373                 if (likely(!cold))
1374                         list_add(&page->lru, list);
1375                 else
1376                         list_add_tail(&page->lru, list);
1377                 list = &page->lru;
1378                 if (is_migrate_cma(get_freepage_migratetype(page)))
1379                         __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
1380                                               -(1 << order));
1381         }
1382         __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
1383         spin_unlock(&zone->lock);
1384         return i;
1385 }
1386 
1387 #ifdef CONFIG_NUMA
1388 /*
1389  * Called from the vmstat counter updater to drain pagesets of this
1390  * currently executing processor on remote nodes after they have
1391  * expired.
1392  *
1393  * Note that this function must be called with the thread pinned to
1394  * a single processor.
1395  */
1396 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
1397 {
1398         unsigned long flags;
1399         int to_drain, batch;
1400 
1401         local_irq_save(flags);
1402         batch = READ_ONCE(pcp->batch);
1403         to_drain = min(pcp->count, batch);
1404         if (to_drain > 0) {
1405                 free_pcppages_bulk(zone, to_drain, pcp);
1406                 pcp->count -= to_drain;
1407         }
1408         local_irq_restore(flags);
1409 }
1410 #endif
1411 
1412 /*
1413  * Drain pcplists of the indicated processor and zone.
1414  *
1415  * The processor must either be the current processor and the
1416  * thread pinned to the current processor or a processor that
1417  * is not online.
1418  */
1419 static void drain_pages_zone(unsigned int cpu, struct zone *zone)
1420 {
1421         unsigned long flags;
1422         struct per_cpu_pageset *pset;
1423         struct per_cpu_pages *pcp;
1424 
1425         local_irq_save(flags);
1426         pset = per_cpu_ptr(zone->pageset, cpu);
1427 
1428         pcp = &pset->pcp;
1429         if (pcp->count) {
1430                 free_pcppages_bulk(zone, pcp->count, pcp);
1431                 pcp->count = 0;
1432         }
1433         local_irq_restore(flags);
1434 }
1435 
1436 /*
1437  * Drain pcplists of all zones on the indicated processor.
1438  *
1439  * The processor must either be the current processor and the
1440  * thread pinned to the current processor or a processor that
1441  * is not online.
1442  */
1443 static void drain_pages(unsigned int cpu)
1444 {
1445         struct zone *zone;
1446 
1447         for_each_populated_zone(zone) {
1448                 drain_pages_zone(cpu, zone);
1449         }
1450 }
1451 
1452 /*
1453  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
1454  *
1455  * The CPU has to be pinned. When zone parameter is non-NULL, spill just
1456  * the single zone's pages.
1457  */
1458 void drain_local_pages(struct zone *zone)
1459 {
1460         int cpu = smp_processor_id();
1461 
1462         if (zone)
1463                 drain_pages_zone(cpu, zone);
1464         else
1465                 drain_pages(cpu);
1466 }
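
/*
 * A minimal usage sketch of the pinning requirement above (hypothetical
 * caller, not taken from this file): a task that is not already bound to
 * one CPU can disable preemption around the drain.
 */
static void example_drain_this_cpu(struct zone *zone)
{
	preempt_disable();		/* pin to the current CPU */
	drain_local_pages(zone);	/* zone == NULL drains all zones */
	preempt_enable();
}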
1467 
1468 /*
1469  * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
1470  *
1471  * When zone parameter is non-NULL, spill just the single zone's pages.
1472  *
1473  * Note that this code is protected against sending an IPI to an offline
1474  * CPU but does not guarantee sending an IPI to newly hotplugged CPUs:
1475  * on_each_cpu_mask() blocks hotplug and won't talk to offlined CPUs but
1476  * nothing keeps CPUs from showing up after we populated the cpumask and
1477  * before the call to on_each_cpu_mask().
1478  */
1479 void drain_all_pages(struct zone *zone)
1480 {
1481         int cpu;
1482 
1483         /*
1484          * Allocate in the BSS so we won't require allocation in
1485          * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
1486          */
1487         static cpumask_t cpus_with_pcps;
1488 
1489         /*
1490          * We don't care about racing with CPU hotplug events:
1491          * the offline notification will cause the notified
1492          * CPU to drain its pcps, and on_each_cpu_mask()
1493          * disables preemption as part of its processing.
1494          */
1495         for_each_online_cpu(cpu) {
1496                 struct per_cpu_pageset *pcp;
1497                 struct zone *z;
1498                 bool has_pcps = false;
1499 
1500                 if (zone) {
1501                         pcp = per_cpu_ptr(zone->pageset, cpu);
1502                         if (pcp->pcp.count)
1503                                 has_pcps = true;
1504                 } else {
1505                         for_each_populated_zone(z) {
1506                                 pcp = per_cpu_ptr(z->pageset, cpu);
1507                                 if (pcp->pcp.count) {
1508                                         has_pcps = true;
1509                                         break;
1510                                 }
1511                         }
1512                 }
1513 
1514                 if (has_pcps)
1515                         cpumask_set_cpu(cpu, &cpus_with_pcps);
1516                 else
1517                         cpumask_clear_cpu(cpu, &cpus_with_pcps);
1518         }
1519         on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages,
1520                                                                 zone, 1);
1521 }
1522 
1523 #ifdef CONFIG_HIBERNATION
1524 
1525 void mark_free_pages(struct zone *zone)
1526 {
1527         unsigned long pfn, max_zone_pfn;
1528         unsigned long flags;
1529         unsigned int order, t;
1530         struct list_head *curr;
1531 
1532         if (zone_is_empty(zone))
1533                 return;
1534 
1535         spin_lock_irqsave(&zone->lock, flags);
1536 
1537         max_zone_pfn = zone_end_pfn(zone);
1538         for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1539                 if (pfn_valid(pfn)) {
1540                         struct page *page = pfn_to_page(pfn);
1541 
1542                         if (!swsusp_page_is_forbidden(page))
1543                                 swsusp_unset_page_free(page);
1544                 }
1545 
1546         for_each_migratetype_order(order, t) {
1547                 list_for_each(curr, &zone->free_area[order].free_list[t]) {
1548                         unsigned long i;
1549 
1550                         pfn = page_to_pfn(list_entry(curr, struct page, lru));
1551                         for (i = 0; i < (1UL << order); i++)
1552                                 swsusp_set_page_free(pfn_to_page(pfn + i));
1553                 }
1554         }
1555         spin_unlock_irqrestore(&zone->lock, flags);
1556 }
1557 #endif /* CONFIG_HIBERNATION */
1558 
1559 /*
1560  * Free a 0-order page
1561  * cold == true ? free a cold page : free a hot page
1562  */
1563 void free_hot_cold_page(struct page *page, bool cold)
1564 {
1565         struct zone *zone = page_zone(page);
1566         struct per_cpu_pages *pcp;
1567         unsigned long flags;
1568         unsigned long pfn = page_to_pfn(page);
1569         int migratetype;
1570 
1571         if (!free_pages_prepare(page, 0))
1572                 return;
1573 
1574         migratetype = get_pfnblock_migratetype(page, pfn);
1575         set_freepage_migratetype(page, migratetype);
1576         local_irq_save(flags);
1577         __count_vm_event(PGFREE);
1578 
1579         /*
1580          * We only track unmovable, reclaimable and movable on pcp lists.
1581          * Free ISOLATE pages back to the allocator because they are being
1582          * offlined, but treat RESERVE as movable pages so we can get those
1583          * areas back if necessary. Otherwise, we may have to free
1584          * excessively into the page allocator.
1585          */
1586         if (migratetype >= MIGRATE_PCPTYPES) {
1587                 if (unlikely(is_migrate_isolate(migratetype))) {
1588                         free_one_page(zone, page, pfn, 0, migratetype);
1589                         goto out;
1590                 }
1591                 migratetype = MIGRATE_MOVABLE;
1592         }
1593 
1594         pcp = &this_cpu_ptr(zone->pageset)->pcp;
1595         if (!cold)
1596                 list_add(&page->lru, &pcp->lists[migratetype]);
1597         else
1598                 list_add_tail(&page->lru, &pcp->lists[migratetype]);
1599         pcp->count++;
1600         if (pcp->count >= pcp->high) {
1601                 unsigned long batch = READ_ONCE(pcp->batch);
1602                 free_pcppages_bulk(zone, batch, pcp);
1603                 pcp->count -= batch;
1604         }
1605 
1606 out:
1607         local_irq_restore(flags);
1608 }
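
/*
 * Hedged usage sketch for the hot/cold hint above (hypothetical caller):
 * a page the CPU just wrote is likely cache-hot and worth handing out
 * again soon, while a page only ever touched by a device is cache-cold.
 */
static void example_free_pages(struct page *cpu_page, struct page *dma_page)
{
	free_hot_cold_page(cpu_page, false);	/* hot: goes to the list head */
	free_hot_cold_page(dma_page, true);	/* cold: goes to the list tail */
}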
1609 
1610 /*
1611  * Free a list of 0-order pages
1612  */
1613 void free_hot_cold_page_list(struct list_head *list, bool cold)
1614 {
1615         struct page *page, *next;
1616 
1617         list_for_each_entry_safe(page, next, list, lru) {
1618                 trace_mm_page_free_batched(page, cold);
1619                 free_hot_cold_page(page, cold);
1620         }
1621 }
1622 
1623 /*
1624  * split_page takes a non-compound higher-order page, and splits it into
1625  * n (1<<order) sub-pages: page[0..n-1]
1626  * Each sub-page must be freed individually.
1627  *
1628  * Note: this is probably too low level an operation for use in drivers.
1629  * Please consult with lkml before using this in your driver.
1630  */
1631 void split_page(struct page *page, unsigned int order)
1632 {
1633         int i;
1634 
1635         VM_BUG_ON_PAGE(PageCompound(page), page);
1636         VM_BUG_ON_PAGE(!page_count(page), page);
1637 
1638 #ifdef CONFIG_KMEMCHECK
1639         /*
1640          * Split shadow pages too, because free(page[0]) would
1641          * otherwise free the whole shadow.
1642          */
1643         if (kmemcheck_page_is_tracked(page))
1644                 split_page(virt_to_page(page[0].shadow), order);
1645 #endif
1646 
1647         set_page_owner(page, 0, 0);
1648         for (i = 1; i < (1 << order); i++) {
1649                 set_page_refcounted(page + i);
1650                 set_page_owner(page + i, 0, 0);
1651         }
1652 }
1653 EXPORT_SYMBOL_GPL(split_page);
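
/*
 * A hedged usage sketch for split_page() (hypothetical caller; real users
 * should consult lkml per the note above): take an order-2 block, split it
 * into four order-0 pages, and free each one individually.
 */
static void example_split_and_free(void)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);
	int i;

	if (!page)
		return;
	split_page(page, 2);			/* 1 << 2 == 4 sub-pages */
	for (i = 0; i < 4; i++)
		__free_page(page + i);		/* each sub-page freed alone */
}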
1654 
1655 int __isolate_free_page(struct page *page, unsigned int order)
1656 {
1657         unsigned long watermark;
1658         struct zone *zone;
1659         int mt;
1660 
1661         BUG_ON(!PageBuddy(page));
1662 
1663         zone = page_zone(page);
1664         mt = get_pageblock_migratetype(page);
1665 
1666         if (!is_migrate_isolate(mt)) {
1667                 /* Obey watermarks as if the page was being allocated */
1668                 watermark = low_wmark_pages(zone) + (1 << order);
1669                 if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
1670                         return 0;
1671 
1672                 __mod_zone_freepage_state(zone, -(1UL << order), mt);
1673         }
1674 
1675         /* Remove page from free list */
1676         list_del(&page->lru);
1677         zone->free_area[order].nr_free--;
1678         rmv_page_order(page);
1679 
1680         /* Set the pageblock if the isolated page is at least a pageblock */
1681         if (order >= pageblock_order - 1) {
1682                 struct page *endpage = page + (1 << order) - 1;
1683                 for (; page < endpage; page += pageblock_nr_pages) {
1684                         int mt = get_pageblock_migratetype(page);
1685                         if (!is_migrate_isolate(mt) && !is_migrate_cma(mt))
1686                                 set_pageblock_migratetype(page,
1687                                                           MIGRATE_MOVABLE);
1688                 }
1689         }
1690 
1691         set_page_owner(page, order, 0);
1692         return 1UL << order;
1693 }
1694 
1695 /*
1696  * Similar to split_page except the page is already free. As this is only
1697  * being used for migration, the migratetype of the block also changes.
1698  * As this is called with interrupts disabled, the caller is responsible
1699  * for calling arch_alloc_page() and kernel_map_pages() after interrupts
1700  * are enabled.
1701  *
1702  * Note: this is probably too low level an operation for use in drivers.
1703  * Please consult with lkml before using this in your driver.
1704  */
1705 int split_free_page(struct page *page)
1706 {
1707         unsigned int order;
1708         int nr_pages;
1709 
1710         order = page_order(page);
1711 
1712         nr_pages = __isolate_free_page(page, order);
1713         if (!nr_pages)
1714                 return 0;
1715 
1716         /* Split into individual pages */
1717         set_page_refcounted(page);
1718         split_page(page, order);
1719         return nr_pages;
1720 }
1721 
1722 /*
1723  * Allocate a page from the given zone. Use pcplists for order-0 allocations.
1724  */
1725 static inline
1726 struct page *buffered_rmqueue(struct zone *preferred_zone,
1727                         struct zone *zone, unsigned int order,
1728                         gfp_t gfp_flags, int migratetype)
1729 {
1730         unsigned long flags;
1731         struct page *page;
1732         bool cold = ((gfp_flags & __GFP_COLD) != 0);
1733 
1734         if (likely(order == 0)) {
1735                 struct per_cpu_pages *pcp;
1736                 struct list_head *list;
1737 
1738                 local_irq_save(flags);
1739                 pcp = &this_cpu_ptr(zone->pageset)->pcp;
1740                 list = &pcp->lists[migratetype];
1741                 if (list_empty(list)) {
1742                         pcp->count += rmqueue_bulk(zone, 0,
1743                                         pcp->batch, list,
1744                                         migratetype, cold);
1745                         if (unlikely(list_empty(list)))
1746                                 goto failed;
1747                 }
1748 
1749                 if (cold)
1750                         page = list_entry(list->prev, struct page, lru);
1751                 else
1752                         page = list_entry(list->next, struct page, lru);
1753 
1754                 list_del(&page->lru);
1755                 pcp->count--;
1756         } else {
1757                 if (unlikely(gfp_flags & __GFP_NOFAIL)) {
1758                         /*
1759                          * __GFP_NOFAIL is not to be used in new code.
1760                          *
1761                          * All __GFP_NOFAIL callers should be fixed so that they
1762                          * properly detect and handle allocation failures.
1763                          *
1764                          * We most definitely don't want callers attempting to
1765                          * allocate greater than order-1 page units with
1766                          * __GFP_NOFAIL.
1767                          */
1768                         WARN_ON_ONCE(order > 1);
1769                 }
1770                 spin_lock_irqsave(&zone->lock, flags);
1771                 page = __rmqueue(zone, order, migratetype);
1772                 spin_unlock(&zone->lock);
1773                 if (!page)
1774                         goto failed;
1775                 __mod_zone_freepage_state(zone, -(1 << order),
1776                                           get_freepage_migratetype(page));
1777         }
1778 
1779         __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
1780         if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
1781             !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
1782                 set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
1783 
1784         __count_zone_vm_events(PGALLOC, zone, 1 << order);
1785         zone_statistics(preferred_zone, zone, gfp_flags);
1786         local_irq_restore(flags);
1787 
1788         VM_BUG_ON_PAGE(bad_range(zone, page), page);
1789         return page;
1790 
1791 failed:
1792         local_irq_restore(flags);
1793         return NULL;
1794 }
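
/*
 * A hedged caller-side sketch of steering the order-0 fast path above:
 * requesting __GFP_COLD makes buffered_rmqueue() take from the pcp list
 * tail, which suits pages that will only be touched by a device
 * (hypothetical caller).
 */
static struct page *example_alloc_cold_page(void)
{
	return alloc_pages(GFP_KERNEL | __GFP_COLD, 0);
}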
1795 
1796 #ifdef CONFIG_FAIL_PAGE_ALLOC
1797 
1798 static struct {
1799         struct fault_attr attr;
1800 
1801         u32 ignore_gfp_highmem;
1802         u32 ignore_gfp_wait;
1803         u32 min_order;
1804 } fail_page_alloc = {
1805         .attr = FAULT_ATTR_INITIALIZER,
1806         .ignore_gfp_wait = 1,
1807         .ignore_gfp_highmem = 1,
1808         .min_order = 1,
1809 };
1810 
1811 static int __init setup_fail_page_alloc(char *str)
1812 {
1813         return setup_fault_attr(&fail_page_alloc.attr, str);
1814 }
1815 __setup("fail_page_alloc=", setup_fail_page_alloc);
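
/*
 * Example boot parameter, following the generic fault-injection format
 * <interval>,<probability>,<space>,<times> parsed by setup_fault_attr():
 *
 *	fail_page_alloc=1,10,0,-1
 *
 * i.e. consider every eligible allocation, fail with 10% probability,
 * no space window, unlimited number of failures.
 */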
1816 
1817 static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1818 {
1819         if (order < fail_page_alloc.min_order)
1820                 return false;
1821         if (gfp_mask & __GFP_NOFAIL)
1822                 return false;
1823         if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
1824                 return false;
1825         if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
1826                 return false;
1827 
1828         return should_fail(&fail_page_alloc.attr, 1 << order);
1829 }
1830 
1831 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1832 
1833 static int __init fail_page_alloc_debugfs(void)
1834 {
1835         umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
1836         struct dentry *dir;
1837 
1838         dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
1839                                         &fail_page_alloc.attr);
1840         if (IS_ERR(dir))
1841                 return PTR_ERR(dir);
1842 
1843         if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
1844                                 &fail_page_alloc.ignore_gfp_wait))
1845                 goto fail;
1846         if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
1847                                 &fail_page_alloc.ignore_gfp_highmem))
1848                 goto fail;
1849         if (!debugfs_create_u32("min-order", mode, dir,
1850                                 &fail_page_alloc.min_order))
1851                 goto fail;
1852 
1853         return 0;
1854 fail:
1855         debugfs_remove_recursive(dir);
1856 
1857         return -ENOMEM;
1858 }
1859 
1860 late_initcall(fail_page_alloc_debugfs);
1861 
1862 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1863 
1864 #else /* CONFIG_FAIL_PAGE_ALLOC */
1865 
1866 static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1867 {
1868         return false;
1869 }
1870 
1871 #endif /* CONFIG_FAIL_PAGE_ALLOC */
1872 
1873 /*
1874  * Return true if free pages are above 'mark'. This takes into account the order
1875  * of the allocation.
1876  */
1877 static bool __zone_watermark_ok(struct zone *z, unsigned int order,
1878                         unsigned long mark, int classzone_idx, int alloc_flags,
1879                         long free_pages)
1880 {
1881         /* free_pages may go negative - that's OK */
1882         long min = mark;
1883         int o;
1884         long free_cma = 0;
1885 
1886         free_pages -= (1 << order) - 1;
1887         if (alloc_flags & ALLOC_HIGH)
1888                 min -= min / 2;
1889         if (alloc_flags & ALLOC_HARDER)
1890                 min -= min / 4;
1891 #ifdef CONFIG_CMA
1892         /* If allocation can't use CMA areas don't use free CMA pages */
1893         if (!(alloc_flags & ALLOC_CMA))
1894                 free_cma = zone_page_state(z, NR_FREE_CMA_PAGES);
1895 #endif
1896 
1897         if (free_pages - free_cma <= min + z->lowmem_reserve[classzone_idx])
1898                 return false;
1899         for (o = 0; o < order; o++) {
1900                 /* At the next order, this order's pages become unavailable */
1901                 free_pages -= z->free_area[o].nr_free << o;
1902 
1903                 /* Require fewer higher order pages to be free */
1904                 min >>= 1;
1905 
1906                 if (free_pages <= min)
1907                         return false;
1908         }
1909         return true;
1910 }
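
/*
 * Worked example of the check above, with made-up numbers: mark = min =
 * 128, order = 2, no lowmem reserve, no CMA, no ALLOC_HIGH/ALLOC_HARDER,
 * free_pages = 200, nr_free[0] = 40, nr_free[1] = 20.
 *
 *   free_pages -= (1 << 2) - 1             -> 197
 *   base check: 197 > 128 + 0              -> pass
 *   o = 0: 197 - (40 << 0) = 157, min 64   -> 157 > 64, pass
 *   o = 1: 157 - (20 << 1) = 117, min 32   -> 117 > 32, pass
 *
 * So the watermark holds: enough of the remaining free memory sits in
 * blocks of order >= 2 to satisfy the request.
 */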
1911 
1912 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
1913                       int classzone_idx, int alloc_flags)
1914 {
1915         return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
1916                                         zone_page_state(z, NR_FREE_PAGES));
1917 }
1918 
1919 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
1920                         unsigned long mark, int classzone_idx, int alloc_flags)
1921 {
1922         long free_pages = zone_page_state(z, NR_FREE_PAGES);
1923 
1924         if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
1925                 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
1926 
1927         return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
1928                                                                 free_pages);
1929 }
1930 
1931 #ifdef CONFIG_NUMA
1932 /*
1933  * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
1934  * skip over zones that are not allowed by the cpuset, or that have
1935  * been recently (in last second) found to be nearly full.  See further
1936  * comments in mmzone.h.  Reduces cache footprint of zonelist scans
1937  * that have to skip over a lot of full or unallowed zones.
1938  *
1939  * If the zonelist cache is present in the passed zonelist, then
1940  * returns a pointer to the allowed node mask (either the current
1941  * task's mems_allowed, or node_states[N_MEMORY]).
1942  *
1943  * If the zonelist cache is not available for this zonelist, does
1944  * nothing and returns NULL.
1945  *
1946  * If the fullzones BITMAP in the zonelist cache is stale (more than
1947  * a second since last zapped) then we zap it out (clear its bits).
1948  *
1949  * We hold off even calling zlc_setup until after we've checked the
1950  * first zone in the zonelist, on the theory that most allocations will
1951  * be satisfied from that first zone, so best to examine that zone as
1952  * quickly as we can.
1953  */
1954 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1955 {
1956         struct zonelist_cache *zlc;     /* cached zonelist speedup info */
1957         nodemask_t *allowednodes;       /* zonelist_cache approximation */
1958 
1959         zlc = zonelist->zlcache_ptr;
1960         if (!zlc)
1961                 return NULL;
1962 
1963         if (time_after(jiffies, zlc->last_full_zap + HZ)) {
1964                 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1965                 zlc->last_full_zap = jiffies;
1966         }
1967 
1968         allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1969                                         &cpuset_current_mems_allowed :
1970                                         &node_states[N_MEMORY];
1971         return allowednodes;
1972 }
1973 
1974 /*
1975  * Given 'z' scanning a zonelist, run a couple of quick checks to see
1976  * if it is worth looking at further for free memory:
1977  *  1) Check that the zone isn't thought to be full (doesn't have its
1978  *     bit set in the zonelist_cache fullzones BITMAP).
1979  *  2) Check that the zone's node (obtained from the zonelist_cache
1980  *     z_to_n[] mapping) is allowed in the passed-in allowednodes mask.
1981  * Return true (non-zero) if zone is worth looking at further, or
1982  * else return false (zero) if it is not.
1983  *
1984  * This check -ignores- the distinction between various watermarks,
1985  * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
1986  * found to be full for any variation of these watermarks, it will
1987  * be considered full for up to one second by all requests, unless
1988  * we are so low on memory on all allowed nodes that we are forced
1989  * into the second scan of the zonelist.
1990  *
1991  * In the second scan we ignore this zonelist cache and exactly
1992  * apply the watermarks to all zones, even if it is slower to do so.
1993  * We are low on memory in the second scan, and should leave no stone
1994  * unturned looking for a free page.
1995  */
1996 static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1997                                                 nodemask_t *allowednodes)
1998 {
1999         struct zonelist_cache *zlc;     /* cached zonelist speedup info */
2000         int i;                          /* index of *z in zonelist zones */
2001         int n;                          /* node that zone *z is on */
2002 
2003         zlc = zonelist->zlcache_ptr;
2004         if (!zlc)
2005                 return 1;
2006 
2007         i = z - zonelist->_zonerefs;
2008         n = zlc->z_to_n[i];
2009 
2010         /* This zone is worth trying if it is allowed but not full */
2011         return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
2012 }
2013 
2014 /*
2015  * Given 'z' scanning a zonelist, set the corresponding bit in
2016  * zlc->fullzones, so that subsequent attempts to allocate a page
2017  * from that zone don't waste time re-examining it.
2018  */
2019 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
2020 {
2021         struct zonelist_cache *zlc;     /* cached zonelist speedup info */
2022         int i;                          /* index of *z in zonelist zones */
2023 
2024         zlc = zonelist->zlcache_ptr;
2025         if (!zlc)
2026                 return;
2027 
2028         i = z - zonelist->_zonerefs;
2029 
2030         set_bit(i, zlc->fullzones);
2031 }
2032 
2033 /*
2034  * clear all zones full, called after direct reclaim makes progress so that
2035  * a zone that was recently full is not skipped over for up to a second
2036  */
2037 static void zlc_clear_zones_full(struct zonelist *zonelist)
2038 {
2039         struct zonelist_cache *zlc;     /* cached zonelist speedup info */
2040 
2041         zlc = zonelist->zlcache_ptr;
2042         if (!zlc)
2043                 return;
2044 
2045         bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
2046 }
2047 
2048 static bool zone_local(struct zone *local_zone, struct zone *zone)
2049 {
2050         return local_zone->node == zone->node;
2051 }
2052 
2053 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
2054 {
2055         return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
2056                                 RECLAIM_DISTANCE;
2057 }
2058 
2059 #else   /* CONFIG_NUMA */
2060 
2061 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
2062 {
2063         return NULL;
2064 }
2065 
2066 static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
2067                                 nodemask_t *allowednodes)
2068 {
2069         return 1;
2070 }
2071 
2072 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
2073 {
2074 }
2075 
2076 static void zlc_clear_zones_full(struct zonelist *zonelist)
2077 {
2078 }
2079 
2080 static bool zone_local(struct zone *local_zone, struct zone *zone)
2081 {
2082         return true;
2083 }
2084 
2085 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
2086 {
2087         return true;
2088 }
2089 
2090 #endif  /* CONFIG_NUMA */
2091 
2092 static void reset_alloc_batches(struct zone *preferred_zone)
2093 {
2094         struct zone *zone = preferred_zone->zone_pgdat->node_zones;
2095 
2096         do {
2097                 mod_zone_page_state(zone, NR_ALLOC_BATCH,
2098                         high_wmark_pages(zone) - low_wmark_pages(zone) -
2099                         atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
2100                 clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
2101         } while (zone++ != preferred_zone);
2102 }
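
/*
 * Worked example of the reset above: with high_wmark = 1000 pages,
 * low_wmark = 800 and NR_ALLOC_BATCH currently at -50, the applied delta
 * is 1000 - 800 - (-50) = 250, leaving NR_ALLOC_BATCH at exactly
 * high - low = 200 for the next fairness round.
 */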
2103 
2104 /*
2105  * get_page_from_freelist goes through the zonelist trying to allocate
2106  * a page.
2107  */
2108 static struct page *
2109 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
2110                                                 const struct alloc_context *ac)
2111 {
2112         struct zonelist *zonelist = ac->zonelist;
2113         struct zoneref *z;
2114         struct page *page = NULL;
2115         struct zone *zone;
2116         nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
2117         int zlc_active = 0;             /* set if using zonelist_cache */
2118         int did_zlc_setup = 0;          /* just call zlc_setup() one time */
2119         bool consider_zone_dirty = (alloc_flags & ALLOC_WMARK_LOW) &&
2120                                 (gfp_mask & __GFP_WRITE);
2121         int nr_fair_skipped = 0;
2122         bool zonelist_rescan;
2123 
2124 zonelist_scan:
2125         zonelist_rescan = false;
2126 
2127         /*
2128          * Scan zonelist, looking for a zone with enough free.
2129          * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
2130          */
2131         for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
2132                                                                 ac->nodemask) {
2133                 unsigned long mark;
2134 
2135                 if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
2136                         !zlc_zone_worth_trying(zonelist, z, allowednodes))
2137                                 continue;
2138                 if (cpusets_enabled() &&
2139                         (alloc_flags & ALLOC_CPUSET) &&
2140                         !cpuset_zone_allowed(zone, gfp_mask))
2141                                 continue;
2142                 /*
2143                  * Distribute pages in proportion to the individual
2144                  * zone size to ensure fair page aging.  The zone a
2145                  * page was allocated in should have no effect on the
2146                  * time the page spends in memory before being reclaimed.
2147                  */
2148                 if (alloc_flags & ALLOC_FAIR) {
2149                         if (!zone_local(ac->preferred_zone, zone))
2150                                 break;
2151                         if (test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) {
2152                                 nr_fair_skipped++;
2153                                 continue;
2154                         }
2155                 }
2156                 /*
2157                  * When allocating a page cache page for writing, we
2158                  * want to get it from a zone that is within its dirty
2159                  * limit, such that no single zone holds more than its
2160                  * proportional share of globally allowed dirty pages.
2161                  * The dirty limits take into account the zone's
2162                  * lowmem reserves and high watermark so that kswapd
2163                  * should be able to balance it without having to
2164                  * write pages from its LRU list.
2165                  *
2166                  * This may look like it could increase pressure on
2167                  * lower zones by failing allocations in higher zones
2168                  * before they are full.  But the pages that do spill
2169                  * over are limited as the lower zones are protected
2170                  * by this very same mechanism.  It should not become
2171                  * a practical burden to them.
2172                  *
2173                  * XXX: For now, allow allocations to potentially
2174                  * exceed the per-zone dirty limit in the slowpath
2175                  * (ALLOC_WMARK_LOW unset) before going into reclaim,
2176                  * which is important when on a NUMA setup the allowed
2177                  * zones are together not big enough to reach the
2178                  * global limit.  The proper fix for these situations
2179                  * will require awareness of zones in the
2180                  * dirty-throttling and the flusher threads.
2181                  */
2182                 if (consider_zone_dirty && !zone_dirty_ok(zone))
2183                         continue;
2184 
2185                 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
2186                 if (!zone_watermark_ok(zone, order, mark,
2187                                        ac->classzone_idx, alloc_flags)) {
2188                         int ret;
2189 
2190                         /* Checked here to keep the fast path fast */
2191                         BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
2192                         if (alloc_flags & ALLOC_NO_WATERMARKS)
2193                                 goto try_this_zone;
2194 
2195                         if (IS_ENABLED(CONFIG_NUMA) &&
2196                                         !did_zlc_setup && nr_online_nodes > 1) {
2197                                 /*
2198                                  * We do zlc_setup only if there are multiple
2199                                  * nodes, and not before considering the first
2200                                  * zone allowed by the cpuset.
2201                                  */
2202                                 allowednodes = zlc_setup(zonelist, alloc_flags);
2203                                 zlc_active = 1;
2204                                 did_zlc_setup = 1;
2205                         }
2206 
2207                         if (zone_reclaim_mode == 0 ||
2208                             !zone_allows_reclaim(ac->preferred_zone, zone))
2209                                 goto this_zone_full;
2210 
2211                         /*
2212                          * As we may have just activated ZLC, check if the first
2213                          * eligible zone has failed zone_reclaim recently.
2214                          */
2215                         if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
2216                                 !zlc_zone_worth_trying(zonelist, z, allowednodes))
2217                                 continue;
2218 
2219                         ret = zone_reclaim(zone, gfp_mask, order);
2220                         switch (ret) {
2221                         case ZONE_RECLAIM_NOSCAN:
2222                                 /* did not scan */
2223                                 continue;
2224                         case ZONE_RECLAIM_FULL:
2225                                 /* scanned but unreclaimable */
2226                                 continue;
2227                         default:
2228                                 /* did we reclaim enough */
2229                                 if (zone_watermark_ok(zone, order, mark,
2230                                                 ac->classzone_idx, alloc_flags))
2231                                         goto try_this_zone;
2232 
2233                                 /*
2234                                  * Failed to reclaim enough to meet watermark.
2235                                  * Only mark the zone full if checking the min
2236                                  * watermark or if we failed to reclaim just
2237                                  * 1<<order pages or else the page allocator
2238                                  * fastpath will prematurely mark zones full
2239                                  * when the watermark is between the low and
2240                                  * min watermarks.
2241                                  */
2242                                 if (((alloc_flags & ALLOC_WMARK_MASK) == ALLOC_WMARK_MIN) ||
2243                                     ret == ZONE_RECLAIM_SOME)
2244                                         goto this_zone_full;
2245 
2246                                 continue;
2247                         }
2248                 }
2249 
2250 try_this_zone:
2251                 page = buffered_rmqueue(ac->preferred_zone, zone, order,
2252                                                 gfp_mask, ac->migratetype);
2253                 if (page) {
2254                         if (prep_new_page(page, order, gfp_mask, alloc_flags))
2255                                 goto try_this_zone;
2256                         return page;
2257                 }
2258 this_zone_full:
2259                 if (IS_ENABLED(CONFIG_NUMA) && zlc_active)
2260                         zlc_mark_zone_full(zonelist, z);
2261         }
2262 
2263         /*
2264          * The first pass makes sure allocations are spread fairly within the
2265          * local node.  However, the local node might have free pages left
2266          * after the fairness batches are exhausted, and remote zones haven't
2267          * even been considered yet.  Try once more without fairness, and
2268          * include remote zones now, before entering the slowpath and waking
2269          * kswapd: prefer spilling to a remote zone over swapping locally.
2270          */
2271         if (alloc_flags & ALLOC_FAIR) {
2272                 alloc_flags &= ~ALLOC_FAIR;
2273                 if (nr_fair_skipped) {
2274                         zonelist_rescan = true;
2275                         reset_alloc_batches(ac->preferred_zone);
2276                 }
2277                 if (nr_online_nodes > 1)
2278                         zonelist_rescan = true;
2279         }
2280 
2281         if (unlikely(IS_ENABLED(CONFIG_NUMA) && zlc_active)) {
2282                 /* Disable zlc cache for second zonelist scan */
2283                 zlc_active = 0;
2284                 zonelist_rescan = true;
2285         }
2286 
2287         if (zonelist_rescan)
2288                 goto zonelist_scan;
2289 
2290         return NULL;
2291 }
2292 
2293 /*
2294  * Large machines with many possible nodes should not always dump per-node
2295  * meminfo in irq context.
2296  */
2297 static inline bool should_suppress_show_mem(void)
2298 {
2299         bool ret = false;
2300 
2301 #if NODES_SHIFT > 8
2302         ret = in_interrupt();
2303 #endif
2304         return ret;
2305 }
2306 
2307 static DEFINE_RATELIMIT_STATE(nopage_rs,
2308                 DEFAULT_RATELIMIT_INTERVAL,
2309                 DEFAULT_RATELIMIT_BURST);
2310 
2311 void warn_alloc_failed(gfp_t gfp_mask, unsigned int order, const char *fmt, ...)
2312 {
2313         unsigned int filter = SHOW_MEM_FILTER_NODES;
2314 
2315         if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
2316             debug_guardpage_minorder() > 0)
2317                 return;
2318 
2319         /*
2320          * This documents exceptions given to allocations in certain
2321          * contexts that are allowed to allocate outside current's set
2322          * of allowed nodes.
2323          */
2324         if (!(gfp_mask & __GFP_NOMEMALLOC))
2325                 if (test_thread_flag(TIF_MEMDIE) ||
2326                     (current->flags & (PF_MEMALLOC | PF_EXITING)))
2327                         filter &= ~SHOW_MEM_FILTER_NODES;
2328         if (in_interrupt() || !(gfp_mask & __GFP_WAIT))
2329                 filter &= ~SHOW_MEM_FILTER_NODES;
2330 
2331         if (fmt) {
2332                 struct va_format vaf;
2333                 va_list args;
2334 
2335                 va_start(args, fmt);
2336 
2337                 vaf.fmt = fmt;
2338                 vaf.va = &args;
2339 
2340                 pr_warn("%pV", &vaf);
2341 
2342                 va_end(args);
2343         }
2344 
2345         pr_warn("%s: page allocation failure: order:%u, mode:0x%x\n",
2346                 current->comm, order, gfp_mask);
2347 
2348         dump_stack();
2349         if (!should_suppress_show_mem())
2350                 show_mem(filter);
2351 }
2352 
2353 static inline int
2354 should_alloc_retry(gfp_t gfp_mask, unsigned int order,
2355                                 unsigned long did_some_progress,
2356                                 unsigned long pages_reclaimed)
2357 {
2358         /* Do not loop if specifically requested */
2359         if (gfp_mask & __GFP_NORETRY)
2360                 return 0;
2361 
2362         /* Always retry if specifically requested */
2363         if (gfp_mask & __GFP_NOFAIL)
2364                 return 1;
2365 
2366         /*
2367          * Suspend converts GFP_KERNEL to __GFP_WAIT which can prevent reclaim
2368          * making forward progress without invoking OOM. Suspend also disables
2369          * storage devices so kswapd will not help. Bail if we are suspending.
2370          */
2371         if (!did_some_progress && pm_suspended_storage())
2372                 return 0;
2373 
2374         /*
2375          * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
2376          * means __GFP_NOFAIL, but that may not be true in other
2377          * implementations.
2378          */
2379         if (order <= PAGE_ALLOC_COSTLY_ORDER)
2380                 return 1;
2381 
2382         /*
2383          * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
2384          * specified, then we retry until we no longer reclaim any pages
2385          * (above), or we've reclaimed an order of pages at least as
2386          * large as the allocation's order. In both cases, if the
2387          * allocation still fails, we stop retrying.
2388          */
2389         if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
2390                 return 1;
2391 
2392         return 0;
2393 }
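
/*
 * Examples of the policy above: an order-1 GFP_KERNEL request
 * (<= PAGE_ALLOC_COSTLY_ORDER) keeps retrying; an order-3 request with
 * __GFP_REPEAT retries only while fewer than 1 << 3 = 8 pages have been
 * reclaimed in total; an order-3 request without __GFP_REPEAT or
 * __GFP_NOFAIL gives up immediately.
 */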
2394 
2395 static inline struct page *
2396 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
2397         const struct alloc_context *ac, unsigned long *did_some_progress)
2398 {
2399         struct page *page;
2400 
2401         *did_some_progress = 0;
2402 
2403         /*
2404          * Acquire the per-zone oom lock for each zone.  If that
2405          * fails, somebody else is making progress for us.
2406          */
2407         if (!oom_zonelist_trylock(ac->zonelist, gfp_mask)) {
2408                 *did_some_progress = 1;
2409                 schedule_timeout_uninterruptible(1);
2410                 return NULL;
2411         }
2412 
2413         /*
2414          * Go through the zonelist yet one more time, keeping a very high
2415          * watermark here; this is only to catch a parallel OOM kill, and we
2416          * must fail if we're still under heavy pressure.
2417          */
2418         page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order,
2419                                         ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
2420         if (page)
2421                 goto out;
2422 
2423         if (!(gfp_mask & __GFP_NOFAIL)) {
2424                 /* Coredumps can quickly deplete all memory reserves */
2425                 if (current->flags & PF_DUMPCORE)
2426                         goto out;
2427                 /* The OOM killer will not help higher order allocs */
2428                 if (order > PAGE_ALLOC_COSTLY_ORDER)
2429                         goto out;
2430                 /* The OOM killer does not needlessly kill tasks for lowmem */
2431                 if (ac->high_zoneidx < ZONE_NORMAL)
2432                         goto out;
2433                 /* The OOM killer does not compensate for light reclaim */
2434                 if (!(gfp_mask & __GFP_FS)) {
2435                         /*
2436                          * XXX: Page reclaim didn't yield anything,
2437                          * and the OOM killer can't be invoked, but
2438                          * keep looping as per should_alloc_retry().
2439                          */
2440                         *did_some_progress = 1;
2441                         goto out;
2442                 }
2443                 /* The OOM killer may not free memory on a specific node */
2444                 if (gfp_mask & __GFP_THISNODE)
2445                         goto out;
2446         }
2447         /* Exhausted what can be done so it's blamo time */
2448         if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false)
2449                         || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL))
2450                 *did_some_progress = 1;
2451 out:
2452         oom_zonelist_unlock(ac->zonelist, gfp_mask);
2453         return page;
2454 }
2455 
2456 #ifdef CONFIG_COMPACTION
2457 /* Try memory compaction for high-order allocations before reclaim */
2458 static struct page *
2459 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
2460                 int alloc_flags, const struct alloc_context *ac,
2461                 enum migrate_mode mode, int *contended_compaction,
2462                 bool *deferred_compaction)
2463 {
2464         unsigned long compact_result;
2465         struct page *page;
2466 
2467         if (!order)
2468                 return NULL;
2469 
2470         current->flags |= PF_MEMALLOC;
2471         compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
2472                                                 mode, contended_compaction);
2473         current->flags &= ~PF_MEMALLOC;
2474 
2475         switch (compact_result) {
2476         case COMPACT_DEFERRED:
2477                 *deferred_compaction = true;
2478                 /* fall-through */
2479         case COMPACT_SKIPPED:
2480                 return NULL;
2481         default:
2482                 break;
2483         }
2484 
2485         /*
2486          * At least in one zone compaction wasn't deferred or skipped, so let's
2487          * count a compaction stall
2488          */
2489         count_vm_event(COMPACTSTALL);
2490 
2491         page = get_page_from_freelist(gfp_mask, order,
2492                                         alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
2493 
2494         if (page) {
2495                 struct zone *zone = page_zone(page);
2496 
2497                 zone->compact_blockskip_flush = false;
2498                 compaction_defer_reset(zone, order, true);
2499                 count_vm_event(COMPACTSUCCESS);
2500                 return page;
2501         }
2502 
2503         /*
2504          * It's bad if a compaction run occurs and fails. The most likely reason
2505          * is that pages exist, but not enough to satisfy watermarks.
2506          */
2507         count_vm_event(COMPACTFAIL);
2508 
2509         cond_resched();
2510 
2511         return NULL;
2512 }
2513 #else
2514 static inline struct page *
2515 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
2516                 int alloc_flags, const struct alloc_context *ac,
2517                 enum migrate_mode mode, int *contended_compaction,
2518                 bool *deferred_compaction)
2519 {
2520         return NULL;
2521 }
2522 #endif /* CONFIG_COMPACTION */
2523 
2524 /* Perform direct synchronous page reclaim */
2525 static int
2526 __perform_reclaim(gfp_t gfp_mask, unsigned int order,
2527                                         const struct alloc_context *ac)
2528 {
2529         struct reclaim_state reclaim_state;
2530         int progress;
2531 
2532         cond_resched();
2533 
2534         /* We now go into synchronous reclaim */
2535         cpuset_memory_pressure_bump();
2536         current->flags |= PF_MEMALLOC;
2537         lockdep_set_current_reclaim_state(gfp_mask);
2538         reclaim_state.reclaimed_slab = 0;
2539         current->reclaim_state = &reclaim_state;
2540 
2541         progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
2542                                                                 ac->nodemask);
2543 
2544         current->reclaim_state = NULL;
2545         lockdep_clear_current_reclaim_state();
2546         current->flags &= ~PF_MEMALLOC;
2547 
2548         cond_resched();
2549 
2550         return progress;
2551 }
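
/*
 * The PF_MEMALLOC set/clear around try_to_free_pages() above is the
 * standard guard against reclaim recursing into itself; a minimal sketch
 * of the same idiom in a hypothetical helper (later kernels wrap this
 * pattern in dedicated save/restore helpers):
 */
static void example_noreclaim_section(void)
{
	bool outer = current->flags & PF_MEMALLOC;

	current->flags |= PF_MEMALLOC;
	/* allocations here skip direct reclaim (see the PF_MEMALLOC checks) */
	if (!outer)
		current->flags &= ~PF_MEMALLOC;
}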
2552 
2553 /* The really slow allocator path where we enter direct reclaim */
2554 static inline struct page *
2555 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
2556                 int alloc_flags, const struct alloc_context *ac,
2557                 unsigned long *did_some_progress)
2558 {
2559         struct page *page = NULL;
2560         bool drained = false;
2561 
2562         *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
2563         if (unlikely(!(*did_some_progress)))
2564                 return NULL;
2565 
2566         /* After successful reclaim, reconsider all zones for allocation */
2567         if (IS_ENABLED(CONFIG_NUMA))
2568                 zlc_clear_zones_full(ac->zonelist);
2569 
2570 retry:
2571         page = get_page_from_freelist(gfp_mask, order,
2572                                         alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
2573 
2574         /*
2575          * If an allocation failed after direct reclaim, it could be because
2576          * pages are pinned on the per-cpu lists. Drain them and try again
2577          */
2578         if (!page && !drained) {
2579                 drain_all_pages(NULL);
2580                 drained = true;
2581                 goto retry;
2582         }
2583 
2584         return page;
2585 }
2586 
2587 /*
2588  * This is called in the allocator slow-path if the allocation request is of
2589  * sufficient urgency to ignore watermarks and take other desperate measures
2590  */
2591 static inline struct page *
2592 __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
2593                                 const struct alloc_context *ac)
2594 {
2595         struct page *page;
2596 
2597         do {
2598                 page = get_page_from_freelist(gfp_mask, order,
2599                                                 ALLOC_NO_WATERMARKS, ac);
2600 
2601                 if (!page && gfp_mask & __GFP_NOFAIL)
2602                         wait_iff_congested(ac->preferred_zone, BLK_RW_ASYNC,
2603                                                                         HZ/50);
2604         } while (!page && (gfp_mask & __GFP_NOFAIL));
2605 
2606         return page;
2607 }
2608 
2609 static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac)
2610 {
2611         struct zoneref *z;
2612         struct zone *zone;
2613 
2614         for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
2615                                                 ac->high_zoneidx, ac->nodemask)
2616                 wakeup_kswapd(zone, order, zone_idx(ac->preferred_zone));
2617 }
2618 
2619 static inline int
2620 gfp_to_alloc_flags(gfp_t gfp_mask)
2621 {
2622         int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
2623         const bool atomic = !(gfp_mask & (__GFP_WAIT | __GFP_NO_KSWAPD));
2624 
2625         /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
2626         BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
2627 
2628         /*
2629          * The caller may dip into page reserves a bit more if the caller
2630          * cannot run direct reclaim, or if the caller has realtime scheduling
2631          * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
2632          * set both ALLOC_HARDER (atomic == true) and ALLOC_HIGH (__GFP_HIGH).
2633          */
2634         alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
2635 
2636         if (atomic) {
2637                 /*
2638                  * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
2639                  * if it can't schedule.
2640                  */
2641                 if (!(gfp_mask & __GFP_NOMEMALLOC))
2642                         alloc_flags |= ALLOC_HARDER;
2643                 /*
2644                  * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
2645                  * comment for __cpuset_node_allowed().
2646                  */
2647                 alloc_flags &= ~ALLOC_CPUSET;
2648         } else if (unlikely(rt_task(current)) && !in_interrupt())
2649                 alloc_flags |= ALLOC_HARDER;
2650 
2651         if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
2652                 if (gfp_mask & __GFP_MEMALLOC)
2653                         alloc_flags |= ALLOC_NO_WATERMARKS;
2654                 else if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
2655                         alloc_flags |= ALLOC_NO_WATERMARKS;
2656                 else if (!in_interrupt() &&
2657                                 ((current->flags & PF_MEMALLOC) ||
2658                                  unlikely(test_thread_flag(TIF_MEMDIE))))
2659                         alloc_flags |= ALLOC_NO_WATERMARKS;
2660         }
2661 #ifdef CONFIG_CMA
2662         if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
2663                 alloc_flags |= ALLOC_CMA;
2664 #endif
2665         return alloc_flags;
2666 }
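
/*
 * Worked examples of the mapping above, using this tree's definitions:
 *
 *   GFP_KERNEL: __GFP_WAIT is set, so atomic == false; the result is
 *   ALLOC_WMARK_MIN | ALLOC_CPUSET.
 *
 *   GFP_ATOMIC (__GFP_HIGH, no __GFP_WAIT): atomic == true; the result is
 *   ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER with ALLOC_CPUSET
 *   cleared, letting the request dig deeper into the reserves.
 */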
2667 
2668 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
2669 {
2670         return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
2671 }
2672 
2673 static inline struct page *
2674 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
2675                                                 struct alloc_context *ac)
2676 {
2677         const gfp_t wait = gfp_mask & __GFP_WAIT;
2678         struct page *page = NULL;
2679         int alloc_flags;
2680         unsigned long pages_reclaimed = 0;
2681         unsigned long did_some_progress;
2682         enum migrate_mode migration_mode = MIGRATE_ASYNC;
2683         bool deferred_compaction = false;
2684         int contended_compaction = COMPACT_CONTENDED_NONE;
2685 
2686         /*
2687          * In the slowpath, we sanity check order to avoid ever trying to
2688          * reclaim >= MAX_ORDER areas which will never succeed. Callers may
2689          * be using allocators in order of preference for an area that is
2690          * too large.
2691          */
2692         if (order >= MAX_ORDER) {
2693                 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
2694                 return NULL;
2695         }
2696 
2697         /*
2698          * If this allocation cannot block and it is for a specific node, then
2699          * fail early.  There's no need to wakeup kswapd or retry for a
2700          * speculative node-specific allocation.
2701          */
2702         if (IS_ENABLED(CONFIG_NUMA) && (gfp_mask & __GFP_THISNODE) && !wait)
2703                 goto nopage;
2704 
2705 retry:
2706         if (!(gfp_mask & __GFP_NO_KSWAPD))
2707                 wake_all_kswapds(order, ac);
2708 
2709         /*
2710          * OK, we're below the kswapd watermark and have kicked background
2711          * reclaim. Now things get more complex, so set up alloc_flags according
2712          * to how we want to proceed.
2713          */
2714         alloc_flags = gfp_to_alloc_flags(gfp_mask);
2715 
2716         /*
2717          * Find the true preferred zone if the allocation is unconstrained by
2718          * cpusets.
2719          */
2720         if (!(alloc_flags & ALLOC_CPUSET) && !ac->nodemask) {
2721                 struct zoneref *preferred_zoneref;
2722                 preferred_zoneref = first_zones_zonelist(ac->zonelist,
2723                                 ac->high_zoneidx, NULL, &ac->preferred_zone);
2724                 ac->classzone_idx = zonelist_zone_idx(preferred_zoneref);
2725         }
2726 
2727         /* This is the last chance, in general, before the goto nopage. */
2728         page = get_page_from_freelist(gfp_mask, order,
2729                                 alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
2730         if (page)
2731                 goto got_pg;
2732 
2733         /* Allocate without watermarks if the context allows */
2734         if (alloc_flags & ALLOC_NO_WATERMARKS) {
2735                 /*
2736                  * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds
2737                  * that the allocation is high priority and these types of
2738                  * allocations are system rather than user oriented.
2739                  */
2740                 ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
2741 
2742                 page = __alloc_pages_high_priority(gfp_mask, order, ac);
2743 
2744                 if (page) {
2745                         goto got_pg;
2746                 }
2747         }
2748 
2749         /* Atomic allocations - we can't balance anything */
2750         if (!wait) {
2751                 /*
2752                  * All existing users of the deprecated __GFP_NOFAIL are
2753                  * blockable, so warn of any new users that actually allow this
2754                  * type of allocation to fail.
2755                  */
2756                 WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL);
2757                 goto nopage;
2758         }
2759 
2760         /* Avoid recursion of direct reclaim */
2761         if (current->flags & PF_MEMALLOC)
2762                 goto nopage;
2763 
2764         /* Avoid allocations with no watermarks from looping endlessly */
2765         if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
2766                 goto nopage;
2767 
2768         /*
2769          * Try direct compaction. The first pass is asynchronous. Subsequent
2770          * attempts after direct reclaim are synchronous
2771          */
2772         page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
2773                                         migration_mode,
2774                                         &contended_compaction,
2775                                         &deferred_compaction);
2776         if (page)
2777                 goto got_pg;
2778 
2779         /* Checks for THP-specific high-order allocations */
2780         if ((gfp_mask & GFP_TRANSHUGE) == GFP_TRANSHUGE) {
2781                 /*
2782                  * If compaction is deferred for high-order allocations, it is
2783                  * because sync compaction recently failed. If this is the case
2784                  * and the caller requested a THP allocation, we do not want
2785                  * to heavily disrupt the system, so we fail the allocation
2786                  * instead of entering direct reclaim.
2787                  */
2788                 if (deferred_compaction)
2789                         goto nopage;
2790 
2791                 /*
2792                  * In all zones where compaction was attempted (and not
2793                  * deferred or skipped), lock contention has been detected.
2794                  * For THP allocation we do not want to disrupt the others
2795                  * so we fallback to base pages instead.
2796                  */
2797                 if (contended_compaction == COMPACT_CONTENDED_LOCK)
2798                         goto nopage;
2799 
2800                 /*
2801                  * If compaction was aborted due to need_resched(), we do not
2802                  * want to further increase allocation latency, unless it is
2803                  * khugepaged trying to collapse.
2804                  */
2805                 if (contended_compaction == COMPACT_CONTENDED_SCHED
2806                         && !(current->flags & PF_KTHREAD))
2807                         goto nopage;
2808         }
2809 
2810         /*
2811          * It can become very expensive to allocate transparent hugepages at
2812          * fault, so use asynchronous memory compaction for THP unless it is
2813          * khugepaged trying to collapse.
2814          */
2815         if ((gfp_mask & GFP_TRANSHUGE) != GFP_TRANSHUGE ||
2816                                                 (current->flags & PF_KTHREAD))
2817                 migration_mode = MIGRATE_SYNC_LIGHT;
2818 
2819         /* Try direct reclaim and then allocating */
2820         page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
2821                                                         &did_some_progress);
2822         if (page)
2823                 goto got_pg;
2824 
2825         /* Check if we should retry the allocation */
2826         pages_reclaimed += did_some_progress;
2827         if (should_alloc_retry(gfp_mask, order, did_some_progress,
2828                                                 pages_reclaimed)) {
2829                 /*
2830                  * If we fail to make progress by freeing individual
2831                  * pages, but the allocation wants us to keep going,
2832                  * start OOM killing tasks.
2833                  */
2834                 if (!did_some_progress) {
2835                         page = __alloc_pages_may_oom(gfp_mask, order, ac,
2836                                                         &did_some_progress);
2837                         if (page)
2838                                 goto got_pg;
2839                         if (!did_some_progress)
2840                                 goto nopage;
2841                 }
2842                 /* Wait for some write requests to complete then retry */
2843                 wait_iff_congested(ac->preferred_zone, BLK_RW_ASYNC, HZ/50);
2844                 goto retry;
2845         } else {
2846                 /*
2847                  * High-order allocations do not necessarily loop after direct
2848                  * reclaim, and reclaim/compaction depends on compaction being
2849                  * called after reclaim, so call compaction directly if necessary.
2850                  */
2851                 page = __alloc_pages_direct_compact(gfp_mask, order,
2852                                         alloc_flags, ac, migration_mode,
2853                                         &contended_compaction,
2854                                         &deferred_compaction);
2855                 if (page)
2856                         goto got_pg;
2857         }
2858 
2859 nopage:
2860         warn_alloc_failed(gfp_mask, order, NULL);
2861 got_pg:
2862         return page;
2863 }
2864 
2865 /*
2866  * This is the 'heart' of the zoned buddy allocator.
2867  */
2868 struct page *
2869 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
2870                         struct zonelist *zonelist, nodemask_t *nodemask)
2871 {
2872         struct zoneref *preferred_zoneref;
2873         struct page *page = NULL;
2874         unsigned int cpuset_mems_cookie;
2875         int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
2876         gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
2877         struct alloc_context ac = {
2878                 .high_zoneidx = gfp_zone(gfp_mask),
2879                 .nodemask = nodemask,
2880                 .migratetype = gfpflags_to_migratetype(gfp_mask),
2881         };
2882 
2883         gfp_mask &= gfp_allowed_mask;
2884 
2885         lockdep_trace_alloc(gfp_mask);
2886 
2887         might_sleep_if(gfp_mask & __GFP_WAIT);
2888 
2889         if (should_fail_alloc_page(gfp_mask, order))
2890                 return NULL;
2891 
2892         /*
2893          * Check that the zones suitable for the gfp_mask contain at least one
2894          * valid zone. It's possible to have an empty zonelist as a result
2895          * of __GFP_THISNODE and a memoryless node.
2896          */
2897         if (unlikely(!zonelist->_zonerefs->zone))
2898                 return NULL;
2899 
2900         if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
2901                 alloc_flags |= ALLOC_CMA;
2902 
2903 retry_cpuset:
2904         cpuset_mems_cookie = read_mems_allowed_begin();
2905 
2906         /* We set it here, as __alloc_pages_slowpath might have changed it */
2907         ac.zonelist = zonelist;
2908         /* The preferred zone is used for statistics later */
2909         preferred_zoneref = first_zones_zonelist(ac.zonelist, ac.high_zoneidx,
2910                                 ac.nodemask ? : &cpuset_current_mems_allowed,
2911                                 &ac.preferred_zone);
2912         if (!ac.preferred_zone)
2913                 goto out;
2914         ac.classzone_idx = zonelist_zone_idx(preferred_zoneref);
2915 
2916         /* First allocation attempt */
2917         alloc_mask = gfp_mask|__GFP_HARDWALL;
2918         page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
2919         if (unlikely(!page)) {
2920                 /*
2921                  * Runtime PM, block IO and its error handling path
2922                  * can deadlock because I/O on the device might not
2923                  * complete.
2924                  */
2925                 alloc_mask = memalloc_noio_flags(gfp_mask);
2926 
2927                 page = __alloc_pages_slowpath(alloc_mask, order, &ac);
2928         }
2929 
2930         if (kmemcheck_enabled && page)
2931                 kmemcheck_pagealloc_alloc(page, order, gfp_mask);
2932 
2933         trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
2934 
2935 out:
2936         /*
2937          * When updating a task's mems_allowed, it is possible to race with
2938          * parallel threads in such a way that an allocation can fail while
2939          * the mask is being updated. If a page allocation is about to fail,
2940          * check if the cpuset changed during allocation and if so, retry.
2941          */
2942         if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2943                 goto retry_cpuset;
2944 
2945         return page;
2946 }
2947 EXPORT_SYMBOL(__alloc_pages_nodemask);
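/*
 * Illustrative sketch (editor's addition, not part of page_alloc.c): callers
 * normally reach __alloc_pages_nodemask() through the alloc_pages() /
 * alloc_pages_node() wrappers, which supply the zonelist for the current (or
 * requested) node. example_grab_buffer/example_release_buffer are hypothetical
 * helpers showing the typical pairing with __free_pages().
 */
#include <linux/gfp.h>
#include <linux/mm.h>

static void *example_grab_buffer(unsigned int order)
{
	/* GFP_KERNEL may sleep; the slowpath above handles reclaim/compaction */
	struct page *page = alloc_pages(GFP_KERNEL, order);

	if (!page)
		return NULL;
	return page_address(page);	/* lowmem pages only in this sketch */
}

static void example_release_buffer(void *addr, unsigned int order)
{
	if (addr)
		__free_pages(virt_to_page(addr), order);
}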
2948 
2949 /*
2950  * Common helper functions.
2951  */
2952 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
2953 {
2954         struct page *page;
2955 
2956         /*
2957          * __get_free_pages() returns a kernel virtual address, which cannot
2958          * represent a highmem page
2959          */
2960         VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
2961 
2962         page = alloc_pages(gfp_mask, order);
2963         if (!page)
2964                 return 0;
2965         return (unsigned long) page_address(page);
2966 }
2967 EXPORT_SYMBOL(__get_free_pages);
2968 
2969 unsigned long get_zeroed_page(gfp_t gfp_mask)
2970 {
2971         return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
2972 }
2973 EXPORT_SYMBOL(get_zeroed_page);
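/*
 * Illustrative sketch (editor's addition, not part of page_alloc.c): typical
 * use of the helpers above. Because __get_free_pages() hands back a kernel
 * virtual address, only non-highmem GFP flags are valid, and the buffer must
 * be released with free_pages() using the same order. example_use_free_pages
 * is a hypothetical function, not a kernel API.
 */
#include <linux/errno.h>
#include <linux/gfp.h>

static int example_use_free_pages(void)
{
	unsigned long zeroed = get_zeroed_page(GFP_KERNEL);	/* one zeroed page */
	unsigned long big = __get_free_pages(GFP_KERNEL, 2);	/* 4 contiguous pages */

	if (!zeroed || !big) {
		free_pages(zeroed, 0);	/* free_pages() ignores a 0 address */
		free_pages(big, 2);
		return -ENOMEM;
	}

	/* ... use the buffers ... */

	free_pages(zeroed, 0);
	free_pages(big, 2);
	return 0;
}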
2974 
2975 void __free_pages(struct page *page, unsigned int order)
2976 {
2977         if (put_page_testzero(page)) {
2978                 if (order == 0)
2979                         free_hot_cold_page(page, false);
2980                 else
2981                         __free_pages_ok(page, order);
2982         }
2983 }
2984 
2985 EXPORT_SYMBOL(__free_pages);
2986 
2987 void free_pages(unsigned long addr, unsigned int order)
2988 {
2989         if (addr != 0) {
2990                 VM_BUG_ON(!virt_addr_valid((void *)addr));
2991                 __free_pages(virt_to_page((void *)addr), order);
2992         }
2993 }
2994 
2995 EXPORT_SYMBOL(free_pages);
2996 
2997 /*
2998  * alloc_kmem_pages charges newly allocated pages to the kmem resource counter
2999  * of the current memory cgroup.
3000  *
3001  * It should be used when the caller would like to use kmalloc, but since the
3002  * allocation is large, it has to fall back to the page allocator.
3003  */
3004 struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order)
3005 {
3006         struct page *page;
3007         struct mem_cgroup *memcg = NULL;
3008 
3009         if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
3010                 return NULL;
3011         page = alloc_pages(gfp_mask, order);
3012         memcg_kmem_commit_charge(page, memcg, order);
3013         return page;
3014 }
3015 
3016 struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
3017 {
3018         struct page *page;
3019         struct mem_cgroup *memcg = NULL;
3020 
3021         if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
3022                 return NULL;
3023         page = alloc_pages_node(nid, gfp_mask, order);
3024         memcg_kmem_commit_charge(page, memcg, order);
3025         return page;
3026 }
3027 
3028 /*
3029  * __free_kmem_pages and free_kmem_pages will free pages allocated with
3030  * alloc_kmem_pages.
3031  */
3032 void __free_kmem_pages(struct page *page, unsigned int order)
3033 {
3034         memcg_kmem_uncharge_pages(page, order);
3035         __free_pages(page, order);
3036 }
3037 
3038 void free_kmem_pages(unsigned long addr, unsigned int order)
3039 {
3040         if (addr != 0) {
3041                 VM_BUG_ON(!virt_addr_valid((void *)addr));
3042                 __free_kmem_pages(virt_to_page((void *)addr), order);
3043         }
3044 }
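/*
 * Illustrative sketch (editor's addition, not part of page_alloc.c): how a
 * kmalloc-style caller might fall back to alloc_kmem_pages() for a large,
 * kmemcg-charged buffer, as the comment above alloc_kmem_pages() describes.
 * example_big_alloc/example_big_free are hypothetical helpers.
 */
#include <linux/gfp.h>
#include <linux/mm.h>

static void *example_big_alloc(size_t size)
{
	unsigned int order = get_order(size);
	struct page *page = alloc_kmem_pages(GFP_KERNEL, order);

	return page ? page_address(page) : NULL;
}

static void example_big_free(void *addr, size_t size)
{
	if (addr)
		__free_kmem_pages(virt_to_page(addr), get_order(size));
}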
3045 
3046 static void *make_alloc_exact(unsigned long addr, unsigned int order,
3047                 size_t size)
3048 {
3049         if (addr) {
3050                 unsigned long alloc_end = addr + (PAGE_SIZE << order);
3051                 unsigned long used = addr + PAGE_ALIGN(size);
3052 
3053                 split_page(virt_to_page((void *)addr), order);
3054                 while (used < alloc_end) {
3055                         free_page(used);
3056                         used += PAGE_SIZE;
3057                 }
3058         }
3059         return (void *)addr;
3060 }
3061 
3062 /**
3063  * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
3064  * @size: the number of bytes to allocate
3065  * @gfp_mask: GFP flags for the allocation
3066  *
3067  * This function is similar to alloc_pages(), except that it allocates the
3068  * minimum number of pages to satisfy the request.  alloc_pages() can only
3069  * allocate memory in power-of-two pages.
3070  *
3071  * This function is also limited by MAX_ORDER.
3072  *
3073  * Memory allocated by this function must be released by free_pages_exact().
3074  */
3075 void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
3076 {
3077         unsigned int order = get_order(size);
3078         unsigned long addr;
3079 
3080         addr = __get_free_pages(gfp_mask, order);
3081         return make_alloc_exact(addr, order, size);
3082 }
3083 EXPORT_SYMBOL(alloc_pages_exact);
3084 
3085 /**
3086  * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
3087  *                         pages on a node.
3088  * @nid: the preferred node ID where memory should be allocated
3089  * @size: the number of bytes to allocate
3090  * @gfp_mask: GFP flags for the allocation
3091  *
3092  * Like alloc_pages_exact(), but try to allocate on node nid first before falling
3093  * back.
3094  * Note this is not alloc_pages_exact_node(), which allocates on a specific
3095  * node but is not exact.
3096  */
3097 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
3098 {
3099         unsigned int order = get_order(size);
3100         struct page *p = alloc_pages_node(nid, gfp_mask, order);
3101         if (!p)
3102                 return NULL;
3103         return make_alloc_exact((unsigned long)page_address(p), order, size);
3104 }
3105 
3106 /**
3107  * free_pages_exact - release memory allocated via alloc_pages_exact()
3108  * @virt: the value returned by alloc_pages_exact.
3109  * @size: size of allocation, same value as passed to alloc_pages_exact().
3110  *
3111  * Release the memory allocated by a previous call to alloc_pages_exact.
3112  */
3113 void free_pages_exact(void *virt, size_t size)
3114 {
3115         unsigned long addr = (unsigned long)virt;
3116         unsigned long end = addr + PAGE_ALIGN(size);
3117 
3118         while (addr < end) {
3119                 free_page(addr);
3120                 addr += PAGE_SIZE;
3121         }
3122 }
3123 EXPORT_SYMBOL(free_pages_exact);
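/*
 * Illustrative sketch (editor's addition, not part of page_alloc.c): the
 * exact-size semantics described above. A 20 KB request on a 4 KB-page system
 * takes 5 pages here, rather than the 8 pages an order-3 alloc_pages() call
 * would pin; the tail pages are handed back by make_alloc_exact().
 * example_exact_buffer is a hypothetical function.
 */
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static int example_exact_buffer(void)
{
	size_t size = 20 * 1024;	/* 5 pages with PAGE_SIZE == 4096 */
	void *buf = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);

	if (!buf)
		return -ENOMEM;

	/* ... use buf ... */

	free_pages_exact(buf, size);	/* must pass the same size back */
	return 0;
}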
3124 
3125 /**
3126  * nr_free_zone_pages - count number of pages beyond high watermark
3127  * @offset: The zone index of the highest zone
3128  *
3129  * nr_free_zone_pages() counts the number of pages which are beyond the
3130  * high watermark within all zones at or below a given zone index.  For each
3131  * zone, the number of pages is calculated as:
3132  *     managed_pages - high_pages
3133  */
3134 static unsigned long nr_free_zone_pages(int offset)
3135 {
3136         struct zoneref *z;
3137         struct zone *zone;
3138 
3139         /* Just pick one node, since fallback list is circular */
3140         unsigned long sum = 0;
3141 
3142         struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
3143 
3144         for_each_zone_zonelist(zone, z, zonelist, offset) {
3145                 unsigned long size = zone->managed_pages;
3146                 unsigned long high = high_wmark_pages(zone);
3147                 if (size > high)
3148                         sum += size - high;
3149         }
3150 
3151         return sum;
3152 }
3153 
3154 /**
3155  * nr_free_buffer_pages - count number of pages beyond high watermark
3156  *
3157  * nr_free_buffer_pages() counts the number of pages which are beyond the high
3158  * watermark within ZONE_DMA and ZONE_NORMAL.
3159  */
3160 unsigned long nr_free_buffer_pages(void)
3161 {
3162         return nr_free_zone_pages(gfp_zone(GFP_USER));
3163 }
3164 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
3165 
3166 /**
3167  * nr_free_pagecache_pages - count number of pages beyond high watermark
3168  *
3169  * nr_free_pagecache_pages() counts the number of pages which are beyond the
3170  * high watermark within all zones.
3171  */
3172 unsigned long nr_free_pagecache_pages(void)
3173 {
3174         return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
3175 }
3176 
3177 static inline void show_node(struct zone *zone)
3178 {
3179         if (IS_ENABLED(CONFIG_NUMA))
3180                 printk("Node %d ", zone_to_nid(zone));
3181 }
3182 
3183 void si_meminfo(struct sysinfo *val)
3184 {
3185         val->totalram = totalram_pages;
3186         val->sharedram = global_page_state(NR_SHMEM);
3187         val->freeram = global_page_state(NR_FREE_PAGES);
3188         val->bufferram = nr_blockdev_pages();
3189         val->totalhigh = totalhigh_pages;
3190         val->freehigh = nr_free_highpages();
3191         val->mem_unit = PAGE_SIZE;
3192 }
3193 
3194 EXPORT_SYMBOL(si_meminfo);
3195 
3196 #ifdef CONFIG_NUMA
3197 void si_meminfo_node(struct sysinfo *val, int nid)
3198 {
3199         int zone_type;          /* needs to be signed */
3200         unsigned long managed_pages = 0;
3201         pg_data_t *pgdat = NODE_DATA(nid);
3202 
3203         for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
3204                 managed_pages += pgdat->node_zones[zone_type].managed_pages;
3205         val->totalram = managed_pages;
3206         val->sharedram = node_page_state(nid, NR_SHMEM);
3207         val->freeram = node_page_state(nid, NR_FREE_PAGES);
3208 #ifdef CONFIG_HIGHMEM
3209         val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].managed_pages;
3210         val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
3211                         NR_FREE_PAGES);
3212 #else
3213         val->totalhigh = 0;
3214         val->freehigh = 0;
3215 #endif
3216         val->mem_unit = PAGE_SIZE;
3217 }
3218 #endif
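/*
 * Illustrative sketch (editor's addition, not part of page_alloc.c): an
 * in-kernel consumer of si_meminfo(), e.g. for a diagnostic printout. The
 * page counters are in units of val.mem_unit bytes (PAGE_SIZE here).
 * example_report_memory is a hypothetical function.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sysinfo.h>

static void example_report_memory(void)
{
	struct sysinfo val;

	si_meminfo(&val);
	pr_info("RAM: %lu of %lu pages free (page size %u bytes)\n",
		(unsigned long)val.freeram,
		(unsigned long)val.totalram,
		val.mem_unit);
}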
3219 
3220 /*
3221  * Determine whether the node should be displayed or not, depending on whether
3222  * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
3223  */
3224 bool skip_free_areas_node(unsigned int flags, int nid)
3225 {
3226         bool ret = false;
3227         unsigned int cpuset_mems_cookie;
3228 
3229         if (!(flags & SHOW_MEM_FILTER_NODES))
3230                 goto out;
3231 
3232         do {
3233                 cpuset_mems_cookie = read_mems_allowed_begin();
3234                 ret = !node_isset(nid, cpuset_current_mems_allowed);
3235         } while (read_mems_allowed_retry(cpuset_mems_cookie));
3236 out:
3237         return ret;
3238 }
3239 
3240 #define K(x) ((x) << (PAGE_SHIFT-10))
3241 
3242 static void show_migration_types(unsigned char type)
3243 {
3244         static const char types[MIGRATE_TYPES] = {
3245                 [MIGRATE_UNMOVABLE]     = 'U',
3246                 [MIGRATE_RECLAIMABLE]   = 'E',
3247                 [MIGRATE_MOVABLE]       = 'M',
3248                 [MIGRATE_RESERVE]       = 'R',
3249 #ifdef CONFIG_CMA
3250                 [MIGRATE_CMA]           = 'C',
3251 #endif
3252 #ifdef CONFIG_MEMORY_ISOLATION
3253                 [MIGRATE_ISOLATE]       = 'I',
3254 #endif
3255         };
3256         char tmp[MIGRATE_TYPES + 1];
3257         char *p = tmp;
3258         int i;
3259 
3260         for (i = 0; i < MIGRATE_TYPES; i++) {
3261                 if (type & (1 << i))
3262                         *p++ = types[i];
3263         }
3264 
3265         *p = '\0';
3266         printk("(%s) ", tmp);
3267 }
3268 
3269 /*
3270  * Show free area list (used inside shift_scroll-lock stuff)
3271  * We also calculate the percentage fragmentation. We do this by counting the
3272  * memory on each free list with the exception of the first item on the list.
3273  *
3274  * Bits in @filter:
3275  * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
3276  *   cpuset.
3277  */
3278 void show_free_areas(unsigned int filter)
3279 {
3280         unsigned long free_pcp = 0;
3281         int cpu;
3282         struct zone *zone;
3283 
3284         for_each_populated_zone(zone) {
3285                 if (skip_free_areas_node(filter, zone_to_nid(zone)))
3286                         continue;
3287 
3288                 for_each_online_cpu(cpu)
3289                         free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
3290         }
3291 
3292         printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
3293                 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
3294                 " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n"
3295                 " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
3296                 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
3297                 " free:%lu free_pcp:%lu free_cma:%lu\n",
3298                 global_page_state(NR_ACTIVE_ANON),
3299                 global_page_state(NR_INACTIVE_ANON),
3300                 global_page_state(NR_ISOLATED_ANON),
3301                 global_page_state(NR_ACTIVE_FILE),
3302                 global_page_state(NR_INACTIVE_FILE),
3303                 global_page_state(NR_ISOLATED_FILE),
3304                 global_page_state(NR_UNEVICTABLE),
3305                 global_page_state(NR_FILE_DIRTY),
3306                 global_page_state(NR_WRITEBACK),
3307                 global_page_state(NR_UNSTABLE_NFS),
3308                 global_page_state(NR_SLAB_RECLAIMABLE),
3309                 global_page_state(NR_SLAB_UNRECLAIMABLE),
3310                 global_page_state(NR_FILE_MAPPED),
3311                 global_page_state(NR_SHMEM),
3312                 global_page_state(NR_PAGETABLE),
3313                 global_page_state(NR_BOUNCE),
3314                 global_page_state(NR_FREE_PAGES),
3315                 free_pcp,
3316                 global_page_state(NR_FREE_CMA_PAGES));
3317 
3318         for_each_populated_zone(zone) {
3319                 int i;
3320 
3321                 if (skip_free_areas_node(filter, zone_to_nid(zone)))
3322                         continue;
3323 
3324                 free_pcp = 0;
3325                 for_each_online_cpu(cpu)
3326                         free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
3327 
3328                 show_node(zone);
3329                 printk("%s"
3330                         " free:%lukB"
3331                         " min:%lukB"
3332                         " low:%lukB"
3333                         " high:%lukB"
3334                         " active_anon:%lukB"
3335                         " inactive_anon:%lukB"
3336                         " active_file:%lukB"
3337                         " inactive_file:%lukB"
3338                         " unevictable:%lukB"
3339                         " isolated(anon):%lukB"
3340                         " isolated(file):%lukB"
3341                         " present:%lukB"
3342                         " managed:%lukB"
3343                         " mlocked:%lukB"
3344                         " dirty:%lukB"
3345                         " writeback:%lukB"
3346                         " mapped:%lukB"
3347                         " shmem:%lukB"
3348                         " slab_reclaimable:%lukB"
3349                         " slab_unreclaimable:%lukB"
3350                         " kernel_stack:%lukB"
3351                         " pagetables:%lukB"
3352                         " unstable:%lukB"
3353                         " bounce:%lukB"
3354                         " free_pcp:%lukB"
3355                         " local_pcp:%ukB"
3356                         " free_cma:%lukB"
3357                         " writeback_tmp:%lukB"
3358                         " pages_scanned:%lu"
3359                         " all_unreclaimable? %s"
3360                         "\n",
3361                         zone->name,
3362                         K(zone_page_state(zone, NR_FREE_PAGES)),
3363                         K(min_wmark_pages(zone)),
3364                         K(low_wmark_pages(zone)),
3365                         K(high_wmark_pages(zone)),
3366                         K(zone_page_state(zone, NR_ACTIVE_ANON)),
3367                         K(zone_page_state(zone, NR_INACTIVE_ANON)),
3368                         K(zone_page_state(zone, NR_ACTIVE_FILE)),
3369                         K(zone_page_state(zone, NR_INACTIVE_FILE)),
3370                         K(zone_page_state(zone, NR_UNEVICTABLE)),
3371                         K(zone_page_state(zone, NR_ISOLATED_ANON)),
3372                         K(zone_page_state(zone, NR_ISOLATED_FILE)),
3373                         K(zone->present_pages),
3374                         K(zone->managed_pages),
3375                         K(zone_page_state(zone, NR_MLOCK)),
3376                         K(zone_page_state(zone, NR_FILE_DIRTY)),
3377                         K(zone_page_state(zone, NR_WRITEBACK)),
3378                         K(zone_page_state(zone, NR_FILE_MAPPED)),
3379                         K(zone_page_state(zone, NR_SHMEM)),
3380                         K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
3381                         K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
3382                         zone_page_state(zone, NR_KERNEL_STACK) *
3383                                 THREAD_SIZE / 1024,
3384                         K(zone_page_state(zone, NR_PAGETABLE)),
3385                         K(zone_page_state(zone, NR_UNSTABLE_NFS)),
3386                         K(zone_page_state(zone, NR_BOUNCE)),
3387                         K(free_pcp),
3388                         K(this_cpu_read(zone->pageset->pcp.count)),
3389                         K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
3390                         K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
3391                         K(zone_page_state(zone, NR_PAGES_SCANNED)),
3392                         (!zone_reclaimable(zone) ? "yes" : "no")
3393                         );
3394                 printk("lowmem_reserve[]:");
3395                 for (i = 0; i < MAX_NR_ZONES; i++)
3396                         printk(" %ld", zone->lowmem_reserve[i]);
3397                 printk("\n");
3398         }
3399 
3400         for_each_populated_zone(zone) {
3401                 unsigned int order;
3402                 unsigned long nr[MAX_ORDER], flags, total = 0;
3403                 unsigned char types[MAX_ORDER];
3404 
3405                 if (skip_free_areas_node(filter, zone_to_nid(zone)))
3406                         continue;
3407                 show_node(zone);
3408                 printk("%s: ", zone->name);
3409 
3410                 spin_lock_irqsave(&zone->lock, flags);
3411                 for (order = 0; order < MAX_ORDER; order++) {
3412                         struct free_area *area = &zone->free_area[order];
3413                         int type;
3414 
3415                         nr[order] = area->nr_free;
3416                         total += nr[order] << order;
3417 
3418                         types[order] = 0;
3419                         for (type = 0; type < MIGRATE_TYPES; type++) {
3420                                 if (!list_empty(&area->free_list[type]))
3421                                         types[order] |= 1 << type;
3422                         }
3423                 }
3424                 spin_unlock_irqrestore(&zone->lock, flags);
3425                 for (order = 0; order < MAX_ORDER; order++) {
3426                         printk("%lu*%lukB ", nr[order], K(1UL) << order);
3427                         if (nr[order])
3428                                 show_migration_types(types[order]);
3429                 }
3430                 printk("= %lukB\n", K(total));
3431         }
3432 
3433         hugetlb_show_meminfo();
3434 
3435         printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
3436 
3437         show_swap_cache_info();
3438 }
3439 
3440 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
3441 {
3442         zoneref->zone = zone;
3443         zoneref->zone_idx = zone_idx(zone);
3444 }
3445 
3446 /*
3447  * Builds allocation fallback zone lists.
3448  *
3449  * Add all populated zones of a node to the zonelist.
3450  */
3451 static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
3452                                 int nr_zones)
3453 {
3454         struct zone *zone;
3455         enum zone_type zone_type = MAX_NR_ZONES;
3456 
3457         do {
3458                 zone_type--;
3459                 zone = pgdat->node_zones + zone_type;
3460                 if (populated_zone(zone)) {
3461                         zoneref_set_zone(zone,
3462                                 &zonelist->_zonerefs[nr_zones++]);
3463                         check_highest_zone(zone_type);
3464                 }
3465         } while (zone_type);
3466 
3467         return nr_zones;
3468 }
3469 
3470 
3471 /*
3472  *  zonelist_order:
3473  *  0 = automatic detection of better ordering.
3474  *  1 = order by ([node] distance, -zonetype)
3475  *  2 = order by (-zonetype, [node] distance)
3476  *
3477  *  If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
3478  *  the same zonelist. So only NUMA can configure this param.
3479  */
3480 #define ZONELIST_ORDER_DEFAULT  0
3481 #define ZONELIST_ORDER_NODE     1
3482 #define ZONELIST_ORDER_ZONE     2
3483 
3484 /* zonelist order in the kernel.
3485  * set_zonelist_order() will set this to NODE or ZONE.
3486  */
3487 static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
3488 static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
3489 
3490 
3491 #ifdef CONFIG_NUMA
3492 /* The value the user specified, possibly changed by config */
3493 static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
3494 /* string for sysctl */
3495 #define NUMA_ZONELIST_ORDER_LEN 16
3496 char numa_zonelist_order[16] = "default";
3497 
3498 /*
3499  * Interface to configure zonelist ordering.
3500  * Command line option "numa_zonelist_order"
3501  *      = "[dD]efault"  - default, automatic configuration
3502  *      = "[nN]ode"     - order by node locality, then by zone within node
3503  *      = "[zZ]one"     - order by zone, then by locality within zone
3504  */
3505 
3506 static int __parse_numa_zonelist_order(char *s)
3507 {
3508         if (*s == 'd' || *s == 'D') {
3509                 user_zonelist_order = ZONELIST_ORDER_DEFAULT;
3510         } else if (*s == 'n' || *s == 'N') {
3511                 user_zonelist_order = ZONELIST_ORDER_NODE;
3512         } else if (*s == 'z' || *s == 'Z') {
3513                 user_zonelist_order = ZONELIST_ORDER_ZONE;
3514         } else {
3515                 printk(KERN_WARNING
3516                         "Ignoring invalid numa_zonelist_order value:  "
3517                         "%s\n", s);
3518                 return -EINVAL;
3519         }
3520         return 0;
3521 }
3522 
3523 static __init int setup_numa_zonelist_order(char *s)
3524 {
3525         int ret;
3526 
3527         if (!s)
3528                 return 0;
3529 
3530         ret = __parse_numa_zonelist_order(s);
3531         if (ret == 0)
3532                 strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
3533 
3534         return ret;
3535 }
3536 early_param("numa_zonelist_order", setup_numa_zonelist_order);
3537 
3538 /*
3539  * sysctl handler for numa_zonelist_order
3540  */
3541 int numa_zonelist_order_handler(struct ctl_table *table, int write,
3542                 void __user *buffer, size_t *length,
3543                 loff_t *ppos)
3544 {
3545         char saved_string[NUMA_ZONELIST_ORDER_LEN];
3546         int ret;
3547         static DEFINE_MUTEX(zl_order_mutex);
3548 
3549         mutex_lock(&zl_order_mutex);
3550         if (write) {
3551                 if (strlen((char *)table->data) >= NUMA_ZONELIST_ORDER_LEN) {
3552                         ret = -EINVAL;
3553                         goto out;
3554                 }
3555                 strcpy(saved_string, (char *)table->data);
3556         }
3557         ret = proc_dostring(table, write, buffer, length, ppos);
3558         if (ret)
3559                 goto out;
3560         if (write) {
3561                 int oldval = user_zonelist_order;
3562 
3563                 ret = __parse_numa_zonelist_order((char *)table->data);
3564                 if (ret) {
3565                         /*
3566                          * bogus value.  restore saved string
3567                          */
3568                         strncpy((char *)table->data, saved_string,
3569                                 NUMA_ZONELIST_ORDER_LEN);
3570                         user_zonelist_order = oldval;
3571                 } else if (oldval != user_zonelist_order) {
3572                         mutex_lock(&zonelists_mutex);
3573                         build_all_zonelists(NULL, NULL);
3574                         mutex_unlock(&zonelists_mutex);
3575                 }
3576         }
3577 out:
3578         mutex_unlock(&zl_order_mutex);
3579         return ret;
3580 }
3581 
3582 
3583 #define MAX_NODE_LOAD (nr_online_nodes)
3584 static int node_load[MAX_NUMNODES];
3585 
3586 /**
3587  * find_next_best_node - find the next node that should appear in a given node's fallback list
3588  * @node: node whose fallback list we're appending
3589  * @used_node_mask: nodemask_t of already used nodes
3590  *
3591  * We use a number of factors to determine which is the next node that should
3592  * appear on a given node's fallback list.  The node should not have appeared
3593  * already in @node's fallback list, and it should be the next closest node
3594  * according to the distance array (which contains arbitrary distance values
3595  * from each node to each node in the system), and we should also prefer nodes
3596  * with no CPUs, since presumably they'll have very little allocation pressure
3597  * on them otherwise.
3598  * It returns -1 if no node is found.
3599  */
3600 static int find_next_best_node(int node, nodemask_t *used_node_mask)
3601 {
3602         int n, val;
3603         int min_val = INT_MAX;
3604         int best_node = NUMA_NO_NODE;
3605         const struct cpumask *tmp = cpumask_of_node(0);
3606 
3607         /* Use the local node if we haven't already */
3608         if (!node_isset(node, *used_node_mask)) {
3609                 node_set(node, *used_node_mask);
3610                 return node;
3611         }
3612 
3613         for_each_node_state(n, N_MEMORY) {
3614 
3615                 /* Don't want a node to appear more than once */
3616                 if (node_isset(n, *used_node_mask))
3617                         continue;
3618 
3619                 /* Use the distance array to find the distance */
3620                 val = node_distance(node, n);
3621 
3622                 /* Penalize nodes under us ("prefer the next node") */
3623                 val += (n < node);
3624 
3625                 /* Give preference to headless and unused nodes */
3626                 tmp = cpumask_of_node(n);
3627                 if (!cpumask_empty(tmp))
3628                         val += PENALTY_FOR_NODE_WITH_CPUS;
3629 
3630                 /* Slight preference for less loaded node */
3631                 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
3632                 val += node_load[n];
3633 
3634                 if (val < min_val) {
3635                         min_val = val;
3636                         best_node = n;
3637                 }
3638         }
3639 
3640         if (best_node >= 0)
3641                 node_set(best_node, *used_node_mask);
3642 
3643         return best_node;
3644 }
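/*
 * Illustrative sketch (editor's addition, not part of page_alloc.c): the score
 * computed inside the loop above, pulled out as a hypothetical helper. Lower
 * scores win. Distance plus the small penalties is scaled by
 * MAX_NODE_LOAD * MAX_NUMNODES so that node_load[] only breaks ties within a
 * distance group, and nodes with CPUs are penalised in favour of headless ones.
 */
static int example_fallback_score(int from, int candidate)
{
	int val = node_distance(from, candidate);

	val += (candidate < from);			/* "prefer the next node" */
	if (!cpumask_empty(cpumask_of_node(candidate)))
		val += PENALTY_FOR_NODE_WITH_CPUS;	/* prefer headless nodes */
	val *= (MAX_NODE_LOAD * MAX_NUMNODES);		/* distance dominates */
	val += node_load[candidate];			/* tie-break on load */
	return val;
}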
3645 
3646 
3647 /*
3648  * Build zonelists ordered by node and zones within node.
3649  * This results in maximum locality--normal zone overflows into local
3650  * DMA zone, if any--but risks exhausting DMA zone.
3651  */
3652 static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
3653 {
3654         int j;
3655         struct zonelist *zonelist;
3656 
3657         zonelist = &pgdat->node_zonelists[0];
3658         for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
3659                 ;
3660         j = build_zonelists_node(NODE_DATA(node), zonelist, j);
3661         zonelist->_zonerefs[j].zone = NULL;
3662         zonelist->_zonerefs[j].zone_idx = 0;
3663 }
3664 
3665 /*
3666  * Build gfp_thisnode zonelists
3667  */
3668 static void build_thisnode_zonelists(pg_data_t *pgdat)
3669 {
3670         int j;
3671         struct zonelist *zonelist;
3672 
3673         zonelist = &pgdat->node_zonelists[1];
3674         j = build_zonelists_node(pgdat, zonelist, 0);
3675         zonelist->_zonerefs[j].zone = NULL;
3676         zonelist->_zonerefs[j].zone_idx = 0;
3677 }
3678 
3679 /*
3680  * Build zonelists ordered by zone and nodes within zones.
3681  * This results in conserving DMA zone[s] until all Normal memory is
3682  * exhausted, but results in overflowing to remote node while memory
3683  * may still exist in local DMA zone.
3684  */
3685 static int node_order[MAX_NUMNODES];
3686 
3687 static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
3688 {
3689         int pos, j, node;
3690         int zone_type;          /* needs to be signed */
3691         struct zone *z;
3692         struct zonelist *zonelist;
3693 
3694         zonelist = &pgdat->node_zonelists[0];
3695         pos = 0;
3696         for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
3697                 for (j = 0; j < nr_nodes; j++) {
3698                         node = node_order[j];
3699                         z = &NODE_DATA(node)->node_zones[zone_type];
3700                         if (populated_zone(z)) {
3701                                 zoneref_set_zone(z,
3702                                         &zonelist->_zonerefs[pos++]);
3703                                 check_highest_zone(zone_type);
3704                         }
3705                 }
3706         }
3707         zonelist->_zonerefs[pos].zone = NULL;
3708         zonelist->_zonerefs[pos].zone_idx = 0;
3709 }
3710 
3711 #if defined(CONFIG_64BIT)
3712 /*
3713  * Devices that require DMA32/DMA are relatively rare and do not justify a
3714  * penalty to every machine in case the specialised case applies. Default
3715  * to Node-ordering on 64-bit NUMA machines
3716  */
3717 static int default_zonelist_order(void)
3718 {
3719         return ZONELIST_ORDER_NODE;
3720 }
3721 #else
3722 /*
3723  * On 32-bit, the Normal zone needs to be preserved for allocations accessible
3724  * by the kernel. If processes running on node 0 deplete the low memory zone
3725  * then reclaim will occur more frequently, increasing stalls and potentially
3726  * making OOM more likely if a large percentage of the zone is under writeback or
3727  * dirty. The problem is significantly worse if CONFIG_HIGHPTE is not set.
3728  * Hence, default to zone ordering on 32-bit.
3729  */
3730 static int default_zonelist_order(void)
3731 {
3732         return ZONELIST_ORDER_ZONE;
3733 }
3734 #endif /* CONFIG_64BIT */
3735 
3736 static void set_zonelist_order(void)
3737 {
3738         if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
3739                 current_zonelist_order = default_zonelist_order();
3740         else
3741                 current_zonelist_order = user_zonelist_order;
3742 }
3743 
3744 static void build_zonelists(pg_data_t *pgdat)
3745 {
3746         int j, node, load;
3747         enum zone_type i;
3748         nodemask_t used_mask;
3749         int local_node, prev_node;
3750         struct zonelist *zonelist;
3751         unsigned int order = current_zonelist_order;
3752 
3753         /* initialize zonelists */
3754         for (i = 0; i < MAX_ZONELISTS; i++) {
3755                 zonelist = pgdat->node_zonelists + i;
3756                 zonelist->_zonerefs[0].zone = NULL;
3757                 zonelist->_zonerefs[0].zone_idx = 0;
3758         }
3759 
3760         /* NUMA-aware ordering of nodes */
3761         local_node = pgdat->node_id;
3762         load = nr_online_nodes;
3763         prev_node = local_node;
3764         nodes_clear(used_mask);
3765 
3766         memset(node_order, 0, sizeof(node_order));
3767         j = 0;
3768 
3769         while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
3770                 /*
3771                  * We don't want to pressure a particular node.
3772                  * So we add a penalty to the first node in the same
3773                  * distance group to make it round-robin.
3774                  */
3775                 if (node_distance(local_node, node) !=
3776                     node_distance(local_node, prev_node))
3777                         node_load[node] = load;
3778 
3779                 prev_node = node;
3780                 load--;
3781                 if (order == ZONELIST_ORDER_NODE)
3782                         build_zonelists_in_node_order(pgdat, node);
3783                 else
3784                         node_order[j++] = node; /* remember order */
3785         }
3786 
3787         if (order == ZONELIST_ORDER_ZONE) {
3788                 /* calculate node order -- i.e., DMA last! */
3789                 build_zonelists_in_zone_order(pgdat, j);
3790         }
3791 
3792         build_thisnode_zonelists(pgdat);
3793 }
3794 
3795 /* Construct the zonelist performance cache - see further mmzone.h */
3796 static void build_zonelist_cache(pg_data_t *pgdat)
3797 {
3798         struct zonelist *zonelist;
3799         struct zonelist_cache *zlc;
3800         struct zoneref *z;
3801 
3802         zonelist = &pgdat->node_zonelists[0];
3803         zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
3804         bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
3805         for (z = zonelist->_zonerefs; z->zone; z++)
3806                 zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
3807 }
3808 
3809 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
3810 /*
3811  * Return node id of node used for "local" allocations.
3812  * I.e., first node id of first zone in arg node's generic zonelist.
3813  * Used for initializing percpu 'numa_mem', which is used primarily
3814  * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
3815  */
3816 int local_memory_node(int node)
3817 {
3818         struct zone *zone;
3819 
3820         (void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
3821                                    gfp_zone(GFP_KERNEL),
3822                                    NULL,
3823                                    &zone);
3824         return zone->node;
3825 }
3826 #endif
3827 
3828 #else   /* CONFIG_NUMA */
3829 
3830 static void set_zonelist_order(void)
3831 {
3832         current_zonelist_order = ZONELIST_ORDER_ZONE;
3833 }
3834 
3835 static void build_zonelists(pg_data_t *pgdat)
3836 {
3837         int node, local_node;
3838         enum zone_type j;
3839         struct zonelist *zonelist;
3840 
3841         local_node = pgdat->node_id;
3842 
3843         zonelist = &pgdat->node_zonelists[0];
3844         j = build_zonelists_node(pgdat, zonelist, 0);
3845 
3846         /*
3847          * Now we build the zonelist so that it contains the zones
3848          * of all the other nodes.
3849          * We don't want to pressure a particular node, so when
3850          * building the zones for node N, we make sure that the
3851          * zones coming right after the local ones are those from
3852          * node N+1 (modulo N)
3853          */
3854         for (node = local_node + 1; node < MAX_NUMNODES; node++) {
3855                 if (!node_online(node))
3856                         continue;
3857                 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
3858         }
3859         for (node = 0; node < local_node; node++) {
3860                 if (!node_online(node))
3861                         continue;
3862                 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
3863         }
3864 
3865         zonelist->_zonerefs[j].zone = NULL;
3866         zonelist->_zonerefs[j].zone_idx = 0;
3867 }
3868 
3869 /* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
3870 static void build_zonelist_cache(pg_data_t *pgdat)
3871 {
3872         pgdat->node_zonelists[0].zlcache_ptr = NULL;
3873 }
3874 
3875 #endif  /* CONFIG_NUMA */
3876 
3877 /*
3878  * Boot pageset table. One per cpu which is going to be used for all
3879  * zones and all nodes. The parameters will be set in such a way
3880  * that an item put on a list will immediately be handed over to
3881  * the buddy list. This is safe since pageset manipulation is done
3882  * with interrupts disabled.
3883  *
3884  * The boot_pagesets must be kept even after bootup is complete for
3885  * unused processors and/or zones. They do play a role for bootstrapping
3886  * hotplugged processors.
3887  *
3888  * zoneinfo_show() and maybe other functions do
3889  * not check if the processor is online before following the pageset pointer.
3890  * Other parts of the kernel may not check if the zone is available.
3891  */
3892 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
3893 static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
3894 static void setup_zone_pageset(struct zone *zone);
3895 
3896 /*
3897  * Global mutex to protect against size modification of zonelists
3898  * as well as to serialize pageset setup for the new populated zone.
3899  */
3900 DEFINE_MUTEX(zonelists_mutex);
3901 
3902 /* The return value is int just to satisfy stop_machine() */
3903 static int __build_all_zonelists(void *data)
3904 {
3905         int nid;
3906         int cpu;
3907         pg_data_t *self = data;
3908 
3909 #ifdef CONFIG_NUMA
3910         memset(node_load, 0, sizeof(node_load));
3911 #endif
3912 
3913         if (self && !node_online(self->node_id)) {
3914                 build_zonelists(self);
3915                 build_zonelist_cache(self);
3916         }
3917 
3918         for_each_online_node(nid) {
3919                 pg_data_t *pgdat = NODE_DATA(nid);
3920 
3921                 build_zonelists(pgdat);
3922                 build_zonelist_cache(pgdat);
3923         }
3924 
3925         /*
3926          * Initialize the boot_pagesets that are going to be used
3927          * for bootstrapping processors. The real pagesets for
3928          * each zone will be allocated later when the per cpu
3929          * allocator is available.
3930          *
3931          * boot_pagesets are used also for bootstrapping offline
3932          * cpus if the system is already booted because the pagesets
3933          * are needed to initialize allocators on a specific cpu too.
3934          * F.e. the percpu allocator needs the page allocator which
3935          * needs the percpu allocator in order to allocate its pagesets
3936          * (a chicken-egg dilemma).
3937          */
3938         for_each_possible_cpu(cpu) {
3939                 setup_pageset(&per_cpu(boot_pageset, cpu), 0);
3940 
3941 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
3942                 /*
3943                  * We now know the "local memory node" for each node--
3944                  * i.e., the node of the first zone in the generic zonelist.
3945                  * Set up numa_mem percpu variable for on-line cpus.  During
3946                  * boot, only the boot cpu should be on-line;  we'll init the
3947                  * secondary cpus' numa_mem as they come on-line.  During
3948                  * node/memory hotplug, we'll fixup all on-line cpus.
3949                  */
3950                 if (cpu_online(cpu))
3951                         set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
3952 #endif
3953         }
3954 
3955         return 0;
3956 }
3957 
3958 static noinline void __init
3959 build_all_zonelists_init(void)
3960 {
3961         __build_all_zonelists(NULL);
3962         mminit_verify_zonelist();
3963         cpuset_init_current_mems_allowed();
3964 }
3965 
3966 /*
3967  * Called with zonelists_mutex held always
3968  * unless system_state == SYSTEM_BOOTING.
3969  *
3970  * __ref due to (1) call of __meminit annotated setup_zone_pageset
3971  * [we're only called with non-NULL zone through __meminit paths] and
3972  * (2) call of __init annotated helper build_all_zonelists_init
3973  * [protected by SYSTEM_BOOTING].
3974  */
3975 void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
3976 {
3977         set_zonelist_order();
3978 
3979         if (system_state == SYSTEM_BOOTING) {
3980                 build_all_zonelists_init();
3981         } else {
3982 #ifdef CONFIG_MEMORY_HOTPLUG
3983                 if (zone)
3984                         setup_zone_pageset(zone);
3985 #endif
3986                 /* we have to stop all cpus to guarantee there is no user
3987                    of zonelist */
3988                 stop_machine(__build_all_zonelists, pgdat, NULL);
3989                 /* cpuset refresh routine should be here */
3990         }
3991         vm_total_pages = nr_free_pagecache_pages();
3992         /*
3993          * Disable grouping by mobility if the number of pages in the
3994          * system is too low to allow the mechanism to work. It would be
3995          * more accurate, but expensive to check per-zone. This check is
3996          * made on memory-hotadd so a system can start with mobility
3997          * disabled and enable it later
3998          */
3999         if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
4000                 page_group_by_mobility_disabled = 1;
4001         else
4002                 page_group_by_mobility_disabled = 0;
4003 
4004         pr_info("Built %i zonelists in %s order, mobility grouping %s.  "
4005                 "Total pages: %ld\n",
4006                         nr_online_nodes,
4007                         zonelist_order_name[current_zonelist_order],
4008                         page_group_by_mobility_disabled ? "off" : "on",
4009                         vm_total_pages);
4010 #ifdef CONFIG_NUMA
4011         pr_info("Policy zone: %s\n", zone_names[policy_zone]);
4012 #endif
4013 }
4014 
4015 /*
4016  * Helper functions to size the waitqueue hash table.
4017  * Essentially these want to choose hash table sizes sufficiently
4018  * large so that collisions trying to wait on pages are rare.
4019  * But in fact, the number of active page waitqueues on typical
4020  * systems is ridiculously low, less than 200. So this is even
4021  * conservative, even though it seems large.
4022  *
4023  * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
4024  * waitqueues, i.e. the size of the waitq table given the number of pages.
4025  */
4026 #define PAGES_PER_WAITQUEUE     256
4027 
4028 #ifndef CONFIG_MEMORY_HOTPLUG
4029 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
4030 {
4031         unsigned long size = 1;
4032 
4033         pages /= PAGES_PER_WAITQUEUE;
4034 
4035         while (size < pages)
4036                 size <<= 1;
4037 
4038         /*
4039          * Once we have dozens or even hundreds of threads sleeping
4040          * on IO we've got bigger problems than wait queue collision.
4041          * Limit the size of the wait table to a reasonable size.
4042          */
4043         size = min(size, 4096UL);
4044 
4045         return max(size, 4UL);
4046 }
4047 #else
4048 /*
4049  * A zone's size might be changed by hot-add, so it is not possible to determine
4050  * a suitable size for its wait_table.  So we use the maximum size now.
4051  *
4052  * The max wait table size = 4096 x sizeof(wait_queue_head_t).   ie:
4053  *
4054  *    i386 (preemption config)    : 4096 x 16 = 64Kbyte.
4055  *    ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
4056  *    ia64, x86-64 (preemption)   : 4096 x 24 = 96Kbyte.
4057  *
4058  * The maximum entries are prepared when a zone's memory is (512K + 256) pages
4059  * or more by the traditional way. (See above).  It equals:
4060  *
4061  *    i386, x86-64, powerpc(4K page size) : =  ( 2G + 1M)byte.
4062  *    ia64(16K page size)                 : =  ( 8G + 4M)byte.
4063  *    powerpc (64K page size)             : =  (32G +16M)byte.
4064  */
4065 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
4066 {
4067         return 4096UL;
4068 }
4069 #endif
4070 
4071 /*
4072  * This is an integer logarithm so that shifts can be used later
4073  * to extract the more random high bits from the multiplicative
4074  * hash function before the remainder is taken.
4075  */
4076 static inline unsigned long wait_table_bits(unsigned long size)
4077 {
4078         return ffz(~size);
4079 }
4080 
4081 /*
4082  * Check if a pageblock contains reserved pages
4083  */
4084 static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
4085 {
4086         unsigned long pfn;
4087 
4088         for (pfn = start_pfn; pfn < end_pfn; pfn++) {
4089                 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
4090                         return 1;
4091         }
4092         return 0;
4093 }
4094 
4095 /*
4096  * Mark a number of pageblocks as MIGRATE_RESERVE. The number
4097  * of blocks reserved is based on min_wmark_pages(zone). The memory within
4098  * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
4099  * higher will lead to a bigger reserve which will get freed as contiguous
4100  * blocks as reclaim kicks in
4101  */
4102 static void setup_zone_migrate_reserve(struct zone *zone)
4103 {
4104         unsigned long start_pfn, pfn, end_pfn, block_end_pfn;
4105         struct page *page;
4106         unsigned long block_migratetype;
4107         int reserve;
4108         int old_reserve;
4109 
4110         /*
4111          * Get the start pfn, end pfn and the number of blocks to reserve
4112          * We have to be careful to be aligned to pageblock_nr_pages to
4113          * make sure that we always check pfn_valid for the first page in
4114          * the block.
4115          */
4116         start_pfn = zone->zone_start_pfn;
4117         end_pfn = zone_end_pfn(zone);
4118         start_pfn = roundup(start_pfn, pageblock_nr_pages);
4119         reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
4120                                                         pageblock_order;
4121 
4122         /*
4123          * Reserve blocks are generally in place to help high-order atomic
4124          * allocations that are short-lived. A min_free_kbytes value that
4125          * would result in more than 2 reserve blocks for atomic allocations
4126          * is assumed to be in place to help anti-fragmentation for the
4127          * future allocation of hugepages at runtime.
4128          */
4129         reserve = min(2, reserve);
4130         old_reserve = zone->nr_migrate_reserve_block;
4131 
4132         /* On memory hot-add, we almost always need to do nothing */
4133         if (reserve == old_reserve)
4134                 return;
4135         zone->nr_migrate_reserve_block = reserve;
4136 
4137         for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
4138                 if (!pfn_valid(pfn))
4139                         continue;
4140                 page = pfn_to_page(pfn);
4141 
4142                 /* Watch out for overlapping nodes */
4143                 if (page_to_nid(page) != zone_to_nid(zone))
4144                         continue;
4145 
4146                 block_migratetype = get_pageblock_migratetype(page);
4147 
4148                 /* Only test what is necessary when the reserves are not met */
4149                 if (reserve > 0) {
4150                         /*
4151                          * Blocks with reserved pages will never free, skip
4152                          * them.
4153                          */
4154                         block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
4155                         if (pageblock_is_reserved(pfn, block_end_pfn))
4156                                 continue;
4157 
4158                         /* If this block is reserved, account for it */
4159                         if (block_migratetype == MIGRATE_RESERVE) {
4160                                 reserve--;
4161                                 continue;
4162                         }
4163 
4164                         /* Suitable for reserving if this block is movable */
4165                         if (block_migratetype == MIGRATE_MOVABLE) {
4166                                 set_pageblock_migratetype(page,
4167                                                         MIGRATE_RESERVE);
4168                                 move_freepages_block(zone, page,
4169                                                         MIGRATE_RESERVE);
4170                                 reserve--;
4171                                 continue;
4172                         }
4173                 } else if (!old_reserve) {
4174                         /*
4175                          * At boot time we don't need to scan the whole zone
4176                          * for turning off MIGRATE_RESERVE.
4177                          */
4178                         break;
4179                 }
4180 
4181                 /*
4182                  * If the reserve is met and this is a previous reserved block,
4183                  * take it back
4184                  */
4185                 if (block_migratetype == MIGRATE_RESERVE) {
4186                         set_pageblock_migratetype(page, MIGRATE_MOVABLE);
4187                         move_freepages_block(zone, page, MIGRATE_MOVABLE);
4188                 }
4189         }
4190 }
4191 
4192 /*
4193  * Initially all pages are reserved - free ones are freed
4194  * up by free_all_bootmem() once the early boot process is
4195  * done. Non-atomic initialization, single-pass.
4196  */
4197 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
4198                 unsigned long start_pfn, enum memmap_context context)
4199 {
4200         struct page *page;
4201         unsigned long end_pfn = start_pfn + size;
4202         unsigned long pfn;
4203         struct zone *z;
4204 
4205         if (highest_memmap_pfn < end_pfn - 1)
4206                 highest_memmap_pfn = end_pfn - 1;
4207 
4208         z = &NODE_DATA(nid)->node_zones[zone];
4209         for (pfn = start_pfn; pfn < end_pfn; pfn++) {
4210                 /*
4211                  * There can be holes in boot-time mem_map[]s
4212                  * handed to this function.  They do not
4213                  * exist on hotplugged memory.
4214                  */
4215                 if (context == MEMMAP_EARLY) {
4216                         if (!early_pfn_valid(pfn))
4217                                 continue;
4218                         if (!early_pfn_in_nid(pfn, nid))
4219                                 continue;
4220                 }
4221                 page = pfn_to_page(pfn);
4222                 set_page_links(page, zone, nid, pfn);
4223                 mminit_verify_page_links(page, zone, nid, pfn);
4224                 init_page_count(page);
4225                 page_mapcount_reset(page);
4226                 page_cpupid_reset_last(page);
4227                 SetPageReserved(page);
4228                 /*
4229                  * Mark the block movable so that blocks are reserved for
4230                  * movable pages at startup. This forces kernel allocations
4231                  * to reserve their blocks rather than leaking throughout
4232                  * the address space during boot, when many long-lived
4233                  * kernel allocations are made. Later some blocks near
4234                  * the start are marked MIGRATE_RESERVE by
4235                  * setup_zone_migrate_reserve().
4236                  *
4237                  * The bitmap is created for the zone's valid pfn range,
4238                  * but the memmap can cover invalid pages as well (for
4239                  * alignment), so check that the pfn lies within the zone
4240                  * before calling set_pageblock_migratetype().
4241                  */
4242                 if ((z->zone_start_pfn <= pfn)
4243                     && (pfn < zone_end_pfn(z))
4244                     && !(pfn & (pageblock_nr_pages - 1)))
4245                         set_pageblock_migratetype(page, MIGRATE_MOVABLE);
4246 
4247                 INIT_LIST_HEAD(&page->lru);
4248 #ifdef WANT_PAGE_VIRTUAL
4249                 /* The shift won't overflow because ZONE_NORMAL is below 4G. */
4250                 if (!is_highmem_idx(zone))
4251                         set_page_address(page, __va(pfn << PAGE_SHIFT));
4252 #endif
4253         }
4254 }
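/*
 * Worked example of the pageblock check in memmap_init_zone() above, assuming
 * 4 KiB pages and pageblock_order == 9 (as with 2 MiB huge pages):
 * pageblock_nr_pages is then 512, so (pfn & (pageblock_nr_pages - 1)) == 0
 * once every 2 MiB and exactly one pfn per pageblock has
 * set_pageblock_migratetype() called on it.
 */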
4255 
4256 static void __meminit zone_init_free_lists(struct zone *zone)
4257 {
4258         unsigned int order, t;
4259         for_each_migratetype_order(order, t) {
4260                 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
4261                 zone->free_area[order].nr_free = 0;
4262         }
4263 }
4264 
4265 #ifndef __HAVE_ARCH_MEMMAP_INIT
4266 #define memmap_init(size, nid, zone, start_pfn) \
4267         memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
4268 #endif
4269 
4270 static int zone_batchsize(struct zone *zone)
4271 {
4272 #ifdef CONFIG_MMU
4273         int batch;
4274 
4275         /*
4276          * The per-cpu-pages pools are set to around 1/1000th of the
4277          * size of the zone.  But no more than 1/2 of a meg.
4278          *
4279          * OK, so we don't know how big the cache is.  So guess.
4280          */
4281         batch = zone->managed_pages / 1024;
4282         if (batch * PAGE_SIZE > 512 * 1024)
4283                 batch = (512 * 1024) / PAGE_SIZE;
4284         batch /= 4;             /* We effectively *= 4 below */
4285         if (batch < 1)
4286                 batch = 1;
4287 
4288         /*
4289          * Clamp the batch to a 2^n - 1 value. Having a power
4290          * of 2 value was found to be more likely to have
4291          * suboptimal cache aliasing properties in some cases.
4292          *
4293          * For example if 2 tasks are alternately allocating
4294          * batches of pages, one task can end up with a lot
4295          * of pages of one half of the possible page colors
4296          * and the other with pages of the other colors.
4297          */
4298         batch = rounddown_pow_of_two(batch + batch/2) - 1;
4299 
4300         return batch;
4301 
4302 #else
4303         /* The deferral and batching of frees should be suppressed under NOMMU
4304          * conditions.
4305          *
4306          * The problem is that NOMMU needs to be able to allocate large chunks
4307          * of contiguous memory as there's no hardware page translation to
4308          * assemble apparent contiguous memory from discontiguous pages.
4309          *
4310          * Queueing large contiguous runs of pages for batching, however,
4311          * causes the pages to actually be freed in smaller chunks.  As there
4312          * can be a significant delay between the individual batches being
4313          * recycled, this leads to the once large chunks of space being
4314          * fragmented and becoming unavailable for high-order allocations.
4315          */
4316         return 0;
4317 #endif
4318 }
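/*
 * Worked example of the sizing in zone_batchsize() above, assuming 4 KiB
 * pages and a zone with 262144 managed pages (1 GiB): 262144 / 1024 = 256;
 * 256 pages would exceed the 512 KiB cap, so batch becomes 128, then
 * 128 / 4 = 32; the final rounding gives
 * rounddown_pow_of_two(32 + 16) - 1 = 31.
 */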
4319 
4320 /*
4321  * pcp->high and pcp->batch values are related and dependent on one another:
4322  * ->batch must never be higher than ->high.
4323  * The following function updates them in a safe manner without read side
4324  * locking.
4325  *
4326  * Any new users of pcp->batch and pcp->high should ensure they can cope with
4327  * those fields changing asynchronously (according to the above rule).
4328  *
4329  * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
4330  * outside of boot time (or some other assurance that no concurrent updaters
4331  * exist).
4332  */
4333 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
4334                 unsigned long batch)
4335 {
4336         /* Start with a fail-safe value for batch */
4337         pcp->batch = 1;
4338         smp_wmb();
4339 
4340         /* Update high, then batch, in order */
4341         pcp->high = high;
4342         smp_wmb();
4343 
4344         pcp->batch = batch;
4345 }
4346 
4347 /* a companion to pageset_set_high() */
4348 static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
4349 {
4350         pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
4351 }
4352 
4353 static void pageset_init(struct per_cpu_pageset *p)
4354 {
4355         struct per_cpu_pages *pcp;
4356         int migratetype;
4357 
4358         memset(p, 0, sizeof(*p));
4359 
4360         pcp = &p->pcp;
4361         pcp->count = 0;
4362         for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
4363                 INIT_LIST_HEAD(&pcp->lists[migratetype]);
4364 }
4365 
4366 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
4367 {
4368         pageset_init(p);
4369         pageset_set_batch(p, batch);
4370 }
4371 
4372 /*
4373  * pageset_set_high() sets the high water mark for hot per_cpu_pagelist
4374  * to the value high for the pageset p.
4375  */
4376 static void pageset_set_high(struct per_cpu_pageset *p,
4377                                 unsigned long high)
4378 {
4379         unsigned long batch = max(1UL, high / 4);
4380         if ((high / 4) > (PAGE_SHIFT * 8))
4381                 batch = PAGE_SHIFT * 8;
4382 
4383         pageset_update(&p->pcp, high, batch);
4384 }
4385 
4386 static void pageset_set_high_and_batch(struct zone *zone,
4387                                        struct per_cpu_pageset *pcp)
4388 {
4389         if (percpu_pagelist_fraction)
4390                 pageset_set_high(pcp,
4391                         (zone->managed_pages /
4392                                 percpu_pagelist_fraction));
4393         else
4394                 pageset_set_batch(pcp, zone_batchsize(zone));
4395 }
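/*
 * Worked example for the two paths above, assuming 4 KiB pages
 * (PAGE_SHIFT == 12) and a zone with 262144 managed pages: with
 * percpu_pagelist_fraction unset, the default path uses
 * zone_batchsize() == 31 and so high = 6 * 31 = 186; with
 * percpu_pagelist_fraction = 8, high = 262144 / 8 = 32768 and the batch is
 * clamped from 32768 / 4 down to PAGE_SHIFT * 8 = 96.
 */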
4396 
4397 static void __meminit zone_pageset_init(struct zone *zone, int cpu)
4398 {
4399         struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
4400 
4401         pageset_init(pcp);
4402         pageset_set_high_and_batch(zone, pcp);
4403 }
4404 
4405 static void __meminit setup_zone_pageset(struct zone *zone)
4406 {
4407         int cpu;
4408         zone->pageset = alloc_percpu(struct per_cpu_pageset);
4409         for_each_possible_cpu(cpu)
4410                 zone_pageset_init(zone, cpu);
4411 }
4412 
4413 /*
4414  * Allocate per cpu pagesets and initialize them.
4415  * Before this call only boot pagesets were available.
4416  */
4417 void __init setup_per_cpu_pageset(void)
4418 {
4419         struct zone *zone;
4420 
4421         for_each_populated_zone(zone)
4422                 setup_zone_pageset(zone);
4423 }
4424 
4425 static noinline __init_refok
4426 int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
4427 {
4428         int i;
4429         size_t alloc_size;
4430 
4431         /*
4432          * The per-page waitqueue mechanism uses hashed waitqueues
4433          * per zone.
4434          */
4435         zone->wait_table_hash_nr_entries =
4436                  wait_table_hash_nr_entries(zone_size_pages);
4437         zone->wait_table_bits =
4438                 wait_table_bits(zone->wait_table_hash_nr_entries);
4439         alloc_size = zone->wait_table_hash_nr_entries
4440                                         * sizeof(wait_queue_head_t);
4441 
4442         if (!slab_is_available()) {
4443                 zone->wait_table = (wait_queue_head_t *)
4444                         memblock_virt_alloc_node_nopanic(
4445                                 alloc_size, zone->zone_pgdat->node_id);
4446         } else {
4447                 /*
4448                  * This case means that a zone whose size was 0 gets new memory
4449                  * via memory hot-add.
4450                  * It may also be that a whole new node was hot-added.  In that
4451                  * case vmalloc() cannot yet use the new node's memory, even
4452                  * though this wait_table should ideally be placed on that new
4453                  * node as well.
4454                  * Making use of the new node's own memory here will require
4455                  * further work.
4456                  */
4457                 zone->wait_table = vmalloc(alloc_size);
4458         }
4459         if (!zone->wait_table)
4460                 return -ENOMEM;
4461 
4462         for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
4463                 init_waitqueue_head(zone->wait_table + i);
4464 
4465         return 0;
4466 }
4467 
4468 static __meminit void zone_pcp_init(struct zone *zone)
4469 {
4470         /*
4471          * per cpu subsystem is not up at this point. The following code
4472          * relies on the ability of the linker to provide the
4473          * offset of a (static) per cpu variable into the per cpu area.
4474          */
4475         zone->pageset = &boot_pageset;
4476 
4477         if (populated_zone(zone))
4478                 printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%u\n",
4479                         zone->name, zone->present_pages,
4480                                          zone_batchsize(zone));
4481 }
4482 
4483 int __meminit init_currently_empty_zone(struct zone *zone,
4484                                         unsigned long zone_start_pfn,
4485                                         unsigned long size,
4486                                         enum memmap_context context)
4487 {
4488         struct pglist_data *pgdat = zone->zone_pgdat;
4489         int ret;
4490         ret = zone_wait_table_init(zone, size);
4491         if (ret)
4492                 return ret;
4493         pgdat->nr_zones = zone_idx(zone) + 1;
4494 
4495         zone->zone_start_pfn = zone_start_pfn;
4496 
4497         mminit_dprintk(MMINIT_TRACE, "memmap_init",
4498                         "Initialising map node %d zone %lu pfns %lu -> %lu\n",
4499                         pgdat->node_id,
4500                         (unsigned long)zone_idx(zone),
4501                         zone_start_pfn, (zone_start_pfn + size));
4502 
4503         zone_init_free_lists(zone);
4504 
4505         return 0;
4506 }
4507 
4508 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
4509 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
4510 /*
4511  * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
4512  */
4513 int __meminit __early_pfn_to_nid(unsigned long pfn)
4514 {
4515         unsigned long start_pfn, end_pfn;
4516         int nid;
4517         /*
4518          * NOTE: The following SMP-unsafe globals are only used early in boot
4519          * when the kernel is running single-threaded.
4520          */
4521         static unsigned long __meminitdata last_start_pfn, last_end_pfn;
4522         static int __meminitdata last_nid;
4523 
4524         if (last_start_pfn <= pfn && pfn < last_end_pfn)
4525                 return last_nid;
4526 
4527         nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
4528         if (nid != -1) {
4529                 last_start_pfn = start_pfn;
4530                 last_end_pfn = end_pfn;
4531                 last_nid = nid;
4532         }
4533 
4534         return nid;
4535 }
4536 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
4537 
4538 int __meminit early_pfn_to_nid(unsigned long pfn)
4539 {
4540         int nid;
4541 
4542         nid = __early_pfn_to_nid(pfn);
4543         if (nid >= 0)
4544                 return nid;
4545         /* just returns 0 */
4546         return 0;
4547 }
4548 
4549 #ifdef CONFIG_NODES_SPAN_OTHER_NODES
4550 bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
4551 {
4552         int nid;
4553 
4554         nid = __early_pfn_to_nid(pfn);
4555         if (nid >= 0 && nid != node)
4556                 return false;
4557         return true;
4558 }
4559 #endif
4560 
4561 /**
4562  * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
4563  * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
4564  * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid
4565  *
4566  * If an architecture guarantees that all ranges registered contain no holes
4567  * and may be freed, this function may be used instead of calling
4568  * memblock_free_early_nid() manually.
4569  */
4570 void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
4571 {
4572         unsigned long start_pfn, end_pfn;
4573         int i, this_nid;
4574 
4575         for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
4576                 start_pfn = min(start_pfn, max_low_pfn);
4577                 end_pfn = min(end_pfn, max_low_pfn);
4578 
4579                 if (start_pfn < end_pfn)
4580                         memblock_free_early_nid(PFN_PHYS(start_pfn),
4581                                         (end_pfn - start_pfn) << PAGE_SHIFT,
4582                                         this_nid);
4583         }
4584 }
4585 
4586 /**
4587  * sparse_memory_present_with_active_regions - Call memory_present for each active range
4588  * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
4589  *
4590  * If an architecture guarantees that all ranges registered contain no holes and may
4591  * be freed, this function may be used instead of calling memory_present() manually.
4592  */
4593 void __init sparse_memory_present_with_active_regions(int nid)
4594 {
4595         unsigned long start_pfn, end_pfn;
4596         int i, this_nid;
4597 
4598         for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
4599                 memory_present(this_nid, start_pfn, end_pfn);
4600 }
4601 
4602 /**
4603  * get_pfn_range_for_nid - Return the start and end page frames for a node
4604  * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
4605  * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
4606  * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
4607  *
4608  * It returns the start and end page frame of a node based on information
4609  * provided by memblock_set_node(). If called for a node
4610  * with no available memory, a warning is printed and the start and end
4611  * PFNs will be 0.
4612  */
4613 void __meminit get_pfn_range_for_nid(unsigned int nid,
4614                         unsigned long *start_pfn, unsigned long *end_pfn)
4615 {
4616         unsigned long this_start_pfn, this_end_pfn;
4617         int i;
4618 
4619         *start_pfn = -1UL;
4620         *end_pfn = 0;
4621 
4622         for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
4623                 *start_pfn = min(*start_pfn, this_start_pfn);
4624                 *end_pfn = max(*end_pfn, this_end_pfn);
4625         }
4626 
4627         if (*start_pfn == -1UL)
4628                 *start_pfn = 0;
4629 }
4630 
4631 /*
4632  * This finds a zone that can be used for ZONE_MOVABLE pages. The
4633  * assumption is made that zones within a node are ordered in monotonically
4634  * increasing memory addresses so that the "highest" populated zone is used.
4635  */
4636 static void __init find_usable_zone_for_movable(void)
4637 {
4638         int zone_index;
4639         for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
4640                 if (zone_index == ZONE_MOVABLE)
4641                         continue;
4642 
4643                 if (arch_zone_highest_possible_pfn[zone_index] >
4644                                 arch_zone_lowest_possible_pfn[zone_index])
4645                         break;
4646         }
4647 
4648         VM_BUG_ON(zone_index == -1);
4649         movable_zone = zone_index;
4650 }
4651 
4652 /*
4653  * The zone ranges provided by the architecture do not include ZONE_MOVABLE
4654  * because it is sized independently of the architecture. Unlike the other zones,
4655  * the starting point for ZONE_MOVABLE is not fixed. It may be different
4656  * in each node depending on the size of each node and how evenly kernelcore
4657  * is distributed. This helper function adjusts the zone ranges
4658  * provided by the architecture for a given node by using the end of the
4659  * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
4660  * zones within a node are ordered by monotonically increasing memory addresses.
4661  */
4662 static void __meminit adjust_zone_range_for_zone_movable(int nid,
4663                                         unsigned long zone_type,
4664                                         unsigned long node_start_pfn,
4665                                         unsigned long node_end_pfn,
4666                                         unsigned long *zone_start_pfn,
4667                                         unsigned long *zone_end_pfn)
4668 {
4669         /* Only adjust if ZONE_MOVABLE is on this node */
4670         if (zone_movable_pfn[nid]) {
4671                 /* Size ZONE_MOVABLE */
4672                 if (zone_type == ZONE_MOVABLE) {
4673                         *zone_start_pfn = zone_movable_pfn[nid];
4674                         *zone_end_pfn = min(node_end_pfn,
4675                                 arch_zone_highest_possible_pfn[movable_zone]);
4676 
4677                 /* Adjust for ZONE_MOVABLE starting within this range */
4678                 } else if (*zone_start_pfn < zone_movable_pfn[nid] &&
4679                                 *zone_end_pfn > zone_movable_pfn[nid]) {
4680                         *zone_end_pfn = zone_movable_pfn[nid];
4681 
4682                 /* Check if this whole range is within ZONE_MOVABLE */
4683                 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
4684                         *zone_start_pfn = *zone_end_pfn;
4685         }
4686 }
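/*
 * Worked example, using made-up pfns: for a node spanning pfns
 * [0x100000, 0x200000) with zone_movable_pfn[nid] == 0x180000, a kernel zone
 * that originally covered the whole node is clipped to end at 0x180000, a
 * zone whose range starts at or above 0x180000 collapses to empty, and
 * ZONE_MOVABLE is reported as starting at 0x180000.
 */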
4687 
4688 /*
4689  * Return the number of pages a zone spans in a node, including holes
4690  * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
4691  */
4692 static unsigned long __meminit zone_spanned_pages_in_node(int nid,
4693                                         unsigned long zone_type,
4694                                         unsigned long node_start_pfn,
4695                                         unsigned long node_end_pfn,
4696                                         unsigned long *ignored)
4697 {
4698         unsigned long zone_start_pfn, zone_end_pfn;
4699 
4700         /* Get the start and end of the zone */
4701         zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
4702         zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
4703         adjust_zone_range_for_zone_movable(nid, zone_type,
4704                                 node_start_pfn, node_end_pfn,
4705                                 &zone_start_pfn, &zone_end_pfn);
4706 
4707         /* Check that this node has pages within the zone's required range */
4708         if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
4709                 return 0;
4710 
4711         /* Move the zone boundaries inside the node if necessary */
4712         zone_end_pfn = min(zone_end_pfn, node_end_pfn);
4713         zone_start_pfn = max(zone_start_pfn, node_start_pfn);
4714 
4715         /* Return the spanned pages */
4716         return zone_end_pfn - zone_start_pfn;
4717 }
4718 
4719 /*
4720  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
4721  * then all holes in the requested range will be accounted for.
4722  */
4723 unsigned long __meminit __absent_pages_in_range(int nid,
4724                                 unsigned long range_start_pfn,
4725                                 unsigned long range_end_pfn)
4726 {
4727         unsigned long nr_absent = range_end_pfn - range_start_pfn;
4728         unsigned long start_pfn, end_pfn;
4729         int i;
4730 
4731         for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
4732                 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
4733                 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
4734                 nr_absent -= end_pfn - start_pfn;
4735         }
4736         return nr_absent;
4737 }
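/*
 * Worked example with made-up pfns: for a request covering [0x1000, 0x5000),
 * nr_absent starts at 0x4000; if memblock knows of a single region
 * [0x2000, 0x3000) within that range, 0x1000 pages are subtracted and
 * 0x3000 pages are reported as holes.
 */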
4738 
4739 /**
4740  * absent_pages_in_range - Return number of page frames in holes within a range
4741  * @start_pfn: The start PFN to start searching for holes
4742  * @end_pfn: The end PFN to stop searching for holes
4743  *
4744  * It returns the number of page frames in memory holes within a range.
4745  */
4746 unsigned long __init absent_pages_in_range(unsigned long start_pfn,
4747                                                         unsigned long end_pfn)
4748 {
4749         return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
4750 }
4751 
4752 /* Return the number of page frames in holes in a zone on a node */
4753 static unsigned long __meminit zone_absent_pages_in_node(int nid,
4754                                         unsigned long zone_type,
4755                                         unsigned long node_start_pfn,
4756                                         unsigned long node_end_pfn,
4757                                         unsigned long *ignored)
4758 {
4759         unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
4760         unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
4761         unsigned long zone_start_pfn, zone_end_pfn;
4762 
4763         zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
4764         zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
4765 
4766         adjust_zone_range_for_zone_movable(nid, zone_type,
4767                         node_start_pfn, node_end_pfn,
4768                         &zone_start_pfn, &zone_end_pfn);
4769         return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
4770 }
4771 
4772 #else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
4773 static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
4774                                         unsigned long zone_type,
4775                                         unsigned long node_start_pfn,
4776                                         unsigned long node_end_pfn,
4777                                         unsigned long *zones_size)
4778 {
4779         return zones_size[zone_type];
4780 }
4781 
4782 static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
4783                                                 unsigned long zone_type,
4784                                                 unsigned long node_start_pfn,
4785                                                 unsigned long node_end_pfn,
4786                                                 unsigned long *zholes_size)
4787 {
4788         if (!zholes_size)
4789                 return 0;
4790 
4791         return zholes_size[zone_type];
4792 }
4793 
4794 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
4795 
4796 static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
4797                                                 unsigned long node_start_pfn,
4798                                                 unsigned long node_end_pfn,
4799                                                 unsigned long *zones_size,
4800                                                 unsigned long *zholes_size)
4801 {
4802         unsigned long realtotalpages, totalpages = 0;
4803         enum zone_type i;
4804 
4805         for (i = 0; i < MAX_NR_ZONES; i++)
4806                 totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
4807                                                          node_start_pfn,
4808                                                          node_end_pfn,
4809                                                          zones_size);
4810         pgdat->node_spanned_pages = totalpages;
4811 
4812         realtotalpages = totalpages;
4813         for (i = 0; i < MAX_NR_ZONES; i++)
4814                 realtotalpages -=
4815                         zone_absent_pages_in_node(pgdat->node_id, i,
4816                                                   node_start_pfn, node_end_pfn,
4817                                                   zholes_size);
4818         pgdat->node_present_pages = realtotalpages;
4819         printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
4820                                                         realtotalpages);
4821 }
4822 
4823 #ifndef CONFIG_SPARSEMEM
4824 /*
4825  * Calculate the size of the zone->blockflags rounded to an unsigned long.
4826  * Start by making sure zonesize is a multiple of pageblock_order by rounding
4827  * up. Then use one NR_PAGEBLOCK_BITS worth of bits per pageblock, round what
4828  * is now in bits up to the nearest long in bits, and finally return the
4829  * result in bytes.
4830  */
4831 static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
4832 {
4833         unsigned long usemapsize;
4834 
4835         zonesize += zone_start_pfn & (pageblock_nr_pages-1);
4836         usemapsize = roundup(zonesize, pageblock_nr_pages);
4837         usemapsize = usemapsize >> pageblock_order;
4838         usemapsize *= NR_PAGEBLOCK_BITS;
4839         usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
4840 
4841         return usemapsize / 8;
4842 }
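/*
 * Worked example, assuming NR_PAGEBLOCK_BITS == 4 and pageblock_order == 9:
 * a zone of 262144 pages starting on a pageblock boundary has 512
 * pageblocks, i.e. 2048 bits of flags; rounding up to whole unsigned longs
 * leaves that at 2048 bits, or 256 bytes of usemap.
 */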
4843 
4844 static void __init setup_usemap(struct pglist_data *pgdat,
4845                                 struct zone *zone,
4846                                 unsigned long zone_start_pfn,
4847                                 unsigned long zonesize)
4848 {
4849         unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
4850         zone->pageblock_flags = NULL;
4851         if (usemapsize)
4852                 zone->pageblock_flags =
4853                         memblock_virt_alloc_node_nopanic(usemapsize,
4854                                                          pgdat->node_id);
4855 }
4856 #else
4857 static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
4858                                 unsigned long zone_start_pfn, unsigned long zonesize) {}
4859 #endif /* CONFIG_SPARSEMEM */
4860 
4861 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
4862 
4863 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
4864 void __paginginit set_pageblock_order(void)
4865 {
4866         unsigned int order;
4867 
4868         /* Check that pageblock_order has not already been set up */
4869         if (pageblock_order)
4870                 return;
4871 
4872         if (HPAGE_SHIFT > PAGE_SHIFT)
4873                 order = HUGETLB_PAGE_ORDER;
4874         else
4875                 order = MAX_ORDER - 1;
4876 
4877         /*
4878          * Assume the largest contiguous order of interest is a huge page.
4879          * This value may be variable depending on boot parameters on IA64 and
4880          * powerpc.
4881          */
4882         pageblock_order = order;
4883 }
4884 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4885 
4886 /*
4887  * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
4888  * is unused as pageblock_order is set at compile-time. See
4889  * include/linux/pageblock-flags.h for the values of pageblock_order based on
4890  * the kernel config
4891  */
4892 void __paginginit set_pageblock_order(void)
4893 {
4894 }
4895 
4896 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4897 
4898 static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
4899                                                    unsigned long present_pages)
4900 {
4901         unsigned long pages = spanned_pages;
4902 
4903         /*
4904          * Provide a more accurate estimation if there are holes within
4905          * the zone and SPARSEMEM is in use. If there are holes within the
4906          * zone, each populated memory region may cost us one or two extra
4907          * memmap pages due to alignment, because the memmap pages for each
4908          * populated region may not be naturally aligned on a page boundary.
4909          * So the (present_pages >> 4) heuristic is a tradeoff for that.
4910          */
4911         if (spanned_pages > present_pages + (present_pages >> 4) &&
4912             IS_ENABLED(CONFIG_SPARSEMEM))
4913                 pages = present_pages;
4914 
4915         return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
4916 }
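/*
 * Worked example, assuming 4 KiB pages and sizeof(struct page) == 64: a zone
 * spanning 262144 pages needs 262144 * 64 bytes = 16 MiB of memmap, i.e.
 * 4096 pages. If, with SPARSEMEM, only 131072 of those pages were present,
 * spanned_pages would exceed present_pages + present_pages/16 and the
 * estimate would be based on the present pages instead: 2048 memmap pages.
 */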
4917 
4918 /*
4919  * Set up the zone data structures:
4920  *   - mark all pages reserved
4921  *   - mark all memory queues empty
4922  *   - clear the memory bitmaps
4923  *
4924  * NOTE: pgdat should get zeroed by caller.
4925  */
4926 static void __paginginit free_area_init_core(struct pglist_data *pgdat,
4927                 unsigned long node_start_pfn, unsigned long node_end_pfn,
4928                 unsigned long *zones_size, unsigned long *zholes_size)
4929 {
4930         enum zone_type j;
4931         int nid = pgdat->node_id;
4932         unsigned long zone_start_pfn = pgdat->node_start_pfn;
4933         int ret;
4934 
4935         pgdat_resize_init(pgdat);
4936 #ifdef CONFIG_NUMA_BALANCING
4937         spin_lock_init(&pgdat->numabalancing_migrate_lock);
4938         pgdat->numabalancing_migrate_nr_pages = 0;
4939         pgdat->numabalancing_migrate_next_window = jiffies;
4940 #endif
4941         init_waitqueue_head(&pgdat->kswapd_wait);
4942         init_waitqueue_head(&pgdat->pfmemalloc_wait);
4943         pgdat_page_ext_init(pgdat);
4944 
4945         for (j = 0; j < MAX_NR_ZONES; j++) {
4946                 struct zone *zone = pgdat->node_zones + j;
4947                 unsigned long size, realsize, freesize, memmap_pages;
4948 
4949                 size = zone_spanned_pages_in_node(nid, j, node_start_pfn,
4950                                                   node_end_pfn, zones_size);
4951                 realsize = freesize = size - zone_absent_pages_in_node(nid, j,
4952                                                                 node_start_pfn,
4953                                                                 node_end_pfn,
4954                                                                 zholes_size);
4955 
4956                 /*
4957                  * Adjust freesize so that it accounts for how much memory
4958                  * is used by this zone for memmap. This affects the watermark
4959                  * and per-cpu initialisations
4960                  */
4961                 memmap_pages = calc_memmap_size(size, realsize);
4962                 if (!is_highmem_idx(j)) {
4963                         if (freesize >= memmap_pages) {
4964                                 freesize -= memmap_pages;
4965                                 if (memmap_pages)
4966                                         printk(KERN_DEBUG
4967                                                "  %s zone: %lu pages used for memmap\n",
4968                                                zone_names[j], memmap_pages);
4969                         } else
4970                                 printk(KERN_WARNING
4971                                         "  %s zone: %lu pages exceeds freesize %lu\n",
4972                                         zone_names[j], memmap_pages, freesize);
4973                 }
4974 
4975                 /* Account for reserved pages */
4976                 if (j == 0 && freesize > dma_reserve) {
4977                         freesize -= dma_reserve;
4978                         printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
4979                                         zone_names[0], dma_reserve);
4980                 }
4981 
4982                 if (!is_highmem_idx(j))
4983                         nr_kernel_pages += freesize;
4984                 /* Charge for highmem memmap if there are enough kernel pages */
4985                 else if (nr_kernel_pages > memmap_pages * 2)
4986                         nr_kernel_pages -= memmap_pages;
4987                 nr_all_pages += freesize;
4988 
4989                 zone->spanned_pages = size;
4990                 zone->present_pages = realsize;
4991                 /*
4992                  * Set an approximate value for lowmem here, it will be adjusted
4993                  * when the bootmem allocator frees pages into the buddy system.
4994                  * And all highmem pages will be managed by the buddy system.
4995                  */
4996                 zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
4997 #ifdef CONFIG_NUMA
4998                 zone->node = nid;
4999                 zone->min_unmapped_pages = (freesize*sysctl_min_unmapped_ratio)
5000                                                 / 100;
5001                 zone->min_slab_pages = (freesize * sysctl_min_slab_ratio) / 100;
5002 #endif
5003                 zone->name = zone_names[j];
5004                 spin_lock_init(&zone->lock);
5005                 spin_lock_init(&zone->lru_lock);
5006                 zone_seqlock_init(zone);
5007                 zone->zone_pgdat = pgdat;
5008                 zone_pcp_init(zone);
5009 
5010                 /* For bootup, initialized properly in watermark setup */
5011                 mod_zone_page_state(zone, NR_ALLOC_BATCH, zone->managed_pages);
5012 
5013                 lruvec_init(&zone->lruvec);
5014                 if (!size)
5015                         continue;
5016 
5017                 set_pageblock_order();
5018                 setup_usemap(pgdat, zone, zone_start_pfn, size);
5019                 ret = init_currently_empty_zone(zone, zone_start_pfn,
5020                                                 size, MEMMAP_EARLY);
5021                 BUG_ON(ret);
5022                 memmap_init(size, nid, j, zone_start_pfn);
5023                 zone_start_pfn += size;
5024         }
5025 }
5026 
5027 static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
5028 {
5029         /* Skip empty nodes */
5030         if (!pgdat->node_spanned_pages)
5031                 return;
5032 
5033 #ifdef CONFIG_FLAT_NODE_MEM_MAP
5034         /* ia64 gets its own node_mem_map, before this, without bootmem */
5035         if (!pgdat->node_mem_map) {
5036                 unsigned long size, start, end;
5037                 struct page *map;
5038 
5039                 /*
5040                  * The zone's endpoints aren't required to be MAX_ORDER
5041                  * aligned, but the node_mem_map endpoints must be, in
5042                  * order for the buddy allocator to function correctly.
5043                  */
5044                 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
5045                 end = pgdat_end_pfn(pgdat);
5046                 end = ALIGN(end, MAX_ORDER_NR_PAGES);
5047                 size =  (end - start) * sizeof(struct page);
5048                 map = alloc_remap(pgdat->node_id, size);
5049                 if (!map)
5050                         map = memblock_virt_alloc_node_nopanic(size,
5051                                                                pgdat->node_id);
5052                 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
5053         }
5054 #ifndef CONFIG_NEED_MULTIPLE_NODES
5055         /*
5056          * With no DISCONTIG, the global mem_map is just set as node 0's
5057          */
5058         if (pgdat == NODE_DATA(0)) {
5059                 mem_map = NODE_DATA(0)->node_mem_map;
5060 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5061                 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
5062                         mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
5063 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
5064         }
5065 #endif
5066 #endif /* CONFIG_FLAT_NODE_MEM_MAP */
5067 }
5068 
5069 void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
5070                 unsigned long node_start_pfn, unsigned long *zholes_size)
5071 {
5072         pg_data_t *pgdat = NODE_DATA(nid);
5073         unsigned long start_pfn = 0;
5074         unsigned long end_pfn = 0;
5075 
5076         /* pg_data_t should be reset to zero when it's allocated */
5077         WARN_ON(pgdat->nr_zones || pgdat->classzone_idx);
5078 
5079         pgdat->node_id = nid;
5080         pgdat->node_start_pfn = node_start_pfn;
5081 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5082         get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
5083         pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
5084                 (u64)start_pfn << PAGE_SHIFT, ((u64)end_pfn << PAGE_SHIFT) - 1);
5085 #endif
5086         calculate_node_totalpages(pgdat, start_pfn, end_pfn,
5087                                   zones_size, zholes_size);
5088 
5089         alloc_node_mem_map(pgdat);
5090 #ifdef CONFIG_FLAT_NODE_MEM_MAP
5091         printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
5092                 nid, (unsigned long)pgdat,
5093                 (unsigned long)pgdat->node_mem_map);
5094 #endif
5095 
5096         free_area_init_core(pgdat, start_pfn, end_pfn,
5097                             zones_size, zholes_size);
5098 }
5099 
5100 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5101 
5102 #if MAX_NUMNODES > 1
5103 /*
5104  * Figure out the number of possible node ids.
5105  */
5106 void __init setup_nr_node_ids(void)
5107 {
5108         unsigned int node;
5109         unsigned int highest = 0;
5110 
5111         for_each_node_mask(node, node_possible_map)
5112                 highest = node;
5113         nr_node_ids = highest + 1;
5114 }
5115 #endif
5116 
5117 /**
5118  * node_map_pfn_alignment - determine the maximum internode alignment
5119  *
5120  * This function should be called after node map is populated and sorted.
5121  * It calculates the maximum power of two alignment which can distinguish
5122  * all the nodes.
5123  *
5124  * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
5125  * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)).  If the
5126  * nodes are shifted by 256MiB, 256MiB alignment is indicated.  Note that if
5127  * only the last node is shifted, 1GiB is enough and this function will indicate so.
5128  *
5129  * This is used to test whether pfn -> nid mapping of the chosen memory
5130  * model has fine enough granularity to avoid incorrect mapping for the
5131  * populated node map.
5132  *
5133  * Returns the determined alignment in pfns.  0 if there is no alignment
5134  * requirement (single node).
5135  */
5136 unsigned long __init node_map_pfn_alignment(void)
5137 {
5138         unsigned long accl_mask = 0, last_end = 0;
5139         unsigned long start, end, mask;
5140         int last_nid = -1;
5141         int i, nid;
5142 
5143         for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
5144                 if (!start || last_nid < 0 || last_nid == nid) {
5145                         last_nid = nid;
5146                         last_end = end;
5147                         continue;
5148                 }
5149 
5150                 /*
5151                  * Start with a mask granular enough to pin-point to the
5152                  * start pfn and tick off bits one-by-one until it becomes
5153                  * too coarse to separate the current node from the last.
5154                  */
5155                 mask = ~((1 << __ffs(start)) - 1);
5156                 while (mask && last_end <= (start & (mask << 1)))
5157                         mask <<= 1;
5158 
5159                 /* accumulate all internode masks */
5160                 accl_mask |= mask;
5161         }
5162 
5163         /* convert mask to number of pages */
5164         return ~accl_mask + 1;
5165 }
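/*
 * Worked example, with 4 KiB pages: if node 0 covers pfns [0, 0x40000) (the
 * first 1 GiB) and node 1 starts at pfn 0x50000 (1 GiB + 256 MiB), the inner
 * loop widens the mask for the second range until it covers 0x40000-pfn
 * chunks, so the function returns 0x40000 pfns, i.e. a 1 GiB alignment is
 * enough to tell the two nodes apart.
 */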
5166 
5167 /* Find the lowest pfn for a node */
5168 static unsigned long __init find_min_pfn_for_node(int nid)
5169 {
5170         unsigned long min_pfn = ULONG_MAX;
5171         unsigned long start_pfn;
5172         int i;
5173 
5174         for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
5175                 min_pfn = min(min_pfn, start_pfn);
5176 
5177         if (min_pfn == ULONG_MAX) {
5178                 printk(KERN_WARNING
5179                         "Could not find start_pfn for node %d\n", nid);
5180                 return 0;
5181         }
5182 
5183         return min_pfn;
5184 }
5185 
5186 /**
5187  * find_min_pfn_with_active_regions - Find the minimum PFN registered
5188  *
5189  * It returns the minimum PFN based on information provided via
5190  * memblock_set_node().
5191  */
5192 unsigned long __init find_min_pfn_with_active_regions(void)
5193 {
5194         return find_min_pfn_for_node(MAX_NUMNODES);
5195 }
5196 
5197 /*
5198  * early_calculate_totalpages()
5199  * Sum pages in active regions for movable zone.
5200  * Populate N_MEMORY for calculating usable_nodes.
5201  */
5202 static unsigned long __init early_calculate_totalpages(void)
5203 {
5204         unsigned long totalpages = 0;
5205         unsigned long start_pfn, end_pfn;
5206         int i, nid;
5207 
5208         for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
5209                 unsigned long pages = end_pfn - start_pfn;
5210 
5211                 totalpages += pages;
5212                 if (pages)
5213                         node_set_state(nid, N_MEMORY);
5214         }
5215         return totalpages;
5216 }
5217 
5218 /*
5219  * Find the PFN at which the Movable zone begins in each node. Kernel memory
5220  * is spread evenly between nodes as long as the nodes have enough
5221  * memory. When they don't, some nodes will have more kernelcore than
5222  * others.
5223  */
5224 static void __init find_zone_movable_pfns_for_nodes(void)
5225 {
5226         int i, nid;
5227         unsigned long usable_startpfn;
5228         unsigned long kernelcore_node, kernelcore_remaining;
5229         /* save the state before borrowing the nodemask */
5230         nodemask_t saved_node_state = node_states[N_MEMORY];
5231         unsigned long totalpages = early_calculate_totalpages();
5232         int usable_nodes = nodes_weight(node_states[N_MEMORY]);
5233         struct memblock_region *r;
5234 
5235         /* Need to find movable_zone earlier when movable_node is specified. */
5236         find_usable_zone_for_movable();
5237 
5238         /*
5239          * If movable_node is specified, ignore kernelcore and movablecore
5240          * options.
5241          */
5242         if (movable_node_is_enabled()) {
5243                 for_each_memblock(memory, r) {
5244                         if (!memblock_is_hotpluggable(r))
5245                                 continue;
5246 
5247                         nid = r->nid;
5248 
5249                         usable_startpfn = PFN_DOWN(r->base);
5250                         zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
5251                                 min(usable_startpfn, zone_movable_pfn[nid]) :
5252                                 usable_startpfn;
5253                 }
5254 
5255                 goto out2;
5256         }
5257 
5258         /*
5259          * If movablecore=nn[KMG] was specified, calculate the corresponding
5260          * size of kernelcore so that memory usable for
5261          * any allocation type is evenly spread. If both kernelcore
5262          * and movablecore are specified, then the value of kernelcore
5263          * will be used for required_kernelcore if it's greater than
5264          * what movablecore would have allowed.
5265          */
5266         if (required_movablecore) {
5267                 unsigned long corepages;
5268 
5269                 /*
5270                  * Round-up so that ZONE_MOVABLE is at least as large as what
5271                  * was requested by the user
5272                  */
5273                 required_movablecore =
5274                         roundup(required_movablecore, MAX_ORDER_NR_PAGES);
5275                 corepages = totalpages - required_movablecore;
5276 
5277                 required_kernelcore = max(required_kernelcore, corepages);
5278         }
5279 
5280         /* If kernelcore was not specified, there is no ZONE_MOVABLE */
5281         if (!required_kernelcore)
5282                 goto out;
5283 
5284         /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
5285         usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
5286 
5287 restart:
5288         /* Spread kernelcore memory as evenly as possible throughout nodes */
5289         kernelcore_node = required_kernelcore / usable_nodes;
5290         for_each_node_state(nid, N_MEMORY) {
5291                 unsigned long start_pfn, end_pfn;
5292 
5293                 /*
5294                  * Recalculate kernelcore_node if the division per node
5295                  * now exceeds what is necessary to satisfy the requested
5296                  * amount of memory for the kernel
5297                  */
5298                 if (required_kernelcore < kernelcore_node)
5299                         kernelcore_node = required_kernelcore / usable_nodes;
5300 
5301                 /*
5302                  * As the map is walked, we track how much memory is usable
5303                  * by the kernel using kernelcore_remaining. When it is
5304                  * 0, the rest of the node is usable by ZONE_MOVABLE
5305                  */
5306                 kernelcore_remaining = kernelcore_node;
5307 
5308                 /* Go through each range of PFNs within this node */
5309                 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
5310                         unsigned long size_pages;
5311 
5312                         start_pfn = max(start_pfn, zone_movable_pfn[nid]);
5313                         if (start_pfn >= end_pfn)
5314                                 continue;
5315 
5316                         /* Account for what is only usable for kernelcore */
5317                         if (start_pfn < usable_startpfn) {
5318                                 unsigned long kernel_pages;
5319                                 kernel_pages = min(end_pfn, usable_startpfn)
5320                                                                 - start_pfn;
5321 
5322                                 kernelcore_remaining -= min(kernel_pages,
5323                                                         kernelcore_remaining);
5324                                 required_kernelcore -= min(kernel_pages,
5325                                                         required_kernelcore);
5326 
5327                                 /* Continue if range is now fully accounted */
5328                                 if (end_pfn <= usable_startpfn) {
5329 
5330                                         /*
5331                                          * Push zone_movable_pfn to the end so
5332                                          * that if we have to rebalance
5333                                          * kernelcore across nodes, we will
5334                                          * not double account here
5335                                          */
5336                                         zone_movable_pfn[nid] = end_pfn;
5337                                         continue;
5338                                 }
5339                                 start_pfn = usable_startpfn;
5340                         }
5341 
5342                         /*
5343                          * The usable PFN range for ZONE_MOVABLE is from
5344                          * start_pfn->end_pfn. Calculate size_pages as the
5345                          * number of pages used as kernelcore
5346                          */
5347                         size_pages = end_pfn - start_pfn;
5348                         if (size_pages > kernelcore_remaining)
5349                                 size_pages = kernelcore_remaining;
5350                         zone_movable_pfn[nid] = start_pfn + size_pages;
5351 
5352                         /*
5353                          * Some kernelcore has been met, update counts and
5354                          * break if the kernelcore for this node has been
5355                          * satisfied
5356                          */
5357                         required_kernelcore -= min(required_kernelcore,
5358                                                                 size_pages);
5359                         kernelcore_remaining -= size_pages;
5360                         if (!kernelcore_remaining)
5361                                 break;
5362                 }
5363         }
5364 
5365         /*
5366          * If there is still required_kernelcore, we do another pass with one
5367          * less node in the count. This will push zone_movable_pfn[nid] further
5368          * along on the nodes that still have memory until kernelcore is
5369          * satisfied
5370          */
5371         usable_nodes--;
5372         if (usable_nodes && required_kernelcore > usable_nodes)
5373                 goto restart;
5374 
5375 out2:
5376         /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
5377         for (nid = 0; nid < MAX_NUMNODES; nid++)
5378                 zone_movable_pfn[nid] =
5379                         roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
5380 
5381 out:
5382         /* restore the node_state */
5383         node_states[N_MEMORY] = saved_node_state;
5384 }
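/*
 * Rough illustration, assuming 4 KiB pages, no memory holes, both nodes
 * lying entirely within the highest usable zone, and neither movable_node
 * nor movablecore set: with two 1 GiB nodes (262144 pages each) and
 * kernelcore=1G, required_kernelcore is 262144 pages, each node is asked for
 * 131072 of them, and zone_movable_pfn[] ends up pointing roughly half way
 * into each node (rounded up to MAX_ORDER_NR_PAGES).
 */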
5385 
5386 /* Any regular or high memory on that node? */
5387 static void check_for_memory(pg_data_t *pgdat, int nid)
5388 {
5389         enum zone_type zone_type;
5390 
5391         if (N_MEMORY == N_NORMAL_MEMORY)
5392                 return;
5393 
5394         for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
5395                 struct zone *zone = &pgdat->node_zones[zone_type];
5396                 if (populated_zone(zone)) {
5397                         node_set_state(nid, N_HIGH_MEMORY);
5398                         if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
5399                             zone_type <= ZONE_NORMAL)
5400                                 node_set_state(nid, N_NORMAL_MEMORY);
5401                         break;
5402                 }
5403         }
5404 }
5405 
5406 /**
5407  * free_area_init_nodes - Initialise all pg_data_t and zone data
5408  * @max_zone_pfn: an array of max PFNs for each zone
5409  *
5410  * This will call free_area_init_node() for each active node in the system.
5411  * Using the page ranges provided by memblock_set_node(), the size of each
5412  * zone in each node and their holes are calculated. If the maximum PFNs of
5413  * two adjacent zones match, it is assumed that the higher zone is empty.
5414  * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
5415  * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
5416  * starts where the previous one ended. For example, ZONE_DMA32 starts
5417  * at arch_max_dma_pfn.
5418  */
5419 void __init free_area_init_nodes(unsigned long *max_zone_pfn)
5420 {
5421         unsigned long start_pfn, end_pfn;
5422         int i, nid;
5423 
5424         /* Record where the zone boundaries are */
5425         memset(arch_zone_lowest_possible_pfn, 0,
5426                                 sizeof(arch_zone_lowest_possible_pfn));
5427         memset(arch_zone_highest_possible_pfn, 0,
5428                                 sizeof(arch_zone_highest_possible_pfn));
5429         arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
5430         arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
5431         for (i = 1; i < MAX_NR_ZONES; i++) {
5432                 if (i == ZONE_MOVABLE)
5433                         continue;
5434                 arch_zone_lowest_possible_pfn[i] =
5435                         arch_zone_highest_possible_pfn[i-1];
5436                 arch_zone_highest_possible_pfn[i] =
5437                         max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
5438         }
5439         arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
5440         arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
5441 
5442         /* Find the PFNs that ZONE_MOVABLE begins at in each node */
5443         memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
5444         find_zone_movable_pfns_for_nodes();
5445 
5446         /* Print out the zone ranges */
5447         pr_info("Zone ranges:\n");
5448         for (i = 0; i < MAX_NR_ZONES; i++) {
5449                 if (i == ZONE_MOVABLE)
5450                         continue;
5451                 pr_info("  %-8s ", zone_names[i]);
5452                 if (arch_zone_lowest_possible_pfn[i] ==
5453                                 arch_zone_highest_possible_pfn[i])
5454                         pr_cont("empty\n");
5455                 else
5456                         pr_cont("[mem %#018Lx-%#018Lx]\n",
5457                                 (u64)arch_zone_lowest_possible_pfn[i]
5458                                         << PAGE_SHIFT,
5459                                 ((u64)arch_zone_highest_possible_pfn[i]
5460                                         << PAGE_SHIFT) - 1);
5461         }
5462 
5463         /* Print out the PFNs ZONE_MOVABLE begins at in each node */
5464         pr_info("Movable zone start for each node\n");
5465         for (i = 0; i < MAX_NUMNODES; i++) {
5466                 if (zone_movable_pfn[i])
5467                         pr_info("  Node %d: %#018Lx\n", i,
5468                                (u64)zone_movable_pfn[i] << PAGE_SHIFT);
5469         }
5470 
5471         /* Print out the early node map */
5472         pr_info("Early memory node ranges\n");
5473         for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
5474                 pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
5475                         (u64)start_pfn << PAGE_SHIFT,
5476                         ((u64)end_pfn << PAGE_SHIFT) - 1);
5477 
5478         /* Initialise every node */
5479         mminit_verify_pageflags_layout();
5480         setup_nr_node_ids();
5481         for_each_online_node(nid) {
5482                 pg_data_t *pgdat = NODE_DATA(nid);
5483                 free_area_init_node(nid, NULL,
5484                                 find_min_pfn_for_node(nid), NULL);
5485 
5486                 /* Any memory on that node */
5487                 if (pgdat->node_present_pages)
5488                         node_set_state(nid, N_MEMORY);
5489                 check_for_memory(pgdat, nid);
5490         }
5491 }
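/*
 * Sketch of a typical caller, loosely modelled on x86 zone setup; the
 * constants are placeholders for whatever limits the architecture defines:
 *
 *      unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
 *
 *      max_zone_pfn[ZONE_DMA]    = MAX_DMA_PFN;
 *      max_zone_pfn[ZONE_DMA32]  = MAX_DMA32_PFN;
 *      max_zone_pfn[ZONE_NORMAL] = max_pfn;
 *      free_area_init_nodes(max_zone_pfn);
 */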
5492 
5493 static int __init cmdline_parse_core(char *p, unsigned long *core)
5494 {
5495         unsigned long long coremem;
5496         if (!p)
5497                 return -EINVAL;
5498 
5499         coremem = memparse(p, &p);
5500         *core = coremem >> PAGE_SHIFT;
5501 
5502         /* Paranoid check that UL is enough for the coremem value */
5503         WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
5504 
5505         return 0;
5506 }
5507 
5508 /*
5509  * kernelcore=size sets the amount of memory to be used for allocations that
5510  * cannot be reclaimed or migrated.
5511  */
5512 static int __init cmdline_parse_kernelcore(char *p)
5513 {
5514         return cmdline_parse_core(p, &required_kernelcore);
5515 }
5516 
5517 /*
5518  * movablecore=size sets the amount of memory to be used for allocations that
5519  * can be reclaimed or migrated.
5520  */
5521 static int __init cmdline_parse_movablecore(char *p)
5522 {
5523         return cmdline_parse_core(p, &required_movablecore);
5524 }
5525 
5526 early_param("kernelcore", cmdline_parse_kernelcore);
5527 early_param("movablecore", cmdline_parse_movablecore);
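/*
 * Example usage (the sizes accept the usual memparse suffixes): booting with
 * "kernelcore=512M" asks for 131072 pages (with 4 KiB pages) of non-movable
 * memory spread across the nodes, while "movablecore=2G" instead bounds the
 * non-movable portion so that roughly 2 GiB ends up in ZONE_MOVABLE.
 */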
5528 
5529 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
5530 
5531 void adjust_managed_page_count(struct page *page, long count)
5532 {
5533         spin_lock(&managed_page_count_lock);
5534         page_zone(page)->managed_pages += count;
5535         totalram_pages += count;
5536 #ifdef CONFIG_HIGHMEM
5537         if (PageHighMem(page))
5538                 totalhigh_pages += count;
5539 #endif
5540         spin_unlock(&managed_page_count_lock);
5541 }
5542 EXPORT_SYMBOL(adjust_managed_page_count);
5543 
5544 unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
5545 {
5546         void *pos;
5547         unsigned long pages = 0;
5548 
5549         start = (void *)PAGE_ALIGN((unsigned long)start);
5550         end = (void *)((unsigned long)end & PAGE_MASK);
5551         for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
5552                 if ((unsigned int)poison <= 0xFF)
5553                         memset(pos, poison, PAGE_SIZE);
5554                 free_reserved_page(virt_to_page(pos));
5555         }
5556 
5557         if (pages && s)
5558                 pr_info("Freeing %s memory: %ldK\n",
5559                         s, pages << (PAGE_SHIFT - 10));
5560 
5561         return pages;
5562 }
5563 EXPORT_SYMBOL(free_reserved_area);
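
/*
 * Illustrative note (not in the original source): the check
 * (unsigned int)poison <= 0xFF above means that a byte value such as 0x00
 * or 0xcc causes every page to be memset() before it is freed, while a
 * caller passing -1 skips the poisoning pass, since (unsigned int)-1 is
 * larger than 0xFF. The pr_info() converts the page count to KiB with
 * pages << (PAGE_SHIFT - 10).
 */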
5564 
5565 #ifdef  CONFIG_HIGHMEM
5566 void free_highmem_page(struct page *page)
5567 {
5568         __free_reserved_page(page);
5569         totalram_pages++;
5570         page_zone(page)->managed_pages++;
5571         totalhigh_pages++;
5572 }
5573 #endif
5574 
5575 
5576 void __init mem_init_print_info(const char *str)
5577 {
5578         unsigned long physpages, codesize, datasize, rosize, bss_size;
5579         unsigned long init_code_size, init_data_size;
5580 
5581         physpages = get_num_physpages();
5582         codesize = _etext - _stext;
5583         datasize = _edata - _sdata;
5584         rosize = __end_rodata - __start_rodata;
5585         bss_size = __bss_stop - __bss_start;
5586         init_data_size = __init_end - __init_begin;
5587         init_code_size = _einittext - _sinittext;
5588 
5589         /*
5590          * Detect special cases and adjust section sizes accordingly:
5591          * 1) .init.* may be embedded into .data sections
5592          * 2) .init.text.* may be out of [__init_begin, __init_end],
5593          *    please refer to arch/tile/kernel/vmlinux.lds.S.
5594          * 3) .rodata.* may be embedded into .text or .data sections.
5595          */
5596 #define adj_init_size(start, end, size, pos, adj) \
5597         do { \
5598                 if (start <= pos && pos < end && size > adj) \
5599                         size -= adj; \
5600         } while (0)
5601 
5602         adj_init_size(__init_begin, __init_end, init_data_size,
5603                      _sinittext, init_code_size);
5604         adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
5605         adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
5606         adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
5607         adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
5608 
5609 #undef  adj_init_size
5610 
5611         pr_info("Memory: %luK/%luK available "
5612                "(%luK kernel code, %luK rwdata, %luK rodata, "
5613                "%luK init, %luK bss, %luK reserved, %luK cma-reserved"
5614 #ifdef  CONFIG_HIGHMEM
5615                ", %luK highmem"
5616 #endif
5617                "%s%s)\n",
5618                nr_free_pages() << (PAGE_SHIFT-10), physpages << (PAGE_SHIFT-10),
5619                codesize >> 10, datasize >> 10, rosize >> 10,
5620                (init_data_size + init_code_size) >> 10, bss_size >> 10,
5621                (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT-10),
5622                totalcma_pages << (PAGE_SHIFT-10),
5623 #ifdef  CONFIG_HIGHMEM
5624                totalhigh_pages << (PAGE_SHIFT-10),
5625 #endif
5626                str ? ", " : "", str ? str : "");
5627 }
5628 
5629 /**
5630  * set_dma_reserve - set the specified number of pages reserved in the first zone
5631  * @new_dma_reserve: The number of pages to mark reserved
5632  *
5633  * The per-cpu batchsize and zone watermarks are determined by present_pages.
5634  * In the DMA zone, a significant percentage may be consumed by kernel image
5635  * and other unfreeable allocations which can skew the watermarks badly. This
5636  * function may optionally be used to account for unfreeable pages in the
5637  * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
5638  * smaller per-cpu batchsize.
5639  */
5640 void __init set_dma_reserve(unsigned long new_dma_reserve)
5641 {
5642         dma_reserve = new_dma_reserve;
5643 }
5644 
5645 void __init free_area_init(unsigned long *zones_size)
5646 {
5647         free_area_init_node(0, zones_size,
5648                         __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
5649 }
5650 
5651 static int page_alloc_cpu_notify(struct notifier_block *self,
5652                                  unsigned long action, void *hcpu)
5653 {
5654         int cpu = (unsigned long)hcpu;
5655 
5656         if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
5657                 lru_add_drain_cpu(cpu);
5658                 drain_pages(cpu);
5659 
5660                 /*
5661                  * Spill the event counters of the dead processor
5662                  * into the current processor's event counters.
5663                  * This artificially elevates the count of the current
5664                  * processor.
5665                  */
5666                 vm_events_fold_cpu(cpu);
5667 
5668                 /*
5669                  * Zero the differential counters of the dead processor
5670                  * so that the vm statistics are consistent.
5671                  *
5672                  * This is only okay since the processor is dead and cannot
5673                  * race with what we are doing.
5674                  */
5675                 cpu_vm_stats_fold(cpu);
5676         }
5677         return NOTIFY_OK;
5678 }
5679 
5680 void __init page_alloc_init(void)
5681 {
5682         hotcpu_notifier(page_alloc_cpu_notify, 0);
5683 }
5684 
5685 /*
5686  * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
5687  *      or min_free_kbytes changes.
5688  */
5689 static void calculate_totalreserve_pages(void)
5690 {
5691         struct pglist_data *pgdat;
5692         unsigned long reserve_pages = 0;
5693         enum zone_type i, j;
5694 
5695         for_each_online_pgdat(pgdat) {
5696                 for (i = 0; i < MAX_NR_ZONES; i++) {
5697                         struct zone *zone = pgdat->node_zones + i;
5698                         long max = 0;
5699 
5700                         /* Find valid and maximum lowmem_reserve in the zone */
5701                         for (j = i; j < MAX_NR_ZONES; j++) {
5702                                 if (zone->lowmem_reserve[j] > max)
5703                                         max = zone->lowmem_reserve[j];
5704                         }
5705 
5706                         /* we treat the high watermark as reserved pages. */
5707                         max += high_wmark_pages(zone);
5708 
5709                         if (max > zone->managed_pages)
5710                                 max = zone->managed_pages;
5711                         reserve_pages += max;
5712                         /*
5713                          * Lowmem reserves are not available to
5714                          * GFP_HIGHUSER page cache allocations and
5715                          * kswapd tries to balance zones to their high
5716                          * watermark.  As a result, neither should be
5717                          * regarded as dirtyable memory, to prevent a
5718                          * situation where reclaim has to clean pages
5719                          * in order to balance the zones.
5720                          */
5721                         zone->dirty_balance_reserve = max;
5722                 }
5723         }
5724         dirty_balance_reserve = reserve_pages;
5725         totalreserve_pages = reserve_pages;
5726 }
5727 
5728 /*
5729  * setup_per_zone_lowmem_reserve - called whenever
5730  *      sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
5731  *      has a correct pages reserved value, so an adequate number of
5732  *      pages are left in the zone after a successful __alloc_pages().
5733  */
5734 static void setup_per_zone_lowmem_reserve(void)
5735 {
5736         struct pglist_data *pgdat;
5737         enum zone_type j, idx;
5738 
5739         for_each_online_pgdat(pgdat) {
5740                 for (j = 0; j < MAX_NR_ZONES; j++) {
5741                         struct zone *zone = pgdat->node_zones + j;
5742                         unsigned long managed_pages = zone->managed_pages;
5743 
5744                         zone->lowmem_reserve[j] = 0;
5745 
5746                         idx = j;
5747                         while (idx) {
5748                                 struct zone *lower_zone;
5749 
5750                                 idx--;
5751 
5752                                 if (sysctl_lowmem_reserve_ratio[idx] < 1)
5753                                         sysctl_lowmem_reserve_ratio[idx] = 1;
5754 
5755                                 lower_zone = pgdat->node_zones + idx;
5756                                 lower_zone->lowmem_reserve[j] = managed_pages /
5757                                         sysctl_lowmem_reserve_ratio[idx];
5758                                 managed_pages += lower_zone->managed_pages;
5759                         }
5760                 }
5761         }
5762 
5763         /* update totalreserve_pages */
5764         calculate_totalreserve_pages();
5765 }
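
/*
 * A worked example of the loop above (illustrative numbers, not from the
 * source): consider a node with ZONE_DMA (4000 managed pages) below
 * ZONE_NORMAL (200000 managed pages), and assume
 * sysctl_lowmem_reserve_ratio[ZONE_DMA] is 256. For j == ZONE_NORMAL,
 * managed_pages starts at 200000 and the inner loop sets
 *
 *     DMA->lowmem_reserve[ZONE_NORMAL] = 200000 / 256 = 781 pages,
 *
 * i.e. an allocation that could have been satisfied from ZONE_NORMAL must
 * leave roughly 781 pages of ZONE_DMA untouched before falling back to it.
 */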
5766 
5767 static void __setup_per_zone_wmarks(void)
5768 {
5769         unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
5770         unsigned long lowmem_pages = 0;
5771         struct zone *zone;
5772         unsigned long flags;
5773 
5774         /* Calculate total number of !ZONE_HIGHMEM pages */
5775         for_each_zone(zone) {
5776                 if (!is_highmem(zone))
5777                         lowmem_pages += zone->managed_pages;
5778         }
5779 
5780         for_each_zone(zone) {
5781                 u64 tmp;
5782 
5783                 spin_lock_irqsave(&zone->lock, flags);
5784                 tmp = (u64)pages_min * zone->managed_pages;
5785                 do_div(tmp, lowmem_pages);
5786                 if (is_highmem(zone)) {
5787                         /*
5788                          * __GFP_HIGH and PF_MEMALLOC allocations usually don't
5789                          * need highmem pages, so cap pages_min to a small
5790                          * value here.
5791                          *
5792                          * The (WMARK_HIGH - WMARK_LOW) and (WMARK_LOW - WMARK_MIN)
5793                          * deltas control async page reclaim, and so should
5794                          * not be capped for highmem.
5795                          */
5796                         unsigned long min_pages;
5797 
5798                         min_pages = zone->managed_pages / 1024;
5799                         min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
5800                         zone->watermark[WMARK_MIN] = min_pages;
5801                 } else {
5802                         /*
5803                          * If it's a lowmem zone, reserve a number of pages
5804                          * proportionate to the zone's size.
5805                          */
5806                         zone->watermark[WMARK_MIN] = tmp;
5807                 }
5808 
5809                 zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
5810                 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
5811 
5812                 __mod_zone_page_state(zone, NR_ALLOC_BATCH,
5813                         high_wmark_pages(zone) - low_wmark_pages(zone) -
5814                         atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
5815 
5816                 setup_zone_migrate_reserve(zone);
5817                 spin_unlock_irqrestore(&zone->lock, flags);
5818         }
5819 
5820         /* update totalreserve_pages */
5821         calculate_totalreserve_pages();
5822 }
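
/*
 * A worked example of the arithmetic above (illustrative numbers): with
 * min_free_kbytes = 4096 and 4 KiB pages, pages_min = 4096 >> 2 = 1024.
 * For a lowmem zone holding half of all lowmem
 * (managed_pages / lowmem_pages = 1/2), tmp = 512, so
 *
 *     WMARK_MIN  = 512
 *     WMARK_LOW  = 512 + (512 >> 2) = 640
 *     WMARK_HIGH = 512 + (512 >> 1) = 768
 */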
5823 
5824 /**
5825  * setup_per_zone_wmarks - called when min_free_kbytes changes
5826  * or when memory is hot-{added|removed}
5827  *
5828  * Ensures that the watermark[min,low,high] values for each zone are set
5829  * correctly with respect to min_free_kbytes.
5830  */
5831 void setup_per_zone_wmarks(void)
5832 {
5833         mutex_lock(&zonelists_mutex);
5834         __setup_per_zone_wmarks();
5835         mutex_unlock(&zonelists_mutex);
5836 }
5837 
5838 /*
5839  * The inactive anon list should be small enough that the VM never has to
5840  * do too much work, but large enough that each inactive page has a chance
5841  * to be referenced again before it is swapped out.
5842  *
5843  * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
5844  * INACTIVE_ANON pages on this zone's LRU, maintained by the
5845  * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
5846  * the anonymous pages are kept on the inactive list.
5847  *
5848  * total     target    max
5849  * memory    ratio     inactive anon
5850  * -------------------------------------
5851  *   10MB       1         5MB
5852  *  100MB       1        50MB
5853  *    1GB       3       250MB
5854  *   10GB      10       0.9GB
5855  *  100GB      31         3GB
5856  *    1TB     101        10GB
5857  *   10TB     320        32GB
5858  */
5859 static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
5860 {
5861         unsigned int gb, ratio;
5862 
5863         /* Zone size in gigabytes */
5864         gb = zone->managed_pages >> (30 - PAGE_SHIFT);
5865         if (gb)
5866                 ratio = int_sqrt(10 * gb);
5867         else
5868                 ratio = 1;
5869 
5870         zone->inactive_ratio = ratio;
5871 }
5872 
5873 static void __meminit setup_per_zone_inactive_ratio(void)
5874 {
5875         struct zone *zone;
5876 
5877         for_each_zone(zone)
5878                 calculate_zone_inactive_ratio(zone);
5879 }
5880 
5881 /*
5882  * Initialise min_free_kbytes.
5883  *
5884  * For small machines we want it small (128k min).  For large machines
5885  * we want it large (64MB max).  But it is not linear, because network
5886  * bandwidth does not increase linearly with machine size.  We use
5887  *
5888  *      min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
5889  *      min_free_kbytes = sqrt(lowmem_kbytes * 16)
5890  *
5891  * which yields
5892  *
5893  * 16MB:        512k
5894  * 32MB:        724k
5895  * 64MB:        1024k
5896  * 128MB:       1448k
5897  * 256MB:       2048k
5898  * 512MB:       2896k
5899  * 1024MB:      4096k
5900  * 2048MB:      5792k
5901  * 4096MB:      8192k
5902  * 8192MB:      11584k
5903  * 16384MB:     16384k
5904  */
5905 int __meminit init_per_zone_wmark_min(void)
5906 {
5907         unsigned long lowmem_kbytes;
5908         int new_min_free_kbytes;
5909 
5910         lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
5911         new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
5912 
5913         if (new_min_free_kbytes > user_min_free_kbytes) {
5914                 min_free_kbytes = new_min_free_kbytes;
5915                 if (min_free_kbytes < 128)
5916                         min_free_kbytes = 128;
5917                 if (min_free_kbytes > 65536)
5918                         min_free_kbytes = 65536;
5919         } else {
5920                 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
5921                                 new_min_free_kbytes, user_min_free_kbytes);
5922         }
5923         setup_per_zone_wmarks();
5924         refresh_zone_stat_thresholds();
5925         setup_per_zone_lowmem_reserve();
5926         setup_per_zone_inactive_ratio();
5927         return 0;
5928 }
5929 core_initcall(init_per_zone_wmark_min)
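
/*
 * Worked example of the clamping above (illustrative): 16 GiB of lowmem
 * gives lowmem_kbytes = 16777216 and sqrt(16777216 * 16) = 16384k, matching
 * the table; 1 TiB of lowmem would give sqrt(1073741824 * 16) = 131072k,
 * which is then clamped to the 65536k (64MB) ceiling.
 */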
5930 
5931 /*
5932  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec_minmax()
5933  *      so that we can update the per-zone watermarks whenever min_free_kbytes
5934  *      changes.
5935  */
5936 int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
5937         void __user *buffer, size_t *length, loff_t *ppos)
5938 {
5939         int rc;
5940 
5941         rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5942         if (rc)
5943                 return rc;
5944 
5945         if (write) {
5946                 user_min_free_kbytes = min_free_kbytes;
5947                 setup_per_zone_wmarks();
5948         }
5949         return 0;
5950 }
5951 
5952 #ifdef CONFIG_NUMA
5953 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
5954         void __user *buffer, size_t *length, loff_t *ppos)
5955 {
5956         struct zone *zone;
5957         int rc;
5958 
5959         rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5960         if (rc)
5961                 return rc;
5962 
5963         for_each_zone(zone)
5964                 zone->min_unmapped_pages = (zone->managed_pages *
5965                                 sysctl_min_unmapped_ratio) / 100;
5966         return 0;
5967 }
5968 
5969 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
5970         void __user *buffer, size_t *length, loff_t *ppos)
5971 {
5972         struct zone *zone;
5973         int rc;
5974 
5975         rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5976         if (rc)
5977                 return rc;
5978 
5979         for_each_zone(zone)
5980                 zone->min_slab_pages = (zone->managed_pages *
5981                                 sysctl_min_slab_ratio) / 100;
5982         return 0;
5983 }
5984 #endif
5985 
5986 /*
5987  * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
5988  *      proc_dointvec_minmax() so that we can call setup_per_zone_lowmem_reserve()
5989  *      whenever sysctl_lowmem_reserve_ratio changes.
5990  *
5991  * The reserve ratio obviously has absolutely no relation with the
5992  * minimum watermarks. The lowmem reserve ratio only makes sense as
5993  * a function of the boot-time zone sizes.
5994  */
5995 int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
5996         void __user *buffer, size_t *length, loff_t *ppos)
5997 {
5998         proc_dointvec_minmax(table, write, buffer, length, ppos);
5999         setup_per_zone_lowmem_reserve();
6000         return 0;
6001 }
6002 
6003 /*
6004  * percpu_pagelist_fraction - changes the pcp->high for each zone on each
6005  * cpu.  It is the fraction of total pages in each zone that a hot per-cpu
6006  * pagelist can have before it gets flushed back to the buddy allocator.
6007  */
6008 int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
6009         void __user *buffer, size_t *length, loff_t *ppos)
6010 {
6011         struct zone *zone;
6012         int old_percpu_pagelist_fraction;
6013         int ret;
6014 
6015         mutex_lock(&pcp_batch_high_lock);
6016         old_percpu_pagelist_fraction = percpu_pagelist_fraction;
6017 
6018         ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
6019         if (!write || ret < 0)
6020                 goto out;
6021 
6022         /* Sanity checking to avoid pcp imbalance */
6023         if (percpu_pagelist_fraction &&
6024             percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
6025                 percpu_pagelist_fraction = old_percpu_pagelist_fraction;
6026                 ret = -EINVAL;
6027                 goto out;
6028         }
6029 
6030         /* No change? */
6031         if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
6032                 goto out;
6033 
6034         for_each_populated_zone(zone) {
6035                 unsigned int cpu;
6036 
6037                 for_each_possible_cpu(cpu)
6038                         pageset_set_high_and_batch(zone,
6039                                         per_cpu_ptr(zone->pageset, cpu));
6040         }
6041 out:
6042         mutex_unlock(&pcp_batch_high_lock);
6043         return ret;
6044 }
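
/*
 * Illustrative example (not from the source), assuming
 * pageset_set_high_and_batch() earlier in this file sizes pcp->high as
 * zone->managed_pages / percpu_pagelist_fraction when the fraction is
 * non-zero: writing 8 to this sysctl on a zone with 1048576 managed pages
 * lets each CPU's hot pagelist grow to roughly 131072 pages before pages
 * are handed back to the buddy allocator.
 */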
6045 
6046 int hashdist = HASHDIST_DEFAULT;
6047 
6048 #ifdef CONFIG_NUMA
6049 static int __init set_hashdist(char *str)
6050 {
6051         if (!str)
6052                 return 0;
6053         hashdist = simple_strtoul(str, &str, 0);
6054         return 1;
6055 }
6056 __setup("hashdist=", set_hashdist);
6057 #endif
6058 
6059 /*
6060  * allocate a large system hash table from bootmem
6061  * - it is assumed that the hash table must contain an exact power-of-2
6062  *   quantity of entries
6063  * - limit is the number of hash buckets, not the total allocation size
6064  */
6065 void *__init alloc_large_system_hash(const char *tablename,
6066                                      unsigned long bucketsize,
6067                                      unsigned long numentries,
6068                                      int scale,
6069                                      int flags,
6070                                      unsigned int *_hash_shift,
6071                                      unsigned int *_hash_mask,
6072                                      unsigned long low_limit,
6073                                      unsigned long high_limit)
6074 {
6075         unsigned long long max = high_limit;
6076         unsigned long log2qty, size;
6077         void *table = NULL;
6078 
6079         /* allow the kernel cmdline to have a say */
6080         if (!numentries) {
6081                 /* round applicable memory size up to nearest megabyte */
6082                 numentries = nr_kernel_pages;
6083 
6084                 /* It isn't necessary when PAGE_SIZE >= 1MB */
6085                 if (PAGE_SHIFT < 20)
6086                         numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
6087 
6088                 /* limit to 1 bucket per 2^scale bytes of low memory */
6089                 if (scale > PAGE_SHIFT)
6090                         numentries >>= (scale - PAGE_SHIFT);
6091                 else
6092                         numentries <<= (PAGE_SHIFT - scale);
6093 
6094                 /* Make sure we've got at least a 0-order allocation. */
6095                 if (unlikely(flags & HASH_SMALL)) {
6096                         /* Makes no sense without HASH_EARLY */
6097                         WARN_ON(!(flags & HASH_EARLY));
6098                         if (!(numentries >> *_hash_shift)) {
6099                                 numentries = 1UL << *_hash_shift;
6100                                 BUG_ON(!numentries);
6101                         }
6102                 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
6103                         numentries = PAGE_SIZE / bucketsize;
6104         }
6105         numentries = roundup_pow_of_two(numentries);
6106 
6107         /* limit allocation size to 1/16 total memory by default */
6108         if (max == 0) {
6109                 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
6110                 do_div(max, bucketsize);
6111         }
6112         max = min(max, 0x80000000ULL);
6113 
6114         if (numentries < low_limit)
6115                 numentries = low_limit;
6116         if (numentries > max)
6117                 numentries = max;
6118 
6119         log2qty = ilog2(numentries);
6120 
6121         do {
6122                 size = bucketsize << log2qty;
6123                 if (flags & HASH_EARLY)
6124                         table = memblock_virt_alloc_nopanic(size, 0);
6125                 else if (hashdist)
6126                         table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
6127                 else {
6128                         /*
6129                          * If bucketsize is not a power of two, we may free
6130                          * some pages at the end of the hash table, which
6131                          * alloc_pages_exact() does automatically.
6132                          */
6133                         if (get_order(size) < MAX_ORDER) {
6134                                 table = alloc_pages_exact(size, GFP_ATOMIC);
6135                                 kmemleak_alloc(table, size, 1, GFP_ATOMIC);
6136                         }
6137                 }
6138         } while (!table && size > PAGE_SIZE && --log2qty);
6139 
6140         if (!table)
6141                 panic("Failed to allocate %s hash table\n", tablename);
6142 
6143         printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n",
6144                tablename,
6145                (1UL << log2qty),
6146                ilog2(size) - PAGE_SHIFT,
6147                size);
6148 
6149         if (_hash_shift)
6150                 *_hash_shift = log2qty;
6151         if (_hash_mask)
6152                 *_hash_mask = (1 << log2qty) - 1;
6153 
6154         return table;
6155 }
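
/*
 * A worked sizing example (illustrative numbers, assuming 4 KiB pages):
 * with numentries == 0, nr_kernel_pages = 1048576 (4 GiB), scale = 17 and
 * bucketsize = 64:
 *
 *     numentries = 1048576 >> (17 - 12) = 32768   (already a power of two)
 *     log2qty    = 15
 *     size       = 64 << 15 = 2 MiB  ->  "order: 9" in the printk above
 *
 * If that allocation failed, the do/while loop retries with log2qty reduced
 * to 14, halving the table, until an allocation succeeds or size reaches
 * PAGE_SIZE.
 */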
6156 
6157 /* Return a pointer to the bitmap storing bits affecting a block of pages */
6158 static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
6159                                                         unsigned long pfn)
6160 {
6161 #ifdef CONFIG_SPARSEMEM
6162         return __pfn_to_section(pfn)->pageblock_flags;
6163 #else
6164         return zone->pageblock_flags;
6165 #endif /* CONFIG_SPARSEMEM */
6166 }
6167 
6168 static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
6169 {
6170 #ifdef CONFIG_SPARSEMEM
6171         pfn &= (PAGES_PER_SECTION-1);
6172         return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
6173 #else
6174         pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages);
6175         return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
6176 #endif /* CONFIG_SPARSEMEM */
6177 }
6178 
6179 /**
6180  * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
6181  * @page: The page within the block of interest
6182  * @pfn: The target page frame number
6183  * @end_bitidx: The last bit of interest to retrieve
6184  * @mask: mask of bits that the caller is interested in
6185  *
6186  * Return: pageblock_bits flags
6187  */
6188 unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
6189                                         unsigned long end_bitidx,
6190                                         unsigned long mask)
6191 {
6192         struct zone *zone;
6193         unsigned long *bitmap;
6194         unsigned long bitidx, word_bitidx;
6195         unsigned long word;
6196 
6197         zone = page_zone(page);
6198         bitmap = get_pageblock_bitmap(zone, pfn);
6199         bitidx = pfn_to_bitidx(zone, pfn);
6200         word_bitidx = bitidx / BITS_PER_LONG;
6201         bitidx &= (BITS_PER_LONG-1);
6202 
6203         word = bitmap[word_bitidx];
6204         bitidx += end_bitidx;
6205         return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
6206 }
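
/*
 * A worked example of the bit layout (illustrative, assuming x86_64-style
 * SPARSEMEM with PAGES_PER_SECTION = 32768, pageblock_order = 9,
 * NR_PAGEBLOCK_BITS = 4 and BITS_PER_LONG = 64): for pfn = 0x12345,
 * pfn_to_bitidx() gives ((0x12345 & 0x7fff) >> 9) * 4 = 68, so
 * word_bitidx = 1 and bitidx = 4. Reading the three migratetype bits
 * (end_bitidx = 2, mask = 0x7) then evaluates
 * (bitmap[1] >> (64 - 6 - 1)) & 0x7, i.e. bits 57..59 of that word.
 */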
6207 
6208 /**
6209  * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
6210  * @page: The page within the block of interest
6211  * @flags: The flags to set
6212  * @pfn: The target page frame number
6213  * @end_bitidx: The last bit of interest
6214  * @mask: mask of bits that the caller is interested in
6215  */
6216 void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
6217                                         unsigned long pfn,
6218                                         unsigned long end_bitidx,
6219                                         unsigned long mask)
6220 {
6221         struct zone *zone;
6222         unsigned long *bitmap;
6223         unsigned long bitidx, word_bitidx;
6224         unsigned long old_word, word;
6225 
6226         BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
6227 
6228         zone = page_zone(page);
6229         bitmap = get_pageblock_bitmap(zone, pfn);
6230         bitidx = pfn_to_bitidx(zone, pfn);
6231         word_bitidx = bitidx / BITS_PER_LONG;
6232         bitidx &= (BITS_PER_LONG-1);
6233 
6234         VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page);
6235 
6236         bitidx += end_bitidx;
6237         mask <<= (BITS_PER_LONG - bitidx - 1);
6238         flags <<= (BITS_PER_LONG - bitidx - 1);
6239 
6240         word = READ_ONCE(bitmap[word_bitidx]);
6241         for (;;) {
6242                 old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
6243                 if (word == old_word)
6244                         break;
6245                 word = old_word;
6246         }
6247 }
6248 
6249 /*
6250  * This function checks whether the pageblock includes unmovable pages or not.
6251  * If @count is not zero, the block may include up to @count unmovable pages.
6252  *
6253  * A PageLRU check without isolation or the lru_lock could race, so a
6254  * MIGRATE_MOVABLE block might include unmovable pages. This means you cannot
6255  * expect this function to be exact.
6256  */
6257 bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
6258                          bool skip_hwpoisoned_pages)
6259 {
6260         unsigned long pfn, iter, found;
6261         int mt;
6262 
6263         /*
6264          * To avoid noisy data, lru_add_drain_all() should be called first.
6265          * A ZONE_MOVABLE zone never contains unmovable pages.
6266          */
6267         if (zone_idx(zone) == ZONE_MOVABLE)
6268                 return false;
6269         mt = get_pageblock_migratetype(page);
6270         if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
6271                 return false;
6272 
6273         pfn = page_to_pfn(page);
6274         for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
6275                 unsigned long check = pfn + iter;
6276 
6277                 if (!pfn_valid_within(check))
6278                         continue;
6279 
6280                 page = pfn_to_page(check);
6281 
6282                 /*
6283                  * Hugepages are not in LRU lists, but they're movable.
6284                  * We need not scan over tail pages because we don't
6285                  * handle each tail page individually in migration.
6286                  */
6287                 if (PageHuge(page)) {
6288                         iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
6289                         continue;
6290                 }
6291 
6292                 /*
6293                  * We can't use page_count without pinning the page
6294                  * because another CPU can free the compound page.
6295                  * This check already skips compound tails of THP
6296                  * because their page->_count is zero at all times.
6297                  */
6298                 if (!atomic_read(&page->_count)) {
6299                         if (PageBuddy(page))
6300                                 iter += (1 << page_order(page)) - 1;
6301                         continue;
6302                 }
6303 
6304                 /*
6305                  * An HWPoisoned page may not be in the buddy system, and
6306                  * its page_count() is not 0.
6307                  */
6308                 if (skip_hwpoisoned_pages && PageHWPoison(page))
6309                         continue;
6310 
6311                 if (!PageLRU(page))
6312                         found++;
6313                 /*
6314                  * If there are RECLAIMABLE pages, we need to check
6315                  * them.  But for now, memory offlining itself doesn't call
6316                  * shrink_node_slabs(); this still needs to be fixed.
6317                  */
6318                 /*
6319                  * If the page is not RAM, page_count() should be 0.
6320                  * We don't need further checks; this is a _used_, non-movable page.
6321                  *
6322                  * The problematic thing here is PG_reserved pages. PG_reserved
6323                  * is set on both memory hole pages and _used_ kernel
6324                  * pages at boot.
6325                  */
6326                 if (found > count)
6327                         return true;
6328         }
6329         return false;
6330 }
6331 
6332 bool is_pageblock_removable_nolock(struct page *page)
6333 {
6334         struct zone *zone;
6335         unsigned long pfn;
6336 
6337         /*
6338          * We have to be careful here because we are iterating over memory
6339          * sections, which are not zone aware, so we might end up outside of
6340          * the zone but still within the section.
6341          * We also have to take care of the node: if the node is offline,
6342          * its NODE_DATA will be NULL - see page_zone.
6343          */
6344         if (!node_online(page_to_nid(page)))
6345                 return false;
6346 
6347         zone = page_zone(page);
6348         pfn = page_to_pfn(page);
6349         if (!zone_spans_pfn(zone, pfn))
6350                 return false;
6351 
6352         return !has_unmovable_pages(zone, page, 0, true);
6353 }
6354 
6355 #ifdef CONFIG_CMA
6356 
6357 static unsigned long pfn_max_align_down(unsigned long pfn)
6358 {
6359         return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
6360                              pageblock_nr_pages) - 1);
6361 }
6362 
6363 static unsigned long pfn_max_align_up(unsigned long pfn)
6364 {
6365         return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
6366                                 pageblock_nr_pages));
6367 }
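
/*
 * Illustrative example (assuming MAX_ORDER_NR_PAGES = 1024 and
 * pageblock_nr_pages = 512): the effective alignment is 1024 pages, so
 * pfn_max_align_down(5000) = 4096 and pfn_max_align_up(5000) = 5120,
 * padding a requested range out to whole MAX_ORDER-sized blocks.
 */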
6368 
6369 /* [start, end) must belong to a single zone. */
6370 static int __alloc_contig_migrate_range(struct compact_control *cc,
6371                                         unsigned long start, unsigned long end)
6372 {
6373         /* This function is based on compact_zone() from compaction.c. */
6374         unsigned long nr_reclaimed;
6375         unsigned long pfn = start;
6376         unsigned int tries = 0;
6377         int ret = 0;
6378 
6379         migrate_prep();
6380 
6381         while (pfn < end || !list_empty(&cc->migratepages)) {
6382                 if (fatal_signal_pending(current)) {
6383                         ret = -EINTR;
6384                         break;
6385                 }
6386 
6387                 if (list_empty(&cc->migratepages)) {
6388                         cc->nr_migratepages = 0;
6389                         pfn = isolate_migratepages_range(cc, pfn, end);
6390                         if (!pfn) {
6391                                 ret = -EINTR;
6392                                 break;
6393                         }
6394                         tries = 0;
6395                 } else if (++tries == 5) {
6396                         ret = ret < 0 ? ret : -EBUSY;
6397                         break;
6398                 }
6399 
6400                 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
6401                                                         &cc->migratepages);
6402                 cc->nr_migratepages -= nr_reclaimed;
6403 
6404                 ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
6405                                     NULL, 0, cc->mode, MR_CMA);
6406         }
6407         if (ret < 0) {
6408                 putback_movable_pages(&cc->migratepages);
6409                 return ret;
6410         }
6411         return 0;