
TOMOYO Linux Cross Reference
Linux/mm/memory_hotplug.c


  1 /*
  2  *  linux/mm/memory_hotplug.c
  3  *
  4  *  Copyright (C)
  5  */
  6 
  7 #include <linux/stddef.h>
  8 #include <linux/mm.h>
  9 #include <linux/sched/signal.h>
 10 #include <linux/swap.h>
 11 #include <linux/interrupt.h>
 12 #include <linux/pagemap.h>
 13 #include <linux/compiler.h>
 14 #include <linux/export.h>
 15 #include <linux/pagevec.h>
 16 #include <linux/writeback.h>
 17 #include <linux/slab.h>
 18 #include <linux/sysctl.h>
 19 #include <linux/cpu.h>
 20 #include <linux/memory.h>
 21 #include <linux/memremap.h>
 22 #include <linux/memory_hotplug.h>
 23 #include <linux/highmem.h>
 24 #include <linux/vmalloc.h>
 25 #include <linux/ioport.h>
 26 #include <linux/delay.h>
 27 #include <linux/migrate.h>
 28 #include <linux/page-isolation.h>
 29 #include <linux/pfn.h>
 30 #include <linux/suspend.h>
 31 #include <linux/mm_inline.h>
 32 #include <linux/firmware-map.h>
 33 #include <linux/stop_machine.h>
 34 #include <linux/hugetlb.h>
 35 #include <linux/memblock.h>
 36 #include <linux/bootmem.h>
 37 #include <linux/compaction.h>
 38 
 39 #include <asm/tlbflush.h>
 40 
 41 #include "internal.h"
 42 
 43 /*
 44  * online_page_callback contains a pointer to the current page onlining
 45  * function. Initially it is generic_online_page(). If required, it can be
 46  * changed by calling set_online_page_callback() to register a callback and
 47  * restore_online_page_callback() to restore the generic callback.
 48  */
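/*
 * A minimal sketch (not part of this file) of how a driver could take over
 * page onlining; the callback name "my_online_page" is hypothetical, and its
 * body simply mirrors generic_online_page() below:
 *
 *	static void my_online_page(struct page *page)
 *	{
 *		__online_page_set_limits(page);
 *		__online_page_increment_counters(page);
 *		__online_page_free(page);
 *	}
 *
 *	rc = set_online_page_callback(&my_online_page);
 *	...
 *	restore_online_page_callback(&my_online_page);
 */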
 49 
 50 static void generic_online_page(struct page *page);
 51 
 52 static online_page_callback_t online_page_callback = generic_online_page;
 53 static DEFINE_MUTEX(online_page_callback_lock);
 54 
 55 DEFINE_STATIC_PERCPU_RWSEM(mem_hotplug_lock);
 56 
 57 void get_online_mems(void)
 58 {
 59         percpu_down_read(&mem_hotplug_lock);
 60 }
 61 
 62 void put_online_mems(void)
 63 {
 64         percpu_up_read(&mem_hotplug_lock);
 65 }
 66 
 67 bool movable_node_enabled = false;
 68 
 69 #ifndef CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE
 70 bool memhp_auto_online;
 71 #else
 72 bool memhp_auto_online = true;
 73 #endif
 74 EXPORT_SYMBOL_GPL(memhp_auto_online);
 75 
 76 static int __init setup_memhp_default_state(char *str)
 77 {
 78         if (!strcmp(str, "online"))
 79                 memhp_auto_online = true;
 80         else if (!strcmp(str, "offline"))
 81                 memhp_auto_online = false;
 82 
 83         return 1;
 84 }
 85 __setup("memhp_default_state=", setup_memhp_default_state);
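/*
 * Usage note: passing "memhp_default_state=online" (or "=offline") on the
 * kernel command line simply sets memhp_auto_online above, which controls
 * whether newly hot-added memory blocks are onlined automatically.
 */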
 86 
 87 void mem_hotplug_begin(void)
 88 {
 89         cpus_read_lock();
 90         percpu_down_write(&mem_hotplug_lock);
 91 }
 92 
 93 void mem_hotplug_done(void)
 94 {
 95         percpu_up_write(&mem_hotplug_lock);
 96         cpus_read_unlock();
 97 }
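/*
 * A sketch of the locking pattern used by hotplug writers in this file:
 *
 *	mem_hotplug_begin();
 *	... add/remove sections, resize zone and pgdat spans ...
 *	mem_hotplug_done();
 *
 * Readers that only need a stable view of memory pair
 * get_online_mems()/put_online_mems() instead.
 */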
 98 
 99 /* add this memory to iomem resource */
100 static struct resource *register_memory_resource(u64 start, u64 size)
101 {
102         struct resource *res, *conflict;
103         res = kzalloc(sizeof(struct resource), GFP_KERNEL);
104         if (!res)
105                 return ERR_PTR(-ENOMEM);
106 
107         res->name = "System RAM";
108         res->start = start;
109         res->end = start + size - 1;
110         res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
111         conflict = request_resource_conflict(&iomem_resource, res);
112         if (conflict) {
113                 if (conflict->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) {
114                         pr_debug("Device unaddressable memory block "
115                                  "memory hotplug at %#010llx !\n",
116                                  (unsigned long long)start);
117                 }
118                 pr_debug("System RAM resource %pR cannot be added\n", res);
119                 kfree(res);
120                 return ERR_PTR(-EEXIST);
121         }
122         return res;
123 }
124 
125 static void release_memory_resource(struct resource *res)
126 {
127         if (!res)
128                 return;
129         release_resource(res);
130         kfree(res);
131         return;
132 }
133 
134 #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
135 void get_page_bootmem(unsigned long info,  struct page *page,
136                       unsigned long type)
137 {
138         page->freelist = (void *)type;
139         SetPagePrivate(page);
140         set_page_private(page, info);
141         page_ref_inc(page);
142 }
143 
144 void put_page_bootmem(struct page *page)
145 {
146         unsigned long type;
147 
148         type = (unsigned long) page->freelist;
149         BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
150                type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);
151 
152         if (page_ref_dec_return(page) == 1) {
153                 page->freelist = NULL;
154                 ClearPagePrivate(page);
155                 set_page_private(page, 0);
156                 INIT_LIST_HEAD(&page->lru);
157                 free_reserved_page(page);
158         }
159 }
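/*
 * Pairing sketch: register_page_bootmem_info_section() below takes one
 * reference per memmap/usemap page via get_page_bootmem(); hot-remove drops
 * them with put_page_bootmem(), and once the count falls back to the base
 * reference of 1 the page is handed back via free_reserved_page().
 */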
160 
161 #ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
162 #ifndef CONFIG_SPARSEMEM_VMEMMAP
163 static void register_page_bootmem_info_section(unsigned long start_pfn)
164 {
165         unsigned long *usemap, mapsize, section_nr, i;
166         struct mem_section *ms;
167         struct page *page, *memmap;
168 
169         section_nr = pfn_to_section_nr(start_pfn);
170         ms = __nr_to_section(section_nr);
171 
172         /* Get section's memmap address */
173         memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
174 
175         /*
176          * Get page for the memmap's phys address
177          * XXX: need more consideration for sparse_vmemmap...
178          */
179         page = virt_to_page(memmap);
180         mapsize = sizeof(struct page) * PAGES_PER_SECTION;
181         mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;
182 
183         /* remember memmap's page */
184         for (i = 0; i < mapsize; i++, page++)
185                 get_page_bootmem(section_nr, page, SECTION_INFO);
186 
187         usemap = ms->pageblock_flags;
188         page = virt_to_page(usemap);
189 
190         mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;
191 
192         for (i = 0; i < mapsize; i++, page++)
193                 get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
194 
195 }
196 #else /* CONFIG_SPARSEMEM_VMEMMAP */
197 static void register_page_bootmem_info_section(unsigned long start_pfn)
198 {
199         unsigned long *usemap, mapsize, section_nr, i;
200         struct mem_section *ms;
201         struct page *page, *memmap;
202 
203         section_nr = pfn_to_section_nr(start_pfn);
204         ms = __nr_to_section(section_nr);
205 
206         memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
207 
208         register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);
209 
210         usemap = ms->pageblock_flags;
211         page = virt_to_page(usemap);
212 
213         mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;
214 
215         for (i = 0; i < mapsize; i++, page++)
216                 get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
217 }
218 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
219 
220 void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
221 {
222         unsigned long i, pfn, end_pfn, nr_pages;
223         int node = pgdat->node_id;
224         struct page *page;
225 
226         nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
227         page = virt_to_page(pgdat);
228 
229         for (i = 0; i < nr_pages; i++, page++)
230                 get_page_bootmem(node, page, NODE_INFO);
231 
232         pfn = pgdat->node_start_pfn;
233         end_pfn = pgdat_end_pfn(pgdat);
234 
235         /* register section info */
236         for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
237                 /*
238                  * Some platforms can assign the same pfn to multiple nodes - on
239                  * node0 as well as nodeN.  To avoid registering a pfn against
240                  * multiple nodes, we check that this pfn does not already
241                  * reside in some other node.
242                  */
243                 if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
244                         register_page_bootmem_info_section(pfn);
245         }
246 }
247 #endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */
248 
249 static int __meminit __add_section(int nid, unsigned long phys_start_pfn,
250                 struct vmem_altmap *altmap, bool want_memblock)
251 {
252         int ret;
253 
254         if (pfn_valid(phys_start_pfn))
255                 return -EEXIST;
256 
257         ret = sparse_add_one_section(NODE_DATA(nid), phys_start_pfn, altmap);
258         if (ret < 0)
259                 return ret;
260 
261         if (!want_memblock)
262                 return 0;
263 
264         return hotplug_memory_register(nid, __pfn_to_section(phys_start_pfn));
265 }
266 
267 /*
268  * Reasonably generic function for adding memory.  It is
269  * expected that archs that support memory hotplug will
270  * call this function after deciding the zone to which to
271  * add the new pages.
272  */
273 int __ref __add_pages(int nid, unsigned long phys_start_pfn,
274                 unsigned long nr_pages, struct vmem_altmap *altmap,
275                 bool want_memblock)
276 {
277         unsigned long i;
278         int err = 0;
279         int start_sec, end_sec;
280 
281         /* while initializing the mem_map, align the hot-added range to sections */
282         start_sec = pfn_to_section_nr(phys_start_pfn);
283         end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);
284 
285         if (altmap) {
286                 /*
287                  * Validate altmap is within bounds of the total request
288                  */
289                 if (altmap->base_pfn != phys_start_pfn
290                                 || vmem_altmap_offset(altmap) > nr_pages) {
291                         pr_warn_once("memory add fail, invalid altmap\n");
292                         err = -EINVAL;
293                         goto out;
294                 }
295                 altmap->alloc = 0;
296         }
297 
298         for (i = start_sec; i <= end_sec; i++) {
299                 err = __add_section(nid, section_nr_to_pfn(i), altmap,
300                                 want_memblock);
301 
302                 /*
303                  * EEXIST is finally dealt with by the ioresource collision
304                  * check; see add_memory() => register_memory_resource().
305                  * A warning will be printed if there is a collision.
306                  */
307                 if (err && (err != -EEXIST))
308                         break;
309                 err = 0;
310                 cond_resched();
311         }
312         vmemmap_populate_print_last();
313 out:
314         return err;
315 }
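/*
 * A sketch of the expected arch-side caller, loosely modeled on existing
 * arch_add_memory() implementations (the mapping setup is arch specific):
 *
 *	int arch_add_memory(int nid, u64 start, u64 size,
 *			    struct vmem_altmap *altmap, bool want_memblock)
 *	{
 *		unsigned long start_pfn = start >> PAGE_SHIFT;
 *		unsigned long nr_pages = size >> PAGE_SHIFT;
 *
 *		... create the arch's direct mapping for [start, start + size) ...
 *		return __add_pages(nid, start_pfn, nr_pages, altmap,
 *				   want_memblock);
 *	}
 */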
316 
317 #ifdef CONFIG_MEMORY_HOTREMOVE
318 /* find the smallest valid pfn in the range [start_pfn, end_pfn) */
319 static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
320                                      unsigned long start_pfn,
321                                      unsigned long end_pfn)
322 {
323         struct mem_section *ms;
324 
325         for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) {
326                 ms = __pfn_to_section(start_pfn);
327 
328                 if (unlikely(!valid_section(ms)))
329                         continue;
330 
331                 if (unlikely(pfn_to_nid(start_pfn) != nid))
332                         continue;
333 
334                 if (zone && zone != page_zone(pfn_to_page(start_pfn)))
335                         continue;
336 
337                 return start_pfn;
338         }
339 
340         return 0;
341 }
342 
343 /* find the biggest valid pfn in the range [start_pfn, end_pfn). */
344 static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
345                                     unsigned long start_pfn,
346                                     unsigned long end_pfn)
347 {
348         struct mem_section *ms;
349         unsigned long pfn;
350 
351         /* pfn is the end pfn of a memory section. */
352         pfn = end_pfn - 1;
353         for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) {
354                 ms = __pfn_to_section(pfn);
355 
356                 if (unlikely(!valid_section(ms)))
357                         continue;
358 
359                 if (unlikely(pfn_to_nid(pfn) != nid))
360                         continue;
361 
362                 if (zone && zone != page_zone(pfn_to_page(pfn)))
363                         continue;
364 
365                 return pfn;
366         }
367 
368         return 0;
369 }
370 
371 static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
372                              unsigned long end_pfn)
373 {
374         unsigned long zone_start_pfn = zone->zone_start_pfn;
375         unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */
376         unsigned long zone_end_pfn = z;
377         unsigned long pfn;
378         struct mem_section *ms;
379         int nid = zone_to_nid(zone);
380 
381         zone_span_writelock(zone);
382         if (zone_start_pfn == start_pfn) {
383                 /*
384                  * If the section is the smallest section in the zone, we
385                  * need to shrink zone->zone_start_pfn and zone->spanned_pages.
386                  * In this case, we find the second smallest valid mem_section
387                  * for shrinking the zone.
388                  */
389                 pfn = find_smallest_section_pfn(nid, zone, end_pfn,
390                                                 zone_end_pfn);
391                 if (pfn) {
392                         zone->zone_start_pfn = pfn;
393                         zone->spanned_pages = zone_end_pfn - pfn;
394                 }
395         } else if (zone_end_pfn == end_pfn) {
396                 /*
397                  * If the section is the biggest section in the zone, we
398                  * need to shrink zone->spanned_pages.
399                  * In this case, we find the second biggest valid mem_section
400                  * for shrinking the zone.
401                  */
402                 pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn,
403                                                start_pfn);
404                 if (pfn)
405                         zone->spanned_pages = pfn - zone_start_pfn + 1;
406         }
407 
408         /*
409          * If the section is neither the biggest nor the smallest mem_section
410          * in the zone, it only creates a hole in the zone. So in this case,
411          * we need not change the zone span. But the zone may now consist only
412          * of holes, so check whether any valid section remains.
413          */
414         pfn = zone_start_pfn;
415         for (; pfn < zone_end_pfn; pfn += PAGES_PER_SECTION) {
416                 ms = __pfn_to_section(pfn);
417 
418                 if (unlikely(!valid_section(ms)))
419                         continue;
420 
421                 if (page_zone(pfn_to_page(pfn)) != zone)
422                         continue;
423 
424                  /* Skip the section being removed and continue the loop */
425                 if (start_pfn == pfn)
426                         continue;
427 
428                 /* If we find valid section, we have nothing to do */
429                 zone_span_writeunlock(zone);
430                 return;
431         }
432 
433         /* The zone has no valid section */
434         zone->zone_start_pfn = 0;
435         zone->spanned_pages = 0;
436         zone_span_writeunlock(zone);
437 }
438 
439 static void shrink_pgdat_span(struct pglist_data *pgdat,
440                               unsigned long start_pfn, unsigned long end_pfn)
441 {
442         unsigned long pgdat_start_pfn = pgdat->node_start_pfn;
443         unsigned long p = pgdat_end_pfn(pgdat); /* pgdat_end_pfn namespace clash */
444         unsigned long pgdat_end_pfn = p;
445         unsigned long pfn;
446         struct mem_section *ms;
447         int nid = pgdat->node_id;
448 
449         if (pgdat_start_pfn == start_pfn) {
450                 /*
451                  * If the section is the smallest section in the pgdat, we
452                  * need to shrink pgdat->node_start_pfn and pgdat->node_spanned_pages.
453                  * In this case, we find the second smallest valid mem_section
454                  * for shrinking the pgdat.
455                  */
456                 pfn = find_smallest_section_pfn(nid, NULL, end_pfn,
457                                                 pgdat_end_pfn);
458                 if (pfn) {
459                         pgdat->node_start_pfn = pfn;
460                         pgdat->node_spanned_pages = pgdat_end_pfn - pfn;
461                 }
462         } else if (pgdat_end_pfn == end_pfn) {
463                 /*
464                  * If the section is the biggest section in the pgdat, we
465                  * need to shrink pgdat->node_spanned_pages.
466                  * In this case, we find the second biggest valid mem_section
467                  * for shrinking the pgdat.
468                  */
469                 pfn = find_biggest_section_pfn(nid, NULL, pgdat_start_pfn,
470                                                start_pfn);
471                 if (pfn)
472                         pgdat->node_spanned_pages = pfn - pgdat_start_pfn + 1;
473         }
474 
475         /*
476          * If the section is neither the biggest nor the smallest mem_section
477          * in the pgdat, it only creates a hole in the pgdat. So in this case,
478          * we need not change the pgdat span.
479          * But the pgdat may now consist only of holes, so check whether any
480          * valid section remains.
481          */
482         pfn = pgdat_start_pfn;
483         for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SECTION) {
484                 ms = __pfn_to_section(pfn);
485 
486                 if (unlikely(!valid_section(ms)))
487                         continue;
488 
489                 if (pfn_to_nid(pfn) != nid)
490                         continue;
491 
492                  /* Skip the section being removed and continue the loop */
493                 if (start_pfn == pfn)
494                         continue;
495 
496                 /* If we find valid section, we have nothing to do */
497                 return;
498         }
499 
500         /* The pgdat has no valid section */
501         pgdat->node_start_pfn = 0;
502         pgdat->node_spanned_pages = 0;
503 }
504 
505 static void __remove_zone(struct zone *zone, unsigned long start_pfn)
506 {
507         struct pglist_data *pgdat = zone->zone_pgdat;
508         int nr_pages = PAGES_PER_SECTION;
509         unsigned long flags;
510 
511         pgdat_resize_lock(zone->zone_pgdat, &flags);
512         shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
513         shrink_pgdat_span(pgdat, start_pfn, start_pfn + nr_pages);
514         pgdat_resize_unlock(zone->zone_pgdat, &flags);
515 }
516 
517 static int __remove_section(struct zone *zone, struct mem_section *ms,
518                 unsigned long map_offset, struct vmem_altmap *altmap)
519 {
520         unsigned long start_pfn;
521         int scn_nr;
522         int ret = -EINVAL;
523 
524         if (!valid_section(ms))
525                 return ret;
526 
527         ret = unregister_memory_section(ms);
528         if (ret)
529                 return ret;
530 
531         scn_nr = __section_nr(ms);
532         start_pfn = section_nr_to_pfn((unsigned long)scn_nr);
533         __remove_zone(zone, start_pfn);
534 
535         sparse_remove_one_section(zone, ms, map_offset, altmap);
536         return 0;
537 }
538 
539 /**
540  * __remove_pages() - remove sections of pages from a zone
541  * @zone: zone from which pages need to be removed
542  * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
543  * @nr_pages: number of pages to remove (must be multiple of section size)
544  * @altmap: alternative device page map or %NULL if default memmap is used
545  *
546  * Generic helper function to remove section mappings and sysfs entries
547  * for the section of the memory we are removing. Caller needs to make
548  * sure that pages are marked reserved and zones are adjusted properly by
549  * calling offline_pages().
550  */
551 int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
552                  unsigned long nr_pages, struct vmem_altmap *altmap)
553 {
554         unsigned long i;
555         unsigned long map_offset = 0;
556         int sections_to_remove, ret = 0;
557 
558         /* In the ZONE_DEVICE case device driver owns the memory region */
559         if (is_dev_zone(zone)) {
560                 if (altmap)
561                         map_offset = vmem_altmap_offset(altmap);
562         } else {
563                 resource_size_t start, size;
564 
565                 start = phys_start_pfn << PAGE_SHIFT;
566                 size = nr_pages * PAGE_SIZE;
567 
568                 ret = release_mem_region_adjustable(&iomem_resource, start,
569                                         size);
570                 if (ret) {
571                         resource_size_t endres = start + size - 1;
572 
573                         pr_warn("Unable to release resource <%pa-%pa> (%d)\n",
574                                         &start, &endres, ret);
575                 }
576         }
577 
578         clear_zone_contiguous(zone);
579 
580         /*
581          * We can only remove entire sections
582          */
583         BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
584         BUG_ON(nr_pages % PAGES_PER_SECTION);
585 
586         sections_to_remove = nr_pages / PAGES_PER_SECTION;
587         for (i = 0; i < sections_to_remove; i++) {
588                 unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
589 
590                 ret = __remove_section(zone, __pfn_to_section(pfn), map_offset,
591                                 altmap);
592                 map_offset = 0;
593                 if (ret)
594                         break;
595         }
596 
597         set_zone_contiguous(zone);
598 
599         return ret;
600 }
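/*
 * A matching sketch of the arch-side caller under CONFIG_MEMORY_HOTREMOVE,
 * loosely modeled on existing arch_remove_memory() implementations:
 *
 *	int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
 *	{
 *		unsigned long start_pfn = start >> PAGE_SHIFT;
 *		unsigned long nr_pages = size >> PAGE_SHIFT;
 *		struct zone *zone = page_zone(pfn_to_page(start_pfn));
 *
 *		... tear down the arch's direct mapping ...
 *		return __remove_pages(zone, start_pfn, nr_pages, altmap);
 *	}
 */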
601 #endif /* CONFIG_MEMORY_HOTREMOVE */
602 
603 int set_online_page_callback(online_page_callback_t callback)
604 {
605         int rc = -EINVAL;
606 
607         get_online_mems();
608         mutex_lock(&online_page_callback_lock);
609 
610         if (online_page_callback == generic_online_page) {
611                 online_page_callback = callback;
612                 rc = 0;
613         }
614 
615         mutex_unlock(&online_page_callback_lock);
616         put_online_mems();
617 
618         return rc;
619 }
620 EXPORT_SYMBOL_GPL(set_online_page_callback);
621 
622 int restore_online_page_callback(online_page_callback_t callback)
623 {
624         int rc = -EINVAL;
625 
626         get_online_mems();
627         mutex_lock(&online_page_callback_lock);
628 
629         if (online_page_callback == callback) {
630                 online_page_callback = generic_online_page;
631                 rc = 0;
632         }
633 
634         mutex_unlock(&online_page_callback_lock);
635         put_online_mems();
636 
637         return rc;
638 }
639 EXPORT_SYMBOL_GPL(restore_online_page_callback);
640 
641 void __online_page_set_limits(struct page *page)
642 {
643 }
644 EXPORT_SYMBOL_GPL(__online_page_set_limits);
645 
646 void __online_page_increment_counters(struct page *page)
647 {
648         adjust_managed_page_count(page, 1);
649 }
650 EXPORT_SYMBOL_GPL(__online_page_increment_counters);
651 
652 void __online_page_free(struct page *page)
653 {
654         __free_reserved_page(page);
655 }
656 EXPORT_SYMBOL_GPL(__online_page_free);
657 
658 static void generic_online_page(struct page *page)
659 {
660         __online_page_set_limits(page);
661         __online_page_increment_counters(page);
662         __online_page_free(page);
663 }
664 
665 static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
666                         void *arg)
667 {
668         unsigned long i;
669         unsigned long onlined_pages = *(unsigned long *)arg;
670         struct page *page;
671 
672         if (PageReserved(pfn_to_page(start_pfn)))
673                 for (i = 0; i < nr_pages; i++) {
674                         page = pfn_to_page(start_pfn + i);
675                         (*online_page_callback)(page);
676                         onlined_pages++;
677                 }
678 
679         online_mem_sections(start_pfn, start_pfn + nr_pages);
680 
681         *(unsigned long *)arg = onlined_pages;
682         return 0;
683 }
684 
685 /* check which states in node_states will be changed when memory is onlined */
686 static void node_states_check_changes_online(unsigned long nr_pages,
687         struct zone *zone, struct memory_notify *arg)
688 {
689         int nid = zone_to_nid(zone);
690         enum zone_type zone_last = ZONE_NORMAL;
691 
692         /*
693          * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY]
694          * contains nodes which have zones of 0...ZONE_NORMAL,
695          * set zone_last to ZONE_NORMAL.
696          *
697          * If we don't have HIGHMEM nor movable node,
698          * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
699          * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
700          */
701         if (N_MEMORY == N_NORMAL_MEMORY)
702                 zone_last = ZONE_MOVABLE;
703 
704         /*
705          * if the memory to be online is in a zone of 0...zone_last, and
706          * the zones of 0...zone_last don't have memory before online, we will
707          * need to set the node to node_states[N_NORMAL_MEMORY] after
708          * the memory is online.
709          */
710         if (zone_idx(zone) <= zone_last && !node_state(nid, N_NORMAL_MEMORY))
711                 arg->status_change_nid_normal = nid;
712         else
713                 arg->status_change_nid_normal = -1;
714 
715 #ifdef CONFIG_HIGHMEM
716         /*
717          * If we have movable node, node_states[N_HIGH_MEMORY]
718          * contains nodes which have zones of 0...ZONE_HIGHMEM,
719          * set zone_last to ZONE_HIGHMEM.
720          *
721          * If we don't have movable node, node_states[N_HIGH_MEMORY]
722          * contains nodes which have zones of 0...ZONE_MOVABLE,
723          * set zone_last to ZONE_MOVABLE.
724          */
725         zone_last = ZONE_HIGHMEM;
726         if (N_MEMORY == N_HIGH_MEMORY)
727                 zone_last = ZONE_MOVABLE;
728 
729         if (zone_idx(zone) <= zone_last && !node_state(nid, N_HIGH_MEMORY))
730                 arg->status_change_nid_high = nid;
731         else
732                 arg->status_change_nid_high = -1;
733 #else
734         arg->status_change_nid_high = arg->status_change_nid_normal;
735 #endif
736 
737         /*
738          * If the node doesn't have memory before onlining, we will need
739          * to set the node in node_states[N_MEMORY] after the memory
740          * is onlined.
741          */
742         if (!node_state(nid, N_MEMORY))
743                 arg->status_change_nid = nid;
744         else
745                 arg->status_change_nid = -1;
746 }
747 
748 static void node_states_set_node(int node, struct memory_notify *arg)
749 {
750         if (arg->status_change_nid_normal >= 0)
751                 node_set_state(node, N_NORMAL_MEMORY);
752 
753         if (arg->status_change_nid_high >= 0)
754                 node_set_state(node, N_HIGH_MEMORY);
755 
756         node_set_state(node, N_MEMORY);
757 }
758 
759 static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn,
760                 unsigned long nr_pages)
761 {
762         unsigned long old_end_pfn = zone_end_pfn(zone);
763 
764         if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
765                 zone->zone_start_pfn = start_pfn;
766 
767         zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn;
768 }
769 
770 static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned long start_pfn,
771                                      unsigned long nr_pages)
772 {
773         unsigned long old_end_pfn = pgdat_end_pfn(pgdat);
774 
775         if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
776                 pgdat->node_start_pfn = start_pfn;
777 
778         pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn;
779 }
780 
781 void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
782                 unsigned long nr_pages, struct vmem_altmap *altmap)
783 {
784         struct pglist_data *pgdat = zone->zone_pgdat;
785         int nid = pgdat->node_id;
786         unsigned long flags;
787 
788         if (zone_is_empty(zone))
789                 init_currently_empty_zone(zone, start_pfn, nr_pages);
790 
791         clear_zone_contiguous(zone);
792 
793         /* TODO: pgdat locking is irqsave while zone locking is not; it used to be like that before as well */
794         pgdat_resize_lock(pgdat, &flags);
795         zone_span_writelock(zone);
796         resize_zone_range(zone, start_pfn, nr_pages);
797         zone_span_writeunlock(zone);
798         resize_pgdat_range(pgdat, start_pfn, nr_pages);
799         pgdat_resize_unlock(pgdat, &flags);
800 
801         /*
802          * TODO: now we have a visible range of pages which are not associated
803          * with their zone properly. Not nice, but set_pfnblock_flags_mask()
804          * expects the zone to span the pfn range. All the pages in the range
805          * are reserved, so nobody should be touching them, so we should be safe.
806          */
807         memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn,
808                         MEMMAP_HOTPLUG, altmap);
809 
810         set_zone_contiguous(zone);
811 }
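/*
 * Besides move_pfn_range() below, ZONE_DEVICE users (devm_memremap_pages())
 * call move_pfn_range_to_zone() directly to associate device pages with
 * their zone.
 */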
812 
813 /*
814  * Returns a default kernel memory zone for the given pfn range.
815  * If no kernel zone covers this pfn range it will automatically go
816  * to the ZONE_NORMAL.
817  */
818 static struct zone *default_kernel_zone_for_pfn(int nid, unsigned long start_pfn,
819                 unsigned long nr_pages)
820 {
821         struct pglist_data *pgdat = NODE_DATA(nid);
822         int zid;
823 
824         for (zid = 0; zid <= ZONE_NORMAL; zid++) {
825                 struct zone *zone = &pgdat->node_zones[zid];
826 
827                 if (zone_intersects(zone, start_pfn, nr_pages))
828                         return zone;
829         }
830 
831         return &pgdat->node_zones[ZONE_NORMAL];
832 }
833 
834 static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn,
835                 unsigned long nr_pages)
836 {
837         struct zone *kernel_zone = default_kernel_zone_for_pfn(nid, start_pfn,
838                         nr_pages);
839         struct zone *movable_zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
840         bool in_kernel = zone_intersects(kernel_zone, start_pfn, nr_pages);
841         bool in_movable = zone_intersects(movable_zone, start_pfn, nr_pages);
842 
843         /*
844          * We inherit the existing zone in a simple case where zones do not
845          * overlap in the given range
846          */
847         if (in_kernel ^ in_movable)
848                 return (in_kernel) ? kernel_zone : movable_zone;
849 
850         /*
851          * If the range doesn't belong to any zone or two zones overlap in the
852          * given range then we use movable zone only if movable_node is
853          * enabled because we always online to a kernel zone by default.
854          */
855         return movable_node_enabled ? movable_zone : kernel_zone;
856 }
857 
858 struct zone *zone_for_pfn_range(int online_type, int nid, unsigned long start_pfn,
859                 unsigned long nr_pages)
860 {
861         if (online_type == MMOP_ONLINE_KERNEL)
862                 return default_kernel_zone_for_pfn(nid, start_pfn, nr_pages);
863 
864         if (online_type == MMOP_ONLINE_MOVABLE)
865                 return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
866 
867         return default_zone_for_pfn(nid, start_pfn, nr_pages);
868 }
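/*
 * For example: MMOP_ONLINE_MOVABLE always yields ZONE_MOVABLE,
 * MMOP_ONLINE_KERNEL yields the lowest intersecting kernel zone (falling
 * back to ZONE_NORMAL), and the default MMOP_ONLINE_KEEP goes through the
 * default_zone_for_pfn() heuristic above.
 */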
869 
870 /*
871  * Associates the given pfn range with the given node and the zone appropriate
872  * for the given online type.
873  */
874 static struct zone * __meminit move_pfn_range(int online_type, int nid,
875                 unsigned long start_pfn, unsigned long nr_pages)
876 {
877         struct zone *zone;
878 
879         zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
880         move_pfn_range_to_zone(zone, start_pfn, nr_pages, NULL);
881         return zone;
882 }
883 
884 /* Must be protected by mem_hotplug_begin() or a device_lock */
885 int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
886 {
887         unsigned long flags;
888         unsigned long onlined_pages = 0;
889         struct zone *zone;
890         int need_zonelists_rebuild = 0;
891         int nid;
892         int ret;
893         struct memory_notify arg;
894         struct memory_block *mem;
895 
896         /*
897          * We can't use pfn_to_nid() because nid might be stored in struct page
898          * which is not yet initialized. Instead, we find the nid from the memory block.
899          */
900         mem = find_memory_block(__pfn_to_section(pfn));
901         nid = mem->nid;
902 
903         /* associate pfn range with the zone */
904         zone = move_pfn_range(online_type, nid, pfn, nr_pages);
905 
906         arg.start_pfn = pfn;
907         arg.nr_pages = nr_pages;
908         node_states_check_changes_online(nr_pages, zone, &arg);
909 
910         ret = memory_notify(MEM_GOING_ONLINE, &arg);
911         ret = notifier_to_errno(ret);
912         if (ret)
913                 goto failed_addition;
914 
915         /*
916          * If this zone is not populated, then it is not in zonelist.
917          * This means the page allocator ignores this zone.
918          * So, zonelist must be updated after online.
919          */
920         if (!populated_zone(zone)) {
921                 need_zonelists_rebuild = 1;
922                 setup_zone_pageset(zone);
923         }
924 
925         ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
926                 online_pages_range);
927         if (ret) {
928                 if (need_zonelists_rebuild)
929                         zone_pcp_reset(zone);
930                 goto failed_addition;
931         }
932 
933         zone->present_pages += onlined_pages;
934 
935         pgdat_resize_lock(zone->zone_pgdat, &flags);
936         zone->zone_pgdat->node_present_pages += onlined_pages;
937         pgdat_resize_unlock(zone->zone_pgdat, &flags);
938 
939         if (onlined_pages) {
940                 node_states_set_node(nid, &arg);
941                 if (need_zonelists_rebuild)
942                         build_all_zonelists(NULL);
943                 else
944                         zone_pcp_update(zone);
945         }
946 
947         init_per_zone_wmark_min();
948 
949         if (onlined_pages) {
950                 kswapd_run(nid);
951                 kcompactd_run(nid);
952         }
953 
954         vm_total_pages = nr_free_pagecache_pages();
955 
956         writeback_set_ratelimit();
957 
958         if (onlined_pages)
959                 memory_notify(MEM_ONLINE, &arg);
960         return 0;
961 
962 failed_addition:
963         pr_debug("online_pages [mem %#010llx-%#010llx] failed\n",
964                  (unsigned long long) pfn << PAGE_SHIFT,
965                  (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
966         memory_notify(MEM_CANCEL_ONLINE, &arg);
967         return ret;
968 }
969 #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
970 
971 static void reset_node_present_pages(pg_data_t *pgdat)
972 {
973         struct zone *z;
974 
975         for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
976                 z->present_pages = 0;
977 
978         pgdat->node_present_pages = 0;
979 }
980 
981 /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
982 static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
983 {
984         struct pglist_data *pgdat;
985         unsigned long start_pfn = PFN_DOWN(start);
986 
987         pgdat = NODE_DATA(nid);
988         if (!pgdat) {
989                 pgdat = arch_alloc_nodedata(nid);
990                 if (!pgdat)
991                         return NULL;
992 
993                 arch_refresh_nodedata(nid, pgdat);
994         } else {
995                 /*
996                  * Reset the nr_zones, order and classzone_idx before reuse.
997                  * Note that kswapd will init kswapd_classzone_idx properly
998                  * when it starts in the near future.
999                  */
1000                 pgdat->nr_zones = 0;
1001                 pgdat->kswapd_order = 0;
1002                 pgdat->kswapd_classzone_idx = 0;
1003         }
1004 
1005         /* we can use NODE_DATA(nid) from here */
1006 
1007         pgdat->node_id = nid;
1008         pgdat->node_start_pfn = start_pfn;
1009 
1010         /* init the node's zones as empty zones; we don't have any present pages */
1011         free_area_init_core_hotplug(nid);
1012         pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);
1013 
1014         /*
1015          * The node we allocated has no zone fallback lists. To avoid
1016          * accessing an uninitialized zonelist, build it here.
1017          */
1018         build_all_zonelists(pgdat);
1019 
1020         /*
1021          * When memory is hot-added, all the memory is in offline state. So
1022          * clear all zones' present_pages because they will be updated in
1023          * online_pages() and offline_pages().
1024          */
1025         reset_node_managed_pages(pgdat);
1026         reset_node_present_pages(pgdat);
1027 
1028         return pgdat;
1029 }
1030 
1031 static void rollback_node_hotadd(int nid)
1032 {
1033         pg_data_t *pgdat = NODE_DATA(nid);
1034 
1035         arch_refresh_nodedata(nid, NULL);
1036         free_percpu(pgdat->per_cpu_nodestats);
1037         arch_free_nodedata(pgdat);
1038         return;
1039 }
1040 
1041 
1042 /**
1043  * __try_online_node - online a node if offlined
1044  * @nid: the node ID
1045  * @start: start addr of the node
1046  * @set_node_online: Whether we want to online the node
1047  * Called by cpu_up() to online a node without onlined memory.
1048  *
1049  * Returns:
1050  * 1 -> a new node has been allocated
1051  * 0 -> the node is already online
1052  * -ENOMEM -> the node could not be allocated
1053  */
1054 static int __try_online_node(int nid, u64 start, bool set_node_online)
1055 {
1056         pg_data_t *pgdat;
1057         int ret = 1;
1058 
1059         if (node_online(nid))
1060                 return 0;
1061 
1062         pgdat = hotadd_new_pgdat(nid, start);
1063         if (!pgdat) {
1064                 pr_err("Cannot online node %d due to NULL pgdat\n", nid);
1065                 ret = -ENOMEM;
1066                 goto out;
1067         }
1068 
1069         if (set_node_online) {
1070                 node_set_online(nid);
1071                 ret = register_one_node(nid);
1072                 BUG_ON(ret);
1073         }
1074 out:
1075         return ret;
1076 }
1077 
1078 /*
1079  * Users of this function always want to online/register the node
1080  */
1081 int try_online_node(int nid)
1082 {
1083         int ret;
1084 
1085         mem_hotplug_begin();
1086         ret = __try_online_node(nid, 0, true);
1087         mem_hotplug_done();
1088         return ret;
1089 }
1090 
1091 static int check_hotplug_memory_range(u64 start, u64 size)
1092 {
1093         unsigned long block_sz = memory_block_size_bytes();
1094         u64 block_nr_pages = block_sz >> PAGE_SHIFT;
1095         u64 nr_pages = size >> PAGE_SHIFT;
1096         u64 start_pfn = PFN_DOWN(start);
1097 
1098         /* memory range must be block size aligned */
1099         if (!nr_pages || !IS_ALIGNED(start_pfn, block_nr_pages) ||
1100             !IS_ALIGNED(nr_pages, block_nr_pages)) {
1101                 pr_err("Block size [%#lx] unaligned hotplug range: start %#llx, size %#llx",
1102                        block_sz, start, size);
1103                 return -EINVAL;
1104         }
1105 
1106         return 0;
1107 }
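/*
 * Worked example, assuming a 128 MiB memory block size (a common x86-64
 * value): adding 256 MiB at physical address 0x100000000 passes this check,
 * while a 64 MiB request, or one starting at 0x104000000, fails with
 * -EINVAL because it is not block aligned.
 */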
1108 
1109 static int online_memory_block(struct memory_block *mem, void *arg)
1110 {
1111         return device_online(&mem->dev);
1112 }
1113 
1114 /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
1115 int __ref add_memory_resource(int nid, struct resource *res, bool online)
1116 {
1117         u64 start, size;
1118         bool new_node = false;
1119         int ret;
1120 
1121         start = res->start;
1122         size = resource_size(res);
1123 
1124         ret = check_hotplug_memory_range(start, size);
1125         if (ret)
1126                 return ret;
1127 
1128         mem_hotplug_begin();
1129 
1130         /*
1131          * Add new range to memblock so that when hotadd_new_pgdat() is called
1132          * to allocate new pgdat, get_pfn_range_for_nid() will be able to find
1133          * this new range and calculate total pages correctly.  The range will
1134          * be removed at hot-remove time.
1135          */
1136         memblock_add_node(start, size, nid);
1137 
1138         ret = __try_online_node(nid, start, false);
1139         if (ret < 0)
1140                 goto error;
1141         new_node = ret;
1142 
1143         /* call arch's memory hotadd */
1144         ret = arch_add_memory(nid, start, size, NULL, true);
1145         if (ret < 0)
1146                 goto error;
1147 
1148         if (new_node) {
1149                 /* If the sysfs file of the new node can't be created, CPUs
1150                  * on the node can't be hot-added. There is no way to roll
1151                  * back now, so catch it with BUG_ON(), reluctantly.
1152                  * We online the node here; we can't roll back from here.
1153                  */
1154                 node_set_online(nid);
1155                 ret = __register_one_node(nid);
1156                 BUG_ON(ret);
1157         }
1158 
1159         /* link memory sections under this node.*/
1160         ret = link_mem_sections(nid, PFN_DOWN(start), PFN_UP(start + size - 1));
1161         BUG_ON(ret);
1162 
1163         /* create new memmap entry */
1164         firmware_map_add_hotplug(start, start + size, "System RAM");
1165 
1166         /* online pages if requested */
1167         if (online)
1168                 walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1),
1169                                   NULL, online_memory_block);
1170 
1171         goto out;
1172 
1173 error:
1174         /* rollback pgdat allocation and others */
1175         if (new_node)
1176                 rollback_node_hotadd(nid);
1177         memblock_remove(start, size);
1178 
1179 out:
1180         mem_hotplug_done();
1181         return ret;
1182 }
1183 EXPORT_SYMBOL_GPL(add_memory_resource);
1184 
1185 int __ref add_memory(int nid, u64 start, u64 size)
1186 {
1187         struct resource *res;
1188         int ret;
1189 
1190         res = register_memory_resource(start, size);
1191         if (IS_ERR(res))
1192                 return PTR_ERR(res);
1193 
1194         ret = add_memory_resource(nid, res, memhp_auto_online);
1195         if (ret < 0)
1196                 release_memory_resource(res);
1197         return ret;
1198 }
1199 EXPORT_SYMBOL_GPL(add_memory);
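/*
 * Sketch of a typical caller (e.g. an ACPI memory device driver; the
 * surrounding driver logic here is hypothetical):
 *
 *	ret = add_memory(nid, res->start, resource_size(res));
 *	if (ret && ret != -EEXIST)
 *		goto fail;
 */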
1200 
1201 #ifdef CONFIG_MEMORY_HOTREMOVE
1202 /*
1203  * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
1204  * set and the size of the free page is given by page_order(). Using this,
1205  * the function determines if the pageblock contains only free pages.
1206  * Due to buddy constraints, a free page at least the size of a pageblock will
1207  * be located at the start of the pageblock.
1208  */
1209 static inline int pageblock_free(struct page *page)
1210 {
1211         return PageBuddy(page) && page_order(page) >= pageblock_order;
1212 }
1213 
1214 /* Return the start of the next active pageblock after a given page */
1215 static struct page *next_active_pageblock(struct page *page)
1216 {
1217         /* Ensure the starting page is pageblock-aligned */
1218         BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
1219 
1220         /* If the entire pageblock is free, move to the end of free page */
1221         if (pageblock_free(page)) {
1222                 int order;
1223                 /* Be careful: we don't hold locks, so page_order() can change */
1224                 order = page_order(page);
1225                 if ((order < MAX_ORDER) && (order >= pageblock_order))
1226                         return page + (1 << order);
1227         }
1228 
1229         return page + pageblock_nr_pages;
1230 }
1231 
1232 static bool is_pageblock_removable_nolock(struct page *page)
1233 {
1234         struct zone *zone;
1235         unsigned long pfn;
1236 
1237         /*
1238          * We have to be careful here because we are iterating over memory
1239          * sections which are not zone aware so we might end up outside of
1240          * the zone but still within the section.
1241          * We also have to be careful about the node: if the node is offline,
1242          * its NODE_DATA will be NULL - see page_zone().
1243          */
1244         if (!node_online(page_to_nid(page)))
1245                 return false;
1246 
1247         zone = page_zone(page);
1248         pfn = page_to_pfn(page);
1249         if (!zone_spans_pfn(zone, pfn))
1250                 return false;
1251 
1252         return !has_unmovable_pages(zone, page, 0, MIGRATE_MOVABLE, true);
1253 }
1254 
1255 /* Checks if this range of memory is likely to be hot-removable. */
1256 bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
1257 {
1258         struct page *page = pfn_to_page(start_pfn);
1259         struct page *end_page = page + nr_pages;
1260 
1261         /* Check the starting page of each pageblock within the range */
1262         for (; page < end_page; page = next_active_pageblock(page)) {
1263                 if (!is_pageblock_removable_nolock(page))
1264                         return false;
1265                 cond_resched();
1266         }
1267 
1268         /* All pageblocks in the memory block are likely to be hot-removable */
1269         return true;
1270 }
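/*
 * This backs the "removable" sysfs attribute of a memory block
 * (/sys/devices/system/memory/memoryN/removable). The result is only a
 * hint: nothing stops the pages from changing state right after the check.
 */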
1271 
1272 /*
1273  * Confirm all pages in a range [start, end) belong to the same zone.
1274  * When true, report the valid sub-range via *valid_start and *valid_end.
1275  */
1276 int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
1277                          unsigned long *valid_start, unsigned long *valid_end)
1278 {
1279         unsigned long pfn, sec_end_pfn;
1280         unsigned long start, end;
1281         struct zone *zone = NULL;
1282         struct page *page;
1283         int i;
1284         for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
1285              pfn < end_pfn;
1286              pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
1287                 /* Make sure the memory section is present first */
1288                 if (!present_section_nr(pfn_to_section_nr(pfn)))
1289                         continue;
1290                 for (; pfn < sec_end_pfn && pfn < end_pfn;
1291                      pfn += MAX_ORDER_NR_PAGES) {
1292                         i = 0;
1293                         /* This is just a CONFIG_HOLES_IN_ZONE check.*/
1294                         while ((i < MAX_ORDER_NR_PAGES) &&
1295                                 !pfn_valid_within(pfn + i))
1296                                 i++;
1297                         if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
1298                                 continue;
1299                         page = pfn_to_page(pfn + i);
1300                         if (zone && page_zone(page) != zone)
1301                                 return 0;
1302                         if (!zone)
1303                                 start = pfn + i;
1304                         zone = page_zone(page);
1305                         end = pfn + MAX_ORDER_NR_PAGES;
1306                 }
1307         }
1308 
1309         if (zone) {
1310                 *valid_start = start;
1311                 *valid_end = min(end, end_pfn);
1312                 return 1;
1313         } else {
1314                 return 0;
1315         }
1316 }
1317 
1318 /*
1319  * Scan pfn range [start,end) to find movable/migratable pages (LRU pages,
1320  * non-lru movable pages and hugepages). We scan by pfn because it's much
1321  * easier than walking a linked list. This function returns the pfn
1322  * of the first found movable page if it's found, otherwise 0.
1323  */
1324 static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
1325 {
1326         unsigned long pfn;
1327         struct page *page;
1328         for (pfn = start; pfn < end; pfn++) {
1329                 if (pfn_valid(pfn)) {
1330                         page = pfn_to_page(pfn);
1331                         if (PageLRU(page))
1332                                 return pfn;
1333                         if (__PageMovable(page))
1334                                 return pfn;
1335                         if (PageHuge(page)) {
1336                                 if (hugepage_migration_supported(page_hstate(page)) &&
1337                                     page_huge_active(page))
1338                                         return pfn;
1339                                 else
1340                                         pfn = round_up(pfn + 1,
1341                                                 1 << compound_order(page)) - 1;
1342                         }
1343                 }
1344         }
1345         return 0;
1346 }
1347 
1348 static struct page *new_node_page(struct page *page, unsigned long private)
1349 {
1350         int nid = page_to_nid(page);
1351         nodemask_t nmask = node_states[N_MEMORY];
1352 
1353         /*
1354          * try to allocate from a different node but reuse this node if there
1355          * are no other online nodes to be used (e.g. we are offlining a part
1356          * of the only existing node)
1357          */
1358         node_clear(nid, nmask);
1359         if (nodes_empty(nmask))
1360                 node_set(nid, nmask);
1361 
1362         return new_page_nodemask(page, nid, &nmask);
1363 }
1364 
1365 #define NR_OFFLINE_AT_ONCE_PAGES        (256)
1366 static int
1367 do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
1368 {
1369         unsigned long pfn;
1370         struct page *page;
1371         int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
1372         int not_managed = 0;
1373         int ret = 0;
1374         LIST_HEAD(source);
1375 
1376         for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
1377                 if (!pfn_valid(pfn))
1378                         continue;
1379                 page = pfn_to_page(pfn);
1380 
1381                 if (PageHuge(page)) {
1382                         struct page *head = compound_head(page);
1383                         pfn = page_to_pfn(head) + (1<<compound_order(head)) - 1;
1384                         if (compound_order(head) > PFN_SECTION_SHIFT) {
1385                                 ret = -EBUSY;
1386                                 break;
1387                         }
1388                         if (isolate_huge_page(page, &source))
1389                                 move_pages -= 1 << compound_order(head);
1390                         continue;
1391                 } else if (PageTransHuge(page))
1392                         pfn = page_to_pfn(compound_head(page))
1393                                 + hpage_nr_pages(page) - 1;
1394 
1395                 if (!get_page_unless_zero(page))
1396                         continue;
1397                 /*
1398                  * We can skip free pages, and we can handle pages on
1399                  * the LRU as well as non-lru movable pages.
1400                  */
1401                 if (PageLRU(page))
1402                         ret = isolate_lru_page(page);
1403                 else
1404                         ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
1405                 if (!ret) { /* Success */
1406                         put_page(page);
1407                         list_add_tail(&page->lru, &source);
1408                         move_pages--;
1409                         if (!__PageMovable(page))
1410                                 inc_node_page_state(page, NR_ISOLATED_ANON +
1411                                                     page_is_file_cache(page));
1412 
1413                 } else {
1414 #ifdef CONFIG_DEBUG_VM
1415                         pr_alert("failed to isolate pfn %lx\n", pfn);
1416                         dump_page(page, "isolation failed");
1417 #endif
1418                         put_page(page);
1419                         /* Because we don't hold the zone->lock over the whole
1420                            range, we should check the page count again here. */
1421                         if (page_count(page)) {
1422                                 not_managed++;
1423                                 ret = -EBUSY;
1424                                 break;
1425                         }
1426                 }
1427         }
1428         if (!list_empty(&source)) {
1429                 if (not_managed) {
1430                         putback_movable_pages(&source);
1431                         goto out;
1432                 }
1433 
1434                 /* Allocate a new page from the nearest neighbor node */
1435                 ret = migrate_pages(&source, new_node_page, NULL, 0,
1436                                         MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
1437                 if (ret)
1438                         putback_movable_pages(&source);
1439         }
1440 out:
1441         return ret;
1442 }
1443 
1444 /*
1445  * remove from free_area[] and mark all as Reserved.
1446  */
1447 static int
1448 offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
1449                         void *data)
1450 {
1451         __offline_isolated_pages(start, start + nr_pages);
1452         return 0;
1453 }
1454 
1455 static void
1456 offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
1457 {
1458         walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
1459                                 offline_isolated_pages_cb);
1460 }
1461 
1462 /*
1463  * Check that all pages in the range, recorded as a memory resource, are isolated.
1464  */
1465 static int
1466 check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
1467                         void *data)
1468 {
1469         int ret;
1470         long offlined = *(long *)data;
1471         ret = test_pages_isolated(start_pfn, start_pfn + nr_pages, true);
1472         offlined = nr_pages;
1473         if (!ret)
1474                 *(long *)data += offlined;
1475         return ret;
1476 }
1477 
1478 static long
1479 check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
1480 {
1481         long offlined = 0;
1482         int ret;
1483 
1484         ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
1485                         check_pages_isolated_cb);
1486         if (ret < 0)
1487                 offlined = (long)ret;
1488         return offlined;
1489 }
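
     /*
      * Editor's sketch: both callbacks above follow the
      * walk_system_ram_range() contract -- each is invoked once per
      * contiguous chunk of System RAM in the pfn range, and a non-zero
      * return value stops the walk.  A hypothetical callback that merely
      * counts RAM pages would look like:
      *
      *	static int count_ram_pages_cb(unsigned long start_pfn,
      *				      unsigned long nr_pages, void *data)
      *	{
      *		*(unsigned long *)data += nr_pages;
      *		return 0;
      *	}
      *
      * driven by:
      *
      *	unsigned long total = 0;
      *	walk_system_ram_range(start_pfn, end_pfn - start_pfn, &total,
      *			      count_ram_pages_cb);
      */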
1490 
1491 static int __init cmdline_parse_movable_node(char *p)
1492 {
1493 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
1494         movable_node_enabled = true;
1495 #else
1496         pr_warn("movable_node parameter depends on CONFIG_HAVE_MEMBLOCK_NODE_MAP to work properly\n");
1497 #endif
1498         return 0;
1499 }
1500 early_param("movable_node", cmdline_parse_movable_node);
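
     /*
      * Editor's note: "movable_node" is passed on the kernel command line,
      * e.g. in an illustrative bootloader entry:
      *
      *	linux /boot/vmlinuz root=/dev/sda1 movable_node
      *
      * With CONFIG_HAVE_MEMBLOCK_NODE_MAP, memory that firmware marks as
      * hotpluggable can then be kept in ZONE_MOVABLE, so whole nodes remain
      * removable later.
      */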
1501 
1502 /* check which states in node_states will be changed when we offline memory */
1503 static void node_states_check_changes_offline(unsigned long nr_pages,
1504                 struct zone *zone, struct memory_notify *arg)
1505 {
1506         struct pglist_data *pgdat = zone->zone_pgdat;
1507         unsigned long present_pages = 0;
1508         enum zone_type zt, zone_last = ZONE_NORMAL;
1509 
1510         /*
1511          * If we have HIGHMEM or a movable node, node_states[N_NORMAL_MEMORY]
1512          * contains nodes which have zones of 0...ZONE_NORMAL,
1513          * so set zone_last to ZONE_NORMAL.
1514          *
1515          * If we have neither HIGHMEM nor a movable node,
1516          * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
1517          * 0...ZONE_MOVABLE, so set zone_last to ZONE_MOVABLE.
1518          */
1519         if (N_MEMORY == N_NORMAL_MEMORY)
1520                 zone_last = ZONE_MOVABLE;
1521 
1522         /*
1523          * Check whether node_states[N_NORMAL_MEMORY] will be changed.
1524          * If the memory to be offlined is in a zone of 0...zone_last,
1525          * and it is the last present memory there, 0...zone_last will
1526          * become empty after the offline, thus we can determine that we
1527          * will need to clear the node from node_states[N_NORMAL_MEMORY].
1528          */
1529         for (zt = 0; zt <= zone_last; zt++)
1530                 present_pages += pgdat->node_zones[zt].present_pages;
1531         if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
1532                 arg->status_change_nid_normal = zone_to_nid(zone);
1533         else
1534                 arg->status_change_nid_normal = -1;
1535 
1536 #ifdef CONFIG_HIGHMEM
1537         /*
1538          * If we have a movable node, node_states[N_HIGH_MEMORY]
1539          * contains nodes which have zones of 0...ZONE_HIGHMEM,
1540          * so set zone_last to ZONE_HIGHMEM.
1541          *
1542          * If we don't have a movable node, node_states[N_HIGH_MEMORY]
1543          * contains nodes which have zones of 0...ZONE_MOVABLE,
1544          * so set zone_last to ZONE_MOVABLE.
1545          */
1546         zone_last = ZONE_HIGHMEM;
1547         if (N_MEMORY == N_HIGH_MEMORY)
1548                 zone_last = ZONE_MOVABLE;
1549 
1550         for (; zt <= zone_last; zt++)
1551                 present_pages += pgdat->node_zones[zt].present_pages;
1552         if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
1553                 arg->status_change_nid_high = zone_to_nid(zone);
1554         else
1555                 arg->status_change_nid_high = -1;
1556 #else
1557         arg->status_change_nid_high = arg->status_change_nid_normal;
1558 #endif
1559 
1560         /*
1561          * node_states[N_MEMORY] contains nodes which have 0...ZONE_MOVABLE
1562          */
1563         zone_last = ZONE_MOVABLE;
1564 
1565         /*
1566          * Check whether node_states[N_MEMORY] will be changed.
1567          * If we try to offline the last present @nr_pages from the node,
1568          * we can determine that we will need to clear the node from
1569          * node_states[N_MEMORY].
1570          */
1571         for (; zt <= zone_last; zt++)
1572                 present_pages += pgdat->node_zones[zt].present_pages;
1573         if (nr_pages >= present_pages)
1574                 arg->status_change_nid = zone_to_nid(zone);
1575         else
1576                 arg->status_change_nid = -1;
1577 }
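
     /*
      * Editor's worked example (hypothetical numbers): suppose a node has
      * 1000 present pages, all in ZONE_NORMAL, and the offline request
      * covers nr_pages = 1000 of them.  Every "nr_pages >= present_pages"
      * test above then succeeds, so status_change_nid_normal,
      * status_change_nid_high and status_change_nid are all set to the
      * node id: the notifier chain is told the node is about to lose its
      * last memory.  Offlining only 500 of those pages instead would leave
      * all three fields at -1.
      */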
1578 
1579 static void node_states_clear_node(int node, struct memory_notify *arg)
1580 {
1581         if (arg->status_change_nid_normal >= 0)
1582                 node_clear_state(node, N_NORMAL_MEMORY);
1583 
1584         if ((N_MEMORY != N_NORMAL_MEMORY) &&
1585             (arg->status_change_nid_high >= 0))
1586                 node_clear_state(node, N_HIGH_MEMORY);
1587 
1588         if ((N_MEMORY != N_HIGH_MEMORY) &&
1589             (arg->status_change_nid >= 0))
1590                 node_clear_state(node, N_MEMORY);
1591 }
1592 
1593 static int __ref __offline_pages(unsigned long start_pfn,
1594                   unsigned long end_pfn)
1595 {
1596         unsigned long pfn, nr_pages;
1597         long offlined_pages;
1598         int ret, node;
1599         unsigned long flags;
1600         unsigned long valid_start, valid_end;
1601         struct zone *zone;
1602         struct memory_notify arg;
1603 
1604         /* At a minimum, alignment to pageblock boundaries is necessary */
1605         if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
1606                 return -EINVAL;
1607         if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
1608                 return -EINVAL;
1609         /* This makes hotplug much easier... and readable.
1610            We assume this for now. */
1611         if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
1612                 return -EINVAL;
1613 
1614         zone = page_zone(pfn_to_page(valid_start));
1615         node = zone_to_nid(zone);
1616         nr_pages = end_pfn - start_pfn;
1617 
1618         /* set above range as isolated */
1619         ret = start_isolate_page_range(start_pfn, end_pfn,
1620                                        MIGRATE_MOVABLE, true);
1621         if (ret)
1622                 return ret;
1623 
1624         arg.start_pfn = start_pfn;
1625         arg.nr_pages = nr_pages;
1626         node_states_check_changes_offline(nr_pages, zone, &arg);
1627 
1628         ret = memory_notify(MEM_GOING_OFFLINE, &arg);
1629         ret = notifier_to_errno(ret);
1630         if (ret)
1631                 goto failed_removal;
1632 
1633         pfn = start_pfn;
1634 repeat:
1635         /* start memory hot removal */
1636         ret = -EINTR;
1637         if (signal_pending(current))
1638                 goto failed_removal;
1639 
1640         cond_resched();
1641         lru_add_drain_all();
1642         drain_all_pages(zone);
1643 
1644         pfn = scan_movable_pages(start_pfn, end_pfn);
1645         if (pfn) { /* We have movable pages */
1646                 ret = do_migrate_range(pfn, end_pfn);
1647                 goto repeat;
1648         }
1649 
1650         /*
1651          * Dissolve free hugepages in the memory block before actually
1652          * offlining, in order to keep hugetlbfs's object counting consistent.
1653          */
1654         ret = dissolve_free_huge_pages(start_pfn, end_pfn);
1655         if (ret)
1656                 goto failed_removal;
1657         /* check again */
1658         offlined_pages = check_pages_isolated(start_pfn, end_pfn);
1659         if (offlined_pages < 0)
1660                 goto repeat;
1661         pr_info("Offlined Pages %ld\n", offlined_pages);
1662         /* OK, all of our target pages are isolated.
1663            We cannot do a rollback at this point. */
1664         offline_isolated_pages(start_pfn, end_pfn);
1665         /* reset pagetype flags and make the migratetype MOVABLE */
1666         undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
1667         /* removal success */
1668         adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
1669         zone->present_pages -= offlined_pages;
1670 
1671         pgdat_resize_lock(zone->zone_pgdat, &flags);
1672         zone->zone_pgdat->node_present_pages -= offlined_pages;
1673         pgdat_resize_unlock(zone->zone_pgdat, &flags);
1674 
1675         init_per_zone_wmark_min();
1676 
1677         if (!populated_zone(zone)) {
1678                 zone_pcp_reset(zone);
1679                 build_all_zonelists(NULL);
1680         } else
1681                 zone_pcp_update(zone);
1682 
1683         node_states_clear_node(node, &arg);
1684         if (arg.status_change_nid >= 0) {
1685                 kswapd_stop(node);
1686                 kcompactd_stop(node);
1687         }
1688 
1689         vm_total_pages = nr_free_pagecache_pages();
1690         writeback_set_ratelimit();
1691 
1692         memory_notify(MEM_OFFLINE, &arg);
1693         return 0;
1694 
1695 failed_removal:
1696         pr_debug("memory offlining [mem %#010llx-%#010llx] failed\n",
1697                  (unsigned long long) start_pfn << PAGE_SHIFT,
1698                  ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
1699         memory_notify(MEM_CANCEL_OFFLINE, &arg);
1700         /* pushback to free area */
1701         undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
1702         return ret;
1703 }
1704 
1705 /* Must be protected by mem_hotplug_begin() or a device_lock */
1706 int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
1707 {
1708         return __offline_pages(start_pfn, start_pfn + nr_pages);
1709 }
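
     /*
      * Editor's sketch: a hypothetical caller offlining one memory section
      * under the locking required above (start_pfn must be pageblock
      * aligned):
      *
      *	mem_hotplug_begin();
      *	ret = offline_pages(start_pfn, PAGES_PER_SECTION);
      *	mem_hotplug_done();
      *
      * In-tree this path is normally reached by writing "offline" to
      * /sys/devices/system/memory/memoryN/state rather than by calling
      * offline_pages() directly.
      */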
1710 #endif /* CONFIG_MEMORY_HOTREMOVE */
1711 
1712 /**
1713  * walk_memory_range - walks through all mem sections in [start_pfn, end_pfn)
1714  * @start_pfn: start pfn of the memory range
1715  * @end_pfn: end pfn of the memory range
1716  * @arg: argument passed to func
1717  * @func: callback for each memory section walked
1718  *
1719  * This function walks through all present mem sections in range
1720  * [start_pfn, end_pfn) and calls func on each mem section.
1721  *
1722  * Returns the return value of func.
1723  */
1724 int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
1725                 void *arg, int (*func)(struct memory_block *, void *))
1726 {
1727         struct memory_block *mem = NULL;
1728         struct mem_section *section;
1729         unsigned long pfn, section_nr;
1730         int ret;
1731 
1732         for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
1733                 section_nr = pfn_to_section_nr(pfn);
1734                 if (!present_section_nr(section_nr))
1735                         continue;
1736 
1737                 section = __nr_to_section(section_nr);
1738                 /* same memblock? */
1739                 if (mem)
1740                         if ((section_nr >= mem->start_section_nr) &&
1741                             (section_nr <= mem->end_section_nr))
1742                                 continue;
1743 
1744                 mem = find_memory_block_hinted(section, mem);
1745                 if (!mem)
1746                         continue;
1747 
1748                 ret = func(mem, arg);
1749                 if (ret) {
1750                         kobject_put(&mem->dev.kobj);
1751                         return ret;
1752                 }
1753         }
1754 
1755         if (mem)
1756                 kobject_put(&mem->dev.kobj);
1757 
1758         return 0;
1759 }
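
     /*
      * Editor's sketch: a hypothetical walker that counts online memory
      * blocks in a physical range, using the contract documented above
      * (a non-zero callback return value aborts the walk):
      *
      *	static int count_online_cb(struct memory_block *mem, void *arg)
      *	{
      *		if (mem->state == MEM_ONLINE)
      *			(*(unsigned int *)arg)++;
      *		return 0;
      *	}
      *
      *	unsigned int online = 0;
      *	walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1),
      *			  &online, count_online_cb);
      */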
1760 
1761 #ifdef CONFIG_MEMORY_HOTREMOVE
1762 static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
1763 {
1764         int ret = !is_memblock_offlined(mem);
1765 
1766         if (unlikely(ret)) {
1767                 phys_addr_t beginpa, endpa;
1768 
1769                 beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr));
1770                 endpa = PFN_PHYS(section_nr_to_pfn(mem->end_section_nr + 1))-1;
1771                 pr_warn("removing memory fails, because memory [%pa-%pa] is onlined\n",
1772                         &beginpa, &endpa);
1773         }
1774 
1775         return ret;
1776 }
1777 
1778 static int check_cpu_on_node(pg_data_t *pgdat)
1779 {
1780         int cpu;
1781 
1782         for_each_present_cpu(cpu) {
1783                 if (cpu_to_node(cpu) == pgdat->node_id)
1784                         /*
1785                          * A cpu on this node hasn't been removed, so we
1786                          * can't offline this node.
1787                          */
1788                         return -EBUSY;
1789         }
1790 
1791         return 0;
1792 }
1793 
1794 static void unmap_cpu_on_node(pg_data_t *pgdat)
1795 {
1796 #ifdef CONFIG_ACPI_NUMA
1797         int cpu;
1798 
1799         for_each_possible_cpu(cpu)
1800                 if (cpu_to_node(cpu) == pgdat->node_id)
1801                         numa_clear_node(cpu);
1802 #endif
1803 }
1804 
1805 static int check_and_unmap_cpu_on_node(pg_data_t *pgdat)
1806 {
1807         int ret;
1808 
1809         ret = check_cpu_on_node(pgdat);
1810         if (ret)
1811                 return ret;
1812 
1813         /*
1814          * The node will be offlined when we get here, so we can clear
1815          * the cpu_to_node() mapping now.
1816          */
1817 
1818         unmap_cpu_on_node(pgdat);
1819         return 0;
1820 }
1821 
1822 /**
1823  * try_offline_node
1824  * @nid: the node ID
1825  *
1826  * Offline a node if all memory sections and cpus of the node are removed.
1827  *
1828  * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
1829  * and online/offline operations before this call.
1830  */
1831 void try_offline_node(int nid)
1832 {
1833         pg_data_t *pgdat = NODE_DATA(nid);
1834         unsigned long start_pfn = pgdat->node_start_pfn;
1835         unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
1836         unsigned long pfn;
1837 
1838         for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
1839                 unsigned long section_nr = pfn_to_section_nr(pfn);
1840 
1841                 if (!present_section_nr(section_nr))
1842                         continue;
1843 
1844                 if (pfn_to_nid(pfn) != nid)
1845                         continue;
1846 
1847                 /*
1848                  * Some memory sections of this node have not been removed,
1849                  * so we can't offline the node now.
1850                  */
1851                 return;
1852         }
1853 
1854         if (check_and_unmap_cpu_on_node(pgdat))
1855                 return;
1856 
1857         /*
1858          * All memory and cpus of this node have been removed; we can
1859          * offline this node now.
1860          */
1861         node_set_offline(nid);
1862         unregister_one_node(nid);
1863 }
1864 EXPORT_SYMBOL(try_offline_node);
1865 
1866 /**
1867  * remove_memory
1868  * @nid: the node ID
1869  * @start: physical address of the region to remove
1870  * @size: size of the region to remove
1871  *
1872  * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
1873  * and online/offline operations before this call, as required by
1874  * try_offline_node().
1875  */
1876 void __ref remove_memory(int nid, u64 start, u64 size)
1877 {
1878         int ret;
1879 
1880         BUG_ON(check_hotplug_memory_range(start, size));
1881 
1882         mem_hotplug_begin();
1883 
1884         /*
1885          * All memory blocks must be offlined before removing memory.  Check
1886          * whether all memory blocks in question are offline and trigger a BUG()
1887          * if this is not the case.
1888          */
1889         ret = walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1), NULL,
1890                                 check_memblock_offlined_cb);
1891         if (ret)
1892                 BUG();
1893 
1894         /* remove memmap entry */
1895         firmware_map_remove(start, start + size, "System RAM");
1896         memblock_free(start, size);
1897         memblock_remove(start, size);
1898 
1899         arch_remove_memory(start, size, NULL);
1900 
1901         try_offline_node(nid);
1902 
1903         mem_hotplug_done();
1904 }
1905 EXPORT_SYMBOL_GPL(remove_memory);
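
     /*
      * Editor's sketch: remove_memory() expects the range to be offline
      * already; a hypothetical driver tearing down a region it hotplugged
      * earlier (start/size illustrative) would do:
      *
      *	lock_device_hotplug();
      *	remove_memory(nid, start, size);
      *	unlock_device_hotplug();
      */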
1906 #endif /* CONFIG_MEMORY_HOTREMOVE */
1907 
