TOMOYO Linux Cross Reference
Linux/mm/migrate.c


  1 /*
  2  * Memory Migration functionality - linux/mm/migrate.c
  3  *
  4  * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
  5  *
  6  * Page migration was first developed in the context of the memory hotplug
  7  * project. The main authors of the migration code are:
  8  *
  9  * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 10  * Hirokazu Takahashi <taka@valinux.co.jp>
 11  * Dave Hansen <haveblue@us.ibm.com>
 12  * Christoph Lameter
 13  */
 14 
 15 #include <linux/migrate.h>
 16 #include <linux/export.h>
 17 #include <linux/swap.h>
 18 #include <linux/swapops.h>
 19 #include <linux/pagemap.h>
 20 #include <linux/buffer_head.h>
 21 #include <linux/mm_inline.h>
 22 #include <linux/nsproxy.h>
 23 #include <linux/pagevec.h>
 24 #include <linux/ksm.h>
 25 #include <linux/rmap.h>
 26 #include <linux/topology.h>
 27 #include <linux/cpu.h>
 28 #include <linux/cpuset.h>
 29 #include <linux/writeback.h>
 30 #include <linux/mempolicy.h>
 31 #include <linux/vmalloc.h>
 32 #include <linux/security.h>
 33 #include <linux/memcontrol.h>
 34 #include <linux/syscalls.h>
 35 #include <linux/hugetlb.h>
 36 #include <linux/hugetlb_cgroup.h>
 37 #include <linux/gfp.h>
 38 #include <linux/balloon_compaction.h>
 39 #include <linux/mmu_notifier.h>
 40 
 41 #include <asm/tlbflush.h>
 42 
 43 #define CREATE_TRACE_POINTS
 44 #include <trace/events/migrate.h>
 45 
 46 #include "internal.h"
 47 
 48 /*
 49  * migrate_prep() needs to be called before we start compiling a list of pages
 50  * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
 51  * undesirable, use migrate_prep_local()
 52  */
 53 int migrate_prep(void)
 54 {
 55         /*
 56          * Clear the LRU lists so pages can be isolated.
 57          * Note that pages may be moved off the LRU after we have
 58          * drained them. Those pages will fail to migrate like other
 59          * pages that may be busy.
 60          */
 61         lru_add_drain_all();
 62 
 63         return 0;
 64 }
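
/*
 * Illustration (not part of this file): a minimal sketch of the calling
 * sequence described above, assuming the caller has already found a page
 * to move.  "target_page" and "alloc_target" are hypothetical names; the
 * same pattern appears in do_move_page_to_node_array() later in this file.
 *
 *	LIST_HEAD(pagelist);
 *
 *	migrate_prep();
 *	if (!isolate_lru_page(target_page)) {
 *		list_add_tail(&target_page->lru, &pagelist);
 *		inc_zone_page_state(target_page, NR_ISOLATED_ANON +
 *				    page_is_file_cache(target_page));
 *	}
 *	if (migrate_pages(&pagelist, alloc_target, NULL, 0,
 *			  MIGRATE_SYNC, MR_SYSCALL))
 *		putback_movable_pages(&pagelist);
 */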
 65 
 66 /* Do the necessary work of migrate_prep but not if it involves other CPUs */
 67 int migrate_prep_local(void)
 68 {
 69         lru_add_drain();
 70 
 71         return 0;
 72 }
 73 
 74 /*
 75  * Put previously isolated pages back onto the appropriate lists
 76  * from where they were once taken off for compaction/migration.
 77  *
 78  * This function shall be used whenever the isolated pageset has been
 79  * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
 80  * and isolate_huge_page().
 81  */
 82 void putback_movable_pages(struct list_head *l)
 83 {
 84         struct page *page;
 85         struct page *page2;
 86 
 87         list_for_each_entry_safe(page, page2, l, lru) {
 88                 if (unlikely(PageHuge(page))) {
 89                         putback_active_hugepage(page);
 90                         continue;
 91                 }
 92                 list_del(&page->lru);
 93                 dec_zone_page_state(page, NR_ISOLATED_ANON +
 94                                 page_is_file_cache(page));
 95                 if (unlikely(isolated_balloon_page(page)))
 96                         balloon_page_putback(page);
 97                 else
 98                         putback_lru_page(page);
 99         }
100 }
101 
102 /*
103  * Restore a potential migration pte to a working pte entry
104  */
105 static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
106                                  unsigned long addr, void *old)
107 {
108         struct mm_struct *mm = vma->vm_mm;
109         swp_entry_t entry;
110         pmd_t *pmd;
111         pte_t *ptep, pte;
112         spinlock_t *ptl;
113 
114         if (unlikely(PageHuge(new))) {
115                 ptep = huge_pte_offset(mm, addr);
116                 if (!ptep)
117                         goto out;
118                 ptl = huge_pte_lockptr(hstate_vma(vma), mm, ptep);
119         } else {
120                 pmd = mm_find_pmd(mm, addr);
121                 if (!pmd)
122                         goto out;
123 
124                 ptep = pte_offset_map(pmd, addr);
125 
126                 /*
127                  * Peek to check is_swap_pte() before taking ptlock?  No, we
128                  * can race mremap's move_ptes(), which skips anon_vma lock.
129                  */
130 
131                 ptl = pte_lockptr(mm, pmd);
132         }
133 
134         spin_lock(ptl);
135         pte = *ptep;
136         if (!is_swap_pte(pte))
137                 goto unlock;
138 
139         entry = pte_to_swp_entry(pte);
140 
141         if (!is_migration_entry(entry) ||
142             migration_entry_to_page(entry) != old)
143                 goto unlock;
144 
145         get_page(new);
146         pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
147         if (pte_swp_soft_dirty(*ptep))
148                 pte = pte_mksoft_dirty(pte);
149 
150         /* Recheck VMA as permissions may have changed since migration started */
151         if (is_write_migration_entry(entry))
152                 pte = maybe_mkwrite(pte, vma);
153 
154 #ifdef CONFIG_HUGETLB_PAGE
155         if (PageHuge(new)) {
156                 pte = pte_mkhuge(pte);
157                 pte = arch_make_huge_pte(pte, vma, new, 0);
158         }
159 #endif
160         flush_dcache_page(new);
161         set_pte_at(mm, addr, ptep, pte);
162 
163         if (PageHuge(new)) {
164                 if (PageAnon(new))
165                         hugepage_add_anon_rmap(new, vma, addr);
166                 else
167                         page_dup_rmap(new);
168         } else if (PageAnon(new))
169                 page_add_anon_rmap(new, vma, addr);
170         else
171                 page_add_file_rmap(new);
172 
173         /* No need to invalidate - it was non-present before */
174         update_mmu_cache(vma, addr, ptep);
175 unlock:
176         pte_unmap_unlock(ptep, ptl);
177 out:
178         return SWAP_AGAIN;
179 }
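
/*
 * Illustration (not part of this file): remove_migration_pte() undoes what
 * try_to_unmap(TTU_MIGRATION) installs from mm/rmap.c, which in this kernel
 * generation replaces a present pte with a swap-format migration entry
 * roughly as follows (sketch, not a verbatim quote):
 *
 *	swp_entry_t entry = make_migration_entry(page, pte_write(pteval));
 *	set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
 *
 * A fault on such a pte ends up in migration_entry_wait() below rather
 * than in the swap-in path.
 */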
180 
181 /*
182  * Get rid of all migration entries and replace them by
183  * references to the indicated page.
184  */
185 static void remove_migration_ptes(struct page *old, struct page *new)
186 {
187         struct rmap_walk_control rwc = {
188                 .rmap_one = remove_migration_pte,
189                 .arg = old,
190         };
191 
192         rmap_walk(new, &rwc);
193 }
194 
195 /*
196  * Something used the pte of a page under migration. We need to
197  * get to the page and wait until migration is finished.
198  * When we return from this function the fault will be retried.
199  */
200 void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
201                                 spinlock_t *ptl)
202 {
203         pte_t pte;
204         swp_entry_t entry;
205         struct page *page;
206 
207         spin_lock(ptl);
208         pte = *ptep;
209         if (!is_swap_pte(pte))
210                 goto out;
211 
212         entry = pte_to_swp_entry(pte);
213         if (!is_migration_entry(entry))
214                 goto out;
215 
216         page = migration_entry_to_page(entry);
217 
218         /*
219          * Once the radix-tree replacement step of page migration has started,
220          * page_count *must* be zero, and we don't want to call
221          * wait_on_page_locked() against a page without holding a reference
222          * from get_page(). So we use get_page_unless_zero() here; even if it
223          * fails, the page fault will simply occur again.
224          */
225         if (!get_page_unless_zero(page))
226                 goto out;
227         pte_unmap_unlock(ptep, ptl);
228         wait_on_page_locked(page);
229         put_page(page);
230         return;
231 out:
232         pte_unmap_unlock(ptep, ptl);
233 }
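
/*
 * Illustration (not part of this file): the usual way into
 * migration_entry_wait() is the fault path.  In this kernel generation
 * do_swap_page() checks for a non-swap entry roughly like this (sketch):
 *
 *	entry = pte_to_swp_entry(orig_pte);
 *	if (unlikely(non_swap_entry(entry))) {
 *		if (is_migration_entry(entry))
 *			migration_entry_wait(mm, pmd, address);
 *	}
 *
 * so the faulting task sleeps until the migrating page is unlocked and
 * then retries the fault.
 */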
234 
235 void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
236                                 unsigned long address)
237 {
238         spinlock_t *ptl = pte_lockptr(mm, pmd);
239         pte_t *ptep = pte_offset_map(pmd, address);
240         __migration_entry_wait(mm, ptep, ptl);
241 }
242 
243 void migration_entry_wait_huge(struct vm_area_struct *vma,
244                 struct mm_struct *mm, pte_t *pte)
245 {
246         spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
247         __migration_entry_wait(mm, pte, ptl);
248 }
249 
250 #ifdef CONFIG_BLOCK
251 /* Returns true if all buffers are successfully locked */
252 static bool buffer_migrate_lock_buffers(struct buffer_head *head,
253                                                         enum migrate_mode mode)
254 {
255         struct buffer_head *bh = head;
256 
257         /* Simple case, sync compaction */
258         if (mode != MIGRATE_ASYNC) {
259                 do {
260                         get_bh(bh);
261                         lock_buffer(bh);
262                         bh = bh->b_this_page;
263 
264                 } while (bh != head);
265 
266                 return true;
267         }
268 
269         /* async case, we cannot block on lock_buffer so use trylock_buffer */
270         do {
271                 get_bh(bh);
272                 if (!trylock_buffer(bh)) {
273                         /*
274                          * We failed to lock the buffer and cannot stall in
275                          * async migration. Release the taken locks
276                          */
277                         struct buffer_head *failed_bh = bh;
278                         put_bh(failed_bh);
279                         bh = head;
280                         while (bh != failed_bh) {
281                                 unlock_buffer(bh);
282                                 put_bh(bh);
283                                 bh = bh->b_this_page;
284                         }
285                         return false;
286                 }
287 
288                 bh = bh->b_this_page;
289         } while (bh != head);
290         return true;
291 }
292 #else
293 static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
294                                                         enum migrate_mode mode)
295 {
296         return true;
297 }
298 #endif /* CONFIG_BLOCK */
299 
300 /*
301  * Replace the page in the mapping.
302  *
303  * The number of remaining references must be:
304  * 1 for anonymous pages without a mapping
305  * 2 for pages with a mapping
306  * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
307  */
308 int migrate_page_move_mapping(struct address_space *mapping,
309                 struct page *newpage, struct page *page,
310                 struct buffer_head *head, enum migrate_mode mode,
311                 int extra_count)
312 {
313         int expected_count = 1 + extra_count;
314         void **pslot;
315 
316         if (!mapping) {
317                 /* Anonymous page without mapping */
318                 if (page_count(page) != expected_count)
319                         return -EAGAIN;
320                 return MIGRATEPAGE_SUCCESS;
321         }
322 
323         spin_lock_irq(&mapping->tree_lock);
324 
325         pslot = radix_tree_lookup_slot(&mapping->page_tree,
326                                         page_index(page));
327 
328         expected_count += 1 + page_has_private(page);
329         if (page_count(page) != expected_count ||
330                 radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
331                 spin_unlock_irq(&mapping->tree_lock);
332                 return -EAGAIN;
333         }
334 
335         if (!page_freeze_refs(page, expected_count)) {
336                 spin_unlock_irq(&mapping->tree_lock);
337                 return -EAGAIN;
338         }
339 
340         /*
341          * In the async migration case of moving a page with buffers, lock the
342          * buffers using trylock before the mapping is moved. If the mapping
343          * were moved first and we then failed to lock the buffers, we could
344          * not move the mapping back due to the elevated page count, and would
345          * have to block waiting on other references to be dropped.
346          */
347         if (mode == MIGRATE_ASYNC && head &&
348                         !buffer_migrate_lock_buffers(head, mode)) {
349                 page_unfreeze_refs(page, expected_count);
350                 spin_unlock_irq(&mapping->tree_lock);
351                 return -EAGAIN;
352         }
353 
354         /*
355          * Now we know that no one else is looking at the page.
356          */
357         get_page(newpage);      /* add cache reference */
358         if (PageSwapCache(page)) {
359                 SetPageSwapCache(newpage);
360                 set_page_private(newpage, page_private(page));
361         }
362 
363         radix_tree_replace_slot(pslot, newpage);
364 
365         /*
366          * Drop cache reference from old page by unfreezing
367          * to one less reference.
368          * We know this isn't the last reference.
369          */
370         page_unfreeze_refs(page, expected_count - 1);
371 
372         /*
373          * If moved to a different zone then also account
374          * the page for that zone. Other VM counters will be
375          * taken care of when we establish references to the
376          * new page and drop references to the old page.
377          *
378          * Note that anonymous pages are accounted for
379          * via NR_FILE_PAGES and NR_ANON_PAGES if they
380          * are mapped to swap space.
381          */
382         __dec_zone_page_state(page, NR_FILE_PAGES);
383         __inc_zone_page_state(newpage, NR_FILE_PAGES);
384         if (!PageSwapCache(page) && PageSwapBacked(page)) {
385                 __dec_zone_page_state(page, NR_SHMEM);
386                 __inc_zone_page_state(newpage, NR_SHMEM);
387         }
388         spin_unlock_irq(&mapping->tree_lock);
389 
390         return MIGRATEPAGE_SUCCESS;
391 }
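
/*
 * Illustration (not part of this file): the reference-count arithmetic
 * above, worked through for the common extra_count == 0 case:
 *
 *	expected_count = 1 + 0;			caller's isolation reference
 *	expected_count += 1 + page_has_private(page);
 *						page cache ref (+ buffer heads)
 *
 * giving 2 for a plain page-cache page and 3 when PagePrivate buffers are
 * attached, which matches the table in the comment before this function.
 */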
392 
393 /*
394  * The expected number of remaining references is the same as that
395  * of migrate_page_move_mapping().
396  */
397 int migrate_huge_page_move_mapping(struct address_space *mapping,
398                                    struct page *newpage, struct page *page)
399 {
400         int expected_count;
401         void **pslot;
402 
403         if (!mapping) {
404                 if (page_count(page) != 1)
405                         return -EAGAIN;
406                 return MIGRATEPAGE_SUCCESS;
407         }
408 
409         spin_lock_irq(&mapping->tree_lock);
410 
411         pslot = radix_tree_lookup_slot(&mapping->page_tree,
412                                         page_index(page));
413 
414         expected_count = 2 + page_has_private(page);
415         if (page_count(page) != expected_count ||
416                 radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
417                 spin_unlock_irq(&mapping->tree_lock);
418                 return -EAGAIN;
419         }
420 
421         if (!page_freeze_refs(page, expected_count)) {
422                 spin_unlock_irq(&mapping->tree_lock);
423                 return -EAGAIN;
424         }
425 
426         get_page(newpage);
427 
428         radix_tree_replace_slot(pslot, newpage);
429 
430         page_unfreeze_refs(page, expected_count - 1);
431 
432         spin_unlock_irq(&mapping->tree_lock);
433         return MIGRATEPAGE_SUCCESS;
434 }
435 
436 /*
437  * Gigantic pages are so large that we do not guarantee that page++ pointer
438  * arithmetic will work across the entire page.  We need something more
439  * specialized.
440  */
441 static void __copy_gigantic_page(struct page *dst, struct page *src,
442                                 int nr_pages)
443 {
444         int i;
445         struct page *dst_base = dst;
446         struct page *src_base = src;
447 
448         for (i = 0; i < nr_pages; ) {
449                 cond_resched();
450                 copy_highpage(dst, src);
451 
452                 i++;
453                 dst = mem_map_next(dst, dst_base, i);
454                 src = mem_map_next(src, src_base, i);
455         }
456 }
457 
458 static void copy_huge_page(struct page *dst, struct page *src)
459 {
460         int i;
461         int nr_pages;
462 
463         if (PageHuge(src)) {
464                 /* hugetlbfs page */
465                 struct hstate *h = page_hstate(src);
466                 nr_pages = pages_per_huge_page(h);
467 
468                 if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
469                         __copy_gigantic_page(dst, src, nr_pages);
470                         return;
471                 }
472         } else {
473                 /* thp page */
474                 BUG_ON(!PageTransHuge(src));
475                 nr_pages = hpage_nr_pages(src);
476         }
477 
478         for (i = 0; i < nr_pages; i++) {
479                 cond_resched();
480                 copy_highpage(dst + i, src + i);
481         }
482 }
483 
484 /*
485  * Copy the page to its new location
486  */
487 void migrate_page_copy(struct page *newpage, struct page *page)
488 {
489         int cpupid;
490 
491         if (PageHuge(page) || PageTransHuge(page))
492                 copy_huge_page(newpage, page);
493         else
494                 copy_highpage(newpage, page);
495 
496         if (PageError(page))
497                 SetPageError(newpage);
498         if (PageReferenced(page))
499                 SetPageReferenced(newpage);
500         if (PageUptodate(page))
501                 SetPageUptodate(newpage);
502         if (TestClearPageActive(page)) {
503                 VM_BUG_ON_PAGE(PageUnevictable(page), page);
504                 SetPageActive(newpage);
505         } else if (TestClearPageUnevictable(page))
506                 SetPageUnevictable(newpage);
507         if (PageChecked(page))
508                 SetPageChecked(newpage);
509         if (PageMappedToDisk(page))
510                 SetPageMappedToDisk(newpage);
511 
512         if (PageDirty(page)) {
513                 clear_page_dirty_for_io(page);
514                 /*
515                  * Want to mark the page and the radix tree as dirty, and
516                  * redo the accounting that clear_page_dirty_for_io undid,
517                  * but we can't use set_page_dirty because that function
518                  * is actually a signal that all of the page has become dirty,
519                  * whereas only part of our page may be dirty.
520                  */
521                 if (PageSwapBacked(page))
522                         SetPageDirty(newpage);
523                 else
524                         __set_page_dirty_nobuffers(newpage);
525         }
526 
527         /*
528          * Copy NUMA information to the new page, to prevent over-eager
529          * future migrations of this same page.
530          */
531         cpupid = page_cpupid_xchg_last(page, -1);
532         page_cpupid_xchg_last(newpage, cpupid);
533 
534         mlock_migrate_page(newpage, page);
535         ksm_migrate_page(newpage, page);
536         /*
537          * Please do not reorder this without considering how mm/ksm.c's
538          * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
539          */
540         ClearPageSwapCache(page);
541         ClearPagePrivate(page);
542         set_page_private(page, 0);
543 
544         /*
545          * If any waiters have accumulated on the new page then
546          * wake them up.
547          */
548         if (PageWriteback(newpage))
549                 end_page_writeback(newpage);
550 }
551 
552 /************************************************************
553  *                    Migration functions
554  ***********************************************************/
555 
556 /*
557  * Common logic to directly migrate a single page suitable for
558  * pages that do not use PagePrivate/PagePrivate2.
559  *
560  * Pages are locked upon entry and exit.
561  */
562 int migrate_page(struct address_space *mapping,
563                 struct page *newpage, struct page *page,
564                 enum migrate_mode mode)
565 {
566         int rc;
567 
568         BUG_ON(PageWriteback(page));    /* Writeback must be complete */
569 
570         rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
571 
572         if (rc != MIGRATEPAGE_SUCCESS)
573                 return rc;
574 
575         migrate_page_copy(newpage, page);
576         return MIGRATEPAGE_SUCCESS;
577 }
578 EXPORT_SYMBOL(migrate_page);
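
/*
 * Illustration (not part of this file): filesystems opt into this helper
 * via address_space_operations.  A typical hook-up in filesystems of this
 * era looks roughly like the following ("example_*" names are hypothetical):
 *
 *	static const struct address_space_operations example_aops = {
 *		.readpage	= example_readpage,
 *		.writepage	= example_writepage,
 *		.migratepage	= migrate_page,
 *	};
 *
 * move_to_new_page() below calls ->migratepage() when it is set and falls
 * back to fallback_migrate_page() otherwise; block-device backed
 * filesystems may use buffer_migrate_page() instead.
 */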
579 
580 #ifdef CONFIG_BLOCK
581 /*
582  * Migration function for pages with buffers. This function can only be used
583  * if the underlying filesystem guarantees that no other references to "page"
584  * exist.
585  */
586 int buffer_migrate_page(struct address_space *mapping,
587                 struct page *newpage, struct page *page, enum migrate_mode mode)
588 {
589         struct buffer_head *bh, *head;
590         int rc;
591 
592         if (!page_has_buffers(page))
593                 return migrate_page(mapping, newpage, page, mode);
594 
595         head = page_buffers(page);
596 
597         rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0);
598 
599         if (rc != MIGRATEPAGE_SUCCESS)
600                 return rc;
601 
602         /*
603          * In the async case, migrate_page_move_mapping locked the buffers
604          * with an IRQ-safe spinlock held. In the sync case, the buffers
605          * need to be locked now
606          */
607         if (mode != MIGRATE_ASYNC)
608                 BUG_ON(!buffer_migrate_lock_buffers(head, mode));
609 
610         ClearPagePrivate(page);
611         set_page_private(newpage, page_private(page));
612         set_page_private(page, 0);
613         put_page(page);
614         get_page(newpage);
615 
616         bh = head;
617         do {
618                 set_bh_page(bh, newpage, bh_offset(bh));
619                 bh = bh->b_this_page;
620 
621         } while (bh != head);
622 
623         SetPagePrivate(newpage);
624 
625         migrate_page_copy(newpage, page);
626 
627         bh = head;
628         do {
629                 unlock_buffer(bh);
630                 put_bh(bh);
631                 bh = bh->b_this_page;
632 
633         } while (bh != head);
634 
635         return MIGRATEPAGE_SUCCESS;
636 }
637 EXPORT_SYMBOL(buffer_migrate_page);
638 #endif
639 
640 /*
641  * Writeback a page to clean the dirty state
642  */
643 static int writeout(struct address_space *mapping, struct page *page)
644 {
645         struct writeback_control wbc = {
646                 .sync_mode = WB_SYNC_NONE,
647                 .nr_to_write = 1,
648                 .range_start = 0,
649                 .range_end = LLONG_MAX,
650                 .for_reclaim = 1
651         };
652         int rc;
653 
654         if (!mapping->a_ops->writepage)
655                 /* No write method for the address space */
656                 return -EINVAL;
657 
658         if (!clear_page_dirty_for_io(page))
659                 /* Someone else already triggered a write */
660                 return -EAGAIN;
661 
662         /*
663          * A dirty page may imply that the underlying filesystem has
664          * the page on some queue. So the page must be clean for
665          * migration. Writeout may mean we lose the lock and the
666          * page state is no longer what we checked for earlier.
667          * At this point we know that the migration attempt cannot
668          * be successful.
669          */
670         remove_migration_ptes(page, page);
671 
672         rc = mapping->a_ops->writepage(page, &wbc);
673 
674         if (rc != AOP_WRITEPAGE_ACTIVATE)
675                 /* unlocked. Relock */
676                 lock_page(page);
677 
678         return (rc < 0) ? -EIO : -EAGAIN;
679 }
680 
681 /*
682  * Default handling if a filesystem does not provide a migration function.
683  */
684 static int fallback_migrate_page(struct address_space *mapping,
685         struct page *newpage, struct page *page, enum migrate_mode mode)
686 {
687         if (PageDirty(page)) {
688                 /* Only writeback pages in full synchronous migration */
689                 if (mode != MIGRATE_SYNC)
690                         return -EBUSY;
691                 return writeout(mapping, page);
692         }
693 
694         /*
695          * Buffers may be managed in a filesystem specific way.
696          * We must have no buffers or drop them.
697          */
698         if (page_has_private(page) &&
699             !try_to_release_page(page, GFP_KERNEL))
700                 return -EAGAIN;
701 
702         return migrate_page(mapping, newpage, page, mode);
703 }
704 
705 /*
706  * Move a page to a newly allocated page
707  * The page is locked and all ptes have been successfully removed.
708  *
709  * The new page will have replaced the old page if this function
710  * is successful.
711  *
712  * Return value:
713  *   < 0 - error code
714  *  MIGRATEPAGE_SUCCESS - success
715  */
716 static int move_to_new_page(struct page *newpage, struct page *page,
717                                 int page_was_mapped, enum migrate_mode mode)
718 {
719         struct address_space *mapping;
720         int rc;
721 
722         /*
723          * Block others from accessing the page when we get around to
724          * establishing additional references. We are the only one
725          * holding a reference to the new page at this point.
726          */
727         if (!trylock_page(newpage))
728                 BUG();
729 
730         /* Prepare mapping for the new page. */
731         newpage->index = page->index;
732         newpage->mapping = page->mapping;
733         if (PageSwapBacked(page))
734                 SetPageSwapBacked(newpage);
735 
736         mapping = page_mapping(page);
737         if (!mapping)
738                 rc = migrate_page(mapping, newpage, page, mode);
739         else if (mapping->a_ops->migratepage)
740                 /*
741                  * Most pages have a mapping and most filesystems provide a
742                  * migratepage callback. Anonymous pages are part of swap
743                  * space which also has its own migratepage callback. This
744                  * is the most common path for page migration.
745                  */
746                 rc = mapping->a_ops->migratepage(mapping,
747                                                 newpage, page, mode);
748         else
749                 rc = fallback_migrate_page(mapping, newpage, page, mode);
750 
751         if (rc != MIGRATEPAGE_SUCCESS) {
752                 newpage->mapping = NULL;
753         } else {
754                 mem_cgroup_migrate(page, newpage, false);
755                 if (page_was_mapped)
756                         remove_migration_ptes(page, newpage);
757                 page->mapping = NULL;
758         }
759 
760         unlock_page(newpage);
761 
762         return rc;
763 }
764 
765 static int __unmap_and_move(struct page *page, struct page *newpage,
766                                 int force, enum migrate_mode mode)
767 {
768         int rc = -EAGAIN;
769         int page_was_mapped = 0;
770         struct anon_vma *anon_vma = NULL;
771 
772         if (!trylock_page(page)) {
773                 if (!force || mode == MIGRATE_ASYNC)
774                         goto out;
775 
776                 /*
777                  * It's not safe for direct compaction to call lock_page.
778                  * For example, during page readahead pages are added locked
779                  * to the LRU. Later, when the IO completes the pages are
780                  * marked uptodate and unlocked. However, the queueing
781                  * could be merging multiple pages for one bio (e.g.
782                  * mpage_readpages). If an allocation happens for the
783                  * second or third page, the process can end up locking
784                  * the same page twice and deadlocking. Rather than
785                  * trying to be clever about what pages can be locked,
786                  * avoid the use of lock_page for direct compaction
787                  * altogether.
788                  */
789                 if (current->flags & PF_MEMALLOC)
790                         goto out;
791 
792                 lock_page(page);
793         }
794 
795         if (PageWriteback(page)) {
796                 /*
797                  * Only in the case of a full synchronous migration is it
798                  * necessary to wait for PageWriteback. In the async case,
799                  * the retry loop is too short and in the sync-light case,
800                  * the overhead of stalling is too much
801                  */
802                 if (mode != MIGRATE_SYNC) {
803                         rc = -EBUSY;
804                         goto out_unlock;
805                 }
806                 if (!force)
807                         goto out_unlock;
808                 wait_on_page_writeback(page);
809         }
810         /*
811          * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
812          * we cannot notice if the anon_vma is freed while we migrate the page.
813          * This get_anon_vma() delays freeing the anon_vma pointer until the end
814          * of migration. File cache pages are no problem because they are
815          * protected by the page lock (migration may use writepage() or
816          * lock_page() on them), so only anonymous pages need care here.
817          */
818         if (PageAnon(page) && !PageKsm(page)) {
819                 /*
820                  * Only page_lock_anon_vma_read() understands the subtleties of
821                  * getting a hold on an anon_vma from outside one of its mms.
822                  */
823                 anon_vma = page_get_anon_vma(page);
824                 if (anon_vma) {
825                         /*
826                          * Anon page
827                          */
828                 } else if (PageSwapCache(page)) {
829                         /*
830                          * We cannot be sure that the anon_vma of an unmapped
831                          * swapcache page is safe to use because we don't
832                          * know in advance if the VMA that this page belonged
833                          * to still exists. If the VMA and others sharing the
834                          * data have been freed, then the anon_vma could
835                          * already be invalid.
836                          *
837                          * To avoid this possibility, swapcache pages get
838                          * migrated but are not remapped when migration
839                          * completes
840                          */
841                 } else {
842                         goto out_unlock;
843                 }
844         }
845 
846         if (unlikely(isolated_balloon_page(page))) {
847                 /*
848                  * A ballooned page does not need any special attention from
849                  * physical to virtual reverse mapping procedures.
850                  * Skip any attempt to unmap PTEs or to remap swap cache,
851                  * in order to avoid burning cycles at rmap level, and perform
852                  * the page migration right away (protected by page lock).
853                  */
854                 rc = balloon_page_migrate(newpage, page, mode);
855                 goto out_unlock;
856         }
857 
858         /*
859          * Corner case handling:
860          * 1. When a new swap-cache page is read in, it is added to the LRU
861          * and treated as swapcache but it has no rmap yet.
862          * Calling try_to_unmap() against a page->mapping==NULL page will
863          * trigger a BUG.  So handle it here.
864          * 2. An orphaned page (see truncate_complete_page) might have
865          * fs-private metadata. The page can be picked up due to memory
866          * offlining.  Everywhere else except page reclaim, the page is
867          * invisible to the vm, so the page cannot be migrated. So try to
868          * free the metadata so that the page can be freed.
869          */
870         if (!page->mapping) {
871                 VM_BUG_ON_PAGE(PageAnon(page), page);
872                 if (page_has_private(page)) {
873                         try_to_free_buffers(page);
874                         goto out_unlock;
875                 }
876                 goto skip_unmap;
877         }
878 
879         /* Establish migration ptes or remove ptes */
880         if (page_mapped(page)) {
881                 try_to_unmap(page,
882                         TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
883                 page_was_mapped = 1;
884         }
885 
886 skip_unmap:
887         if (!page_mapped(page))
888                 rc = move_to_new_page(newpage, page, page_was_mapped, mode);
889 
890         if (rc && page_was_mapped)
891                 remove_migration_ptes(page, page);
892 
893         /* Drop an anon_vma reference if we took one */
894         if (anon_vma)
895                 put_anon_vma(anon_vma);
896 
897 out_unlock:
898         unlock_page(page);
899 out:
900         return rc;
901 }
902 
903 /*
904  * Obtain the lock on page, remove all ptes and migrate the page
905  * to the newly allocated page in newpage.
906  */
907 static int unmap_and_move(new_page_t get_new_page, free_page_t put_new_page,
908                         unsigned long private, struct page *page, int force,
909                         enum migrate_mode mode)
910 {
911         int rc = 0;
912         int *result = NULL;
913         struct page *newpage = get_new_page(page, private, &result);
914 
915         if (!newpage)
916                 return -ENOMEM;
917 
918         if (page_count(page) == 1) {
919                 /* page was freed from under us. So we are done. */
920                 goto out;
921         }
922 
923         if (unlikely(PageTransHuge(page)))
924                 if (unlikely(split_huge_page(page)))
925                         goto out;
926 
927         rc = __unmap_and_move(page, newpage, force, mode);
928 
929 out:
930         if (rc != -EAGAIN) {
931                 /*
932                  * A page that has been migrated has all references
933                  * removed and will be freed. A page that has not been
934                  * migrated will have kept its references and be
935                  * restored.
936                  */
937                 list_del(&page->lru);
938                 dec_zone_page_state(page, NR_ISOLATED_ANON +
939                                 page_is_file_cache(page));
940                 putback_lru_page(page);
941         }
942 
943         /*
944          * If migration was not successful and there's a freeing callback, use
945          * it.  Otherwise, putback_lru_page() will drop the reference grabbed
946          * during isolation.
947          */
948         if (rc != MIGRATEPAGE_SUCCESS && put_new_page) {
949                 ClearPageSwapBacked(newpage);
950                 put_new_page(newpage, private);
951         } else if (unlikely(__is_movable_balloon_page(newpage))) {
952                 /* drop our reference, page already in the balloon */
953                 put_page(newpage);
954         } else
955                 putback_lru_page(newpage);
956 
957         if (result) {
958                 if (rc)
959                         *result = rc;
960                 else
961                         *result = page_to_nid(newpage);
962         }
963         return rc;
964 }
965 
966 /*
 967  * Counterpart of unmap_and_move() for hugepage migration.
968  *
 969  * This function doesn't wait for the completion of hugepage I/O
 970  * because there is no race between I/O and migration for hugepages.
 971  * Note that currently hugepage I/O occurs only in direct I/O
 972  * where no lock is held and PG_writeback is irrelevant,
 973  * and the writeback status of all subpages is counted in the reference
 974  * count of the head page (i.e. if all subpages of a 2MB hugepage are
 975  * under direct I/O, the reference count of the head page is 512 and a bit more.)
 976  * This means that when we try to migrate a hugepage whose subpages are
977  * doing direct I/O, some references remain after try_to_unmap() and
978  * hugepage migration fails without data corruption.
979  *
980  * There is also no race when direct I/O is issued on the page under migration,
 981  * because then the pte is replaced with a migration swap entry and direct I/O code
982  * will wait in the page fault for migration to complete.
983  */
984 static int unmap_and_move_huge_page(new_page_t get_new_page,
985                                 free_page_t put_new_page, unsigned long private,
986                                 struct page *hpage, int force,
987                                 enum migrate_mode mode)
988 {
989         int rc = 0;
990         int *result = NULL;
991         int page_was_mapped = 0;
992         struct page *new_hpage;
993         struct anon_vma *anon_vma = NULL;
994 
995         /*
996          * Movability of hugepages depends on architectures and hugepage size.
997          * This check is necessary because some callers of hugepage migration
998          * like soft offline and memory hotremove don't walk through page
999          * tables or check whether the hugepage is pmd-based or not before
1000          * kicking migration.
1001          */
1002         if (!hugepage_migration_supported(page_hstate(hpage))) {
1003                 putback_active_hugepage(hpage);
1004                 return -ENOSYS;
1005         }
1006 
1007         new_hpage = get_new_page(hpage, private, &result);
1008         if (!new_hpage)
1009                 return -ENOMEM;
1010 
1011         rc = -EAGAIN;
1012 
1013         if (!trylock_page(hpage)) {
1014                 if (!force || mode != MIGRATE_SYNC)
1015                         goto out;
1016                 lock_page(hpage);
1017         }
1018 
1019         if (PageAnon(hpage))
1020                 anon_vma = page_get_anon_vma(hpage);
1021 
1022         if (page_mapped(hpage)) {
1023                 try_to_unmap(hpage,
1024                         TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
1025                 page_was_mapped = 1;
1026         }
1027 
1028         if (!page_mapped(hpage))
1029                 rc = move_to_new_page(new_hpage, hpage, page_was_mapped, mode);
1030 
1031         if (rc != MIGRATEPAGE_SUCCESS && page_was_mapped)
1032                 remove_migration_ptes(hpage, hpage);
1033 
1034         if (anon_vma)
1035                 put_anon_vma(anon_vma);
1036 
1037         if (rc == MIGRATEPAGE_SUCCESS)
1038                 hugetlb_cgroup_migrate(hpage, new_hpage);
1039 
1040         unlock_page(hpage);
1041 out:
1042         if (rc != -EAGAIN)
1043                 putback_active_hugepage(hpage);
1044 
1045         /*
1046          * If migration was not successful and there's a freeing callback, use
1047          * it.  Otherwise, put_page() will drop the reference grabbed during
1048          * isolation.
1049          */
1050         if (rc != MIGRATEPAGE_SUCCESS && put_new_page)
1051                 put_new_page(new_hpage, private);
1052         else
1053                 put_page(new_hpage);
1054 
1055         if (result) {
1056                 if (rc)
1057                         *result = rc;
1058                 else
1059                         *result = page_to_nid(new_hpage);
1060         }
1061         return rc;
1062 }
1063 
1064 /*
1065  * migrate_pages - migrate the pages specified in a list, to the free pages
1066  *                 supplied as the target for the page migration
1067  *
1068  * @from:               The list of pages to be migrated.
1069  * @get_new_page:       The function used to allocate free pages to be used
1070  *                      as the target of the page migration.
1071  * @put_new_page:       The function used to free target pages if migration
1072  *                      fails, or NULL if no special handling is necessary.
1073  * @private:            Private data to be passed on to get_new_page()
1074  * @mode:               The migration mode that specifies the constraints for
1075  *                      page migration, if any.
1076  * @reason:             The reason for page migration.
1077  *
1078  * The function returns after 10 attempts or when no pages are movable
1079  * any more because the list has become empty or no retryable pages exist.
1080  * The caller should call putback_movable_pages() to return pages to the LRU
1081  * or free list only if ret != 0.
1082  *
1083  * Returns the number of pages that were not migrated, or an error code.
1084  */
1085 int migrate_pages(struct list_head *from, new_page_t get_new_page,
1086                 free_page_t put_new_page, unsigned long private,
1087                 enum migrate_mode mode, int reason)
1088 {
1089         int retry = 1;
1090         int nr_failed = 0;
1091         int nr_succeeded = 0;
1092         int pass = 0;
1093         struct page *page;
1094         struct page *page2;
1095         int swapwrite = current->flags & PF_SWAPWRITE;
1096         int rc;
1097 
1098         if (!swapwrite)
1099                 current->flags |= PF_SWAPWRITE;
1100 
1101         for(pass = 0; pass < 10 && retry; pass++) {
1102                 retry = 0;
1103 
1104                 list_for_each_entry_safe(page, page2, from, lru) {
1105                         cond_resched();
1106 
1107                         if (PageHuge(page))
1108                                 rc = unmap_and_move_huge_page(get_new_page,
1109                                                 put_new_page, private, page,
1110                                                 pass > 2, mode);
1111                         else
1112                                 rc = unmap_and_move(get_new_page, put_new_page,
1113                                                 private, page, pass > 2, mode);
1114 
1115                         switch(rc) {
1116                         case -ENOMEM:
1117                                 goto out;
1118                         case -EAGAIN:
1119                                 retry++;
1120                                 break;
1121                         case MIGRATEPAGE_SUCCESS:
1122                                 nr_succeeded++;
1123                                 break;
1124                         default:
1125                                 /*
1126                                  * Permanent failure (-EBUSY, -ENOSYS, etc.):
1127                                  * unlike -EAGAIN case, the failed page is
1128                                  * removed from migration page list and not
1129                                  * retried in the next outer loop.
1130                                  */
1131                                 nr_failed++;
1132                                 break;
1133                         }
1134                 }
1135         }
1136         rc = nr_failed + retry;
1137 out:
1138         if (nr_succeeded)
1139                 count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
1140         if (nr_failed)
1141                 count_vm_events(PGMIGRATE_FAIL, nr_failed);
1142         trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason);
1143 
1144         if (!swapwrite)
1145                 current->flags &= ~PF_SWAPWRITE;
1146 
1147         return rc;
1148 }
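
/*
 * Illustration (not part of this file): how a caller typically interprets
 * the return value, given the semantics documented above (0 on complete
 * success, a positive count of pages left behind, or a negative errno).
 * "alloc_func" is a hypothetical new_page_t allocator:
 *
 *	ret = migrate_pages(&pagelist, alloc_func, NULL, 0,
 *			    MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
 *	if (ret) {
 *		putback_movable_pages(&pagelist);
 *		if (ret > 0)
 *			ret = -EBUSY;
 *	}
 *
 * Some in-tree callers map a positive "pages left over" result to -EBUSY
 * as shown; others simply report the count.
 */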
1149 
1150 #ifdef CONFIG_NUMA
1151 /*
1152  * Move a list of individual pages
1153  */
1154 struct page_to_node {
1155         unsigned long addr;
1156         struct page *page;
1157         int node;
1158         int status;
1159 };
1160 
1161 static struct page *new_page_node(struct page *p, unsigned long private,
1162                 int **result)
1163 {
1164         struct page_to_node *pm = (struct page_to_node *)private;
1165 
1166         while (pm->node != MAX_NUMNODES && pm->page != p)
1167                 pm++;
1168 
1169         if (pm->node == MAX_NUMNODES)
1170                 return NULL;
1171 
1172         *result = &pm->status;
1173 
1174         if (PageHuge(p))
1175                 return alloc_huge_page_node(page_hstate(compound_head(p)),
1176                                         pm->node);
1177         else
1178                 return alloc_pages_exact_node(pm->node,
1179                                 GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0);
1180 }
1181 
1182 /*
1183  * Move a set of pages as indicated in the pm array. The addr
1184  * field must be set to the virtual address of the page to be moved
1185  * and the node number must contain a valid target node.
1186  * The pm array ends with node = MAX_NUMNODES.
1187  */
1188 static int do_move_page_to_node_array(struct mm_struct *mm,
1189                                       struct page_to_node *pm,
1190                                       int migrate_all)
1191 {
1192         int err;
1193         struct page_to_node *pp;
1194         LIST_HEAD(pagelist);
1195 
1196         down_read(&mm->mmap_sem);
1197 
1198         /*
1199          * Build a list of pages to migrate
1200          */
1201         for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
1202                 struct vm_area_struct *vma;
1203                 struct page *page;
1204 
1205                 err = -EFAULT;
1206                 vma = find_vma(mm, pp->addr);
1207                 if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
1208                         goto set_status;
1209 
1210                 page = follow_page(vma, pp->addr, FOLL_GET|FOLL_SPLIT);
1211 
1212                 err = PTR_ERR(page);
1213                 if (IS_ERR(page))
1214                         goto set_status;
1215 
1216                 err = -ENOENT;
1217                 if (!page)
1218                         goto set_status;
1219 
1220                 /* Use PageReserved to check for zero page */
1221                 if (PageReserved(page))
1222                         goto put_and_set;
1223 
1224                 pp->page = page;
1225                 err = page_to_nid(page);
1226 
1227                 if (err == pp->node)
1228                         /*
1229                          * Node already in the right place
1230                          */
1231                         goto put_and_set;
1232 
1233                 err = -EACCES;
1234                 if (page_mapcount(page) > 1 &&
1235                                 !migrate_all)
1236                         goto put_and_set;
1237 
1238                 if (PageHuge(page)) {
1239                         if (PageHead(page))
1240                                 isolate_huge_page(page, &pagelist);
1241                         goto put_and_set;
1242                 }
1243 
1244                 err = isolate_lru_page(page);
1245                 if (!err) {
1246                         list_add_tail(&page->lru, &pagelist);
1247                         inc_zone_page_state(page, NR_ISOLATED_ANON +
1248                                             page_is_file_cache(page));
1249                 }
1250 put_and_set:
1251                 /*
1252                  * Either remove the duplicate refcount from
1253                  * isolate_lru_page() or drop the page ref if it was
1254                  * not isolated.
1255                  */
1256                 put_page(page);
1257 set_status:
1258                 pp->status = err;
1259         }
1260 
1261         err = 0;
1262         if (!list_empty(&pagelist)) {
1263                 err = migrate_pages(&pagelist, new_page_node, NULL,
1264                                 (unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL);
1265                 if (err)
1266                         putback_movable_pages(&pagelist);
1267         }
1268 
1269         up_read(&mm->mmap_sem);
1270         return err;
1271 }
1272 
1273 /*
1274  * Migrate an array of page addresses onto an array of nodes and fill
1275  * in the corresponding array of status values.
1276  */
1277 static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
1278                          unsigned long nr_pages,
1279                          const void __user * __user *pages,
1280                          const int __user *nodes,
1281                          int __user *status, int flags)
1282 {
1283         struct page_to_node *pm;
1284         unsigned long chunk_nr_pages;
1285         unsigned long chunk_start;
1286         int err;
1287 
1288         err = -ENOMEM;
1289         pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
1290         if (!pm)
1291                 goto out;
1292 
1293         migrate_prep();
1294 
1295         /*
1296          * Store a chunk of page_to_node array in a page,
1297          * but keep the last one as a marker
1298          */
1299         chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1;
1300 
1301         for (chunk_start = 0;
1302              chunk_start < nr_pages;
1303              chunk_start += chunk_nr_pages) {
1304                 int j;
1305 
1306                 if (chunk_start + chunk_nr_pages > nr_pages)
1307                         chunk_nr_pages = nr_pages - chunk_start;
1308 
1309                 /* fill the chunk pm with addrs and nodes from user-space */
1310                 for (j = 0; j < chunk_nr_pages; j++) {
1311                         const void __user *p;
1312                         int node;
1313 
1314                         err = -EFAULT;
1315                         if (get_user(p, pages + j + chunk_start))
1316                                 goto out_pm;
1317                         pm[j].addr = (unsigned long) p;
1318 
1319                         if (get_user(node, nodes + j + chunk_start))
1320                                 goto out_pm;
1321 
1322                         err = -ENODEV;
1323                         if (node < 0 || node >= MAX_NUMNODES)
1324                                 goto out_pm;
1325 
1326                         if (!node_state(node, N_MEMORY))
1327                                 goto out_pm;
1328 
1329                         err = -EACCES;
1330                         if (!node_isset(node, task_nodes))
1331                                 goto out_pm;
1332 
1333                         pm[j].node = node;
1334                 }
1335 
1336                 /* End marker for this chunk */
1337                 pm[chunk_nr_pages].node = MAX_NUMNODES;
1338 
1339                 /* Migrate this chunk */
1340                 err = do_move_page_to_node_array(mm, pm,
1341                                                  flags & MPOL_MF_MOVE_ALL);
1342                 if (err < 0)
1343                         goto out_pm;
1344 
1345                 /* Return status information */
1346                 for (j = 0; j < chunk_nr_pages; j++)
1347                         if (put_user(pm[j].status, status + j + chunk_start)) {
1348                                 err = -EFAULT;
1349                                 goto out_pm;
1350                         }
1351         }
1352         err = 0;
1353 
1354 out_pm:
1355         free_page((unsigned long)pm);
1356 out:
1357         return err;
1358 }
1359 
1360 /*
1361  * Determine the nodes of an array of pages and store them in an array of status values.
1362  */
1363 static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
1364                                 const void __user **pages, int *status)
1365 {
1366         unsigned long i;
1367 
1368         down_read(&mm->mmap_sem);
1369 
1370         for (i = 0; i < nr_pages; i++) {
1371                 unsigned long addr = (unsigned long)(*pages);
1372                 struct vm_area_struct *vma;
1373                 struct page *page;
1374                 int err = -EFAULT;
1375 
1376                 vma = find_vma(mm, addr);
1377                 if (!vma || addr < vma->vm_start)
1378                         goto set_status;
1379 
1380                 page = follow_page(vma, addr, 0);
1381 
1382                 err = PTR_ERR(page);
1383                 if (IS_ERR(page))
1384                         goto set_status;
1385 
1386                 err = -ENOENT;
1387                 /* Use PageReserved to check for zero page */
1388                 if (!page || PageReserved(page))
1389                         goto set_status;
1390 
1391                 err = page_to_nid(page);
1392 set_status:
1393                 *status = err;
1394 
1395                 pages++;
1396                 status++;
1397         }
1398 
1399         up_read(&mm->mmap_sem);
1400 }
1401 
1402 /*
1403  * Determine the nodes of a user array of pages and store them in
1404  * a user array of status values.
1405  */
1406 static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
1407                          const void __user * __user *pages,
1408                          int __user *status)
1409 {
1410 #define DO_PAGES_STAT_CHUNK_NR 16
1411         const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
1412         int chunk_status[DO_PAGES_STAT_CHUNK_NR];
1413 
1414         while (nr_pages) {
1415                 unsigned long chunk_nr;
1416 
1417                 chunk_nr = nr_pages;
1418                 if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
1419                         chunk_nr = DO_PAGES_STAT_CHUNK_NR;
1420 
1421                 if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
1422                         break;
1423 
1424                 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
1425 
1426                 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
1427                         break;
1428 
1429                 pages += chunk_nr;
1430                 status += chunk_nr;
1431                 nr_pages -= chunk_nr;
1432         }
1433         return nr_pages ? -EFAULT : 0;
1434 }
1435 
1436 /*
1437  * Move a list of pages in the address space of the currently executing
1438  * process.
1439  */
1440 SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
1441                 const void __user * __user *, pages,
1442                 const int __user *, nodes,
1443                 int __user *, status, int, flags)
1444 {
1445         const struct cred *cred = current_cred(), *tcred;
1446         struct task_struct *task;
1447         struct mm_struct *mm;
1448         int err;
1449         nodemask_t task_nodes;
1450 
1451         /* Check flags */
1452         if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
1453                 return -EINVAL;
1454 
1455         if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1456                 return -EPERM;
1457 
1458         /* Find the mm_struct */
1459         rcu_read_lock();
1460         task = pid ? find_task_by_vpid(pid) : current;
1461         if (!task) {
1462                 rcu_read_unlock();
1463                 return -ESRCH;
1464         }
1465         get_task_struct(task);
1466 
1467         /*
1468          * Check if this process has the right to modify the specified
1469          * process. The right exists if the process has administrative
1470          * capabilities, superuser privileges or the same
1471          * userid as the target process.
1472          */
1473         tcred = __task_cred(task);
1474         if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1475             !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
1476             !capable(CAP_SYS_NICE)) {
1477                 rcu_read_unlock();
1478                 err = -EPERM;
1479                 goto out;
1480         }
1481         rcu_read_unlock();
1482 
1483         err = security_task_movememory(task);
1484         if (err)
1485                 goto out;
1486 
1487         task_nodes = cpuset_mems_allowed(task);
1488         mm = get_task_mm(task);
1489         put_task_struct(task);
1490 
1491         if (!mm)
1492                 return -EINVAL;
1493 
1494         if (nodes)
1495                 err = do_pages_move(mm, task_nodes, nr_pages, pages,
1496                                     nodes, status, flags);
1497         else
1498                 err = do_pages_stat(mm, nr_pages, pages, status);
1499 
1500         mmput(mm);
1501         return err;
1502 
1503 out:
1504         put_task_struct(task);
1505         return err;
1506 }
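
/*
 * Example (illustrative userspace sketch; assumes libnuma's <numaif.h>
 * move_pages() wrapper, link with -lnuma, and at least two online NUMA
 * nodes): move one of the caller's own pages to node 1.  pid == 0 selects
 * the current process, and plain MPOL_MF_MOVE does not require
 * CAP_SYS_NICE, matching the flag and capability checks above.
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *
 *	int main(void)
 *	{
 *		void *pages[1];
 *		int nodes[1] = { 1 };
 *		int status[1];
 *
 *		pages[0] = malloc(4096);
 *		memset(pages[0], 0, 4096);
 *
 *		if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE))
 *			perror("move_pages");
 *		else
 *			printf("page now on node %d\n", status[0]);
 *		return 0;
 *	}
 *
 * On success each status entry holds the node the page now resides on; a
 * negative entry is a per-page error such as -EBUSY or -ENOENT.
 */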
1507 
1508 #ifdef CONFIG_NUMA_BALANCING
1509 /*
1510  * Returns true if this is a safe migration target node for misplaced NUMA
1511  * pages. Currently it only checks the watermarks which crude
1512  */
1513 static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
1514                                    unsigned long nr_migrate_pages)
1515 {
1516         int z;
1517         for (z = pgdat->nr_zones - 1; z >= 0; z--) {
1518                 struct zone *zone = pgdat->node_zones + z;
1519 
1520                 if (!populated_zone(zone))
1521                         continue;
1522 
1523                 if (!zone_reclaimable(zone))
1524                         continue;
1525 
1526                 /* Avoid waking kswapd by allocating nr_migrate_pages pages. */
1527                 if (!zone_watermark_ok(zone, 0,
1528                                        high_wmark_pages(zone) +
1529                                        nr_migrate_pages,
1530                                        0, 0))
1531                         continue;
1532                 return true;
1533         }
1534         return false;
1535 }
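
/*
 * Worked example (approximate; zone_watermark_ok() also applies the
 * lowmem reserve and per-order checks): with a zone whose high watermark
 * is 10000 pages and nr_migrate_pages == 512, the node only counts as
 * balanced if that zone still has more than roughly 10512 free pages,
 * i.e. the incoming pages would not push it below the high watermark and
 * wake kswapd.
 */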
1536 
1537 static struct page *alloc_misplaced_dst_page(struct page *page,
1538                                            unsigned long data,
1539                                            int **result)
1540 {
1541         int nid = (int) data;
1542         struct page *newpage;
1543 
1544         newpage = alloc_pages_exact_node(nid,
1545                                          (GFP_HIGHUSER_MOVABLE |
1546                                           __GFP_THISNODE | __GFP_NOMEMALLOC |
1547                                           __GFP_NORETRY | __GFP_NOWARN) &
1548                                          ~GFP_IOFS, 0);
1549 
1550         return newpage;
1551 }
1552 
1553 /*
1554  * page migration rate limiting control.
1555  * Do not migrate more than @ratelimit_pages in a @migrate_interval_millisecs
1556  * window of time. The default here says do not migrate more than 1280M per second.
1557  * If a node is rate-limited then PTE NUMA updates are also rate-limited. However,
1558  * as it is faults that reset the window, PTE updates will happen unconditionally
1559  * if there has not been a fault for @pteupdate_interval_millisecs after the
1560  * throttle window closed.
1561  */
1562 static unsigned int migrate_interval_millisecs __read_mostly = 100;
1563 static unsigned int pteupdate_interval_millisecs __read_mostly = 1000;
1564 static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT);
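
/*
 * Worked example, assuming 4K pages (PAGE_SHIFT == 12):
 *
 *	ratelimit_pages = 128 << (20 - 12) = 32768 pages = 128MB
 *
 * allowed per 100ms migrate_interval_millisecs window, which is the
 * "1280M per second" default described in the comment above.
 */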
1565 
1566 /* Returns true if NUMA migration is currently rate limited */
1567 bool migrate_ratelimited(int node)
1568 {
1569         pg_data_t *pgdat = NODE_DATA(node);
1570 
1571         if (time_after(jiffies, pgdat->numabalancing_migrate_next_window +
1572                                 msecs_to_jiffies(pteupdate_interval_millisecs)))
1573                 return false;
1574 
1575         if (pgdat->numabalancing_migrate_nr_pages < ratelimit_pages)
1576                 return false;
1577 
1578         return true;
1579 }
1580 
1581 /* Returns true if the node is migrate rate-limited after the update */
1582 static bool numamigrate_update_ratelimit(pg_data_t *pgdat,
1583                                         unsigned long nr_pages)
1584 {
1585         /*
1586          * Rate-limit the amount of data that is being migrated to a node.
1587          * Optimal placement is no good if the memory bus is saturated and
1588          * all the time is being spent migrating!
1589          */
1590         if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
1591                 spin_lock(&pgdat->numabalancing_migrate_lock);
1592                 pgdat->numabalancing_migrate_nr_pages = 0;
1593                 pgdat->numabalancing_migrate_next_window = jiffies +
1594                         msecs_to_jiffies(migrate_interval_millisecs);
1595                 spin_unlock(&pgdat->numabalancing_migrate_lock);
1596         }
1597         if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages) {
1598                 trace_mm_numa_migrate_ratelimit(current, pgdat->node_id,
1599                                                                 nr_pages);
1600                 return true;
1601         }
1602 
1603         /*
1604          * This is an unlocked non-atomic update so errors are possible.
1605  * The consequence is failing to migrate when we potentially should
1606  * have, which is not severe enough to warrant locking. If it is ever
1607          * a problem, it can be converted to a per-cpu counter.
1608          */
1609         pgdat->numabalancing_migrate_nr_pages += nr_pages;
1610         return false;
1611 }
1612 
1613 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
1614 {
1615         int page_lru;
1616 
1617         VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
1618 
1619         /* Avoid migrating to a node that is nearly full */
1620         if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page)))
1621                 return 0;
1622 
1623         if (isolate_lru_page(page))
1624                 return 0;
1625 
1626         /*
1627          * migrate_misplaced_transhuge_page() skips page migration's usual
1628          * check on page_count(), so we must do it here, now that the page
1629          * has been isolated: a GUP pin, or any other pin, prevents migration.
1630  * The expected page count is 3: 1 for the page's mapcount, 1 for the
1631  * caller's pin, and 1 for the reference taken by isolate_lru_page().
1632          */
1633         if (PageTransHuge(page) && page_count(page) != 3) {
1634                 putback_lru_page(page);
1635                 return 0;
1636         }
1637 
1638         page_lru = page_is_file_cache(page);
1639         mod_zone_page_state(page_zone(page), NR_ISOLATED_ANON + page_lru,
1640                                 hpage_nr_pages(page));
1641 
1642         /*
1643          * Isolating the page has taken another reference, so the
1644          * caller's reference can be safely dropped without the page
1645          * disappearing underneath us during migration.
1646          */
1647         put_page(page);
1648         return 1;
1649 }
1650 
1651 bool pmd_trans_migrating(pmd_t pmd)
1652 {
1653         struct page *page = pmd_page(pmd);
1654         return PageLocked(page);
1655 }
1656 
1657 /*
1658  * Attempt to migrate a misplaced page to the specified destination
1659  * node. Caller is expected to have an elevated reference count on
1660  * the page that will be dropped by this function before returning.
1661  */
1662 int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
1663                            int node)
1664 {
1665         pg_data_t *pgdat = NODE_DATA(node);
1666         int isolated;
1667         int nr_remaining;
1668         LIST_HEAD(migratepages);
1669 
1670         /*
1671          * Don't migrate file pages that are mapped in multiple processes
1672          * with execute permissions as they are probably shared libraries.
1673          */
1674         if (page_mapcount(page) != 1 && page_is_file_cache(page) &&
1675             (vma->vm_flags & VM_EXEC))
1676                 goto out;
1677 
1678         /*
1679          * Rate-limit the amount of data that is being migrated to a node.
1680          * Optimal placement is no good if the memory bus is saturated and
1681          * all the time is being spent migrating!
1682          */
1683         if (numamigrate_update_ratelimit(pgdat, 1))
1684                 goto out;
1685 
1686         isolated = numamigrate_isolate_page(pgdat, page);
1687         if (!isolated)
1688                 goto out;
1689 
1690         list_add(&page->lru, &migratepages);
1691         nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
1692                                      NULL, node, MIGRATE_ASYNC,
1693                                      MR_NUMA_MISPLACED);
1694         if (nr_remaining) {
1695                 if (!list_empty(&migratepages)) {
1696                         list_del(&page->lru);
1697                         dec_zone_page_state(page, NR_ISOLATED_ANON +
1698                                         page_is_file_cache(page));
1699                         putback_lru_page(page);
1700                 }
1701                 isolated = 0;
1702         } else
1703                 count_vm_numa_event(NUMA_PAGE_MIGRATE);
1704         BUG_ON(!list_empty(&migratepages));
1705         return isolated;
1706 
1707 out:
1708         put_page(page);
1709         return 0;
1710 }
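
/*
 * Typical usage (sketch): the NUMA hinting fault handlers, e.g.
 * do_numa_page() in mm/memory.c, call this once they decide a page was
 * accessed from a node other than the one it currently resides on:
 *
 *	migrated = migrate_misplaced_page(page, vma, target_nid);
 *
 * A non-zero return means the page was isolated and handed to
 * migrate_pages(); in either case the caller's elevated reference has
 * been consumed, as noted in the comment above the function.
 */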
1711 #endif /* CONFIG_NUMA_BALANCING */
1712 
1713 #if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
1714 /*
1715  * Migrates a THP to a given target node. page must be locked and is unlocked
1716  * before returning.
1717  */
1718 int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1719                                 struct vm_area_struct *vma,
1720                                 pmd_t *pmd, pmd_t entry,
1721                                 unsigned long address,
1722                                 struct page *page, int node)
1723 {
1724         spinlock_t *ptl;
1725         pg_data_t *pgdat = NODE_DATA(node);
1726         int isolated = 0;
1727         struct page *new_page = NULL;
1728         int page_lru = page_is_file_cache(page);
1729         unsigned long mmun_start = address & HPAGE_PMD_MASK;
1730         unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
1731         pmd_t orig_entry;
1732 
1733         /*
1734          * Rate-limit the amount of data that is being migrated to a node.
1735          * Optimal placement is no good if the memory bus is saturated and
1736          * all the time is being spent migrating!
1737          */
1738         if (numamigrate_update_ratelimit(pgdat, HPAGE_PMD_NR))
1739                 goto out_dropref;
1740 
1741         new_page = alloc_pages_node(node,
1742                 (GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_WAIT,
1743                 HPAGE_PMD_ORDER);
1744         if (!new_page)
1745                 goto out_fail;
1746 
1747         isolated = numamigrate_isolate_page(pgdat, page);
1748         if (!isolated) {
1749                 put_page(new_page);
1750                 goto out_fail;
1751         }
1752 
1753         if (mm_tlb_flush_pending(mm))
1754                 flush_tlb_range(vma, mmun_start, mmun_end);
1755 
1756         /* Prepare a page as a migration target */
1757         __set_page_locked(new_page);
1758         SetPageSwapBacked(new_page);
1759 
1760         /* anon mapping, we can simply copy page->mapping to the new page: */
1761         new_page->mapping = page->mapping;
1762         new_page->index = page->index;
1763         migrate_page_copy(new_page, page);
1764         WARN_ON(PageLRU(new_page));
1765 
1766         /* Recheck the target PMD */
1767         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1768         ptl = pmd_lock(mm, pmd);
1769         if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) {
1770 fail_putback:
1771                 spin_unlock(ptl);
1772                 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1773 
1774                 /* Reverse changes made by migrate_page_copy() */
1775                 if (TestClearPageActive(new_page))
1776                         SetPageActive(page);
1777                 if (TestClearPageUnevictable(new_page))
1778                         SetPageUnevictable(page);
1779                 mlock_migrate_page(page, new_page);
1780 
1781                 unlock_page(new_page);
1782                 put_page(new_page);             /* Free it */
1783 
1784                 /* Retake the caller's reference and put the page back on the LRU */
1785                 get_page(page);
1786                 putback_lru_page(page);
1787                 mod_zone_page_state(page_zone(page),
1788                          NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
1789 
1790                 goto out_unlock;
1791         }
1792 
1793         orig_entry = *pmd;
1794         entry = mk_pmd(new_page, vma->vm_page_prot);
1795         entry = pmd_mkhuge(entry);
1796         entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1797 
1798         /*
1799          * Clear the old entry under pagetable lock and establish the new PTE.
1800          * Any parallel GUP will either observe the old page blocking on the
1801          * page lock, block on the page table lock or observe the new page.
1802          * The SetPageUptodate on the new page and page_add_new_anon_rmap
1803          * guarantee the copy is visible before the pagetable update.
1804          */
1805         flush_cache_range(vma, mmun_start, mmun_end);
1806         page_add_anon_rmap(new_page, vma, mmun_start);
1807         pmdp_clear_flush_notify(vma, mmun_start, pmd);
1808         set_pmd_at(mm, mmun_start, pmd, entry);
1809         flush_tlb_range(vma, mmun_start, mmun_end);
1810         update_mmu_cache_pmd(vma, address, &entry);
1811 
1812         if (page_count(page) != 2) {
1813                 set_pmd_at(mm, mmun_start, pmd, orig_entry);
1814                 flush_tlb_range(vma, mmun_start, mmun_end);
1815                 mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
1816                 update_mmu_cache_pmd(vma, address, &entry);
1817                 page_remove_rmap(new_page);
1818                 goto fail_putback;
1819         }
1820 
1821         mem_cgroup_migrate(page, new_page, false);
1822 
1823         page_remove_rmap(page);
1824 
1825         spin_unlock(ptl);
1826         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1827 
1828         /* Take an "isolate" reference and put new page on the LRU. */
1829         get_page(new_page);
1830         putback_lru_page(new_page);
1831 
1832         unlock_page(new_page);
1833         unlock_page(page);
1834         put_page(page);                 /* Drop the rmap reference */
1835         put_page(page);                 /* Drop the LRU isolation reference */
1836 
1837         count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
1838         count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
1839 
1840         mod_zone_page_state(page_zone(page),
1841                         NR_ISOLATED_ANON + page_lru,
1842                         -HPAGE_PMD_NR);
1843         return isolated;
1844 
1845 out_fail:
1846         count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
1847 out_dropref:
1848         ptl = pmd_lock(mm, pmd);
1849         if (pmd_same(*pmd, entry)) {
1850                 entry = pmd_modify(entry, vma->vm_page_prot);
1851                 set_pmd_at(mm, mmun_start, pmd, entry);
1852                 update_mmu_cache_pmd(vma, address, &entry);
1853         }
1854         spin_unlock(ptl);
1855 
1856 out_unlock:
1857         unlock_page(page);
1858         put_page(page);
1859         return 0;
1860 }
1861 #endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */
1862 
1863 #endif /* CONFIG_NUMA */
1864 
