~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/mm/page_isolation.c

Version: ~ [ linux-5.5 ] ~ [ linux-5.4.15 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.98 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.167 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.211 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.211 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.19.8 ] ~ [ linux-3.18.140 ] ~ [ linux-3.17.8 ] ~ [ linux-3.16.81 ] ~ [ linux-3.15.10 ] ~ [ linux-3.14.79 ] ~ [ linux-3.13.11 ] ~ [ linux-3.12.74 ] ~ [ linux-3.11.10 ] ~ [ linux-3.10.108 ] ~ [ linux-3.9.11 ] ~ [ linux-3.8.13 ] ~ [ linux-3.7.10 ] ~ [ linux-3.6.11 ] ~ [ linux-3.5.7 ] ~ [ linux-3.4.113 ] ~ [ linux-3.3.8 ] ~ [ linux-3.2.102 ] ~ [ linux-3.1.10 ] ~ [ linux-3.0.101 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /*
  2  * linux/mm/page_isolation.c
  3  */
  4 
  5 #include <linux/mm.h>
  6 #include <linux/page-isolation.h>
  7 #include <linux/pageblock-flags.h>
  8 #include "internal.h"
  9 
 10 static inline struct page *
 11 __first_valid_page(unsigned long pfn, unsigned long nr_pages)
 12 {
 13         int i;
 14         for (i = 0; i < nr_pages; i++)
 15                 if (pfn_valid_within(pfn + i))
 16                         break;
 17         if (unlikely(i == nr_pages))
 18                 return NULL;
 19         return pfn_to_page(pfn + i);
 20 }
 21 
 22 /*
 23  * start_isolate_page_range() -- make page-allocation-type of range of pages
 24  * to be MIGRATE_ISOLATE.
 25  * @start_pfn: The lower PFN of the range to be isolated.
 26  * @end_pfn: The upper PFN of the range to be isolated.
 27  * @migratetype: migrate type to set in error recovery.
 28  *
 29  * Making page-allocation-type to be MIGRATE_ISOLATE means free pages in
 30  * the range will never be allocated. Any free pages and pages freed in the
 31  * future will not be allocated again.
 32  *
 33  * start_pfn/end_pfn must be aligned to pageblock_order.
 34  * Returns 0 on success and -EBUSY if any part of range cannot be isolated.
 35  */
 36 int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 37                              unsigned migratetype)
 38 {
 39         unsigned long pfn;
 40         unsigned long undo_pfn;
 41         struct page *page;
 42 
 43         BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
 44         BUG_ON((end_pfn) & (pageblock_nr_pages - 1));
 45 
 46         for (pfn = start_pfn;
 47              pfn < end_pfn;
 48              pfn += pageblock_nr_pages) {
 49                 page = __first_valid_page(pfn, pageblock_nr_pages);
 50                 if (page && set_migratetype_isolate(page)) {
 51                         undo_pfn = pfn;
 52                         goto undo;
 53                 }
 54         }
 55         return 0;
 56 undo:
 57         for (pfn = start_pfn;
 58              pfn < undo_pfn;
 59              pfn += pageblock_nr_pages)
 60                 unset_migratetype_isolate(pfn_to_page(pfn), migratetype);
 61 
 62         return -EBUSY;
 63 }
 64 
 65 /*
 66  * Make isolated pages available again.
 67  */
 68 int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 69                             unsigned migratetype)
 70 {
 71         unsigned long pfn;
 72         struct page *page;
 73         BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
 74         BUG_ON((end_pfn) & (pageblock_nr_pages - 1));
 75         for (pfn = start_pfn;
 76              pfn < end_pfn;
 77              pfn += pageblock_nr_pages) {
 78                 page = __first_valid_page(pfn, pageblock_nr_pages);
 79                 if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
 80                         continue;
 81                 unset_migratetype_isolate(page, migratetype);
 82         }
 83         return 0;
 84 }
 85 /*
 86  * Test all pages in the range is free(means isolated) or not.
 87  * all pages in [start_pfn...end_pfn) must be in the same zone.
 88  * zone->lock must be held before call this.
 89  *
 90  * Returns 1 if all pages in the range are isolated.
 91  */
 92 static int
 93 __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
 94 {
 95         struct page *page;
 96 
 97         while (pfn < end_pfn) {
 98                 if (!pfn_valid_within(pfn)) {
 99                         pfn++;
100                         continue;
101                 }
102                 page = pfn_to_page(pfn);
103                 if (PageBuddy(page))
104                         pfn += 1 << page_order(page);
105                 else if (page_count(page) == 0 &&
106                                 page_private(page) == MIGRATE_ISOLATE)
107                         pfn += 1;
108                 else
109                         break;
110         }
111         if (pfn < end_pfn)
112                 return 0;
113         return 1;
114 }
115 
/*
 * test_pages_isolated() -- check that every page in [start_pfn, end_pfn)
 * is free or marked MIGRATE_ISOLATE.
 *
 * Returns 0 when the whole range is isolated, -EBUSY otherwise.
 */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;
	int ret;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages
	 * are not necessarily aligned to pageblock_nr_pages.
	 * Therefore we just check each pageblock's migratetype first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
			break;
	}
	/* Any valid page in the range gives us the zone whose lock to take. */
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;
	/* Check all pages are free or Marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn);
	spin_unlock_irqrestore(&zone->lock, flags);
	return ret ? 0 : -EBUSY;
}
143 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp