TOMOYO Linux Cross Reference
Linux/kernel/power/snapshot.c

  1 /*
  2  * linux/kernel/power/snapshot.c
  3  *
  4  * This file provides system snapshot/restore functionality for swsusp.
  5  *
  6  * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
  7  * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
  8  *
  9  * This file is released under the GPLv2.
 10  *
 11  */
 12 
 13 #define pr_fmt(fmt) "PM: " fmt
 14 
 15 #include <linux/version.h>
 16 #include <linux/module.h>
 17 #include <linux/mm.h>
 18 #include <linux/suspend.h>
 19 #include <linux/delay.h>
 20 #include <linux/bitops.h>
 21 #include <linux/spinlock.h>
 22 #include <linux/kernel.h>
 23 #include <linux/pm.h>
 24 #include <linux/device.h>
 25 #include <linux/init.h>
 26 #include <linux/bootmem.h>
 27 #include <linux/nmi.h>
 28 #include <linux/syscalls.h>
 29 #include <linux/console.h>
 30 #include <linux/highmem.h>
 31 #include <linux/list.h>
 32 #include <linux/slab.h>
 33 #include <linux/compiler.h>
 34 #include <linux/ktime.h>
 35 #include <linux/set_memory.h>
 36 
 37 #include <linux/uaccess.h>
 38 #include <asm/mmu_context.h>
 39 #include <asm/pgtable.h>
 40 #include <asm/tlbflush.h>
 41 #include <asm/io.h>
 42 
 43 #include "power.h"
 44 
 45 #if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_ARCH_HAS_SET_MEMORY)
 46 static bool hibernate_restore_protection;
 47 static bool hibernate_restore_protection_active;
 48 
 49 void enable_restore_image_protection(void)
 50 {
 51         hibernate_restore_protection = true;
 52 }
 53 
 54 static inline void hibernate_restore_protection_begin(void)
 55 {
 56         hibernate_restore_protection_active = hibernate_restore_protection;
 57 }
 58 
 59 static inline void hibernate_restore_protection_end(void)
 60 {
 61         hibernate_restore_protection_active = false;
 62 }
 63 
 64 static inline void hibernate_restore_protect_page(void *page_address)
 65 {
 66         if (hibernate_restore_protection_active)
 67                 set_memory_ro((unsigned long)page_address, 1);
 68 }
 69 
 70 static inline void hibernate_restore_unprotect_page(void *page_address)
 71 {
 72         if (hibernate_restore_protection_active)
 73                 set_memory_rw((unsigned long)page_address, 1);
 74 }
 75 #else
 76 static inline void hibernate_restore_protection_begin(void) {}
 77 static inline void hibernate_restore_protection_end(void) {}
 78 static inline void hibernate_restore_protect_page(void *page_address) {}
 79 static inline void hibernate_restore_unprotect_page(void *page_address) {}
 80 #endif /* CONFIG_STRICT_KERNEL_RWX && CONFIG_ARCH_HAS_SET_MEMORY */
 81 
 82 static int swsusp_page_is_free(struct page *);
 83 static void swsusp_set_page_forbidden(struct page *);
 84 static void swsusp_unset_page_forbidden(struct page *);
 85 
 86 /*
 87  * Number of bytes to reserve for memory allocations made by device drivers
 88  * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
 89  * cause image creation to fail (tunable via /sys/power/reserved_size).
 90  */
 91 unsigned long reserved_size;
 92 
 93 void __init hibernate_reserved_size_init(void)
 94 {
 95         reserved_size = SPARE_PAGES * PAGE_SIZE;
 96 }
 97 
 98 /*
 99  * Preferred image size in bytes (tunable via /sys/power/image_size).
100  * When it is set to N, swsusp will do its best to ensure the image
101  * size will not exceed N bytes, but if that is impossible, it will
102  * try to create the smallest image possible.
103  */
104 unsigned long image_size;
105 
106 void __init hibernate_image_size_init(void)
107 {
108         image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
109 }
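
/*
 * Worked default (an illustrative sketch, assuming 4 KiB pages): on a
 * machine with 8 GiB of RAM (2097152 pages), the initialization above
 * yields image_size = (2097152 * 2 / 5) * 4096 bytes, i.e. roughly
 * 3.2 GiB, two fifths of total RAM.
 */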
110 
111 /*
112  * List of PBEs needed for restoring the pages that were allocated before
113  * the suspend and included in the suspend image, but have also been
114  * allocated by the "resume" kernel, so their contents cannot be written
115  * directly to their "original" page frames.
116  */
117 struct pbe *restore_pblist;
118 
119 /* struct linked_page is used to build chains of pages */
120 
121 #define LINKED_PAGE_DATA_SIZE   (PAGE_SIZE - sizeof(void *))
122 
123 struct linked_page {
124         struct linked_page *next;
125         char data[LINKED_PAGE_DATA_SIZE];
126 } __packed;
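
/*
 * Size arithmetic (illustrative, assuming 4 KiB pages and 64-bit
 * pointers): sizeof(void *) == 8, so LINKED_PAGE_DATA_SIZE is
 * 4096 - 8 == 4088 bytes.  A chain of N linked pages thus stores
 * N * 4088 bytes of payload while using only order-0 allocations.
 */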
127 
128 /*
129  * List of "safe" pages (i.e. pages that were not used by the image kernel
130  * before hibernation) that may be used as temporary storage for image kernel
131  * memory contents.
132  */
133 static struct linked_page *safe_pages_list;
134 
135 /* Pointer to an auxiliary buffer (1 page) */
136 static void *buffer;
137 
138 #define PG_ANY          0
139 #define PG_SAFE         1
140 #define PG_UNSAFE_CLEAR 1
141 #define PG_UNSAFE_KEEP  0
142 
143 static unsigned int allocated_unsafe_pages;
144 
145 /**
146  * get_image_page - Allocate a page for a hibernation image.
147  * @gfp_mask: GFP mask for the allocation.
148  * @safe_needed: Get pages that were not used before hibernation (restore only)
149  *
150  * During image restoration, for storing the PBE list and the image data, we can
151  * only use memory pages that do not conflict with the pages used before
152  * hibernation.  The "unsafe" pages have PageNosaveFree set and we count them
153  * using allocated_unsafe_pages.
154  *
155  * Each allocated image page is marked as PageNosave and PageNosaveFree so that
156  * swsusp_free() can release it.
157  */
158 static void *get_image_page(gfp_t gfp_mask, int safe_needed)
159 {
160         void *res;
161 
162         res = (void *)get_zeroed_page(gfp_mask);
163         if (safe_needed)
164                 while (res && swsusp_page_is_free(virt_to_page(res))) {
165                         /* The page is unsafe, mark it for swsusp_free() */
166                         swsusp_set_page_forbidden(virt_to_page(res));
167                         allocated_unsafe_pages++;
168                         res = (void *)get_zeroed_page(gfp_mask);
169                 }
170         if (res) {
171                 swsusp_set_page_forbidden(virt_to_page(res));
172                 swsusp_set_page_free(virt_to_page(res));
173         }
174         return res;
175 }
176 
177 static void *__get_safe_page(gfp_t gfp_mask)
178 {
179         if (safe_pages_list) {
180                 void *ret = safe_pages_list;
181 
182                 safe_pages_list = safe_pages_list->next;
183                 memset(ret, 0, PAGE_SIZE);
184                 return ret;
185         }
186         return get_image_page(gfp_mask, PG_SAFE);
187 }
188 
189 unsigned long get_safe_page(gfp_t gfp_mask)
190 {
191         return (unsigned long)__get_safe_page(gfp_mask);
192 }
193 
194 static struct page *alloc_image_page(gfp_t gfp_mask)
195 {
196         struct page *page;
197 
198         page = alloc_page(gfp_mask);
199         if (page) {
200                 swsusp_set_page_forbidden(page);
201                 swsusp_set_page_free(page);
202         }
203         return page;
204 }
205 
206 static void recycle_safe_page(void *page_address)
207 {
208         struct linked_page *lp = page_address;
209 
210         lp->next = safe_pages_list;
211         safe_pages_list = lp;
212 }
213 
214 /**
215  * free_image_page - Free a page allocated for hibernation image.
216  * @addr: Address of the page to free.
217  * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
218  *
219  * The page to free should have been allocated by get_image_page() (page flags
220  * set by it are affected).
221  */
222 static inline void free_image_page(void *addr, int clear_nosave_free)
223 {
224         struct page *page;
225 
226         BUG_ON(!virt_addr_valid(addr));
227 
228         page = virt_to_page(addr);
229 
230         swsusp_unset_page_forbidden(page);
231         if (clear_nosave_free)
232                 swsusp_unset_page_free(page);
233 
234         __free_page(page);
235 }
236 
237 static inline void free_list_of_pages(struct linked_page *list,
238                                       int clear_page_nosave)
239 {
240         while (list) {
241                 struct linked_page *lp = list->next;
242 
243                 free_image_page(list, clear_page_nosave);
244                 list = lp;
245         }
246 }
247 
248 /*
249  * struct chain_allocator is used for allocating small objects out of
250  * a linked list of pages called 'the chain'.
251  *
252  * The chain grows whenever there is no room for a new object in
253  * the current page.  The allocated objects cannot be freed individually.
254  * It is only possible to free them all at once, by freeing the entire
255  * chain.
256  *
257  * NOTE: The chain allocator may be inefficient if the allocated objects
258  * are not much smaller than PAGE_SIZE.
259  */
260 struct chain_allocator {
261         struct linked_page *chain;      /* the chain */
262         unsigned int used_space;        /* total size of objects allocated out
263                                            of the current page */
264         gfp_t gfp_mask;         /* mask for allocating pages */
265         int safe_needed;        /* if set, only "safe" pages are allocated */
266 };
267 
268 static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
269                        int safe_needed)
270 {
271         ca->chain = NULL;
272         ca->used_space = LINKED_PAGE_DATA_SIZE;
273         ca->gfp_mask = gfp_mask;
274         ca->safe_needed = safe_needed;
275 }
276 
277 static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
278 {
279         void *ret;
280 
281         if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
282                 struct linked_page *lp;
283 
284                 lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
285                                         get_image_page(ca->gfp_mask, PG_ANY);
286                 if (!lp)
287                         return NULL;
288 
289                 lp->next = ca->chain;
290                 ca->chain = lp;
291                 ca->used_space = 0;
292         }
293         ret = ca->chain->data + ca->used_space;
294         ca->used_space += size;
295         return ret;
296 }
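
/*
 * Minimal usage sketch for the chain allocator (illustrative only;
 * struct foo is a made-up object type smaller than
 * LINKED_PAGE_DATA_SIZE):
 *
 *	struct chain_allocator ca;
 *	struct foo *obj;
 *
 *	chain_init(&ca, GFP_KERNEL, PG_ANY);
 *	obj = chain_alloc(&ca, sizeof(*obj));
 *	if (!obj)
 *		return -ENOMEM;
 *
 * Objects cannot be freed individually; the whole chain is released at
 * once with free_list_of_pages(ca.chain, PG_UNSAFE_CLEAR).
 */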
297 
298 /**
299  * Data types related to memory bitmaps.
300  *
301  * Memory bitmap is a structure consisting of many linked lists of
302  * objects.  The main list's elements are of type struct zone_bitmap
303  * and each of them corresponds to one zone.  For each zone bitmap
304  * object there is a list of objects of type struct bm_block that
305  * represent each block of the bitmap in which information is stored.
306  *
307  * struct memory_bitmap contains a pointer to the main list of zone
308  * bitmap objects, a struct bm_position used for browsing the bitmap,
309  * and a pointer to the list of pages used for allocating all of the
310  * zone bitmap objects and bitmap block objects.
311  *
312  * NOTE: It has to be possible to lay out the bitmap in memory
313  * using only allocations of order 0.  Additionally, the bitmap is
314  * designed to work with an arbitrary number of zones (this is over the
315  * top for now, but let's avoid making unnecessary assumptions ;-).
316  *
317  * struct zone_bitmap contains a pointer to a list of bitmap block
318  * objects and a pointer to the bitmap block object that has been
319  * most recently used for setting bits.  Additionally, it contains the
320  * PFNs that correspond to the start and end of the represented zone.
321  *
322  * struct bm_block contains a pointer to the memory page in which
323  * information is stored (in the form of a block of bitmap)
324  * It also contains the pfns that correspond to the start and end of
325  * the represented memory area.
326  *
327  * The memory bitmap is organized as a radix tree to guarantee fast random
328  * access to the bits. There is one radix tree for each zone (as returned
329  * from create_mem_extents).
330  *
331  * One radix tree is represented by one struct mem_zone_bm_rtree. There are
332  * two linked lists for the nodes of the tree, one for the inner nodes and
333  * one for the leaf nodes. The linked leaf nodes are used for fast linear
334  * access of the memory bitmap.
335  *
336  * The struct rtree_node represents one node of the radix tree.
337  */
338 
339 #define BM_END_OF_MAP   (~0UL)
340 
341 #define BM_BITS_PER_BLOCK       (PAGE_SIZE * BITS_PER_BYTE)
342 #define BM_BLOCK_SHIFT          (PAGE_SHIFT + 3)
343 #define BM_BLOCK_MASK           ((1UL << BM_BLOCK_SHIFT) - 1)
344 
345 /*
346  * struct rtree_node is a wrapper struct to link the nodes
347  * of the rtree together for easy linear iteration over
348  * bits and easy freeing
349  */
350 struct rtree_node {
351         struct list_head list;
352         unsigned long *data;
353 };
354 
355 /*
356  * struct mem_zone_bm_rtree represents a bitmap used for one
357  * populated memory zone.
358  */
359 struct mem_zone_bm_rtree {
360         struct list_head list;          /* Link Zones together         */
361         struct list_head nodes;         /* Radix Tree inner nodes      */
362         struct list_head leaves;        /* Radix Tree leaves           */
363         unsigned long start_pfn;        /* Zone start page frame       */
364         unsigned long end_pfn;          /* Zone end page frame + 1     */
365         struct rtree_node *rtree;       /* Radix Tree Root             */
366         int levels;                     /* Number of Radix Tree Levels */
367         unsigned int blocks;            /* Number of Bitmap Blocks     */
368 };
369 
370 /* struct bm_position is used for browsing memory bitmaps */
371 
372 struct bm_position {
373         struct mem_zone_bm_rtree *zone;
374         struct rtree_node *node;
375         unsigned long node_pfn;
376         int node_bit;
377 };
378 
379 struct memory_bitmap {
380         struct list_head zones;
381         struct linked_page *p_list;     /* list of pages used to store zone
382                                            bitmap objects and bitmap block
383                                            objects */
384         struct bm_position cur; /* most recently used bit position */
385 };
386 
387 /* Functions that operate on memory bitmaps */
388 
389 #define BM_ENTRIES_PER_LEVEL    (PAGE_SIZE / sizeof(unsigned long))
390 #if BITS_PER_LONG == 32
391 #define BM_RTREE_LEVEL_SHIFT    (PAGE_SHIFT - 2)
392 #else
393 #define BM_RTREE_LEVEL_SHIFT    (PAGE_SHIFT - 3)
394 #endif
395 #define BM_RTREE_LEVEL_MASK     ((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
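
/*
 * Worked numbers (illustrative, assuming 4 KiB pages and
 * BITS_PER_LONG == 64): BM_BITS_PER_BLOCK == 32768, so one leaf page
 * covers 32768 PFNs, i.e. 128 MiB of memory.  BM_ENTRIES_PER_LEVEL ==
 * 4096 / 8 == 512 and BM_RTREE_LEVEL_SHIFT == 9, so every inner node
 * fans out to 512 children: one inner level addresses 64 GiB and two
 * levels already address 32 TiB.
 */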
396 
397 /**
398  * alloc_rtree_node - Allocate a new node and add it to the radix tree.
399  *
400  * This function is used to allocate inner nodes as well as the
401  * leaf nodes of the radix tree. It also adds the node to the
402  * corresponding linked list passed in by the *list parameter.
403  */
404 static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
405                                            struct chain_allocator *ca,
406                                            struct list_head *list)
407 {
408         struct rtree_node *node;
409 
410         node = chain_alloc(ca, sizeof(struct rtree_node));
411         if (!node)
412                 return NULL;
413 
414         node->data = get_image_page(gfp_mask, safe_needed);
415         if (!node->data)
416                 return NULL;
417 
418         list_add_tail(&node->list, list);
419 
420         return node;
421 }
422 
423 /**
424  * add_rtree_block - Add a new leaf node to the radix tree.
425  *
426  * The leaf nodes need to be allocated in order to keep the linked
427  * list of leaves sorted. This is guaranteed by the zone->blocks
428  * counter.
429  */
430 static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
431                            int safe_needed, struct chain_allocator *ca)
432 {
433         struct rtree_node *node, *block, **dst;
434         unsigned int levels_needed, block_nr;
435         int i;
436 
437         block_nr = zone->blocks;
438         levels_needed = 0;
439 
440         /* How many levels do we need for this block nr? */
441         while (block_nr) {
442                 levels_needed += 1;
443                 block_nr >>= BM_RTREE_LEVEL_SHIFT;
444         }
445 
446         /* Make sure the rtree has enough levels */
447         for (i = zone->levels; i < levels_needed; i++) {
448                 node = alloc_rtree_node(gfp_mask, safe_needed, ca,
449                                         &zone->nodes);
450                 if (!node)
451                         return -ENOMEM;
452 
453                 node->data[0] = (unsigned long)zone->rtree;
454                 zone->rtree = node;
455                 zone->levels += 1;
456         }
457 
458         /* Allocate new block */
459         block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
460         if (!block)
461                 return -ENOMEM;
462 
463         /* Now walk the rtree to insert the block */
464         node = zone->rtree;
465         dst = &zone->rtree;
466         block_nr = zone->blocks;
467         for (i = zone->levels; i > 0; i--) {
468                 int index;
469 
470                 if (!node) {
471                         node = alloc_rtree_node(gfp_mask, safe_needed, ca,
472                                                 &zone->nodes);
473                         if (!node)
474                                 return -ENOMEM;
475                         *dst = node;
476                 }
477 
478                 index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
479                 index &= BM_RTREE_LEVEL_MASK;
480                 dst = (struct rtree_node **)&((*dst)->data[index]);
481                 node = *dst;
482         }
483 
484         zone->blocks += 1;
485         *dst = block;
486 
487         return 0;
488 }
489 
490 static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
491                                int clear_nosave_free);
492 
493 /**
494  * create_zone_bm_rtree - Create a radix tree for one zone.
495  *
496  * Allocates the mem_zone_bm_rtree structure and initializes it.
497  * This function also allocates and builds the radix tree for the
498  * zone.
499  */
500 static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
501                                                       int safe_needed,
502                                                       struct chain_allocator *ca,
503                                                       unsigned long start,
504                                                       unsigned long end)
505 {
506         struct mem_zone_bm_rtree *zone;
507         unsigned int i, nr_blocks;
508         unsigned long pages;
509 
510         pages = end - start;
511         zone  = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
512         if (!zone)
513                 return NULL;
514 
515         INIT_LIST_HEAD(&zone->nodes);
516         INIT_LIST_HEAD(&zone->leaves);
517         zone->start_pfn = start;
518         zone->end_pfn = end;
519         nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
520 
521         for (i = 0; i < nr_blocks; i++) {
522                 if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
523                         free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
524                         return NULL;
525                 }
526         }
527 
528         return zone;
529 }
530 
531 /**
532  * free_zone_bm_rtree - Free the memory of the radix tree.
533  *
534  * Free all node pages of the radix tree. The mem_zone_bm_rtree
535  * structure itself is not freed here nor are the rtree_node
536  * structs.
537  */
538 static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
539                                int clear_nosave_free)
540 {
541         struct rtree_node *node;
542 
543         list_for_each_entry(node, &zone->nodes, list)
544                 free_image_page(node->data, clear_nosave_free);
545 
546         list_for_each_entry(node, &zone->leaves, list)
547                 free_image_page(node->data, clear_nosave_free);
548 }
549 
550 static void memory_bm_position_reset(struct memory_bitmap *bm)
551 {
552         bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
553                                   list);
554         bm->cur.node = list_entry(bm->cur.zone->leaves.next,
555                                   struct rtree_node, list);
556         bm->cur.node_pfn = 0;
557         bm->cur.node_bit = 0;
558 }
559 
560 static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
561 
562 struct mem_extent {
563         struct list_head hook;
564         unsigned long start;
565         unsigned long end;
566 };
567 
568 /**
569  * free_mem_extents - Free a list of memory extents.
570  * @list: List of extents to free.
571  */
572 static void free_mem_extents(struct list_head *list)
573 {
574         struct mem_extent *ext, *aux;
575 
576         list_for_each_entry_safe(ext, aux, list, hook) {
577                 list_del(&ext->hook);
578                 kfree(ext);
579         }
580 }
581 
582 /**
583  * create_mem_extents - Create a list of memory extents.
584  * @list: List to put the extents into.
585  * @gfp_mask: Mask to use for memory allocations.
586  *
587  * The extents represent contiguous ranges of PFNs.
588  */
589 static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
590 {
591         struct zone *zone;
592 
593         INIT_LIST_HEAD(list);
594 
595         for_each_populated_zone(zone) {
596                 unsigned long zone_start, zone_end;
597                 struct mem_extent *ext, *cur, *aux;
598 
599                 zone_start = zone->zone_start_pfn;
600                 zone_end = zone_end_pfn(zone);
601 
602                 list_for_each_entry(ext, list, hook)
603                         if (zone_start <= ext->end)
604                                 break;
605 
606                 if (&ext->hook == list || zone_end < ext->start) {
607                         /* New extent is necessary */
608                         struct mem_extent *new_ext;
609 
610                         new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
611                         if (!new_ext) {
612                                 free_mem_extents(list);
613                                 return -ENOMEM;
614                         }
615                         new_ext->start = zone_start;
616                         new_ext->end = zone_end;
617                         list_add_tail(&new_ext->hook, &ext->hook);
618                         continue;
619                 }
620 
621                 /* Merge this zone's range of PFNs with the existing one */
622                 if (zone_start < ext->start)
623                         ext->start = zone_start;
624                 if (zone_end > ext->end)
625                         ext->end = zone_end;
626 
627                 /* More merging may be possible */
628                 cur = ext;
629                 list_for_each_entry_safe_continue(cur, aux, list, hook) {
630                         if (zone_end < cur->start)
631                                 break;
632                         if (zone_end < cur->end)
633                                 ext->end = cur->end;
634                         list_del(&cur->hook);
635                         kfree(cur);
636                 }
637         }
638 
639         return 0;
640 }
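
/*
 * Merge example (illustrative): with populated zones spanning PFNs
 * [0, 4096) and [2048, 8192), the first zone creates the extent
 * [0, 4096); for the second zone, zone_start (2048) <= ext->end (4096),
 * so no new extent is created and ext->end is extended to 8192,
 * leaving a single extent [0, 8192).
 */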
641 
642 /**
643  * memory_bm_create - Allocate memory for a memory bitmap.
644  */
645 static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
646                             int safe_needed)
647 {
648         struct chain_allocator ca;
649         struct list_head mem_extents;
650         struct mem_extent *ext;
651         int error;
652 
653         chain_init(&ca, gfp_mask, safe_needed);
654         INIT_LIST_HEAD(&bm->zones);
655 
656         error = create_mem_extents(&mem_extents, gfp_mask);
657         if (error)
658                 return error;
659 
660         list_for_each_entry(ext, &mem_extents, hook) {
661                 struct mem_zone_bm_rtree *zone;
662 
663                 zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
664                                             ext->start, ext->end);
665                 if (!zone) {
666                         error = -ENOMEM;
667                         goto Error;
668                 }
669                 list_add_tail(&zone->list, &bm->zones);
670         }
671 
672         bm->p_list = ca.chain;
673         memory_bm_position_reset(bm);
674  Exit:
675         free_mem_extents(&mem_extents);
676         return error;
677 
678  Error:
679         bm->p_list = ca.chain;
680         memory_bm_free(bm, PG_UNSAFE_CLEAR);
681         goto Exit;
682 }
683 
684 /**
685  * memory_bm_free - Free memory occupied by the memory bitmap.
686  * @bm: Memory bitmap.
687  */
688 static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
689 {
690         struct mem_zone_bm_rtree *zone;
691 
692         list_for_each_entry(zone, &bm->zones, list)
693                 free_zone_bm_rtree(zone, clear_nosave_free);
694 
695         free_list_of_pages(bm->p_list, clear_nosave_free);
696 
697         INIT_LIST_HEAD(&bm->zones);
698 }
699 
700 /**
701  * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
702  *
703  * Find the bit in memory bitmap @bm that corresponds to the given PFN.
704  * The cur.zone, cur.block and cur.node_pfn members of @bm are updated.
705  *
706  * Walk the radix tree to find the page containing the bit that represents @pfn
707  * and return the position of the bit in @addr and @bit_nr.
708  */
709 static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
710                               void **addr, unsigned int *bit_nr)
711 {
712         struct mem_zone_bm_rtree *curr, *zone;
713         struct rtree_node *node;
714         int i, block_nr;
715 
716         zone = bm->cur.zone;
717 
718         if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
719                 goto zone_found;
720 
721         zone = NULL;
722 
723         /* Find the right zone */
724         list_for_each_entry(curr, &bm->zones, list) {
725                 if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
726                         zone = curr;
727                         break;
728                 }
729         }
730 
731         if (!zone)
732                 return -EFAULT;
733 
734 zone_found:
735         /*
736          * We have found the zone. Now walk the radix tree to find the leaf node
737          * for our PFN.
738          */
739         node = bm->cur.node;
740         if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
741                 goto node_found;
742 
743         node      = zone->rtree;
744         block_nr  = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;
745 
746         for (i = zone->levels; i > 0; i--) {
747                 int index;
748 
749                 index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
750                 index &= BM_RTREE_LEVEL_MASK;
751                 BUG_ON(node->data[index] == 0);
752                 node = (struct rtree_node *)node->data[index];
753         }
754 
755 node_found:
756         /* Update last position */
757         bm->cur.zone = zone;
758         bm->cur.node = node;
759         bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;
760 
761         /* Set return values */
762         *addr = node->data;
763         *bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;
764 
765         return 0;
766 }
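
/*
 * Index arithmetic (illustrative, with BM_BLOCK_SHIFT == 15 on 4 KiB
 * pages): for a zone starting at PFN 0x10000, pfn == 0x1a003 gives
 * block_nr == (0xa003 >> 15) == 1, so the walk lands on the second
 * leaf, and *bit_nr == (0xa003 & BM_BLOCK_MASK) == 0x2003.
 */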
767 
768 static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
769 {
770         void *addr;
771         unsigned int bit;
772         int error;
773 
774         error = memory_bm_find_bit(bm, pfn, &addr, &bit);
775         BUG_ON(error);
776         set_bit(bit, addr);
777 }
778 
779 static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
780 {
781         void *addr;
782         unsigned int bit;
783         int error;
784 
785         error = memory_bm_find_bit(bm, pfn, &addr, &bit);
786         if (!error)
787                 set_bit(bit, addr);
788 
789         return error;
790 }
791 
792 static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
793 {
794         void *addr;
795         unsigned int bit;
796         int error;
797 
798         error = memory_bm_find_bit(bm, pfn, &addr, &bit);
799         BUG_ON(error);
800         clear_bit(bit, addr);
801 }
802 
803 static void memory_bm_clear_current(struct memory_bitmap *bm)
804 {
805         int bit;
806 
807         bit = max(bm->cur.node_bit - 1, 0);
808         clear_bit(bit, bm->cur.node->data);
809 }
810 
811 static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
812 {
813         void *addr;
814         unsigned int bit;
815         int error;
816 
817         error = memory_bm_find_bit(bm, pfn, &addr, &bit);
818         BUG_ON(error);
819         return test_bit(bit, addr);
820 }
821 
822 static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
823 {
824         void *addr;
825         unsigned int bit;
826 
827         return !memory_bm_find_bit(bm, pfn, &addr, &bit);
828 }
829 
830 /*
831  * rtree_next_node - Jump to the next leaf node.
832  *
833  * Set the position to the beginning of the next node in the
834  * memory bitmap. This is either the next node in the current
835  * zone's radix tree or the first node in the radix tree of the
836  * next zone.
837  *
838  * Return true if there is a next node, false otherwise.
839  */
840 static bool rtree_next_node(struct memory_bitmap *bm)
841 {
842         if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
843                 bm->cur.node = list_entry(bm->cur.node->list.next,
844                                           struct rtree_node, list);
845                 bm->cur.node_pfn += BM_BITS_PER_BLOCK;
846                 bm->cur.node_bit  = 0;
847                 touch_softlockup_watchdog();
848                 return true;
849         }
850 
851         /* No more nodes, go to the next zone */
852         if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
853                 bm->cur.zone = list_entry(bm->cur.zone->list.next,
854                                   struct mem_zone_bm_rtree, list);
855                 bm->cur.node = list_entry(bm->cur.zone->leaves.next,
856                                           struct rtree_node, list);
857                 bm->cur.node_pfn = 0;
858                 bm->cur.node_bit = 0;
859                 return true;
860         }
861 
862         /* No more zones */
863         return false;
864 }
865 
866 /**
867  * memory_bm_next_pfn - Find the next set bit in a memory bitmap.
868  * @bm: Memory bitmap.
869  *
870  * Starting from the last returned position this function searches for the next
871  * set bit in @bm and returns the PFN represented by it.  If no more bits are
872  * set, BM_END_OF_MAP is returned.
873  *
874  * It is required to run memory_bm_position_reset() before the first call to
875  * this function for the given memory bitmap.
876  */
877 static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
878 {
879         unsigned long bits, pfn, pages;
880         int bit;
881 
882         do {
883                 pages     = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
884                 bits      = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
885                 bit       = find_next_bit(bm->cur.node->data, bits,
886                                           bm->cur.node_bit);
887                 if (bit < bits) {
888                         pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
889                         bm->cur.node_bit = bit + 1;
890                         return pfn;
891                 }
892         } while (rtree_next_node(bm));
893 
894         return BM_END_OF_MAP;
895 }
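
/*
 * Typical iteration idiom over a memory bitmap (a sketch of the
 * pattern used by several callers below; handle_pfn() is a
 * placeholder):
 *
 *	memory_bm_position_reset(bm);
 *	for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
 *	     pfn = memory_bm_next_pfn(bm))
 *		handle_pfn(pfn);
 *
 * The reset is mandatory before the first call because the iterator
 * state lives in bm->cur.
 */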
896 
897 /*
898  * This structure represents a range of page frames the contents of which
899  * should not be saved during hibernation.
900  */
901 struct nosave_region {
902         struct list_head list;
903         unsigned long start_pfn;
904         unsigned long end_pfn;
905 };
906 
907 static LIST_HEAD(nosave_regions);
908 
909 static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
910 {
911         struct rtree_node *node;
912 
913         list_for_each_entry(node, &zone->nodes, list)
914                 recycle_safe_page(node->data);
915 
916         list_for_each_entry(node, &zone->leaves, list)
917                 recycle_safe_page(node->data);
918 }
919 
920 static void memory_bm_recycle(struct memory_bitmap *bm)
921 {
922         struct mem_zone_bm_rtree *zone;
923         struct linked_page *p_list;
924 
925         list_for_each_entry(zone, &bm->zones, list)
926                 recycle_zone_bm_rtree(zone);
927 
928         p_list = bm->p_list;
929         while (p_list) {
930                 struct linked_page *lp = p_list;
931 
932                 p_list = lp->next;
933                 recycle_safe_page(lp);
934         }
935 }
936 
937 /**
938  * __register_nosave_region - Register a region of unsaveable memory.
939  *
940  * Register a range of page frames the contents of which should not be saved
941  * during hibernation (to be used in the early initialization code).
942  */
943 void __init __register_nosave_region(unsigned long start_pfn,
944                                      unsigned long end_pfn, int use_kmalloc)
945 {
946         struct nosave_region *region;
947 
948         if (start_pfn >= end_pfn)
949                 return;
950 
951         if (!list_empty(&nosave_regions)) {
952                 /* Try to extend the previous region (they should be sorted) */
953                 region = list_entry(nosave_regions.prev,
954                                         struct nosave_region, list);
955                 if (region->end_pfn == start_pfn) {
956                         region->end_pfn = end_pfn;
957                         goto Report;
958                 }
959         }
960         if (use_kmalloc) {
961                 /* During init, this shouldn't fail */
962                 region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
963                 BUG_ON(!region);
964         } else {
965                 /* This allocation cannot fail */
966                 region = memblock_virt_alloc(sizeof(struct nosave_region), 0);
967         }
968         region->start_pfn = start_pfn;
969         region->end_pfn = end_pfn;
970         list_add_tail(&region->list, &nosave_regions);
971  Report:
972         pr_info("Registered nosave memory: [mem %#010llx-%#010llx]\n",
973                 (unsigned long long) start_pfn << PAGE_SHIFT,
974                 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
975 }
976 
977 /*
978  * Set bits in this map correspond to the page frames the contents of which
979  * should not be saved during the suspend.
980  */
981 static struct memory_bitmap *forbidden_pages_map;
982 
983 /* Set bits in this map correspond to free page frames. */
984 static struct memory_bitmap *free_pages_map;
985 
986 /*
987  * Each page frame allocated for creating the image is marked by setting the
988  * corresponding bits in forbidden_pages_map and free_pages_map simultaneously
989  */
990 
991 void swsusp_set_page_free(struct page *page)
992 {
993         if (free_pages_map)
994                 memory_bm_set_bit(free_pages_map, page_to_pfn(page));
995 }
996 
997 static int swsusp_page_is_free(struct page *page)
998 {
999         return free_pages_map ?
1000                 memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
1001 }
1002 
1003 void swsusp_unset_page_free(struct page *page)
1004 {
1005         if (free_pages_map)
1006                 memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
1007 }
1008 
1009 static void swsusp_set_page_forbidden(struct page *page)
1010 {
1011         if (forbidden_pages_map)
1012                 memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
1013 }
1014 
1015 int swsusp_page_is_forbidden(struct page *page)
1016 {
1017         return forbidden_pages_map ?
1018                 memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
1019 }
1020 
1021 static void swsusp_unset_page_forbidden(struct page *page)
1022 {
1023         if (forbidden_pages_map)
1024                 memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
1025 }
1026 
1027 /**
1028  * mark_nosave_pages - Mark pages that should not be saved.
1029  * @bm: Memory bitmap.
1030  *
1031  * Set the bits in @bm that correspond to the page frames the contents of which
1032  * should not be saved.
1033  */
1034 static void mark_nosave_pages(struct memory_bitmap *bm)
1035 {
1036         struct nosave_region *region;
1037 
1038         if (list_empty(&nosave_regions))
1039                 return;
1040 
1041         list_for_each_entry(region, &nosave_regions, list) {
1042                 unsigned long pfn;
1043 
1044                 pr_debug("Marking nosave pages: [mem %#010llx-%#010llx]\n",
1045                          (unsigned long long) region->start_pfn << PAGE_SHIFT,
1046                          ((unsigned long long) region->end_pfn << PAGE_SHIFT)
1047                                 - 1);
1048 
1049                 for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
1050                         if (pfn_valid(pfn)) {
1051                                 /*
1052                                  * It is safe to ignore the result of
1053                                  * mem_bm_set_bit_check() here, since we won't
1054                                  * touch the PFNs for which the error is
1055                                  * returned anyway.
1056                                  */
1057                                 mem_bm_set_bit_check(bm, pfn);
1058                         }
1059         }
1060 }
1061 
1062 /**
1063  * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
1064  *
1065  * Create bitmaps needed for marking page frames that should not be saved and
1066  * free page frames.  The forbidden_pages_map and free_pages_map pointers are
1067  * only modified if everything goes well, because we don't want the bits to be
1068  * touched before both bitmaps are set up.
1069  */
1070 int create_basic_memory_bitmaps(void)
1071 {
1072         struct memory_bitmap *bm1, *bm2;
1073         int error = 0;
1074 
1075         if (forbidden_pages_map && free_pages_map)
1076                 return 0;
1077         else
1078                 BUG_ON(forbidden_pages_map || free_pages_map);
1079 
1080         bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
1081         if (!bm1)
1082                 return -ENOMEM;
1083 
1084         error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
1085         if (error)
1086                 goto Free_first_object;
1087 
1088         bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
1089         if (!bm2)
1090                 goto Free_first_bitmap;
1091 
1092         error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
1093         if (error)
1094                 goto Free_second_object;
1095 
1096         forbidden_pages_map = bm1;
1097         free_pages_map = bm2;
1098         mark_nosave_pages(forbidden_pages_map);
1099 
1100         pr_debug("Basic memory bitmaps created\n");
1101 
1102         return 0;
1103 
1104  Free_second_object:
1105         kfree(bm2);
1106  Free_first_bitmap:
1107         memory_bm_free(bm1, PG_UNSAFE_CLEAR);
1108  Free_first_object:
1109         kfree(bm1);
1110         return -ENOMEM;
1111 }
1112 
1113 /**
1114  * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
1115  *
1116  * Free memory bitmaps allocated by create_basic_memory_bitmaps().  The
1117  * auxiliary pointers are necessary so that the bitmaps themselves are not
1118  * referred to while they are being freed.
1119  */
1120 void free_basic_memory_bitmaps(void)
1121 {
1122         struct memory_bitmap *bm1, *bm2;
1123 
1124         if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
1125                 return;
1126 
1127         bm1 = forbidden_pages_map;
1128         bm2 = free_pages_map;
1129         forbidden_pages_map = NULL;
1130         free_pages_map = NULL;
1131         memory_bm_free(bm1, PG_UNSAFE_CLEAR);
1132         kfree(bm1);
1133         memory_bm_free(bm2, PG_UNSAFE_CLEAR);
1134         kfree(bm2);
1135 
1136         pr_debug("Basic memory bitmaps freed\n");
1137 }
1138 
1139 void clear_free_pages(void)
1140 {
1141 #ifdef CONFIG_PAGE_POISONING_ZERO
1142         struct memory_bitmap *bm = free_pages_map;
1143         unsigned long pfn;
1144 
1145         if (WARN_ON(!(free_pages_map)))
1146                 return;
1147 
1148         memory_bm_position_reset(bm);
1149         pfn = memory_bm_next_pfn(bm);
1150         while (pfn != BM_END_OF_MAP) {
1151                 if (pfn_valid(pfn))
1152                         clear_highpage(pfn_to_page(pfn));
1153 
1154                 pfn = memory_bm_next_pfn(bm);
1155         }
1156         memory_bm_position_reset(bm);
1157         pr_info("free pages cleared after restore\n");
1158 #endif /* CONFIG_PAGE_POISONING_ZERO */
1159 }
1160 
1161 /**
1162  * snapshot_additional_pages - Estimate the number of extra pages needed.
1163  * @zone: Memory zone to carry out the computation for.
1164  *
1165  * Estimate the number of additional pages needed for setting up the
1166  * hibernation image data structures for @zone (usually, the returned
1167  * value is greater than the exact number).
1168  */
1169 unsigned int snapshot_additional_pages(struct zone *zone)
1170 {
1171         unsigned int rtree, nodes;
1172 
1173         rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
1174         rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
1175                               LINKED_PAGE_DATA_SIZE);
1176         while (nodes > 1) {
1177                 nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
1178                 rtree += nodes;
1179         }
1180 
1181         return 2 * rtree;
1182 }
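
/*
 * Worked estimate (illustrative, 64-bit, 4 KiB pages): a zone spanning
 * 1 GiB (262144 pages) needs DIV_ROUND_UP(262144, 32768) == 8 bitmap
 * pages, one extra page for the rtree_node structs and one inner node,
 * so rtree == 10 and the function returns 20; the factor of two
 * accounts for the two bitmaps (orig_bm and copy_bm) that are set up.
 */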
1183 
1184 #ifdef CONFIG_HIGHMEM
1185 /**
1186  * count_free_highmem_pages - Compute the total number of free highmem pages.
1187  *
1188  * The returned number is system-wide.
1189  */
1190 static unsigned int count_free_highmem_pages(void)
1191 {
1192         struct zone *zone;
1193         unsigned int cnt = 0;
1194 
1195         for_each_populated_zone(zone)
1196                 if (is_highmem(zone))
1197                         cnt += zone_page_state(zone, NR_FREE_PAGES);
1198 
1199         return cnt;
1200 }
1201 
1202 /**
1203  * saveable_highmem_page - Check if a highmem page is saveable.
1204  *
1205  * Determine whether a highmem page should be included in a hibernation image.
1206  *
1207  * We should save the page if it isn't Nosave, NosaveFree or Reserved,
1208  * and it isn't part of a free chunk of pages.
1209  */
1210 static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
1211 {
1212         struct page *page;
1213 
1214         if (!pfn_valid(pfn))
1215                 return NULL;
1216 
1217         page = pfn_to_page(pfn);
1218         if (page_zone(page) != zone)
1219                 return NULL;
1220 
1221         BUG_ON(!PageHighMem(page));
1222 
1223         if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) ||
1224             PageReserved(page))
1225                 return NULL;
1226 
1227         if (page_is_guard(page))
1228                 return NULL;
1229 
1230         return page;
1231 }
1232 
1233 /**
1234  * count_highmem_pages - Compute the total number of saveable highmem pages.
1235  */
1236 static unsigned int count_highmem_pages(void)
1237 {
1238         struct zone *zone;
1239         unsigned int n = 0;
1240 
1241         for_each_populated_zone(zone) {
1242                 unsigned long pfn, max_zone_pfn;
1243 
1244                 if (!is_highmem(zone))
1245                         continue;
1246 
1247                 mark_free_pages(zone);
1248                 max_zone_pfn = zone_end_pfn(zone);
1249                 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1250                         if (saveable_highmem_page(zone, pfn))
1251                                 n++;
1252         }
1253         return n;
1254 }
1255 #else
1256 static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
1257 {
1258         return NULL;
1259 }
1260 #endif /* CONFIG_HIGHMEM */
1261 
1262 /**
1263  * saveable_page - Check if the given page is saveable.
1264  *
1265  * Determine whether a non-highmem page should be included in a hibernation
1266  * image.
1267  *
1268  * We should save the page if it isn't Nosave, and is not in the range
1269  * of pages statically defined as 'unsaveable', and it isn't part of
1270  * a free chunk of pages.
1271  */
1272 static struct page *saveable_page(struct zone *zone, unsigned long pfn)
1273 {
1274         struct page *page;
1275 
1276         if (!pfn_valid(pfn))
1277                 return NULL;
1278 
1279         page = pfn_to_page(pfn);
1280         if (page_zone(page) != zone)
1281                 return NULL;
1282 
1283         BUG_ON(PageHighMem(page));
1284 
1285         if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
1286                 return NULL;
1287 
1288         if (PageReserved(page)
1289             && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
1290                 return NULL;
1291 
1292         if (page_is_guard(page))
1293                 return NULL;
1294 
1295         return page;
1296 }
1297 
1298 /**
1299  * count_data_pages - Compute the total number of saveable non-highmem pages.
1300  */
1301 static unsigned int count_data_pages(void)
1302 {
1303         struct zone *zone;
1304         unsigned long pfn, max_zone_pfn;
1305         unsigned int n = 0;
1306 
1307         for_each_populated_zone(zone) {
1308                 if (is_highmem(zone))
1309                         continue;
1310 
1311                 mark_free_pages(zone);
1312                 max_zone_pfn = zone_end_pfn(zone);
1313                 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1314                         if (saveable_page(zone, pfn))
1315                                 n++;
1316         }
1317         return n;
1318 }
1319 
1320 /*
1321  * This is needed because copy_page and memcpy are not usable for copying
1322  * task structs.
1323  */
1324 static inline void do_copy_page(long *dst, long *src)
1325 {
1326         int n;
1327 
1328         for (n = PAGE_SIZE / sizeof(long); n; n--)
1329                 *dst++ = *src++;
1330 }
1331 
1332 /**
1333  * safe_copy_page - Copy a page in a safe way.
1334  *
1335  * Check if the page we are going to copy is marked as present in the kernel
1336  * page tables (this always is the case if CONFIG_DEBUG_PAGEALLOC is not set
1337  * and in that case kernel_page_present() always returns 'true').
1338  */
1339 static void safe_copy_page(void *dst, struct page *s_page)
1340 {
1341         if (kernel_page_present(s_page)) {
1342                 do_copy_page(dst, page_address(s_page));
1343         } else {
1344                 kernel_map_pages(s_page, 1, 1);
1345                 do_copy_page(dst, page_address(s_page));
1346                 kernel_map_pages(s_page, 1, 0);
1347         }
1348 }
1349 
1350 #ifdef CONFIG_HIGHMEM
1351 static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
1352 {
1353         return is_highmem(zone) ?
1354                 saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
1355 }
1356 
1357 static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
1358 {
1359         struct page *s_page, *d_page;
1360         void *src, *dst;
1361 
1362         s_page = pfn_to_page(src_pfn);
1363         d_page = pfn_to_page(dst_pfn);
1364         if (PageHighMem(s_page)) {
1365                 src = kmap_atomic(s_page);
1366                 dst = kmap_atomic(d_page);
1367                 do_copy_page(dst, src);
1368                 kunmap_atomic(dst);
1369                 kunmap_atomic(src);
1370         } else {
1371                 if (PageHighMem(d_page)) {
1372                         /*
1373                          * The page pointed to by src may contain some kernel
1374                          * data modified by kmap_atomic()
1375                          */
1376                         safe_copy_page(buffer, s_page);
1377                         dst = kmap_atomic(d_page);
1378                         copy_page(dst, buffer);
1379                         kunmap_atomic(dst);
1380                 } else {
1381                         safe_copy_page(page_address(d_page), s_page);
1382                 }
1383         }
1384 }
1385 #else
1386 #define page_is_saveable(zone, pfn)     saveable_page(zone, pfn)
1387 
1388 static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
1389 {
1390         safe_copy_page(page_address(pfn_to_page(dst_pfn)),
1391                                 pfn_to_page(src_pfn));
1392 }
1393 #endif /* CONFIG_HIGHMEM */
1394 
1395 static void copy_data_pages(struct memory_bitmap *copy_bm,
1396                             struct memory_bitmap *orig_bm)
1397 {
1398         struct zone *zone;
1399         unsigned long pfn;
1400 
1401         for_each_populated_zone(zone) {
1402                 unsigned long max_zone_pfn;
1403 
1404                 mark_free_pages(zone);
1405                 max_zone_pfn = zone_end_pfn(zone);
1406                 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1407                         if (page_is_saveable(zone, pfn))
1408                                 memory_bm_set_bit(orig_bm, pfn);
1409         }
1410         memory_bm_position_reset(orig_bm);
1411         memory_bm_position_reset(copy_bm);
1412         for (;;) {
1413                 pfn = memory_bm_next_pfn(orig_bm);
1414                 if (unlikely(pfn == BM_END_OF_MAP))
1415                         break;
1416                 copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
1417         }
1418 }
1419 
1420 /* Total number of image pages */
1421 static unsigned int nr_copy_pages;
1422 /* Number of pages needed for saving the original pfns of the image pages */
1423 static unsigned int nr_meta_pages;
1424 /*
1425  * Numbers of normal and highmem page frames allocated for hibernation image
1426  * before suspending devices.
1427  */
1428 static unsigned int alloc_normal, alloc_highmem;
1429 /*
1430  * Memory bitmap used for marking saveable pages (during hibernation) or
1431  * hibernation image pages (during restore)
1432  */
1433 static struct memory_bitmap orig_bm;
1434 /*
1435  * Memory bitmap used during hibernation for marking allocated page frames that
1436  * will contain copies of saveable pages.  During restore it is initially used
1437  * for marking hibernation image pages, but then the set bits from it are
1438  * duplicated in @orig_bm and it is released.  On highmem systems it is next
1439  * used for marking "safe" highmem pages, but it has to be reinitialized for
1440  * this purpose.
1441  */
1442 static struct memory_bitmap copy_bm;
1443 
1444 /**
1445  * swsusp_free - Free pages allocated for hibernation image.
1446  *
1447  * Image pages are allocated before snapshot creation, so they need to be
1448  * released after resume.
1449  */
1450 void swsusp_free(void)
1451 {
1452         unsigned long fb_pfn, fr_pfn;
1453 
1454         if (!forbidden_pages_map || !free_pages_map)
1455                 goto out;
1456 
1457         memory_bm_position_reset(forbidden_pages_map);
1458         memory_bm_position_reset(free_pages_map);
1459 
1460 loop:
1461         fr_pfn = memory_bm_next_pfn(free_pages_map);
1462         fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
1463 
1464         /*
1465          * Find the next bit set in both bitmaps. This is guaranteed to
1466          * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
1467          */
1468         do {
1469                 if (fb_pfn < fr_pfn)
1470                         fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
1471                 if (fr_pfn < fb_pfn)
1472                         fr_pfn = memory_bm_next_pfn(free_pages_map);
1473         } while (fb_pfn != fr_pfn);
1474 
1475         if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
1476                 struct page *page = pfn_to_page(fr_pfn);
1477 
1478                 memory_bm_clear_current(forbidden_pages_map);
1479                 memory_bm_clear_current(free_pages_map);
1480                 hibernate_restore_unprotect_page(page_address(page));
1481                 __free_page(page);
1482                 goto loop;
1483         }
1484 
1485 out:
1486         nr_copy_pages = 0;
1487         nr_meta_pages = 0;
1488         restore_pblist = NULL;
1489         buffer = NULL;
1490         alloc_normal = 0;
1491         alloc_highmem = 0;
1492         hibernate_restore_protection_end();
1493 }
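
/*
 * The loop above is a sorted-sequence intersection: both bitmaps yield
 * PFNs in ascending order, so always advancing the iterator that is
 * behind visits exactly the PFNs set in both maps, i.e. the page
 * frames that are both forbidden and free (image pages).  Illustrative
 * example: forbidden = {3, 5, 9}, free = {2, 5, 9} frees pages 5 and 9.
 */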
1494 
1495 /* Helper functions used for the shrinking of memory. */
1496 
1497 #define GFP_IMAGE       (GFP_KERNEL | __GFP_NOWARN)
1498 
1499 /**
1500  * preallocate_image_pages - Allocate a number of pages for hibernation image.
1501  * @nr_pages: Number of page frames to allocate.
1502  * @mask: GFP flags to use for the allocation.
1503  *
1504  * Return value: Number of page frames actually allocated
1505  */
1506 static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
1507 {
1508         unsigned long nr_alloc = 0;
1509 
1510         while (nr_pages > 0) {
1511                 struct page *page;
1512 
1513                 page = alloc_image_page(mask);
1514                 if (!page)
1515                         break;
1516                 memory_bm_set_bit(&copy_bm, page_to_pfn(page));
1517                 if (PageHighMem(page))
1518                         alloc_highmem++;
1519                 else
1520                         alloc_normal++;
1521                 nr_pages--;
1522                 nr_alloc++;
1523         }
1524 
1525         return nr_alloc;
1526 }
1527 
1528 static unsigned long preallocate_image_memory(unsigned long nr_pages,
1529                                               unsigned long avail_normal)
1530 {
1531         unsigned long alloc;
1532 
1533         if (avail_normal <= alloc_normal)
1534                 return 0;
1535 
1536         alloc = avail_normal - alloc_normal;
1537         if (nr_pages < alloc)
1538                 alloc = nr_pages;
1539 
1540         return preallocate_image_pages(alloc, GFP_IMAGE);
1541 }
1542 
1543 #ifdef CONFIG_HIGHMEM
1544 static unsigned long preallocate_image_highmem(unsigned long nr_pages)
1545 {
1546         return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
1547 }
1548 
1549 /**
1550  *  __fraction - Compute (an approximation of) x * (multiplier / base).
1551  */
1552 static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
1553 {
1554         x *= multiplier;
1555         do_div(x, base);
1556         return (unsigned long)x;
1557 }
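
/*
 * E.g. __fraction(1000, 3, 4) == 750.  do_div() is used here because
 * plain 64-bit division is not available on all 32-bit architectures.
 */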
1558 
1559 static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1560                                                   unsigned long highmem,
1561                                                   unsigned long total)
1562 {
1563         unsigned long alloc = __fraction(nr_pages, highmem, total);
1564 
1565         return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
1566 }
1567 #else /* CONFIG_HIGHMEM */
1568 static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
1569 {
1570         return 0;
1571 }
1572 
1573 static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1574                                                          unsigned long highmem,
1575                                                          unsigned long total)
1576 {
1577         return 0;
1578 }
1579 #endif /* CONFIG_HIGHMEM */
1580 
1581 /**
1582  * free_unnecessary_pages - Release preallocated pages not needed for the image.
1583  */
1584 static unsigned long free_unnecessary_pages(void)
1585 {
1586         unsigned long save, to_free_normal, to_free_highmem, free;
1587 
1588         save = count_data_pages();
1589         if (alloc_normal >= save) {
1590                 to_free_normal = alloc_normal - save;
1591                 save = 0;
1592         } else {
1593                 to_free_normal = 0;
1594                 save -= alloc_normal;
1595         }
1596         save += count_highmem_pages();
1597         if (alloc_highmem >= save) {
1598                 to_free_highmem = alloc_highmem - save;
1599         } else {
1600                 to_free_highmem = 0;
1601                 save -= alloc_highmem;
1602                 if (to_free_normal > save)
1603                         to_free_normal -= save;
1604                 else
1605                         to_free_normal = 0;
1606         }
1607         free = to_free_normal + to_free_highmem;
1608 
1609         memory_bm_position_reset(&copy_bm);
1610 
1611         while (to_free_normal > 0 || to_free_highmem > 0) {
1612                 unsigned long pfn = memory_bm_next_pfn(&copy_bm);
1613                 struct page *page = pfn_to_page(pfn);
1614 
1615                 if (PageHighMem(page)) {
1616                         if (!to_free_highmem)
1617                                 continue;
1618                         to_free_highmem--;
1619                         alloc_highmem--;
1620                 } else {
1621                         if (!to_free_normal)
1622                                 continue;
1623                         to_free_normal--;
1624                         alloc_normal--;
1625                 }
1626                 memory_bm_clear_bit(&copy_bm, pfn);
1627                 swsusp_unset_page_forbidden(page);
1628                 swsusp_unset_page_free(page);
1629                 __free_page(page);
1630         }
1631 
1632         return free;
1633 }
1634 
1635 /**
1636  * minimum_image_size - Estimate the minimum acceptable size of an image.
1637  * @saveable: Number of saveable pages in the system.
1638  *
1639  * We want to avoid attempting to free too much memory too hard, so estimate the
1640  * minimum acceptable size of a hibernation image to use as the lower limit for
1641  * preallocating memory.
1642  *
1643  * We assume that the minimum image size should be proportional to
1644  *
1645  * [number of saveable pages] - [number of pages that can be freed in theory]
1646  *
1647  * where the second term is the sum of (1) reclaimable slab pages, (2) active
1648  * and (3) inactive anonymous pages, (4) active and (5) inactive file pages,
1649  * minus mapped file pages.
1650  */
1651 static unsigned long minimum_image_size(unsigned long saveable)
1652 {
1653         unsigned long size;
1654 
1655         size = global_node_page_state(NR_SLAB_RECLAIMABLE)
1656                 + global_node_page_state(NR_ACTIVE_ANON)
1657                 + global_node_page_state(NR_INACTIVE_ANON)
1658                 + global_node_page_state(NR_ACTIVE_FILE)
1659                 + global_node_page_state(NR_INACTIVE_FILE)
1660                 - global_node_page_state(NR_FILE_MAPPED);
1661 
1662         return saveable <= size ? 0 : saveable - size;
1663 }
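
/*
 * Illustrative example with made-up counters: given 600000 saveable pages,
 * 50000 reclaimable slab pages, 200000 anonymous pages (active + inactive),
 * 250000 file pages (active + inactive) and 100000 mapped file pages,
 *
 *	size = 50000 + 200000 + 250000 - 100000 = 400000
 *	minimum_image_size(600000) = 600000 - 400000 = 200000
 *
 * so the preallocation below will not try to squeeze the image under
 * 200000 pages.
 */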
1664 
1665 /**
1666  * hibernate_preallocate_memory - Preallocate memory for hibernation image.
1667  *
1668  * To create a hibernation image it is necessary to make a copy of every page
1669  * frame in use.  We also need a number of page frames to be free during
1670  * hibernation for allocations made while saving the image and for device
1671  * drivers, in case they need to allocate memory from their hibernation
1672  * callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough
1673  * estimate) and reserved_size divided by PAGE_SIZE (which is tunable through
1674  * /sys/power/reserved_size), respectively).  To make this happen, we compute the
1675  * total number of available page frames and allocate at least
1676  *
1677  * ([page frames total] - PAGES_FOR_IO - [metadata pages]) / 2
1678  *  - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
1679  *
1680  * of them, which corresponds to the maximum size of a hibernation image.
1681  *
1682  * If image_size is set below the number following from the above formula,
1683  * the preallocation of memory is continued until the total number of saveable
1684  * pages in the system is below the requested image size or the minimum
1685  * acceptable image size returned by minimum_image_size(), whichever is greater.
1686  */
1687 int hibernate_preallocate_memory(void)
1688 {
1689         struct zone *zone;
1690         unsigned long saveable, size, max_size, count, highmem, pages = 0;
1691         unsigned long alloc, save_highmem, pages_highmem, avail_normal;
1692         ktime_t start, stop;
1693         int error;
1694 
1695         pr_info("Preallocating image memory... ");
1696         start = ktime_get();
1697 
1698         error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
1699         if (error)
1700                 goto err_out;
1701 
1702         error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
1703         if (error)
1704                 goto err_out;
1705 
1706         alloc_normal = 0;
1707         alloc_highmem = 0;
1708 
1709         /* Count the number of saveable data pages. */
1710         save_highmem = count_highmem_pages();
1711         saveable = count_data_pages();
1712 
1713         /*
1714          * Compute the total number of page frames we can use (count) and the
1715          * number of pages needed for image metadata (size).
1716          */
1717         count = saveable;
1718         saveable += save_highmem;
1719         highmem = save_highmem;
1720         size = 0;
1721         for_each_populated_zone(zone) {
1722                 size += snapshot_additional_pages(zone);
1723                 if (is_highmem(zone))
1724                         highmem += zone_page_state(zone, NR_FREE_PAGES);
1725                 else
1726                         count += zone_page_state(zone, NR_FREE_PAGES);
1727         }
1728         avail_normal = count;
1729         count += highmem;
1730         count -= totalreserve_pages;
1731 
1732         /* Add number of pages required for page keys (s390 only). */
1733         size += page_key_additional_pages(saveable);
1734 
1735         /* Compute the maximum number of saveable pages to leave in memory. */
1736         max_size = (count - (size + PAGES_FOR_IO)) / 2
1737                         - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
1738         /* Compute the desired number of image pages specified by image_size. */
1739         size = DIV_ROUND_UP(image_size, PAGE_SIZE);
1740         if (size > max_size)
1741                 size = max_size;
1742         /*
1743          * If the desired number of image pages is at least as large as the
1744          * current number of saveable pages in memory, allocate page frames for
1745          * the image and we're done.
1746          */
1747         if (size >= saveable) {
1748                 pages = preallocate_image_highmem(save_highmem);
1749                 pages += preallocate_image_memory(saveable - pages, avail_normal);
1750                 goto out;
1751         }
1752 
1753         /* Estimate the minimum size of the image. */
1754         pages = minimum_image_size(saveable);
1755         /*
1756          * To avoid excessive pressure on the normal zone, leave room in it to
1757          * accommodate an image of the minimum size (unless it's already too
1758          * small, in which case don't preallocate pages from it at all).
1759          */
1760         if (avail_normal > pages)
1761                 avail_normal -= pages;
1762         else
1763                 avail_normal = 0;
1764         if (size < pages)
1765                 size = min_t(unsigned long, pages, max_size);
1766 
1767         /*
1768          * Let the memory management subsystem know that we're going to need a
1769          * large number of page frames to allocate and make it free some memory.
1770          * NOTE: If this is not done, performance will be hurt badly in some
1771          * test cases.
1772          */
1773         shrink_all_memory(saveable - size);
1774 
1775         /*
1776          * The number of saveable pages in memory was too high, so apply some
1777          * pressure to decrease it.  First, make room for the largest possible
1778          * image and fail if that doesn't work.  Next, try to decrease the size
1779          * of the image as much as indicated by 'size' using allocations from
1780          * highmem and non-highmem zones separately.
1781          */
1782         pages_highmem = preallocate_image_highmem(highmem / 2);
1783         alloc = count - max_size;
1784         if (alloc > pages_highmem)
1785                 alloc -= pages_highmem;
1786         else
1787                 alloc = 0;
1788         pages = preallocate_image_memory(alloc, avail_normal);
1789         if (pages < alloc) {
1790                 /* We have exhausted non-highmem pages, try highmem. */
1791                 alloc -= pages;
1792                 pages += pages_highmem;
1793                 pages_highmem = preallocate_image_highmem(alloc);
1794                 if (pages_highmem < alloc)
1795                         goto err_out;
1796                 pages += pages_highmem;
1797                 /*
1798                  * size is the desired number of saveable pages to leave in
1799                  * memory, so try to preallocate (all memory - size) pages.
1800                  */
1801                 alloc = (count - pages) - size;
1802                 pages += preallocate_image_highmem(alloc);
1803         } else {
1804                 /*
1805                  * There are approximately max_size saveable pages at this point
1806                  * and we want to reduce this number down to size.
1807                  */
1808                 alloc = max_size - size;
1809                 size = preallocate_highmem_fraction(alloc, highmem, count);
1810                 pages_highmem += size;
1811                 alloc -= size;
1812                 size = preallocate_image_memory(alloc, avail_normal);
1813                 pages_highmem += preallocate_image_highmem(alloc - size);
1814                 pages += pages_highmem + size;
1815         }
1816 
1817         /*
1818          * We only need as many page frames for the image as there are saveable
1819          * pages in memory, but we have allocated more.  Release the excessive
1820          * ones now.
1821          */
1822         pages -= free_unnecessary_pages();
1823 
1824  out:
1825         stop = ktime_get();
1826         pr_cont("done (allocated %lu pages)\n", pages);
1827         swsusp_show_speed(start, stop, pages, "Allocated");
1828 
1829         return 0;
1830 
1831  err_out:
1832         pr_cont("\n");
1833         swsusp_free();
1834         return -ENOMEM;
1835 }
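
/*
 * Illustrative example with made-up numbers: if count works out to 1000000
 * usable page frames, the metadata estimate (size) is 2000 pages,
 * PAGES_FOR_IO is 1024 and reserved_size is at most one page, then
 *
 *	max_size = (1000000 - (2000 + 1024)) / 2 - 2 * 1
 *		 = 996976 / 2 - 2
 *		 = 498486 pages
 *
 * and the page count following from image_size is clamped to that value
 * before any memory pressure is applied.
 */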
1836 
1837 #ifdef CONFIG_HIGHMEM
1838 /**
1839  * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
1840  *
1841  * Compute the number of non-highmem pages that will be necessary for creating
1842  * copies of highmem pages.
1843  */
1844 static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
1845 {
1846         unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
1847 
1848         if (free_highmem >= nr_highmem)
1849                 nr_highmem = 0;
1850         else
1851                 nr_highmem -= free_highmem;
1852 
1853         return nr_highmem;
1854 }
1855 #else
1856 static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
1857 #endif /* CONFIG_HIGHMEM */
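
/*
 * Illustrative example: if the image contains 30000 highmem pages but only
 * 10000 highmem page frames are free or already preallocated,
 * count_pages_for_highmem(30000) returns 20000 -- the number of highmem
 * copies that will have to be placed in non-highmem memory instead.
 */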
1858 
1859 /**
1860  * enough_free_mem - Check if there is enough free memory for the image.
1861  */
1862 static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
1863 {
1864         struct zone *zone;
1865         unsigned int free = alloc_normal;
1866 
1867         for_each_populated_zone(zone)
1868                 if (!is_highmem(zone))
1869                         free += zone_page_state(zone, NR_FREE_PAGES);
1870 
1871         nr_pages += count_pages_for_highmem(nr_highmem);
1872         pr_debug("Normal pages needed: %u + %u, available pages: %u\n",
1873                  nr_pages, PAGES_FOR_IO, free);
1874 
1875         return free > nr_pages + PAGES_FOR_IO;
1876 }
1877 
1878 #ifdef CONFIG_HIGHMEM
1879 /**
1880  * get_highmem_buffer - Allocate a buffer for highmem pages.
1881  *
1882  * If there are some highmem pages in the hibernation image, we may need a
1883  * buffer to copy them and/or load their data.
1884  */
1885 static inline int get_highmem_buffer(int safe_needed)
1886 {
1887         buffer = get_image_page(GFP_ATOMIC, safe_needed);
1888         return buffer ? 0 : -ENOMEM;
1889 }
1890 
1891 /**
1892  * alloc_highmem_image_pages - Allocate some highmem pages for the image.
1893  *
1894  * Try to allocate as many pages as needed, but if the number of free highmem
1895  * pages is less than that, allocate them all.
1896  */
1897 static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
1898                                                unsigned int nr_highmem)
1899 {
1900         unsigned int to_alloc = count_free_highmem_pages();
1901 
1902         if (to_alloc > nr_highmem)
1903                 to_alloc = nr_highmem;
1904 
1905         nr_highmem -= to_alloc;
1906         while (to_alloc-- > 0) {
1907                 struct page *page;
1908 
1909                 page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
1910                 memory_bm_set_bit(bm, page_to_pfn(page));
1911         }
1912         return nr_highmem;
1913 }
1914 #else
1915 static inline int get_highmem_buffer(int safe_needed) { return 0; }
1916 
1917 static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
1918                                                unsigned int n) { return 0; }
1919 #endif /* CONFIG_HIGHMEM */
1920 
1921 /**
1922  * swsusp_alloc - Allocate memory for hibernation image.
1923  *
1924  * We first try to allocate as many highmem pages as there are
1925  * saveable highmem pages in the system.  If that fails, we allocate
1926  * non-highmem pages for the copies of the remaining highmem ones.
1927  *
1928  * In this approach it is likely that the copies of highmem pages will
1929  * also be located in the high memory, because of the way in which
1930  * copy_data_pages() works.
1931  */
1932 static int swsusp_alloc(struct memory_bitmap *copy_bm,
1933                         unsigned int nr_pages, unsigned int nr_highmem)
1934 {
1935         if (nr_highmem > 0) {
1936                 if (get_highmem_buffer(PG_ANY))
1937                         goto err_out;
1938                 if (nr_highmem > alloc_highmem) {
1939                         nr_highmem -= alloc_highmem;
1940                         nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
1941                 }
1942         }
1943         if (nr_pages > alloc_normal) {
1944                 nr_pages -= alloc_normal;
1945                 while (nr_pages-- > 0) {
1946                         struct page *page;
1947 
1948                         page = alloc_image_page(GFP_ATOMIC);
1949                         if (!page)
1950                                 goto err_out;
1951                         memory_bm_set_bit(copy_bm, page_to_pfn(page));
1952                 }
1953         }
1954 
1955         return 0;
1956 
1957  err_out:
1958         swsusp_free();
1959         return -ENOMEM;
1960 }
1961 
1962 asmlinkage __visible int swsusp_save(void)
1963 {
1964         unsigned int nr_pages, nr_highmem;
1965 
1966         pr_info("Creating hibernation image:\n");
1967 
1968         drain_local_pages(NULL);
1969         nr_pages = count_data_pages();
1970         nr_highmem = count_highmem_pages();
1971         pr_info("Need to copy %u pages\n", nr_pages + nr_highmem);
1972 
1973         if (!enough_free_mem(nr_pages, nr_highmem)) {
1974                 pr_err("Not enough free memory\n");
1975                 return -ENOMEM;
1976         }
1977 
1978         if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) {
1979                 pr_err("Memory allocation failed\n");
1980                 return -ENOMEM;
1981         }
1982 
1983         /*
1984          * While the suspend pagedir was being allocated, new cold pages
1985          * may have appeared.  Kill them.
1986          */
1987         drain_local_pages(NULL);
1988         copy_data_pages(&copy_bm, &orig_bm);
1989 
1990         /*
1991          * End of critical section.  From now on, we can write to memory,
1992          * but we should not touch disk.  This especially means we must _not_
1993          * touch swap space, except, of course, to write out our image.
1994          */
1995 
1996         nr_pages += nr_highmem;
1997         nr_copy_pages = nr_pages;
1998         nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
1999 
2000         pr_info("Hibernation image created (%d pages copied)\n", nr_pages);
2001 
2002         return 0;
2003 }
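
/*
 * Illustrative example: with 4 KiB pages and 8-byte longs, one metadata page
 * holds PAGE_SIZE / sizeof(long) == 512 PFNs, so an image with 100000 copied
 * pages needs DIV_ROUND_UP(100000 * 8, 4096) == 196 metadata pages.
 */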
2004 
2005 #ifndef CONFIG_ARCH_HIBERNATION_HEADER
2006 static int init_header_complete(struct swsusp_info *info)
2007 {
2008         memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
2009         info->version_code = LINUX_VERSION_CODE;
2010         return 0;
2011 }
2012 
2013 static char *check_image_kernel(struct swsusp_info *info)
2014 {
2015         if (info->version_code != LINUX_VERSION_CODE)
2016                 return "kernel version";
2017         if (strcmp(info->uts.sysname, init_utsname()->sysname))
2018                 return "system type";
2019         if (strcmp(info->uts.release, init_utsname()->release))
2020                 return "kernel release";
2021         if (strcmp(info->uts.version, init_utsname()->version))
2022                 return "version";
2023         if (strcmp(info->uts.machine, init_utsname()->machine))
2024                 return "machine";
2025         return NULL;
2026 }
2027 #endif /* CONFIG_ARCH_HIBERNATION_HEADER */
2028 
2029 unsigned long snapshot_get_image_size(void)
2030 {
2031         return nr_copy_pages + nr_meta_pages + 1;
2032 }
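
/*
 * Illustrative sketch of the stream layout behind the "+ 1" above:
 *
 *	page 0:			header (struct swsusp_info)
 *	pages 1 .. m:		packed PFNs (m == nr_meta_pages)
 *	pages m+1 .. m+n:	page data (n == nr_copy_pages)
 */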
2033 
2034 static int init_header(struct swsusp_info *info)
2035 {
2036         memset(info, 0, sizeof(struct swsusp_info));
2037         info->num_physpages = get_num_physpages();
2038         info->image_pages = nr_copy_pages;
2039         info->pages = snapshot_get_image_size();
2040         info->size = info->pages;
2041         info->size <<= PAGE_SHIFT;
2042         return init_header_complete(info);
2043 }
2044 
2045 /**
2046  * pack_pfns - Prepare PFNs for saving.
2047  * @bm: Memory bitmap.
2048  * @buf: Memory buffer to store the PFNs in.
2049  *
2050  * PFNs corresponding to set bits in @bm are stored in the area of memory
2051  * pointed to by @buf (1 page at a time).
2052  */
2053 static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
2054 {
2055         int j;
2056 
2057         for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2058                 buf[j] = memory_bm_next_pfn(bm);
2059                 if (unlikely(buf[j] == BM_END_OF_MAP))
2060                         break;
2061                 /* Save page key for data page (s390 only). */
2062                 page_key_read(buf + j);
2063         }
2064 }
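
/*
 * Illustrative example: each metadata page produced here is simply an array
 * of PAGE_SIZE / sizeof(long) PFNs taken from @bm in bitmap order, e.g. (with
 * made-up PFN values) on a 64-bit machine with 4 KiB pages:
 *
 *	buf[0] = 0x1a00, buf[1] = 0x1a01, ..., buf[511] = 0x237f
 *
 * with BM_END_OF_MAP terminating the last, possibly partial, page.
 */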
2065 
2066 /**
2067  * snapshot_read_next - Get the address to read the next image page from.
2068  * @handle: Snapshot handle to be used for the reading.
2069  *
2070  * On the first call, @handle should point to a zeroed snapshot_handle
2071  * structure.  The structure gets populated then and a pointer to it should be
2072  * passed to this function every next time.
2073  *
2074  * On success, the function returns a positive number.  Then, the caller
2075  * is allowed to read up to the returned number of bytes from the memory
2076  * location computed by the data_of() macro.
2077  *
2078  * The function returns 0 to indicate the end of the data stream, and
2079  * negative numbers are returned on errors.  If that happens, the structure
2080  * pointed to by @handle is not updated and should not be used any more.
2081  */
2082 int snapshot_read_next(struct snapshot_handle *handle)
2083 {
2084         if (handle->cur > nr_meta_pages + nr_copy_pages)
2085                 return 0;
2086 
2087         if (!buffer) {
2088                 /* This ensures that the buffer is freed by swsusp_free() */
2089                 buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2090                 if (!buffer)
2091                         return -ENOMEM;
2092         }
2093         if (!handle->cur) {
2094                 int error;
2095 
2096                 error = init_header((struct swsusp_info *)buffer);
2097                 if (error)
2098                         return error;
2099                 handle->buffer = buffer;
2100                 memory_bm_position_reset(&orig_bm);
2101                 memory_bm_position_reset(&copy_bm);
2102         } else if (handle->cur <= nr_meta_pages) {
2103                 clear_page(buffer);
2104                 pack_pfns(buffer, &orig_bm);
2105         } else {
2106                 struct page *page;
2107 
2108                 page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
2109                 if (PageHighMem(page)) {
2110                         /*
2111                          * Highmem pages are copied to the buffer,
2112                          * because we can't return with a kmapped
2113                          * highmem page (we may not be called again).
2114                          */
2115                         void *kaddr;
2116 
2117                         kaddr = kmap_atomic(page);
2118                         copy_page(buffer, kaddr);
2119                         kunmap_atomic(kaddr);
2120                         handle->buffer = buffer;
2121                 } else {
2122                         handle->buffer = page_address(page);
2123                 }
2124         }
2125         handle->cur++;
2126         return PAGE_SIZE;
2127 }
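
#if 0
/*
 * Illustrative caller sketch, not part of this file: it is modelled on the
 * way kernel/power/swap.c drives this interface.  Error handling is elided
 * and write_page() stands in for a hypothetical output routine.
 */
static int save_image_sketch(struct snapshot_handle *handle)
{
	int ret;

	memset(handle, 0, sizeof(*handle));	/* first call: zeroed handle */
	while ((ret = snapshot_read_next(handle)) > 0) {
		/* data_of(*handle) now points to ret bytes of image data */
		ret = write_page(data_of(*handle));
		if (ret)
			return ret;
	}
	return ret;	/* 0 at the end of the image, negative on error */
}
#endif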
2128 
2129 static void duplicate_memory_bitmap(struct memory_bitmap *dst,
2130                                     struct memory_bitmap *src)
2131 {
2132         unsigned long pfn;
2133 
2134         memory_bm_position_reset(src);
2135         pfn = memory_bm_next_pfn(src);
2136         while (pfn != BM_END_OF_MAP) {
2137                 memory_bm_set_bit(dst, pfn);
2138                 pfn = memory_bm_next_pfn(src);
2139         }
2140 }
2141 
2142 /**
2143  * mark_unsafe_pages - Mark pages that were used before hibernation.
2144  *
2145  * Mark the pages that cannot be used for storing the image during restoration,
2146  * because they conflict with the pages that had been used before hibernation.
2147  */
2148 static void mark_unsafe_pages(struct memory_bitmap *bm)
2149 {
2150         unsigned long pfn;
2151 
2152         /* Clear the "free"/"unsafe" bit for all PFNs */
2153         memory_bm_position_reset(free_pages_map);
2154         pfn = memory_bm_next_pfn(free_pages_map);
2155         while (pfn != BM_END_OF_MAP) {
2156                 memory_bm_clear_current(free_pages_map);
2157                 pfn = memory_bm_next_pfn(free_pages_map);
2158         }
2159 
2160         /* Mark pages that correspond to the "original" PFNs as "unsafe" */
2161         duplicate_memory_bitmap(free_pages_map, bm);
2162 
2163         allocated_unsafe_pages = 0;
2164 }
2165 
2166 static int check_header(struct swsusp_info *info)
2167 {
2168         char *reason;
2169 
2170         reason = check_image_kernel(info);
2171         if (!reason && info->num_physpages != get_num_physpages())
2172                 reason = "memory size";
2173         if (reason) {
2174                 pr_err("Image mismatch: %s\n", reason);
2175                 return -EPERM;
2176         }
2177         return 0;
2178 }
2179 
2180 /**
2181  * load_header - Check the image header and copy the data from it.
2182  */
2183 static int load_header(struct swsusp_info *info)
2184 {
2185         int error;
2186 
2187         restore_pblist = NULL;
2188         error = check_header(info);
2189         if (!error) {
2190                 nr_copy_pages = info->image_pages;
2191                 nr_meta_pages = info->pages - info->image_pages - 1;
2192         }
2193         return error;
2194 }
2195 
2196 /**
2197  * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
2198  * @bm: Memory bitmap.
2199  * @buf: Area of memory containing the PFNs.
2200  *
2201  * For each element of the array pointed to by @buf (1 page at a time), set the
2202  * corresponding bit in @bm.
2203  */
2204 static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
2205 {
2206         int j;
2207 
2208         for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2209                 if (unlikely(buf[j] == BM_END_OF_MAP))
2210                         break;
2211 
2212                 /* Extract and buffer page key for data page (s390 only). */
2213                 page_key_memorize(buf + j);
2214 
2215                 if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
2216                         memory_bm_set_bit(bm, buf[j]);
2217                 else
2218                         return -EFAULT;
2219         }
2220 
2221         return 0;
2222 }
2223 
2224 #ifdef CONFIG_HIGHMEM
2225 /*
2226  * struct highmem_pbe is used for creating the list of highmem pages that
2227  * should be restored atomically during the resume from disk, because the page
2228  * frames they have occupied before the suspend are in use.
2229  */
2230 struct highmem_pbe {
2231         struct page *copy_page; /* data is here now */
2232         struct page *orig_page; /* data was here before the suspend */
2233         struct highmem_pbe *next;
2234 };
2235 
2236 /*
2237  * List of highmem PBEs needed for restoring the highmem pages that were
2238  * allocated before the suspend and included in the suspend image, but have
2239  * also been allocated by the "resume" kernel, so their contents cannot be
2240  * written directly to their "original" page frames.
2241  */
2242 static struct highmem_pbe *highmem_pblist;
2243 
2244 /**
2245  * count_highmem_image_pages - Compute the number of highmem pages in the image.
2246  * @bm: Memory bitmap.
2247  *
2248  * The bits in @bm that correspond to image pages are assumed to be set.
2249  */
2250 static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
2251 {
2252         unsigned long pfn;
2253         unsigned int cnt = 0;
2254 
2255         memory_bm_position_reset(bm);
2256         pfn = memory_bm_next_pfn(bm);
2257         while (pfn != BM_END_OF_MAP) {
2258                 if (PageHighMem(pfn_to_page(pfn)))
2259                         cnt++;
2260 
2261                 pfn = memory_bm_next_pfn(bm);
2262         }
2263         return cnt;
2264 }
2265 
2266 static unsigned int safe_highmem_pages;
2267 
2268 static struct memory_bitmap *safe_highmem_bm;
2269 
2270 /**
2271  * prepare_highmem_image - Allocate memory for loading highmem data from image.
2272  * @bm: Pointer to an uninitialized memory bitmap structure.
2273  * @nr_highmem_p: Pointer to the number of highmem image pages.
2274  *
2275  * Try to allocate as many highmem pages as there are highmem image pages
2276  * (@nr_highmem_p points to the variable containing the number of highmem image
2277  * pages).  The pages that are "safe" (i.e. will not be overwritten when the
2278  * hibernation image is restored entirely) have the corresponding bits set in
2279  * @bm (it must be uninitialized).
2280  *
2281  * NOTE: This function should not be called if there are no highmem image pages.
2282  */
2283 static int prepare_highmem_image(struct memory_bitmap *bm,
2284                                  unsigned int *nr_highmem_p)
2285 {
2286         unsigned int to_alloc;
2287 
2288         if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
2289                 return -ENOMEM;
2290 
2291         if (get_highmem_buffer(PG_SAFE))
2292                 return -ENOMEM;
2293 
2294         to_alloc = count_free_highmem_pages();
2295         if (to_alloc > *nr_highmem_p)
2296                 to_alloc = *nr_highmem_p;
2297         else
2298                 *nr_highmem_p = to_alloc;
2299 
2300         safe_highmem_pages = 0;
2301         while (to_alloc-- > 0) {
2302                 struct page *page;
2303 
2304                 page = alloc_page(__GFP_HIGHMEM);
2305                 if (!swsusp_page_is_free(page)) {
2306                         /* The page is "safe", set its bit in the bitmap */
2307                         memory_bm_set_bit(bm, page_to_pfn(page));
2308                         safe_highmem_pages++;
2309                 }
2310                 /* Mark the page as allocated */
2311                 swsusp_set_page_forbidden(page);
2312                 swsusp_set_page_free(page);
2313         }
2314         memory_bm_position_reset(bm);
2315         safe_highmem_bm = bm;
2316         return 0;
2317 }
2318 
2319 static struct page *last_highmem_page;
2320 
2321 /**
2322  * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
2323  *
2324  * For a given highmem image page get a buffer that suspend_write_next() should
2325  * return to its caller to write to.
2326  *
2327  * If the page is to be saved to its "original" page frame or a copy of
2328  * the page is to be made in the highmem, @buffer is returned.  Otherwise,
2329  * the copy of the page is to be made in normal memory, so the address of
2330  * the copy is returned.
2331  *
2332  * If @buffer is returned, the caller of suspend_write_next() will write
2333  * the page's contents to @buffer, so they will have to be copied to the
2334  * right location on the next call to suspend_write_next() and it is done
2335  * with the help of copy_last_highmem_page().  For this purpose, if
2336  * @buffer is returned, @last_highmem_page is set to the page to which
2337  * the data will have to be copied from @buffer.
2338  */
2339 static void *get_highmem_page_buffer(struct page *page,
2340                                      struct chain_allocator *ca)
2341 {
2342         struct highmem_pbe *pbe;
2343         void *kaddr;
2344 
2345         if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
2346                 /*
2347                  * We have allocated the "original" page frame and we can
2348                  * use it directly to store the loaded page.
2349                  */
2350                 last_highmem_page = page;
2351                 return buffer;
2352         }
2353         /*
2354          * The "original" page frame has not been allocated and we have to
2355          * use a "safe" page frame to store the loaded page.
2356          */
2357         pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
2358         if (!pbe) {
2359                 swsusp_free();
2360                 return ERR_PTR(-ENOMEM);
2361         }
2362         pbe->orig_page = page;
2363         if (safe_highmem_pages > 0) {
2364                 struct page *tmp;
2365 
2366                 /* Copy of the page will be stored in high memory */
2367                 kaddr = buffer;
2368                 tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
2369                 safe_highmem_pages--;
2370                 last_highmem_page = tmp;
2371                 pbe->copy_page = tmp;
2372         } else {
2373                 /* Copy of the page will be stored in normal memory */
2374                 kaddr = safe_pages_list;
2375                 safe_pages_list = safe_pages_list->next;
2376                 pbe->copy_page = virt_to_page(kaddr);
2377         }
2378         pbe->next = highmem_pblist;
2379         highmem_pblist = pbe;
2380         return kaddr;
2381 }
2382 
2383 /**
2384  * copy_last_highmem_page - Copy the most recent highmem image page.
2385  *
2386  * Copy the contents of a highmem image page from @buffer, where the caller of
2387  * snapshot_write_next() has stored them, to the right location represented by
2388  * @last_highmem_page.
2389  */
2390 static void copy_last_highmem_page(void)
2391 {
2392         if (last_highmem_page) {
2393                 void *dst;
2394 
2395                 dst = kmap_atomic(last_highmem_page);
2396                 copy_page(dst, buffer);
2397                 kunmap_atomic(dst);
2398                 last_highmem_page = NULL;
2399         }
2400 }
2401 
2402 static inline int last_highmem_page_copied(void)
2403 {
2404         return !last_highmem_page;
2405 }
2406 
2407 static inline void free_highmem_data(void)
2408 {
2409         if (safe_highmem_bm)
2410                 memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
2411 
2412         if (buffer)
2413                 free_image_page(buffer, PG_UNSAFE_CLEAR);
2414 }
2415 #else
2416 static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
2417 
2418 static inline int prepare_highmem_image(struct memory_bitmap *bm,
2419                                         unsigned int *nr_highmem_p) { return 0; }
2420 
2421 static inline void *get_highmem_page_buffer(struct page *page,
2422                                             struct chain_allocator *ca)
2423 {
2424         return ERR_PTR(-EINVAL);
2425 }
2426 
2427 static inline void copy_last_highmem_page(void) {}
2428 static inline int last_highmem_page_copied(void) { return 1; }
2429 static inline void free_highmem_data(void) {}
2430 #endif /* CONFIG_HIGHMEM */
2431 
2432 #define PBES_PER_LINKED_PAGE    (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
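
/*
 * Illustrative arithmetic: with 4 KiB pages, LINKED_PAGE_DATA_SIZE is
 * PAGE_SIZE - sizeof(void *) == 4088 bytes, and with a 24-byte struct pbe
 * (three pointers on 64-bit) each linked page holds 4088 / 24 == 170 PBEs.
 */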
2433 
2434 /**
2435  * prepare_image - Make room for loading hibernation image.
2436  * @new_bm: Uninitialized memory bitmap structure.
2437  * @bm: Memory bitmap with unsafe pages marked.
2438  *
2439  * Use @bm to mark the pages that will be overwritten in the process of
2440  * restoring the system memory state from the suspend image ("unsafe" pages)
2441  * and allocate memory for the image.
2442  *
2443  * The idea is to allocate a new memory bitmap first and then allocate
2444  * as many pages as needed for image data, but without specifying what those
2445  * pages will be used for just yet.  Instead, we mark them all as allocated and
2446  * create a list of "safe" pages to be used later.  On systems with high
2447  * memory a list of "safe" highmem pages is created too.
2448  */
2449 static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
2450 {
2451         unsigned int nr_pages, nr_highmem;
2452         struct linked_page *lp;
2453         int error;
2454 
2455         /* If there is no highmem, the buffer will not be necessary */
2456         free_image_page(buffer, PG_UNSAFE_CLEAR);
2457         buffer = NULL;
2458 
2459         nr_highmem = count_highmem_image_pages(bm);
2460         mark_unsafe_pages(bm);
2461 
2462         error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
2463         if (error)
2464                 goto Free;
2465 
2466         duplicate_memory_bitmap(new_bm, bm);
2467         memory_bm_free(bm, PG_UNSAFE_KEEP);
2468         if (nr_highmem > 0) {
2469                 error = prepare_highmem_image(bm, &nr_highmem);
2470                 if (error)
2471                         goto Free;
2472         }
2473         /*
2474          * Reserve some safe pages for potential later use.
2475          *
2476          * NOTE: This way we make sure there will be enough safe pages for the
2477          * chain_alloc() in get_buffer().  It is a bit wasteful, but
2478          * nr_copy_pages cannot be greater than 50% of the memory anyway.
2479          *
2480  * Also, nr_copy_pages cannot be less than allocated_unsafe_pages.
2481          */
2482         nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2483         nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
2484         while (nr_pages > 0) {
2485                 lp = get_image_page(GFP_ATOMIC, PG_SAFE);
2486                 if (!lp) {
2487                         error = -ENOMEM;
2488                         goto Free;
2489                 }
2490                 lp->next = safe_pages_list;
2491                 safe_pages_list = lp;
2492                 nr_pages--;
2493         }
2494         /* Preallocate memory for the image */
2495         nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2496         while (nr_pages > 0) {
2497                 lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
2498                 if (!lp) {
2499                         error = -ENOMEM;
2500                         goto Free;
2501                 }
2502                 if (!swsusp_page_is_free(virt_to_page(lp))) {
2503                         /* The page is "safe", add it to the list */
2504                         lp->next = safe_pages_list;
2505                         safe_pages_list = lp;
2506                 }
2507                 /* Mark the page as allocated */
2508                 swsusp_set_page_forbidden(virt_to_page(lp));
2509                 swsusp_set_page_free(virt_to_page(lp));
2510                 nr_pages--;
2511         }
2512         return 0;
2513 
2514  Free:
2515         swsusp_free();
2516         return error;
2517 }
2518 
2519 /**
2520  * get_buffer - Get the address to store the next image data page.
2521  *
2522  * Get the address that snapshot_write_next() should return to its caller to
2523  * write to.
2524  */
2525 static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
2526 {
2527         struct pbe *pbe;
2528         struct page *page;
2529         unsigned long pfn = memory_bm_next_pfn(bm);
2530 
2531         if (pfn == BM_END_OF_MAP)
2532                 return ERR_PTR(-EFAULT);
2533 
2534         page = pfn_to_page(pfn);
2535         if (PageHighMem(page))
2536                 return get_highmem_page_buffer(page, ca);
2537 
2538         if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
2539                 /*
2540                  * We have allocated the "original" page frame and we can
2541                  * use it directly to store the loaded page.
2542                  */
2543                 return page_address(page);
2544 
2545         /*
2546          * The "original" page frame has not been allocated and we have to
2547          * use a "safe" page frame to store the loaded page.
2548          */
2549         pbe = chain_alloc(ca, sizeof(struct pbe));
2550         if (!pbe) {
2551                 swsusp_free();
2552                 return ERR_PTR(-ENOMEM);
2553         }
2554         pbe->orig_address = page_address(page);
2555         pbe->address = safe_pages_list;
2556         safe_pages_list = safe_pages_list->next;
2557         pbe->next = restore_pblist;
2558         restore_pblist = pbe;
2559         return pbe->address;
2560 }
2561 
2562 /**
2563  * snapshot_write_next - Get the address to store the next image page.
2564  * @handle: Snapshot handle structure to guide the writing.
2565  *
2566  * On the first call, @handle should point to a zeroed snapshot_handle
2567  * structure.  The structure gets populated then and a pointer to it should be
2568  * passed to this function every next time.
2569  *
2570  * On success, the function returns a positive number.  Then, the caller
2571  * is allowed to write up to the returned number of bytes to the memory
2572  * location computed by the data_of() macro.
2573  *
2574  * The function returns 0 to indicate the "end of file" condition.  Negative
2575  * numbers are returned on errors, in which case the structure pointed to by
2576  * @handle is not updated and should not be used any more.
2577  */
2578 int snapshot_write_next(struct snapshot_handle *handle)
2579 {
2580         static struct chain_allocator ca;
2581         int error = 0;
2582 
2583         /* Check if we have already loaded the entire image */
2584         if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
2585                 return 0;
2586 
2587         handle->sync_read = 1;
2588 
2589         if (!handle->cur) {
2590                 if (!buffer)
2591                         /* This ensures that the buffer is freed by swsusp_free() */
2592                         buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2593 
2594                 if (!buffer)
2595                         return -ENOMEM;
2596 
2597                 handle->buffer = buffer;
2598         } else if (handle->cur == 1) {
2599                 error = load_header(buffer);
2600                 if (error)
2601                         return error;
2602 
2603                 safe_pages_list = NULL;
2604 
2605                 error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
2606                 if (error)
2607                         return error;
2608 
2609                 /* Allocate buffer for page keys. */
2610                 error = page_key_alloc(nr_copy_pages);
2611                 if (error)
2612                         return error;
2613 
2614                 hibernate_restore_protection_begin();
2615         } else if (handle->cur <= nr_meta_pages + 1) {
2616                 error = unpack_orig_pfns(buffer, &copy_bm);
2617                 if (error)
2618                         return error;
2619 
2620                 if (handle->cur == nr_meta_pages + 1) {
2621                         error = prepare_image(&orig_bm, &copy_bm);
2622                         if (error)
2623                                 return error;
2624 
2625                         chain_init(&ca, GFP_ATOMIC, PG_SAFE);
2626                         memory_bm_position_reset(&orig_bm);
2627                         restore_pblist = NULL;
2628                         handle->buffer = get_buffer(&orig_bm, &ca);
2629                         handle->sync_read = 0;
2630                         if (IS_ERR(handle->buffer))
2631                                 return PTR_ERR(handle->buffer);
2632                 }
2633         } else {
2634                 copy_last_highmem_page();
2635                 /* Restore page key for data page (s390 only). */
2636                 page_key_write(handle->buffer);
2637                 hibernate_restore_protect_page(handle->buffer);
2638                 handle->buffer = get_buffer(&orig_bm, &ca);
2639                 if (IS_ERR(handle->buffer))
2640                         return PTR_ERR(handle->buffer);
2641                 if (handle->buffer != buffer)
2642                         handle->sync_read = 0;
2643         }
2644         handle->cur++;
2645         return PAGE_SIZE;
2646 }
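
#if 0
/*
 * Illustrative caller sketch, not part of this file: it is modelled on the
 * image-loading loop in kernel/power/swap.c.  Error handling is elided and
 * read_page() stands in for a hypothetical input routine.
 */
static int load_image_sketch(struct snapshot_handle *handle)
{
	int ret;

	memset(handle, 0, sizeof(*handle));	/* first call: zeroed handle */
	for (;;) {
		ret = snapshot_write_next(handle);
		if (ret <= 0)
			break;	/* 0: image complete, negative: error */
		/* fill the ret bytes at data_of(*handle) from storage */
		ret = read_page(data_of(*handle));
		if (ret)
			break;
	}
	if (!ret)
		snapshot_write_finalize(handle);
	return ret;
}
#endif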
2647 
2648 /**
2649  * snapshot_write_finalize - Complete the loading of a hibernation image.
2650  *
2651  * Must be called after the last call to snapshot_write_next() in case the last
2652  * page in the image happens to be a highmem page and its contents should be
2653  * stored in highmem.  Additionally, it recycles bitmap memory that's not
2654  * necessary any more.
2655  */
2656 void snapshot_write_finalize(struct snapshot_handle *handle)
2657 {
2658         copy_last_highmem_page();
2659         /* Restore page key for data page (s390 only). */
2660         page_key_write(handle->buffer);
2661         page_key_free();
2662         hibernate_restore_protect_page(handle->buffer);
2663         /* Do that only if we have loaded the image entirely */
2664         if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
2665                 memory_bm_recycle(&orig_bm);
2666                 free_highmem_data();
2667         }
2668 }
2669 
2670 int snapshot_image_loaded(struct snapshot_handle *handle)
2671 {
2672         return !(!nr_copy_pages || !last_highmem_page_copied() ||
2673                         handle->cur <= nr_meta_pages + nr_copy_pages);
2674 }
2675 
2676 #ifdef CONFIG_HIGHMEM
2677 /* Assumes that @buf is ready and points to a "safe" page */
2678 static inline void swap_two_pages_data(struct page *p1, struct page *p2,
2679                                        void *buf)
2680 {
2681         void *kaddr1, *kaddr2;
2682 
2683         kaddr1 = kmap_atomic(p1);
2684         kaddr2 = kmap_atomic(p2);
2685         copy_page(buf, kaddr1);
2686         copy_page(kaddr1, kaddr2);
2687         copy_page(kaddr2, buf);
2688         kunmap_atomic(kaddr2);
2689         kunmap_atomic(kaddr1);
2690 }
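
/*
 * Illustrative note: this is the classic three-copy swap with @buf as the
 * temporary, spelled out for ordinary variables as
 *
 *	buf = a;  a = b;  b = buf;
 *
 * done through kmap_atomic() because highmem pages may have no permanent
 * kernel mapping to copy through.
 */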
2691 
2692 /**
2693  * restore_highmem - Put highmem image pages into their original locations.
2694  *
2695  * For each highmem page that was in use before hibernation and is included in
2696  * the image, and also has been allocated by the "restore" kernel, swap its
2697  * current contents with the previous (i.e. "before hibernation") ones.
2698  *
2699  * If the restore eventually fails, we can call this function once again and
2700  * restore the highmem state as seen by the restore kernel.
2701  */
2702 int restore_highmem(void)
2703 {
2704         struct highmem_pbe *pbe = highmem_pblist;
2705         void *buf;
2706 
2707         if (!pbe)
2708                 return 0;
2709 
2710         buf = get_image_page(GFP_ATOMIC, PG_SAFE);
2711         if (!buf)
2712                 return -ENOMEM;
2713 
2714         while (pbe) {
2715                 swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
2716                 pbe = pbe->next;
2717         }
2718         free_image_page(buf, PG_UNSAFE_CLEAR);
2719         return 0;
2720 }
2721 #endif /* CONFIG_HIGHMEM */
2722 
