~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/include/linux/migrate.h

Version: ~ [ linux-5.3-rc1 ] ~ [ linux-5.2.2 ] ~ [ linux-5.1.19 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.60 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.134 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.186 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.186 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.19.8 ] ~ [ linux-3.18.140 ] ~ [ linux-3.17.8 ] ~ [ linux-3.16.70 ] ~ [ linux-3.15.10 ] ~ [ linux-3.14.79 ] ~ [ linux-3.13.11 ] ~ [ linux-3.12.74 ] ~ [ linux-3.11.10 ] ~ [ linux-3.10.108 ] ~ [ linux-3.9.11 ] ~ [ linux-3.8.13 ] ~ [ linux-3.7.10 ] ~ [ linux-3.6.11 ] ~ [ linux-3.5.7 ] ~ [ linux-3.4.113 ] ~ [ linux-3.3.8 ] ~ [ linux-3.2.102 ] ~ [ linux-3.1.10 ] ~ [ linux-3.0.101 ] ~ [ linux-2.6.39.4 ] ~ [ linux-2.6.38.8 ] ~ [ linux-2.6.37.6 ] ~ [ linux-2.6.36.4 ] ~ [ linux-2.6.35.14 ] ~ [ linux-2.6.34.15 ] ~ [ linux-2.6.33.20 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>

/*
 * Allocation callback handed to migrate_pages()/migrate_huge_page(): given
 * the page being migrated and the caller-supplied @private cookie, it
 * returns the target page to migrate into.
 * NOTE(review): the failure convention (NULL vs. other) is not visible in
 * this header -- confirm against mm/migrate.c before relying on it.
 */
typedef struct page *new_page_t(struct page *, unsigned long private, int **);
  9 
/*
 * Return values from address_space_operations.migratepage():
 * - negative errno on page migration failure;
 * - zero on page migration success;
 *
 * The balloon page migration introduces this special case where a 'distinct'
 * return code is used to flag a successful page migration to unmap_and_move().
 * This approach is necessary because page migration can race against balloon
 * deflation procedure, and for such case we could introduce a nasty page leak
 * if a successfully migrated balloon page gets released concurrently with
 * migration's unmap_and_move() wrap-up steps.
 */
#define MIGRATEPAGE_SUCCESS             0
#define MIGRATEPAGE_BALLOON_SUCCESS     1 /* special ret code for balloon page
                                           * successful migration case.
                                           */
/*
 * Why a set of pages is being migrated; passed as the (int-typed) 'reason'
 * argument of migrate_pages() below.  Purely a bookkeeping tag as far as
 * this header shows -- the values are not otherwise interpreted here.
 */
enum migrate_reason {
	MR_COMPACTION,
	MR_MEMORY_FAILURE,
	MR_MEMORY_HOTPLUG,
	MR_SYSCALL,		/* also applies to cpusets */
	MR_MEMPOLICY_MBIND,
	MR_NUMA_MISPLACED,
	MR_CMA
};
 35 
#ifdef CONFIG_MIGRATION

/* Return previously isolated pages on @l to the lists they came from. */
extern void putback_lru_pages(struct list_head *l);
extern void putback_movable_pages(struct list_head *l);
/*
 * Generic ->migratepage() implementation; returns the MIGRATEPAGE_* codes
 * defined above (negative errno on failure).
 */
extern int migrate_page(struct address_space *,
			struct page *, struct page *, enum migrate_mode);
/*
 * Migrate the pages on list @l, obtaining each target page from callback
 * @x (invoked with @private).  @reason takes an enum migrate_reason value.
 */
extern int migrate_pages(struct list_head *l, new_page_t x,
			unsigned long private, bool offlining,
			enum migrate_mode mode, int reason);
extern int migrate_huge_page(struct page *, new_page_t x,
			unsigned long private, bool offlining,
			enum migrate_mode mode);

/* ->migratepage() implementation that always fails -- see the NULL
 * fallbacks in the !CONFIG_MIGRATION branch of this header. */
extern int fail_migrate_page(struct address_space *,
			struct page *, struct page *);

/* Preparation hooks called before a batch of migrations.
 * NOTE(review): exact semantics live in mm/migrate.c, not visible here. */
extern int migrate_prep(void);
extern int migrate_prep_local(void);
extern int migrate_vmas(struct mm_struct *mm,
		const nodemask_t *from, const nodemask_t *to,
		unsigned long flags);
/* Copy contents/state from @page into the already-allocated @newpage. */
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page);
#else

/*
 * CONFIG_MIGRATION=n: provide no-op and -ENOSYS stubs so that callers
 * compile without sprinkling #ifdef CONFIG_MIGRATION at every call site.
 */
static inline void putback_lru_pages(struct list_head *l) {}
static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t x,
		unsigned long private, bool offlining,
		enum migrate_mode mode, int reason) { return -ENOSYS; }
static inline int migrate_huge_page(struct page *page, new_page_t x,
		unsigned long private, bool offlining,
		enum migrate_mode mode) { return -ENOSYS; }

static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }

static inline int migrate_vmas(struct mm_struct *mm,
		const nodemask_t *from, const nodemask_t *to,
		unsigned long flags)
{
	return -ENOSYS;
}

static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

/* Possible settings for the migrate_page() method in address_space_operations */
#define migrate_page NULL
#define fail_migrate_page NULL

#endif /* CONFIG_MIGRATION */
 95 
 96 #ifdef CONFIG_NUMA_BALANCING
 97 extern int migrate_misplaced_page(struct page *page, int node);
 98 extern int migrate_misplaced_page(struct page *page, int node);
 99 extern bool migrate_ratelimited(int node);
100 #else
101 static inline int migrate_misplaced_page(struct page *page, int node)
102 {
103         return -EAGAIN; /* can't migrate now */
104 }
105 static inline bool migrate_ratelimited(int node)
106 {
107         return false;
108 }
109 #endif /* CONFIG_NUMA_BALANCING */
110 
#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
 * Attempt to migrate a NUMA-misplaced transparent huge page to @node.
 * NOTE(review): locking expectations for @pmd/@entry are defined in
 * mm/migrate.c, not visible from this header.
 */
extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node);
#else
/* THP NUMA migration unavailable: always report "can't migrate now". */
static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node)
{
	return -EAGAIN;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */
127 
128 #endif /* _LINUX_MIGRATE_H */
129 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp