#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct * mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on, and mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is the preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}

/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
static inline void mmdrop(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}

static inline void mmdrop_async_fn(struct work_struct *work)
{
	struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
	__mmdrop(mm);
}

static inline void mmdrop_async(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
		INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
		schedule_work(&mm->async_put_work);
	}
}

/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/*
 * Same as above, but performs the slow path from async context. Can
 * also be called from atomic context.
 */
extern void mmput_async(struct mm_struct *);
#endif

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access with the mode parameter passed to it
 * succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current task's stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif

static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * We need RCU to access ->real_parent if CLONE_VM was used along
	 * with CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM.
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check; it can be a false negative. But we do not care: if init or
	 * another oom-unkillable task does this it should blame itself.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}

/*
 * Applies per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO implies GFP_NOIO
 * PF_MEMALLOC_NOFS implies GFP_NOFS
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
	/*
	 * NOIO implies both NOIO and NOFS; it is the more restrictive
	 * context, so always make sure it takes precedence.
	 */
	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
		flags &= ~(__GFP_IO | __GFP_FS);
	else if (unlikely(current->flags & PF_MEMALLOC_NOFS))
		flags &= ~__GFP_FS;
	return flags;
}

static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}

static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}

static inline unsigned int memalloc_nofs_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
	current->flags |= PF_MEMALLOC_NOFS;
	return flags;
}

static inline void memalloc_nofs_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
}

static inline unsigned int memalloc_noreclaim_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	return flags;
}

static inline void memalloc_noreclaim_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}

#endif /* _LINUX_SCHED_MM_H */
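
The two reference counts above split the lifetime of an mm in two: mm_count
(mmgrab()/mmdrop()) keeps the struct mm_struct itself allocated, while
mm_users (mmget()/mmput()) keeps the address space behind it alive. A minimal
sketch of the long-term pinning pattern the kernel-doc describes follows; the
pin_ctx structure and function names are hypothetical, not part of this
header, and it assumes a user task (current->mm != NULL):

/* Hypothetical consumer that stashes an mm for unbounded later use. */
struct pin_ctx {
	struct mm_struct *mm;
};

static void pin_ctx_init(struct pin_ctx *ctx)
{
	ctx->mm = current->mm;
	mmgrab(ctx->mm);	/* pins the mm_struct, not the address space */
}

static void pin_ctx_use(struct pin_ctx *ctx)
{
	if (mmget_not_zero(ctx->mm)) {
		/* mm_users is now pinned; the address space is safe to use */
		mmput(ctx->mm);
	}
	/* else: the owner exited and the address space is already gone */
}

static void pin_ctx_destroy(struct pin_ctx *ctx)
{
	mmdrop(ctx->mm);	/* releases the mmgrab() reference */
}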
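
get_task_mm() combines the lookup and the mm_users pin in one call: it
returns NULL for kernel threads and for tasks whose address space is already
being torn down, so the result must be checked and balanced with mmput(). A
short sketch under those assumptions; the function name and error code are
illustrative only:

/* Hypothetical helper: briefly inspect another task's address space. */
static int inspect_task_mm(struct task_struct *task)
{
	struct mm_struct *mm = get_task_mm(task);

	if (!mm)	/* kernel thread, or mm_users already dropped to zero */
		return -EINVAL;

	/* mm_users is pinned; inspect the mm under the usual locking rules */

	mmput(mm);	/* release the reference taken by get_task_mm() */
	return 0;
}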
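
The memalloc_*_save()/restore() pairs implement a scoped allocation context:
the save side records whether the flag was already set before setting it, and
the restore side puts the old bit back, so sections nest safely. Inside such
a section the allocator narrows gfp masks via current_gfp_context(). A
sketch, assuming <linux/slab.h> for kmalloc(); the function name is
hypothetical:

#include <linux/slab.h>	/* assumed, for kmalloc() */

/* Hypothetical filesystem path that must not recurse into FS reclaim. */
static void *alloc_in_transaction(size_t size)
{
	unsigned int noflags;
	void *p;

	noflags = memalloc_nofs_save();	/* set PF_MEMALLOC_NOFS, remember old bit */
	/*
	 * GFP_KERNEL here effectively behaves as GFP_NOFS:
	 * current_gfp_context() masks __GFP_FS out for this task.
	 */
	p = kmalloc(size, GFP_KERNEL);
	memalloc_nofs_restore(noflags);	/* restore the previous PF_MEMALLOC_NOFS bit */
	return p;
}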