
TOMOYO Linux Cross Reference
Linux/mm/mmu_notifier.c


/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <cl@linux.com>
 *
 *  This work is licensed under the terms of the GNU GPL, version 2. See
 *  the COPYING file in the top-level directory.
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
DEFINE_STATIC_SRCU(srcu);

/*
 * This function allows the mmu_notifier ->release callback to delay a
 * call to a function that will free the appropriate resources. The
 * function must be quick and must not block.
 */
void mmu_notifier_call_srcu(struct rcu_head *rcu,
			    void (*func)(struct rcu_head *rcu))
{
	call_srcu(&srcu, rcu, func);
}
EXPORT_SYMBOL_GPL(mmu_notifier_call_srcu);
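
/*
 * Illustrative sketch (not part of the original file): one hedged way a
 * hypothetical driver's ->release callback could use
 * mmu_notifier_call_srcu() to defer freeing its notifier until all SRCU
 * readers are done. All "foo" names are invented for this example.
 */
#if 0	/* example only */
struct foo_notifier {
	struct mmu_notifier mn;
	struct rcu_head rcu;
};

static void foo_free_notifier(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct foo_notifier, rcu));
}

static void foo_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct foo_notifier *f = container_of(mn, struct foo_notifier, mn);

	/* ->release must be quick and non-blocking, so defer the kfree */
	mmu_notifier_call_srcu(&f->rcu, foo_free_notifier);
}
#endif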

void mmu_notifier_synchronize(void)
{
	/* Wait for any running method to finish. */
	srcu_barrier(&srcu);
}
EXPORT_SYMBOL_GPL(mmu_notifier_synchronize);

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the mmu_notifier_mm->lock in addition to SRCU, and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier *mn;
	int id;

	/*
	 * SRCU here will block mmu_notifier_unregister until
	 * ->release returns.
	 */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist)
		/*
		 * If ->release runs before mmu_notifier_unregister it must be
		 * handled, as it's the only way for the driver to flush all
		 * existing sptes and stop the driver from establishing any more
		 * sptes before all the pages in the mm are freed.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);

	spin_lock(&mm->mmu_notifier_mm->lock);
	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
		mn = hlist_entry(mm->mmu_notifier_mm->list.first,
				 struct mmu_notifier,
				 hlist);
		/*
		 * We arrived before mmu_notifier_unregister so
		 * mmu_notifier_unregister will do nothing other than to wait
		 * for ->release to finish and for mmu_notifier_unregister to
		 * return.
		 */
		hlist_del_init_rcu(&mn->hlist);
	}
	spin_unlock(&mm->mmu_notifier_mm->lock);
	srcu_read_unlock(&srcu, id);

	/*
	 * synchronize_srcu here prevents mmu_notifier_release from returning to
	 * exit_mmap (which would proceed with freeing all pages in the mm)
	 * until the ->release method returns, if it was invoked by
	 * mmu_notifier_unregister.
	 *
	 * The mmu_notifier_mm can't go away from under us because one mm_count
	 * is held by exit_mmap.
	 */
	synchronize_srcu(&srcu);
}
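
/*
 * Context note (a reconstructed sketch, not part of this file):
 * __mmu_notifier_release() is reached from exit_mmap() through an
 * inline wrapper in include/linux/mmu_notifier.h of roughly this
 * shape; the header is authoritative.
 */
#if 0	/* example only */
static inline void mmu_notifier_release(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_release(mm);
}
#endif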

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young
 * can unmap the address and return 1 or 0 depending on whether the
 * mapping previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_flush_young)
			young |= mn->ops->clear_flush_young(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_clear_young(struct mm_struct *mm,
			       unsigned long start,
			       unsigned long end)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_young)
			young |= mn->ops->clear_young(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->test_young) {
			young = mn->ops->test_young(mn, mm, address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->change_pte)
			mn->ops->change_pte(mn, mm, address, pte);
	}
	srcu_read_unlock(&srcu, id);
}

int __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
				  unsigned long start, unsigned long end,
				  bool blockable)
{
	struct mmu_notifier *mn;
	int ret = 0;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_start) {
			int _ret = mn->ops->invalidate_range_start(mn, mm, start, end, blockable);
			if (_ret) {
				pr_info("%pS callback failed with %d in %sblockable context.\n",
						mn->ops->invalidate_range_start, _ret,
						!blockable ? "non-" : "");
				ret = _ret;
			}
		}
	}
	srcu_read_unlock(&srcu, id);

	return ret;
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_start);
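
/*
 * Context note (a reconstructed sketch, not part of this file): mm code
 * reaches this via inline wrappers in include/linux/mmu_notifier.h.
 * Roughly, the blockable path ignores the return value while the
 * nonblocking path may fail; the header is authoritative.
 */
#if 0	/* example only */
static inline void
mmu_notifier_invalidate_range_start(struct mm_struct *mm,
				    unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range_start(mm, start, end, true);
}

static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mm_struct *mm,
					     unsigned long start,
					     unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_invalidate_range_start(mm, start, end, false);
	return 0;
}
#endif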

void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
					 unsigned long start,
					 unsigned long end,
					 bool only_end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		/*
		 * Call invalidate_range here too, so that a subsystem that
		 * has registered invalidate_range does not also need to
		 * register an invalidate_range_end callback. Usually a
		 * subsystem registers either invalidate_range_start()/end()
		 * or invalidate_range(), so this adds no overhead beyond the
		 * pointer check.
		 *
		 * We skip the call to invalidate_range() when we know it is
		 * safe, i.e. when the call site used
		 * mmu_notifier_invalidate_range_only_end(), which is only
		 * done when a call to invalidate_range() already happened
		 * under the page table lock.
		 */
		if (!only_end && mn->ops->invalidate_range)
			mn->ops->invalidate_range(mn, mm, start, end);
		if (mn->ops->invalidate_range_end)
			mn->ops->invalidate_range_end(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_end);
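
/*
 * Context note (a reconstructed sketch, not part of this file): the
 * only_end distinction above corresponds to two header wrappers of
 * roughly this shape; the header is authoritative.
 */
#if 0	/* example only */
static inline void
mmu_notifier_invalidate_range_end(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range_end(mm, start, end, false);
}

static inline void
mmu_notifier_invalidate_range_only_end(struct mm_struct *mm,
				       unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range_end(mm, start, end, true);
}
#endif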

void __mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range)
			mn->ops->invalidate_range(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range);

static int do_mmu_notifier_register(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    int take_mmap_sem)
{
	struct mmu_notifier_mm *mmu_notifier_mm;
	int ret;

	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	ret = -ENOMEM;
	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
	if (unlikely(!mmu_notifier_mm))
		goto out;

	if (take_mmap_sem)
		down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_clean;

	if (!mm_has_notifiers(mm)) {
		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
		spin_lock_init(&mmu_notifier_mm->lock);

		mm->mmu_notifier_mm = mmu_notifier_mm;
		mmu_notifier_mm = NULL;
	}
	mmgrab(mm);

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 */
	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	mm_drop_all_locks(mm);
out_clean:
	if (take_mmap_sem)
		up_write(&mm->mmap_sem);
	kfree(mmu_notifier_mm);
out:
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return ret;
}

/*
 * Must not hold mmap_sem nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns. mmu_notifier_unregister must always be called to
 * unregister the notifier. mm_count is automatically pinned to allow
 * mmu_notifier_unregister to safely run at any time later, before or
 * after exit_mmap. ->release will always be called before exit_mmap
 * frees the pages.
 */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 1);
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
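
/*
 * Illustrative sketch (not part of the original file): a hedged example
 * of registration from a hypothetical driver, following the rules in
 * the comment above: pin mm_users (here via get_task_mm()), register
 * without holding mmap_sem, then drop the mm_users pin. All "foo"
 * names are invented; the foo callbacks are assumed defined elsewhere.
 */
#if 0	/* example only */
static const struct mmu_notifier_ops foo_notifier_ops = {
	.release		= foo_release,
	.clear_flush_young	= foo_clear_flush_young,
	.invalidate_range_start	= foo_invalidate_range_start,
	.invalidate_range_end	= foo_invalidate_range_end,
};

static int foo_attach(struct task_struct *task, struct foo_notifier *f)
{
	struct mm_struct *mm = get_task_mm(task);
	int ret;

	if (!mm)
		return -ESRCH;
	f->mn.ops = &foo_notifier_ops;
	ret = mmu_notifier_register(&f->mn, mm);
	/* on success the notifier holds its own mm_count pin */
	mmput(mm);
	return ret;
}
#endif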

/*
 * Same as mmu_notifier_register but here the caller must hold the
 * mmap_sem in write mode.
 */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 0);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
	kfree(mm->mmu_notifier_mm);
	mm->mmu_notifier_mm = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister has returned are we
 * guaranteed that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	if (!hlist_unhashed(&mn->hlist)) {
		/*
		 * SRCU here will force exit_mmap to wait for ->release to
		 * finish before freeing the pages.
		 */
		int id;

		id = srcu_read_lock(&srcu);
		/*
		 * exit_mmap will block in mmu_notifier_release to guarantee
		 * that ->release is called before freeing the pages.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
		srcu_read_unlock(&srcu, id);

		spin_lock(&mm->mmu_notifier_mm->lock);
		/*
		 * Can not use list_del_rcu() since __mmu_notifier_release
		 * can delete it before we hold the lock.
		 */
		hlist_del_init_rcu(&mn->hlist);
		spin_unlock(&mm->mmu_notifier_mm->lock);
	}

	/*
	 * Wait for any running method to finish, of course including
	 * ->release if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
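
/*
 * Illustrative sketch (not part of the original file): teardown for the
 * hypothetical driver above. After mmu_notifier_unregister() returns,
 * no notifier method can still be running, so the notifier may be freed
 * directly here. A driver whose ->release frees the notifier itself (as
 * sketched near the top of this file) must not free it again and would
 * typically pair that pattern with mmu_notifier_unregister_no_release()
 * below instead.
 */
#if 0	/* example only */
static void foo_detach(struct foo_notifier *f, struct mm_struct *mm)
{
	mmu_notifier_unregister(&f->mn, mm);	/* also drops the mm_count pin */
	kfree(f);
}
#endif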

/*
 * Same as mmu_notifier_unregister but no callback and no srcu synchronization.
 */
void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
					struct mm_struct *mm)
{
	spin_lock(&mm->mmu_notifier_mm->lock);
	/*
	 * Can not use list_del_rcu() since __mmu_notifier_release
	 * can delete it before we hold the lock.
	 */
	hlist_del_init_rcu(&mn->hlist);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);
	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister_no_release);
