TOMOYO Linux Cross Reference
Linux/fs/dax.c

  1 /*
  2  * fs/dax.c - Direct Access filesystem code
  3  * Copyright (c) 2013-2014 Intel Corporation
  4  * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
  5  * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
  6  *
  7  * This program is free software; you can redistribute it and/or modify it
  8  * under the terms and conditions of the GNU General Public License,
  9  * version 2, as published by the Free Software Foundation.
 10  *
 11  * This program is distributed in the hope it will be useful, but WITHOUT
 12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 14  * more details.
 15  */
 16 
 17 #include <linux/atomic.h>
 18 #include <linux/blkdev.h>
 19 #include <linux/buffer_head.h>
 20 #include <linux/dax.h>
 21 #include <linux/fs.h>
 22 #include <linux/genhd.h>
 23 #include <linux/highmem.h>
 24 #include <linux/memcontrol.h>
 25 #include <linux/mm.h>
 26 #include <linux/mutex.h>
 27 #include <linux/pagevec.h>
 28 #include <linux/pmem.h>
 29 #include <linux/sched.h>
 30 #include <linux/sched/signal.h>
 31 #include <linux/uio.h>
 32 #include <linux/vmstat.h>
 33 #include <linux/pfn_t.h>
 34 #include <linux/sizes.h>
 35 #include <linux/mmu_notifier.h>
 36 #include <linux/iomap.h>
 37 #include "internal.h"
 38 
 39 #define CREATE_TRACE_POINTS
 40 #include <trace/events/fs_dax.h>
 41 
 42 /* We choose 4096 entries - same as per-zone page wait tables */
 43 #define DAX_WAIT_TABLE_BITS 12
 44 #define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
 45 
 46 static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
 47 
 48 static int __init init_dax_wait_table(void)
 49 {
 50         int i;
 51 
 52         for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
 53                 init_waitqueue_head(wait_table + i);
 54         return 0;
 55 }
 56 fs_initcall(init_dax_wait_table);
 57 
 58 static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
 59 {
 60         struct request_queue *q = bdev->bd_queue;
 61         long rc = -EIO;
 62 
 63         dax->addr = ERR_PTR(-EIO);
 64         if (blk_queue_enter(q, true) != 0)
 65                 return rc;
 66 
 67         rc = bdev_direct_access(bdev, dax);
 68         if (rc < 0) {
 69                 dax->addr = ERR_PTR(rc);
 70                 blk_queue_exit(q);
 71                 return rc;
 72         }
 73         return rc;
 74 }
 75 
 76 static void dax_unmap_atomic(struct block_device *bdev,
 77                 const struct blk_dax_ctl *dax)
 78 {
 79         if (IS_ERR(dax->addr))
 80                 return;
 81         blk_queue_exit(bdev->bd_queue);
 82 }
 83 
 84 static int dax_is_pmd_entry(void *entry)
 85 {
 86         return (unsigned long)entry & RADIX_DAX_PMD;
 87 }
 88 
 89 static int dax_is_pte_entry(void *entry)
 90 {
 91         return !((unsigned long)entry & RADIX_DAX_PMD);
 92 }
 93 
 94 static int dax_is_zero_entry(void *entry)
 95 {
 96         return (unsigned long)entry & RADIX_DAX_HZP;
 97 }
 98 
 99 static int dax_is_empty_entry(void *entry)
100 {
101         return (unsigned long)entry & RADIX_DAX_EMPTY;
102 }
103 
104 struct page *read_dax_sector(struct block_device *bdev, sector_t n)
105 {
106         struct page *page = alloc_pages(GFP_KERNEL, 0);
107         struct blk_dax_ctl dax = {
108                 .size = PAGE_SIZE,
109                 .sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
110         };
111         long rc;
112 
113         if (!page)
114                 return ERR_PTR(-ENOMEM);
115 
116         rc = dax_map_atomic(bdev, &dax);
117         if (rc < 0)
118                 return ERR_PTR(rc);
119         memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
120         dax_unmap_atomic(bdev, &dax);
121         return page;
122 }
123 
124 /*
125  * DAX radix tree locking
126  */
127 struct exceptional_entry_key {
128         struct address_space *mapping;
129         pgoff_t entry_start;
130 };
131 
132 struct wait_exceptional_entry_queue {
133         wait_queue_t wait;
134         struct exceptional_entry_key key;
135 };
136 
137 static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
138                 pgoff_t index, void *entry, struct exceptional_entry_key *key)
139 {
140         unsigned long hash;
141 
142         /*
143          * If 'entry' is a PMD, align the 'index' that we use for the wait
144          * queue to the start of that PMD.  This ensures that all offsets in
145          * the range covered by the PMD map to the same bit lock.
146          */
147         if (dax_is_pmd_entry(entry))
148                 index &= ~((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1);
149 
150         key->mapping = mapping;
151         key->entry_start = index;
152 
153         hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
154         return wait_table + hash;
155 }
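
/*
 * Worked example (illustrative, assuming 4 KiB pages and 2 MiB PMDs, so
 * PMD_SHIFT - PAGE_SHIFT == 9): the mask above is ~0x1ffUL.  An index of
 * 0x203 that belongs to a PMD entry is therefore rounded down to 0x200, and
 * every index in the range 0x200..0x3ff hashes to the same waitqueue bucket,
 * while PTE entries keep their exact index and may land in different buckets.
 */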
156 
157 static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
158                                        int sync, void *keyp)
159 {
160         struct exceptional_entry_key *key = keyp;
161         struct wait_exceptional_entry_queue *ewait =
162                 container_of(wait, struct wait_exceptional_entry_queue, wait);
163 
164         if (key->mapping != ewait->key.mapping ||
165             key->entry_start != ewait->key.entry_start)
166                 return 0;
167         return autoremove_wake_function(wait, mode, sync, NULL);
168 }
169 
170 /*
171  * Check whether the given slot is locked. The function must be called with
172  * mapping->tree_lock held
173  */
174 static inline int slot_locked(struct address_space *mapping, void **slot)
175 {
176         unsigned long entry = (unsigned long)
177                 radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
178         return entry & RADIX_DAX_ENTRY_LOCK;
179 }
180 
181 /*
 182  * Mark the given slot as locked. The function must be called with
183  * mapping->tree_lock held
184  */
185 static inline void *lock_slot(struct address_space *mapping, void **slot)
186 {
187         unsigned long entry = (unsigned long)
188                 radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
189 
190         entry |= RADIX_DAX_ENTRY_LOCK;
191         radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
192         return (void *)entry;
193 }
194 
195 /*
 196  * Mark the given slot as unlocked. The function must be called with
197  * mapping->tree_lock held
198  */
199 static inline void *unlock_slot(struct address_space *mapping, void **slot)
200 {
201         unsigned long entry = (unsigned long)
202                 radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
203 
204         entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
205         radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
206         return (void *)entry;
207 }
208 
209 /*
 210  * Look up the entry in the radix tree, wait for it to become unlocked if it
 211  * is an exceptional entry, and return it. The caller must call
 212  * put_unlocked_mapping_entry() if it decides not to lock the entry, or
 213  * put_locked_mapping_entry() once it has locked the entry and wants to
 214  * unlock it again.
215  *
216  * The function must be called with mapping->tree_lock held.
217  */
218 static void *get_unlocked_mapping_entry(struct address_space *mapping,
219                                         pgoff_t index, void ***slotp)
220 {
221         void *entry, **slot;
222         struct wait_exceptional_entry_queue ewait;
223         wait_queue_head_t *wq;
224 
225         init_wait(&ewait.wait);
226         ewait.wait.func = wake_exceptional_entry_func;
227 
228         for (;;) {
229                 entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
230                                           &slot);
231                 if (!entry || !radix_tree_exceptional_entry(entry) ||
232                     !slot_locked(mapping, slot)) {
233                         if (slotp)
234                                 *slotp = slot;
235                         return entry;
236                 }
237 
238                 wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
239                 prepare_to_wait_exclusive(wq, &ewait.wait,
240                                           TASK_UNINTERRUPTIBLE);
241                 spin_unlock_irq(&mapping->tree_lock);
242                 schedule();
243                 finish_wait(wq, &ewait.wait);
244                 spin_lock_irq(&mapping->tree_lock);
245         }
246 }
247 
248 static void dax_unlock_mapping_entry(struct address_space *mapping,
249                                      pgoff_t index)
250 {
251         void *entry, **slot;
252 
253         spin_lock_irq(&mapping->tree_lock);
254         entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
255         if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
256                          !slot_locked(mapping, slot))) {
257                 spin_unlock_irq(&mapping->tree_lock);
258                 return;
259         }
260         unlock_slot(mapping, slot);
261         spin_unlock_irq(&mapping->tree_lock);
262         dax_wake_mapping_entry_waiter(mapping, index, entry, false);
263 }
264 
265 static void put_locked_mapping_entry(struct address_space *mapping,
266                                      pgoff_t index, void *entry)
267 {
268         if (!radix_tree_exceptional_entry(entry)) {
269                 unlock_page(entry);
270                 put_page(entry);
271         } else {
272                 dax_unlock_mapping_entry(mapping, index);
273         }
274 }
275 
276 /*
 277  * Called when we are done with a radix tree entry we looked up via
 278  * get_unlocked_mapping_entry() and which we did not lock in the end.
279  */
280 static void put_unlocked_mapping_entry(struct address_space *mapping,
281                                        pgoff_t index, void *entry)
282 {
283         if (!radix_tree_exceptional_entry(entry))
284                 return;
285 
286         /* We have to wake up next waiter for the radix tree entry lock */
287         dax_wake_mapping_entry_waiter(mapping, index, entry, false);
288 }
289 
290 /*
 291  * Find the radix tree entry at the given index. If it points to a page,
 292  * return with the page locked. If it points to an exceptional entry, return
 293  * with the radix tree entry locked. If the radix tree doesn't contain the
 294  * given index, create an empty exceptional entry and return with it locked.
295  *
296  * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
297  * either return that locked entry or will return an error.  This error will
298  * happen if there are any 4k entries (either zero pages or DAX entries)
299  * within the 2MiB range that we are requesting.
300  *
301  * We always favor 4k entries over 2MiB entries. There isn't a flow where we
302  * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
303  * insertion will fail if it finds any 4k entries already in the tree, and a
304  * 4k insertion will cause an existing 2MiB entry to be unmapped and
305  * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
306  * well as 2MiB empty entries.
307  *
308  * The exception to this downgrade path is for 2MiB DAX PMD entries that have
309  * real storage backing them.  We will leave these real 2MiB DAX entries in
310  * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
311  *
312  * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
313  * persistent memory the benefit is doubtful. We can add that later if we can
314  * show it helps.
315  */
316 static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
317                 unsigned long size_flag)
318 {
319         bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
320         void *entry, **slot;
321 
322 restart:
323         spin_lock_irq(&mapping->tree_lock);
324         entry = get_unlocked_mapping_entry(mapping, index, &slot);
325 
326         if (entry) {
327                 if (size_flag & RADIX_DAX_PMD) {
328                         if (!radix_tree_exceptional_entry(entry) ||
329                             dax_is_pte_entry(entry)) {
330                                 put_unlocked_mapping_entry(mapping, index,
331                                                 entry);
332                                 entry = ERR_PTR(-EEXIST);
333                                 goto out_unlock;
334                         }
335                 } else { /* trying to grab a PTE entry */
336                         if (radix_tree_exceptional_entry(entry) &&
337                             dax_is_pmd_entry(entry) &&
338                             (dax_is_zero_entry(entry) ||
339                              dax_is_empty_entry(entry))) {
340                                 pmd_downgrade = true;
341                         }
342                 }
343         }
344 
345         /* No entry for given index? Make sure radix tree is big enough. */
346         if (!entry || pmd_downgrade) {
347                 int err;
348 
349                 if (pmd_downgrade) {
350                         /*
351                          * Make sure 'entry' remains valid while we drop
352                          * mapping->tree_lock.
353                          */
354                         entry = lock_slot(mapping, slot);
355                 }
356 
357                 spin_unlock_irq(&mapping->tree_lock);
358                 /*
359                  * Besides huge zero pages the only other thing that gets
360                  * downgraded are empty entries which don't need to be
361                  * unmapped.
362                  */
363                 if (pmd_downgrade && dax_is_zero_entry(entry))
364                         unmap_mapping_range(mapping,
365                                 (index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
366 
367                 err = radix_tree_preload(
368                                 mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
369                 if (err) {
370                         if (pmd_downgrade)
371                                 put_locked_mapping_entry(mapping, index, entry);
372                         return ERR_PTR(err);
373                 }
374                 spin_lock_irq(&mapping->tree_lock);
375 
376                 if (!entry) {
377                         /*
378                          * We needed to drop the page_tree lock while calling
379                          * radix_tree_preload() and we didn't have an entry to
380                          * lock.  See if another thread inserted an entry at
381                          * our index during this time.
382                          */
383                         entry = __radix_tree_lookup(&mapping->page_tree, index,
384                                         NULL, &slot);
385                         if (entry) {
386                                 radix_tree_preload_end();
387                                 spin_unlock_irq(&mapping->tree_lock);
388                                 goto restart;
389                         }
390                 }
391 
392                 if (pmd_downgrade) {
393                         radix_tree_delete(&mapping->page_tree, index);
394                         mapping->nrexceptional--;
395                         dax_wake_mapping_entry_waiter(mapping, index, entry,
396                                         true);
397                 }
398 
399                 entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);
400 
401                 err = __radix_tree_insert(&mapping->page_tree, index,
402                                 dax_radix_order(entry), entry);
403                 radix_tree_preload_end();
404                 if (err) {
405                         spin_unlock_irq(&mapping->tree_lock);
406                         /*
407                          * Our insertion of a DAX entry failed, most likely
408                          * because we were inserting a PMD entry and it
409                          * collided with a PTE sized entry at a different
410                          * index in the PMD range.  We haven't inserted
411                          * anything into the radix tree and have no waiters to
412                          * wake.
413                          */
414                         return ERR_PTR(err);
415                 }
416                 /* Good, we have inserted empty locked entry into the tree. */
417                 mapping->nrexceptional++;
418                 spin_unlock_irq(&mapping->tree_lock);
419                 return entry;
420         }
421         /* Normal page in radix tree? */
422         if (!radix_tree_exceptional_entry(entry)) {
423                 struct page *page = entry;
424 
425                 get_page(page);
426                 spin_unlock_irq(&mapping->tree_lock);
427                 lock_page(page);
428                 /* Page got truncated? Retry... */
429                 if (unlikely(page->mapping != mapping)) {
430                         unlock_page(page);
431                         put_page(page);
432                         goto restart;
433                 }
434                 return page;
435         }
436         entry = lock_slot(mapping, slot);
437  out_unlock:
438         spin_unlock_irq(&mapping->tree_lock);
439         return entry;
440 }
441 
442 /*
443  * We do not necessarily hold the mapping->tree_lock when we call this
444  * function so it is possible that 'entry' is no longer a valid item in the
445  * radix tree.  This is okay because all we really need to do is to find the
446  * correct waitqueue where tasks might be waiting for that old 'entry' and
447  * wake them.
448  */
449 void dax_wake_mapping_entry_waiter(struct address_space *mapping,
450                 pgoff_t index, void *entry, bool wake_all)
451 {
452         struct exceptional_entry_key key;
453         wait_queue_head_t *wq;
454 
455         wq = dax_entry_waitqueue(mapping, index, entry, &key);
456 
457         /*
458          * Checking for locked entry and prepare_to_wait_exclusive() happens
459          * under mapping->tree_lock, ditto for entry handling in our callers.
460          * So at this point all tasks that could have seen our entry locked
461          * must be in the waitqueue and the following check will see them.
462          */
463         if (waitqueue_active(wq))
464                 __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
465 }
466 
467 static int __dax_invalidate_mapping_entry(struct address_space *mapping,
468                                           pgoff_t index, bool trunc)
469 {
470         int ret = 0;
471         void *entry;
472         struct radix_tree_root *page_tree = &mapping->page_tree;
473 
474         spin_lock_irq(&mapping->tree_lock);
475         entry = get_unlocked_mapping_entry(mapping, index, NULL);
476         if (!entry || !radix_tree_exceptional_entry(entry))
477                 goto out;
478         if (!trunc &&
479             (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
480              radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
481                 goto out;
482         radix_tree_delete(page_tree, index);
483         mapping->nrexceptional--;
484         ret = 1;
485 out:
486         put_unlocked_mapping_entry(mapping, index, entry);
487         spin_unlock_irq(&mapping->tree_lock);
488         return ret;
489 }
490 /*
491  * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
492  * entry to get unlocked before deleting it.
493  */
494 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
495 {
496         int ret = __dax_invalidate_mapping_entry(mapping, index, true);
497 
498         /*
 499          * This gets called from the truncate / punch_hole path. As such, the
 500          * caller must hold locks protecting against concurrent modifications of
 501          * the radix tree (usually fs-private i_mmap_sem for writing). Since the
 502          * caller has seen an exceptional entry for this index, we had better
 503          * find it at that index as well...
504          */
505         WARN_ON_ONCE(!ret);
506         return ret;
507 }
508 
509 /*
510  * Invalidate exceptional DAX entry if it is clean.
511  */
512 int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
513                                       pgoff_t index)
514 {
515         return __dax_invalidate_mapping_entry(mapping, index, false);
516 }
517 
518 /*
519  * The user has performed a load from a hole in the file.  Allocating
520  * a new page in the file would cause excessive storage usage for
521  * workloads with sparse files.  We allocate a page cache page instead.
522  * We'll kick it out of the page cache if it's ever written to,
523  * otherwise it will simply fall out of the page cache under memory
524  * pressure without ever having been dirtied.
525  */
526 static int dax_load_hole(struct address_space *mapping, void **entry,
527                          struct vm_fault *vmf)
528 {
529         struct page *page;
530         int ret;
531 
532         /* Hole page already exists? Return it...  */
533         if (!radix_tree_exceptional_entry(*entry)) {
534                 page = *entry;
535                 goto out;
536         }
537 
538         /* This will replace locked radix tree entry with a hole page */
539         page = find_or_create_page(mapping, vmf->pgoff,
540                                    vmf->gfp_mask | __GFP_ZERO);
541         if (!page)
542                 return VM_FAULT_OOM;
543  out:
544         vmf->page = page;
545         ret = finish_fault(vmf);
546         vmf->page = NULL;
547         *entry = page;
548         if (!ret) {
549                 /* Grab reference for PTE that is now referencing the page */
550                 get_page(page);
551                 return VM_FAULT_NOPAGE;
552         }
553         return ret;
554 }
555 
556 static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size,
557                 struct page *to, unsigned long vaddr)
558 {
559         struct blk_dax_ctl dax = {
560                 .sector = sector,
561                 .size = size,
562         };
563         void *vto;
564 
565         if (dax_map_atomic(bdev, &dax) < 0)
566                 return PTR_ERR(dax.addr);
567         vto = kmap_atomic(to);
568         copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
569         kunmap_atomic(vto);
570         dax_unmap_atomic(bdev, &dax);
571         return 0;
572 }
573 
574 /*
575  * By this point grab_mapping_entry() has ensured that we have a locked entry
576  * of the appropriate size so we don't have to worry about downgrading PMDs to
577  * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
578  * already in the tree, we will skip the insertion and just dirty the PMD as
579  * appropriate.
580  */
581 static void *dax_insert_mapping_entry(struct address_space *mapping,
582                                       struct vm_fault *vmf,
583                                       void *entry, sector_t sector,
584                                       unsigned long flags)
585 {
586         struct radix_tree_root *page_tree = &mapping->page_tree;
587         int error = 0;
588         bool hole_fill = false;
589         void *new_entry;
590         pgoff_t index = vmf->pgoff;
591 
592         if (vmf->flags & FAULT_FLAG_WRITE)
593                 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
594 
595         /* Replacing hole page with block mapping? */
596         if (!radix_tree_exceptional_entry(entry)) {
597                 hole_fill = true;
598                 /*
599                  * Unmap the page now before we remove it from page cache below.
600                  * The page is locked so it cannot be faulted in again.
601                  */
602                 unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
603                                     PAGE_SIZE, 0);
604                 error = radix_tree_preload(vmf->gfp_mask & ~__GFP_HIGHMEM);
605                 if (error)
606                         return ERR_PTR(error);
607         } else if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_HZP)) {
608                 /* replacing huge zero page with PMD block mapping */
609                 unmap_mapping_range(mapping,
610                         (vmf->pgoff << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
611         }
612 
613         spin_lock_irq(&mapping->tree_lock);
614         new_entry = dax_radix_locked_entry(sector, flags);
615 
616         if (hole_fill) {
617                 __delete_from_page_cache(entry, NULL);
618                 /* Drop pagecache reference */
619                 put_page(entry);
620                 error = __radix_tree_insert(page_tree, index,
621                                 dax_radix_order(new_entry), new_entry);
622                 if (error) {
623                         new_entry = ERR_PTR(error);
624                         goto unlock;
625                 }
626                 mapping->nrexceptional++;
627         } else if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
628                 /*
629                  * Only swap our new entry into the radix tree if the current
630                  * entry is a zero page or an empty entry.  If a normal PTE or
631                  * PMD entry is already in the tree, we leave it alone.  This
632                  * means that if we are trying to insert a PTE and the
633                  * existing entry is a PMD, we will just leave the PMD in the
634                  * tree and dirty it if necessary.
635                  */
636                 struct radix_tree_node *node;
637                 void **slot;
638                 void *ret;
639 
640                 ret = __radix_tree_lookup(page_tree, index, &node, &slot);
641                 WARN_ON_ONCE(ret != entry);
642                 __radix_tree_replace(page_tree, node, slot,
643                                      new_entry, NULL, NULL);
644         }
645         if (vmf->flags & FAULT_FLAG_WRITE)
646                 radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
647  unlock:
648         spin_unlock_irq(&mapping->tree_lock);
649         if (hole_fill) {
650                 radix_tree_preload_end();
651                 /*
 652          * We don't need the hole page anymore; it has been replaced with a
653                  * locked radix tree entry now.
654                  */
655                 if (mapping->a_ops->freepage)
656                         mapping->a_ops->freepage(entry);
657                 unlock_page(entry);
658                 put_page(entry);
659         }
660         return new_entry;
661 }
662 
663 static inline unsigned long
664 pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
665 {
666         unsigned long address;
667 
668         address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
669         VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
670         return address;
671 }
672 
673 /* Walk all mappings of a given index of a file and writeprotect them */
674 static void dax_mapping_entry_mkclean(struct address_space *mapping,
675                                       pgoff_t index, unsigned long pfn)
676 {
677         struct vm_area_struct *vma;
678         pte_t pte, *ptep = NULL;
679         pmd_t *pmdp = NULL;
680         spinlock_t *ptl;
681         bool changed;
682 
683         i_mmap_lock_read(mapping);
684         vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
685                 unsigned long address;
686 
687                 cond_resched();
688 
689                 if (!(vma->vm_flags & VM_SHARED))
690                         continue;
691 
692                 address = pgoff_address(index, vma);
693                 changed = false;
694                 if (follow_pte_pmd(vma->vm_mm, address, &ptep, &pmdp, &ptl))
695                         continue;
696 
697                 if (pmdp) {
698 #ifdef CONFIG_FS_DAX_PMD
699                         pmd_t pmd;
700 
701                         if (pfn != pmd_pfn(*pmdp))
702                                 goto unlock_pmd;
703                         if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
704                                 goto unlock_pmd;
705 
706                         flush_cache_page(vma, address, pfn);
707                         pmd = pmdp_huge_clear_flush(vma, address, pmdp);
708                         pmd = pmd_wrprotect(pmd);
709                         pmd = pmd_mkclean(pmd);
710                         set_pmd_at(vma->vm_mm, address, pmdp, pmd);
711                         changed = true;
712 unlock_pmd:
713                         spin_unlock(ptl);
714 #endif
715                 } else {
716                         if (pfn != pte_pfn(*ptep))
717                                 goto unlock_pte;
718                         if (!pte_dirty(*ptep) && !pte_write(*ptep))
719                                 goto unlock_pte;
720 
721                         flush_cache_page(vma, address, pfn);
722                         pte = ptep_clear_flush(vma, address, ptep);
723                         pte = pte_wrprotect(pte);
724                         pte = pte_mkclean(pte);
725                         set_pte_at(vma->vm_mm, address, ptep, pte);
726                         changed = true;
727 unlock_pte:
728                         pte_unmap_unlock(ptep, ptl);
729                 }
730 
731                 if (changed)
732                         mmu_notifier_invalidate_page(vma->vm_mm, address);
733         }
734         i_mmap_unlock_read(mapping);
735 }
736 
737 static int dax_writeback_one(struct block_device *bdev,
738                 struct address_space *mapping, pgoff_t index, void *entry)
739 {
740         struct radix_tree_root *page_tree = &mapping->page_tree;
741         struct blk_dax_ctl dax;
742         void *entry2, **slot;
743         int ret = 0;
744 
745         /*
746          * A page got tagged dirty in DAX mapping? Something is seriously
747          * wrong.
748          */
749         if (WARN_ON(!radix_tree_exceptional_entry(entry)))
750                 return -EIO;
751 
752         spin_lock_irq(&mapping->tree_lock);
753         entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
754         /* Entry got punched out / reallocated? */
755         if (!entry2 || !radix_tree_exceptional_entry(entry2))
756                 goto put_unlocked;
757         /*
 758          * Entry got reallocated elsewhere? No need to write it back. We have to
759          * compare sectors as we must not bail out due to difference in lockbit
760          * or entry type.
761          */
762         if (dax_radix_sector(entry2) != dax_radix_sector(entry))
763                 goto put_unlocked;
764         if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
765                                 dax_is_zero_entry(entry))) {
766                 ret = -EIO;
767                 goto put_unlocked;
768         }
769 
770         /* Another fsync thread may have already written back this entry */
771         if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
772                 goto put_unlocked;
773         /* Lock the entry to serialize with page faults */
774         entry = lock_slot(mapping, slot);
775         /*
776          * We can clear the tag now but we have to be careful so that concurrent
777          * dax_writeback_one() calls for the same index cannot finish before we
778          * actually flush the caches. This is achieved as the calls will look
779          * at the entry only under tree_lock and once they do that they will
780          * see the entry locked and wait for it to unlock.
781          */
782         radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
783         spin_unlock_irq(&mapping->tree_lock);
784 
785         /*
786          * Even if dax_writeback_mapping_range() was given a wbc->range_start
787          * in the middle of a PMD, the 'index' we are given will be aligned to
788          * the start index of the PMD, as will the sector we pull from
789          * 'entry'.  This allows us to flush for PMD_SIZE and not have to
790          * worry about partial PMD writebacks.
791          */
792         dax.sector = dax_radix_sector(entry);
793         dax.size = PAGE_SIZE << dax_radix_order(entry);
794 
795         /*
796          * We cannot hold tree_lock while calling dax_map_atomic() because it
797          * eventually calls cond_resched().
798          */
799         ret = dax_map_atomic(bdev, &dax);
800         if (ret < 0) {
801                 put_locked_mapping_entry(mapping, index, entry);
802                 return ret;
803         }
804 
805         if (WARN_ON_ONCE(ret < dax.size)) {
806                 ret = -EIO;
807                 goto unmap;
808         }
809 
810         dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(dax.pfn));
811         wb_cache_pmem(dax.addr, dax.size);
812         /*
813          * After we have flushed the cache, we can clear the dirty tag. There
814          * cannot be new dirty data in the pfn after the flush has completed as
815          * the pfn mappings are writeprotected and fault waits for mapping
816          * entry lock.
817          */
818         spin_lock_irq(&mapping->tree_lock);
819         radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
820         spin_unlock_irq(&mapping->tree_lock);
821  unmap:
822         dax_unmap_atomic(bdev, &dax);
823         put_locked_mapping_entry(mapping, index, entry);
824         return ret;
825 
826  put_unlocked:
827         put_unlocked_mapping_entry(mapping, index, entry2);
828         spin_unlock_irq(&mapping->tree_lock);
829         return ret;
830 }
831 
832 /*
833  * Flush the mapping to the persistent domain within the byte range of [start,
834  * end]. This is required by data integrity operations to ensure file data is
835  * on persistent storage prior to completion of the operation.
836  */
837 int dax_writeback_mapping_range(struct address_space *mapping,
838                 struct block_device *bdev, struct writeback_control *wbc)
839 {
840         struct inode *inode = mapping->host;
841         pgoff_t start_index, end_index;
842         pgoff_t indices[PAGEVEC_SIZE];
843         struct pagevec pvec;
844         bool done = false;
845         int i, ret = 0;
846 
847         if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
848                 return -EIO;
849 
850         if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
851                 return 0;
852 
853         start_index = wbc->range_start >> PAGE_SHIFT;
854         end_index = wbc->range_end >> PAGE_SHIFT;
855 
856         tag_pages_for_writeback(mapping, start_index, end_index);
857 
858         pagevec_init(&pvec, 0);
859         while (!done) {
860                 pvec.nr = find_get_entries_tag(mapping, start_index,
861                                 PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
862                                 pvec.pages, indices);
863 
864                 if (pvec.nr == 0)
865                         break;
866 
867                 for (i = 0; i < pvec.nr; i++) {
868                         if (indices[i] > end_index) {
869                                 done = true;
870                                 break;
871                         }
872 
873                         ret = dax_writeback_one(bdev, mapping, indices[i],
874                                         pvec.pages[i]);
875                         if (ret < 0)
876                                 return ret;
877                 }
878                 start_index = indices[pvec.nr - 1] + 1;
879         }
880         return 0;
881 }
882 EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
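
/*
 * A minimal sketch of how a filesystem ->writepages handler might drive the
 * helper above for a DAX mapping.  example_writepages() and the use of
 * inode->i_sb->s_bdev are assumptions for illustration, not code from any
 * particular filesystem:
 *
 *	static int example_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		struct inode *inode = mapping->host;
 *
 *		if (!IS_DAX(inode))
 *			return generic_writepages(mapping, wbc);	// ordinary page cache path
 *
 *		// flush dirty DAX radix tree entries to the persistent domain
 *		return dax_writeback_mapping_range(mapping, inode->i_sb->s_bdev, wbc);
 *	}
 */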
883 
884 static int dax_insert_mapping(struct address_space *mapping,
885                 struct block_device *bdev, sector_t sector, size_t size,
886                 void **entryp, struct vm_area_struct *vma, struct vm_fault *vmf)
887 {
888         unsigned long vaddr = vmf->address;
889         struct blk_dax_ctl dax = {
890                 .sector = sector,
891                 .size = size,
892         };
893         void *ret;
894         void *entry = *entryp;
895 
896         if (dax_map_atomic(bdev, &dax) < 0)
897                 return PTR_ERR(dax.addr);
898         dax_unmap_atomic(bdev, &dax);
899 
900         ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector, 0);
901         if (IS_ERR(ret))
902                 return PTR_ERR(ret);
903         *entryp = ret;
904 
905         return vm_insert_mixed(vma, vaddr, dax.pfn);
906 }
907 
908 /**
909  * dax_pfn_mkwrite - handle first write to DAX page
910  * @vmf: The description of the fault
911  */
912 int dax_pfn_mkwrite(struct vm_fault *vmf)
913 {
914         struct file *file = vmf->vma->vm_file;
915         struct address_space *mapping = file->f_mapping;
916         void *entry, **slot;
917         pgoff_t index = vmf->pgoff;
918 
919         spin_lock_irq(&mapping->tree_lock);
920         entry = get_unlocked_mapping_entry(mapping, index, &slot);
921         if (!entry || !radix_tree_exceptional_entry(entry)) {
922                 if (entry)
923                         put_unlocked_mapping_entry(mapping, index, entry);
924                 spin_unlock_irq(&mapping->tree_lock);
925                 return VM_FAULT_NOPAGE;
926         }
927         radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
928         entry = lock_slot(mapping, slot);
929         spin_unlock_irq(&mapping->tree_lock);
930         /*
931          * If we race with somebody updating the PTE and finish_mkwrite_fault()
932          * fails, we don't care. We need to return VM_FAULT_NOPAGE and retry
933          * the fault in either case.
934          */
935         finish_mkwrite_fault(vmf);
936         put_locked_mapping_entry(mapping, index, entry);
937         return VM_FAULT_NOPAGE;
938 }
939 EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
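
/*
 * A minimal sketch of a filesystem ->pfn_mkwrite handler built around
 * dax_pfn_mkwrite().  example_dax_pfn_mkwrite() is an assumption; a real
 * filesystem would also take whatever lock it uses to serialize faults
 * against truncate around the call:
 *
 *	static int example_dax_pfn_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		int ret;
 *
 *		sb_start_pagefault(inode->i_sb);
 *		file_update_time(vmf->vma->vm_file);
 *		// the fault-vs-truncate lock would be taken here
 *		ret = dax_pfn_mkwrite(vmf);
 *		sb_end_pagefault(inode->i_sb);
 *		return ret;
 *	}
 */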
940 
941 static bool dax_range_is_aligned(struct block_device *bdev,
942                                  unsigned int offset, unsigned int length)
943 {
944         unsigned short sector_size = bdev_logical_block_size(bdev);
945 
946         if (!IS_ALIGNED(offset, sector_size))
947                 return false;
948         if (!IS_ALIGNED(length, sector_size))
949                 return false;
950 
951         return true;
952 }
953 
954 int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
955                 unsigned int offset, unsigned int length)
956 {
957         struct blk_dax_ctl dax = {
958                 .sector         = sector,
959                 .size           = PAGE_SIZE,
960         };
961 
962         if (dax_range_is_aligned(bdev, offset, length)) {
963                 sector_t start_sector = dax.sector + (offset >> 9);
964 
965                 return blkdev_issue_zeroout(bdev, start_sector,
966                                 length >> 9, GFP_NOFS, true);
967         } else {
968                 if (dax_map_atomic(bdev, &dax) < 0)
969                         return PTR_ERR(dax.addr);
970                 clear_pmem(dax.addr + offset, length);
971                 dax_unmap_atomic(bdev, &dax);
972         }
973         return 0;
974 }
975 EXPORT_SYMBOL_GPL(__dax_zero_page_range);
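
/*
 * A minimal sketch of how an iomap-based zeroing path might call the helper
 * above for a sub-page range.  example_dax_zero() is an assumption; the
 * sector arithmetic mirrors dax_iomap_sector() below:
 *
 *	static int example_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
 *			struct iomap *iomap)
 *	{
 *		sector_t sector = iomap->blkno +
 *				(((pos & PAGE_MASK) - iomap->offset) >> 9);
 *
 *		return __dax_zero_page_range(iomap->bdev, sector, offset, bytes);
 *	}
 */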
976 
977 static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
978 {
979         return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
980 }
981 
982 static loff_t
983 dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
984                 struct iomap *iomap)
985 {
986         struct iov_iter *iter = data;
987         loff_t end = pos + length, done = 0;
988         ssize_t ret = 0;
989 
990         if (iov_iter_rw(iter) == READ) {
991                 end = min(end, i_size_read(inode));
992                 if (pos >= end)
993                         return 0;
994 
995                 if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
996                         return iov_iter_zero(min(length, end - pos), iter);
997         }
998 
999         if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
1000                 return -EIO;
1001 
1002         /*
1003          * A write can allocate a block for an area which has a hole page mapped
1004          * into the page tables. We have to tear down these mappings so that data
1005          * written by write(2) is visible in mmap.
1006          */
1007         if (iomap->flags & IOMAP_F_NEW) {
1008                 invalidate_inode_pages2_range(inode->i_mapping,
1009                                               pos >> PAGE_SHIFT,
1010                                               (end - 1) >> PAGE_SHIFT);
1011         }
1012 
1013         while (pos < end) {
1014                 unsigned offset = pos & (PAGE_SIZE - 1);
1015                 struct blk_dax_ctl dax = { 0 };
1016                 ssize_t map_len;
1017 
1018                 if (fatal_signal_pending(current)) {
1019                         ret = -EINTR;
1020                         break;
1021                 }
1022 
1023                 dax.sector = dax_iomap_sector(iomap, pos);
1024                 dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK;
1025                 map_len = dax_map_atomic(iomap->bdev, &dax);
1026                 if (map_len < 0) {
1027                         ret = map_len;
1028                         break;
1029                 }
1030 
1031                 dax.addr += offset;
1032                 map_len -= offset;
1033                 if (map_len > end - pos)
1034                         map_len = end - pos;
1035 
1036                 if (iov_iter_rw(iter) == WRITE)
1037                         map_len = copy_from_iter_pmem(dax.addr, map_len, iter);
1038                 else
1039                         map_len = copy_to_iter(dax.addr, map_len, iter);
1040                 dax_unmap_atomic(iomap->bdev, &dax);
1041                 if (map_len <= 0) {
1042                         ret = map_len ? map_len : -EFAULT;
1043                         break;
1044                 }
1045 
1046                 pos += map_len;
1047                 length -= map_len;
1048                 done += map_len;
1049         }
1050 
1051         return done ? done : ret;
1052 }
1053 
1054 /**
1055  * dax_iomap_rw - Perform I/O to a DAX file
1056  * @iocb:       The control block for this I/O
1057  * @iter:       The addresses to do I/O from or to
1058  * @ops:        iomap ops passed from the file system
1059  *
1060  * This function performs read and write operations to directly mapped
1061  * persistent memory.  The caller needs to take care of read/write exclusion
1062  * and evicting any page cache pages in the region under I/O.
1063  */
1064 ssize_t
1065 dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
1066                 const struct iomap_ops *ops)
1067 {
1068         struct address_space *mapping = iocb->ki_filp->f_mapping;
1069         struct inode *inode = mapping->host;
1070         loff_t pos = iocb->ki_pos, ret = 0, done = 0;
1071         unsigned flags = 0;
1072 
1073         if (iov_iter_rw(iter) == WRITE) {
1074                 lockdep_assert_held_exclusive(&inode->i_rwsem);
1075                 flags |= IOMAP_WRITE;
1076         } else {
1077                 lockdep_assert_held(&inode->i_rwsem);
1078         }
1079 
1080         while (iov_iter_count(iter)) {
1081                 ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
1082                                 iter, dax_iomap_actor);
1083                 if (ret <= 0)
1084                         break;
1085                 pos += ret;
1086                 done += ret;
1087         }
1088 
1089         iocb->ki_pos += done;
1090         return done ? done : ret;
1091 }
1092 EXPORT_SYMBOL_GPL(dax_iomap_rw);
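
/*
 * A minimal sketch of a DAX-aware ->read_iter built on dax_iomap_rw().
 * example_iomap_ops is an assumption standing in for the filesystem's own
 * struct iomap_ops; the shared inode lock provides the read/write exclusion
 * that the comment above requires:
 *
 *	static ssize_t example_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		if (!iov_iter_count(to))
 *			return 0;
 *
 *		inode_lock_shared(inode);
 *		ret = dax_iomap_rw(iocb, to, &example_iomap_ops);
 *		inode_unlock_shared(inode);
 *
 *		file_accessed(iocb->ki_filp);
 *		return ret;
 *	}
 */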
1093 
1094 static int dax_fault_return(int error)
1095 {
1096         if (error == 0)
1097                 return VM_FAULT_NOPAGE;
1098         if (error == -ENOMEM)
1099                 return VM_FAULT_OOM;
1100         return VM_FAULT_SIGBUS;
1101 }
1102 
1103 static int dax_iomap_pte_fault(struct vm_fault *vmf,
1104                                const struct iomap_ops *ops)
1105 {
1106         struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1107         struct inode *inode = mapping->host;
1108         unsigned long vaddr = vmf->address;
1109         loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
1110         sector_t sector;
1111         struct iomap iomap = { 0 };
1112         unsigned flags = IOMAP_FAULT;
1113         int error, major = 0;
1114         int vmf_ret = 0;
1115         void *entry;
1116 
1117         /*
1118          * Check whether the offset isn't beyond the end of the file now. The
1119          * caller is supposed to hold locks serializing us with truncate / punch
1120          * hole, so this is a reliable test.
1121          */
1122         if (pos >= i_size_read(inode))
1123                 return VM_FAULT_SIGBUS;
1124 
1125         if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
1126                 flags |= IOMAP_WRITE;
1127 
1128         entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
1129         if (IS_ERR(entry))
1130                 return dax_fault_return(PTR_ERR(entry));
1131 
1132         /*
1133          * It is possible, particularly with mixed reads & writes to private
1134          * mappings, that we have raced with a PMD fault that overlaps with
1135          * the PTE we need to set up.  If so just return and the fault will be
1136          * retried.
1137          */
1138         if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
1139                 vmf_ret = VM_FAULT_NOPAGE;
1140                 goto unlock_entry;
1141         }
1142 
1143         /*
1144          * Note that we don't bother to use iomap_apply here: DAX requires
1145          * the file system block size to be equal to the page size, which means
1146          * that we never have to deal with more than a single extent here.
1147          */
1148         error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
1149         if (error) {
1150                 vmf_ret = dax_fault_return(error);
1151                 goto unlock_entry;
1152         }
1153         if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
1154                 error = -EIO;   /* fs corruption? */
1155                 goto error_finish_iomap;
1156         }
1157 
1158         sector = dax_iomap_sector(&iomap, pos);
1159 
1160         if (vmf->cow_page) {
1161                 switch (iomap.type) {
1162                 case IOMAP_HOLE:
1163                 case IOMAP_UNWRITTEN:
1164                         clear_user_highpage(vmf->cow_page, vaddr);
1165                         break;
1166                 case IOMAP_MAPPED:
1167                         error = copy_user_dax(iomap.bdev, sector, PAGE_SIZE,
1168                                         vmf->cow_page, vaddr);
1169                         break;
1170                 default:
1171                         WARN_ON_ONCE(1);
1172                         error = -EIO;
1173                         break;
1174                 }
1175 
1176                 if (error)
1177                         goto error_finish_iomap;
1178 
1179                 __SetPageUptodate(vmf->cow_page);
1180                 vmf_ret = finish_fault(vmf);
1181                 if (!vmf_ret)
1182                         vmf_ret = VM_FAULT_DONE_COW;
1183                 goto finish_iomap;
1184         }
1185 
1186         switch (iomap.type) {
1187         case IOMAP_MAPPED:
1188                 if (iomap.flags & IOMAP_F_NEW) {
1189                         count_vm_event(PGMAJFAULT);
1190                         mem_cgroup_count_vm_event(vmf->vma->vm_mm, PGMAJFAULT);
1191                         major = VM_FAULT_MAJOR;
1192                 }
1193                 error = dax_insert_mapping(mapping, iomap.bdev, sector,
1194                                 PAGE_SIZE, &entry, vmf->vma, vmf);
1195                 /* -EBUSY is fine, somebody else faulted on the same PTE */
1196                 if (error == -EBUSY)
1197                         error = 0;
1198                 break;
1199         case IOMAP_UNWRITTEN:
1200         case IOMAP_HOLE:
1201                 if (!(vmf->flags & FAULT_FLAG_WRITE)) {
1202                         vmf_ret = dax_load_hole(mapping, &entry, vmf);
1203                         goto finish_iomap;
1204                 }
1205                 /*FALLTHRU*/
1206         default:
1207                 WARN_ON_ONCE(1);
1208                 error = -EIO;
1209                 break;
1210         }
1211 
1212  error_finish_iomap:
1213         vmf_ret = dax_fault_return(error) | major;
1214  finish_iomap:
1215         if (ops->iomap_end) {
1216                 int copied = PAGE_SIZE;
1217 
1218                 if (vmf_ret & VM_FAULT_ERROR)
1219                         copied = 0;
1220                 /*
1221                  * The fault is done by now and there's no way back (other
1222                  * thread may be already happily using PTE we have installed).
1223                  * Just ignore error from ->iomap_end since we cannot do much
1224                  * with it.
1225                  */
1226                 ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
1227         }
1228  unlock_entry:
1229         put_locked_mapping_entry(mapping, vmf->pgoff, entry);
1230         return vmf_ret;
1231 }
1232 
1233 #ifdef CONFIG_FS_DAX_PMD
1234 /*
1235  * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
1236  * more often than one might expect in the below functions.
1237  */
1238 #define PG_PMD_COLOUR   ((PMD_SIZE >> PAGE_SHIFT) - 1)
1239 
1240 static int dax_pmd_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
1241                 loff_t pos, void **entryp)
1242 {
1243         struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1244         struct block_device *bdev = iomap->bdev;
1245         struct inode *inode = mapping->host;
1246         struct blk_dax_ctl dax = {
1247                 .sector = dax_iomap_sector(iomap, pos),
1248                 .size = PMD_SIZE,
1249         };
1250         long length = dax_map_atomic(bdev, &dax);
1251         void *ret = NULL;
1252 
1253         if (length < 0) /* dax_map_atomic() failed */
1254                 goto fallback;
1255         if (length < PMD_SIZE)
1256                 goto unmap_fallback;
1257         if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR)
1258                 goto unmap_fallback;
1259         if (!pfn_t_devmap(dax.pfn))
1260                 goto unmap_fallback;
1261 
1262         dax_unmap_atomic(bdev, &dax);
1263 
1264         ret = dax_insert_mapping_entry(mapping, vmf, *entryp, dax.sector,
1265                         RADIX_DAX_PMD);
1266         if (IS_ERR(ret))
1267                 goto fallback;
1268         *entryp = ret;
1269 
1270         trace_dax_pmd_insert_mapping(inode, vmf, length, dax.pfn, ret);
1271         return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
1272                         dax.pfn, vmf->flags & FAULT_FLAG_WRITE);
1273 
1274  unmap_fallback:
1275         dax_unmap_atomic(bdev, &dax);
1276 fallback:
1277         trace_dax_pmd_insert_mapping_fallback(inode, vmf, length,
1278                         dax.pfn, ret);
1279         return VM_FAULT_FALLBACK;
1280 }
1281 
1282 static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
1283                 void **entryp)
1284 {
1285         struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1286         unsigned long pmd_addr = vmf->address & PMD_MASK;
1287         struct inode *inode = mapping->host;
1288         struct page *zero_page;
1289         void *ret = NULL;
1290         spinlock_t *ptl;
1291         pmd_t pmd_entry;
1292 
1293         zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
1294 
1295         if (unlikely(!zero_page))
1296                 goto fallback;
1297 
1298         ret = dax_insert_mapping_entry(mapping, vmf, *entryp, 0,
1299                         RADIX_DAX_PMD | RADIX_DAX_HZP);
1300         if (IS_ERR(ret))
1301                 goto fallback;
1302         *entryp = ret;
1303 
1304         ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1305         if (!pmd_none(*(vmf->pmd))) {
1306                 spin_unlock(ptl);
1307                 goto fallback;
1308         }
1309 
1310         pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
1311         pmd_entry = pmd_mkhuge(pmd_entry);
1312         set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
1313         spin_unlock(ptl);
1314         trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
1315         return VM_FAULT_NOPAGE;
1316 
1317 fallback:
1318         trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
1319         return VM_FAULT_FALLBACK;
1320 }
1321 
1322 static int dax_iomap_pmd_fault(struct vm_fault *vmf,
1323                                const struct iomap_ops *ops)
1324 {
1325         struct vm_area_struct *vma = vmf->vma;
1326         struct address_space *mapping = vma->vm_file->f_mapping;
1327         unsigned long pmd_addr = vmf->address & PMD_MASK;
1328         bool write = vmf->flags & FAULT_FLAG_WRITE;
1329         unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
1330         struct inode *inode = mapping->host;
1331         int result = VM_FAULT_FALLBACK;
1332         struct iomap iomap = { 0 };
1333         pgoff_t max_pgoff, pgoff;
1334         void *entry;
1335         loff_t pos;
1336         int error;
1337 
1338         /*
1339          * Check whether the offset isn't beyond the end of the file now. The
1340          * caller is supposed to hold locks serializing us with truncate / punch
1341          * hole, so this is a reliable test.
1342          */
1343         pgoff = linear_page_index(vma, pmd_addr);
1344         max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;
1345 
1346         trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
1347 
1348         /* Fall back to PTEs if we're going to COW */
1349         if (write && !(vma->vm_flags & VM_SHARED))
1350                 goto fallback;
1351 
1352         /* If the PMD would extend outside the VMA */
1353         if (pmd_addr < vma->vm_start)
1354                 goto fallback;
1355         if ((pmd_addr + PMD_SIZE) > vma->vm_end)
1356                 goto fallback;
1357 
1358         if (pgoff > max_pgoff) {
1359                 result = VM_FAULT_SIGBUS;
1360                 goto out;
1361         }
1362 
1363         /* If the PMD would extend beyond the file size */
1364         if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
1365                 goto fallback;
1366 
1367         /*
1368          * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
1369          * PMD or a HZP entry.  If it can't (because a 4k page is already in
1370          * the tree, for instance), it will return -EEXIST and we just fall
1371          * back to 4k entries.
1372          */
1373         entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
1374         if (IS_ERR(entry))
1375                 goto fallback;
1376 
1377         /*
1378          * It is possible, particularly with mixed reads & writes to private
1379          * mappings, that we have raced with a PTE fault that overlaps with
1380          * the PMD we need to set up.  If so just return and the fault will be
1381          * retried.
1382          */
1383         if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
1384                         !pmd_devmap(*vmf->pmd)) {
1385                 result = 0;
1386                 goto unlock_entry;
1387         }
1388 
1389         /*
1390          * Note that we don't use iomap_apply here.  We aren't doing I/O, only
1391          * setting up a mapping, so really we're using iomap_begin() as a way
1392          * to look up our filesystem block.
1393          */
1394         pos = (loff_t)pgoff << PAGE_SHIFT;
1395         error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
1396         if (error)
1397                 goto unlock_entry;
1398 
1399         if (iomap.offset + iomap.length < pos + PMD_SIZE)
1400                 goto finish_iomap;
1401 
1402         switch (iomap.type) {
1403         case IOMAP_MAPPED:
1404                 result = dax_pmd_insert_mapping(vmf, &iomap, pos, &entry);
1405                 break;
1406         case IOMAP_UNWRITTEN:
1407         case IOMAP_HOLE:
1408                 if (WARN_ON_ONCE(write))
1409                         break;
1410                 result = dax_pmd_load_hole(vmf, &iomap, &entry);
1411                 break;
1412         default:
1413                 WARN_ON_ONCE(1);
1414                 break;
1415         }
1416 
1417  finish_iomap:
1418         if (ops->iomap_end) {
1419                 int copied = PMD_SIZE;
1420 
1421                 if (result == VM_FAULT_FALLBACK)
1422                         copied = 0;
1423                 /*
1424                  * The fault is done by now and there's no way back (other
1425                  * thread may be already happily using PMD we have installed).
1426                  * Just ignore error from ->iomap_end since we cannot do much
1427                  * with it.
1428                  */
1429                 ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
1430                                 &iomap);
1431         }
1432  unlock_entry:
1433         put_locked_mapping_entry(mapping, pgoff, entry);
1434  fallback:
1435         if (result == VM_FAULT_FALLBACK) {
1436                 split_huge_pmd(vma, vmf->pmd, vmf->address);
1437                 count_vm_event(THP_FAULT_FALLBACK);
1438         }
1439 out:
1440         trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
1441         return result;
1442 }
1443 #else
1444 static int dax_iomap_pmd_fault(struct vm_fault *vmf,
1445                                const struct iomap_ops *ops)
1446 {
1447         return VM_FAULT_FALLBACK;
1448 }
1449 #endif /* CONFIG_FS_DAX_PMD */
1450 
1451 /**
1452  * dax_iomap_fault - handle a page fault on a DAX file
1453  * @vmf: The description of the fault
1454  * @ops: iomap ops passed from the file system
1455  *
1456  * When a page fault occurs, filesystems may call this helper in
1457  * their fault handler for DAX files. dax_iomap_fault() assumes the caller
1458  * has done all the necessary locking for page fault to proceed
1459  * successfully.
1460  */
1461 int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
1462                     const struct iomap_ops *ops)
1463 {
1464         switch (pe_size) {
1465         case PE_SIZE_PTE:
1466                 return dax_iomap_pte_fault(vmf, ops);
1467         case PE_SIZE_PMD:
1468                 return dax_iomap_pmd_fault(vmf, ops);
1469         default:
1470                 return VM_FAULT_FALLBACK;
1471         }
1472 }
1473 EXPORT_SYMBOL_GPL(dax_iomap_fault);
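
/*
 * A minimal sketch of how a filesystem might wire dax_iomap_fault() into its
 * vm_operations_struct.  example_iomap_ops and the example_* wrappers are
 * assumptions; a real filesystem would also take its fault-vs-truncate lock
 * around the dax_iomap_fault() call:
 *
 *	static int example_dax_huge_fault(struct vm_fault *vmf,
 *			enum page_entry_size pe_size)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		int ret;
 *
 *		if (vmf->flags & FAULT_FLAG_WRITE) {
 *			sb_start_pagefault(inode->i_sb);
 *			file_update_time(vmf->vma->vm_file);
 *		}
 *		ret = dax_iomap_fault(vmf, pe_size, &example_iomap_ops);
 *		if (vmf->flags & FAULT_FLAG_WRITE)
 *			sb_end_pagefault(inode->i_sb);
 *		return ret;
 *	}
 *
 *	static int example_dax_fault(struct vm_fault *vmf)
 *	{
 *		return example_dax_huge_fault(vmf, PE_SIZE_PTE);
 *	}
 *
 *	static const struct vm_operations_struct example_dax_vm_ops = {
 *		.fault		= example_dax_fault,
 *		.huge_fault	= example_dax_huge_fault,
 *		.page_mkwrite	= example_dax_fault,
 *		.pfn_mkwrite	= dax_pfn_mkwrite,
 *	};
 */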
1474 
