TOMOYO Linux Cross Reference
Linux/mm/memremap.c

  1 // SPDX-License-Identifier: GPL-2.0
  2 /* Copyright(c) 2015 Intel Corporation. All rights reserved. */
  3 #include <linux/device.h>
  4 #include <linux/io.h>
  5 #include <linux/kasan.h>
  6 #include <linux/memory_hotplug.h>
  7 #include <linux/mm.h>
  8 #include <linux/pfn_t.h>
  9 #include <linux/swap.h>
 10 #include <linux/mmzone.h>
 11 #include <linux/swapops.h>
 12 #include <linux/types.h>
 13 #include <linux/wait_bit.h>
 14 #include <linux/xarray.h>
 15 
 16 static DEFINE_XARRAY(pgmap_array);
 17 
 18 /*
 19  * The memremap() and memremap_pages() interfaces are alternately used
 20  * to map persistent memory namespaces. These interfaces place different
 21  * constraints on the alignment and size of the mapping (namespace).
 22  * memremap() can map individual PAGE_SIZE pages. memremap_pages() can
 23  * only map subsections (2MB), and on at least one architecture (PowerPC)
 24  * the minimum mapping granularity of memremap_pages() is 16MB.
 25  *
 26  * The role of memremap_compat_align() is to communicate the minimum
 27  * arch supported alignment of a namespace such that it can freely
 28  * switch modes without violating the arch constraint. Namely, do not
 29  * allow a namespace to be PAGE_SIZE aligned since that namespace may be
 30  * reconfigured into a mode that requires SUBSECTION_SIZE alignment.
 31  */
 32 #ifndef CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN
 33 unsigned long memremap_compat_align(void)
 34 {
 35         return SUBSECTION_SIZE;
 36 }
 37 EXPORT_SYMBOL_GPL(memremap_compat_align);
 38 #endif
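
    /*
     * Illustrative sketch (not part of this file): how a namespace driver
     * might use memremap_compat_align() to reject a layout that could not
     * later be switched between memremap() and memremap_pages() modes.
     * The "start", "size" and -EOPNOTSUPP choice are hypothetical.
     *
     *	unsigned long align = memremap_compat_align();
     *
     *	if (!IS_ALIGNED(start, align) || !IS_ALIGNED(size, align))
     *		return -EOPNOTSUPP;
     */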
 39 
 40 #ifdef CONFIG_DEV_PAGEMAP_OPS
 41 DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
 42 EXPORT_SYMBOL(devmap_managed_key);
 43 
 44 static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
 45 {
 46         if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
 47             pgmap->type == MEMORY_DEVICE_FS_DAX)
 48                 static_branch_dec(&devmap_managed_key);
 49 }
 50 
 51 static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
 52 {
 53         if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
 54             pgmap->type == MEMORY_DEVICE_FS_DAX)
 55                 static_branch_inc(&devmap_managed_key);
 56 }
 57 #else
 58 static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
 59 {
 60 }
 61 static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
 62 {
 63 }
 64 #endif /* CONFIG_DEV_PAGEMAP_OPS */
 65 
 66 static void pgmap_array_delete(struct range *range)
 67 {
 68         xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end),
 69                         NULL, GFP_KERNEL);
 70         synchronize_rcu();
 71 }
 72 
 73 static unsigned long pfn_first(struct dev_pagemap *pgmap, int range_id)
 74 {
 75         struct range *range = &pgmap->ranges[range_id];
 76         unsigned long pfn = PHYS_PFN(range->start);
 77 
 78         if (range_id)
 79                 return pfn;
 80         return pfn + vmem_altmap_offset(pgmap_altmap(pgmap));
 81 }
 82 
 83 bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
 84 {
 85         int i;
 86 
 87         for (i = 0; i < pgmap->nr_range; i++) {
 88                 struct range *range = &pgmap->ranges[i];
 89 
 90                 if (pfn >= PHYS_PFN(range->start) &&
 91                     pfn <= PHYS_PFN(range->end))
 92                         return pfn >= pfn_first(pgmap, i);
 93         }
 94 
 95         return false;
 96 }
 97 
 98 static unsigned long pfn_end(struct dev_pagemap *pgmap, int range_id)
 99 {
100         const struct range *range = &pgmap->ranges[range_id];
101 
102         return (range->start + range_len(range)) >> PAGE_SHIFT;
103 }
104 
105 static unsigned long pfn_next(unsigned long pfn)
106 {
107         if (pfn % 1024 == 0)
108                 cond_resched();
109         return pfn + 1;
110 }
111 
112 #define for_each_device_pfn(pfn, map, i) \
113         for (pfn = pfn_first(map, i); pfn < pfn_end(map, i); pfn = pfn_next(pfn))
114 
115 static void dev_pagemap_kill(struct dev_pagemap *pgmap)
116 {
117         if (pgmap->ops && pgmap->ops->kill)
118                 pgmap->ops->kill(pgmap);
119         else
120                 percpu_ref_kill(pgmap->ref);
121 }
122 
123 static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
124 {
125         if (pgmap->ops && pgmap->ops->cleanup) {
126                 pgmap->ops->cleanup(pgmap);
127         } else {
128                 wait_for_completion(&pgmap->done);
129                 percpu_ref_exit(pgmap->ref);
130         }
131         /*
132          * Undo the pgmap ref assignment for the internal case as the
133          * caller may re-enable the same pgmap.
134          */
135         if (pgmap->ref == &pgmap->internal_ref)
136                 pgmap->ref = NULL;
137 }
138 
139 static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
140 {
141         struct range *range = &pgmap->ranges[range_id];
142         struct page *first_page;
143         int nid;
144 
145         /* make sure to access a memmap that was actually initialized */
146         first_page = pfn_to_page(pfn_first(pgmap, range_id));
147 
148         /* pages are dead and unused, undo the arch mapping */
149         nid = page_to_nid(first_page);
150 
151         mem_hotplug_begin();
152         remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(range->start),
153                                    PHYS_PFN(range_len(range)));
154         if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
155                 __remove_pages(PHYS_PFN(range->start),
156                                PHYS_PFN(range_len(range)), NULL);
157         } else {
158                 arch_remove_memory(nid, range->start, range_len(range),
159                                 pgmap_altmap(pgmap));
160                 kasan_remove_zero_shadow(__va(range->start), range_len(range));
161         }
162         mem_hotplug_done();
163 
164         untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
165         pgmap_array_delete(range);
166 }
167 
168 void memunmap_pages(struct dev_pagemap *pgmap)
169 {
170         unsigned long pfn;
171         int i;
172 
173         dev_pagemap_kill(pgmap);
174         for (i = 0; i < pgmap->nr_range; i++)
175                 for_each_device_pfn(pfn, pgmap, i)
176                         put_page(pfn_to_page(pfn));
177         dev_pagemap_cleanup(pgmap);
178 
179         for (i = 0; i < pgmap->nr_range; i++)
180                 pageunmap_range(pgmap, i);
181 
182         WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
183         devmap_managed_enable_put(pgmap);
184 }
185 EXPORT_SYMBOL_GPL(memunmap_pages);
186 
187 static void devm_memremap_pages_release(void *data)
188 {
189         memunmap_pages(data);
190 }
191 
192 static void dev_pagemap_percpu_release(struct percpu_ref *ref)
193 {
194         struct dev_pagemap *pgmap =
195                 container_of(ref, struct dev_pagemap, internal_ref);
196 
197         complete(&pgmap->done);
198 }
199 
200 static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
201                 int range_id, int nid)
202 {
203         const bool is_private = pgmap->type == MEMORY_DEVICE_PRIVATE;
204         struct range *range = &pgmap->ranges[range_id];
205         struct dev_pagemap *conflict_pgmap;
206         int error, is_ram;
207 
208         if (WARN_ONCE(pgmap_altmap(pgmap) && range_id > 0,
209                                 "altmap not supported for multiple ranges\n"))
210                 return -EINVAL;
211 
212         conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start), NULL);
213         if (conflict_pgmap) {
214                 WARN(1, "Conflicting mapping in same section\n");
215                 put_dev_pagemap(conflict_pgmap);
216                 return -ENOMEM;
217         }
218 
219         conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end), NULL);
220         if (conflict_pgmap) {
221                 WARN(1, "Conflicting mapping in same section\n");
222                 put_dev_pagemap(conflict_pgmap);
223                 return -ENOMEM;
224         }
225 
226         is_ram = region_intersects(range->start, range_len(range),
227                 IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
228 
229         if (is_ram != REGION_DISJOINT) {
230                 WARN_ONCE(1, "attempted on %s region %#llx-%#llx\n",
231                                 is_ram == REGION_MIXED ? "mixed" : "ram",
232                                 range->start, range->end);
233                 return -ENXIO;
234         }
235 
236         error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(range->start),
237                                 PHYS_PFN(range->end), pgmap, GFP_KERNEL));
238         if (error)
239                 return error;
240 
241         if (nid < 0)
242                 nid = numa_mem_id();
243 
244         error = track_pfn_remap(NULL, &params->pgprot, PHYS_PFN(range->start), 0,
245                         range_len(range));
246         if (error)
247                 goto err_pfn_remap;
248 
249         if (!mhp_range_allowed(range->start, range_len(range), !is_private)) {
250                 error = -EINVAL;
251                 goto err_pfn_remap;
252         }
253 
254         mem_hotplug_begin();
255 
256         /*
257          * For device private memory we call add_pages() as we only need to
258          * allocate and initialize struct page for the device memory. Moreover,
259          * the device memory is inaccessible, thus we do not want to
260          * create a linear mapping for the memory like arch_add_memory()
261          * would do.
262          *
263          * For all other device memory types, which are accessible by
264          * the CPU, we do want the linear mapping and thus use
265          * arch_add_memory().
266          */
267         if (is_private) {
268                 error = add_pages(nid, PHYS_PFN(range->start),
269                                 PHYS_PFN(range_len(range)), params);
270         } else {
271                 error = kasan_add_zero_shadow(__va(range->start), range_len(range));
272                 if (error) {
273                         mem_hotplug_done();
274                         goto err_kasan;
275                 }
276 
277                 error = arch_add_memory(nid, range->start, range_len(range),
278                                         params);
279         }
280 
281         if (!error) {
282                 struct zone *zone;
283 
284                 zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
285                 move_pfn_range_to_zone(zone, PHYS_PFN(range->start),
286                                 PHYS_PFN(range_len(range)), params->altmap,
287                                 MIGRATE_MOVABLE);
288         }
289 
290         mem_hotplug_done();
291         if (error)
292                 goto err_add_memory;
293 
294         /*
295          * Initialization of the pages has been deferred until now in order
296          * to allow us to do the work while not holding the hotplug lock.
297          */
298         memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
299                                 PHYS_PFN(range->start),
300                                 PHYS_PFN(range_len(range)), pgmap);
301         percpu_ref_get_many(pgmap->ref, pfn_end(pgmap, range_id)
302                         - pfn_first(pgmap, range_id));
303         return 0;
304 
305 err_add_memory:
306         kasan_remove_zero_shadow(__va(range->start), range_len(range));
307 err_kasan:
308         untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
309 err_pfn_remap:
310         pgmap_array_delete(range);
311         return error;
312 }
313 
314 
315 /*
316  * Non-device-managed version of devm_memremap_pages(), undone by
317  * memunmap_pages().  Please use devm_memremap_pages() if you have a struct
318  * device available.
319  */
320 void *memremap_pages(struct dev_pagemap *pgmap, int nid)
321 {
322         struct mhp_params params = {
323                 .altmap = pgmap_altmap(pgmap),
324                 .pgprot = PAGE_KERNEL,
325         };
326         const int nr_range = pgmap->nr_range;
327         int error, i;
328 
329         if (WARN_ONCE(!nr_range, "nr_range must be specified\n"))
330                 return ERR_PTR(-EINVAL);
331 
332         switch (pgmap->type) {
333         case MEMORY_DEVICE_PRIVATE:
334                 if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) {
335                         WARN(1, "Device private memory not supported\n");
336                         return ERR_PTR(-EINVAL);
337                 }
338                 if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
339                         WARN(1, "Missing migrate_to_ram method\n");
340                         return ERR_PTR(-EINVAL);
341                 }
342                 if (!pgmap->ops->page_free) {
343                         WARN(1, "Missing page_free method\n");
344                         return ERR_PTR(-EINVAL);
345                 }
346                 if (!pgmap->owner) {
347                         WARN(1, "Missing owner\n");
348                         return ERR_PTR(-EINVAL);
349                 }
350                 break;
351         case MEMORY_DEVICE_FS_DAX:
352                 if (!IS_ENABLED(CONFIG_ZONE_DEVICE) ||
353                     IS_ENABLED(CONFIG_FS_DAX_LIMITED)) {
354                         WARN(1, "File system DAX not supported\n");
355                         return ERR_PTR(-EINVAL);
356                 }
357                 break;
358         case MEMORY_DEVICE_GENERIC:
359                 break;
360         case MEMORY_DEVICE_PCI_P2PDMA:
361                 params.pgprot = pgprot_noncached(params.pgprot);
362                 break;
363         default:
364                 WARN(1, "Invalid pgmap type %d\n", pgmap->type);
365                 break;
366         }
367 
368         if (!pgmap->ref) {
369                 if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
370                         return ERR_PTR(-EINVAL);
371 
372                 init_completion(&pgmap->done);
373                 error = percpu_ref_init(&pgmap->internal_ref,
374                                 dev_pagemap_percpu_release, 0, GFP_KERNEL);
375                 if (error)
376                         return ERR_PTR(error);
377                 pgmap->ref = &pgmap->internal_ref;
378         } else {
379                 if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
380                         WARN(1, "Missing reference count teardown definition\n");
381                         return ERR_PTR(-EINVAL);
382                 }
383         }
384 
385         devmap_managed_enable_get(pgmap);
386 
387         /*
388          * Clear the pgmap nr_range as it will be incremented for each
389          * successfully processed range. This communicates how many
390          * regions to unwind in the abort case.
391          */
392         pgmap->nr_range = 0;
393         error = 0;
394         for (i = 0; i < nr_range; i++) {
395                 error = pagemap_range(pgmap, &params, i, nid);
396                 if (error)
397                         break;
398                 pgmap->nr_range++;
399         }
400 
401         if (i < nr_range) {
402                 memunmap_pages(pgmap);
403                 pgmap->nr_range = nr_range;
404                 return ERR_PTR(error);
405         }
406 
407         return __va(pgmap->ranges[0].start);
408 }
409 EXPORT_SYMBOL_GPL(memremap_pages);
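
    /*
     * Illustrative sketch (not part of this file): the fields that the
     * MEMORY_DEVICE_PRIVATE checks above require before memremap_pages()
     * can be called (range and nr_range must be filled in as well, as for
     * any pgmap). The ops table, owner pointer and nid are hypothetical
     * driver objects.
     *
     *	static const struct dev_pagemap_ops hypothetical_ops = {
     *		.page_free	= hypothetical_page_free,
     *		.migrate_to_ram	= hypothetical_migrate_to_ram,
     *	};
     *
     *	pgmap->type = MEMORY_DEVICE_PRIVATE;
     *	pgmap->ops = &hypothetical_ops;
     *	pgmap->owner = drvdata;
     *	addr = memremap_pages(pgmap, nid);
     *	if (IS_ERR(addr))
     *		return PTR_ERR(addr);
     */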
410 
411 /**
412  * devm_memremap_pages - remap and provide memmap backing for the given resource
413  * @dev: hosting device for @pgmap
414  * @pgmap: pointer to a struct dev_pagemap
415  *
416  * Notes:
417  * 1/ At a minimum the range, nr_range and type members of @pgmap must be
418  *    initialized by the caller before passing it to this function
419  *
420  * 2/ The altmap field may optionally be initialized, in which case
421  *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
422  *
423  * 3/ The ref field may optionally be provided, in which case pgmap->ref must be
424  *    'live' on entry and will be killed and reaped at
425  *    devm_memremap_pages_release() time, or if this routine fails.
426  *
427  * 4/ range is expected to be a host memory range that could feasibly be
428  *    treated as a "System RAM" range, i.e. not a device mmio range, but
429  *    this is not enforced.
430  */
431 void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
432 {
433         int error;
434         void *ret;
435 
436         ret = memremap_pages(pgmap, dev_to_node(dev));
437         if (IS_ERR(ret))
438                 return ret;
439 
440         error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
441                         pgmap);
442         if (error)
443                 return ERR_PTR(error);
444         return ret;
445 }
446 EXPORT_SYMBOL_GPL(devm_memremap_pages);
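
    /*
     * Illustrative sketch (not part of this file): a minimal caller of
     * devm_memremap_pages() for a CPU-addressable region, assuming a
     * hypothetical driver that has already reserved the physical range in
     * "res". MEMORY_DEVICE_GENERIC requires no ops, so leaving pgmap->ref
     * NULL lets the internal reference count be used.
     *
     *	pgmap->type = MEMORY_DEVICE_GENERIC;
     *	pgmap->range.start = res->start;
     *	pgmap->range.end = res->end;
     *	pgmap->nr_range = 1;
     *	addr = devm_memremap_pages(dev, pgmap);
     *	if (IS_ERR(addr))
     *		return PTR_ERR(addr);
     */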
447 
448 void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
449 {
450         devm_release_action(dev, devm_memremap_pages_release, pgmap);
451 }
452 EXPORT_SYMBOL_GPL(devm_memunmap_pages);
453 
454 unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
455 {
456         /* number of pfns from base where pfn_to_page() is valid */
457         if (altmap)
458                 return altmap->reserve + altmap->free;
459         return 0;
460 }
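
    /*
     * Worked example (illustrative): with altmap->reserve == 2 and
     * altmap->free == 1024 the offset above is 1026, so pfn_first() skips
     * the first 1026 pfns of the range and pfn_to_page() only becomes
     * valid from that point onward.
     */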
461 
462 void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
463 {
464         altmap->alloc -= nr_pfns;
465 }
466 
467 /**
468  * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
469  * @pfn: page frame number to look up a pgmap for
470  * @pgmap: optional known pgmap that already has a reference
471  *
472  * If @pgmap is non-NULL and covers @pfn it will be returned as-is.  If @pgmap
473  * is non-NULL but does not cover @pfn the reference to it will be released.
474  */
475 struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
476                 struct dev_pagemap *pgmap)
477 {
478         resource_size_t phys = PFN_PHYS(pfn);
479 
480         /*
481          * In the cached case we're already holding a live reference.
482          */
483         if (pgmap) {
484                 if (phys >= pgmap->range.start && phys <= pgmap->range.end)
485                         return pgmap;
486                 put_dev_pagemap(pgmap);
487         }
488 
489         /* fall back to slow path lookup */
490         rcu_read_lock();
491         pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
492         if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
493                 pgmap = NULL;
494         rcu_read_unlock();
495 
496         return pgmap;
497 }
498 EXPORT_SYMBOL_GPL(get_dev_pagemap);
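
    /*
     * Illustrative usage sketch (not part of this file): walking a pfn
     * range while reusing the cached pgmap reference, so the xarray lookup
     * is only repeated when crossing into a new pgmap. "start_pfn" and
     * "end_pfn" are hypothetical bounds.
     *
     *	struct dev_pagemap *pgmap = NULL;
     *	unsigned long pfn;
     *
     *	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
     *		pgmap = get_dev_pagemap(pfn, pgmap);
     *		if (!pgmap)
     *			break;
     *	}
     *	if (pgmap)
     *		put_dev_pagemap(pgmap);
     */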
499 
500 #ifdef CONFIG_DEV_PAGEMAP_OPS
501 void free_devmap_managed_page(struct page *page)
502 {
503         /* notify page idle for dax */
504         if (!is_device_private_page(page)) {
505                 wake_up_var(&page->_refcount);
506                 return;
507         }
508 
509         __ClearPageWaiters(page);
510 
511         mem_cgroup_uncharge(page);
512 
513         /*
514          * When a device_private page is freed, the page->mapping field
515          * may still contain a (stale) mapping value. For example, the
516          * lower bits of page->mapping may still identify the page as an
517          * anonymous page. Ultimately, this entire field is just stale
518          * and wrong, and it will cause errors if not cleared.  One
519          * example is:
520          *
521          *  migrate_vma_pages()
522          *    migrate_vma_insert_page()
523          *      page_add_new_anon_rmap()
524          *        __page_set_anon_rmap()
525          *          ...checks page->mapping, via PageAnon(page) call,
526          *            and incorrectly concludes that the page is an
527          *            anonymous page. Therefore, it incorrectly,
528          *            silently fails to set up the new anon rmap.
529          *
530          * For other types of ZONE_DEVICE pages, migration is either
531          * handled differently or not done at all, so there is no need
532          * to clear page->mapping.
533          */
534         page->mapping = NULL;
535         page->pgmap->ops->page_free(page);
536 }
537 #endif /* CONFIG_DEV_PAGEMAP_OPS */
538 
