~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/include/linux/memremap.h

Version: ~ [ linux-5.10-rc5 ] ~ [ linux-5.9.10 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.79 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.159 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.208 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.245 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.245 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.19.8 ] ~ [ linux-3.18.140 ] ~ [ linux-3.17.8 ] ~ [ linux-3.16.85 ] ~ [ linux-3.15.10 ] ~ [ linux-3.14.79 ] ~ [ linux-3.13.11 ] ~ [ linux-3.12.74 ] ~ [ linux-3.11.10 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 #ifndef _LINUX_MEMREMAP_H_
  2 #define _LINUX_MEMREMAP_H_
  3 #include <linux/mm.h>
  4 #include <linux/ioport.h>
  5 #include <linux/percpu-refcount.h>
  6 
  7 struct resource;
  8 struct device;
  9 
/**
 * struct vmem_altmap - pre-allocated storage for vmemmap_populate
 * @base_pfn: base of the entire dev_pagemap mapping
 * @reserve: pages mapped, but reserved for driver use (relative to @base_pfn)
 * @free: free pages set aside in the mapping for memmap storage
 * @align: pages reserved to meet allocation alignments
 * @alloc: track pages consumed, private to vmemmap_populate()
 *
 * @base_pfn and @reserve are fixed at creation time (const); the remaining
 * bookkeeping fields are updated by the vmemmap allocator as pages are
 * handed out.
 */
struct vmem_altmap {
	const unsigned long base_pfn;
	const unsigned long reserve;
	unsigned long free;
	unsigned long align;
	unsigned long alloc;
};
 25 
/* Number of pfns (reserve + free) set aside at the start of the mapping. */
unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
/* Return @nr_pfns previously allocated from @altmap back to its free pool. */
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
 28 
/*
 * Look up the vmem_altmap (if any) backing the memmap that contains
 * @memmap_start.  Only meaningful when the vmemmap can actually be
 * backed by device memory, i.e. SPARSEMEM_VMEMMAP + ZONE_DEVICE;
 * otherwise no altmap can exist and the stub returns NULL.
 */
#if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_ZONE_DEVICE)
struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start);
#else
static inline struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
{
	return NULL;
}
#endif
 37 
/**
 * struct dev_pagemap - metadata for ZONE_DEVICE mappings
 * @altmap: pre-allocated/reserved memory for vmemmap allocations
 * @res: physical address range covered by @ref
 * @ref: reference count that pins the devm_memremap_pages() mapping
 * @dev: host device of the mapping for debug
 */
struct dev_pagemap {
	struct vmem_altmap *altmap;
	const struct resource *res;
	struct percpu_ref *ref;
	struct device *dev;
};
 51 
#ifdef CONFIG_ZONE_DEVICE
/*
 * Map device memory into the kernel's direct map and arrange for struct
 * pages to describe it; the mapping is torn down automatically via devres
 * when @dev is unbound.  @ref pins the mapping; @altmap (optional) supplies
 * pre-reserved storage for the memmap itself.
 */
void *devm_memremap_pages(struct device *dev, struct resource *res,
		struct percpu_ref *ref, struct vmem_altmap *altmap);
/* Look up the dev_pagemap covering physical address @phys, or NULL. */
struct dev_pagemap *find_dev_pagemap(resource_size_t phys);
#else
static inline void *devm_memremap_pages(struct device *dev,
		struct resource *res, struct percpu_ref *ref,
		struct vmem_altmap *altmap)
{
	/*
	 * Fail attempts to call devm_memremap_pages() without
	 * ZONE_DEVICE support enabled, this requires callers to fall
	 * back to plain devm_memremap() based on config
	 */
	WARN_ON_ONCE(1);
	return ERR_PTR(-ENXIO);
}

/* Without ZONE_DEVICE no dev_pagemap can exist, so lookups always miss. */
static inline struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
{
	return NULL;
}
#endif
 75 
 76 /**
 77  * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 78  * @pfn: page frame number to lookup page_map
 79  * @pgmap: optional known pgmap that already has a reference
 80  *
 81  * @pgmap allows the overhead of a lookup to be bypassed when @pfn lands in the
 82  * same mapping.
 83  */
 84 static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
 85                 struct dev_pagemap *pgmap)
 86 {
 87         const struct resource *res = pgmap ? pgmap->res : NULL;
 88         resource_size_t phys = PFN_PHYS(pfn);
 89 
 90         /*
 91          * In the cached case we're already holding a live reference so
 92          * we can simply do a blind increment
 93          */
 94         if (res && phys >= res->start && phys <= res->end) {
 95                 percpu_ref_get(pgmap->ref);
 96                 return pgmap;
 97         }
 98 
 99         /* fall back to slow path lookup */
100         rcu_read_lock();
101         pgmap = find_dev_pagemap(phys);
102         if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
103                 pgmap = NULL;
104         rcu_read_unlock();
105 
106         return pgmap;
107 }
108 
109 static inline void put_dev_pagemap(struct dev_pagemap *pgmap)
110 {
111         if (pgmap)
112                 percpu_ref_put(pgmap->ref);
113 }
114 #endif /* _LINUX_MEMREMAP_H_ */
115 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp