~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/tools/testing/nvdimm/test/iomap.c

Version: ~ [ linux-5.11 ] ~ [ linux-5.10.17 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.99 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.176 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.221 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.257 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.257 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.19.8 ] ~ [ linux-3.18.140 ] ~ [ linux-3.17.8 ] ~ [ linux-3.16.85 ] ~ [ linux-3.15.10 ] ~ [ linux-3.14.79 ] ~ [ linux-3.13.11 ] ~ [ linux-3.12.74 ] ~ [ linux-3.11.10 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 // SPDX-License-Identifier: GPL-2.0-only
  2 /*
  3  * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
  4  */
  5 #include <linux/memremap.h>
  6 #include <linux/rculist.h>
  7 #include <linux/export.h>
  8 #include <linux/ioport.h>
  9 #include <linux/module.h>
 10 #include <linux/types.h>
 11 #include <linux/pfn_t.h>
 12 #include <linux/acpi.h>
 13 #include <linux/io.h>
 14 #include <linux/mm.h>
 15 #include "nfit_test.h"
 16 
/* List of registered iomap_ops; readers traverse it under RCU. */
static LIST_HEAD(iomap_head);

/*
 * Callback bundle installed by the nfit test module.  A single static
 * instance is published on iomap_head by nfit_test_setup() and removed
 * by nfit_test_teardown().
 */
static struct iomap_ops {
	nfit_test_lookup_fn nfit_test_lookup;	/* phys addr -> test resource */
	nfit_test_evaluate_dsm_fn evaluate_dsm;	/* ACPI _DSM emulation hook */
	struct list_head list;
} iomap_ops = {
	.list = LIST_HEAD_INIT(iomap_ops.list),
};
 26 
/*
 * nfit_test_setup - register the test module's lookup and DSM callbacks.
 * @lookup: resolves a physical address to a test-owned resource, or NULL
 * @evaluate: handles ACPI _DSM evaluation for test devices
 *
 * Publishes the single iomap_ops instance on iomap_head.  The callback
 * fields are written before list_add_rcu() so readers traversing the
 * list under rcu_read_lock() never observe partially-initialized ops.
 */
void nfit_test_setup(nfit_test_lookup_fn lookup,
		nfit_test_evaluate_dsm_fn evaluate)
{
	iomap_ops.nfit_test_lookup = lookup;
	iomap_ops.evaluate_dsm = evaluate;
	list_add_rcu(&iomap_ops.list, &iomap_head);
}
EXPORT_SYMBOL(nfit_test_setup);
 35 
/*
 * nfit_test_teardown - unregister the callbacks set by nfit_test_setup().
 *
 * Unlinks the ops and waits for a grace period so any reader that raced
 * with the removal has finished using the callbacks before the test
 * module's code can go away.
 */
void nfit_test_teardown(void)
{
	list_del_rcu(&iomap_ops.list);
	synchronize_rcu();
}
EXPORT_SYMBOL(nfit_test_teardown);
 42 
 43 static struct nfit_test_resource *__get_nfit_res(resource_size_t resource)
 44 {
 45         struct iomap_ops *ops;
 46 
 47         ops = list_first_or_null_rcu(&iomap_head, typeof(*ops), list);
 48         if (ops)
 49                 return ops->nfit_test_lookup(resource);
 50         return NULL;
 51 }
 52 
 53 struct nfit_test_resource *get_nfit_res(resource_size_t resource)
 54 {
 55         struct nfit_test_resource *res;
 56 
 57         rcu_read_lock();
 58         res = __get_nfit_res(resource);
 59         rcu_read_unlock();
 60 
 61         return res;
 62 }
 63 EXPORT_SYMBOL(get_nfit_res);
 64 
 65 void __iomem *__nfit_test_ioremap(resource_size_t offset, unsigned long size,
 66                 void __iomem *(*fallback_fn)(resource_size_t, unsigned long))
 67 {
 68         struct nfit_test_resource *nfit_res = get_nfit_res(offset);
 69 
 70         if (nfit_res)
 71                 return (void __iomem *) nfit_res->buf + offset
 72                         - nfit_res->res.start;
 73         return fallback_fn(offset, size);
 74 }
 75 
 76 void __iomem *__wrap_devm_ioremap_nocache(struct device *dev,
 77                 resource_size_t offset, unsigned long size)
 78 {
 79         struct nfit_test_resource *nfit_res = get_nfit_res(offset);
 80 
 81         if (nfit_res)
 82                 return (void __iomem *) nfit_res->buf + offset
 83                         - nfit_res->res.start;
 84         return devm_ioremap_nocache(dev, offset, size);
 85 }
 86 EXPORT_SYMBOL(__wrap_devm_ioremap_nocache);
 87 
 88 void *__wrap_devm_memremap(struct device *dev, resource_size_t offset,
 89                 size_t size, unsigned long flags)
 90 {
 91         struct nfit_test_resource *nfit_res = get_nfit_res(offset);
 92 
 93         if (nfit_res)
 94                 return nfit_res->buf + offset - nfit_res->res.start;
 95         return devm_memremap(dev, offset, size, flags);
 96 }
 97 EXPORT_SYMBOL(__wrap_devm_memremap);
 98 
/*
 * nfit_test_kill - devm action that tears down a pagemap reference count.
 * @_pgmap: the dev_pagemap registered by __wrap_devm_memremap_pages()
 *
 * Prefer the caller-supplied ops->kill/ops->cleanup callbacks; when a
 * callback is absent, operate on the ref directly: kill it, wait for
 * dev_pagemap_percpu_release() to complete pgmap->done, then free the
 * percpu ref.  Kill always runs before cleanup/wait.
 */
static void nfit_test_kill(void *_pgmap)
{
	struct dev_pagemap *pgmap = _pgmap;

	WARN_ON(!pgmap || !pgmap->ref);

	if (pgmap->ops && pgmap->ops->kill)
		pgmap->ops->kill(pgmap);
	else
		percpu_ref_kill(pgmap->ref);

	if (pgmap->ops && pgmap->ops->cleanup) {
		pgmap->ops->cleanup(pgmap);
	} else {
		wait_for_completion(&pgmap->done);
		percpu_ref_exit(pgmap->ref);
	}
}
117 
118 static void dev_pagemap_percpu_release(struct percpu_ref *ref)
119 {
120         struct dev_pagemap *pgmap =
121                 container_of(ref, struct dev_pagemap, internal_ref);
122 
123         complete(&pgmap->done);
124 }
125 
/*
 * __wrap_devm_memremap_pages - emulate devm_memremap_pages() for test pfns.
 *
 * Addresses outside the test harness go to the real devm_memremap_pages().
 * For test-owned ranges, validate/initialize the pagemap reference count
 * the same way the real implementation does, register nfit_test_kill()
 * as the devm teardown action, and return a pointer into the emulated
 * buffer instead of mapping struct pages.
 */
void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	int error;
	resource_size_t offset = pgmap->res.start;
	struct nfit_test_resource *nfit_res = get_nfit_res(offset);

	if (!nfit_res)
		return devm_memremap_pages(dev, pgmap);

	pgmap->dev = dev;
	if (!pgmap->ref) {
		/* internal refcounting: ops must not be partially provided */
		if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
			return ERR_PTR(-EINVAL);

		init_completion(&pgmap->done);
		error = percpu_ref_init(&pgmap->internal_ref,
				dev_pagemap_percpu_release, 0, GFP_KERNEL);
		if (error)
			return ERR_PTR(error);
		pgmap->ref = &pgmap->internal_ref;
	} else {
		/* caller-managed ref requires both teardown callbacks */
		if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
			WARN(1, "Missing reference count teardown definition\n");
			return ERR_PTR(-EINVAL);
		}
	}

	error = devm_add_action_or_reset(dev, nfit_test_kill, pgmap);
	if (error)
		return ERR_PTR(error);
	return nfit_res->buf + offset - nfit_res->res.start;
}
EXPORT_SYMBOL_GPL(__wrap_devm_memremap_pages);
158 EXPORT_SYMBOL_GPL(__wrap_devm_memremap_pages);
159 
160 pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags)
161 {
162         struct nfit_test_resource *nfit_res = get_nfit_res(addr);
163 
164         if (nfit_res)
165                 flags &= ~PFN_MAP;
166         return phys_to_pfn_t(addr, flags);
167 }
168 EXPORT_SYMBOL(__wrap_phys_to_pfn_t);
169 
170 void *__wrap_memremap(resource_size_t offset, size_t size,
171                 unsigned long flags)
172 {
173         struct nfit_test_resource *nfit_res = get_nfit_res(offset);
174 
175         if (nfit_res)
176                 return nfit_res->buf + offset - nfit_res->res.start;
177         return memremap(offset, size, flags);
178 }
179 EXPORT_SYMBOL(__wrap_memremap);
180 
/*
 * __wrap_devm_memunmap - unmap a devm_memremap()ed region.
 *
 * Test-owned mappings point straight into the emulated buffer and need
 * no unmapping; everything else is forwarded to the real helper.
 *
 * Fix: do not 'return' a void expression from a void function -- that
 * is a GNU extension, not ISO C; call and return separately.
 */
void __wrap_devm_memunmap(struct device *dev, void *addr)
{
	struct nfit_test_resource *nfit_res = get_nfit_res((long) addr);

	if (nfit_res)
		return;

	devm_memunmap(dev, addr);
}
EXPORT_SYMBOL(__wrap_devm_memunmap);
190 
/* ioremap_nocache() wrapper: redirect test-owned ranges to nfit buffers. */
void __iomem *__wrap_ioremap_nocache(resource_size_t offset, unsigned long size)
{
	return __nfit_test_ioremap(offset, size, ioremap_nocache);
}
EXPORT_SYMBOL(__wrap_ioremap_nocache);
196 
/* ioremap_wc() wrapper: redirect test-owned ranges to nfit buffers. */
void __iomem *__wrap_ioremap_wc(resource_size_t offset, unsigned long size)
{
	return __nfit_test_ioremap(offset, size, ioremap_wc);
}
EXPORT_SYMBOL(__wrap_ioremap_wc);
202 
203 void __wrap_iounmap(volatile void __iomem *addr)
204 {
205         struct nfit_test_resource *nfit_res = get_nfit_res((long) addr);
206         if (nfit_res)
207                 return;
208         return iounmap(addr);
209 }
210 EXPORT_SYMBOL(__wrap_iounmap);
211 
/*
 * __wrap_memunmap - unmap a memremap()ed region.
 *
 * Test-owned mappings point into the emulated buffer and need no
 * unmapping; everything else goes to the real memunmap().
 *
 * Fix: do not 'return' a void expression from a void function -- GNU
 * extension only, not ISO C.
 */
void __wrap_memunmap(void *addr)
{
	struct nfit_test_resource *nfit_res = get_nfit_res((long) addr);

	if (nfit_res)
		return;

	memunmap(addr);
}
EXPORT_SYMBOL(__wrap_memunmap);
221 
222 static bool nfit_test_release_region(struct device *dev,
223                 struct resource *parent, resource_size_t start,
224                 resource_size_t n);
225 
226 static void nfit_devres_release(struct device *dev, void *data)
227 {
228         struct resource *res = *((struct resource **) data);
229 
230         WARN_ON(!nfit_test_release_region(NULL, &iomem_resource, res->start,
231                         resource_size(res)));
232 }
233 
234 static int match(struct device *dev, void *__res, void *match_data)
235 {
236         struct resource *res = *((struct resource **) __res);
237         resource_size_t start = *((resource_size_t *) match_data);
238 
239         return res->start == start;
240 }
241 
/*
 * nfit_test_release_region - emulate __release_region() for test resources.
 * @dev: when non-NULL, release via the devres entry installed by
 *	nfit_test_request_region()
 * @parent: only &iomem_resource requests are intercepted
 * @start: region start
 * @n: region size
 *
 * Returns true when the region belonged to a test-emulated resource and
 * was handled here, false when the caller should fall back to the real
 * release path.
 */
static bool nfit_test_release_region(struct device *dev,
		struct resource *parent, resource_size_t start,
		resource_size_t n)
{
	if (parent == &iomem_resource) {
		struct nfit_test_resource *nfit_res = get_nfit_res(start);

		if (nfit_res) {
			struct nfit_test_request *req;
			struct resource *res = NULL;

			if (dev) {
				/*
				 * Fires nfit_devres_release(), which re-enters
				 * this function with dev == NULL to do the
				 * actual list removal.
				 */
				devres_release(dev, nfit_devres_release, match,
						&start);
				return true;
			}

			spin_lock(&nfit_res->lock);
			list_for_each_entry(req, &nfit_res->requests, list)
				if (req->res.start == start) {
					res = &req->res;
					list_del(&req->list);
					break;
				}
			spin_unlock(&nfit_res->lock);

			WARN(!res || resource_size(res) != n,
					"%s: start: %llx n: %llx mismatch: %pr\n",
						__func__, start, n, res);
			/* res points inside req, so free only on a real match */
			if (res)
				kfree(req);
			return true;
		}
	}
	return false;
}
278 
279 static struct resource *nfit_test_request_region(struct device *dev,
280                 struct resource *parent, resource_size_t start,
281                 resource_size_t n, const char *name, int flags)
282 {
283         struct nfit_test_resource *nfit_res;
284 
285         if (parent == &iomem_resource) {
286                 nfit_res = get_nfit_res(start);
287                 if (nfit_res) {
288                         struct nfit_test_request *req;
289                         struct resource *res = NULL;
290 
291                         if (start + n > nfit_res->res.start
292                                         + resource_size(&nfit_res->res)) {
293                                 pr_debug("%s: start: %llx n: %llx overflow: %pr\n",
294                                                 __func__, start, n,
295                                                 &nfit_res->res);
296                                 return NULL;
297                         }
298 
299                         spin_lock(&nfit_res->lock);
300                         list_for_each_entry(req, &nfit_res->requests, list)
301                                 if (start == req->res.start) {
302                                         res = &req->res;
303                                         break;
304                                 }
305                         spin_unlock(&nfit_res->lock);
306 
307                         if (res) {
308                                 WARN(1, "%pr already busy\n", res);
309                                 return NULL;
310                         }
311 
312                         req = kzalloc(sizeof(*req), GFP_KERNEL);
313                         if (!req)
314                                 return NULL;
315                         INIT_LIST_HEAD(&req->list);
316                         res = &req->res;
317 
318                         res->start = start;
319                         res->end = start + n - 1;
320                         res->name = name;
321                         res->flags = resource_type(parent);
322                         res->flags |= IORESOURCE_BUSY | flags;
323                         spin_lock(&nfit_res->lock);
324                         list_add(&req->list, &nfit_res->requests);
325                         spin_unlock(&nfit_res->lock);
326 
327                         if (dev) {
328                                 struct resource **d;
329 
330                                 d = devres_alloc(nfit_devres_release,
331                                                 sizeof(struct resource *),
332                                                 GFP_KERNEL);
333                                 if (!d)
334                                         return NULL;
335                                 *d = res;
336                                 devres_add(dev, d);
337                         }
338 
339                         pr_debug("%s: %pr\n", __func__, res);
340                         return res;
341                 }
342         }
343         if (dev)
344                 return __devm_request_region(dev, parent, start, n, name);
345         return __request_region(parent, start, n, name, flags);
346 }
347 
/*
 * __wrap___request_region - non-devm entry point for region requests.
 *
 * Forwards to nfit_test_request_region() with no device, so the request
 * is not tied to devres and must be released explicitly.
 */
struct resource *__wrap___request_region(struct resource *parent,
		resource_size_t start, resource_size_t n, const char *name,
		int flags)
{
	return nfit_test_request_region(NULL, parent, start, n, name, flags);
}
EXPORT_SYMBOL(__wrap___request_region);
355 
356 int __wrap_insert_resource(struct resource *parent, struct resource *res)
357 {
358         if (get_nfit_res(res->start))
359                 return 0;
360         return insert_resource(parent, res);
361 }
362 EXPORT_SYMBOL(__wrap_insert_resource);
363 
364 int __wrap_remove_resource(struct resource *res)
365 {
366         if (get_nfit_res(res->start))
367                 return 0;
368         return remove_resource(res);
369 }
370 EXPORT_SYMBOL(__wrap_remove_resource);
371 
372 struct resource *__wrap___devm_request_region(struct device *dev,
373                 struct resource *parent, resource_size_t start,
374                 resource_size_t n, const char *name)
375 {
376         if (!dev)
377                 return NULL;
378         return nfit_test_request_region(dev, parent, start, n, name, 0);
379 }
380 EXPORT_SYMBOL(__wrap___devm_request_region);
381 
382 void __wrap___release_region(struct resource *parent, resource_size_t start,
383                 resource_size_t n)
384 {
385         if (!nfit_test_release_region(NULL, parent, start, n))
386                 __release_region(parent, start, n);
387 }
388 EXPORT_SYMBOL(__wrap___release_region);
389 
390 void __wrap___devm_release_region(struct device *dev, struct resource *parent,
391                 resource_size_t start, resource_size_t n)
392 {
393         if (!nfit_test_release_region(dev, parent, start, n))
394                 __devm_release_region(dev, parent, start, n);
395 }
396 EXPORT_SYMBOL(__wrap___devm_release_region);
397 
/*
 * __wrap_acpi_evaluate_object - intercept _FIT evaluation for test devices.
 *
 * For a test-owned handle evaluating "_FIT" with a result buffer, hand
 * back the acpi_object stashed in the test resource's buf.  Everything
 * else goes to the real acpi_evaluate_object().
 */
acpi_status __wrap_acpi_evaluate_object(acpi_handle handle, acpi_string path,
		struct acpi_object_list *p, struct acpi_buffer *buf)
{
	struct nfit_test_resource *nfit_res = get_nfit_res((long) handle);
	union acpi_object **obj;

	if (!nfit_res || strcmp(path, "_FIT") || !buf)
		return acpi_evaluate_object(handle, path, p, buf);

	obj = nfit_res->buf;
	buf->length = sizeof(union acpi_object);
	buf->pointer = *obj;
	return AE_OK;
}
EXPORT_SYMBOL(__wrap_acpi_evaluate_object);
413 
/*
 * __wrap_acpi_evaluate_dsm - route _DSM evaluation to the test callback.
 *
 * Tries the registered evaluate_dsm() under RCU first; when no ops are
 * registered, or the callback reports an error pointer, fall back to
 * the real acpi_evaluate_dsm().
 */
union acpi_object * __wrap_acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid,
		u64 rev, u64 func, union acpi_object *argv4)
{
	union acpi_object *obj = ERR_PTR(-ENXIO);
	struct iomap_ops *ops;

	rcu_read_lock();
	ops = list_first_or_null_rcu(&iomap_head, typeof(*ops), list);
	if (ops)
		obj = ops->evaluate_dsm(handle, guid, rev, func, argv4);
	rcu_read_unlock();

	if (IS_ERR(obj))
		return acpi_evaluate_dsm(handle, guid, rev, func, argv4);
	return obj;
}
EXPORT_SYMBOL(__wrap_acpi_evaluate_dsm);
431 
432 MODULE_LICENSE("GPL v2");
433 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp