TOMOYO Linux Cross Reference
Linux/lib/genalloc.c

/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface.  Uses for this include on-device special
 * memory, uncached memory, etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks.  This
 * is implemented by using atomic operations and retries on any
 * conflicts.  The disadvantage is that there may be livelocks in
 * extreme cases.  For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available.  If new memory is added to the pool, a lock still has
 * to be taken.  So any user relying on locklessness has to ensure
 * that sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers.  So code that uses
 * the allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of_device.h>

/* Size in bytes of a chunk; end_addr is inclusive, hence the +1. */
static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
{
        return chunk->end_addr - chunk->start_addr + 1;
}

/*
 * Atomically set the bits in @mask_to_set, retrying on cmpxchg
 * conflicts.  Fails with -EBUSY if any of the bits is already set.
 */
static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
        unsigned long val, nval;

        nval = *addr;
        do {
                val = nval;
                if (val & mask_to_set)
                        return -EBUSY;
                cpu_relax();
        } while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

        return 0;
}

/*
 * Atomically clear the bits in @mask_to_clear, retrying on cmpxchg
 * conflicts.  Fails with -EBUSY if any of the bits is already clear.
 */
static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
        unsigned long val, nval;

        nval = *addr;
        do {
                val = nval;
                if ((val & mask_to_clear) != mask_to_clear)
                        return -EBUSY;
                cpu_relax();
        } while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

        return 0;
}

/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map locklessly.  Several users
 * can set/clear the same bitmap simultaneously without a lock.  If two
 * users race to set the same bit, one of them returns the number of
 * bits still to be set; on success the return value is 0.
 */
static int bitmap_set_ll(unsigned long *map, int start, int nr)
{
        unsigned long *p = map + BIT_WORD(start);
        const int size = start + nr;
        int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
        unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

        while (nr - bits_to_set >= 0) {
                if (set_bits_ll(p, mask_to_set))
                        return nr;
                nr -= bits_to_set;
                bits_to_set = BITS_PER_LONG;
                mask_to_set = ~0UL;
                p++;
        }
        if (nr) {
                mask_to_set &= BITMAP_LAST_WORD_MASK(size);
                if (set_bits_ll(p, mask_to_set))
                        return nr;
        }

        return 0;
}

/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map locklessly.  Several users
 * can set/clear the same bitmap simultaneously without a lock.  If two
 * users race to clear the same bit, one of them returns the number of
 * bits still to be cleared; on success the return value is 0.
 */
static int bitmap_clear_ll(unsigned long *map, int start, int nr)
{
        unsigned long *p = map + BIT_WORD(start);
        const int size = start + nr;
        int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
        unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

        while (nr - bits_to_clear >= 0) {
                if (clear_bits_ll(p, mask_to_clear))
                        return nr;
                nr -= bits_to_clear;
                bits_to_clear = BITS_PER_LONG;
                mask_to_clear = ~0UL;
                p++;
        }
        if (nr) {
                mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
                if (clear_bits_ll(p, mask_to_clear))
                        return nr;
        }

        return 0;
}

/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
        struct gen_pool *pool;

        pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
        if (pool != NULL) {
                spin_lock_init(&pool->lock);
                INIT_LIST_HEAD(&pool->chunks);
                pool->min_alloc_order = min_alloc_order;
                pool->algo = gen_pool_first_fit;
                pool->data = NULL;
        }
        return pool;
}
EXPORT_SYMBOL(gen_pool_create);
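
/*
 * Illustrative sketch, not part of the original file: create a pool
 * whose bitmap granularity is 2^5 = 32 bytes per bit.  The pool starts
 * out empty; chunks must be added with gen_pool_add_virt() below
 * before any allocation can succeed.
 */
static struct gen_pool * __maybe_unused example_create_pool(void)
{
        /* min_alloc_order = 5: every allocation is rounded up to 32 bytes */
        return gen_pool_create(5, -1 /* no NUMA node preference */);
}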

/**
 * gen_pool_add_virt - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
                 size_t size, int nid)
{
        struct gen_pool_chunk *chunk;
        int nbits = size >> pool->min_alloc_order;
        int nbytes = sizeof(struct gen_pool_chunk) +
                                BITS_TO_LONGS(nbits) * sizeof(long);

        chunk = kzalloc_node(nbytes, GFP_KERNEL, nid);
        if (unlikely(chunk == NULL))
                return -ENOMEM;

        chunk->phys_addr = phys;
        chunk->start_addr = virt;
        chunk->end_addr = virt + size - 1;
        atomic_set(&chunk->avail, size);

        spin_lock(&pool->lock);
        list_add_rcu(&chunk->next_chunk, &pool->chunks);
        spin_unlock(&pool->lock);

        return 0;
}
EXPORT_SYMBOL(gen_pool_add_virt);
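
/*
 * Illustrative sketch, not part of the original file: seed a pool with
 * one chunk of on-device memory.  The parameters sram_virt, sram_phys
 * and sram_size are assumptions standing in for a driver's ioremap()ed
 * region.
 */
static int __maybe_unused example_add_sram(struct gen_pool *pool,
                unsigned long sram_virt, phys_addr_t sram_phys,
                size_t sram_size)
{
        /* the size should be a multiple of 1 << pool->min_alloc_order */
        return gen_pool_add_virt(pool, sram_virt, sram_phys, sram_size, -1);
}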

/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
        struct gen_pool_chunk *chunk;
        phys_addr_t paddr = -1;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
                        paddr = chunk->phys_addr + (addr - chunk->start_addr);
                        break;
                }
        }
        rcu_read_unlock();

        return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);
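
/*
 * Illustrative sketch, not part of the original file: translate an
 * address handed out by gen_pool_alloc() back to its physical address,
 * e.g. for programming a DMA engine by hand.
 */
static phys_addr_t __maybe_unused example_translate(struct gen_pool *pool,
                unsigned long vaddr)
{
        phys_addr_t paddr = gen_pool_virt_to_phys(pool, vaddr);

        /* (phys_addr_t)-1 means vaddr belongs to no chunk in this pool */
        return paddr;
}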

/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool. Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
        struct list_head *_chunk, *_next_chunk;
        struct gen_pool_chunk *chunk;
        int order = pool->min_alloc_order;
        int bit, end_bit;

        list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
                chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
                list_del(&chunk->next_chunk);

                end_bit = chunk_size(chunk) >> order;
                bit = find_next_bit(chunk->bits, end_bit, 0);
                BUG_ON(bit < end_bit);

                kfree(chunk);
        }
        kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);

/**
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in NMI handlers on architectures without an
 * NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
        struct gen_pool_chunk *chunk;
        unsigned long addr = 0;
        int order = pool->min_alloc_order;
        int nbits, start_bit, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
        BUG_ON(in_nmi());
#endif

        if (size == 0)
                return 0;

        nbits = (size + (1UL << order) - 1) >> order;
        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (size > atomic_read(&chunk->avail))
                        continue;

                start_bit = 0;  /* restart the search in every chunk */
                end_bit = chunk_size(chunk) >> order;
retry:
                start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
                                pool->data);
                if (start_bit >= end_bit)
                        continue;
                remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
                if (remain) {
                        remain = bitmap_clear_ll(chunk->bits, start_bit,
                                                 nbits - remain);
                        BUG_ON(remain);
                        goto retry;
                }

                addr = chunk->start_addr + ((unsigned long)start_bit << order);
                size = nbits << order;
                atomic_sub(size, &chunk->avail);
                break;
        }
        rcu_read_unlock();
        return addr;
}
EXPORT_SYMBOL(gen_pool_alloc);
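
/*
 * Illustrative sketch, not part of the original file: a typical
 * allocate/use/free cycle.  Sizes are rounded up internally to the
 * pool's minimum allocation order, so the free must pass the same
 * size that was requested.
 */
static void __maybe_unused example_alloc_free(struct gen_pool *pool)
{
        unsigned long addr = gen_pool_alloc(pool, 100);

        if (!addr)
                return;         /* pool exhausted; 0 is the failure value */
        /* ... use the 100 bytes at addr ... */
        gen_pool_free(pool, addr, 100);
}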

/**
 * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: dma-view physical address return value.  Use NULL if unneeded.
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in NMI handlers on architectures without an
 * NMI-safe cmpxchg implementation.
 */
void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
        unsigned long vaddr;

        if (!pool)
                return NULL;

        vaddr = gen_pool_alloc(pool, size);
        if (!vaddr)
                return NULL;

        if (dma)
                *dma = gen_pool_virt_to_phys(pool, vaddr);

        return (void *)vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_alloc);
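
/*
 * Illustrative sketch, not part of the original file: grab a buffer
 * together with its DMA-view address in one call, as a driver would
 * before handing the address to hardware.
 */
static void * __maybe_unused example_dma_buf(struct gen_pool *pool,
                size_t len, dma_addr_t *dma)
{
        void *cpu_addr = gen_pool_dma_alloc(pool, len, dma);

        /* *dma is only valid when cpu_addr is non-NULL */
        return cpu_addr;
}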

/**
 * gen_pool_free - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 *
 * Free previously allocated special memory back to the specified
 * pool.  Cannot be used in NMI handlers on architectures without an
 * NMI-safe cmpxchg implementation.
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
        struct gen_pool_chunk *chunk;
        int order = pool->min_alloc_order;
        int start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
        BUG_ON(in_nmi());
#endif

        nbits = (size + (1UL << order) - 1) >> order;
        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
                        BUG_ON(addr + size - 1 > chunk->end_addr);
                        start_bit = (addr - chunk->start_addr) >> order;
                        remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
                        BUG_ON(remain);
                        size = nbits << order;
                        atomic_add(size, &chunk->avail);
                        rcu_read_unlock();
                        return;
                }
        }
        rcu_read_unlock();
        BUG();
}
EXPORT_SYMBOL(gen_pool_free);

/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool:       the generic memory pool
 * @func:       func to call
 * @data:       additional data used by @func
 *
 * Call @func for every chunk of generic memory pool.  The @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
        void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
        void *data)
{
        struct gen_pool_chunk *chunk;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
                func(pool, chunk, data);
        rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);
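
/*
 * Illustrative sketch, not part of the original file: a callback that
 * counts the chunks in a pool.  It runs under rcu_read_lock(), so it
 * must not sleep.  Usage:
 *
 *      unsigned int n = 0;
 *      gen_pool_for_each_chunk(pool, example_count_chunk, &n);
 */
static void __maybe_unused example_count_chunk(struct gen_pool *pool,
                struct gen_pool_chunk *chunk, void *data)
{
        (*(unsigned int *)data)++;
}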

/**
 * addr_in_gen_pool - checks if an address falls within the range of a pool
 * @pool:       the generic memory pool
 * @start:      start address
 * @size:       size of the region
 *
 * Check if the range of addresses falls within the specified pool. Returns
 * true if the entire range is contained in the pool and false otherwise.
 */
bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
                        size_t size)
{
        bool found = false;
        unsigned long end = start + size - 1;
        struct gen_pool_chunk *chunk;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (start >= chunk->start_addr && start <= chunk->end_addr) {
                        if (end <= chunk->end_addr) {
                                found = true;
                                break;
                        }
                }
        }
        rcu_read_unlock();
        return found;
}
/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space of
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
        struct gen_pool_chunk *chunk;
        size_t avail = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
                avail += atomic_read(&chunk->avail);
        rcu_read_unlock();
        return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);

/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size of
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
        struct gen_pool_chunk *chunk;
        size_t size = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
                size += chunk_size(chunk);
        rcu_read_unlock();
        return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);

/**
 * gen_pool_set_algo - set the allocation algorithm
 * @pool: pool to change the allocation algorithm of
 * @algo: custom algorithm function
 * @data: additional data used by @algo
 *
 * Call @algo for each memory allocation in the pool.
 * If @algo is NULL use gen_pool_first_fit as default
 * memory allocation function.
 */
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
{
        rcu_read_lock();

        pool->algo = algo;
        if (!pool->algo)
                pool->algo = gen_pool_first_fit;

        pool->data = data;

        rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_set_algo);
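
/*
 * Illustrative sketch, not part of the original file: switch a pool to
 * the best-fit strategy defined below.  This is best done before the
 * pool sees concurrent allocations, since the algo/data update is not
 * atomic with respect to in-flight gen_pool_alloc() calls.
 */
static void __maybe_unused example_use_best_fit(struct gen_pool *pool)
{
        gen_pool_set_algo(pool, gen_pool_best_fit, NULL);
}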

/**
 * gen_pool_first_fit - find the first available region
 * of memory matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 */
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
                unsigned long start, unsigned int nr, void *data)
{
        return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);

/**
 * gen_pool_first_fit_order_align - find the first available region
 * of memory matching the size requirement. The region will be aligned
 * to the order of the size specified.
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 */
unsigned long gen_pool_first_fit_order_align(unsigned long *map,
                unsigned long size, unsigned long start,
                unsigned int nr, void *data)
{
        unsigned long align_mask = roundup_pow_of_two(nr) - 1;

        return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
}
EXPORT_SYMBOL(gen_pool_first_fit_order_align);

/**
 * gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 *
 * Iterate over the bitmap to find the smallest free region
 * from which we can allocate the memory.
 */
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
                unsigned long start, unsigned int nr, void *data)
{
        unsigned long start_bit = size;
        unsigned long len = size + 1;
        unsigned long index;

        index = bitmap_find_next_zero_area(map, size, start, nr, 0);

        while (index < size) {
                int next_bit = find_next_bit(map, size, index + nr);
                if ((next_bit - index) < len) {
                        len = next_bit - index;
                        start_bit = index;
                        if (len == nr)
                                return start_bit;
                }
                index = bitmap_find_next_zero_area(map, size,
                                                   next_bit + 1, nr, 0);
        }

        return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);

/* devres release callback: destroy the pool when the device goes away */
static void devm_gen_pool_release(struct device *dev, void *res)
{
        gen_pool_destroy(*(struct gen_pool **)res);
}

/**
 * devm_gen_pool_create - managed gen_pool_create
 * @dev: device that provides the gen_pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface. The pool will be
 * automatically destroyed by the device management code.
 */
struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
                int nid)
{
        struct gen_pool **ptr, *pool;

        ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return NULL;

        pool = gen_pool_create(min_alloc_order, nid);
        if (pool) {
                *ptr = pool;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
        }

        return pool;
}
EXPORT_SYMBOL(devm_gen_pool_create);
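
/*
 * Illustrative sketch, not part of the original file: managed pool
 * creation as a driver's probe routine might do it.  No explicit
 * gen_pool_destroy() is needed; devres tears the pool down when the
 * device is unbound.
 */
static int __maybe_unused example_probe_pool(struct device *dev)
{
        struct gen_pool *pool;

        pool = devm_gen_pool_create(dev, 5, dev_to_node(dev));
        return pool ? 0 : -ENOMEM;
}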

/**
 * dev_get_gen_pool - Obtain the gen_pool (if any) for a device
 * @dev: device to retrieve the gen_pool from
 *
 * Returns the gen_pool for the device if one is present, or NULL.
 */
struct gen_pool *dev_get_gen_pool(struct device *dev)
{
        struct gen_pool **p = devres_find(dev, devm_gen_pool_release, NULL,
                                        NULL);

        if (!p)
                return NULL;
        return *p;
}
EXPORT_SYMBOL_GPL(dev_get_gen_pool);

#ifdef CONFIG_OF
/**
 * of_get_named_gen_pool - find a pool by phandle property
 * @np: device node
 * @propname: property name containing phandle(s)
 * @index: index into the phandle array
 *
 * Returns the pool that contains the chunk starting at the physical
 * address of the device tree node pointed at by the phandle property,
 * or NULL if not found.
 */
struct gen_pool *of_get_named_gen_pool(struct device_node *np,
        const char *propname, int index)
{
        struct platform_device *pdev;
        struct device_node *np_pool;

        np_pool = of_parse_phandle(np, propname, index);
        if (!np_pool)
                return NULL;
        pdev = of_find_device_by_node(np_pool);
        of_node_put(np_pool);
        if (!pdev)
                return NULL;
        return dev_get_gen_pool(&pdev->dev);
}
EXPORT_SYMBOL_GPL(of_get_named_gen_pool);
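
/*
 * Illustrative sketch, not part of the original file: look up a pool
 * referenced from a device tree property.  The property name "sram"
 * is an assumption; bindings define their own names, e.g.
 *
 *      sram = <&smu_sram>;
 */
static struct gen_pool * __maybe_unused example_pool_from_dt(
                struct device_node *np)
{
        return of_get_named_gen_pool(np, "sram", 0);
}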
#endif /* CONFIG_OF */

