TOMOYO Linux Cross Reference
Linux/lib/genalloc.c

/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface.  Uses for this include on-device special
 * memory, uncached memory etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks.  This
 * is implemented by using atomic operations and retries on any
 * conflicts.  The disadvantage is that there may be livelocks in
 * extreme cases.  For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available.  If new memory is added to the pool, a lock still has
 * to be taken.  So any user relying on locklessness has to ensure
 * that sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers.  So code that uses
 * the allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of_device.h>

static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
{
        return chunk->end_addr - chunk->start_addr + 1;
}

/*
 * Atomically set the bits in @mask_to_set at @addr.  Returns -EBUSY
 * without changing anything if any of those bits is already set;
 * retries on cmpxchg conflicts with concurrent updaters.
 */
static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
        unsigned long val, nval;

        nval = *addr;
        do {
                val = nval;
                if (val & mask_to_set)
                        return -EBUSY;
                cpu_relax();
        } while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

        return 0;
}

/*
 * Atomically clear the bits in @mask_to_clear at @addr.  Returns
 * -EBUSY without changing anything unless all of those bits are
 * currently set; retries on cmpxchg conflicts with concurrent
 * updaters.
 */
static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
        unsigned long val, nval;

        nval = *addr;
        do {
                val = nval;
                if ((val & mask_to_clear) != mask_to_clear)
                        return -EBUSY;
                cpu_relax();
        } while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

        return 0;
}

/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map locklessly.  Several users
 * can set/clear the same bitmap simultaneously without a lock.  If two
 * users set the same bit, the loser returns the number of bits still
 * to be set; on full success 0 is returned.
 */
static int bitmap_set_ll(unsigned long *map, int start, int nr)
{
        unsigned long *p = map + BIT_WORD(start);
        const int size = start + nr;
        int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
        unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

        while (nr - bits_to_set >= 0) {
                if (set_bits_ll(p, mask_to_set))
                        return nr;
                nr -= bits_to_set;
                bits_to_set = BITS_PER_LONG;
                mask_to_set = ~0UL;
                p++;
        }
        if (nr) {
                mask_to_set &= BITMAP_LAST_WORD_MASK(size);
                if (set_bits_ll(p, mask_to_set))
                        return nr;
        }

        return 0;
}
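
/*
 * Worked example (illustrative, not part of the original file): on a
 * 64-bit machine, bitmap_set_ll(map, 62, 4) must touch two words.
 * First pass: BITMAP_FIRST_WORD_MASK(62) covers bits 62-63 of word 0
 * (bits_to_set == 2), so those bits are set and nr drops to 2.  The
 * loop then exits, and the tail sets bits 0-1 of word 1 via
 * BITMAP_LAST_WORD_MASK(66).  If either set_bits_ll() call loses to a
 * concurrent setter, the remaining count is returned so the caller
 * can roll back the bits that were already set.
 */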

/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map locklessly.  Several users
 * can set/clear the same bitmap simultaneously without a lock.  If two
 * users clear the same bit, the loser returns the number of bits still
 * to be cleared; on full success 0 is returned.
 */
static int bitmap_clear_ll(unsigned long *map, int start, int nr)
{
        unsigned long *p = map + BIT_WORD(start);
        const int size = start + nr;
        int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
        unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

        while (nr - bits_to_clear >= 0) {
                if (clear_bits_ll(p, mask_to_clear))
                        return nr;
                nr -= bits_to_clear;
                bits_to_clear = BITS_PER_LONG;
                mask_to_clear = ~0UL;
                p++;
        }
        if (nr) {
                mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
                if (clear_bits_ll(p, mask_to_clear))
                        return nr;
        }

        return 0;
}
/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
        struct gen_pool *pool;

        pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
        if (pool != NULL) {
                spin_lock_init(&pool->lock);
                INIT_LIST_HEAD(&pool->chunks);
                pool->min_alloc_order = min_alloc_order;
                pool->algo = gen_pool_first_fit;
                pool->data = NULL;
                pool->name = NULL;
        }
        return pool;
}
EXPORT_SYMBOL(gen_pool_create);

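/*
 * Illustrative sketch (not part of the original file): a typical pool
 * lifecycle, assuming a hypothetical driver managing a 64 KiB region
 * of on-chip SRAM already mapped at "vaddr".  A min_alloc_order of 5
 * makes each bitmap bit represent 32 bytes.
 *
 *      struct gen_pool *pool = gen_pool_create(5, -1);
 *      unsigned long buf;
 *
 *      if (!pool)
 *              return -ENOMEM;
 *      if (gen_pool_add(pool, vaddr, SZ_64K, -1)) {
 *              gen_pool_destroy(pool);
 *              return -ENOMEM;
 *      }
 *      buf = gen_pool_alloc(pool, 256);
 *      if (!buf)                       // 0 means the pool had no room
 *              goto out;
 *      ...
 *      gen_pool_free(pool, buf, 256);
 *      gen_pool_destroy(pool);         // all allocations must be freed first
 */
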
/**
 * gen_pool_add_virt - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a -ve errno on failure.
 */
int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
                 size_t size, int nid)
{
        struct gen_pool_chunk *chunk;
        int nbits = size >> pool->min_alloc_order;
        int nbytes = sizeof(struct gen_pool_chunk) +
                                BITS_TO_LONGS(nbits) * sizeof(long);

        chunk = kzalloc_node(nbytes, GFP_KERNEL, nid);
        if (unlikely(chunk == NULL))
                return -ENOMEM;

        chunk->phys_addr = phys;
        chunk->start_addr = virt;
        chunk->end_addr = virt + size - 1;
        atomic_set(&chunk->avail, size);

        spin_lock(&pool->lock);
        list_add_rcu(&chunk->next_chunk, &pool->chunks);
        spin_unlock(&pool->lock);

        return 0;
}
EXPORT_SYMBOL(gen_pool_add_virt);

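/*
 * Illustrative sketch (not part of the original file): when the chunk
 * is ioremap'ed, the virtual and physical addresses differ, and
 * gen_pool_add_virt() records both so gen_pool_virt_to_phys() can
 * translate later.  "res" is a hypothetical struct resource describing
 * the SRAM.
 *
 *      void __iomem *base = ioremap(res->start, resource_size(res));
 *
 *      if (!base)
 *              return -ENOMEM;
 *      rc = gen_pool_add_virt(pool, (unsigned long)base, res->start,
 *                             resource_size(res), -1);
 */
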
/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
        struct gen_pool_chunk *chunk;
        phys_addr_t paddr = -1;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
                        paddr = chunk->phys_addr + (addr - chunk->start_addr);
                        break;
                }
        }
        rcu_read_unlock();

        return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);

/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool. Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
        struct list_head *_chunk, *_next_chunk;
        struct gen_pool_chunk *chunk;
        int order = pool->min_alloc_order;
        int bit, end_bit;

        list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
                chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
                list_del(&chunk->next_chunk);

                end_bit = chunk_size(chunk) >> order;
                bit = find_next_bit(chunk->bits, end_bit, 0);
                BUG_ON(bit < end_bit);

                kfree(chunk);
        }
        kfree_const(pool->name);
        kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);

/**
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
        struct gen_pool_chunk *chunk;
        unsigned long addr = 0;
        int order = pool->min_alloc_order;
        int nbits, start_bit = 0, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
        BUG_ON(in_nmi());
#endif

        if (size == 0)
                return 0;

        nbits = (size + (1UL << order) - 1) >> order;
        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (size > atomic_read(&chunk->avail))
                        continue;

                end_bit = chunk_size(chunk) >> order;
retry:
                start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
                                pool->data);
                if (start_bit >= end_bit)
                        continue;
                remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
                if (remain) {
                        remain = bitmap_clear_ll(chunk->bits, start_bit,
                                                 nbits - remain);
                        BUG_ON(remain);
                        goto retry;
                }

                addr = chunk->start_addr + ((unsigned long)start_bit << order);
                size = nbits << order;
                atomic_sub(size, &chunk->avail);
                break;
        }
        rcu_read_unlock();
        return addr;
}
EXPORT_SYMBOL(gen_pool_alloc);

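/*
 * Worked example (illustrative, not part of the original file): with
 * min_alloc_order == 5 (32-byte granularity), a request for 100 bytes
 * becomes nbits = (100 + 31) >> 5 == 4, i.e. 128 bytes are reserved,
 * and "size" is rounded up accordingly before being subtracted from
 * chunk->avail.  Callers therefore always consume the pool in
 * multiples of its minimum allocation size.
 */
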
/**
 * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: dma-view physical address return value.  Use NULL if unneeded.
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 */
void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
        unsigned long vaddr;

        if (!pool)
                return NULL;

        vaddr = gen_pool_alloc(pool, size);
        if (!vaddr)
                return NULL;

        if (dma)
                *dma = gen_pool_virt_to_phys(pool, vaddr);

        return (void *)vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_alloc);

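/*
 * Illustrative sketch (not part of the original file): carving a DMA
 * descriptor out of a pool built over coherent memory.  The pool is
 * assumed to have been populated with gen_pool_add_virt() so that the
 * returned dma_addr_t is meaningful for the device; "struct my_desc"
 * and DESC_BASE are hypothetical.
 *
 *      dma_addr_t dma;
 *      void *desc = gen_pool_dma_alloc(pool, sizeof(struct my_desc), &dma);
 *
 *      if (!desc)
 *              return -ENOMEM;
 *      writel(lower_32_bits(dma), regs + DESC_BASE);
 */
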
/**
 * gen_pool_free - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 *
 * Free previously allocated special memory back to the specified
 * pool.  Cannot be used in an NMI handler on architectures without
 * an NMI-safe cmpxchg implementation.
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
        struct gen_pool_chunk *chunk;
        int order = pool->min_alloc_order;
        int start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
        BUG_ON(in_nmi());
#endif

        nbits = (size + (1UL << order) - 1) >> order;
        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
                        BUG_ON(addr + size - 1 > chunk->end_addr);
                        start_bit = (addr - chunk->start_addr) >> order;
                        remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
                        BUG_ON(remain);
                        size = nbits << order;
                        atomic_add(size, &chunk->avail);
                        rcu_read_unlock();
                        return;
                }
        }
        rcu_read_unlock();
        BUG();
}
EXPORT_SYMBOL(gen_pool_free);

/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool:       the generic memory pool
 * @func:       func to call
 * @data:       additional data used by @func
 *
 * Call @func for every chunk of generic memory pool.  The @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
        void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
        void *data)
{
        struct gen_pool_chunk *chunk;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
                func(pool, chunk, data);
        rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);

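/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * callback that counts chunks, matching the signature expected by
 * gen_pool_for_each_chunk().  It must not block, since it runs under
 * rcu_read_lock.
 *
 *      static void count_chunk(struct gen_pool *pool,
 *                              struct gen_pool_chunk *chunk, void *data)
 *      {
 *              (*(unsigned int *)data)++;
 *      }
 *
 *      unsigned int nchunks = 0;
 *      gen_pool_for_each_chunk(pool, count_chunk, &nchunks);
 */
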
/**
 * addr_in_gen_pool - checks if an address falls within the range of a pool
 * @pool:       the generic memory pool
 * @start:      start address
 * @size:       size of the region
 *
 * Check if the range of addresses falls within the specified pool. Returns
 * true if the entire range is contained in the pool and false otherwise.
 */
bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
                        size_t size)
{
        bool found = false;
        unsigned long end = start + size - 1;
        struct gen_pool_chunk *chunk;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {
                if (start >= chunk->start_addr && start <= chunk->end_addr) {
                        if (end <= chunk->end_addr) {
                                found = true;
                                break;
                        }
                }
        }
        rcu_read_unlock();
        return found;
}

/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
        struct gen_pool_chunk *chunk;
        size_t avail = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
                avail += atomic_read(&chunk->avail);
        rcu_read_unlock();
        return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);

/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
        struct gen_pool_chunk *chunk;
        size_t size = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
                size += chunk_size(chunk);
        rcu_read_unlock();
        return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);

/**
 * gen_pool_set_algo - set the allocation algorithm
 * @pool: pool to change allocation algorithm
 * @algo: custom algorithm function
 * @data: additional data used by @algo
 *
 * Call @algo for each memory allocation in the pool.
 * If @algo is NULL use gen_pool_first_fit as default
 * memory allocation function.
 */
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
{
        rcu_read_lock();

        pool->algo = algo;
        if (!pool->algo)
                pool->algo = gen_pool_first_fit;

        pool->data = data;

        rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_set_algo);

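/*
 * Illustrative sketch (not part of the original file): switching a
 * pool to the best-fit strategy right after creation, before any
 * allocation traffic.  Passing NULL for @algo would restore the
 * first-fit default.
 *
 *      gen_pool_set_algo(pool, gen_pool_best_fit, NULL);
 */
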
/**
 * gen_pool_first_fit - find the first available region
 * of memory matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 */
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
                unsigned long start, unsigned int nr, void *data)
{
        return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);

/**
 * gen_pool_first_fit_order_align - find the first available region
 * of memory matching the size requirement. The region will be aligned
 * to the order of the size specified.
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 */
unsigned long gen_pool_first_fit_order_align(unsigned long *map,
                unsigned long size, unsigned long start,
                unsigned int nr, void *data)
{
        unsigned long align_mask = roundup_pow_of_two(nr) - 1;

        return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
}
EXPORT_SYMBOL(gen_pool_first_fit_order_align);

/**
 * gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 *
 * Iterate over the bitmap to find the smallest free region
 * in which we can allocate the memory.
 */
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
                unsigned long start, unsigned int nr, void *data)
{
        unsigned long start_bit = size;
        unsigned long len = size + 1;
        unsigned long index;

        index = bitmap_find_next_zero_area(map, size, start, nr, 0);

        while (index < size) {
                int next_bit = find_next_bit(map, size, index + nr);
                if ((next_bit - index) < len) {
                        len = next_bit - index;
                        start_bit = index;
                        if (len == nr)
                                return start_bit;
                }
                index = bitmap_find_next_zero_area(map, size,
                                                   next_bit + 1, nr, 0);
        }

        return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);

/* devres release callback: destroy the managed pool on device teardown */
static void devm_gen_pool_release(struct device *dev, void *res)
{
        gen_pool_destroy(*(struct gen_pool **)res);
}

static int devm_gen_pool_match(struct device *dev, void *res, void *data)
{
        struct gen_pool **p = res;

        /* NULL data matches only a pool without an assigned name */
        if (!data && !(*p)->name)
                return 1;

        if (!data || !(*p)->name)
                return 0;

        return !strcmp((*p)->name, data);
}

/**
 * gen_pool_get - Obtain the gen_pool (if any) for a device
 * @dev: device to retrieve the gen_pool from
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Returns the gen_pool for the device if one is present, or NULL.
 */
struct gen_pool *gen_pool_get(struct device *dev, const char *name)
{
        struct gen_pool **p;

        p = devres_find(dev, devm_gen_pool_release, devm_gen_pool_match,
                        (void *)name);
        if (!p)
                return NULL;
        return *p;
}
EXPORT_SYMBOL_GPL(gen_pool_get);

/**
 * devm_gen_pool_create - managed gen_pool_create
 * @dev: device that provides the gen_pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node selector for allocated gen_pool, %NUMA_NO_NODE for all nodes
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface. The pool will be
 * automatically destroyed by the device management code.
 */
struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
                                      int nid, const char *name)
{
        struct gen_pool **ptr, *pool;
        const char *pool_name = NULL;

        /* Check that genpool to be created is uniquely addressed on device */
        if (gen_pool_get(dev, name))
                return ERR_PTR(-EINVAL);

        if (name) {
                pool_name = kstrdup_const(name, GFP_KERNEL);
                if (!pool_name)
                        return ERR_PTR(-ENOMEM);
        }

        ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                goto free_pool_name;

        pool = gen_pool_create(min_alloc_order, nid);
        if (!pool)
                goto free_devres;

        *ptr = pool;
        pool->name = pool_name;
        devres_add(dev, ptr);

        return pool;

free_devres:
        devres_free(ptr);
free_pool_name:
        kfree_const(pool_name);

        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(devm_gen_pool_create);

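/*
 * Illustrative sketch (not part of the original file): creating a
 * managed pool from a hypothetical driver's probe routine.  The pool
 * is torn down automatically when the device is unbound, so probe
 * needs no matching cleanup; the "sram" name is arbitrary.
 *
 *      static int my_probe(struct platform_device *pdev)
 *      {
 *              struct gen_pool *pool;
 *
 *              pool = devm_gen_pool_create(&pdev->dev, 5,
 *                                          NUMA_NO_NODE, "sram");
 *              if (IS_ERR(pool))
 *                      return PTR_ERR(pool);
 *              ...
 *      }
 */
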
#ifdef CONFIG_OF
/**
 * of_gen_pool_get - find a pool by phandle property
 * @np: device node
 * @propname: property name containing phandle(s)
 * @index: index into the phandle array
 *
 * Returns the pool that contains the chunk starting at the physical
 * address of the device tree node pointed at by the phandle property,
 * or NULL if not found.
 */
struct gen_pool *of_gen_pool_get(struct device_node *np,
        const char *propname, int index)
{
        struct platform_device *pdev;
        struct device_node *np_pool, *parent;
        const char *name = NULL;
        struct gen_pool *pool = NULL;

        np_pool = of_parse_phandle(np, propname, index);
        if (!np_pool)
                return NULL;

        pdev = of_find_device_by_node(np_pool);
        if (!pdev) {
                /* Check if named gen_pool is created by parent node device */
                parent = of_get_parent(np_pool);
                pdev = of_find_device_by_node(parent);
                of_node_put(parent);

                of_property_read_string(np_pool, "label", &name);
                if (!name)
                        name = np_pool->name;
        }
        if (pdev)
                pool = gen_pool_get(&pdev->dev, name);
        of_node_put(np_pool);

        return pool;
}
EXPORT_SYMBOL_GPL(of_gen_pool_get);
#endif /* CONFIG_OF */

