TOMOYO Linux Cross Reference
Linux/mm/dmapool.c

/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
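
/*
 * A minimal usage sketch (not part of this file): a hypothetical driver
 * creates a pool of 64-byte blocks, allocates one block for a descriptor,
 * and releases everything on teardown.  The device pointer "dev" and the
 * names "my_descriptors"/"desc" are illustrative assumptions.
 *
 *	struct dma_pool *pool;
 *	dma_addr_t dma;
 *	void *desc;
 *
 *	pool = dma_pool_create("my_descriptors", dev, 64, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *	desc = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (!desc) {
 *		dma_pool_destroy(pool);
 *		return -ENOMEM;
 *	}
 *	// ... program the device with the bus address "dma" ...
 *	dma_pool_free(pool, desc, dma);
 *	dma_pool_destroy(pool);
 */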

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

static DEFINE_MUTEX(pools_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
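
/*
 * Reading the resulting attribute (/sys/devices/.../pools) yields the
 * header line followed by one line per pool, per the format string above.
 * With a hypothetical pool named "buffer-2048" that has 3 blocks in use
 * across 2 pages of 2048-byte blocks (4 blocks total), the output would
 * look roughly like:
 *
 *	poolinfo - 0.1
 *	buffer-2048         3    4 2048  2
 */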

/**
 * dma_pool_create - Creates a pool of consistent memory blocks for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;

	if (align == 0) {
		align = 1;
	} else if (align & (align - 1)) {
		return NULL;
	}

	if (size == 0) {
		return NULL;
	} else if (size < 4) {
		size = 4;
	}

	if ((size % align) != 0)
		size = ALIGN(size, align);

	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary) {
		boundary = allocation;
	} else if ((boundary < size) || (boundary & (boundary - 1))) {
		return NULL;
	}

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;

	if (dev) {
		int ret;

		mutex_lock(&pools_lock);
		if (list_empty(&dev->dma_pools))
			ret = device_create_file(dev, &dev_attr_pools);
		else
			ret = 0;
		/* note:  not currently insisting "name" be unique */
		if (!ret)
			list_add(&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD(&retval->pools);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
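
/*
 * Example of the parameter rounding above (values are illustrative):
 * a request for size 20 with align 16 is rounded up to size 32, the
 * 'allocation' becomes at least one page, and a zero boundary defaults
 * to that allocation size:
 *
 *	struct dma_pool *pool;
 *
 *	pool = dma_pool_create("rounding-demo", dev, 20, 16, 0);
 *	// pool->size == 32, pool->allocation == PAGE_SIZE (e.g. 4096),
 *	// pool->boundary == pool->allocation
 */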

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}
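
/*
 * Worked example of the chaining above, assuming size = 48, boundary = 128
 * and allocation = 4096 (hypothetical values): starting at offset 0, the
 * first block's header is set to 48; at offset 48 the following block
 * (96..144) would cross the 128-byte boundary, so the chain skips ahead
 * to 128 and the boundary advances to 256.  The resulting free list begins
 * 0 -> 48 -> 128 -> 176 -> 256 -> ..., so no returned block straddles a
 * 128-byte boundary.
 */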

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef	DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline int is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_sleep_if(mem_flags & __GFP_WAIT);

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}

	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
	spin_unlock_irqrestore(&pool->lock, flags);

	page = pool_alloc_page(pool, mem_flags);
	if (!page)
		return NULL;

	spin_lock_irqsave(&pool->lock, flags);

	list_add(&page->page_list, &pool->page_list);
 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	DMAPOOL_DEBUG
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
	spin_unlock_irqrestore(&pool->lock, flags);
	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
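
/*
 * Illustrative call sites (not part of this file): with GFP_KERNEL the
 * call may sleep in pool_alloc_page(), so it must come from process
 * context; with GFP_ATOMIC it never blocks, at the cost of possible
 * failure when no free block or new page is available.
 *
 *	// process context, may block:
 *	buf = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *
 *	// atomic context, never blocks but may return NULL:
 *	buf = dma_pool_alloc(pool, GFP_ATOMIC, &dma);
 */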

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;
#ifdef	DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			else
				printk(KERN_ERR "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	/*
	 * Resist the temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better to have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
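
/*
 * The freed block is pushed onto the head of its page's free list: the
 * first word of the block records the old list head, and page->offset
 * becomes this block's offset.  For example (hypothetical offsets), if
 * page->offset was 128 and a block at offset 64 is freed, the chain head
 * becomes 64 and *(int *)vaddr == 128, i.e. 64 -> 128 -> ...
 */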

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  A DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
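
/*
 * A managed-pool usage sketch, assuming a hypothetical probe function
 * "foo_probe" (illustrative, not part of this file): no explicit destroy
 * is needed because devres tears the pool down on driver detach.
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		struct dma_pool *pool;
 *
 *		pool = dmam_pool_create("foo-desc", dev, 64, 8, 0);
 *		if (!pool)
 *			return -ENOMEM;
 *		// use dma_pool_alloc()/dma_pool_free() as usual;
 *		// teardown happens automatically on detach
 *		return 0;
 *	}
 */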

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	dma_pool_destroy(pool);
	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);
