Linux/mm/cma.c

/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *      Marek Szyprowski <m.szyprowski@samsung.com>
 *      Michal Nazarewicz <mina86@mina86.com>
 *      Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *      Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
        return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
        return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
        return cma->name ? cma->name : "(undefined)";
}
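
/*
 * Illustrative sketch, not part of the original file: a helper that dumps
 * one region through the accessors above.  The helper name is hypothetical.
 */
static void __maybe_unused cma_example_dump(const struct cma *cma)
{
        phys_addr_t base = cma_get_base(cma);

        pr_info("%s: base %pa, size %lu bytes\n",
                cma_get_name(cma), &base, cma_get_size(cma));
}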

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
                                             unsigned int align_order)
{
        if (align_order <= cma->order_per_bit)
                return 0;
        return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN from the specified align_order.
 * The returned value is expressed in units of order_per_bit.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
                                               unsigned int align_order)
{
        return (cma->base_pfn & ((1UL << align_order) - 1))
                >> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
                                              unsigned long pages)
{
        return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
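
/*
 * Worked example for the three helpers above (numbers are illustrative):
 * with order_per_bit = 1, each bitmap bit covers two pages.  For a request
 * with align_order = 3 (an 8-page alignment):
 *
 *   cma_bitmap_aligned_mask()   -> (1UL << (3 - 1)) - 1 = 3, so candidate
 *                                  starting bits must be multiples of 4;
 *   cma_bitmap_aligned_offset() -> for base_pfn = 0x12345, 0x12345 & 7 = 5,
 *                                  and 5 >> 1 = 2 bits of bitmap offset;
 *   cma_bitmap_pages_to_bits()  -> a 5-page request rounds up to 6 pages,
 *                                  i.e. 3 bits.
 */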

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
                             unsigned int count)
{
        unsigned long bitmap_no, bitmap_count;

        bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
        bitmap_count = cma_bitmap_pages_to_bits(cma, count);

        mutex_lock(&cma->lock);
        bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
        mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
        int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
        unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
        unsigned i = cma->count >> pageblock_order;
        struct zone *zone;

        cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

        if (!cma->bitmap)
                return -ENOMEM;

        WARN_ON_ONCE(!pfn_valid(pfn));
        zone = page_zone(pfn_to_page(pfn));

        do {
                unsigned j;

                base_pfn = pfn;
                for (j = pageblock_nr_pages; j; --j, pfn++) {
                        WARN_ON_ONCE(!pfn_valid(pfn));
                        /*
                         * alloc_contig_range requires the pfn range
                         * specified to be in the same zone. Make this
                         * simple by forcing the entire CMA resv range
                         * to be in the same zone.
                         */
                        if (page_zone(pfn_to_page(pfn)) != zone)
                                goto not_in_zone;
                }
                init_cma_reserved_pageblock(pfn_to_page(base_pfn));
        } while (--i);

        mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
        INIT_HLIST_HEAD(&cma->mem_head);
        spin_lock_init(&cma->mem_head_lock);
#endif

        return 0;

not_in_zone:
        pr_err("CMA area %s could not be activated\n", cma->name);
        kfree(cma->bitmap);
        cma->count = 0;
        return -EINVAL;
}

static int __init cma_init_reserved_areas(void)
{
        int i;

        for (i = 0; i < cma_area_count; i++) {
                int ret = cma_activate_area(&cma_areas[i]);

                if (ret)
                        return ret;
        }

        return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
                                 unsigned int order_per_bit,
                                 const char *name,
                                 struct cma **res_cma)
{
        struct cma *cma;
        phys_addr_t alignment;

        /* Sanity checks */
        if (cma_area_count == ARRAY_SIZE(cma_areas)) {
                pr_err("Not enough slots for CMA reserved regions!\n");
                return -ENOSPC;
        }

        if (!size || !memblock_is_region_reserved(base, size))
                return -EINVAL;

        /* ensure minimal alignment required by mm core */
        alignment = PAGE_SIZE <<
                        max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

        /* alignment should be aligned with order_per_bit */
        if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
                return -EINVAL;

        if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
                return -EINVAL;

        /*
         * Each reserved area must be initialised later, when more kernel
         * subsystems (like slab allocator) are available.
         */
        cma = &cma_areas[cma_area_count];
        if (name) {
                cma->name = name;
        } else {
                cma->name = kasprintf(GFP_KERNEL, "cma%d", cma_area_count);
                if (!cma->name)
                        return -ENOMEM;
        }
        cma->base_pfn = PFN_DOWN(base);
        cma->count = size >> PAGE_SHIFT;
        cma->order_per_bit = order_per_bit;
        *res_cma = cma;
        cma_area_count++;
        totalcma_pages += (size / PAGE_SIZE);

        return 0;
}
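
/*
 * Illustrative sketch, not part of the original file: registering a range
 * that earlier boot code already reserved through memblock.  The names
 * my_cma, my_cma_register, my_base and my_size are hypothetical.
 */
static struct cma *my_cma;

static int __init __maybe_unused my_cma_register(phys_addr_t my_base,
                                                 phys_addr_t my_size)
{
        /*
         * The range must already be memblock-reserved and aligned to
         * MAX_ORDER/pageblock granularity, or -EINVAL is returned.
         */
        return cma_init_reserved_mem(my_base, my_size, 0, "my-cma", &my_cma);
}
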
/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be a power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem()
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
                        phys_addr_t size, phys_addr_t limit,
                        phys_addr_t alignment, unsigned int order_per_bit,
                        bool fixed, const char *name, struct cma **res_cma)
{
        phys_addr_t memblock_end = memblock_end_of_DRAM();
        phys_addr_t highmem_start;
        int ret = 0;

        /*
         * We can't use __pa(high_memory) directly, since high_memory
         * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
         * complain. Find the boundary by adding one to the last valid
         * address.
         */
        highmem_start = __pa(high_memory - 1) + 1;
        pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
                __func__, &size, &base, &limit, &alignment);

        if (cma_area_count == ARRAY_SIZE(cma_areas)) {
                pr_err("Not enough slots for CMA reserved regions!\n");
                return -ENOSPC;
        }

        if (!size)
                return -EINVAL;

        if (alignment && !is_power_of_2(alignment))
                return -EINVAL;

        /*
         * Sanitise input arguments.
         * Pages at both ends of the CMA area could be merged into adjacent
         * unmovable-migratetype pages by the page allocator's buddy
         * algorithm; in that case a later contiguous allocation could fail,
         * which is not what we want.
         */
        alignment = max(alignment,  (phys_addr_t)PAGE_SIZE <<
                          max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
        base = ALIGN(base, alignment);
        size = ALIGN(size, alignment);
        limit &= ~(alignment - 1);

        if (!base)
                fixed = false;

        /* size should be aligned with order_per_bit */
        if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
                return -EINVAL;

        /*
         * If allocating at a fixed base the request region must not cross the
         * low/high memory boundary.
         */
        if (fixed && base < highmem_start && base + size > highmem_start) {
                ret = -EINVAL;
                pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
                        &base, &highmem_start);
                goto err;
        }

        /*
         * If the limit is unspecified or above the memblock end, its effective
         * value will be the memblock end. Set it explicitly to simplify further
         * checks.
         */
        if (limit == 0 || limit > memblock_end)
                limit = memblock_end;

        /* Reserve memory */
        if (fixed) {
                if (memblock_is_region_reserved(base, size) ||
                    memblock_reserve(base, size) < 0) {
                        ret = -EBUSY;
                        goto err;
                }
        } else {
                phys_addr_t addr = 0;

                /*
                 * All pages in the reserved area must come from the same zone.
                 * If the requested region crosses the low/high memory boundary,
                 * try allocating from high memory first and fall back to low
                 * memory in case of failure.
                 */
                if (base < highmem_start && limit > highmem_start) {
                        addr = memblock_alloc_range(size, alignment,
                                                    highmem_start, limit,
                                                    MEMBLOCK_NONE);
                        limit = highmem_start;
                }

                if (!addr) {
                        addr = memblock_alloc_range(size, alignment, base,
                                                    limit,
                                                    MEMBLOCK_NONE);
                        if (!addr) {
                                ret = -ENOMEM;
                                goto err;
                        }
                }

                /*
                 * kmemleak scans/reads tracked objects for pointers to other
                 * objects but this address isn't mapped and accessible
                 */
                kmemleak_ignore_phys(addr);
                base = addr;
        }

        ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
        if (ret)
                goto err;

        pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
                &base);
        return 0;

err:
        pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
        return ret;
}
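
/*
 * Illustrative sketch, not part of the original file: early arch code
 * asking for a 16 MiB area anywhere memblock finds room.  example_cma
 * and example_cma_reserve are hypothetical names.
 */
static struct cma *example_cma;

static void __init __maybe_unused example_cma_reserve(void)
{
        /* base, limit and alignment of 0 let the allocator choose */
        if (cma_declare_contiguous(0, SZ_16M, 0, 0, 0, false,
                                   "example", &example_cma))
                pr_warn("example CMA reservation failed\n");
}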

#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
        unsigned long next_zero_bit, next_set_bit;
        unsigned long start = 0;
        unsigned int nr_zero, nr_total = 0;

        mutex_lock(&cma->lock);
        pr_info("number of available pages: ");
        for (;;) {
                next_zero_bit = find_next_zero_bit(cma->bitmap, cma->count, start);
                if (next_zero_bit >= cma->count)
                        break;
                next_set_bit = find_next_bit(cma->bitmap, cma->count, next_zero_bit);
                nr_zero = next_set_bit - next_zero_bit;
                pr_cont("%s%u@%lu", nr_total ? "+" : "", nr_zero, next_zero_bit);
                nr_total += nr_zero;
                start = next_zero_bit + nr_zero;
        }
        pr_cont("=> %u free of %lu total pages\n", nr_total, cma->count);
        mutex_unlock(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing a message about a failed allocation.
 *
 * This function allocates part of the contiguous memory from the specified
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
                       bool no_warn)
{
        unsigned long mask, offset;
        unsigned long pfn = -1;
        unsigned long start = 0;
        unsigned long bitmap_maxno, bitmap_no, bitmap_count;
        struct page *page = NULL;
        int ret = -ENOMEM;

        if (!cma || !cma->count)
                return NULL;

        pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
                 count, align);

        if (!count)
                return NULL;

        mask = cma_bitmap_aligned_mask(cma, align);
        offset = cma_bitmap_aligned_offset(cma, align);
        bitmap_maxno = cma_bitmap_maxno(cma);
        bitmap_count = cma_bitmap_pages_to_bits(cma, count);

        if (bitmap_count > bitmap_maxno)
                return NULL;

        for (;;) {
                mutex_lock(&cma->lock);
                bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
                                bitmap_maxno, start, bitmap_count, mask,
                                offset);
                if (bitmap_no >= bitmap_maxno) {
                        mutex_unlock(&cma->lock);
                        break;
                }
                bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
                /*
                 * It's safe to drop the lock here. We've marked this region for
                 * our exclusive use. If the migration fails we will take the
                 * lock again and unmark it.
                 */
                mutex_unlock(&cma->lock);

                pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
                mutex_lock(&cma_mutex);
                ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
                                     GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
                mutex_unlock(&cma_mutex);
                if (ret == 0) {
                        page = pfn_to_page(pfn);
                        break;
                }

                cma_clear_bitmap(cma, pfn, count);
                if (ret != -EBUSY)
                        break;

                pr_debug("%s(): memory range at %p is busy, retrying\n",
                         __func__, pfn_to_page(pfn));
                /* try again with a bit different memory target */
                start = bitmap_no + mask + 1;
        }

        trace_cma_alloc(pfn, page, count, align);

        if (ret && !no_warn) {
                pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n",
                        __func__, count, ret);
                cma_debug_show_areas(cma);
        }

        pr_debug("%s(): returned %p\n", __func__, page);
        return page;
}

/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area, and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
        unsigned long pfn;

        if (!cma || !pages)
                return false;

        pr_debug("%s(page %p)\n", __func__, (void *)pages);

        pfn = page_to_pfn(pages);

        if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
                return false;

        VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

        free_contig_range(pfn, count);
        cma_clear_bitmap(cma, pfn, count);
        trace_cma_release(pfn, pages, count);

        return true;
}
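
/*
 * Illustrative sketch, not part of the original file: a driver-style
 * allocate/use/free cycle pairing cma_alloc() with cma_release().
 * example_cma_use is a hypothetical name; the region pointer would come
 * from an earlier cma_declare_contiguous() or cma_init_reserved_mem().
 */
static int __maybe_unused example_cma_use(struct cma *cma,
                                          unsigned int nr_pages)
{
        struct page *page;

        /* order-0 alignment, and do warn if the allocation fails */
        page = cma_alloc(cma, nr_pages, 0, false);
        if (!page)
                return -ENOMEM;

        /* ... the caller would map or hand the pages to a device here ... */

        if (!cma_release(cma, page, nr_pages))
                return -EINVAL;
        return 0;
}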

int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
        int i;

        for (i = 0; i < cma_area_count; i++) {
                int ret = it(&cma_areas[i], data);

                if (ret)
                        return ret;
        }

        return 0;
}
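
/*
 * Illustrative sketch, not part of the original file: a callback for
 * cma_for_each_area() that sums the pages of every registered area, e.g.
 *
 *      unsigned long total = 0;
 *
 *      cma_for_each_area(example_count_pages, &total);
 */
static int __maybe_unused example_count_pages(struct cma *cma, void *data)
{
        unsigned long *total = data;

        *total += cma_get_size(cma) >> PAGE_SHIFT;
        return 0;       /* a non-zero return would stop the iteration */
}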