
TOMOYO Linux Cross Reference
Linux/mm/cma.c


/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *      Marek Szyprowski <m.szyprowski@samsung.com>
 *      Michal Nazarewicz <mina86@mina86.com>
 *      Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *      Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>

struct cma {
        unsigned long   base_pfn;
        unsigned long   count;
        unsigned long   *bitmap;
        unsigned int order_per_bit; /* Order of pages represented by one bit */
        struct mutex    lock;
};
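
/*
 * Illustrative example (not from the original source): with 4 KiB pages,
 * an area with count = 4096 and order_per_bit = 0 spans 16 MiB and needs
 * a 4096-bit bitmap; with order_per_bit = 4 each bit would cover 16 pages
 * (64 KiB), shrinking the bitmap to 256 bits.
 */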

static struct cma cma_areas[MAX_CMA_AREAS];
static unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
        return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
        return cma->count << PAGE_SHIFT;
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
                                             unsigned int align_order)
{
        if (align_order <= cma->order_per_bit)
                return 0;
        return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN relative to the specified align_order.
 * The returned value is in units of order_per_bit.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
                                               unsigned int align_order)
{
        return (cma->base_pfn & ((1UL << align_order) - 1))
                >> cma->order_per_bit;
}
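
/*
 * Worked example (illustrative): with order_per_bit = 0 and
 * align_order = 4, cma_bitmap_aligned_mask() returns 0xf, so candidate
 * allocations are constrained to 16-page boundaries in the bitmap.
 * If base_pfn = 0x10008 (not 16-page aligned), then
 * cma_bitmap_aligned_offset() returns 8: the offset that compensates
 * for bit 0 of the bitmap corresponding to a misaligned base PFN.
 */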

static unsigned long cma_bitmap_maxno(struct cma *cma)
{
        return cma->count >> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
                                              unsigned long pages)
{
        return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
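
/*
 * Worked example (illustrative): with order_per_bit = 2 (one bit per
 * four pages), a request for count = 5 pages takes
 * ALIGN(5, 4) >> 2 = 2 bits, i.e. the allocation is accounted as
 * eight pages' worth of bitmap space.
 */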

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
                             unsigned int count)
{
        unsigned long bitmap_no, bitmap_count;

        bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
        bitmap_count = cma_bitmap_pages_to_bits(cma, count);

        mutex_lock(&cma->lock);
        bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
        mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
        int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
        unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
        unsigned i = cma->count >> pageblock_order;
        struct zone *zone;

        cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

        if (!cma->bitmap)
                return -ENOMEM;

        WARN_ON_ONCE(!pfn_valid(pfn));
        zone = page_zone(pfn_to_page(pfn));

        do {
                unsigned j;

                base_pfn = pfn;
                for (j = pageblock_nr_pages; j; --j, pfn++) {
                        WARN_ON_ONCE(!pfn_valid(pfn));
                        /*
                         * alloc_contig_range requires the pfn range
                         * specified to be in the same zone. Make this
                         * simple by forcing the entire CMA resv range
                         * to be in the same zone.
                         */
                        if (page_zone(pfn_to_page(pfn)) != zone)
                                goto err;
                }
                init_cma_reserved_pageblock(pfn_to_page(base_pfn));
        } while (--i);

        mutex_init(&cma->lock);
        return 0;

err:
        kfree(cma->bitmap);
        cma->count = 0;
        return -EINVAL;
}

static int __init cma_init_reserved_areas(void)
{
        int i;

        for (i = 0; i < cma_area_count; i++) {
                int ret = cma_activate_area(&cma_areas[i]);

                if (ret)
                        return ret;
        }

        return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
                                 unsigned int order_per_bit,
                                 struct cma **res_cma)
{
        struct cma *cma;
        phys_addr_t alignment;

        /* Sanity checks */
        if (cma_area_count == ARRAY_SIZE(cma_areas)) {
                pr_err("Not enough slots for CMA reserved regions!\n");
                return -ENOSPC;
        }

        if (!size || !memblock_is_region_reserved(base, size))
                return -EINVAL;

        /* ensure minimal alignment required by mm core */
        alignment = PAGE_SIZE <<
                        max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

        /* alignment should be aligned with order_per_bit */
        if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
                return -EINVAL;

        if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
                return -EINVAL;

        /*
         * Each reserved area must be initialised later, when more kernel
         * subsystems (like slab allocator) are available.
         */
        cma = &cma_areas[cma_area_count];
        cma->base_pfn = PFN_DOWN(base);
        cma->count = size >> PAGE_SHIFT;
        cma->order_per_bit = order_per_bit;
        *res_cma = cma;
        cma_area_count++;
        totalcma_pages += (size / PAGE_SIZE);

        return 0;
}
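
/*
 * Usage sketch (illustrative only, not part of the original file): early
 * arch code that has already reserved a suitably aligned region with
 * memblock_reserve() could hand it over to CMA as below.  The base and
 * size values and all example_* names are hypothetical.
 */
#if 0
static struct cma *example_cma;

static void __init example_cma_setup(void)
{
        phys_addr_t base = 0x20000000;  /* hypothetical, already reserved */
        phys_addr_t size = SZ_16M;      /* must pass the alignment checks above */

        if (cma_init_reserved_mem(base, size, 0, &example_cma))
                pr_warn("example: cma_init_reserved_mem failed\n");
}
#endif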

/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be a power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: if true, reserve the area at exactly @base.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve a contiguous area at exactly @base.  If false,
 * reserve in the range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
                        phys_addr_t size, phys_addr_t limit,
                        phys_addr_t alignment, unsigned int order_per_bit,
                        bool fixed, struct cma **res_cma)
{
        phys_addr_t memblock_end = memblock_end_of_DRAM();
        phys_addr_t highmem_start;
        int ret = 0;

#ifdef CONFIG_X86
        /*
         * high_memory isn't direct-mapped memory, so retrieving its physical
         * address isn't appropriate.  But it is useful to check the physical
         * address of the highmem boundary, so it's justifiable to get the
         * physical address from it.  On x86 there is a validation check for
         * this case, so the following workaround is needed to avoid it.
         */
        highmem_start = __pa_nodebug(high_memory);
#else
        highmem_start = __pa(high_memory);
#endif
        pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
                __func__, &size, &base, &limit, &alignment);

        if (cma_area_count == ARRAY_SIZE(cma_areas)) {
                pr_err("Not enough slots for CMA reserved regions!\n");
                return -ENOSPC;
        }

        if (!size)
                return -EINVAL;

        if (alignment && !is_power_of_2(alignment))
                return -EINVAL;

        /*
         * Sanitise input arguments.
         * Pages at both ends of the CMA area could be merged into adjacent
         * unmovable-migratetype pages by the page allocator's buddy
         * algorithm.  In that case, a contiguous allocation could fail,
         * which is not what we want.
         */
        alignment = max(alignment,  (phys_addr_t)PAGE_SIZE <<
                          max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
        base = ALIGN(base, alignment);
        size = ALIGN(size, alignment);
        limit &= ~(alignment - 1);

        if (!base)
                fixed = false;

        /* size should be aligned with order_per_bit */
        if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
                return -EINVAL;

        /*
         * If allocating at a fixed base, the requested region must not cross
         * the low/high memory boundary.
         */
        if (fixed && base < highmem_start && base + size > highmem_start) {
                ret = -EINVAL;
                pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
                        &base, &highmem_start);
                goto err;
        }

        /*
         * If the limit is unspecified or above the memblock end, its effective
         * value will be the memblock end. Set it explicitly to simplify further
         * checks.
         */
        if (limit == 0 || limit > memblock_end)
                limit = memblock_end;

        /* Reserve memory */
        if (fixed) {
                if (memblock_is_region_reserved(base, size) ||
                    memblock_reserve(base, size) < 0) {
                        ret = -EBUSY;
                        goto err;
                }
        } else {
                phys_addr_t addr = 0;

                /*
                 * All pages in the reserved area must come from the same zone.
                 * If the requested region crosses the low/high memory boundary,
                 * try allocating from high memory first and fall back to low
                 * memory in case of failure.
                 */
                if (base < highmem_start && limit > highmem_start) {
                        addr = memblock_alloc_range(size, alignment,
                                                    highmem_start, limit);
                        limit = highmem_start;
                }

                if (!addr) {
                        addr = memblock_alloc_range(size, alignment, base,
                                                    limit);
                        if (!addr) {
                                ret = -ENOMEM;
                                goto err;
                        }
                }

                /*
                 * kmemleak scans/reads tracked objects for pointers to other
                 * objects, but this address isn't mapped or accessible
                 */
                kmemleak_ignore(phys_to_virt(addr));
                base = addr;
        }

        ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
        if (ret)
                goto free_mem;

        pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
                &base);
        return 0;

free_mem:
        memblock_free(base, size);
err:
        pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
        return ret;
}
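
/*
 * Usage sketch (illustrative only, not part of the original file): arch
 * setup code could reserve a 32 MiB area anywhere in memory (base 0,
 * limit 0, fixed = false).  All example_* names are hypothetical.
 */
#if 0
static struct cma *example_cma_area;

static void __init example_declare_cma(void)
{
        if (cma_declare_contiguous(0, SZ_32M, 0, 0, 0, false,
                                   &example_cma_area))
                pr_warn("example: CMA reservation failed\n");
}
#endif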

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates a range of pages from the specified contiguous
 * memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
{
        unsigned long mask, offset, pfn, start = 0;
        unsigned long bitmap_maxno, bitmap_no, bitmap_count;
        struct page *page = NULL;
        int ret;

        if (!cma || !cma->count)
                return NULL;

        pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
                 count, align);

        if (!count)
                return NULL;

        mask = cma_bitmap_aligned_mask(cma, align);
        offset = cma_bitmap_aligned_offset(cma, align);
        bitmap_maxno = cma_bitmap_maxno(cma);
        bitmap_count = cma_bitmap_pages_to_bits(cma, count);

        for (;;) {
                mutex_lock(&cma->lock);
                bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
                                bitmap_maxno, start, bitmap_count, mask,
                                offset);
                if (bitmap_no >= bitmap_maxno) {
                        mutex_unlock(&cma->lock);
                        break;
                }
                bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
                /*
                 * It's safe to drop the lock here. We've marked this region for
                 * our exclusive use. If the migration fails we will take the
                 * lock again and unmark it.
                 */
                mutex_unlock(&cma->lock);

                pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
                mutex_lock(&cma_mutex);
                ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
                mutex_unlock(&cma_mutex);
                if (ret == 0) {
                        page = pfn_to_page(pfn);
                        break;
                }

                cma_clear_bitmap(cma, pfn, count);
                if (ret != -EBUSY)
                        break;

                pr_debug("%s(): memory range at %p is busy, retrying\n",
                         __func__, pfn_to_page(pfn));
                /* try again with a bit different memory target */
                start = bitmap_no + mask + 1;
        }

        pr_debug("%s(): returned %p\n", __func__, page);
        return page;
}

/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area, and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
        unsigned long pfn;

        if (!cma || !pages)
                return false;

        pr_debug("%s(page %p)\n", __func__, (void *)pages);

        pfn = page_to_pfn(pages);

        if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
                return false;

        VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

        free_contig_range(pfn, count);
        cma_clear_bitmap(cma, pfn, count);

        return true;
}

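/*
 * Usage sketch (illustrative only, not part of the original file): a
 * typical consumer allocates a physically contiguous range from its CMA
 * area and later releases it with the same page count.  The function
 * name is hypothetical.
 */
#if 0
static void example_cma_use(struct cma *cma)
{
        /* 16 pages, starting on a 4-page (order-2) boundary */
        struct page *page = cma_alloc(cma, 16, 2);

        if (!page)
                return;

        /* ... use the 64 KiB (with 4 KiB pages) contiguous range ... */

        cma_release(cma, page, 16);
}
#endif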
