/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>


/*** Page table manipulation functions ***/

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}
static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 */
static int vmap_page_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = start;
	int err = 0;
	int nr = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	return nr;
}

static int vmap_page_range(unsigned long start, unsigned long end,
			   pgprot_t prot, struct page **pages)
{
	int ret;

	ret = vmap_page_range_noflush(start, end, prot, pages);
	flush_cache_vmap(start, end);
	return ret;
}

int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep, pte;

				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
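/*
 * Illustrative sketch, not part of the original file: how a caller
 * might resolve the backing page of one byte of a vmalloc'ed buffer.
 * 'buf' is a hypothetical vmalloc() result; both helpers operate on
 * the single page containing the given address.
 *
 *	struct page *pg = vmalloc_to_page(buf);
 *	unsigned long pfn = vmalloc_to_pfn(buf);
 */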
/*** Global kva allocator ***/

#define VM_LAZY_FREE	0x01
#define VM_LAZY_FREEING	0x02
#define VM_VM_AREA	0x04

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;
	unsigned long flags;
	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */
	struct list_head purge_list;	/* "lazy purge" list */
	void *private;
	struct rcu_head rcu_head;
};

static DEFINE_SPINLOCK(vmap_area_lock);
static struct rb_root vmap_area_root = RB_ROOT;
static LIST_HEAD(vmap_area_list);
static unsigned long vmap_area_pcpu_hole;

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr > va->va_start)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

static void __insert_vmap_area(struct vmap_area *va)
{
	struct rb_node **p = &vmap_area_root.rb_node;
	struct rb_node *parent = NULL;
	struct rb_node *tmp;

	while (*p) {
		struct vmap_area *tmp;

		parent = *p;
		tmp = rb_entry(parent, struct vmap_area, rb_node);
		if (va->va_start < tmp->va_end)
			p = &(*p)->rb_left;
		else if (va->va_end > tmp->va_start)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&va->rb_node, parent, p);
	rb_insert_color(&va->rb_node, &vmap_area_root);

	/* address-sort this list so it is usable like the vmlist */
	tmp = rb_prev(&va->rb_node);
	if (tmp) {
		struct vmap_area *prev;
		prev = rb_entry(tmp, struct vmap_area, rb_node);
		list_add_rcu(&va->list, &prev->list);
	} else
		list_add_rcu(&va->list, &vmap_area_list);
}

static void purge_vmap_area_lazy(void);

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va;
	struct rb_node *n;
	unsigned long addr;
	int purged = 0;

	BUG_ON(!size);
	BUG_ON(size & ~PAGE_MASK);

	va = kmalloc_node(sizeof(struct vmap_area),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

retry:
	addr = ALIGN(vstart, align);

	spin_lock(&vmap_area_lock);
	if (addr + size - 1 < addr)
		goto overflow;

	/* XXX: could have a last_hole cache */
	n = vmap_area_root.rb_node;
	if (n) {
		struct vmap_area *first = NULL;

		do {
			struct vmap_area *tmp;
			tmp = rb_entry(n, struct vmap_area, rb_node);
			if (tmp->va_end >= addr) {
				if (!first && tmp->va_start < addr + size)
					first = tmp;
				n = n->rb_left;
			} else {
				first = tmp;
				n = n->rb_right;
			}
		} while (n);

		if (!first)
			goto found;

		if (first->va_end < addr) {
			n = rb_next(&first->rb_node);
			if (n)
				first = rb_entry(n, struct vmap_area, rb_node);
			else
				goto found;
		}

		while (addr + size > first->va_start && addr + size <= vend) {
			addr = ALIGN(first->va_end + PAGE_SIZE, align);
			if (addr + size - 1 < addr)
				goto overflow;

			n = rb_next(&first->rb_node);
			if (n)
				first = rb_entry(n, struct vmap_area, rb_node);
			else
				goto found;
		}
	}
found:
	if (addr + size > vend) {
overflow:
		spin_unlock(&vmap_area_lock);
		if (!purged) {
			purge_vmap_area_lazy();
			purged = 1;
			goto retry;
		}
		if (printk_ratelimit())
			printk(KERN_WARNING
				"vmap allocation for size %lu failed: "
				"use vmalloc=<size> to increase size.\n", size);
		kfree(va);
		return ERR_PTR(-EBUSY);
	}

	BUG_ON(addr & (align-1));

	va->va_start = addr;
	va->va_end = addr + size;
	va->flags = 0;
	__insert_vmap_area(va);
	spin_unlock(&vmap_area_lock);

	return va;
}

static void rcu_free_va(struct rcu_head *head)
{
	struct vmap_area *va = container_of(head, struct vmap_area, rcu_head);

	kfree(va);
}

static void __free_vmap_area(struct vmap_area *va)
{
	BUG_ON(RB_EMPTY_NODE(&va->rb_node));
	rb_erase(&va->rb_node, &vmap_area_root);
	RB_CLEAR_NODE(&va->rb_node);
	list_del_rcu(&va->list);

	/*
	 * Track the highest possible candidate for pcpu area
	 * allocation.  Areas outside of vmalloc area can be returned
	 * here too, consider only end addresses which fall inside
	 * vmalloc area proper.
	 */
	if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END)
		vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);

	call_rcu(&va->rcu_head, rcu_free_va);
}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	spin_lock(&vmap_area_lock);
	__free_vmap_area(va);
	spin_unlock(&vmap_area_lock);
}

/*
 * Clear the pagetable entries of a given vmap_area
 */
static void unmap_vmap_area(struct vmap_area *va)
{
	vunmap_page_range(va->va_start, va->va_end);
}
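/*
 * Illustrative sketch, not part of the original file: the pattern
 * internal callers follow when carving KVA out of the vmalloc range.
 * The arguments shown are one plausible combination, not a fixed
 * contract.
 *
 *	struct vmap_area *va;
 *
 *	va = alloc_vmap_area(size, PAGE_SIZE, VMALLOC_START, VMALLOC_END,
 *			     -1, GFP_KERNEL);
 *	if (IS_ERR(va))
 *		return PTR_ERR(va);
 *	...map pages into [va->va_start, va->va_end)...
 *	free_vmap_area(va);
 */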
static void vmap_debug_free_range(unsigned long start, unsigned long end)
{
	/*
	 * Unmap page tables and force a TLB flush immediately if
	 * CONFIG_DEBUG_PAGEALLOC is set. This catches use-after-free
	 * bugs, similarly to how they are caught in the linear kernel
	 * virtual address space after a page has been freed.
	 *
	 * All the lazy freeing logic is still retained, in order to
	 * minimise intrusiveness of this debugging feature.
	 *
	 * This is going to be *slow* (linear kernel virtual address
	 * debugging doesn't do a broadcast TLB flush so it is a lot
	 * faster).
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	vunmap_page_range(start, end);
	flush_tlb_kernel_range(start, end);
#endif
}

/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}

static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);

/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);

/*
 * called before a call to iounmap() if the caller wants vm_area_struct's
 * immediately freed.
 */
void set_iounmap_nonlazy(void)
{
	atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
}
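/*
 * Worked example, illustrative only: with 4 online CPUs and 4K pages,
 * fls(4) == 3, so lazy_max_pages() == 3 * (32MB / 4K) == 24576 pages,
 * i.e. up to 96MB of lazily-freed virtual space may accumulate before
 * a purge and global TLB flush is forced.
 */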
/*
 * Purges all lazily-freed vmap areas.
 *
 * If sync is 0 then don't purge if there is already a purge in progress.
 * If force_flush is 1, then flush kernel TLBs between *start and *end even
 * if we found no lazy vmap areas to unmap (callers can use this to optimise
 * their own TLB flushing).
 * Returns with *start = min(*start, lowest purged address)
 *              *end = max(*end, highest purged address)
 */
static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
					int sync, int force_flush)
{
	static DEFINE_SPINLOCK(purge_lock);
	LIST_HEAD(valist);
	struct vmap_area *va;
	struct vmap_area *n_va;
	int nr = 0;

	/*
	 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
	 * should not expect such behaviour. This just simplifies locking for
	 * the case that isn't actually used at the moment anyway.
	 */
	if (!sync && !force_flush) {
		if (!spin_trylock(&purge_lock))
			return;
	} else
		spin_lock(&purge_lock);

	if (sync)
		purge_fragmented_blocks_allcpus();

	rcu_read_lock();
	list_for_each_entry_rcu(va, &vmap_area_list, list) {
		if (va->flags & VM_LAZY_FREE) {
			if (va->va_start < *start)
				*start = va->va_start;
			if (va->va_end > *end)
				*end = va->va_end;
			nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
			unmap_vmap_area(va);
			list_add_tail(&va->purge_list, &valist);
			va->flags |= VM_LAZY_FREEING;
			va->flags &= ~VM_LAZY_FREE;
		}
	}
	rcu_read_unlock();

	if (nr)
		atomic_sub(nr, &vmap_lazy_nr);

	if (nr || force_flush)
		flush_tlb_kernel_range(*start, *end);

	if (nr) {
		spin_lock(&vmap_area_lock);
		list_for_each_entry_safe(va, n_va, &valist, purge_list)
			__free_vmap_area(va);
		spin_unlock(&vmap_area_lock);
	}
	spin_unlock(&purge_lock);
}

/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * is already purging.
 */
static void try_purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 0, 0);
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 1, 0);
}

/*
 * Free and unmap a vmap area; the caller must ensure flush_cache_vunmap()
 * has already been called for the range.
 */
static void free_unmap_vmap_area_noflush(struct vmap_area *va)
{
	va->flags |= VM_LAZY_FREE;
	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
		try_purge_vmap_area_lazy();
}

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	free_unmap_vmap_area_noflush(va);
}

static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}

static void free_unmap_vmap_area_addr(unsigned long addr)
{
	struct vmap_area *va;

	va = find_vmap_area(addr);
	BUG_ON(!va);
	free_unmap_vmap_area(va);
}


/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE	(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		\
		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
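/*
 * Worked example, illustrative only: on 32-bit with 4K pages,
 * VMALLOC_PAGES == 32768; with NR_CPUS == 4 the unclamped value is
 * 32768 / 4 / 16 == 512 bits, which already lies between
 * VMAP_BBMAP_BITS_MIN (64) and VMAP_BBMAP_BITS_MAX (1024), so
 * VMAP_BBMAP_BITS == 512 and VMAP_BLOCK_SIZE == 2MB per block.
 */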
static bool vmap_initialized __read_mostly = false;

struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	struct vmap_block_queue *vbq;
	unsigned long free, dirty;
	DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
	DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
	struct list_head free_list;
	struct rcu_head rcu_head;
	struct list_head purge;
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_SPINLOCK(vmap_block_tree_lock);
static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);

/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */

static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}

static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (unlikely(IS_ERR(va))) {
		kfree(vb);
		return ERR_PTR(PTR_ERR(va));
	}

	err = radix_tree_preload(gfp_mask);
	if (unlikely(err)) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	spin_lock_init(&vb->lock);
	vb->va = va;
	vb->free = VMAP_BBMAP_BITS;
	vb->dirty = 0;
	bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS);
	bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
	INIT_LIST_HEAD(&vb->free_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	spin_lock(&vmap_block_tree_lock);
	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(err);
	radix_tree_preload_end();

	vbq = &get_cpu_var(vmap_block_queue);
	vb->vbq = vbq;
	spin_lock(&vbq->lock);
	list_add_rcu(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);

	return vb;
}

static void rcu_free_vb(struct rcu_head *head)
{
	struct vmap_block *vb = container_of(head, struct vmap_block, rcu_head);

	kfree(vb);
}
static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;
	unsigned long vb_idx;

	vb_idx = addr_to_vb_idx(vb->va->va_start);
	spin_lock(&vmap_block_tree_lock);
	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(tmp != vb);

	free_unmap_vmap_area_noflush(vb->va);
	call_rcu(&vb->rcu_head, rcu_free_vb);
}

static void purge_fragmented_blocks(int cpu)
{
	LIST_HEAD(purge);
	struct vmap_block *vb;
	struct vmap_block *n_vb;
	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);

	rcu_read_lock();
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {

		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
			continue;

		spin_lock(&vb->lock);
		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
			vb->free = 0; /* prevent further allocs after releasing lock */
			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
			bitmap_fill(vb->alloc_map, VMAP_BBMAP_BITS);
			bitmap_fill(vb->dirty_map, VMAP_BBMAP_BITS);
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
			spin_unlock(&vb->lock);
			list_add_tail(&vb->purge, &purge);
		} else
			spin_unlock(&vb->lock);
	}
	rcu_read_unlock();

	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
		list_del(&vb->purge);
		free_vmap_block(vb);
	}
}

static void purge_fragmented_blocks_thiscpu(void)
{
	purge_fragmented_blocks(smp_processor_id());
}

static void purge_fragmented_blocks_allcpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		purge_fragmented_blocks(cpu);
}

static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	unsigned long addr = 0;
	unsigned int order;
	int purge = 0;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	order = get_order(size);

again:
	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		int i;

		spin_lock(&vb->lock);
		if (vb->free < 1UL << order)
			goto next;
		i = bitmap_find_free_region(vb->alloc_map,
						VMAP_BBMAP_BITS, order);

		if (i < 0) {
			if (vb->free + vb->dirty == VMAP_BBMAP_BITS) {
				/* fragmented and no outstanding allocations */
				BUG_ON(vb->dirty != VMAP_BBMAP_BITS);
				purge = 1;
			}
			goto next;
		}
		addr = vb->va->va_start + (i << PAGE_SHIFT);
		BUG_ON(addr_to_vb_idx(addr) !=
				addr_to_vb_idx(vb->va->va_start));
		vb->free -= 1UL << order;
		if (vb->free == 0) {
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
		}
		spin_unlock(&vb->lock);
		break;
next:
		spin_unlock(&vb->lock);
	}

	if (purge)
		purge_fragmented_blocks_thiscpu();

	put_cpu_var(vmap_block_queue);
	rcu_read_unlock();

	if (!addr) {
		vb = new_vmap_block(gfp_mask);
		if (IS_ERR(vb))
			return vb;
		goto again;
	}

	return (void *)addr;
}
static void vb_free(const void *addr, unsigned long size)
{
	unsigned long offset;
	unsigned long vb_idx;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);

	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);

	order = get_order(size);

	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);

	vb_idx = addr_to_vb_idx((unsigned long)addr);
	rcu_read_lock();
	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
	rcu_read_unlock();
	BUG_ON(!vb);

	spin_lock(&vb->lock);
	BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));

	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free);
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}

/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int cpu;
	int flush = 0;

	if (unlikely(!vmap_initialized))
		return;

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			int i;

			spin_lock(&vb->lock);
			i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
			while (i < VMAP_BBMAP_BITS) {
				unsigned long s, e;
				int j;
				j = find_next_zero_bit(vb->dirty_map,
					VMAP_BBMAP_BITS, i);

				s = vb->va->va_start + (i << PAGE_SHIFT);
				e = vb->va->va_start + (j << PAGE_SHIFT);
				vunmap_page_range(s, e);
				flush = 1;

				if (s < start)
					start = s;
				if (e > end)
					end = e;

				i = j;
				i = find_next_bit(vb->dirty_map,
							VMAP_BBMAP_BITS, i);
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	__purge_vmap_area_lazy(&start, &end, 1, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)mem;

	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(addr & (PAGE_SIZE-1));

	debug_check_no_locks_freed(mem, size);
	vmap_debug_free_range(addr, addr+size);

	if (likely(count <= VMAP_MAX_ALLOC))
		vb_free(mem, size);
	else
		free_unmap_vmap_area_addr(addr);
}
EXPORT_SYMBOL(vm_unmap_ram);
/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}
	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
EXPORT_SYMBOL(vm_map_ram);
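/*
 * Illustrative sketch, not part of the original file: the intended
 * pairing of vm_map_ram() and vm_unmap_ram() for a short-lived
 * mapping. 'pages' and 'nr' are hypothetical; node -1 means no NUMA
 * preference.
 *
 *	void *mem = vm_map_ram(pages, nr, -1, PAGE_KERNEL);
 *	if (!mem)
 *		return -ENOMEM;
 *	...access nr * PAGE_SIZE bytes at mem...
 *	vm_unmap_ram(mem, nr);
 */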
/**
 * vm_area_register_early - register vmap area early during boot
 * @vm: vm_struct to register
 * @align: requested alignment
 *
 * This function is used to register kernel vm area before
 * vmalloc_init() is called.  @vm->size and @vm->flags should contain
 * proper values on entry and other fields should be zero.  On return,
 * vm->addr contains the allocated address.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_register_early(struct vm_struct *vm, size_t align)
{
	static size_t vm_init_off __initdata;
	unsigned long addr;

	addr = ALIGN(VMALLOC_START + vm_init_off, align);
	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;

	vm->addr = (void *)addr;

	vm->next = vmlist;
	vmlist = vm;
}

void __init vmalloc_init(void)
{
	struct vmap_area *va;
	struct vm_struct *tmp;
	int i;

	for_each_possible_cpu(i) {
		struct vmap_block_queue *vbq;

		vbq = &per_cpu(vmap_block_queue, i);
		spin_lock_init(&vbq->lock);
		INIT_LIST_HEAD(&vbq->free);
	}

	/* Import existing vmlist entries. */
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
		va->flags = tmp->flags | VM_VM_AREA;
		va->va_start = (unsigned long)tmp->addr;
		va->va_end = va->va_start + tmp->size;
		__insert_vmap_area(va);
	}

	vmap_area_pcpu_hole = VMALLOC_END;

	vmap_initialized = true;
}

/**
 * map_kernel_range_noflush - map kernel VM area with the specified pages
 * @addr: start of the VM area to map
 * @size: size of the VM area to map
 * @prot: page protection flags to use
 * @pages: pages to map
 *
 * Map PFN_UP(@size) pages at @addr.  The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is
 * responsible for calling flush_cache_vmap() on to-be-mapped areas
 * before calling this function.
 *
 * RETURNS:
 * The number of pages mapped on success, -errno on failure.
 */
int map_kernel_range_noflush(unsigned long addr, unsigned long size,
			     pgprot_t prot, struct page **pages)
{
	return vmap_page_range_noflush(addr, addr + size, prot, pages);
}

/**
 * unmap_kernel_range_noflush - unmap kernel VM area
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Unmap PFN_UP(@size) pages at @addr.  The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is
 * responsible for calling flush_cache_vunmap() on to-be-unmapped areas
 * before calling this function and flush_tlb_kernel_range() after.
 */
void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
	vunmap_page_range(addr, addr + size);
}

/**
 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Similar to unmap_kernel_range_noflush() but flushes vcache before
 * the unmapping and tlb after.
 */
void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;

	flush_cache_vunmap(addr, end);
	vunmap_page_range(addr, end);
	flush_tlb_kernel_range(addr, end);
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	unsigned long addr = (unsigned long)area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	err = vmap_page_range(addr, end, prot, *pages);
	if (err > 0) {
		*pages += err;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(map_vm_area);

/*** Old vmalloc interfaces ***/
DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
			      unsigned long flags, void *caller)
{
	vm->flags = flags;
	vm->addr = (void *)va->va_start;
	vm->size = va->va_end - va->va_start;
	vm->caller = caller;
	va->private = vm;
	va->flags |= VM_VM_AREA;
}

static void insert_vmalloc_vmlist(struct vm_struct *vm)
{
	struct vm_struct *tmp, **p;

	vm->flags &= ~VM_UNLIST;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= vm->addr)
			break;
	}
	vm->next = *p;
	*p = vm;
	write_unlock(&vmlist_lock);
}

static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
			      unsigned long flags, void *caller)
{
	setup_vmalloc_vm(vm, va, flags, caller);
	insert_vmalloc_vmlist(vm);
}
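/*
 * Illustrative sketch, not part of the original file: how the noflush
 * map/unmap pair composes with the cache and TLB maintenance that the
 * comments above require. 'area', 'pages' and 'size' are hypothetical.
 *
 *	unsigned long addr = (unsigned long)area->addr;
 *
 *	flush_cache_vmap(addr, addr + size);
 *	map_kernel_range_noflush(addr, size, PAGE_KERNEL, pages);
 *	...
 *	flush_cache_vunmap(addr, addr + size);
 *	unmap_kernel_range_noflush(addr, size);
 *	flush_tlb_kernel_range(addr, addr + size);
 */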
static struct vm_struct *__get_vm_area_node(unsigned long size,
		unsigned long align, unsigned long flags, unsigned long start,
		unsigned long end, int node, gfp_t gfp_mask, void *caller)
{
	struct vmap_area *va;
	struct vm_struct *area;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}

	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!area))
		return NULL;

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(area);
		return NULL;
	}

	/*
	 * When this function is called from __vmalloc_node,
	 * we do not add vm_struct to vmlist here to avoid
	 * accessing uninitialized members of vm_struct such as
	 * pages and nr_pages fields. They will be set later.
	 * To distinguish it from others, we use a VM_UNLIST flag.
	 */
	if (flags & VM_UNLIST)
		setup_vmalloc_vm(area, va, flags, caller);
	else
		insert_vmalloc_vm(area, va, flags, caller);

	return area;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
						__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);

struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
				       unsigned long start, unsigned long end,
				       void *caller)
{
	return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
				  caller);
}

/**
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes. Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				-1, GFP_KERNEL, __builtin_return_address(0));
}

struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				void *caller)
{
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
						-1, GFP_KERNEL, caller);
}

struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
				   int node, gfp_t gfp_mask)
{
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				  node, gfp_mask, __builtin_return_address(0));
}

static struct vm_struct *find_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA)
		return va->private;

	return NULL;
}
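/*
 * Illustrative sketch, not part of the original file: reserving bare
 * KVA the way an ioremap-style caller would, without any backing
 * pages being allocated.
 *
 *	struct vm_struct *area = get_vm_area(size, VM_IOREMAP);
 *	if (!area)
 *		return NULL;
 *	...establish the mapping at area->addr...
 */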
/**
 * remove_vm_area - find and remove a contiguous kernel virtual area
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA) {
		struct vm_struct *vm = va->private;

		if (!(vm->flags & VM_UNLIST)) {
			struct vm_struct *tmp, **p;
			/*
			 * remove from list and disallow access to
			 * this vm_struct before unmap. (address range
			 * conflicts are handled by vmap.)
			 */
			write_lock(&vmlist_lock);
			for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next)
				;
			*p = tmp->next;
			write_unlock(&vmlist_lock);
		}

		vmap_debug_free_range(va->va_start, va->va_end);
		free_unmap_vmap_area(va);
		vm->size -= PAGE_SIZE;

		return vm;
	}
	return NULL;
}

static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);
	debug_check_no_obj_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			__free_page(page);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 * vfree - release memory allocated by vmalloc()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in interrupt context.
 */
void vfree(const void *addr)
{
	BUG_ON(in_interrupt());

	kmemleak_free(addr);

	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 * vunmap - release virtual mapping obtained by vmap()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	might_sleep();
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);
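/*
 * Illustrative sketch, not part of the original file: the usual
 * vmap()/vunmap() pairing over an existing page array. 'pages' and
 * 'nr' are hypothetical; VM_MAP is the conventional flag for such
 * mappings.
 *
 *	void *p = vmap(pages, nr, VM_MAP, PAGE_KERNEL);
 *	if (!p)
 *		return -ENOMEM;
 *	...
 *	vunmap(p);
 */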
/**
 * vmap - map an array of pages into virtually contiguous space
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	might_sleep();

	if (count > totalram_pages)
		return NULL;

	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
					__builtin_return_address(0));
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);

static void *__vmalloc_node(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, pgprot_t prot,
			    int node, void *caller);
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, int node, void *caller)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, 1, gfp_mask | __GFP_ZERO,
				PAGE_KERNEL, node, caller);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size,
				(gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
				node);
	}
	area->pages = pages;
	area->caller = caller;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page;

		if (node < 0)
			page = alloc_page(gfp_mask);
		else
			page = alloc_pages_node(node, gfp_mask, 0);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
		area->pages[i] = page;
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
	void *addr = __vmalloc_area_node(area, gfp_mask, prot, -1,
					 __builtin_return_address(0));

	/*
	 * A ref_count = 3 is needed because the vm_struct and vmap_area
	 * structures allocated in the __get_vm_area_node() function contain
	 * references to the virtual address of the vmalloc'ed block.
	 */
	kmemleak_alloc(addr, area->size - PAGE_SIZE, 3, gfp_mask);

	return addr;
}
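/*
 * Worked example, illustrative only: an 8MB vmalloc with 4K pages has
 * nr_pages == 2048, so array_size == 2048 * sizeof(struct page *)
 * == 16KB on 64-bit, which exceeds PAGE_SIZE; the page-pointer array
 * is therefore itself vmalloc'ed (and VM_VPAGES is set). The recursion
 * above is bounded because the nested allocation's own pointer array
 * easily fits in a single page and is kmalloc'ed.
 */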
/**
 * __vmalloc_node - allocate virtually contiguous memory
 * @size: allocation size
 * @align: desired alignment
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @node: node to use for allocation or -1
 * @caller: caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags.  Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, pgprot_t prot,
			    int node, void *caller)
{
	struct vm_struct *area;
	void *addr;
	unsigned long real_size = size;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
		return NULL;

	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
				  VMALLOC_START, VMALLOC_END, node,
				  gfp_mask, caller);

	if (!area)
		return NULL;

	addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);

	/*
	 * In this function, newly allocated vm_struct is not added
	 * to vmlist at __get_vm_area_node(), so it is added here.
	 */
	insert_vmalloc_vmlist(area);

	/*
	 * A ref_count = 3 is needed because the vm_struct and vmap_area
	 * structures allocated in the __get_vm_area_node() function contain
	 * references to the virtual address of the vmalloc'ed block.
	 */
	kmemleak_alloc(addr, real_size, 3, gfp_mask);

	return addr;
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, 1, gfp_mask, prot, -1,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);

/**
 * vmalloc - allocate virtually contiguous memory
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
					-1, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc);

/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc_node(size, SHMLBA,
			     GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			     PAGE_KERNEL, -1, __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
					node, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);
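/*
 * Illustrative sketch, not part of the original file: the canonical
 * allocate/free pairing. 'size' is hypothetical.
 *
 *	void *buf = vmalloc(size);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	vfree(buf);
 */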
#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
			      -1, __builtin_return_address(0));
}

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
			      -1, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size: allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
			     -1, __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);
/*
 * Small helper routine: copy contents from addr to buf.
 * If the page is not present, fill with zeroes.
 */
static int aligned_vread(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		offset = (unsigned long)addr & ~PAGE_MASK;
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need
		 * lock. But adding lock here means that we need to add
		 * overhead of vmalloc()/vfree() calls for this _debug_
		 * interface, rarely used. Instead of that, we'll use
		 * kmap() and get small overhead in this access function.
		 */
		if (p) {
			/*
			 * we can expect USER0 is not used (see vread/vwrite's
			 * function description)
			 */
			void *map = kmap_atomic(p, KM_USER0);
			memcpy(buf, map + offset, length);
			kunmap_atomic(map, KM_USER0);
		} else
			memset(buf, 0, length);

		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}

static int aligned_vwrite(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		offset = (unsigned long)addr & ~PAGE_MASK;
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need
		 * lock. But adding lock here means that we need to add
		 * overhead of vmalloc()/vfree() calls for this _debug_
		 * interface, rarely used. Instead of that, we'll use
		 * kmap() and get small overhead in this access function.
		 */
		if (p) {
			/*
			 * we can expect USER0 is not used (see vread/vwrite's
			 * function description)
			 */
			void *map = kmap_atomic(p, KM_USER0);
			memcpy(map + offset, buf, length);
			kunmap_atomic(map, KM_USER0);
		}
		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}

/**
 * vread() - read vmalloc area in a safe way.
 * @buf: buffer for reading data
 * @addr: vm address.
 * @count: number of bytes to be read.
 *
 * Returns the number of bytes by which @addr and @buf should be
 * increased (the same number as @count). Returns 0 if [addr...addr+count)
 * does not intersect any live vmalloc area.
 *
 * This function checks that @addr is a valid vmalloc'ed area, and
 * copies data from that area to the given buffer. If the given memory
 * range of [addr...addr+count) includes some valid address, data is
 * copied to the proper area of @buf. If there are memory holes, they
 * are zero-filled. An IOREMAP area is treated as a memory hole and no
 * copy is done.
 *
 * If [addr...addr+count) does not intersect any live vm_struct area,
 * this returns 0. @buf should be a kernel buffer. Because this function
 * uses KM_USER0, the caller should guarantee KM_USER0 is not used.
 *
 * Note: In usual ops, vread() is never necessary because the caller
 * should know the vmalloc() area is valid and can use memcpy().
 * This is for routines which have to access the vmalloc area without
 * any information, such as /dev/kmem.
 */
long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long buflen = count;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; count && tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		if (n > count)
			n = count;
		if (!(tmp->flags & VM_IOREMAP))
			aligned_vread(buf, addr, n);
		else /* IOREMAP area is treated as memory hole */
			memset(buf, 0, n);
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	read_unlock(&vmlist_lock);

	if (buf == buf_start)
		return 0;
	/* zero-fill memory holes */
	if (buf != buf_start + buflen)
		memset(buf, 0, buflen - (buf - buf_start));

	return buflen;
}
/**
 * vwrite() - write vmalloc area in a safe way.
 * @buf: buffer for source data
 * @addr: vm address.
 * @count: number of bytes to be written.
 *
 * Returns the number of bytes by which @addr and @buf should be
 * increased (the same number as @count).
 * If [addr...addr+count) does not intersect any valid vmalloc area,
 * this returns 0.
 *
 * This function checks that @addr is a valid vmalloc'ed area, and
 * copies data from a buffer to the given address. If the specified
 * range of [addr...addr+count) includes some valid address, data is
 * copied from the proper area of @buf. If there are memory holes,
 * nothing is copied to them. An IOREMAP area is treated as a memory
 * hole and no copy is done.
 *
 * If [addr...addr+count) does not intersect any live vm_struct area,
 * this returns 0. @buf should be a kernel buffer. Because this function
 * uses KM_USER0, the caller should guarantee KM_USER0 is not used.
 *
 * Note: In usual ops, vwrite() is never necessary because the caller
 * should know the vmalloc() area is valid and can use memcpy().
 * This is for routines which have to access the vmalloc area without
 * any information, such as /dev/kmem.
 *
 * The caller should guarantee KM_USER1 is not used.
 */
long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr;
	unsigned long n, buflen;
	int copied = 0;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;
	buflen = count;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; count && tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		if (n > count)
			n = count;
		if (!(tmp->flags & VM_IOREMAP)) {
			aligned_vwrite(buf, addr, n);
			copied++;
		}
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	read_unlock(&vmlist_lock);
	if (!copied)
		return 0;
	return buflen;
}

/**
 * remap_vmalloc_range - map vmalloc pages to userspace
 * @vma: vma to cover (map full range of vma)
 * @addr: vmalloc memory
 * @pgoff: number of pages into addr before first page to map
 *
 * Returns:	0 for success, -Exxx on failure
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma. Will return failure if
 * that criteria isn't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	struct vm_struct *area;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;

	if ((PAGE_SIZE-1) & (unsigned long)addr)
		return -EINVAL;

	area = find_vm_area(addr);
	if (!area)
		return -EINVAL;

	if (!(area->flags & VM_USERMAP))
		return -EINVAL;

	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
		return -EINVAL;

	addr += pgoff << PAGE_SHIFT;
	do {
		struct page *page = vmalloc_to_page(addr);
		int ret;

		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		addr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
	vma->vm_flags |= VM_RESERVED;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range);

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}
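/*
 * Illustrative sketch, not part of the original file: a driver mmap()
 * handler exporting a vmalloc_user() buffer to userspace. 'foo_mmap'
 * and 'priv->buf' are hypothetical names.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, priv->buf, vma->vm_pgoff);
 *	}
 */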

static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
	/* apply_to_page_range() does all the hard work. */
	return 0;
}

/**
 * alloc_vm_area - allocate a range of kernel address space
 * @size: size of the area
 *
 * Returns:	NULL on failure, vm_struct on success
 *
 * This function reserves a range of kernel address space and
 * allocates pagetables to map that range.  No actual mappings
 * are created.  If the kernel address space is not shared
 * between processes, it syncs the pagetable across all
 * processes.
 */
struct vm_struct *alloc_vm_area(size_t size)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, VM_IOREMAP,
				  __builtin_return_address(0));
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				area->size, f, NULL)) {
		free_vm_area(area);
		return NULL;
	}

	/* Make sure the pagetables are constructed in process kernel
	   mappings */
	vmalloc_sync_all();

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);

void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;

	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);
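
/*
 * Example usage (an illustrative sketch, not part of this file):
 * reserve a page of kernel address space whose ptes will be populated
 * by some other mechanism (e.g. a hypervisor mapping foreign pages in),
 * then release it again.  The name "my_area" is hypothetical.
 *
 *	struct vm_struct *my_area = alloc_vm_area(PAGE_SIZE);
 *	if (!my_area)
 *		return -ENOMEM;
 *	(hand my_area->addr to whatever will populate the ptes)
 *	free_vm_area(my_area);
 */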

#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
static struct vmap_area *node_to_va(struct rb_node *n)
{
	return n ? rb_entry(n, struct vmap_area, rb_node) : NULL;
}

/**
 * pvm_find_next_prev - find the next and prev vmap_area surrounding @end
 * @end: target address
 * @pnext: out arg for the next vmap_area
 * @pprev: out arg for the previous vmap_area
 *
 * Returns: %true if either or both of next and prev are found,
 *	    %false if no vmap_area exists
 *
 * Find vmap_areas whose end addresses enclose @end, i.e. when not
 * NULL, (*pnext)->va_end > @end and (*pprev)->va_end <= @end.
 */
static bool pvm_find_next_prev(unsigned long end,
			       struct vmap_area **pnext,
			       struct vmap_area **pprev)
{
	struct rb_node *n = vmap_area_root.rb_node;
	struct vmap_area *va = NULL;

	while (n) {
		va = rb_entry(n, struct vmap_area, rb_node);
		if (end < va->va_end)
			n = n->rb_left;
		else if (end > va->va_end)
			n = n->rb_right;
		else
			break;
	}

	if (!va)
		return false;

	if (va->va_end > end) {
		*pnext = va;
		*pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
	} else {
		*pprev = va;
		*pnext = node_to_va(rb_next(&(*pprev)->rb_node));
	}
	return true;
}

/**
 * pvm_determine_end - find the highest aligned address between two vmap_areas
 * @pnext: in/out arg for the next vmap_area
 * @pprev: in/out arg for the previous vmap_area
 * @align: alignment
 *
 * Returns: determined end address
 *
 * Find the highest aligned address between *@pnext and *@pprev below
 * VMALLOC_END.  *@pnext and *@pprev are adjusted so that the
 * aligned-down address falls between the end addresses of the two
 * vmap_areas.
 *
 * Please note that the address returned by this function may fall
 * inside the *@pnext vmap_area.  The caller is responsible for
 * checking that.
 */
static unsigned long pvm_determine_end(struct vmap_area **pnext,
				       struct vmap_area **pprev,
				       unsigned long align)
{
	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	unsigned long addr;

	if (*pnext)
		addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end);
	else
		addr = vmalloc_end;

	while (*pprev && (*pprev)->va_end > addr) {
		*pnext = *pprev;
		*pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
	}

	return addr;
}
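
/*
 * Worked illustration of the two helpers above (hypothetical addresses,
 * not from a real system): with live vmap_areas ending at 0x1000 and
 * 0x3000, pvm_find_next_prev(0x2000, &next, &prev) yields
 * prev->va_end == 0x1000 and next->va_end == 0x3000.  With
 * align == 0x1000, pvm_determine_end(&next, &prev, 0x1000) then
 * returns next->va_start rounded down to 0x1000 alignment, capped at
 * VMALLOC_END rounded down the same way (which is also what it returns
 * when next is NULL).
 */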

/**
 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
 * @offsets: array containing offset of each area
 * @sizes: array containing size of each area
 * @nr_vms: the number of areas to allocate
 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
 * @gfp_mask: allocation mask
 *
 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
 *	    vm_structs on success, %NULL on failure
 *
 * The percpu allocator wants to use congruent vm areas so that it can
 * maintain the offsets among percpu areas.  This function allocates
 * congruent vmalloc areas for it.  These areas tend to be scattered
 * pretty far apart, with the distance between two areas easily going
 * up to gigabytes.  To avoid interacting with regular vmallocs, these
 * areas are allocated from the top of the vmalloc address space.
 *
 * Despite its complicated look, this allocator is rather simple.  It
 * does everything top-down and scans areas from the end looking for a
 * matching slot.  While scanning, if any of the areas overlaps with an
 * existing vmap_area, the base address is pulled down to fit the
 * area.  Scanning is repeated until all the areas fit, then all
 * necessary data structures are inserted and the result is returned.
 */
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align, gfp_t gfp_mask)
{
	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	struct vmap_area **vas, *prev, *next;
	struct vm_struct **vms;
	int area, area2, last_area, term_area;
	unsigned long base, start, end, last_end;
	bool purged = false;

	gfp_mask &= GFP_RECLAIM_MASK;

	/* verify parameters and allocate data structures */
	BUG_ON(align & ~PAGE_MASK || !is_power_of_2(align));
	for (last_area = 0, area = 0; area < nr_vms; area++) {
		start = offsets[area];
		end = start + sizes[area];

		/* is everything aligned properly? */
		BUG_ON(!IS_ALIGNED(offsets[area], align));
		BUG_ON(!IS_ALIGNED(sizes[area], align));

		/* detect the area with the highest address */
		if (start > offsets[last_area])
			last_area = area;

		for (area2 = 0; area2 < nr_vms; area2++) {
			unsigned long start2 = offsets[area2];
			unsigned long end2 = start2 + sizes[area2];

			if (area2 == area)
				continue;

			BUG_ON(start2 >= start && start2 < end);
			BUG_ON(end2 <= end && end2 > start);
		}
	}
	last_end = offsets[last_area] + sizes[last_area];

	if (vmalloc_end - vmalloc_start < last_end) {
		WARN_ON(true);
		return NULL;
	}

	vms = kzalloc(sizeof(vms[0]) * nr_vms, gfp_mask);
	vas = kzalloc(sizeof(vas[0]) * nr_vms, gfp_mask);
	if (!vas || !vms)
		goto err_free;

	for (area = 0; area < nr_vms; area++) {
		vas[area] = kzalloc(sizeof(struct vmap_area), gfp_mask);
		vms[area] = kzalloc(sizeof(struct vm_struct), gfp_mask);
		if (!vas[area] || !vms[area])
			goto err_free;
	}
retry:
	spin_lock(&vmap_area_lock);

	/* start scanning - we scan from the top, begin with the last area */
	area = term_area = last_area;
	start = offsets[area];
	end = start + sizes[area];

	if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) {
		base = vmalloc_end - last_end;
		goto found;
	}
	base = pvm_determine_end(&next, &prev, align) - end;

	while (true) {
		BUG_ON(next && next->va_end <= base + end);
		BUG_ON(prev && prev->va_end > base + end);

		/*
		 * base might have underflowed, add last_end before
		 * comparing.
		 */
		if (base + last_end < vmalloc_start + last_end) {
			spin_unlock(&vmap_area_lock);
			if (!purged) {
				purge_vmap_area_lazy();
				purged = true;
				goto retry;
			}
			goto err_free;
		}

		/*
		 * If next overlaps, move base downwards so that it's
		 * right below next and then recheck.
		 */
		if (next && next->va_start < base + end) {
			base = pvm_determine_end(&next, &prev, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * If prev overlaps, shift down next and prev and move
		 * base so that it's right below the new next and then
		 * recheck.
		 */
		if (prev && prev->va_end > base + start) {
			next = prev;
			prev = node_to_va(rb_prev(&next->rb_node));
			base = pvm_determine_end(&next, &prev, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * This area fits, move on to the previous one.  If
		 * the previous one is the terminal one, we're done.
		 */
		area = (area + nr_vms - 1) % nr_vms;
		if (area == term_area)
			break;
		start = offsets[area];
		end = start + sizes[area];
		pvm_find_next_prev(base + end, &next, &prev);
	}
found:
	/* we've found a fitting base, insert all va's */
	for (area = 0; area < nr_vms; area++) {
		struct vmap_area *va = vas[area];

		va->va_start = base + offsets[area];
		va->va_end = va->va_start + sizes[area];
		__insert_vmap_area(va);
	}

	vmap_area_pcpu_hole = base + offsets[last_area];

	spin_unlock(&vmap_area_lock);

	/* insert all vm's */
	for (area = 0; area < nr_vms; area++)
		insert_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
				  pcpu_get_vm_areas);

	kfree(vas);
	return vms;

err_free:
	for (area = 0; area < nr_vms; area++) {
		if (vas)
			kfree(vas[area]);
		if (vms)
			kfree(vms[area]);
	}
	kfree(vas);
	kfree(vms);
	return NULL;
}
#endif
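
/*
 * Example usage (an illustrative sketch, not part of this file): the
 * percpu allocator might reserve two congruent three-page areas, one
 * unit at offset 0 and another 8MB higher, as below.  The concrete
 * values are hypothetical.
 *
 *	const unsigned long offsets[] = { 0, 8 << 20 };
 *	const size_t sizes[] = { 3 * PAGE_SIZE, 3 * PAGE_SIZE };
 *	struct vm_struct **vms;
 *
 *	vms = pcpu_get_vm_areas(offsets, sizes, 2, PAGE_SIZE, GFP_KERNEL);
 *	if (!vms)
 *		return -ENOMEM;
 *	(vms[1]->addr - vms[0]->addr == 8MB, matching the offsets)
 *	pcpu_free_vm_areas(vms, 2);	(defined just below)
 */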

/**
 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
 * @nr_vms: the number of allocated areas
 *
 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
 */
void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
	int i;

	for (i = 0; i < nr_vms; i++)
		free_vm_area(vms[i]);
	kfree(vms);
}

#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;
	struct vm_struct *v;

	read_lock(&vmlist_lock);
	v = vmlist;
	while (n > 0 && v) {
		n--;
		v = v->next;
	}
	if (!n)
		return v;

	return NULL;
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct vm_struct *v = p;

	++*pos;
	return v->next;
}

static void s_stop(struct seq_file *m, void *p)
{
	read_unlock(&vmlist_lock);
}

static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
	if (NUMA_BUILD) {
		unsigned int nr, *counters = m->private;

		if (!counters)
			return;

		memset(counters, 0, nr_node_ids * sizeof(unsigned int));

		for (nr = 0; nr < v->nr_pages; nr++)
			counters[page_to_nid(v->pages[nr])]++;

		for_each_node_state(nr, N_HIGH_MEMORY)
			if (counters[nr])
				seq_printf(m, " N%u=%u", nr, counters[nr]);
	}
}

static int s_show(struct seq_file *m, void *p)
{
	struct vm_struct *v = p;

	seq_printf(m, "0x%p-0x%p %7ld",
		   v->addr, v->addr + v->size, v->size);

	if (v->caller) {
		char buff[KSYM_SYMBOL_LEN];

		seq_putc(m, ' ');
		sprint_symbol(buff, (unsigned long)v->caller);
		seq_puts(m, buff);
	}

	if (v->nr_pages)
		seq_printf(m, " pages=%d", v->nr_pages);

	if (v->phys_addr)
		seq_printf(m, " phys=%lx", v->phys_addr);

	if (v->flags & VM_IOREMAP)
		seq_printf(m, " ioremap");

	if (v->flags & VM_ALLOC)
		seq_printf(m, " vmalloc");

	if (v->flags & VM_MAP)
		seq_printf(m, " vmap");

	if (v->flags & VM_USERMAP)
		seq_printf(m, " user");

	if (v->flags & VM_VPAGES)
		seq_printf(m, " vpages");

	show_numa_info(m, v);
	seq_putc(m, '\n');
	return 0;
}

static const struct seq_operations vmalloc_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};
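
/*
 * Each /proc/vmallocinfo line emitted by s_show() above has the form
 * (field values depend on the vm_struct; placeholders are shown here):
 *
 *	0x<start>-0x<end> <size> [<caller>] [pages=<n>] [phys=<addr>]
 *	[ioremap] [vmalloc] [vmap] [user] [vpages] [N<node>=<pages>...]
 *
 * where only the fields applicable to a given vm_struct are printed.
 */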

static int vmalloc_open(struct inode *inode, struct file *file)
{
	unsigned int *ptr = NULL;
	int ret;

	if (NUMA_BUILD)
		ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
	ret = seq_open(file, &vmalloc_op);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = ptr;
	} else
		kfree(ptr);
	return ret;
}

static const struct file_operations proc_vmalloc_operations = {
	.open		= vmalloc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

static int __init proc_vmalloc_init(void)
{
	proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
	return 0;
}
module_init(proc_vmalloc_init);
#endif