// SPDX-License-Identifier: GPL-2.0
/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks or atomic operations
 * and only uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter
 * (C) 2011 Linux Foundation, Christoph Lameter
 */

#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include "slab.h"
#include <linux/proc_fs.h>
#include <linux/notifier.h>
#include <linux/seq_file.h>
#include <linux/kasan.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>
#include <linux/stacktrace.h>
#include <linux/prefetch.h>
#include <linux/memcontrol.h>
#include <linux/random.h>

#include <trace/events/kmem.h>

#include "internal.h"

/*
 * Lock order:
 *   1. slab_mutex (Global Mutex)
 *   2. node->list_lock
 *   3. slab_lock(page) (Only on some arches and for debugging)
 *
 *   slab_mutex
 *
 *   The role of the slab_mutex is to protect the list of all the slabs
 *   and to synchronize major metadata changes to slab cache structures.
 *
 *   The slab_lock is only used for debugging and on arches that do not
 *   have the ability to do a cmpxchg_double. It only protects the second
 *   double word in the page struct. Meaning
 *	A. page->freelist	-> List of free objects in a page
 *	B. page->counters	-> Counters of objects
 *	C. page->frozen		-> frozen state
 *
 *   If a slab is frozen then it is exempt from list management. It is not
 *   on any list. The processor that froze the slab is the one who can
 *   perform list operations on the page. Other processors may put objects
 *   onto the freelist but the processor that froze the slab is the only
 *   one that can retrieve the objects from the page's freelist.
 *
 *   The list_lock protects the partial and full list on each node and
 *   the partial slab counter. If taken then no new slabs may be added to
 *   or removed from the lists, nor may the number of partial slabs be
 *   modified. (Note that the total number of slabs is an atomic value
 *   that may be modified without taking the list lock.)
 *
 *   The list_lock is a centralized lock and thus we avoid taking it as
 *   much as possible. As long as SLUB does not have to handle partial
 *   slabs, operations can continue without any centralized lock. For
 *   example, allocating a long series of objects that fill up slabs does
 *   not require the list lock.
 *   Interrupts are disabled during allocation and deallocation in order to
 *   make the slab allocator safe to use in the context of an irq. In addition
 *   interrupts are disabled to ensure that the processor does not change
 *   while handling per_cpu slabs, due to kernel preemption.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used.
 * If an object in a full slab is freed then the slab will show up again
 * on the partial lists.
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
 *
 * Slabs are freed when they become empty. Teardown and setup are
 * minimal so we rely on the page allocator's per cpu caches for
 * fast frees and allocs.
 *
 * Overloading of page flags that are otherwise used for LRU management.
 *
 * PageActive		The slab is frozen and exempt from list processing.
 *			This means that the slab is dedicated to a purpose
 *			such as satisfying allocations for a specific
 *			processor. Objects may be freed in the slab while
 *			it is frozen but slab_free will then skip the usual
 *			list operations. It is up to the processor holding
 *			the slab to integrate the slab into the slab lists
 *			when the slab is no longer needed.
 *
 *			One use of this flag is to mark slabs that are
 *			used for allocations. Then such a slab becomes a cpu
 *			slab. The cpu slab may be equipped with an additional
 *			freelist that allows lockless access to
 *			free objects in addition to the regular freelist
 *			that requires the slab lock.
 *
 * PageError		Slab requires special handling due to debug
 *			options set. This moves slab handling out of
 *			the fast path and disables lockless freelists.
 */

static inline int kmem_cache_debug(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_DEBUG
	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
#else
	return 0;
#endif
}

void *fixup_red_left(struct kmem_cache *s, void *p)
{
	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
		p += s->red_left_pad;

	return p;
}

static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
	return !kmem_cache_debug(s);
#else
	return false;
#endif
}

/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to test recovery from slab corruption on boot */
#undef SLUB_RESILIENCY_TEST

/* Enable to log cmpxchg failures */
#undef SLUB_DEBUG_CMPXCHG

/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in use.
 */
#define MAX_PARTIAL 10

#define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
				SLAB_POISON | SLAB_STORE_USER)

/*
 * These debug flags cannot use CMPXCHG because there might be consistency
 * issues when checking or reading debug information
 */
#define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
				SLAB_TRACE)


/*
 * Debugging flags that require metadata to be stored in the slab. These get
 * disabled when slub_debug=O is used and a cache's min order increases with
 * metadata.
 */
#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)

#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)
#define MAX_OBJS_PER_PAGE	32767 /* since page.objects is u15 */

/* Internal SLUB flags */
/* Poison object */
#define __OBJECT_POISON		((slab_flags_t __force)0x80000000U)
/* Use cmpxchg_double */
#define __CMPXCHG_DOUBLE	((slab_flags_t __force)0x40000000U)

/*
 * Tracking user of a slab.
 */
#define TRACK_ADDRS_COUNT 16
struct track {
	unsigned long addr;	/* Called from address */
#ifdef CONFIG_STACKTRACE
	unsigned long addrs[TRACK_ADDRS_COUNT];	/* Called from address */
#endif
	int cpu;		/* Was running on cpu */
	int pid;		/* Pid context */
	unsigned long when;	/* When did the operation occur */
};

enum track_item { TRACK_ALLOC, TRACK_FREE };

#ifdef CONFIG_SYSFS
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
static void memcg_propagate_slab_attrs(struct kmem_cache *s);
static void sysfs_slab_remove(struct kmem_cache *s);
#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
							{ return 0; }
static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { }
static inline void sysfs_slab_remove(struct kmem_cache *s) { }
#endif

static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	/*
	 * The rmw is racy on a preemptible kernel but this is acceptable, so
	 * avoid this_cpu_add()'s irq-disable overhead.
	 */
	raw_cpu_inc(s->cpu_slab->stat[si]);
#endif
}

/********************************************************************
 *			Core slab cache functions
 *******************************************************************/

/*
 * Returns freelist pointer (ptr). With hardening, this is obfuscated
 * with an XOR of the address where the pointer is held and a per-cache
 * random number.
 */
static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
				 unsigned long ptr_addr)
{
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	return (void *)((unsigned long)ptr ^ s->random ^ ptr_addr);
#else
	return ptr;
#endif
}

/* Returns the freelist pointer recorded at location ptr_addr.
*/ 260 static inline void *freelist_dereference(const struct kmem_cache *s, 261 void *ptr_addr) 262 { 263 return freelist_ptr(s, (void *)*(unsigned long *)(ptr_addr), 264 (unsigned long)ptr_addr); 265 } 266 267 static inline void *get_freepointer(struct kmem_cache *s, void *object) 268 { 269 return freelist_dereference(s, object + s->offset); 270 } 271 272 static void prefetch_freepointer(const struct kmem_cache *s, void *object) 273 { 274 if (object) 275 prefetch(freelist_dereference(s, object + s->offset)); 276 } 277 278 static inline void *get_freepointer_safe(struct kmem_cache *s, void *object) 279 { 280 unsigned long freepointer_addr; 281 void *p; 282 283 if (!debug_pagealloc_enabled()) 284 return get_freepointer(s, object); 285 286 freepointer_addr = (unsigned long)object + s->offset; 287 probe_kernel_read(&p, (void **)freepointer_addr, sizeof(p)); 288 return freelist_ptr(s, p, freepointer_addr); 289 } 290 291 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp) 292 { 293 unsigned long freeptr_addr = (unsigned long)object + s->offset; 294 295 #ifdef CONFIG_SLAB_FREELIST_HARDENED 296 BUG_ON(object == fp); /* naive detection of double free or corruption */ 297 #endif 298 299 *(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr); 300 } 301 302 /* Loop over all objects in a slab */ 303 #define for_each_object(__p, __s, __addr, __objects) \ 304 for (__p = fixup_red_left(__s, __addr); \ 305 __p < (__addr) + (__objects) * (__s)->size; \ 306 __p += (__s)->size) 307 308 #define for_each_object_idx(__p, __idx, __s, __addr, __objects) \ 309 for (__p = fixup_red_left(__s, __addr), __idx = 1; \ 310 __idx <= __objects; \ 311 __p += (__s)->size, __idx++) 312 313 /* Determine object index from a given position */ 314 static inline int slab_index(void *p, struct kmem_cache *s, void *addr) 315 { 316 return (p - addr) / s->size; 317 } 318 319 static inline int order_objects(int order, unsigned long size, int reserved) 320 { 321 return ((PAGE_SIZE << order) - reserved) / size; 322 } 323 324 static inline struct kmem_cache_order_objects oo_make(int order, 325 unsigned long size, int reserved) 326 { 327 struct kmem_cache_order_objects x = { 328 (order << OO_SHIFT) + order_objects(order, size, reserved) 329 }; 330 331 return x; 332 } 333 334 static inline int oo_order(struct kmem_cache_order_objects x) 335 { 336 return x.x >> OO_SHIFT; 337 } 338 339 static inline int oo_objects(struct kmem_cache_order_objects x) 340 { 341 return x.x & OO_MASK; 342 } 343 344 /* 345 * Per slab locking using the pagelock 346 */ 347 static __always_inline void slab_lock(struct page *page) 348 { 349 VM_BUG_ON_PAGE(PageTail(page), page); 350 bit_spin_lock(PG_locked, &page->flags); 351 } 352 353 static __always_inline void slab_unlock(struct page *page) 354 { 355 VM_BUG_ON_PAGE(PageTail(page), page); 356 __bit_spin_unlock(PG_locked, &page->flags); 357 } 358 359 static inline void set_page_slub_counters(struct page *page, unsigned long counters_new) 360 { 361 struct page tmp; 362 tmp.counters = counters_new; 363 /* 364 * page->counters can cover frozen/inuse/objects as well 365 * as page->_refcount. If we assign to ->counters directly 366 * we run the risk of losing updates to page->_refcount, so 367 * be careful and only assign to the fields we need. 
368 */ 369 page->frozen = tmp.frozen; 370 page->inuse = tmp.inuse; 371 page->objects = tmp.objects; 372 } 373 374 /* Interrupts must be disabled (for the fallback code to work right) */ 375 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page, 376 void *freelist_old, unsigned long counters_old, 377 void *freelist_new, unsigned long counters_new, 378 const char *n) 379 { 380 VM_BUG_ON(!irqs_disabled()); 381 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \ 382 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE) 383 if (s->flags & __CMPXCHG_DOUBLE) { 384 if (cmpxchg_double(&page->freelist, &page->counters, 385 freelist_old, counters_old, 386 freelist_new, counters_new)) 387 return true; 388 } else 389 #endif 390 { 391 slab_lock(page); 392 if (page->freelist == freelist_old && 393 page->counters == counters_old) { 394 page->freelist = freelist_new; 395 set_page_slub_counters(page, counters_new); 396 slab_unlock(page); 397 return true; 398 } 399 slab_unlock(page); 400 } 401 402 cpu_relax(); 403 stat(s, CMPXCHG_DOUBLE_FAIL); 404 405 #ifdef SLUB_DEBUG_CMPXCHG 406 pr_info("%s %s: cmpxchg double redo ", n, s->name); 407 #endif 408 409 return false; 410 } 411 412 static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page, 413 void *freelist_old, unsigned long counters_old, 414 void *freelist_new, unsigned long counters_new, 415 const char *n) 416 { 417 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \ 418 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE) 419 if (s->flags & __CMPXCHG_DOUBLE) { 420 if (cmpxchg_double(&page->freelist, &page->counters, 421 freelist_old, counters_old, 422 freelist_new, counters_new)) 423 return true; 424 } else 425 #endif 426 { 427 unsigned long flags; 428 429 local_irq_save(flags); 430 slab_lock(page); 431 if (page->freelist == freelist_old && 432 page->counters == counters_old) { 433 page->freelist = freelist_new; 434 set_page_slub_counters(page, counters_new); 435 slab_unlock(page); 436 local_irq_restore(flags); 437 return true; 438 } 439 slab_unlock(page); 440 local_irq_restore(flags); 441 } 442 443 cpu_relax(); 444 stat(s, CMPXCHG_DOUBLE_FAIL); 445 446 #ifdef SLUB_DEBUG_CMPXCHG 447 pr_info("%s %s: cmpxchg double redo ", n, s->name); 448 #endif 449 450 return false; 451 } 452 453 #ifdef CONFIG_SLUB_DEBUG 454 /* 455 * Determine a map of object in use on a page. 456 * 457 * Node listlock must be held to guarantee that the page does 458 * not vanish from under us. 459 */ 460 static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map) 461 { 462 void *p; 463 void *addr = page_address(page); 464 465 for (p = page->freelist; p; p = get_freepointer(s, p)) 466 set_bit(slab_index(p, s, addr), map); 467 } 468 469 static inline int size_from_object(struct kmem_cache *s) 470 { 471 if (s->flags & SLAB_RED_ZONE) 472 return s->size - s->red_left_pad; 473 474 return s->size; 475 } 476 477 static inline void *restore_red_left(struct kmem_cache *s, void *p) 478 { 479 if (s->flags & SLAB_RED_ZONE) 480 p -= s->red_left_pad; 481 482 return p; 483 } 484 485 /* 486 * Debug settings: 487 */ 488 #if defined(CONFIG_SLUB_DEBUG_ON) 489 static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS; 490 #else 491 static slab_flags_t slub_debug; 492 #endif 493 494 static char *slub_debug_slabs; 495 static int disable_higher_order_debug; 496 497 /* 498 * slub is about to manipulate internal object metadata. This memory lies 499 * outside the range of the allocated object, so accessing it would normally 500 * be reported by kasan as a bounds error. 
metadata_access_enable() is used 501 * to tell kasan that these accesses are OK. 502 */ 503 static inline void metadata_access_enable(void) 504 { 505 kasan_disable_current(); 506 } 507 508 static inline void metadata_access_disable(void) 509 { 510 kasan_enable_current(); 511 } 512 513 /* 514 * Object debugging 515 */ 516 517 /* Verify that a pointer has an address that is valid within a slab page */ 518 static inline int check_valid_pointer(struct kmem_cache *s, 519 struct page *page, void *object) 520 { 521 void *base; 522 523 if (!object) 524 return 1; 525 526 base = page_address(page); 527 object = restore_red_left(s, object); 528 if (object < base || object >= base + page->objects * s->size || 529 (object - base) % s->size) { 530 return 0; 531 } 532 533 return 1; 534 } 535 536 static void print_section(char *level, char *text, u8 *addr, 537 unsigned int length) 538 { 539 metadata_access_enable(); 540 print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr, 541 length, 1); 542 metadata_access_disable(); 543 } 544 545 static struct track *get_track(struct kmem_cache *s, void *object, 546 enum track_item alloc) 547 { 548 struct track *p; 549 550 if (s->offset) 551 p = object + s->offset + sizeof(void *); 552 else 553 p = object + s->inuse; 554 555 return p + alloc; 556 } 557 558 static void set_track(struct kmem_cache *s, void *object, 559 enum track_item alloc, unsigned long addr) 560 { 561 struct track *p = get_track(s, object, alloc); 562 563 if (addr) { 564 #ifdef CONFIG_STACKTRACE 565 struct stack_trace trace; 566 int i; 567 568 trace.nr_entries = 0; 569 trace.max_entries = TRACK_ADDRS_COUNT; 570 trace.entries = p->addrs; 571 trace.skip = 3; 572 metadata_access_enable(); 573 save_stack_trace(&trace); 574 metadata_access_disable(); 575 576 /* See rant in lockdep.c */ 577 if (trace.nr_entries != 0 && 578 trace.entries[trace.nr_entries - 1] == ULONG_MAX) 579 trace.nr_entries--; 580 581 for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++) 582 p->addrs[i] = 0; 583 #endif 584 p->addr = addr; 585 p->cpu = smp_processor_id(); 586 p->pid = current->pid; 587 p->when = jiffies; 588 } else 589 memset(p, 0, sizeof(struct track)); 590 } 591 592 static void init_tracking(struct kmem_cache *s, void *object) 593 { 594 if (!(s->flags & SLAB_STORE_USER)) 595 return; 596 597 set_track(s, object, TRACK_FREE, 0UL); 598 set_track(s, object, TRACK_ALLOC, 0UL); 599 } 600 601 static void print_track(const char *s, struct track *t) 602 { 603 if (!t->addr) 604 return; 605 606 pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n", 607 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid); 608 #ifdef CONFIG_STACKTRACE 609 { 610 int i; 611 for (i = 0; i < TRACK_ADDRS_COUNT; i++) 612 if (t->addrs[i]) 613 pr_err("\t%pS\n", (void *)t->addrs[i]); 614 else 615 break; 616 } 617 #endif 618 } 619 620 static void print_tracking(struct kmem_cache *s, void *object) 621 { 622 if (!(s->flags & SLAB_STORE_USER)) 623 return; 624 625 print_track("Allocated", get_track(s, object, TRACK_ALLOC)); 626 print_track("Freed", get_track(s, object, TRACK_FREE)); 627 } 628 629 static void print_page_info(struct page *page) 630 { 631 pr_err("INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n", 632 page, page->objects, page->inuse, page->freelist, page->flags); 633 634 } 635 636 static void slab_bug(struct kmem_cache *s, char *fmt, ...) 
637 { 638 struct va_format vaf; 639 va_list args; 640 641 va_start(args, fmt); 642 vaf.fmt = fmt; 643 vaf.va = &args; 644 pr_err("=============================================================================\n"); 645 pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf); 646 pr_err("-----------------------------------------------------------------------------\n\n"); 647 648 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 649 va_end(args); 650 } 651 652 static void slab_fix(struct kmem_cache *s, char *fmt, ...) 653 { 654 struct va_format vaf; 655 va_list args; 656 657 va_start(args, fmt); 658 vaf.fmt = fmt; 659 vaf.va = &args; 660 pr_err("FIX %s: %pV\n", s->name, &vaf); 661 va_end(args); 662 } 663 664 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) 665 { 666 unsigned int off; /* Offset of last byte */ 667 u8 *addr = page_address(page); 668 669 print_tracking(s, p); 670 671 print_page_info(page); 672 673 pr_err("INFO: Object 0x%p @offset=%tu fp=0x%p\n\n", 674 p, p - addr, get_freepointer(s, p)); 675 676 if (s->flags & SLAB_RED_ZONE) 677 print_section(KERN_ERR, "Redzone ", p - s->red_left_pad, 678 s->red_left_pad); 679 else if (p > addr + 16) 680 print_section(KERN_ERR, "Bytes b4 ", p - 16, 16); 681 682 print_section(KERN_ERR, "Object ", p, 683 min_t(unsigned long, s->object_size, PAGE_SIZE)); 684 if (s->flags & SLAB_RED_ZONE) 685 print_section(KERN_ERR, "Redzone ", p + s->object_size, 686 s->inuse - s->object_size); 687 688 if (s->offset) 689 off = s->offset + sizeof(void *); 690 else 691 off = s->inuse; 692 693 if (s->flags & SLAB_STORE_USER) 694 off += 2 * sizeof(struct track); 695 696 off += kasan_metadata_size(s); 697 698 if (off != size_from_object(s)) 699 /* Beginning of the filler is the free pointer */ 700 print_section(KERN_ERR, "Padding ", p + off, 701 size_from_object(s) - off); 702 703 dump_stack(); 704 } 705 706 void object_err(struct kmem_cache *s, struct page *page, 707 u8 *object, char *reason) 708 { 709 slab_bug(s, "%s", reason); 710 print_trailer(s, page, object); 711 } 712 713 static void slab_err(struct kmem_cache *s, struct page *page, 714 const char *fmt, ...) 715 { 716 va_list args; 717 char buf[100]; 718 719 va_start(args, fmt); 720 vsnprintf(buf, sizeof(buf), fmt, args); 721 va_end(args); 722 slab_bug(s, "%s", buf); 723 print_page_info(page); 724 dump_stack(); 725 } 726 727 static void init_object(struct kmem_cache *s, void *object, u8 val) 728 { 729 u8 *p = object; 730 731 if (s->flags & SLAB_RED_ZONE) 732 memset(p - s->red_left_pad, val, s->red_left_pad); 733 734 if (s->flags & __OBJECT_POISON) { 735 memset(p, POISON_FREE, s->object_size - 1); 736 p[s->object_size - 1] = POISON_END; 737 } 738 739 if (s->flags & SLAB_RED_ZONE) 740 memset(p + s->object_size, val, s->inuse - s->object_size); 741 } 742 743 static void restore_bytes(struct kmem_cache *s, char *message, u8 data, 744 void *from, void *to) 745 { 746 slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data); 747 memset(from, data, to - from); 748 } 749 750 static int check_bytes_and_report(struct kmem_cache *s, struct page *page, 751 u8 *object, char *what, 752 u8 *start, unsigned int value, unsigned int bytes) 753 { 754 u8 *fault; 755 u8 *end; 756 757 metadata_access_enable(); 758 fault = memchr_inv(start, value, bytes); 759 metadata_access_disable(); 760 if (!fault) 761 return 1; 762 763 end = start + bytes; 764 while (end > fault && end[-1] == value) 765 end--; 766 767 slab_bug(s, "%s overwritten", what); 768 pr_err("INFO: 0x%p-0x%p. 
First byte 0x%x instead of 0x%x\n",
					fault, end - 1, fault[0], value);
	print_trailer(s, page, object);

	restore_bytes(s, what, value, fault, end);
	return 0;
}

/*
 * Object layout:
 *
 * object address
 * 	Bytes of the object to be managed.
 * 	If the freepointer may overlay the object then the free
 * 	pointer is the first word of the object.
 *
 * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
 * 	0xa5 (POISON_END)
 *
 * object + s->object_size
 * 	Padding to reach word boundary. This is also used for Redzoning.
 * 	Padding is extended by another word if Redzoning is enabled and
 * 	object_size == inuse.
 *
 * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
 * 	0xcc (RED_ACTIVE) for objects in use.
 *
 * object + s->inuse
 * 	Meta data starts here.
 *
 * 	A. Free pointer (if we cannot overwrite object on free)
 * 	B. Tracking data for SLAB_STORE_USER
 * 	C. Padding to reach required alignment boundary or at minimum
 * 		one word if debugging is on to be able to detect writes
 * 		before the word boundary.
 *
 *	Padding is done using 0x5a (POISON_INUSE)
 *
 * object + s->size
 * 	Nothing is used beyond s->size.
 *
 * If slabcaches are merged then the object_size and inuse boundaries are mostly
 * ignored. And therefore no slab options that rely on these boundaries
 * may be used with merged slabcaches.
 */

static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
{
	unsigned long off = s->inuse;	/* The end of info */

	if (s->offset)
		/* Freepointer is placed after the object. */
		off += sizeof(void *);

	if (s->flags & SLAB_STORE_USER)
		/* We also have user information there */
		off += 2 * sizeof(struct track);

	off += kasan_metadata_size(s);

	if (size_from_object(s) == off)
		return 1;

	return check_bytes_and_report(s, page, p, "Object padding",
			p + off, POISON_INUSE, size_from_object(s) - off);
}

/* Check the pad bytes at the end of a slab page */
static int slab_pad_check(struct kmem_cache *s, struct page *page)
{
	u8 *start;
	u8 *fault;
	u8 *end;
	u8 *pad;
	int length;
	int remainder;

	if (!(s->flags & SLAB_POISON))
		return 1;

	start = page_address(page);
	length = (PAGE_SIZE << compound_order(page)) - s->reserved;
	end = start + length;
	remainder = length % s->size;
	if (!remainder)
		return 1;

	pad = end - remainder;
	metadata_access_enable();
	fault = memchr_inv(pad, POISON_INUSE, remainder);
	metadata_access_disable();
	if (!fault)
		return 1;
	while (end > fault && end[-1] == POISON_INUSE)
		end--;

	slab_err(s, page, "Padding overwritten.
0x%p-0x%p", fault, end - 1); 865 print_section(KERN_ERR, "Padding ", pad, remainder); 866 867 restore_bytes(s, "slab padding", POISON_INUSE, fault, end); 868 return 0; 869 } 870 871 static int check_object(struct kmem_cache *s, struct page *page, 872 void *object, u8 val) 873 { 874 u8 *p = object; 875 u8 *endobject = object + s->object_size; 876 877 if (s->flags & SLAB_RED_ZONE) { 878 if (!check_bytes_and_report(s, page, object, "Redzone", 879 object - s->red_left_pad, val, s->red_left_pad)) 880 return 0; 881 882 if (!check_bytes_and_report(s, page, object, "Redzone", 883 endobject, val, s->inuse - s->object_size)) 884 return 0; 885 } else { 886 if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) { 887 check_bytes_and_report(s, page, p, "Alignment padding", 888 endobject, POISON_INUSE, 889 s->inuse - s->object_size); 890 } 891 } 892 893 if (s->flags & SLAB_POISON) { 894 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) && 895 (!check_bytes_and_report(s, page, p, "Poison", p, 896 POISON_FREE, s->object_size - 1) || 897 !check_bytes_and_report(s, page, p, "Poison", 898 p + s->object_size - 1, POISON_END, 1))) 899 return 0; 900 /* 901 * check_pad_bytes cleans up on its own. 902 */ 903 check_pad_bytes(s, page, p); 904 } 905 906 if (!s->offset && val == SLUB_RED_ACTIVE) 907 /* 908 * Object and freepointer overlap. Cannot check 909 * freepointer while object is allocated. 910 */ 911 return 1; 912 913 /* Check free pointer validity */ 914 if (!check_valid_pointer(s, page, get_freepointer(s, p))) { 915 object_err(s, page, p, "Freepointer corrupt"); 916 /* 917 * No choice but to zap it and thus lose the remainder 918 * of the free objects in this slab. May cause 919 * another error because the object count is now wrong. 920 */ 921 set_freepointer(s, p, NULL); 922 return 0; 923 } 924 return 1; 925 } 926 927 static int check_slab(struct kmem_cache *s, struct page *page) 928 { 929 int maxobj; 930 931 VM_BUG_ON(!irqs_disabled()); 932 933 if (!PageSlab(page)) { 934 slab_err(s, page, "Not a valid slab page"); 935 return 0; 936 } 937 938 maxobj = order_objects(compound_order(page), s->size, s->reserved); 939 if (page->objects > maxobj) { 940 slab_err(s, page, "objects %u > max %u", 941 page->objects, maxobj); 942 return 0; 943 } 944 if (page->inuse > page->objects) { 945 slab_err(s, page, "inuse %u > max %u", 946 page->inuse, page->objects); 947 return 0; 948 } 949 /* Slab_pad_check fixes things up after itself */ 950 slab_pad_check(s, page); 951 return 1; 952 } 953 954 /* 955 * Determine if a certain object on a page is on the freelist. Must hold the 956 * slab lock to guarantee that the chains are in a consistent state. 
957 */ 958 static int on_freelist(struct kmem_cache *s, struct page *page, void *search) 959 { 960 int nr = 0; 961 void *fp; 962 void *object = NULL; 963 int max_objects; 964 965 fp = page->freelist; 966 while (fp && nr <= page->objects) { 967 if (fp == search) 968 return 1; 969 if (!check_valid_pointer(s, page, fp)) { 970 if (object) { 971 object_err(s, page, object, 972 "Freechain corrupt"); 973 set_freepointer(s, object, NULL); 974 } else { 975 slab_err(s, page, "Freepointer corrupt"); 976 page->freelist = NULL; 977 page->inuse = page->objects; 978 slab_fix(s, "Freelist cleared"); 979 return 0; 980 } 981 break; 982 } 983 object = fp; 984 fp = get_freepointer(s, object); 985 nr++; 986 } 987 988 max_objects = order_objects(compound_order(page), s->size, s->reserved); 989 if (max_objects > MAX_OBJS_PER_PAGE) 990 max_objects = MAX_OBJS_PER_PAGE; 991 992 if (page->objects != max_objects) { 993 slab_err(s, page, "Wrong number of objects. Found %d but should be %d", 994 page->objects, max_objects); 995 page->objects = max_objects; 996 slab_fix(s, "Number of objects adjusted."); 997 } 998 if (page->inuse != page->objects - nr) { 999 slab_err(s, page, "Wrong object count. Counter is %d but counted were %d", 1000 page->inuse, page->objects - nr); 1001 page->inuse = page->objects - nr; 1002 slab_fix(s, "Object count adjusted."); 1003 } 1004 return search == NULL; 1005 } 1006 1007 static void trace(struct kmem_cache *s, struct page *page, void *object, 1008 int alloc) 1009 { 1010 if (s->flags & SLAB_TRACE) { 1011 pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n", 1012 s->name, 1013 alloc ? "alloc" : "free", 1014 object, page->inuse, 1015 page->freelist); 1016 1017 if (!alloc) 1018 print_section(KERN_INFO, "Object ", (void *)object, 1019 s->object_size); 1020 1021 dump_stack(); 1022 } 1023 } 1024 1025 /* 1026 * Tracking of fully allocated slabs for debugging purposes. 1027 */ 1028 static void add_full(struct kmem_cache *s, 1029 struct kmem_cache_node *n, struct page *page) 1030 { 1031 if (!(s->flags & SLAB_STORE_USER)) 1032 return; 1033 1034 lockdep_assert_held(&n->list_lock); 1035 list_add(&page->lru, &n->full); 1036 } 1037 1038 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page) 1039 { 1040 if (!(s->flags & SLAB_STORE_USER)) 1041 return; 1042 1043 lockdep_assert_held(&n->list_lock); 1044 list_del(&page->lru); 1045 } 1046 1047 /* Tracking of the number of slabs for debugging purposes */ 1048 static inline unsigned long slabs_node(struct kmem_cache *s, int node) 1049 { 1050 struct kmem_cache_node *n = get_node(s, node); 1051 1052 return atomic_long_read(&n->nr_slabs); 1053 } 1054 1055 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n) 1056 { 1057 return atomic_long_read(&n->nr_slabs); 1058 } 1059 1060 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects) 1061 { 1062 struct kmem_cache_node *n = get_node(s, node); 1063 1064 /* 1065 * May be called early in order to allocate a slab for the 1066 * kmem_cache_node structure. Solve the chicken-egg 1067 * dilemma by deferring the increment of the count during 1068 * bootstrap (see early_kmem_cache_node_alloc). 
1069 */ 1070 if (likely(n)) { 1071 atomic_long_inc(&n->nr_slabs); 1072 atomic_long_add(objects, &n->total_objects); 1073 } 1074 } 1075 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects) 1076 { 1077 struct kmem_cache_node *n = get_node(s, node); 1078 1079 atomic_long_dec(&n->nr_slabs); 1080 atomic_long_sub(objects, &n->total_objects); 1081 } 1082 1083 /* Object debug checks for alloc/free paths */ 1084 static void setup_object_debug(struct kmem_cache *s, struct page *page, 1085 void *object) 1086 { 1087 if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))) 1088 return; 1089 1090 init_object(s, object, SLUB_RED_INACTIVE); 1091 init_tracking(s, object); 1092 } 1093 1094 static inline int alloc_consistency_checks(struct kmem_cache *s, 1095 struct page *page, 1096 void *object, unsigned long addr) 1097 { 1098 if (!check_slab(s, page)) 1099 return 0; 1100 1101 if (!check_valid_pointer(s, page, object)) { 1102 object_err(s, page, object, "Freelist Pointer check fails"); 1103 return 0; 1104 } 1105 1106 if (!check_object(s, page, object, SLUB_RED_INACTIVE)) 1107 return 0; 1108 1109 return 1; 1110 } 1111 1112 static noinline int alloc_debug_processing(struct kmem_cache *s, 1113 struct page *page, 1114 void *object, unsigned long addr) 1115 { 1116 if (s->flags & SLAB_CONSISTENCY_CHECKS) { 1117 if (!alloc_consistency_checks(s, page, object, addr)) 1118 goto bad; 1119 } 1120 1121 /* Success perform special debug activities for allocs */ 1122 if (s->flags & SLAB_STORE_USER) 1123 set_track(s, object, TRACK_ALLOC, addr); 1124 trace(s, page, object, 1); 1125 init_object(s, object, SLUB_RED_ACTIVE); 1126 return 1; 1127 1128 bad: 1129 if (PageSlab(page)) { 1130 /* 1131 * If this is a slab page then lets do the best we can 1132 * to avoid issues in the future. Marking all objects 1133 * as used avoids touching the remaining objects. 
1134 */ 1135 slab_fix(s, "Marking all objects used"); 1136 page->inuse = page->objects; 1137 page->freelist = NULL; 1138 } 1139 return 0; 1140 } 1141 1142 static inline int free_consistency_checks(struct kmem_cache *s, 1143 struct page *page, void *object, unsigned long addr) 1144 { 1145 if (!check_valid_pointer(s, page, object)) { 1146 slab_err(s, page, "Invalid object pointer 0x%p", object); 1147 return 0; 1148 } 1149 1150 if (on_freelist(s, page, object)) { 1151 object_err(s, page, object, "Object already free"); 1152 return 0; 1153 } 1154 1155 if (!check_object(s, page, object, SLUB_RED_ACTIVE)) 1156 return 0; 1157 1158 if (unlikely(s != page->slab_cache)) { 1159 if (!PageSlab(page)) { 1160 slab_err(s, page, "Attempt to free object(0x%p) outside of slab", 1161 object); 1162 } else if (!page->slab_cache) { 1163 pr_err("SLUB <none>: no slab for object 0x%p.\n", 1164 object); 1165 dump_stack(); 1166 } else 1167 object_err(s, page, object, 1168 "page slab pointer corrupt."); 1169 return 0; 1170 } 1171 return 1; 1172 } 1173 1174 /* Supports checking bulk free of a constructed freelist */ 1175 static noinline int free_debug_processing( 1176 struct kmem_cache *s, struct page *page, 1177 void *head, void *tail, int bulk_cnt, 1178 unsigned long addr) 1179 { 1180 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 1181 void *object = head; 1182 int cnt = 0; 1183 unsigned long uninitialized_var(flags); 1184 int ret = 0; 1185 1186 spin_lock_irqsave(&n->list_lock, flags); 1187 slab_lock(page); 1188 1189 if (s->flags & SLAB_CONSISTENCY_CHECKS) { 1190 if (!check_slab(s, page)) 1191 goto out; 1192 } 1193 1194 next_object: 1195 cnt++; 1196 1197 if (s->flags & SLAB_CONSISTENCY_CHECKS) { 1198 if (!free_consistency_checks(s, page, object, addr)) 1199 goto out; 1200 } 1201 1202 if (s->flags & SLAB_STORE_USER) 1203 set_track(s, object, TRACK_FREE, addr); 1204 trace(s, page, object, 0); 1205 /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */ 1206 init_object(s, object, SLUB_RED_INACTIVE); 1207 1208 /* Reached end of constructed freelist yet? */ 1209 if (object != tail) { 1210 object = get_freepointer(s, object); 1211 goto next_object; 1212 } 1213 ret = 1; 1214 1215 out: 1216 if (cnt != bulk_cnt) 1217 slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n", 1218 bulk_cnt, cnt); 1219 1220 slab_unlock(page); 1221 spin_unlock_irqrestore(&n->list_lock, flags); 1222 if (!ret) 1223 slab_fix(s, "Object at 0x%p not freed", object); 1224 return ret; 1225 } 1226 1227 static int __init setup_slub_debug(char *str) 1228 { 1229 slub_debug = DEBUG_DEFAULT_FLAGS; 1230 if (*str++ != '=' || !*str) 1231 /* 1232 * No options specified. Switch on full debugging. 1233 */ 1234 goto out; 1235 1236 if (*str == ',') 1237 /* 1238 * No options but restriction on slabs. This means full 1239 * debugging for slabs matching a pattern. 1240 */ 1241 goto check_slabs; 1242 1243 slub_debug = 0; 1244 if (*str == '-') 1245 /* 1246 * Switch off all debugging measures. 
1247 */ 1248 goto out; 1249 1250 /* 1251 * Determine which debug features should be switched on 1252 */ 1253 for (; *str && *str != ','; str++) { 1254 switch (tolower(*str)) { 1255 case 'f': 1256 slub_debug |= SLAB_CONSISTENCY_CHECKS; 1257 break; 1258 case 'z': 1259 slub_debug |= SLAB_RED_ZONE; 1260 break; 1261 case 'p': 1262 slub_debug |= SLAB_POISON; 1263 break; 1264 case 'u': 1265 slub_debug |= SLAB_STORE_USER; 1266 break; 1267 case 't': 1268 slub_debug |= SLAB_TRACE; 1269 break; 1270 case 'a': 1271 slub_debug |= SLAB_FAILSLAB; 1272 break; 1273 case 'o': 1274 /* 1275 * Avoid enabling debugging on caches if its minimum 1276 * order would increase as a result. 1277 */ 1278 disable_higher_order_debug = 1; 1279 break; 1280 default: 1281 pr_err("slub_debug option '%c' unknown. skipped\n", 1282 *str); 1283 } 1284 } 1285 1286 check_slabs: 1287 if (*str == ',') 1288 slub_debug_slabs = str + 1; 1289 out: 1290 return 1; 1291 } 1292 1293 __setup("slub_debug", setup_slub_debug); 1294 1295 slab_flags_t kmem_cache_flags(unsigned long object_size, 1296 slab_flags_t flags, const char *name, 1297 void (*ctor)(void *)) 1298 { 1299 /* 1300 * Enable debugging if selected on the kernel commandline. 1301 */ 1302 if (slub_debug && (!slub_debug_slabs || (name && 1303 !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))) 1304 flags |= slub_debug; 1305 1306 return flags; 1307 } 1308 #else /* !CONFIG_SLUB_DEBUG */ 1309 static inline void setup_object_debug(struct kmem_cache *s, 1310 struct page *page, void *object) {} 1311 1312 static inline int alloc_debug_processing(struct kmem_cache *s, 1313 struct page *page, void *object, unsigned long addr) { return 0; } 1314 1315 static inline int free_debug_processing( 1316 struct kmem_cache *s, struct page *page, 1317 void *head, void *tail, int bulk_cnt, 1318 unsigned long addr) { return 0; } 1319 1320 static inline int slab_pad_check(struct kmem_cache *s, struct page *page) 1321 { return 1; } 1322 static inline int check_object(struct kmem_cache *s, struct page *page, 1323 void *object, u8 val) { return 1; } 1324 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n, 1325 struct page *page) {} 1326 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, 1327 struct page *page) {} 1328 slab_flags_t kmem_cache_flags(unsigned long object_size, 1329 slab_flags_t flags, const char *name, 1330 void (*ctor)(void *)) 1331 { 1332 return flags; 1333 } 1334 #define slub_debug 0 1335 1336 #define disable_higher_order_debug 0 1337 1338 static inline unsigned long slabs_node(struct kmem_cache *s, int node) 1339 { return 0; } 1340 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n) 1341 { return 0; } 1342 static inline void inc_slabs_node(struct kmem_cache *s, int node, 1343 int objects) {} 1344 static inline void dec_slabs_node(struct kmem_cache *s, int node, 1345 int objects) {} 1346 1347 #endif /* CONFIG_SLUB_DEBUG */ 1348 1349 /* 1350 * Hooks for other subsystems that check memory allocations. In a typical 1351 * production configuration these hooks all should produce no code at all. 
1352 */ 1353 static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags) 1354 { 1355 kmemleak_alloc(ptr, size, 1, flags); 1356 kasan_kmalloc_large(ptr, size, flags); 1357 } 1358 1359 static __always_inline void kfree_hook(void *x) 1360 { 1361 kmemleak_free(x); 1362 kasan_kfree_large(x, _RET_IP_); 1363 } 1364 1365 static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x) 1366 { 1367 kmemleak_free_recursive(x, s->flags); 1368 1369 /* 1370 * Trouble is that we may no longer disable interrupts in the fast path 1371 * So in order to make the debug calls that expect irqs to be 1372 * disabled we need to disable interrupts temporarily. 1373 */ 1374 #ifdef CONFIG_LOCKDEP 1375 { 1376 unsigned long flags; 1377 1378 local_irq_save(flags); 1379 debug_check_no_locks_freed(x, s->object_size); 1380 local_irq_restore(flags); 1381 } 1382 #endif 1383 if (!(s->flags & SLAB_DEBUG_OBJECTS)) 1384 debug_check_no_obj_freed(x, s->object_size); 1385 1386 /* KASAN might put x into memory quarantine, delaying its reuse */ 1387 return kasan_slab_free(s, x, _RET_IP_); 1388 } 1389 1390 static inline bool slab_free_freelist_hook(struct kmem_cache *s, 1391 void **head, void **tail) 1392 { 1393 /* 1394 * Compiler cannot detect this function can be removed if slab_free_hook() 1395 * evaluates to nothing. Thus, catch all relevant config debug options here. 1396 */ 1397 #if defined(CONFIG_LOCKDEP) || \ 1398 defined(CONFIG_DEBUG_KMEMLEAK) || \ 1399 defined(CONFIG_DEBUG_OBJECTS_FREE) || \ 1400 defined(CONFIG_KASAN) 1401 1402 void *object; 1403 void *next = *head; 1404 void *old_tail = *tail ? *tail : *head; 1405 1406 /* Head and tail of the reconstructed freelist */ 1407 *head = NULL; 1408 *tail = NULL; 1409 1410 do { 1411 object = next; 1412 next = get_freepointer(s, object); 1413 /* If object's reuse doesn't have to be delayed */ 1414 if (!slab_free_hook(s, object)) { 1415 /* Move object to the new freelist */ 1416 set_freepointer(s, object, *head); 1417 *head = object; 1418 if (!*tail) 1419 *tail = object; 1420 } 1421 } while (object != old_tail); 1422 1423 if (*head == *tail) 1424 *tail = NULL; 1425 1426 return *head != NULL; 1427 #else 1428 return true; 1429 #endif 1430 } 1431 1432 static void setup_object(struct kmem_cache *s, struct page *page, 1433 void *object) 1434 { 1435 setup_object_debug(s, page, object); 1436 kasan_init_slab_obj(s, object); 1437 if (unlikely(s->ctor)) { 1438 kasan_unpoison_object_data(s, object); 1439 s->ctor(object); 1440 kasan_poison_object_data(s, object); 1441 } 1442 } 1443 1444 /* 1445 * Slab allocation and freeing 1446 */ 1447 static inline struct page *alloc_slab_page(struct kmem_cache *s, 1448 gfp_t flags, int node, struct kmem_cache_order_objects oo) 1449 { 1450 struct page *page; 1451 int order = oo_order(oo); 1452 1453 if (node == NUMA_NO_NODE) 1454 page = alloc_pages(flags, order); 1455 else 1456 page = __alloc_pages_node(node, flags, order); 1457 1458 if (page && memcg_charge_slab(page, flags, order, s)) { 1459 __free_pages(page, order); 1460 page = NULL; 1461 } 1462 1463 return page; 1464 } 1465 1466 #ifdef CONFIG_SLAB_FREELIST_RANDOM 1467 /* Pre-initialize the random sequence cache */ 1468 static int init_cache_random_seq(struct kmem_cache *s) 1469 { 1470 int err; 1471 unsigned long i, count = oo_objects(s->oo); 1472 1473 /* Bailout if already initialised */ 1474 if (s->random_seq) 1475 return 0; 1476 1477 err = cache_random_seq_create(s, count, GFP_KERNEL); 1478 if (err) { 1479 pr_err("SLUB: Unable to initialize free list for %s\n", 1480 
s->name); 1481 return err; 1482 } 1483 1484 /* Transform to an offset on the set of pages */ 1485 if (s->random_seq) { 1486 for (i = 0; i < count; i++) 1487 s->random_seq[i] *= s->size; 1488 } 1489 return 0; 1490 } 1491 1492 /* Initialize each random sequence freelist per cache */ 1493 static void __init init_freelist_randomization(void) 1494 { 1495 struct kmem_cache *s; 1496 1497 mutex_lock(&slab_mutex); 1498 1499 list_for_each_entry(s, &slab_caches, list) 1500 init_cache_random_seq(s); 1501 1502 mutex_unlock(&slab_mutex); 1503 } 1504 1505 /* Get the next entry on the pre-computed freelist randomized */ 1506 static void *next_freelist_entry(struct kmem_cache *s, struct page *page, 1507 unsigned long *pos, void *start, 1508 unsigned long page_limit, 1509 unsigned long freelist_count) 1510 { 1511 unsigned int idx; 1512 1513 /* 1514 * If the target page allocation failed, the number of objects on the 1515 * page might be smaller than the usual size defined by the cache. 1516 */ 1517 do { 1518 idx = s->random_seq[*pos]; 1519 *pos += 1; 1520 if (*pos >= freelist_count) 1521 *pos = 0; 1522 } while (unlikely(idx >= page_limit)); 1523 1524 return (char *)start + idx; 1525 } 1526 1527 /* Shuffle the single linked freelist based on a random pre-computed sequence */ 1528 static bool shuffle_freelist(struct kmem_cache *s, struct page *page) 1529 { 1530 void *start; 1531 void *cur; 1532 void *next; 1533 unsigned long idx, pos, page_limit, freelist_count; 1534 1535 if (page->objects < 2 || !s->random_seq) 1536 return false; 1537 1538 freelist_count = oo_objects(s->oo); 1539 pos = get_random_int() % freelist_count; 1540 1541 page_limit = page->objects * s->size; 1542 start = fixup_red_left(s, page_address(page)); 1543 1544 /* First entry is used as the base of the freelist */ 1545 cur = next_freelist_entry(s, page, &pos, start, page_limit, 1546 freelist_count); 1547 page->freelist = cur; 1548 1549 for (idx = 1; idx < page->objects; idx++) { 1550 setup_object(s, page, cur); 1551 next = next_freelist_entry(s, page, &pos, start, page_limit, 1552 freelist_count); 1553 set_freepointer(s, cur, next); 1554 cur = next; 1555 } 1556 setup_object(s, page, cur); 1557 set_freepointer(s, cur, NULL); 1558 1559 return true; 1560 } 1561 #else 1562 static inline int init_cache_random_seq(struct kmem_cache *s) 1563 { 1564 return 0; 1565 } 1566 static inline void init_freelist_randomization(void) { } 1567 static inline bool shuffle_freelist(struct kmem_cache *s, struct page *page) 1568 { 1569 return false; 1570 } 1571 #endif /* CONFIG_SLAB_FREELIST_RANDOM */ 1572 1573 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) 1574 { 1575 struct page *page; 1576 struct kmem_cache_order_objects oo = s->oo; 1577 gfp_t alloc_gfp; 1578 void *start, *p; 1579 int idx, order; 1580 bool shuffle; 1581 1582 flags &= gfp_allowed_mask; 1583 1584 if (gfpflags_allow_blocking(flags)) 1585 local_irq_enable(); 1586 1587 flags |= s->allocflags; 1588 1589 /* 1590 * Let the initial higher-order allocation fail under memory pressure 1591 * so we fall-back to the minimum order allocation. 1592 */ 1593 alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL; 1594 if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min)) 1595 alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL); 1596 1597 page = alloc_slab_page(s, alloc_gfp, node, oo); 1598 if (unlikely(!page)) { 1599 oo = s->min; 1600 alloc_gfp = flags; 1601 /* 1602 * Allocation may have failed due to fragmentation. 
1603 * Try a lower order alloc if possible 1604 */ 1605 page = alloc_slab_page(s, alloc_gfp, node, oo); 1606 if (unlikely(!page)) 1607 goto out; 1608 stat(s, ORDER_FALLBACK); 1609 } 1610 1611 page->objects = oo_objects(oo); 1612 1613 order = compound_order(page); 1614 page->slab_cache = s; 1615 __SetPageSlab(page); 1616 if (page_is_pfmemalloc(page)) 1617 SetPageSlabPfmemalloc(page); 1618 1619 start = page_address(page); 1620 1621 if (unlikely(s->flags & SLAB_POISON)) 1622 memset(start, POISON_INUSE, PAGE_SIZE << order); 1623 1624 kasan_poison_slab(page); 1625 1626 shuffle = shuffle_freelist(s, page); 1627 1628 if (!shuffle) { 1629 for_each_object_idx(p, idx, s, start, page->objects) { 1630 setup_object(s, page, p); 1631 if (likely(idx < page->objects)) 1632 set_freepointer(s, p, p + s->size); 1633 else 1634 set_freepointer(s, p, NULL); 1635 } 1636 page->freelist = fixup_red_left(s, start); 1637 } 1638 1639 page->inuse = page->objects; 1640 page->frozen = 1; 1641 1642 out: 1643 if (gfpflags_allow_blocking(flags)) 1644 local_irq_disable(); 1645 if (!page) 1646 return NULL; 1647 1648 mod_lruvec_page_state(page, 1649 (s->flags & SLAB_RECLAIM_ACCOUNT) ? 1650 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE, 1651 1 << oo_order(oo)); 1652 1653 inc_slabs_node(s, page_to_nid(page), page->objects); 1654 1655 return page; 1656 } 1657 1658 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) 1659 { 1660 if (unlikely(flags & GFP_SLAB_BUG_MASK)) { 1661 gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK; 1662 flags &= ~GFP_SLAB_BUG_MASK; 1663 pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n", 1664 invalid_mask, &invalid_mask, flags, &flags); 1665 dump_stack(); 1666 } 1667 1668 return allocate_slab(s, 1669 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node); 1670 } 1671 1672 static void __free_slab(struct kmem_cache *s, struct page *page) 1673 { 1674 int order = compound_order(page); 1675 int pages = 1 << order; 1676 1677 if (s->flags & SLAB_CONSISTENCY_CHECKS) { 1678 void *p; 1679 1680 slab_pad_check(s, page); 1681 for_each_object(p, s, page_address(page), 1682 page->objects) 1683 check_object(s, page, p, SLUB_RED_INACTIVE); 1684 } 1685 1686 mod_lruvec_page_state(page, 1687 (s->flags & SLAB_RECLAIM_ACCOUNT) ? 
1688 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE, 1689 -pages); 1690 1691 __ClearPageSlabPfmemalloc(page); 1692 __ClearPageSlab(page); 1693 1694 page_mapcount_reset(page); 1695 if (current->reclaim_state) 1696 current->reclaim_state->reclaimed_slab += pages; 1697 memcg_uncharge_slab(page, order, s); 1698 __free_pages(page, order); 1699 } 1700 1701 #define need_reserve_slab_rcu \ 1702 (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head)) 1703 1704 static void rcu_free_slab(struct rcu_head *h) 1705 { 1706 struct page *page; 1707 1708 if (need_reserve_slab_rcu) 1709 page = virt_to_head_page(h); 1710 else 1711 page = container_of((struct list_head *)h, struct page, lru); 1712 1713 __free_slab(page->slab_cache, page); 1714 } 1715 1716 static void free_slab(struct kmem_cache *s, struct page *page) 1717 { 1718 if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) { 1719 struct rcu_head *head; 1720 1721 if (need_reserve_slab_rcu) { 1722 int order = compound_order(page); 1723 int offset = (PAGE_SIZE << order) - s->reserved; 1724 1725 VM_BUG_ON(s->reserved != sizeof(*head)); 1726 head = page_address(page) + offset; 1727 } else { 1728 head = &page->rcu_head; 1729 } 1730 1731 call_rcu(head, rcu_free_slab); 1732 } else 1733 __free_slab(s, page); 1734 } 1735 1736 static void discard_slab(struct kmem_cache *s, struct page *page) 1737 { 1738 dec_slabs_node(s, page_to_nid(page), page->objects); 1739 free_slab(s, page); 1740 } 1741 1742 /* 1743 * Management of partially allocated slabs. 1744 */ 1745 static inline void 1746 __add_partial(struct kmem_cache_node *n, struct page *page, int tail) 1747 { 1748 n->nr_partial++; 1749 if (tail == DEACTIVATE_TO_TAIL) 1750 list_add_tail(&page->lru, &n->partial); 1751 else 1752 list_add(&page->lru, &n->partial); 1753 } 1754 1755 static inline void add_partial(struct kmem_cache_node *n, 1756 struct page *page, int tail) 1757 { 1758 lockdep_assert_held(&n->list_lock); 1759 __add_partial(n, page, tail); 1760 } 1761 1762 static inline void remove_partial(struct kmem_cache_node *n, 1763 struct page *page) 1764 { 1765 lockdep_assert_held(&n->list_lock); 1766 list_del(&page->lru); 1767 n->nr_partial--; 1768 } 1769 1770 /* 1771 * Remove slab from the partial list, freeze it and 1772 * return the pointer to the freelist. 1773 * 1774 * Returns a list of objects or NULL if it fails. 1775 */ 1776 static inline void *acquire_slab(struct kmem_cache *s, 1777 struct kmem_cache_node *n, struct page *page, 1778 int mode, int *objects) 1779 { 1780 void *freelist; 1781 unsigned long counters; 1782 struct page new; 1783 1784 lockdep_assert_held(&n->list_lock); 1785 1786 /* 1787 * Zap the freelist and set the frozen bit. 1788 * The old freelist is the list of objects for the 1789 * per cpu allocation list. 1790 */ 1791 freelist = page->freelist; 1792 counters = page->counters; 1793 new.counters = counters; 1794 *objects = new.objects - new.inuse; 1795 if (mode) { 1796 new.inuse = page->objects; 1797 new.freelist = NULL; 1798 } else { 1799 new.freelist = freelist; 1800 } 1801 1802 VM_BUG_ON(new.frozen); 1803 new.frozen = 1; 1804 1805 if (!__cmpxchg_double_slab(s, page, 1806 freelist, counters, 1807 new.freelist, new.counters, 1808 "acquire_slab")) 1809 return NULL; 1810 1811 remove_partial(n, page); 1812 WARN_ON(!freelist); 1813 return freelist; 1814 } 1815 1816 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain); 1817 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags); 1818 1819 /* 1820 * Try to allocate a partial slab from a specific node. 
1821 */ 1822 static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n, 1823 struct kmem_cache_cpu *c, gfp_t flags) 1824 { 1825 struct page *page, *page2; 1826 void *object = NULL; 1827 int available = 0; 1828 int objects; 1829 1830 /* 1831 * Racy check. If we mistakenly see no partial slabs then we 1832 * just allocate an empty slab. If we mistakenly try to get a 1833 * partial slab and there is none available then get_partials() 1834 * will return NULL. 1835 */ 1836 if (!n || !n->nr_partial) 1837 return NULL; 1838 1839 spin_lock(&n->list_lock); 1840 list_for_each_entry_safe(page, page2, &n->partial, lru) { 1841 void *t; 1842 1843 if (!pfmemalloc_match(page, flags)) 1844 continue; 1845 1846 t = acquire_slab(s, n, page, object == NULL, &objects); 1847 if (!t) 1848 break; 1849 1850 available += objects; 1851 if (!object) { 1852 c->page = page; 1853 stat(s, ALLOC_FROM_PARTIAL); 1854 object = t; 1855 } else { 1856 put_cpu_partial(s, page, 0); 1857 stat(s, CPU_PARTIAL_NODE); 1858 } 1859 if (!kmem_cache_has_cpu_partial(s) 1860 || available > slub_cpu_partial(s) / 2) 1861 break; 1862 1863 } 1864 spin_unlock(&n->list_lock); 1865 return object; 1866 } 1867 1868 /* 1869 * Get a page from somewhere. Search in increasing NUMA distances. 1870 */ 1871 static void *get_any_partial(struct kmem_cache *s, gfp_t flags, 1872 struct kmem_cache_cpu *c) 1873 { 1874 #ifdef CONFIG_NUMA 1875 struct zonelist *zonelist; 1876 struct zoneref *z; 1877 struct zone *zone; 1878 enum zone_type high_zoneidx = gfp_zone(flags); 1879 void *object; 1880 unsigned int cpuset_mems_cookie; 1881 1882 /* 1883 * The defrag ratio allows a configuration of the tradeoffs between 1884 * inter node defragmentation and node local allocations. A lower 1885 * defrag_ratio increases the tendency to do local allocations 1886 * instead of attempting to obtain partial slabs from other nodes. 1887 * 1888 * If the defrag_ratio is set to 0 then kmalloc() always 1889 * returns node local objects. If the ratio is higher then kmalloc() 1890 * may return off node objects because partial slabs are obtained 1891 * from other nodes and filled up. 1892 * 1893 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100 1894 * (which makes defrag_ratio = 1000) then every (well almost) 1895 * allocation will first attempt to defrag slab caches on other nodes. 1896 * This means scanning over all nodes to look for partial slabs which 1897 * may be expensive if we do it every time we are trying to find a slab 1898 * with available objects. 1899 */ 1900 if (!s->remote_node_defrag_ratio || 1901 get_cycles() % 1024 > s->remote_node_defrag_ratio) 1902 return NULL; 1903 1904 do { 1905 cpuset_mems_cookie = read_mems_allowed_begin(); 1906 zonelist = node_zonelist(mempolicy_slab_node(), flags); 1907 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { 1908 struct kmem_cache_node *n; 1909 1910 n = get_node(s, zone_to_nid(zone)); 1911 1912 if (n && cpuset_zone_allowed(zone, flags) && 1913 n->nr_partial > s->min_partial) { 1914 object = get_partial_node(s, n, c, flags); 1915 if (object) { 1916 /* 1917 * Don't check read_mems_allowed_retry() 1918 * here - if mems_allowed was updated in 1919 * parallel, that was a harmless race 1920 * between allocation and the cpuset 1921 * update 1922 */ 1923 return object; 1924 } 1925 } 1926 } 1927 } while (read_mems_allowed_retry(cpuset_mems_cookie)); 1928 #endif 1929 return NULL; 1930 } 1931 1932 /* 1933 * Get a partial page, lock it and return it. 
 */
static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
		struct kmem_cache_cpu *c)
{
	void *object;
	int searchnode = node;

	if (node == NUMA_NO_NODE)
		searchnode = numa_mem_id();
	else if (!node_present_pages(node))
		searchnode = node_to_mem_node(node);

	object = get_partial_node(s, get_node(s, searchnode), c, flags);
	if (object || node != NUMA_NO_NODE)
		return object;

	return get_any_partial(s, flags, c);
}

#ifdef CONFIG_PREEMPT
/*
 * Calculate the next globally unique transaction for disambiguation
 * during cmpxchg. The transactions start with the cpu number and are then
 * incremented by CONFIG_NR_CPUS.
 */
#define TID_STEP  roundup_pow_of_two(CONFIG_NR_CPUS)
#else
/*
 * No preemption supported therefore also no need to check for
 * different cpus.
 */
#define TID_STEP 1
#endif

static inline unsigned long next_tid(unsigned long tid)
{
	return tid + TID_STEP;
}

static inline unsigned int tid_to_cpu(unsigned long tid)
{
	return tid % TID_STEP;
}

static inline unsigned long tid_to_event(unsigned long tid)
{
	return tid / TID_STEP;
}

static inline unsigned int init_tid(int cpu)
{
	return cpu;
}

static inline void note_cmpxchg_failure(const char *n,
		const struct kmem_cache *s, unsigned long tid)
{
#ifdef SLUB_DEBUG_CMPXCHG
	unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);

	pr_info("%s %s: cmpxchg redo ", n, s->name);

#ifdef CONFIG_PREEMPT
	if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
		pr_warn("due to cpu change %d -> %d\n",
			tid_to_cpu(tid), tid_to_cpu(actual_tid));
	else
#endif
	if (tid_to_event(tid) != tid_to_event(actual_tid))
		pr_warn("due to cpu running other code. Event %ld->%ld\n",
			tid_to_event(tid), tid_to_event(actual_tid));
	else
		pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n",
			actual_tid, tid, next_tid(tid));
#endif
	stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
}

static void init_kmem_cache_cpus(struct kmem_cache *s)
{
	int cpu;

	for_each_possible_cpu(cpu)
		per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
}

/*
 * Remove the cpu slab
 */
static void deactivate_slab(struct kmem_cache *s, struct page *page,
				void *freelist, struct kmem_cache_cpu *c)
{
	enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
	int lock = 0;
	enum slab_modes l = M_NONE, m = M_NONE;
	void *nextfree;
	int tail = DEACTIVATE_TO_HEAD;
	struct page new;
	struct page old;

	if (page->freelist) {
		stat(s, DEACTIVATE_REMOTE_FREES);
		tail = DEACTIVATE_TO_TAIL;
	}

	/*
	 * Stage one: Free all available per cpu objects back
	 * to the page freelist while it is still frozen. Leave the
	 * last one.
	 *
	 * There is no need to take the list->lock because the page
	 * is still frozen.
2047 */ 2048 while (freelist && (nextfree = get_freepointer(s, freelist))) { 2049 void *prior; 2050 unsigned long counters; 2051 2052 do { 2053 prior = page->freelist; 2054 counters = page->counters; 2055 set_freepointer(s, freelist, prior); 2056 new.counters = counters; 2057 new.inuse--; 2058 VM_BUG_ON(!new.frozen); 2059 2060 } while (!__cmpxchg_double_slab(s, page, 2061 prior, counters, 2062 freelist, new.counters, 2063 "drain percpu freelist")); 2064 2065 freelist = nextfree; 2066 } 2067 2068 /* 2069 * Stage two: Ensure that the page is unfrozen while the 2070 * list presence reflects the actual number of objects 2071 * during unfreeze. 2072 * 2073 * We setup the list membership and then perform a cmpxchg 2074 * with the count. If there is a mismatch then the page 2075 * is not unfrozen but the page is on the wrong list. 2076 * 2077 * Then we restart the process which may have to remove 2078 * the page from the list that we just put it on again 2079 * because the number of objects in the slab may have 2080 * changed. 2081 */ 2082 redo: 2083 2084 old.freelist = page->freelist; 2085 old.counters = page->counters; 2086 VM_BUG_ON(!old.frozen); 2087 2088 /* Determine target state of the slab */ 2089 new.counters = old.counters; 2090 if (freelist) { 2091 new.inuse--; 2092 set_freepointer(s, freelist, old.freelist); 2093 new.freelist = freelist; 2094 } else 2095 new.freelist = old.freelist; 2096 2097 new.frozen = 0; 2098 2099 if (!new.inuse && n->nr_partial >= s->min_partial) 2100 m = M_FREE; 2101 else if (new.freelist) { 2102 m = M_PARTIAL; 2103 if (!lock) { 2104 lock = 1; 2105 /* 2106 * Taking the spinlock removes the possiblity 2107 * that acquire_slab() will see a slab page that 2108 * is frozen 2109 */ 2110 spin_lock(&n->list_lock); 2111 } 2112 } else { 2113 m = M_FULL; 2114 if (kmem_cache_debug(s) && !lock) { 2115 lock = 1; 2116 /* 2117 * This also ensures that the scanning of full 2118 * slabs from diagnostic functions will not see 2119 * any frozen slabs. 2120 */ 2121 spin_lock(&n->list_lock); 2122 } 2123 } 2124 2125 if (l != m) { 2126 2127 if (l == M_PARTIAL) 2128 2129 remove_partial(n, page); 2130 2131 else if (l == M_FULL) 2132 2133 remove_full(s, n, page); 2134 2135 if (m == M_PARTIAL) { 2136 2137 add_partial(n, page, tail); 2138 stat(s, tail); 2139 2140 } else if (m == M_FULL) { 2141 2142 stat(s, DEACTIVATE_FULL); 2143 add_full(s, n, page); 2144 2145 } 2146 } 2147 2148 l = m; 2149 if (!__cmpxchg_double_slab(s, page, 2150 old.freelist, old.counters, 2151 new.freelist, new.counters, 2152 "unfreezing slab")) 2153 goto redo; 2154 2155 if (lock) 2156 spin_unlock(&n->list_lock); 2157 2158 if (m == M_FREE) { 2159 stat(s, DEACTIVATE_EMPTY); 2160 discard_slab(s, page); 2161 stat(s, FREE_SLAB); 2162 } 2163 2164 c->page = NULL; 2165 c->freelist = NULL; 2166 } 2167 2168 /* 2169 * Unfreeze all the cpu partial slabs. 2170 * 2171 * This function must be called with interrupts disabled 2172 * for the cpu using c (or some other guarantee must be there 2173 * to guarantee no concurrent accesses). 
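 * The callers in this file (put_cpu_partial() and __flush_cpu_slab()) both
 * run it with local interrupts off.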
2174 */ 2175 static void unfreeze_partials(struct kmem_cache *s, 2176 struct kmem_cache_cpu *c) 2177 { 2178 #ifdef CONFIG_SLUB_CPU_PARTIAL 2179 struct kmem_cache_node *n = NULL, *n2 = NULL; 2180 struct page *page, *discard_page = NULL; 2181 2182 while ((page = c->partial)) { 2183 struct page new; 2184 struct page old; 2185 2186 c->partial = page->next; 2187 2188 n2 = get_node(s, page_to_nid(page)); 2189 if (n != n2) { 2190 if (n) 2191 spin_unlock(&n->list_lock); 2192 2193 n = n2; 2194 spin_lock(&n->list_lock); 2195 } 2196 2197 do { 2198 2199 old.freelist = page->freelist; 2200 old.counters = page->counters; 2201 VM_BUG_ON(!old.frozen); 2202 2203 new.counters = old.counters; 2204 new.freelist = old.freelist; 2205 2206 new.frozen = 0; 2207 2208 } while (!__cmpxchg_double_slab(s, page, 2209 old.freelist, old.counters, 2210 new.freelist, new.counters, 2211 "unfreezing slab")); 2212 2213 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) { 2214 page->next = discard_page; 2215 discard_page = page; 2216 } else { 2217 add_partial(n, page, DEACTIVATE_TO_TAIL); 2218 stat(s, FREE_ADD_PARTIAL); 2219 } 2220 } 2221 2222 if (n) 2223 spin_unlock(&n->list_lock); 2224 2225 while (discard_page) { 2226 page = discard_page; 2227 discard_page = discard_page->next; 2228 2229 stat(s, DEACTIVATE_EMPTY); 2230 discard_slab(s, page); 2231 stat(s, FREE_SLAB); 2232 } 2233 #endif 2234 } 2235 2236 /* 2237 * Put a page that was just frozen (in __slab_free) into a partial page 2238 * slot if available. 2239 * 2240 * If we did not find a slot then simply move all the partials to the 2241 * per node partial list. 2242 */ 2243 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) 2244 { 2245 #ifdef CONFIG_SLUB_CPU_PARTIAL 2246 struct page *oldpage; 2247 int pages; 2248 int pobjects; 2249 2250 preempt_disable(); 2251 do { 2252 pages = 0; 2253 pobjects = 0; 2254 oldpage = this_cpu_read(s->cpu_slab->partial); 2255 2256 if (oldpage) { 2257 pobjects = oldpage->pobjects; 2258 pages = oldpage->pages; 2259 if (drain && pobjects > s->cpu_partial) { 2260 unsigned long flags; 2261 /* 2262 * partial array is full. Move the existing 2263 * set to the per node partial list. 2264 */ 2265 local_irq_save(flags); 2266 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); 2267 local_irq_restore(flags); 2268 oldpage = NULL; 2269 pobjects = 0; 2270 pages = 0; 2271 stat(s, CPU_PARTIAL_DRAIN); 2272 } 2273 } 2274 2275 pages++; 2276 pobjects += page->objects - page->inuse; 2277 2278 page->pages = pages; 2279 page->pobjects = pobjects; 2280 page->next = oldpage; 2281 2282 } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) 2283 != oldpage); 2284 if (unlikely(!s->cpu_partial)) { 2285 unsigned long flags; 2286 2287 local_irq_save(flags); 2288 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); 2289 local_irq_restore(flags); 2290 } 2291 preempt_enable(); 2292 #endif 2293 } 2294 2295 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) 2296 { 2297 stat(s, CPUSLAB_FLUSH); 2298 deactivate_slab(s, c->page, c->freelist, c); 2299 2300 c->tid = next_tid(c->tid); 2301 } 2302 2303 /* 2304 * Flush cpu slab. 2305 * 2306 * Called from IPI handler with interrupts disabled. 
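 * It is also called during cpu hotplug teardown, see slub_cpu_dead() below,
 * which disables local interrupts around it.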
2307 */ 2308 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) 2309 { 2310 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 2311 2312 if (likely(c)) { 2313 if (c->page) 2314 flush_slab(s, c); 2315 2316 unfreeze_partials(s, c); 2317 } 2318 } 2319 2320 static void flush_cpu_slab(void *d) 2321 { 2322 struct kmem_cache *s = d; 2323 2324 __flush_cpu_slab(s, smp_processor_id()); 2325 } 2326 2327 static bool has_cpu_slab(int cpu, void *info) 2328 { 2329 struct kmem_cache *s = info; 2330 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 2331 2332 return c->page || slub_percpu_partial(c); 2333 } 2334 2335 static void flush_all(struct kmem_cache *s) 2336 { 2337 on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC); 2338 } 2339 2340 /* 2341 * Use the cpu notifier to insure that the cpu slabs are flushed when 2342 * necessary. 2343 */ 2344 static int slub_cpu_dead(unsigned int cpu) 2345 { 2346 struct kmem_cache *s; 2347 unsigned long flags; 2348 2349 mutex_lock(&slab_mutex); 2350 list_for_each_entry(s, &slab_caches, list) { 2351 local_irq_save(flags); 2352 __flush_cpu_slab(s, cpu); 2353 local_irq_restore(flags); 2354 } 2355 mutex_unlock(&slab_mutex); 2356 return 0; 2357 } 2358 2359 /* 2360 * Check if the objects in a per cpu structure fit numa 2361 * locality expectations. 2362 */ 2363 static inline int node_match(struct page *page, int node) 2364 { 2365 #ifdef CONFIG_NUMA 2366 if (!page || (node != NUMA_NO_NODE && page_to_nid(page) != node)) 2367 return 0; 2368 #endif 2369 return 1; 2370 } 2371 2372 #ifdef CONFIG_SLUB_DEBUG 2373 static int count_free(struct page *page) 2374 { 2375 return page->objects - page->inuse; 2376 } 2377 2378 static inline unsigned long node_nr_objs(struct kmem_cache_node *n) 2379 { 2380 return atomic_long_read(&n->total_objects); 2381 } 2382 #endif /* CONFIG_SLUB_DEBUG */ 2383 2384 #if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS) 2385 static unsigned long count_partial(struct kmem_cache_node *n, 2386 int (*get_count)(struct page *)) 2387 { 2388 unsigned long flags; 2389 unsigned long x = 0; 2390 struct page *page; 2391 2392 spin_lock_irqsave(&n->list_lock, flags); 2393 list_for_each_entry(page, &n->partial, lru) 2394 x += get_count(page); 2395 spin_unlock_irqrestore(&n->list_lock, flags); 2396 return x; 2397 } 2398 #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */ 2399 2400 static noinline void 2401 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) 2402 { 2403 #ifdef CONFIG_SLUB_DEBUG 2404 static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL, 2405 DEFAULT_RATELIMIT_BURST); 2406 int node; 2407 struct kmem_cache_node *n; 2408 2409 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs)) 2410 return; 2411 2412 pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n", 2413 nid, gfpflags, &gfpflags); 2414 pr_warn(" cache: %s, object size: %d, buffer size: %d, default order: %d, min order: %d\n", 2415 s->name, s->object_size, s->size, oo_order(s->oo), 2416 oo_order(s->min)); 2417 2418 if (oo_order(s->min) > get_order(s->object_size)) 2419 pr_warn(" %s debugging increased min order, use slub_debug=O to disable.\n", 2420 s->name); 2421 2422 for_each_kmem_cache_node(s, node, n) { 2423 unsigned long nr_slabs; 2424 unsigned long nr_objs; 2425 unsigned long nr_free; 2426 2427 nr_free = count_partial(n, count_free); 2428 nr_slabs = node_nr_slabs(n); 2429 nr_objs = node_nr_objs(n); 2430 2431 pr_warn(" node %d: slabs: %ld, objs: %ld, free: %ld\n", 2432 node, nr_slabs, nr_objs, nr_free); 2433 } 
2434 #endif 2435 } 2436 2437 static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags, 2438 int node, struct kmem_cache_cpu **pc) 2439 { 2440 void *freelist; 2441 struct kmem_cache_cpu *c = *pc; 2442 struct page *page; 2443 2444 freelist = get_partial(s, flags, node, c); 2445 2446 if (freelist) 2447 return freelist; 2448 2449 page = new_slab(s, flags, node); 2450 if (page) { 2451 c = raw_cpu_ptr(s->cpu_slab); 2452 if (c->page) 2453 flush_slab(s, c); 2454 2455 /* 2456 * No other reference to the page yet so we can 2457 * muck around with it freely without cmpxchg 2458 */ 2459 freelist = page->freelist; 2460 page->freelist = NULL; 2461 2462 stat(s, ALLOC_SLAB); 2463 c->page = page; 2464 *pc = c; 2465 } else 2466 freelist = NULL; 2467 2468 return freelist; 2469 } 2470 2471 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags) 2472 { 2473 if (unlikely(PageSlabPfmemalloc(page))) 2474 return gfp_pfmemalloc_allowed(gfpflags); 2475 2476 return true; 2477 } 2478 2479 /* 2480 * Check the page->freelist of a page and either transfer the freelist to the 2481 * per cpu freelist or deactivate the page. 2482 * 2483 * The page is still frozen if the return value is not NULL. 2484 * 2485 * If this function returns NULL then the page has been unfrozen. 2486 * 2487 * This function must be called with interrupt disabled. 2488 */ 2489 static inline void *get_freelist(struct kmem_cache *s, struct page *page) 2490 { 2491 struct page new; 2492 unsigned long counters; 2493 void *freelist; 2494 2495 do { 2496 freelist = page->freelist; 2497 counters = page->counters; 2498 2499 new.counters = counters; 2500 VM_BUG_ON(!new.frozen); 2501 2502 new.inuse = page->objects; 2503 new.frozen = freelist != NULL; 2504 2505 } while (!__cmpxchg_double_slab(s, page, 2506 freelist, counters, 2507 NULL, new.counters, 2508 "get_freelist")); 2509 2510 return freelist; 2511 } 2512 2513 /* 2514 * Slow path. The lockless freelist is empty or we need to perform 2515 * debugging duties. 2516 * 2517 * Processing is still very fast if new objects have been freed to the 2518 * regular freelist. In that case we simply take over the regular freelist 2519 * as the lockless freelist and zap the regular freelist. 2520 * 2521 * If that is not working then we fall back to the partial lists. We take the 2522 * first element of the freelist as the object to allocate now and move the 2523 * rest of the freelist to the lockless freelist. 2524 * 2525 * And if we were unable to get a new slab from the partial slab lists then 2526 * we need to allocate a new slab. This is the slowest path since it involves 2527 * a call to the page allocator and the setup of a new slab. 2528 * 2529 * Version of __slab_alloc to use when we know that interrupts are 2530 * already disabled (which is the case for bulk allocation). 
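 * The regular entry point __slab_alloc() below just disables interrupts,
 * refetches the per cpu slab pointer in case preemption moved us, and then
 * calls this function.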
2531 */ 2532 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 2533 unsigned long addr, struct kmem_cache_cpu *c) 2534 { 2535 void *freelist; 2536 struct page *page; 2537 2538 page = c->page; 2539 if (!page) 2540 goto new_slab; 2541 redo: 2542 2543 if (unlikely(!node_match(page, node))) { 2544 int searchnode = node; 2545 2546 if (node != NUMA_NO_NODE && !node_present_pages(node)) 2547 searchnode = node_to_mem_node(node); 2548 2549 if (unlikely(!node_match(page, searchnode))) { 2550 stat(s, ALLOC_NODE_MISMATCH); 2551 deactivate_slab(s, page, c->freelist, c); 2552 goto new_slab; 2553 } 2554 } 2555 2556 /* 2557 * By rights, we should be searching for a slab page that was 2558 * PFMEMALLOC but right now, we are losing the pfmemalloc 2559 * information when the page leaves the per-cpu allocator 2560 */ 2561 if (unlikely(!pfmemalloc_match(page, gfpflags))) { 2562 deactivate_slab(s, page, c->freelist, c); 2563 goto new_slab; 2564 } 2565 2566 /* must check again c->freelist in case of cpu migration or IRQ */ 2567 freelist = c->freelist; 2568 if (freelist) 2569 goto load_freelist; 2570 2571 freelist = get_freelist(s, page); 2572 2573 if (!freelist) { 2574 c->page = NULL; 2575 stat(s, DEACTIVATE_BYPASS); 2576 goto new_slab; 2577 } 2578 2579 stat(s, ALLOC_REFILL); 2580 2581 load_freelist: 2582 /* 2583 * freelist is pointing to the list of objects to be used. 2584 * page is pointing to the page from which the objects are obtained. 2585 * That page must be frozen for per cpu allocations to work. 2586 */ 2587 VM_BUG_ON(!c->page->frozen); 2588 c->freelist = get_freepointer(s, freelist); 2589 c->tid = next_tid(c->tid); 2590 return freelist; 2591 2592 new_slab: 2593 2594 if (slub_percpu_partial(c)) { 2595 page = c->page = slub_percpu_partial(c); 2596 slub_set_percpu_partial(c, page); 2597 stat(s, CPU_PARTIAL_ALLOC); 2598 goto redo; 2599 } 2600 2601 freelist = new_slab_objects(s, gfpflags, node, &c); 2602 2603 if (unlikely(!freelist)) { 2604 slab_out_of_memory(s, gfpflags, node); 2605 return NULL; 2606 } 2607 2608 page = c->page; 2609 if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags))) 2610 goto load_freelist; 2611 2612 /* Only entered in the debug case */ 2613 if (kmem_cache_debug(s) && 2614 !alloc_debug_processing(s, page, freelist, addr)) 2615 goto new_slab; /* Slab failed checks. Next slab needed */ 2616 2617 deactivate_slab(s, page, get_freepointer(s, freelist), c); 2618 return freelist; 2619 } 2620 2621 /* 2622 * Another one that disabled interrupt and compensates for possible 2623 * cpu changes by refetching the per cpu area pointer. 2624 */ 2625 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 2626 unsigned long addr, struct kmem_cache_cpu *c) 2627 { 2628 void *p; 2629 unsigned long flags; 2630 2631 local_irq_save(flags); 2632 #ifdef CONFIG_PREEMPT 2633 /* 2634 * We may have been preempted and rescheduled on a different 2635 * cpu before disabling interrupts. Need to reload cpu area 2636 * pointer. 2637 */ 2638 c = this_cpu_ptr(s->cpu_slab); 2639 #endif 2640 2641 p = ___slab_alloc(s, gfpflags, node, addr, c); 2642 local_irq_restore(flags); 2643 return p; 2644 } 2645 2646 /* 2647 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc) 2648 * have the fastpath folded into their functions. So no function call 2649 * overhead for requests that can be satisfied on the fastpath. 2650 * 2651 * The fastpath works by first checking if the lockless freelist can be used. 
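 * The lockless path replaces c->freelist and c->tid with a single
 * this_cpu_cmpxchg_double(), so it needs neither interrupt disabling nor locks.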
2652 * If not then __slab_alloc is called for slow processing. 2653 * 2654 * Otherwise we can simply pick the next object from the lockless free list. 2655 */ 2656 static __always_inline void *slab_alloc_node(struct kmem_cache *s, 2657 gfp_t gfpflags, int node, unsigned long addr) 2658 { 2659 void *object; 2660 struct kmem_cache_cpu *c; 2661 struct page *page; 2662 unsigned long tid; 2663 2664 s = slab_pre_alloc_hook(s, gfpflags); 2665 if (!s) 2666 return NULL; 2667 redo: 2668 /* 2669 * Must read kmem_cache cpu data via this cpu ptr. Preemption is 2670 * enabled. We may switch back and forth between cpus while 2671 * reading from one cpu area. That does not matter as long 2672 * as we end up on the original cpu again when doing the cmpxchg. 2673 * 2674 * We should guarantee that tid and kmem_cache are retrieved on 2675 * the same cpu. They could differ if CONFIG_PREEMPT is set so we need 2676 * to check that they match. 2677 */ 2678 do { 2679 tid = this_cpu_read(s->cpu_slab->tid); 2680 c = raw_cpu_ptr(s->cpu_slab); 2681 } while (IS_ENABLED(CONFIG_PREEMPT) && 2682 unlikely(tid != READ_ONCE(c->tid))); 2683 2684 /* 2685 * Irqless object alloc/free algorithm used here depends on sequence 2686 * of fetching cpu_slab's data. tid should be fetched before anything 2687 * on c to guarantee that object and page associated with previous tid 2688 * won't be used with current tid. If we fetch tid first, object and 2689 * page could be one associated with next tid and our alloc/free 2690 * request will fail. In this case, we will retry. So, no problem. 2691 */ 2692 barrier(); 2693 2694 /* 2695 * The transaction ids are globally unique per cpu and per operation on 2696 * a per cpu queue. Thus they can guarantee that the cmpxchg_double 2697 * occurs on the right processor and that there was no operation on the 2698 * linked list in between. 2699 */ 2700 2701 object = c->freelist; 2702 page = c->page; 2703 if (unlikely(!object || !node_match(page, node))) { 2704 object = __slab_alloc(s, gfpflags, node, addr, c); 2705 stat(s, ALLOC_SLOWPATH); 2706 } else { 2707 void *next_object = get_freepointer_safe(s, object); 2708 2709 /* 2710 * The cmpxchg will only match if there was no additional 2711 * operation and if we are on the right processor. 2712 * 2713 * The cmpxchg does the following atomically (without lock 2714 * semantics!) 2715 * 1. Relocate first pointer to the current per cpu area. 2716 * 2. Verify that tid and freelist have not been changed 2717 * 3. If they were not changed replace tid and freelist 2718 * 2719 * Since this is without lock semantics the protection is only 2720 * against code executing on this cpu *not* from access by 2721 * other cpus.
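 * As a rough example, if TID_STEP works out to 4 then cpu 1 uses tids 1, 5, 9, ...
 * so a tid taken on another cpu, or an older tid from this cpu, can never match:
 * the cmpxchg below simply fails and we retry.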
2722 */ 2723 if (unlikely(!this_cpu_cmpxchg_double( 2724 s->cpu_slab->freelist, s->cpu_slab->tid, 2725 object, tid, 2726 next_object, next_tid(tid)))) { 2727 2728 note_cmpxchg_failure("slab_alloc", s, tid); 2729 goto redo; 2730 } 2731 prefetch_freepointer(s, next_object); 2732 stat(s, ALLOC_FASTPATH); 2733 } 2734 2735 if (unlikely(gfpflags & __GFP_ZERO) && object) 2736 memset(object, 0, s->object_size); 2737 2738 slab_post_alloc_hook(s, gfpflags, 1, &object); 2739 2740 return object; 2741 } 2742 2743 static __always_inline void *slab_alloc(struct kmem_cache *s, 2744 gfp_t gfpflags, unsigned long addr) 2745 { 2746 return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr); 2747 } 2748 2749 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) 2750 { 2751 void *ret = slab_alloc(s, gfpflags, _RET_IP_); 2752 2753 trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, 2754 s->size, gfpflags); 2755 2756 return ret; 2757 } 2758 EXPORT_SYMBOL(kmem_cache_alloc); 2759 2760 #ifdef CONFIG_TRACING 2761 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size) 2762 { 2763 void *ret = slab_alloc(s, gfpflags, _RET_IP_); 2764 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags); 2765 kasan_kmalloc(s, ret, size, gfpflags); 2766 return ret; 2767 } 2768 EXPORT_SYMBOL(kmem_cache_alloc_trace); 2769 #endif 2770 2771 #ifdef CONFIG_NUMA 2772 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) 2773 { 2774 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_); 2775 2776 trace_kmem_cache_alloc_node(_RET_IP_, ret, 2777 s->object_size, s->size, gfpflags, node); 2778 2779 return ret; 2780 } 2781 EXPORT_SYMBOL(kmem_cache_alloc_node); 2782 2783 #ifdef CONFIG_TRACING 2784 void *kmem_cache_alloc_node_trace(struct kmem_cache *s, 2785 gfp_t gfpflags, 2786 int node, size_t size) 2787 { 2788 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_); 2789 2790 trace_kmalloc_node(_RET_IP_, ret, 2791 size, s->size, gfpflags, node); 2792 2793 kasan_kmalloc(s, ret, size, gfpflags); 2794 return ret; 2795 } 2796 EXPORT_SYMBOL(kmem_cache_alloc_node_trace); 2797 #endif 2798 #endif 2799 2800 /* 2801 * Slow path handling. This may still be called frequently since objects 2802 * have a longer lifetime than the cpu slabs in most processing loads. 2803 * 2804 * So we still attempt to reduce cache line usage. Just take the slab 2805 * lock and free the item. If there is no additional partial page 2806 * handling required then we can return immediately. 2807 */ 2808 static void __slab_free(struct kmem_cache *s, struct page *page, 2809 void *head, void *tail, int cnt, 2810 unsigned long addr) 2811 2812 { 2813 void *prior; 2814 int was_frozen; 2815 struct page new; 2816 unsigned long counters; 2817 struct kmem_cache_node *n = NULL; 2818 unsigned long uninitialized_var(flags); 2819 2820 stat(s, FREE_SLOWPATH); 2821 2822 if (kmem_cache_debug(s) && 2823 !free_debug_processing(s, page, head, tail, cnt, addr)) 2824 return; 2825 2826 do { 2827 if (unlikely(n)) { 2828 spin_unlock_irqrestore(&n->list_lock, flags); 2829 n = NULL; 2830 } 2831 prior = page->freelist; 2832 counters = page->counters; 2833 set_freepointer(s, tail, prior); 2834 new.counters = counters; 2835 was_frozen = new.frozen; 2836 new.inuse -= cnt; 2837 if ((!new.inuse || !prior) && !was_frozen) { 2838 2839 if (kmem_cache_has_cpu_partial(s) && !prior) { 2840 2841 /* 2842 * Slab was on no list before and will be 2843 * partially empty 2844 * We can defer the list move and instead 2845 * freeze it. 
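 * If the cmpxchg below succeeds with new.frozen set, the page is handed to
 * put_cpu_partial() further down instead of being moved onto any node list.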
2846 */ 2847 new.frozen = 1; 2848 2849 } else { /* Needs to be taken off a list */ 2850 2851 n = get_node(s, page_to_nid(page)); 2852 /* 2853 * Speculatively acquire the list_lock. 2854 * If the cmpxchg does not succeed then we may 2855 * drop the list_lock without any processing. 2856 * 2857 * Otherwise the list_lock will synchronize with 2858 * other processors updating the list of slabs. 2859 */ 2860 spin_lock_irqsave(&n->list_lock, flags); 2861 2862 } 2863 } 2864 2865 } while (!cmpxchg_double_slab(s, page, 2866 prior, counters, 2867 head, new.counters, 2868 "__slab_free")); 2869 2870 if (likely(!n)) { 2871 2872 /* 2873 * If we just froze the page then put it onto the 2874 * per cpu partial list. 2875 */ 2876 if (new.frozen && !was_frozen) { 2877 put_cpu_partial(s, page, 1); 2878 stat(s, CPU_PARTIAL_FREE); 2879 } 2880 /* 2881 * The list lock was not taken therefore no list 2882 * activity can be necessary. 2883 */ 2884 if (was_frozen) 2885 stat(s, FREE_FROZEN); 2886 return; 2887 } 2888 2889 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) 2890 goto slab_empty; 2891 2892 /* 2893 * Objects left in the slab. If it was not on the partial list before 2894 * then add it. 2895 */ 2896 if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) { 2897 if (kmem_cache_debug(s)) 2898 remove_full(s, n, page); 2899 add_partial(n, page, DEACTIVATE_TO_TAIL); 2900 stat(s, FREE_ADD_PARTIAL); 2901 } 2902 spin_unlock_irqrestore(&n->list_lock, flags); 2903 return; 2904 2905 slab_empty: 2906 if (prior) { 2907 /* 2908 * Slab on the partial list. 2909 */ 2910 remove_partial(n, page); 2911 stat(s, FREE_REMOVE_PARTIAL); 2912 } else { 2913 /* Slab must be on the full list */ 2914 remove_full(s, n, page); 2915 } 2916 2917 spin_unlock_irqrestore(&n->list_lock, flags); 2918 stat(s, FREE_SLAB); 2919 discard_slab(s, page); 2920 } 2921 2922 /* 2923 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that 2924 * can perform fastpath freeing without additional function calls. 2925 * 2926 * The fastpath is only possible if we are freeing to the current cpu slab 2927 * of this processor. This is typically the case if we have just allocated 2928 * the item before. 2929 * 2930 * If fastpath is not possible then fall back to __slab_free where we deal 2931 * with all sorts of special processing. 2932 * 2933 * Bulk free of a freelist with several objects (all pointing to the 2934 * same page) is possible by specifying head and tail ptr, plus objects 2935 * count (cnt). Bulk free is indicated by the tail pointer being set. 2936 */ 2937 static __always_inline void do_slab_free(struct kmem_cache *s, 2938 struct page *page, void *head, void *tail, 2939 int cnt, unsigned long addr) 2940 { 2941 void *tail_obj = tail ? : head; 2942 struct kmem_cache_cpu *c; 2943 unsigned long tid; 2944 redo: 2945 /* 2946 * Determine the current cpu's per cpu slab. 2947 * The cpu may change afterward. However that does not matter since 2948 * data is retrieved via this pointer. If we are on the same cpu 2949 * during the cmpxchg then the free will succeed.
2950 */ 2951 do { 2952 tid = this_cpu_read(s->cpu_slab->tid); 2953 c = raw_cpu_ptr(s->cpu_slab); 2954 } while (IS_ENABLED(CONFIG_PREEMPT) && 2955 unlikely(tid != READ_ONCE(c->tid))); 2956 2957 /* Same as the comment on barrier() in slab_alloc_node() */ 2958 barrier(); 2959 2960 if (likely(page == c->page)) { 2961 set_freepointer(s, tail_obj, c->freelist); 2962 2963 if (unlikely(!this_cpu_cmpxchg_double( 2964 s->cpu_slab->freelist, s->cpu_slab->tid, 2965 c->freelist, tid, 2966 head, next_tid(tid)))) { 2967 2968 note_cmpxchg_failure("slab_free", s, tid); 2969 goto redo; 2970 } 2971 stat(s, FREE_FASTPATH); 2972 } else 2973 __slab_free(s, page, head, tail_obj, cnt, addr); 2974 2975 } 2976 2977 static __always_inline void slab_free(struct kmem_cache *s, struct page *page, 2978 void *head, void *tail, int cnt, 2979 unsigned long addr) 2980 { 2981 /* 2982 * With KASAN enabled slab_free_freelist_hook modifies the freelist 2983 * to remove objects whose reuse must be delayed. 2984 */ 2985 if (slab_free_freelist_hook(s, &head, &tail)) 2986 do_slab_free(s, page, head, tail, cnt, addr); 2987 } 2988 2989 #ifdef CONFIG_KASAN 2990 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr) 2991 { 2992 do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr); 2993 } 2994 #endif 2995 2996 void kmem_cache_free(struct kmem_cache *s, void *x) 2997 { 2998 s = cache_from_obj(s, x); 2999 if (!s) 3000 return; 3001 slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_); 3002 trace_kmem_cache_free(_RET_IP_, x); 3003 } 3004 EXPORT_SYMBOL(kmem_cache_free); 3005 3006 struct detached_freelist { 3007 struct page *page; 3008 void *tail; 3009 void *freelist; 3010 int cnt; 3011 struct kmem_cache *s; 3012 }; 3013 3014 /* 3015 * This function progressively scans the array with free objects (with 3016 * a limited look ahead) and extracts objects belonging to the same 3017 * page. It builds a detached freelist directly within the given 3018 * page/objects. This can happen without any need for 3019 * synchronization, because the objects are owned by the running process. 3020 * The freelist is built up as a single linked list in the objects. 3021 * The idea is that this detached freelist can then be bulk 3022 * transferred to the real freelist(s), but only requiring a single 3023 * synchronization primitive. Look ahead in the array is limited due 3024 * to performance reasons. 3025 */ 3026 static inline 3027 int build_detached_freelist(struct kmem_cache *s, size_t size, 3028 void **p, struct detached_freelist *df) 3029 { 3030 size_t first_skipped_index = 0; 3031 int lookahead = 3; 3032 void *object; 3033 struct page *page; 3034 3035 /* Always re-init detached_freelist */ 3036 df->page = NULL; 3037 3038 do { 3039 object = p[--size]; 3040 /* Do we need !ZERO_OR_NULL_PTR(object) here?
(for kfree) */ 3041 } while (!object && size); 3042 3043 if (!object) 3044 return 0; 3045 3046 page = virt_to_head_page(object); 3047 if (!s) { 3048 /* Handle kalloc'ed objects */ 3049 if (unlikely(!PageSlab(page))) { 3050 BUG_ON(!PageCompound(page)); 3051 kfree_hook(object); 3052 __free_pages(page, compound_order(page)); 3053 p[size] = NULL; /* mark object processed */ 3054 return size; 3055 } 3056 /* Derive kmem_cache from object */ 3057 df->s = page->slab_cache; 3058 } else { 3059 df->s = cache_from_obj(s, object); /* Support for memcg */ 3060 } 3061 3062 /* Start new detached freelist */ 3063 df->page = page; 3064 set_freepointer(df->s, object, NULL); 3065 df->tail = object; 3066 df->freelist = object; 3067 p[size] = NULL; /* mark object processed */ 3068 df->cnt = 1; 3069 3070 while (size) { 3071 object = p[--size]; 3072 if (!object) 3073 continue; /* Skip processed objects */ 3074 3075 /* df->page is always set at this point */ 3076 if (df->page == virt_to_head_page(object)) { 3077 /* Opportunity build freelist */ 3078 set_freepointer(df->s, object, df->freelist); 3079 df->freelist = object; 3080 df->cnt++; 3081 p[size] = NULL; /* mark object processed */ 3082 3083 continue; 3084 } 3085 3086 /* Limit look ahead search */ 3087 if (!--lookahead) 3088 break; 3089 3090 if (!first_skipped_index) 3091 first_skipped_index = size + 1; 3092 } 3093 3094 return first_skipped_index; 3095 } 3096 3097 /* Note that interrupts must be enabled when calling this function. */ 3098 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) 3099 { 3100 if (WARN_ON(!size)) 3101 return; 3102 3103 do { 3104 struct detached_freelist df; 3105 3106 size = build_detached_freelist(s, size, p, &df); 3107 if (!df.page) 3108 continue; 3109 3110 slab_free(df.s, df.page, df.freelist, df.tail, df.cnt,_RET_IP_); 3111 } while (likely(size)); 3112 } 3113 EXPORT_SYMBOL(kmem_cache_free_bulk); 3114 3115 /* Note that interrupts must be enabled when calling this function. */ 3116 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, 3117 void **p) 3118 { 3119 struct kmem_cache_cpu *c; 3120 int i; 3121 3122 /* memcg and kmem_cache debug support */ 3123 s = slab_pre_alloc_hook(s, flags); 3124 if (unlikely(!s)) 3125 return false; 3126 /* 3127 * Drain objects in the per cpu slab, while disabling local 3128 * IRQs, which protects against PREEMPT and interrupts 3129 * handlers invoking normal fastpath. 
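 * On success the return value equals size; if any allocation fails, everything
 * allocated so far is freed again via __kmem_cache_free_bulk() and 0 is returned,
 * so a caller can simply check kmem_cache_alloc_bulk(s, flags, size, p) == size.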
3130 */ 3131 local_irq_disable(); 3132 c = this_cpu_ptr(s->cpu_slab); 3133 3134 for (i = 0; i < size; i++) { 3135 void *object = c->freelist; 3136 3137 if (unlikely(!object)) { 3138 /* 3139 * Invoking the slow path likely has the side-effect 3140 * of re-populating the per CPU c->freelist 3141 */ 3142 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, 3143 _RET_IP_, c); 3144 if (unlikely(!p[i])) 3145 goto error; 3146 3147 c = this_cpu_ptr(s->cpu_slab); 3148 continue; /* goto for-loop */ 3149 } 3150 c->freelist = get_freepointer(s, object); 3151 p[i] = object; 3152 } 3153 c->tid = next_tid(c->tid); 3154 local_irq_enable(); 3155 3156 /* Clear memory outside IRQ disabled fastpath loop */ 3157 if (unlikely(flags & __GFP_ZERO)) { 3158 int j; 3159 3160 for (j = 0; j < i; j++) 3161 memset(p[j], 0, s->object_size); 3162 } 3163 3164 /* memcg and kmem_cache debug support */ 3165 slab_post_alloc_hook(s, flags, size, p); 3166 return i; 3167 error: 3168 local_irq_enable(); 3169 slab_post_alloc_hook(s, flags, i, p); 3170 __kmem_cache_free_bulk(s, i, p); 3171 return 0; 3172 } 3173 EXPORT_SYMBOL(kmem_cache_alloc_bulk); 3174 3175 3176 /* 3177 * Object placement in a slab is made very easy because we always start at 3178 * offset 0. If we tune the size of the object to the alignment then we can 3179 * get the required alignment by putting one properly sized object after 3180 * another. 3181 * 3182 * Notice that the allocation order determines the sizes of the per cpu 3183 * caches. Each processor always has one slab available for allocations. 3184 * Increasing the allocation order reduces the number of times that slabs 3185 * must be moved on and off the partial lists and is therefore a factor in 3186 * locking overhead. 3187 */ 3188 3189 /* 3190 * Minimum / Maximum order of slab pages. This influences locking overhead 3191 * and slab fragmentation. A higher order reduces the number of partial slabs 3192 * and increases the number of allocations possible without having to 3193 * take the list_lock. 3194 */ 3195 static int slub_min_order; 3196 static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER; 3197 static int slub_min_objects; 3198 3199 /* 3200 * Calculate the order of allocation given a slab object size. 3201 * 3202 * The order of allocation has a significant impact on performance and other 3203 * system components. Generally order 0 allocations should be preferred since 3204 * order 0 does not cause fragmentation in the page allocator. Larger objects 3205 * can be problematic to put into order 0 slabs because there may be too much 3206 * unused space left. We go to a higher order if more than 1/16th of the slab 3207 * would be wasted. 3208 * 3209 * In order to reach satisfactory performance we must ensure that a minimum 3210 * number of objects is in one slab. Otherwise we may generate too much 3211 * activity on the partial lists which requires taking the list_lock. This is 3212 * less of a concern for large slabs though which are rarely used. 3213 * 3214 * slub_max_order specifies the order where we begin to stop considering the 3215 * number of objects in a slab as critical. If we reach slub_max_order then 3216 * we try to keep the page order as low as possible. So we accept more waste 3217 * of space in favor of a small page order. 3218 * 3219 * Higher order allocations also allow the placement of more objects in a 3220 * slab and thereby reduce object handling overhead. If the user has 3221 * requested a higher minimum order then we start with that one instead of 3222 * the smallest order which will fit the object.
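 * As a rough worked example (ignoring reserved space): with 4KiB pages, a
 * 700 byte object and min_objects = 8, the scan starts at order 1 because
 * 8 * 700 = 5600 bytes needs two pages; order 1 leaves 8192 % 700 = 492 bytes
 * unused, which is within 8192 / 16, so order 1 is accepted at fract_leftover = 16.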
3223 */ 3224 static inline int slab_order(int size, int min_objects, 3225 int max_order, int fract_leftover, int reserved) 3226 { 3227 int order; 3228 int rem; 3229 int min_order = slub_min_order; 3230 3231 if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE) 3232 return get_order(size * MAX_OBJS_PER_PAGE) - 1; 3233 3234 for (order = max(min_order, get_order(min_objects * size + reserved)); 3235 order <= max_order; order++) { 3236 3237 unsigned long slab_size = PAGE_SIZE << order; 3238 3239 rem = (slab_size - reserved) % size; 3240 3241 if (rem <= slab_size / fract_leftover) 3242 break; 3243 } 3244 3245 return order; 3246 } 3247 3248 static inline int calculate_order(int size, int reserved) 3249 { 3250 int order; 3251 int min_objects; 3252 int fraction; 3253 int max_objects; 3254 3255 /* 3256 * Attempt to find best configuration for a slab. This 3257 * works by first attempting to generate a layout with 3258 * the best configuration and backing off gradually. 3259 * 3260 * First we increase the acceptable waste in a slab. Then 3261 * we reduce the minimum objects required in a slab. 3262 */ 3263 min_objects = slub_min_objects; 3264 if (!min_objects) 3265 min_objects = 4 * (fls(nr_cpu_ids) + 1); 3266 max_objects = order_objects(slub_max_order, size, reserved); 3267 min_objects = min(min_objects, max_objects); 3268 3269 while (min_objects > 1) { 3270 fraction = 16; 3271 while (fraction >= 4) { 3272 order = slab_order(size, min_objects, 3273 slub_max_order, fraction, reserved); 3274 if (order <= slub_max_order) 3275 return order; 3276 fraction /= 2; 3277 } 3278 min_objects--; 3279 } 3280 3281 /* 3282 * We were unable to place multiple objects in a slab. Now 3283 * lets see if we can place a single object there. 3284 */ 3285 order = slab_order(size, 1, slub_max_order, 1, reserved); 3286 if (order <= slub_max_order) 3287 return order; 3288 3289 /* 3290 * Doh this slab cannot be placed using slub_max_order. 3291 */ 3292 order = slab_order(size, 1, MAX_ORDER, 1, reserved); 3293 if (order < MAX_ORDER) 3294 return order; 3295 return -ENOSYS; 3296 } 3297 3298 static void 3299 init_kmem_cache_node(struct kmem_cache_node *n) 3300 { 3301 n->nr_partial = 0; 3302 spin_lock_init(&n->list_lock); 3303 INIT_LIST_HEAD(&n->partial); 3304 #ifdef CONFIG_SLUB_DEBUG 3305 atomic_long_set(&n->nr_slabs, 0); 3306 atomic_long_set(&n->total_objects, 0); 3307 INIT_LIST_HEAD(&n->full); 3308 #endif 3309 } 3310 3311 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) 3312 { 3313 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE < 3314 KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu)); 3315 3316 /* 3317 * Must align to double word boundary for the double cmpxchg 3318 * instructions to work; see __pcpu_double_call_return_bool(). 3319 */ 3320 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), 3321 2 * sizeof(void *)); 3322 3323 if (!s->cpu_slab) 3324 return 0; 3325 3326 init_kmem_cache_cpus(s); 3327 3328 return 1; 3329 } 3330 3331 static struct kmem_cache *kmem_cache_node; 3332 3333 /* 3334 * No kmalloc_node yet so do it by hand. We know that this is the first 3335 * slab on the node for this slabcache. There are no concurrent accesses 3336 * possible. 3337 * 3338 * Note that this function only works on the kmem_cache_node 3339 * when allocating for the kmem_cache_node. This is used for bootstrapping 3340 * memory on a fresh node that has no slab structures yet. 
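 * The first page's freelist and counters are set up directly, without cmpxchg,
 * since nothing else can reference the page yet.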
3341 */ 3342 static void early_kmem_cache_node_alloc(int node) 3343 { 3344 struct page *page; 3345 struct kmem_cache_node *n; 3346 3347 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node)); 3348 3349 page = new_slab(kmem_cache_node, GFP_NOWAIT, node); 3350 3351 BUG_ON(!page); 3352 if (page_to_nid(page) != node) { 3353 pr_err("SLUB: Unable to allocate memory from node %d\n", node); 3354 pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n"); 3355 } 3356 3357 n = page->freelist; 3358 BUG_ON(!n); 3359 page->freelist = get_freepointer(kmem_cache_node, n); 3360 page->inuse = 1; 3361 page->frozen = 0; 3362 kmem_cache_node->node[node] = n; 3363 #ifdef CONFIG_SLUB_DEBUG 3364 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE); 3365 init_tracking(kmem_cache_node, n); 3366 #endif 3367 kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node), 3368 GFP_KERNEL); 3369 init_kmem_cache_node(n); 3370 inc_slabs_node(kmem_cache_node, node, page->objects); 3371 3372 /* 3373 * No locks need to be taken here as it has just been 3374 * initialized and there is no concurrent access. 3375 */ 3376 __add_partial(n, page, DEACTIVATE_TO_HEAD); 3377 } 3378 3379 static void free_kmem_cache_nodes(struct kmem_cache *s) 3380 { 3381 int node; 3382 struct kmem_cache_node *n; 3383 3384 for_each_kmem_cache_node(s, node, n) { 3385 s->node[node] = NULL; 3386 kmem_cache_free(kmem_cache_node, n); 3387 } 3388 } 3389 3390 void __kmem_cache_release(struct kmem_cache *s) 3391 { 3392 cache_random_seq_destroy(s); 3393 free_percpu(s->cpu_slab); 3394 free_kmem_cache_nodes(s); 3395 } 3396 3397 static int init_kmem_cache_nodes(struct kmem_cache *s) 3398 { 3399 int node; 3400 3401 for_each_node_state(node, N_NORMAL_MEMORY) { 3402 struct kmem_cache_node *n; 3403 3404 if (slab_state == DOWN) { 3405 early_kmem_cache_node_alloc(node); 3406 continue; 3407 } 3408 n = kmem_cache_alloc_node(kmem_cache_node, 3409 GFP_KERNEL, node); 3410 3411 if (!n) { 3412 free_kmem_cache_nodes(s); 3413 return 0; 3414 } 3415 3416 init_kmem_cache_node(n); 3417 s->node[node] = n; 3418 } 3419 return 1; 3420 } 3421 3422 static void set_min_partial(struct kmem_cache *s, unsigned long min) 3423 { 3424 if (min < MIN_PARTIAL) 3425 min = MIN_PARTIAL; 3426 else if (min > MAX_PARTIAL) 3427 min = MAX_PARTIAL; 3428 s->min_partial = min; 3429 } 3430 3431 static void set_cpu_partial(struct kmem_cache *s) 3432 { 3433 #ifdef CONFIG_SLUB_CPU_PARTIAL 3434 /* 3435 * cpu_partial determined the maximum number of objects kept in the 3436 * per cpu partial lists of a processor. 3437 * 3438 * Per cpu partial lists mainly contain slabs that just have one 3439 * object freed. If they are used for allocation then they can be 3440 * filled up again with minimal effort. The slab will never hit the 3441 * per node partial lists and therefore no locking will be required. 3442 * 3443 * This setting also determines 3444 * 3445 * A) The number of objects from per cpu partial slabs dumped to the 3446 * per node list when we reach the limit. 3447 * B) The number of objects in cpu partial slabs to extract from the 3448 * per node list when we run out of per cpu objects. We only fetch 3449 * 50% to keep some capacity around for frees. 
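 * (The 50% limit is enforced in get_partial_node() above, which stops
 * acquiring slabs once more than cpu_partial / 2 objects are available.)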
3450 */ 3451 if (!kmem_cache_has_cpu_partial(s)) 3452 s->cpu_partial = 0; 3453 else if (s->size >= PAGE_SIZE) 3454 s->cpu_partial = 2; 3455 else if (s->size >= 1024) 3456 s->cpu_partial = 6; 3457 else if (s->size >= 256) 3458 s->cpu_partial = 13; 3459 else 3460 s->cpu_partial = 30; 3461 #endif 3462 } 3463 3464 /* 3465 * calculate_sizes() determines the order and the distribution of data within 3466 * a slab object. 3467 */ 3468 static int calculate_sizes(struct kmem_cache *s, int forced_order) 3469 { 3470 slab_flags_t flags = s->flags; 3471 size_t size = s->object_size; 3472 int order; 3473 3474 /* 3475 * Round up object size to the next word boundary. We can only 3476 * place the free pointer at word boundaries and this determines 3477 * the possible location of the free pointer. 3478 */ 3479 size = ALIGN(size, sizeof(void *)); 3480 3481 #ifdef CONFIG_SLUB_DEBUG 3482 /* 3483 * Determine if we can poison the object itself. If the user of 3484 * the slab may touch the object after free or before allocation 3485 * then we should never poison the object itself. 3486 */ 3487 if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) && 3488 !s->ctor) 3489 s->flags |= __OBJECT_POISON; 3490 else 3491 s->flags &= ~__OBJECT_POISON; 3492 3493 3494 /* 3495 * If we are Redzoning then check if there is some space between the 3496 * end of the object and the free pointer. If not then add an 3497 * additional word to have some bytes to store Redzone information. 3498 */ 3499 if ((flags & SLAB_RED_ZONE) && size == s->object_size) 3500 size += sizeof(void *); 3501 #endif 3502 3503 /* 3504 * With that we have determined the number of bytes in actual use 3505 * by the object. This is the potential offset to the free pointer. 3506 */ 3507 s->inuse = size; 3508 3509 if (((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) || 3510 s->ctor)) { 3511 /* 3512 * Relocate free pointer after the object if it is not 3513 * permitted to overwrite the first word of the object on 3514 * kmem_cache_free. 3515 * 3516 * This is the case if we do RCU, have a constructor or 3517 * destructor or are poisoning the objects. 3518 */ 3519 s->offset = size; 3520 size += sizeof(void *); 3521 } 3522 3523 #ifdef CONFIG_SLUB_DEBUG 3524 if (flags & SLAB_STORE_USER) 3525 /* 3526 * Need to store information about allocs and frees after 3527 * the object. 3528 */ 3529 size += 2 * sizeof(struct track); 3530 #endif 3531 3532 kasan_cache_create(s, &size, &s->flags); 3533 #ifdef CONFIG_SLUB_DEBUG 3534 if (flags & SLAB_RED_ZONE) { 3535 /* 3536 * Add some empty padding so that we can catch 3537 * overwrites from earlier objects rather than let 3538 * tracking information or the free pointer be 3539 * corrupted if a user writes before the start 3540 * of the object. 3541 */ 3542 size += sizeof(void *); 3543 3544 s->red_left_pad = sizeof(void *); 3545 s->red_left_pad = ALIGN(s->red_left_pad, s->align); 3546 size += s->red_left_pad; 3547 } 3548 #endif 3549 3550 /* 3551 * SLUB stores one object immediately after another beginning from 3552 * offset 0. In order to align the objects we have to simply size 3553 * each object to conform to the alignment. 
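 * With the debug options enabled, a rough sketch of the resulting layout is:
 * left red zone | object | red zone word | free pointer | two struct track
 * records | padding up to s->align.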
3554 */ 3555 size = ALIGN(size, s->align); 3556 s->size = size; 3557 if (forced_order >= 0) 3558 order = forced_order; 3559 else 3560 order = calculate_order(size, s->reserved); 3561 3562 if (order < 0) 3563 return 0; 3564 3565 s->allocflags = 0; 3566 if (order) 3567 s->allocflags |= __GFP_COMP; 3568 3569 if (s->flags & SLAB_CACHE_DMA) 3570 s->allocflags |= GFP_DMA; 3571 3572 if (s->flags & SLAB_RECLAIM_ACCOUNT) 3573 s->allocflags |= __GFP_RECLAIMABLE; 3574 3575 /* 3576 * Determine the number of objects per slab 3577 */ 3578 s->oo = oo_make(order, size, s->reserved); 3579 s->min = oo_make(get_order(size), size, s->reserved); 3580 if (oo_objects(s->oo) > oo_objects(s->max)) 3581 s->max = s->oo; 3582 3583 return !!oo_objects(s->oo); 3584 } 3585 3586 static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags) 3587 { 3588 s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor); 3589 s->reserved = 0; 3590 #ifdef CONFIG_SLAB_FREELIST_HARDENED 3591 s->random = get_random_long(); 3592 #endif 3593 3594 if (need_reserve_slab_rcu && (s->flags & SLAB_TYPESAFE_BY_RCU)) 3595 s->reserved = sizeof(struct rcu_head); 3596 3597 if (!calculate_sizes(s, -1)) 3598 goto error; 3599 if (disable_higher_order_debug) { 3600 /* 3601 * Disable debugging flags that store metadata if the min slab 3602 * order increased. 3603 */ 3604 if (get_order(s->size) > get_order(s->object_size)) { 3605 s->flags &= ~DEBUG_METADATA_FLAGS; 3606 s->offset = 0; 3607 if (!calculate_sizes(s, -1)) 3608 goto error; 3609 } 3610 } 3611 3612 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \ 3613 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE) 3614 if (system_has_cmpxchg_double() && (s->flags & SLAB_NO_CMPXCHG) == 0) 3615 /* Enable fast mode */ 3616 s->flags |= __CMPXCHG_DOUBLE; 3617 #endif 3618 3619 /* 3620 * The larger the object size is, the more pages we want on the partial 3621 * list to avoid pounding the page allocator excessively. 3622 */ 3623 set_min_partial(s, ilog2(s->size) / 2); 3624 3625 set_cpu_partial(s); 3626 3627 #ifdef CONFIG_NUMA 3628 s->remote_node_defrag_ratio = 1000; 3629 #endif 3630 3631 /* Initialize the pre-computed randomized freelist if slab is up */ 3632 if (slab_state >= UP) { 3633 if (init_cache_random_seq(s)) 3634 goto error; 3635 } 3636 3637 if (!init_kmem_cache_nodes(s)) 3638 goto error; 3639 3640 if (alloc_kmem_cache_cpus(s)) 3641 return 0; 3642 3643 free_kmem_cache_nodes(s); 3644 error: 3645 if (flags & SLAB_PANIC) 3646 panic("Cannot create slab %s size=%lu realsize=%u order=%u offset=%u flags=%lx\n", 3647 s->name, (unsigned long)s->size, s->size, 3648 oo_order(s->oo), s->offset, (unsigned long)flags); 3649 return -EINVAL; 3650 } 3651 3652 static void list_slab_objects(struct kmem_cache *s, struct page *page, 3653 const char *text) 3654 { 3655 #ifdef CONFIG_SLUB_DEBUG 3656 void *addr = page_address(page); 3657 void *p; 3658 unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) * 3659 sizeof(long), GFP_ATOMIC); 3660 if (!map) 3661 return; 3662 slab_err(s, page, text, s->name); 3663 slab_lock(page); 3664 3665 get_map(s, page, map); 3666 for_each_object(p, s, addr, page->objects) { 3667 3668 if (!test_bit(slab_index(p, s, addr), map)) { 3669 pr_err("INFO: Object 0x%p @offset=%tu\n", p, p - addr); 3670 print_tracking(s, p); 3671 } 3672 } 3673 slab_unlock(page); 3674 kfree(map); 3675 #endif 3676 } 3677 3678 /* 3679 * Attempt to free all partial slabs on a node. 3680 * This is called from __kmem_cache_shutdown(). 
We must take list_lock 3681 * because sysfs file might still access partial list after the shutdowning. 3682 */ 3683 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) 3684 { 3685 LIST_HEAD(discard); 3686 struct page *page, *h; 3687 3688 BUG_ON(irqs_disabled()); 3689 spin_lock_irq(&n->list_lock); 3690 list_for_each_entry_safe(page, h, &n->partial, lru) { 3691 if (!page->inuse) { 3692 remove_partial(n, page); 3693 list_add(&page->lru, &discard); 3694 } else { 3695 list_slab_objects(s, page, 3696 "Objects remaining in %s on __kmem_cache_shutdown()"); 3697 } 3698 } 3699 spin_unlock_irq(&n->list_lock); 3700 3701 list_for_each_entry_safe(page, h, &discard, lru) 3702 discard_slab(s, page); 3703 } 3704 3705 /* 3706 * Release all resources used by a slab cache. 3707 */ 3708 int __kmem_cache_shutdown(struct kmem_cache *s) 3709 { 3710 int node; 3711 struct kmem_cache_node *n; 3712 3713 flush_all(s); 3714 /* Attempt to free all objects */ 3715 for_each_kmem_cache_node(s, node, n) { 3716 free_partial(s, n); 3717 if (n->nr_partial || slabs_node(s, node)) 3718 return 1; 3719 } 3720 sysfs_slab_remove(s); 3721 return 0; 3722 } 3723 3724 /******************************************************************** 3725 * Kmalloc subsystem 3726 *******************************************************************/ 3727 3728 static int __init setup_slub_min_order(char *str) 3729 { 3730 get_option(&str, &slub_min_order); 3731 3732 return 1; 3733 } 3734 3735 __setup("slub_min_order=", setup_slub_min_order); 3736 3737 static int __init setup_slub_max_order(char *str) 3738 { 3739 get_option(&str, &slub_max_order); 3740 slub_max_order = min(slub_max_order, MAX_ORDER - 1); 3741 3742 return 1; 3743 } 3744 3745 __setup("slub_max_order=", setup_slub_max_order); 3746 3747 static int __init setup_slub_min_objects(char *str) 3748 { 3749 get_option(&str, &slub_min_objects); 3750 3751 return 1; 3752 } 3753 3754 __setup("slub_min_objects=", setup_slub_min_objects); 3755 3756 void *__kmalloc(size_t size, gfp_t flags) 3757 { 3758 struct kmem_cache *s; 3759 void *ret; 3760 3761 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) 3762 return kmalloc_large(size, flags); 3763 3764 s = kmalloc_slab(size, flags); 3765 3766 if (unlikely(ZERO_OR_NULL_PTR(s))) 3767 return s; 3768 3769 ret = slab_alloc(s, flags, _RET_IP_); 3770 3771 trace_kmalloc(_RET_IP_, ret, size, s->size, flags); 3772 3773 kasan_kmalloc(s, ret, size, flags); 3774 3775 return ret; 3776 } 3777 EXPORT_SYMBOL(__kmalloc); 3778 3779 #ifdef CONFIG_NUMA 3780 static void *kmalloc_large_node(size_t size, gfp_t flags, int node) 3781 { 3782 struct page *page; 3783 void *ptr = NULL; 3784 3785 flags |= __GFP_COMP; 3786 page = alloc_pages_node(node, flags, get_order(size)); 3787 if (page) 3788 ptr = page_address(page); 3789 3790 kmalloc_large_node_hook(ptr, size, flags); 3791 return ptr; 3792 } 3793 3794 void *__kmalloc_node(size_t size, gfp_t flags, int node) 3795 { 3796 struct kmem_cache *s; 3797 void *ret; 3798 3799 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) { 3800 ret = kmalloc_large_node(size, flags, node); 3801 3802 trace_kmalloc_node(_RET_IP_, ret, 3803 size, PAGE_SIZE << get_order(size), 3804 flags, node); 3805 3806 return ret; 3807 } 3808 3809 s = kmalloc_slab(size, flags); 3810 3811 if (unlikely(ZERO_OR_NULL_PTR(s))) 3812 return s; 3813 3814 ret = slab_alloc_node(s, flags, node, _RET_IP_); 3815 3816 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node); 3817 3818 kasan_kmalloc(s, ret, size, flags); 3819 3820 return ret; 3821 } 3822 
EXPORT_SYMBOL(__kmalloc_node); 3823 #endif 3824 3825 #ifdef CONFIG_HARDENED_USERCOPY 3826 /* 3827 * Rejects incorrectly sized objects and objects that are to be copied 3828 * to/from userspace but do not fall entirely within the containing slab 3829 * cache's usercopy region. 3830 * 3831 * Returns NULL if check passes, otherwise const char * to name of cache 3832 * to indicate an error. 3833 */ 3834 void __check_heap_object(const void *ptr, unsigned long n, struct page *page, 3835 bool to_user) 3836 { 3837 struct kmem_cache *s; 3838 unsigned long offset; 3839 size_t object_size; 3840 3841 /* Find object and usable object size. */ 3842 s = page->slab_cache; 3843 3844 /* Reject impossible pointers. */ 3845 if (ptr < page_address(page)) 3846 usercopy_abort("SLUB object not in SLUB page?!", NULL, 3847 to_user, 0, n); 3848 3849 /* Find offset within object. */ 3850 offset = (ptr - page_address(page)) % s->size; 3851 3852 /* Adjust for redzone and reject if within the redzone. */ 3853 if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) { 3854 if (offset < s->red_left_pad) 3855 usercopy_abort("SLUB object in left red zone", 3856 s->name, to_user, offset, n); 3857 offset -= s->red_left_pad; 3858 } 3859 3860 /* Allow address range falling entirely within usercopy region. */ 3861 if (offset >= s->useroffset && 3862 offset - s->useroffset <= s->usersize && 3863 n <= s->useroffset - offset + s->usersize) 3864 return; 3865 3866 /* 3867 * If the copy is still within the allocated object, produce 3868 * a warning instead of rejecting the copy. This is intended 3869 * to be a temporary method to find any missing usercopy 3870 * whitelists. 3871 */ 3872 object_size = slab_ksize(s); 3873 if (usercopy_fallback && 3874 offset <= object_size && n <= object_size - offset) { 3875 usercopy_warn("SLUB object", s->name, to_user, offset, n); 3876 return; 3877 } 3878 3879 usercopy_abort("SLUB object", s->name, to_user, offset, n); 3880 } 3881 #endif /* CONFIG_HARDENED_USERCOPY */ 3882 3883 static size_t __ksize(const void *object) 3884 { 3885 struct page *page; 3886 3887 if (unlikely(object == ZERO_SIZE_PTR)) 3888 return 0; 3889 3890 page = virt_to_head_page(object); 3891 3892 if (unlikely(!PageSlab(page))) { 3893 WARN_ON(!PageCompound(page)); 3894 return PAGE_SIZE << compound_order(page); 3895 } 3896 3897 return slab_ksize(page->slab_cache); 3898 } 3899 3900 size_t ksize(const void *object) 3901 { 3902 size_t size = __ksize(object); 3903 /* We assume that ksize callers could use whole allocated area, 3904 * so we need to unpoison this area. 3905 */ 3906 kasan_unpoison_shadow(object, size); 3907 return size; 3908 } 3909 EXPORT_SYMBOL(ksize); 3910 3911 void kfree(const void *x) 3912 { 3913 struct page *page; 3914 void *object = (void *)x; 3915 3916 trace_kfree(_RET_IP_, x); 3917 3918 if (unlikely(ZERO_OR_NULL_PTR(x))) 3919 return; 3920 3921 page = virt_to_head_page(x); 3922 if (unlikely(!PageSlab(page))) { 3923 BUG_ON(!PageCompound(page)); 3924 kfree_hook(object); 3925 __free_pages(page, compound_order(page)); 3926 return; 3927 } 3928 slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_); 3929 } 3930 EXPORT_SYMBOL(kfree); 3931 3932 #define SHRINK_PROMOTE_MAX 32 3933 3934 /* 3935 * kmem_cache_shrink discards empty slabs and promotes the slabs filled 3936 * up most to the head of the partial lists. New allocations will then 3937 * fill those up and thus they can be removed from the partial lists. 3938 * 3939 * The slabs with the least items are placed last. 
This results in them 3940 * being allocated from last increasing the chance that the last objects 3941 * are freed in them. 3942 */ 3943 int __kmem_cache_shrink(struct kmem_cache *s) 3944 { 3945 int node; 3946 int i; 3947 struct kmem_cache_node *n; 3948 struct page *page; 3949 struct page *t; 3950 struct list_head discard; 3951 struct list_head promote[SHRINK_PROMOTE_MAX]; 3952 unsigned long flags; 3953 int ret = 0; 3954 3955 flush_all(s); 3956 for_each_kmem_cache_node(s, node, n) { 3957 INIT_LIST_HEAD(&discard); 3958 for (i = 0; i < SHRINK_PROMOTE_MAX; i++) 3959 INIT_LIST_HEAD(promote + i); 3960 3961 spin_lock_irqsave(&n->list_lock, flags); 3962 3963 /* 3964 * Build lists of slabs to discard or promote. 3965 * 3966 * Note that concurrent frees may occur while we hold the 3967 * list_lock. page->inuse here is the upper limit. 3968 */ 3969 list_for_each_entry_safe(page, t, &n->partial, lru) { 3970 int free = page->objects - page->inuse; 3971 3972 /* Do not reread page->inuse */ 3973 barrier(); 3974 3975 /* We do not keep full slabs on the list */ 3976 BUG_ON(free <= 0); 3977 3978 if (free == page->objects) { 3979 list_move(&page->lru, &discard); 3980 n->nr_partial--; 3981 } else if (free <= SHRINK_PROMOTE_MAX) 3982 list_move(&page->lru, promote + free - 1); 3983 } 3984 3985 /* 3986 * Promote the slabs filled up most to the head of the 3987 * partial list. 3988 */ 3989 for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--) 3990 list_splice(promote + i, &n->partial); 3991 3992 spin_unlock_irqrestore(&n->list_lock, flags); 3993 3994 /* Release empty slabs */ 3995 list_for_each_entry_safe(page, t, &discard, lru) 3996 discard_slab(s, page); 3997 3998 if (slabs_node(s, node)) 3999 ret = 1; 4000 } 4001 4002 return ret; 4003 } 4004 4005 #ifdef CONFIG_MEMCG 4006 static void kmemcg_cache_deact_after_rcu(struct kmem_cache *s) 4007 { 4008 /* 4009 * Called with all the locks held after a sched RCU grace period. 4010 * Even if @s becomes empty after shrinking, we can't know that @s 4011 * doesn't have allocations already in-flight and thus can't 4012 * destroy @s until the associated memcg is released. 4013 * 4014 * However, let's remove the sysfs files for empty caches here. 4015 * Each cache has a lot of interface files which aren't 4016 * particularly useful for empty draining caches; otherwise, we can 4017 * easily end up with millions of unnecessary sysfs files on 4018 * systems which have a lot of memory and transient cgroups. 4019 */ 4020 if (!__kmem_cache_shrink(s)) 4021 sysfs_slab_remove(s); 4022 } 4023 4024 void __kmemcg_cache_deactivate(struct kmem_cache *s) 4025 { 4026 /* 4027 * Disable empty slabs caching. Used to avoid pinning offline 4028 * memory cgroups by kmem pages that can be freed. 4029 */ 4030 slub_set_cpu_partial(s, 0); 4031 s->min_partial = 0; 4032 4033 /* 4034 * s->cpu_partial is checked locklessly (see put_cpu_partial), so 4035 * we have to make sure the change is visible before shrinking. 
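 * The sched RCU grace period implied by slab_deactivate_memcg_cache_rcu_sched()
 * below is what provides that guarantee before kmemcg_cache_deact_after_rcu()
 * shrinks the cache.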
4036 */ 4037 slab_deactivate_memcg_cache_rcu_sched(s, kmemcg_cache_deact_after_rcu); 4038 } 4039 #endif 4040 4041 static int slab_mem_going_offline_callback(void *arg) 4042 { 4043 struct kmem_cache *s; 4044 4045 mutex_lock(&slab_mutex); 4046 list_for_each_entry(s, &slab_caches, list) 4047 __kmem_cache_shrink(s); 4048 mutex_unlock(&slab_mutex); 4049 4050 return 0; 4051 } 4052 4053 static void slab_mem_offline_callback(void *arg) 4054 { 4055 struct kmem_cache_node *n; 4056 struct kmem_cache *s; 4057 struct memory_notify *marg = arg; 4058 int offline_node; 4059 4060 offline_node = marg->status_change_nid_normal; 4061 4062 /* 4063 * If the node still has available memory. we need kmem_cache_node 4064 * for it yet. 4065 */ 4066 if (offline_node < 0) 4067 return; 4068 4069 mutex_lock(&slab_mutex); 4070 list_for_each_entry(s, &slab_caches, list) { 4071 n = get_node(s, offline_node); 4072 if (n) { 4073 /* 4074 * if n->nr_slabs > 0, slabs still exist on the node 4075 * that is going down. We were unable to free them, 4076 * and offline_pages() function shouldn't call this 4077 * callback. So, we must fail. 4078 */ 4079 BUG_ON(slabs_node(s, offline_node)); 4080 4081 s->node[offline_node] = NULL; 4082 kmem_cache_free(kmem_cache_node, n); 4083 } 4084 } 4085 mutex_unlock(&slab_mutex); 4086 } 4087 4088 static int slab_mem_going_online_callback(void *arg) 4089 { 4090 struct kmem_cache_node *n; 4091 struct kmem_cache *s; 4092 struct memory_notify *marg = arg; 4093 int nid = marg->status_change_nid_normal; 4094 int ret = 0; 4095 4096 /* 4097 * If the node's memory is already available, then kmem_cache_node is 4098 * already created. Nothing to do. 4099 */ 4100 if (nid < 0) 4101 return 0; 4102 4103 /* 4104 * We are bringing a node online. No memory is available yet. We must 4105 * allocate a kmem_cache_node structure in order to bring the node 4106 * online. 4107 */ 4108 mutex_lock(&slab_mutex); 4109 list_for_each_entry(s, &slab_caches, list) { 4110 /* 4111 * XXX: kmem_cache_alloc_node will fallback to other nodes 4112 * since memory is not yet available from the node that 4113 * is brought up. 4114 */ 4115 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL); 4116 if (!n) { 4117 ret = -ENOMEM; 4118 goto out; 4119 } 4120 init_kmem_cache_node(n); 4121 s->node[nid] = n; 4122 } 4123 out: 4124 mutex_unlock(&slab_mutex); 4125 return ret; 4126 } 4127 4128 static int slab_memory_callback(struct notifier_block *self, 4129 unsigned long action, void *arg) 4130 { 4131 int ret = 0; 4132 4133 switch (action) { 4134 case MEM_GOING_ONLINE: 4135 ret = slab_mem_going_online_callback(arg); 4136 break; 4137 case MEM_GOING_OFFLINE: 4138 ret = slab_mem_going_offline_callback(arg); 4139 break; 4140 case MEM_OFFLINE: 4141 case MEM_CANCEL_ONLINE: 4142 slab_mem_offline_callback(arg); 4143 break; 4144 case MEM_ONLINE: 4145 case MEM_CANCEL_OFFLINE: 4146 break; 4147 } 4148 if (ret) 4149 ret = notifier_from_errno(ret); 4150 else 4151 ret = NOTIFY_OK; 4152 return ret; 4153 } 4154 4155 static struct notifier_block slab_memory_callback_nb = { 4156 .notifier_call = slab_memory_callback, 4157 .priority = SLAB_CALLBACK_PRI, 4158 }; 4159 4160 /******************************************************************** 4161 * Basic setup of slabs 4162 *******************************************************************/ 4163 4164 /* 4165 * Used for early kmem_cache structures that were allocated using 4166 * the page allocator. Allocate them properly then fix up the pointers 4167 * that may be pointing to the wrong kmem_cache structure. 
4168 */ 4169 4170 static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache) 4171 { 4172 int node; 4173 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); 4174 struct kmem_cache_node *n; 4175 4176 memcpy(s, static_cache, kmem_cache->object_size); 4177 4178 /* 4179 * This runs very early, and only the boot processor is supposed to be 4180 * up. Even if it weren't true, IRQs are not up so we couldn't fire 4181 * IPIs around. 4182 */ 4183 __flush_cpu_slab(s, smp_processor_id()); 4184 for_each_kmem_cache_node(s, node, n) { 4185 struct page *p; 4186 4187 list_for_each_entry(p, &n->partial, lru) 4188 p->slab_cache = s; 4189 4190 #ifdef CONFIG_SLUB_DEBUG 4191 list_for_each_entry(p, &n->full, lru) 4192 p->slab_cache = s; 4193 #endif 4194 } 4195 slab_init_memcg_params(s); 4196 list_add(&s->list, &slab_caches); 4197 memcg_link_cache(s); 4198 return s; 4199 } 4200 4201 void __init kmem_cache_init(void) 4202 { 4203 static __initdata struct kmem_cache boot_kmem_cache, 4204 boot_kmem_cache_node; 4205 4206 if (debug_guardpage_minorder()) 4207 slub_max_order = 0; 4208 4209 kmem_cache_node = &boot_kmem_cache_node; 4210 kmem_cache = &boot_kmem_cache; 4211 4212 create_boot_cache(kmem_cache_node, "kmem_cache_node", 4213 sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN, 0, 0); 4214 4215 register_hotmemory_notifier(&slab_memory_callback_nb); 4216 4217 /* Able to allocate the per node structures */ 4218 slab_state = PARTIAL; 4219 4220 create_boot_cache(kmem_cache, "kmem_cache", 4221 offsetof(struct kmem_cache, node) + 4222 nr_node_ids * sizeof(struct kmem_cache_node *), 4223 SLAB_HWCACHE_ALIGN, 0, 0); 4224 4225 kmem_cache = bootstrap(&boot_kmem_cache); 4226 4227 /* 4228 * Allocate kmem_cache_node properly from the kmem_cache slab. 4229 * kmem_cache_node is separately allocated so no need to 4230 * update any list pointers. 4231 */ 4232 kmem_cache_node = bootstrap(&boot_kmem_cache_node); 4233 4234 /* Now we can use the kmem_cache to allocate kmalloc slabs */ 4235 setup_kmalloc_cache_index_table(); 4236 create_kmalloc_caches(0); 4237 4238 /* Setup random freelists for each cache */ 4239 init_freelist_randomization(); 4240 4241 cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL, 4242 slub_cpu_dead); 4243 4244 pr_info("SLUB: HWalign=%d, Order=%d-%d, MinObjects=%d, CPUs=%u, Nodes=%d\n", 4245 cache_line_size(), 4246 slub_min_order, slub_max_order, slub_min_objects, 4247 nr_cpu_ids, nr_node_ids); 4248 } 4249 4250 void __init kmem_cache_init_late(void) 4251 { 4252 } 4253 4254 struct kmem_cache * 4255 __kmem_cache_alias(const char *name, size_t size, size_t align, 4256 slab_flags_t flags, void (*ctor)(void *)) 4257 { 4258 struct kmem_cache *s, *c; 4259 4260 s = find_mergeable(size, align, flags, name, ctor); 4261 if (s) { 4262 s->refcount++; 4263 4264 /* 4265 * Adjust the object sizes so that we clear 4266 * the complete object on kzalloc. 
4267 */ 4268 s->object_size = max(s->object_size, (int)size); 4269 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *))); 4270 4271 for_each_memcg_cache(c, s) { 4272 c->object_size = s->object_size; 4273 c->inuse = max_t(int, c->inuse, 4274 ALIGN(size, sizeof(void *))); 4275 } 4276 4277 if (sysfs_slab_alias(s, name)) { 4278 s->refcount--; 4279 s = NULL; 4280 } 4281 } 4282 4283 return s; 4284 } 4285 4286 int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags) 4287 { 4288 int err; 4289 4290 err = kmem_cache_open(s, flags); 4291 if (err) 4292 return err; 4293 4294 /* Mutex is not taken during early boot */ 4295 if (slab_state <= UP) 4296 return 0; 4297 4298 memcg_propagate_slab_attrs(s); 4299 err = sysfs_slab_add(s); 4300 if (err) 4301 __kmem_cache_release(s); 4302 4303 return err; 4304 } 4305 4306 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) 4307 { 4308 struct kmem_cache *s; 4309 void *ret; 4310 4311 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) 4312 return kmalloc_large(size, gfpflags); 4313 4314 s = kmalloc_slab(size, gfpflags); 4315 4316 if (unlikely(ZERO_OR_NULL_PTR(s))) 4317 return s; 4318 4319 ret = slab_alloc(s, gfpflags, caller); 4320 4321 /* Honor the call site pointer we received. */ 4322 trace_kmalloc(caller, ret, size, s->size, gfpflags); 4323 4324 return ret; 4325 } 4326 4327 #ifdef CONFIG_NUMA 4328 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, 4329 int node, unsigned long caller) 4330 { 4331 struct kmem_cache *s; 4332 void *ret; 4333 4334 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) { 4335 ret = kmalloc_large_node(size, gfpflags, node); 4336 4337 trace_kmalloc_node(caller, ret, 4338 size, PAGE_SIZE << get_order(size), 4339 gfpflags, node); 4340 4341 return ret; 4342 } 4343 4344 s = kmalloc_slab(size, gfpflags); 4345 4346 if (unlikely(ZERO_OR_NULL_PTR(s))) 4347 return s; 4348 4349 ret = slab_alloc_node(s, gfpflags, node, caller); 4350 4351 /* Honor the call site pointer we received. 
*/ 4352 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node); 4353 4354 return ret; 4355 } 4356 #endif 4357 4358 #ifdef CONFIG_SYSFS 4359 static int count_inuse(struct page *page) 4360 { 4361 return page->inuse; 4362 } 4363 4364 static int count_total(struct page *page) 4365 { 4366 return page->objects; 4367 } 4368 #endif 4369 4370 #ifdef CONFIG_SLUB_DEBUG 4371 static int validate_slab(struct kmem_cache *s, struct page *page, 4372 unsigned long *map) 4373 { 4374 void *p; 4375 void *addr = page_address(page); 4376 4377 if (!check_slab(s, page) || 4378 !on_freelist(s, page, NULL)) 4379 return 0; 4380 4381 /* Now we know that a valid freelist exists */ 4382 bitmap_zero(map, page->objects); 4383 4384 get_map(s, page, map); 4385 for_each_object(p, s, addr, page->objects) { 4386 if (test_bit(slab_index(p, s, addr), map)) 4387 if (!check_object(s, page, p, SLUB_RED_INACTIVE)) 4388 return 0; 4389 } 4390 4391 for_each_object(p, s, addr, page->objects) 4392 if (!test_bit(slab_index(p, s, addr), map)) 4393 if (!check_object(s, page, p, SLUB_RED_ACTIVE)) 4394 return 0; 4395 return 1; 4396 } 4397 4398 static void validate_slab_slab(struct kmem_cache *s, struct page *page, 4399 unsigned long *map) 4400 { 4401 slab_lock(page); 4402 validate_slab(s, page, map); 4403 slab_unlock(page); 4404 } 4405 4406 static int validate_slab_node(struct kmem_cache *s, 4407 struct kmem_cache_node *n, unsigned long *map) 4408 { 4409 unsigned long count = 0; 4410 struct page *page; 4411 unsigned long flags; 4412 4413 spin_lock_irqsave(&n->list_lock, flags); 4414 4415 list_for_each_entry(page, &n->partial, lru) { 4416 validate_slab_slab(s, page, map); 4417 count++; 4418 } 4419 if (count != n->nr_partial) 4420 pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n", 4421 s->name, count, n->nr_partial); 4422 4423 if (!(s->flags & SLAB_STORE_USER)) 4424 goto out; 4425 4426 list_for_each_entry(page, &n->full, lru) { 4427 validate_slab_slab(s, page, map); 4428 count++; 4429 } 4430 if (count != atomic_long_read(&n->nr_slabs)) 4431 pr_err("SLUB: %s %ld slabs counted but counter=%ld\n", 4432 s->name, count, atomic_long_read(&n->nr_slabs)); 4433 4434 out: 4435 spin_unlock_irqrestore(&n->list_lock, flags); 4436 return count; 4437 } 4438 4439 static long validate_slab_cache(struct kmem_cache *s) 4440 { 4441 int node; 4442 unsigned long count = 0; 4443 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) * 4444 sizeof(unsigned long), GFP_KERNEL); 4445 struct kmem_cache_node *n; 4446 4447 if (!map) 4448 return -ENOMEM; 4449 4450 flush_all(s); 4451 for_each_kmem_cache_node(s, node, n) 4452 count += validate_slab_node(s, n, map); 4453 kfree(map); 4454 return count; 4455 } 4456 /* 4457 * Generate lists of code addresses where slabcache objects are allocated 4458 * and freed. 
4459 */ 4460 4461 struct location { 4462 unsigned long count; 4463 unsigned long addr; 4464 long long sum_time; 4465 long min_time; 4466 long max_time; 4467 long min_pid; 4468 long max_pid; 4469 DECLARE_BITMAP(cpus, NR_CPUS); 4470 nodemask_t nodes; 4471 }; 4472 4473 struct loc_track { 4474 unsigned long max; 4475 unsigned long count; 4476 struct location *loc; 4477 }; 4478 4479 static void free_loc_track(struct loc_track *t) 4480 { 4481 if (t->max) 4482 free_pages((unsigned long)t->loc, 4483 get_order(sizeof(struct location) * t->max)); 4484 } 4485 4486 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags) 4487 { 4488 struct location *l; 4489 int order; 4490 4491 order = get_order(sizeof(struct location) * max); 4492 4493 l = (void *)__get_free_pages(flags, order); 4494 if (!l) 4495 return 0; 4496 4497 if (t->count) { 4498 memcpy(l, t->loc, sizeof(struct location) * t->count); 4499 free_loc_track(t); 4500 } 4501 t->max = max; 4502 t->loc = l; 4503 return 1; 4504 } 4505 4506 static int add_location(struct loc_track *t, struct kmem_cache *s, 4507 const struct track *track) 4508 { 4509 long start, end, pos; 4510 struct location *l; 4511 unsigned long caddr; 4512 unsigned long age = jiffies - track->when; 4513 4514 start = -1; 4515 end = t->count; 4516 4517 for ( ; ; ) { 4518 pos = start + (end - start + 1) / 2; 4519 4520 /* 4521 * There is nothing at "end". If we end up there 4522 * we need to add something to before end. 4523 */ 4524 if (pos == end) 4525 break; 4526 4527 caddr = t->loc[pos].addr; 4528 if (track->addr == caddr) { 4529 4530 l = &t->loc[pos]; 4531 l->count++; 4532 if (track->when) { 4533 l->sum_time += age; 4534 if (age < l->min_time) 4535 l->min_time = age; 4536 if (age > l->max_time) 4537 l->max_time = age; 4538 4539 if (track->pid < l->min_pid) 4540 l->min_pid = track->pid; 4541 if (track->pid > l->max_pid) 4542 l->max_pid = track->pid; 4543 4544 cpumask_set_cpu(track->cpu, 4545 to_cpumask(l->cpus)); 4546 } 4547 node_set(page_to_nid(virt_to_page(track)), l->nodes); 4548 return 1; 4549 } 4550 4551 if (track->addr < caddr) 4552 end = pos; 4553 else 4554 start = pos; 4555 } 4556 4557 /* 4558 * Not found. Insert new tracking element. 
4559 */ 4560 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC)) 4561 return 0; 4562 4563 l = t->loc + pos; 4564 if (pos < t->count) 4565 memmove(l + 1, l, 4566 (t->count - pos) * sizeof(struct location)); 4567 t->count++; 4568 l->count = 1; 4569 l->addr = track->addr; 4570 l->sum_time = age; 4571 l->min_time = age; 4572 l->max_time = age; 4573 l->min_pid = track->pid; 4574 l->max_pid = track->pid; 4575 cpumask_clear(to_cpumask(l->cpus)); 4576 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus)); 4577 nodes_clear(l->nodes); 4578 node_set(page_to_nid(virt_to_page(track)), l->nodes); 4579 return 1; 4580 } 4581 4582 static void process_slab(struct loc_track *t, struct kmem_cache *s, 4583 struct page *page, enum track_item alloc, 4584 unsigned long *map) 4585 { 4586 void *addr = page_address(page); 4587 void *p; 4588 4589 bitmap_zero(map, page->objects); 4590 get_map(s, page, map); 4591 4592 for_each_object(p, s, addr, page->objects) 4593 if (!test_bit(slab_index(p, s, addr), map)) 4594 add_location(t, s, get_track(s, p, alloc)); 4595 } 4596 4597 static int list_locations(struct kmem_cache *s, char *buf, 4598 enum track_item alloc) 4599 { 4600 int len = 0; 4601 unsigned long i; 4602 struct loc_track t = { 0, 0, NULL }; 4603 int node; 4604 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) * 4605 sizeof(unsigned long), GFP_KERNEL); 4606 struct kmem_cache_node *n; 4607 4608 if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location), 4609 GFP_KERNEL)) { 4610 kfree(map); 4611 return sprintf(buf, "Out of memory\n"); 4612 } 4613 /* Push back cpu slabs */ 4614 flush_all(s); 4615 4616 for_each_kmem_cache_node(s, node, n) { 4617 unsigned long flags; 4618 struct page *page; 4619 4620 if (!atomic_long_read(&n->nr_slabs)) 4621 continue; 4622 4623 spin_lock_irqsave(&n->list_lock, flags); 4624 list_for_each_entry(page, &n->partial, lru) 4625 process_slab(&t, s, page, alloc, map); 4626 list_for_each_entry(page, &n->full, lru) 4627 process_slab(&t, s, page, alloc, map); 4628 spin_unlock_irqrestore(&n->list_lock, flags); 4629 } 4630 4631 for (i = 0; i < t.count; i++) { 4632 struct location *l = &t.loc[i]; 4633 4634 if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100) 4635 break; 4636 len += sprintf(buf + len, "%7ld ", l->count); 4637 4638 if (l->addr) 4639 len += sprintf(buf + len, "%pS", (void *)l->addr); 4640 else 4641 len += sprintf(buf + len, "<not-available>"); 4642 4643 if (l->sum_time != l->min_time) { 4644 len += sprintf(buf + len, " age=%ld/%ld/%ld", 4645 l->min_time, 4646 (long)div_u64(l->sum_time, l->count), 4647 l->max_time); 4648 } else 4649 len += sprintf(buf + len, " age=%ld", 4650 l->min_time); 4651 4652 if (l->min_pid != l->max_pid) 4653 len += sprintf(buf + len, " pid=%ld-%ld", 4654 l->min_pid, l->max_pid); 4655 else 4656 len += sprintf(buf + len, " pid=%ld", 4657 l->min_pid); 4658 4659 if (num_online_cpus() > 1 && 4660 !cpumask_empty(to_cpumask(l->cpus)) && 4661 len < PAGE_SIZE - 60) 4662 len += scnprintf(buf + len, PAGE_SIZE - len - 50, 4663 " cpus=%*pbl", 4664 cpumask_pr_args(to_cpumask(l->cpus))); 4665 4666 if (nr_online_nodes > 1 && !nodes_empty(l->nodes) && 4667 len < PAGE_SIZE - 60) 4668 len += scnprintf(buf + len, PAGE_SIZE - len - 50, 4669 " nodes=%*pbl", 4670 nodemask_pr_args(&l->nodes)); 4671 4672 len += sprintf(buf + len, "\n"); 4673 } 4674 4675 free_loc_track(&t); 4676 kfree(map); 4677 if (!t.count) 4678 len += sprintf(buf, "No data\n"); 4679 return len; 4680 } 4681 #endif 4682 4683 #ifdef SLUB_RESILIENCY_TEST 4684 static void __init 
resiliency_test(void) 4685 { 4686 u8 *p; 4687 4688 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || KMALLOC_SHIFT_HIGH < 10); 4689 4690 pr_err("SLUB resiliency testing\n"); 4691 pr_err("-----------------------\n"); 4692 pr_err("A. Corruption after allocation\n"); 4693 4694 p = kzalloc(16, GFP_KERNEL); 4695 p[16] = 0x12; 4696 pr_err("\n1. kmalloc-16: Clobber Redzone/next pointer 0x12->0x%p\n\n", 4697 p + 16); 4698 4699 validate_slab_cache(kmalloc_caches[4]); 4700 4701 /* Hmmm... The next two are dangerous */ 4702 p = kzalloc(32, GFP_KERNEL); 4703 p[32 + sizeof(void *)] = 0x34; 4704 pr_err("\n2. kmalloc-32: Clobber next pointer/next slab 0x34 -> -0x%p\n", 4705 p); 4706 pr_err("If allocated object is overwritten then not detectable\n\n"); 4707 4708 validate_slab_cache(kmalloc_caches[5]); 4709 p = kzalloc(64, GFP_KERNEL); 4710 p += 64 + (get_cycles() & 0xff) * sizeof(void *); 4711 *p = 0x56; 4712 pr_err("\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n", 4713 p); 4714 pr_err("If allocated object is overwritten then not detectable\n\n"); 4715 validate_slab_cache(kmalloc_caches[6]); 4716 4717 pr_err("\nB. Corruption after free\n"); 4718 p = kzalloc(128, GFP_KERNEL); 4719 kfree(p); 4720 *p = 0x78; 4721 pr_err("1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p); 4722 validate_slab_cache(kmalloc_caches[7]); 4723 4724 p = kzalloc(256, GFP_KERNEL); 4725 kfree(p); 4726 p[50] = 0x9a; 4727 pr_err("\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p); 4728 validate_slab_cache(kmalloc_caches[8]); 4729 4730 p = kzalloc(512, GFP_KERNEL); 4731 kfree(p); 4732 p[512] = 0xab; 4733 pr_err("\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p); 4734 validate_slab_cache(kmalloc_caches[9]); 4735 } 4736 #else 4737 #ifdef CONFIG_SYSFS 4738 static void resiliency_test(void) {}; 4739 #endif 4740 #endif 4741 4742 #ifdef CONFIG_SYSFS 4743 enum slab_stat_type { 4744 SL_ALL, /* All slabs */ 4745 SL_PARTIAL, /* Only partially allocated slabs */ 4746 SL_CPU, /* Only slabs used for cpu caches */ 4747 SL_OBJECTS, /* Determine allocated objects not slabs */ 4748 SL_TOTAL /* Determine object capacity not slabs */ 4749 }; 4750 4751 #define SO_ALL (1 << SL_ALL) 4752 #define SO_PARTIAL (1 << SL_PARTIAL) 4753 #define SO_CPU (1 << SL_CPU) 4754 #define SO_OBJECTS (1 << SL_OBJECTS) 4755 #define SO_TOTAL (1 << SL_TOTAL) 4756 4757 #ifdef CONFIG_MEMCG 4758 static bool memcg_sysfs_enabled = IS_ENABLED(CONFIG_SLUB_MEMCG_SYSFS_ON); 4759 4760 static int __init setup_slub_memcg_sysfs(char *str) 4761 { 4762 int v; 4763 4764 if (get_option(&str, &v) > 0) 4765 memcg_sysfs_enabled = v; 4766 4767 return 1; 4768 } 4769 4770 __setup("slub_memcg_sysfs=", setup_slub_memcg_sysfs); 4771 #endif 4772 4773 static ssize_t show_slab_objects(struct kmem_cache *s, 4774 char *buf, unsigned long flags) 4775 { 4776 unsigned long total = 0; 4777 int node; 4778 int x; 4779 unsigned long *nodes; 4780 4781 nodes = kzalloc(sizeof(unsigned long) * nr_node_ids, GFP_KERNEL); 4782 if (!nodes) 4783 return -ENOMEM; 4784 4785 if (flags & SO_CPU) { 4786 int cpu; 4787 4788 for_each_possible_cpu(cpu) { 4789 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, 4790 cpu); 4791 int node; 4792 struct page *page; 4793 4794 page = READ_ONCE(c->page); 4795 if (!page) 4796 continue; 4797 4798 node = page_to_nid(page); 4799 if (flags & SO_TOTAL) 4800 x = page->objects; 4801 else if (flags & SO_OBJECTS) 4802 x = page->inuse; 4803 else 4804 x = 1; 4805 4806 total += x; 4807 nodes[node] += x; 4808 4809 page = slub_percpu_partial_read_once(c); 4810 if (page) { 4811 node = page_to_nid(page); 
4812 if (flags & SO_TOTAL) 4813 WARN_ON_ONCE(1); 4814 else if (flags & SO_OBJECTS) 4815 WARN_ON_ONCE(1); 4816 else 4817 x = page->pages; 4818 total += x; 4819 nodes[node] += x; 4820 } 4821 } 4822 } 4823 4824 get_online_mems(); 4825 #ifdef CONFIG_SLUB_DEBUG 4826 if (flags & SO_ALL) { 4827 struct kmem_cache_node *n; 4828 4829 for_each_kmem_cache_node(s, node, n) { 4830 4831 if (flags & SO_TOTAL) 4832 x = atomic_long_read(&n->total_objects); 4833 else if (flags & SO_OBJECTS) 4834 x = atomic_long_read(&n->total_objects) - 4835 count_partial(n, count_free); 4836 else 4837 x = atomic_long_read(&n->nr_slabs); 4838 total += x; 4839 nodes[node] += x; 4840 } 4841 4842 } else 4843 #endif 4844 if (flags & SO_PARTIAL) { 4845 struct kmem_cache_node *n; 4846 4847 for_each_kmem_cache_node(s, node, n) { 4848 if (flags & SO_TOTAL) 4849 x = count_partial(n, count_total); 4850 else if (flags & SO_OBJECTS) 4851 x = count_partial(n, count_inuse); 4852 else 4853 x = n->nr_partial; 4854 total += x; 4855 nodes[node] += x; 4856 } 4857 } 4858 x = sprintf(buf, "%lu", total); 4859 #ifdef CONFIG_NUMA 4860 for (node = 0; node < nr_node_ids; node++) 4861 if (nodes[node]) 4862 x += sprintf(buf + x, " N%d=%lu", 4863 node, nodes[node]); 4864 #endif 4865 put_online_mems(); 4866 kfree(nodes); 4867 return x + sprintf(buf + x, "\n"); 4868 } 4869 4870 #ifdef CONFIG_SLUB_DEBUG 4871 static int any_slab_objects(struct kmem_cache *s) 4872 { 4873 int node; 4874 struct kmem_cache_node *n; 4875 4876 for_each_kmem_cache_node(s, node, n) 4877 if (atomic_long_read(&n->total_objects)) 4878 return 1; 4879 4880 return 0; 4881 } 4882 #endif 4883 4884 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr) 4885 #define to_slab(n) container_of(n, struct kmem_cache, kobj) 4886 4887 struct slab_attribute { 4888 struct attribute attr; 4889 ssize_t (*show)(struct kmem_cache *s, char *buf); 4890 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count); 4891 }; 4892 4893 #define SLAB_ATTR_RO(_name) \ 4894 static struct slab_attribute _name##_attr = \ 4895 __ATTR(_name, 0400, _name##_show, NULL) 4896 4897 #define SLAB_ATTR(_name) \ 4898 static struct slab_attribute _name##_attr = \ 4899 __ATTR(_name, 0600, _name##_show, _name##_store) 4900 4901 static ssize_t slab_size_show(struct kmem_cache *s, char *buf) 4902 { 4903 return sprintf(buf, "%d\n", s->size); 4904 } 4905 SLAB_ATTR_RO(slab_size); 4906 4907 static ssize_t align_show(struct kmem_cache *s, char *buf) 4908 { 4909 return sprintf(buf, "%d\n", s->align); 4910 } 4911 SLAB_ATTR_RO(align); 4912 4913 static ssize_t object_size_show(struct kmem_cache *s, char *buf) 4914 { 4915 return sprintf(buf, "%d\n", s->object_size); 4916 } 4917 SLAB_ATTR_RO(object_size); 4918 4919 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf) 4920 { 4921 return sprintf(buf, "%d\n", oo_objects(s->oo)); 4922 } 4923 SLAB_ATTR_RO(objs_per_slab); 4924 4925 static ssize_t order_store(struct kmem_cache *s, 4926 const char *buf, size_t length) 4927 { 4928 unsigned long order; 4929 int err; 4930 4931 err = kstrtoul(buf, 10, &order); 4932 if (err) 4933 return err; 4934 4935 if (order > slub_max_order || order < slub_min_order) 4936 return -EINVAL; 4937 4938 calculate_sizes(s, order); 4939 return length; 4940 } 4941 4942 static ssize_t order_show(struct kmem_cache *s, char *buf) 4943 { 4944 return sprintf(buf, "%d\n", oo_order(s->oo)); 4945 } 4946 SLAB_ATTR(order); 4947 4948 static ssize_t min_partial_show(struct kmem_cache *s, char *buf) 4949 { 4950 return sprintf(buf, "%lu\n", s->min_partial); 
4951 } 4952 4953 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf, 4954 size_t length) 4955 { 4956 unsigned long min; 4957 int err; 4958 4959 err = kstrtoul(buf, 10, &min); 4960 if (err) 4961 return err; 4962 4963 set_min_partial(s, min); 4964 return length; 4965 } 4966 SLAB_ATTR(min_partial); 4967 4968 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf) 4969 { 4970 return sprintf(buf, "%u\n", slub_cpu_partial(s)); 4971 } 4972 4973 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf, 4974 size_t length) 4975 { 4976 unsigned long objects; 4977 int err; 4978 4979 err = kstrtoul(buf, 10, &objects); 4980 if (err) 4981 return err; 4982 if (objects && !kmem_cache_has_cpu_partial(s)) 4983 return -EINVAL; 4984 4985 slub_set_cpu_partial(s, objects); 4986 flush_all(s); 4987 return length; 4988 } 4989 SLAB_ATTR(cpu_partial); 4990 4991 static ssize_t ctor_show(struct kmem_cache *s, char *buf) 4992 { 4993 if (!s->ctor) 4994 return 0; 4995 return sprintf(buf, "%pS\n", s->ctor); 4996 } 4997 SLAB_ATTR_RO(ctor); 4998 4999 static ssize_t aliases_show(struct kmem_cache *s, char *buf) 5000 { 5001 return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1); 5002 } 5003 SLAB_ATTR_RO(aliases); 5004 5005 static ssize_t partial_show(struct kmem_cache *s, char *buf) 5006 { 5007 return show_slab_objects(s, buf, SO_PARTIAL); 5008 } 5009 SLAB_ATTR_RO(partial); 5010 5011 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) 5012 { 5013 return show_slab_objects(s, buf, SO_CPU); 5014 } 5015 SLAB_ATTR_RO(cpu_slabs); 5016 5017 static ssize_t objects_show(struct kmem_cache *s, char *buf) 5018 { 5019 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS); 5020 } 5021 SLAB_ATTR_RO(objects); 5022 5023 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf) 5024 { 5025 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS); 5026 } 5027 SLAB_ATTR_RO(objects_partial); 5028 5029 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf) 5030 { 5031 int objects = 0; 5032 int pages = 0; 5033 int cpu; 5034 int len; 5035 5036 for_each_online_cpu(cpu) { 5037 struct page *page; 5038 5039 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); 5040 5041 if (page) { 5042 pages += page->pages; 5043 objects += page->pobjects; 5044 } 5045 } 5046 5047 len = sprintf(buf, "%d(%d)", objects, pages); 5048 5049 #ifdef CONFIG_SMP 5050 for_each_online_cpu(cpu) { 5051 struct page *page; 5052 5053 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); 5054 5055 if (page && len < PAGE_SIZE - 20) 5056 len += sprintf(buf + len, " C%d=%d(%d)", cpu, 5057 page->pobjects, page->pages); 5058 } 5059 #endif 5060 return len + sprintf(buf + len, "\n"); 5061 } 5062 SLAB_ATTR_RO(slabs_cpu_partial); 5063 5064 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) 5065 { 5066 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); 5067 } 5068 5069 static ssize_t reclaim_account_store(struct kmem_cache *s, 5070 const char *buf, size_t length) 5071 { 5072 s->flags &= ~SLAB_RECLAIM_ACCOUNT; 5073 if (buf[0] == '1') 5074 s->flags |= SLAB_RECLAIM_ACCOUNT; 5075 return length; 5076 } 5077 SLAB_ATTR(reclaim_account); 5078 5079 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) 5080 { 5081 return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); 5082 } 5083 SLAB_ATTR_RO(hwcache_align); 5084 5085 #ifdef CONFIG_ZONE_DMA 5086 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) 5087 { 5088 return sprintf(buf, "%d\n", 
!!(s->flags & SLAB_CACHE_DMA)); 5089 } 5090 SLAB_ATTR_RO(cache_dma); 5091 #endif 5092 5093 static ssize_t usersize_show(struct kmem_cache *s, char *buf) 5094 { 5095 return sprintf(buf, "%zu\n", s->usersize); 5096 } 5097 SLAB_ATTR_RO(usersize); 5098 5099 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) 5100 { 5101 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU)); 5102 } 5103 SLAB_ATTR_RO(destroy_by_rcu); 5104 5105 static ssize_t reserved_show(struct kmem_cache *s, char *buf) 5106 { 5107 return sprintf(buf, "%d\n", s->reserved); 5108 } 5109 SLAB_ATTR_RO(reserved); 5110 5111 #ifdef CONFIG_SLUB_DEBUG 5112 static ssize_t slabs_show(struct kmem_cache *s, char *buf) 5113 { 5114 return show_slab_objects(s, buf, SO_ALL); 5115 } 5116 SLAB_ATTR_RO(slabs); 5117 5118 static ssize_t total_objects_show(struct kmem_cache *s, char *buf) 5119 { 5120 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL); 5121 } 5122 SLAB_ATTR_RO(total_objects); 5123 5124 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) 5125 { 5126 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS)); 5127 } 5128 5129 static ssize_t sanity_checks_store(struct kmem_cache *s, 5130 const char *buf, size_t length) 5131 { 5132 s->flags &= ~SLAB_CONSISTENCY_CHECKS; 5133 if (buf[0] == '1') { 5134 s->flags &= ~__CMPXCHG_DOUBLE; 5135 s->flags |= SLAB_CONSISTENCY_CHECKS; 5136 } 5137 return length; 5138 } 5139 SLAB_ATTR(sanity_checks); 5140 5141 static ssize_t trace_show(struct kmem_cache *s, char *buf) 5142 { 5143 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE)); 5144 } 5145 5146 static ssize_t trace_store(struct kmem_cache *s, const char *buf, 5147 size_t length) 5148 { 5149 /* 5150 * Tracing a merged cache is going to give confusing results 5151 * as well as cause other issues like converting a mergeable 5152 * cache into an umergeable one. 
5153 */ 5154 if (s->refcount > 1) 5155 return -EINVAL; 5156 5157 s->flags &= ~SLAB_TRACE; 5158 if (buf[0] == '1') { 5159 s->flags &= ~__CMPXCHG_DOUBLE; 5160 s->flags |= SLAB_TRACE; 5161 } 5162 return length; 5163 } 5164 SLAB_ATTR(trace); 5165 5166 static ssize_t red_zone_show(struct kmem_cache *s, char *buf) 5167 { 5168 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); 5169 } 5170 5171 static ssize_t red_zone_store(struct kmem_cache *s, 5172 const char *buf, size_t length) 5173 { 5174 if (any_slab_objects(s)) 5175 return -EBUSY; 5176 5177 s->flags &= ~SLAB_RED_ZONE; 5178 if (buf[0] == '1') { 5179 s->flags |= SLAB_RED_ZONE; 5180 } 5181 calculate_sizes(s, -1); 5182 return length; 5183 } 5184 SLAB_ATTR(red_zone); 5185 5186 static ssize_t poison_show(struct kmem_cache *s, char *buf) 5187 { 5188 return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON)); 5189 } 5190 5191 static ssize_t poison_store(struct kmem_cache *s, 5192 const char *buf, size_t length) 5193 { 5194 if (any_slab_objects(s)) 5195 return -EBUSY; 5196 5197 s->flags &= ~SLAB_POISON; 5198 if (buf[0] == '1') { 5199 s->flags |= SLAB_POISON; 5200 } 5201 calculate_sizes(s, -1); 5202 return length; 5203 } 5204 SLAB_ATTR(poison); 5205 5206 static ssize_t store_user_show(struct kmem_cache *s, char *buf) 5207 { 5208 return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); 5209 } 5210 5211 static ssize_t store_user_store(struct kmem_cache *s, 5212 const char *buf, size_t length) 5213 { 5214 if (any_slab_objects(s)) 5215 return -EBUSY; 5216 5217 s->flags &= ~SLAB_STORE_USER; 5218 if (buf[0] == '1') { 5219 s->flags &= ~__CMPXCHG_DOUBLE; 5220 s->flags |= SLAB_STORE_USER; 5221 } 5222 calculate_sizes(s, -1); 5223 return length; 5224 } 5225 SLAB_ATTR(store_user); 5226 5227 static ssize_t validate_show(struct kmem_cache *s, char *buf) 5228 { 5229 return 0; 5230 } 5231 5232 static ssize_t validate_store(struct kmem_cache *s, 5233 const char *buf, size_t length) 5234 { 5235 int ret = -EINVAL; 5236 5237 if (buf[0] == '1') { 5238 ret = validate_slab_cache(s); 5239 if (ret >= 0) 5240 ret = length; 5241 } 5242 return ret; 5243 } 5244 SLAB_ATTR(validate); 5245 5246 static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf) 5247 { 5248 if (!(s->flags & SLAB_STORE_USER)) 5249 return -ENOSYS; 5250 return list_locations(s, buf, TRACK_ALLOC); 5251 } 5252 SLAB_ATTR_RO(alloc_calls); 5253 5254 static ssize_t free_calls_show(struct kmem_cache *s, char *buf) 5255 { 5256 if (!(s->flags & SLAB_STORE_USER)) 5257 return -ENOSYS; 5258 return list_locations(s, buf, TRACK_FREE); 5259 } 5260 SLAB_ATTR_RO(free_calls); 5261 #endif /* CONFIG_SLUB_DEBUG */ 5262 5263 #ifdef CONFIG_FAILSLAB 5264 static ssize_t failslab_show(struct kmem_cache *s, char *buf) 5265 { 5266 return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB)); 5267 } 5268 5269 static ssize_t failslab_store(struct kmem_cache *s, const char *buf, 5270 size_t length) 5271 { 5272 if (s->refcount > 1) 5273 return -EINVAL; 5274 5275 s->flags &= ~SLAB_FAILSLAB; 5276 if (buf[0] == '1') 5277 s->flags |= SLAB_FAILSLAB; 5278 return length; 5279 } 5280 SLAB_ATTR(failslab); 5281 #endif 5282 5283 static ssize_t shrink_show(struct kmem_cache *s, char *buf) 5284 { 5285 return 0; 5286 } 5287 5288 static ssize_t shrink_store(struct kmem_cache *s, 5289 const char *buf, size_t length) 5290 { 5291 if (buf[0] == '1') 5292 kmem_cache_shrink(s); 5293 else 5294 return -EINVAL; 5295 return length; 5296 } 5297 SLAB_ATTR(shrink); 5298 5299 #ifdef CONFIG_NUMA 5300 static ssize_t remote_node_defrag_ratio_show(struct 
kmem_cache *s, char *buf) 5301 { 5302 return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10); 5303 } 5304 5305 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s, 5306 const char *buf, size_t length) 5307 { 5308 unsigned long ratio; 5309 int err; 5310 5311 err = kstrtoul(buf, 10, &ratio); 5312 if (err) 5313 return err; 5314 5315 if (ratio <= 100) 5316 s->remote_node_defrag_ratio = ratio * 10; 5317 5318 return length; 5319 } 5320 SLAB_ATTR(remote_node_defrag_ratio); 5321 #endif 5322 5323 #ifdef CONFIG_SLUB_STATS 5324 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si) 5325 { 5326 unsigned long sum = 0; 5327 int cpu; 5328 int len; 5329 int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL); 5330 5331 if (!data) 5332 return -ENOMEM; 5333 5334 for_each_online_cpu(cpu) { 5335 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si]; 5336 5337 data[cpu] = x; 5338 sum += x; 5339 } 5340 5341 len = sprintf(buf, "%lu", sum); 5342 5343 #ifdef CONFIG_SMP 5344 for_each_online_cpu(cpu) { 5345 if (data[cpu] && len < PAGE_SIZE - 20) 5346 len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]); 5347 } 5348 #endif 5349 kfree(data); 5350 return len + sprintf(buf + len, "\n"); 5351 } 5352 5353 static void clear_stat(struct kmem_cache *s, enum stat_item si) 5354 { 5355 int cpu; 5356 5357 for_each_online_cpu(cpu) 5358 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0; 5359 } 5360 5361 #define STAT_ATTR(si, text) \ 5362 static ssize_t text##_show(struct kmem_cache *s, char *buf) \ 5363 { \ 5364 return show_stat(s, buf, si); \ 5365 } \ 5366 static ssize_t text##_store(struct kmem_cache *s, \ 5367 const char *buf, size_t length) \ 5368 { \ 5369 if (buf[0] != '0') \ 5370 return -EINVAL; \ 5371 clear_stat(s, si); \ 5372 return length; \ 5373 } \ 5374 SLAB_ATTR(text); \ 5375 5376 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath); 5377 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath); 5378 STAT_ATTR(FREE_FASTPATH, free_fastpath); 5379 STAT_ATTR(FREE_SLOWPATH, free_slowpath); 5380 STAT_ATTR(FREE_FROZEN, free_frozen); 5381 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial); 5382 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial); 5383 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial); 5384 STAT_ATTR(ALLOC_SLAB, alloc_slab); 5385 STAT_ATTR(ALLOC_REFILL, alloc_refill); 5386 STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch); 5387 STAT_ATTR(FREE_SLAB, free_slab); 5388 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush); 5389 STAT_ATTR(DEACTIVATE_FULL, deactivate_full); 5390 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty); 5391 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head); 5392 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail); 5393 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees); 5394 STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass); 5395 STAT_ATTR(ORDER_FALLBACK, order_fallback); 5396 STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail); 5397 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail); 5398 STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc); 5399 STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free); 5400 STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node); 5401 STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain); 5402 #endif 5403 5404 static struct attribute *slab_attrs[] = { 5405 &slab_size_attr.attr, 5406 &object_size_attr.attr, 5407 &objs_per_slab_attr.attr, 5408 &order_attr.attr, 5409 &min_partial_attr.attr, 5410 &cpu_partial_attr.attr, 5411 &objects_attr.attr, 5412 &objects_partial_attr.attr, 5413 &partial_attr.attr, 5414 &cpu_slabs_attr.attr, 5415 &ctor_attr.attr, 5416 &aliases_attr.attr, 5417
&align_attr.attr, 5418 &hwcache_align_attr.attr, 5419 &reclaim_account_attr.attr, 5420 &destroy_by_rcu_attr.attr, 5421 &shrink_attr.attr, 5422 &reserved_attr.attr, 5423 &slabs_cpu_partial_attr.attr, 5424 #ifdef CONFIG_SLUB_DEBUG 5425 &total_objects_attr.attr, 5426 &slabs_attr.attr, 5427 &sanity_checks_attr.attr, 5428 &trace_attr.attr, 5429 &red_zone_attr.attr, 5430 &poison_attr.attr, 5431 &store_user_attr.attr, 5432 &validate_attr.attr, 5433 &alloc_calls_attr.attr, 5434 &free_calls_attr.attr, 5435 #endif 5436 #ifdef CONFIG_ZONE_DMA 5437 &cache_dma_attr.attr, 5438 #endif 5439 #ifdef CONFIG_NUMA 5440 &remote_node_defrag_ratio_attr.attr, 5441 #endif 5442 #ifdef CONFIG_SLUB_STATS 5443 &alloc_fastpath_attr.attr, 5444 &alloc_slowpath_attr.attr, 5445 &free_fastpath_attr.attr, 5446 &free_slowpath_attr.attr, 5447 &free_frozen_attr.attr, 5448 &free_add_partial_attr.attr, 5449 &free_remove_partial_attr.attr, 5450 &alloc_from_partial_attr.attr, 5451 &alloc_slab_attr.attr, 5452 &alloc_refill_attr.attr, 5453 &alloc_node_mismatch_attr.attr, 5454 &free_slab_attr.attr, 5455 &cpuslab_flush_attr.attr, 5456 &deactivate_full_attr.attr, 5457 &deactivate_empty_attr.attr, 5458 &deactivate_to_head_attr.attr, 5459 &deactivate_to_tail_attr.attr, 5460 &deactivate_remote_frees_attr.attr, 5461 &deactivate_bypass_attr.attr, 5462 &order_fallback_attr.attr, 5463 &cmpxchg_double_fail_attr.attr, 5464 &cmpxchg_double_cpu_fail_attr.attr, 5465 &cpu_partial_alloc_attr.attr, 5466 &cpu_partial_free_attr.attr, 5467 &cpu_partial_node_attr.attr, 5468 &cpu_partial_drain_attr.attr, 5469 #endif 5470 #ifdef CONFIG_FAILSLAB 5471 &failslab_attr.attr, 5472 #endif 5473 &usersize_attr.attr, 5474 5475 NULL 5476 }; 5477 5478 static const struct attribute_group slab_attr_group = { 5479 .attrs = slab_attrs, 5480 }; 5481 5482 static ssize_t slab_attr_show(struct kobject *kobj, 5483 struct attribute *attr, 5484 char *buf) 5485 { 5486 struct slab_attribute *attribute; 5487 struct kmem_cache *s; 5488 int err; 5489 5490 attribute = to_slab_attr(attr); 5491 s = to_slab(kobj); 5492 5493 if (!attribute->show) 5494 return -EIO; 5495 5496 err = attribute->show(s, buf); 5497 5498 return err; 5499 } 5500 5501 static ssize_t slab_attr_store(struct kobject *kobj, 5502 struct attribute *attr, 5503 const char *buf, size_t len) 5504 { 5505 struct slab_attribute *attribute; 5506 struct kmem_cache *s; 5507 int err; 5508 5509 attribute = to_slab_attr(attr); 5510 s = to_slab(kobj); 5511 5512 if (!attribute->store) 5513 return -EIO; 5514 5515 err = attribute->store(s, buf, len); 5516 #ifdef CONFIG_MEMCG 5517 if (slab_state >= FULL && err >= 0 && is_root_cache(s)) { 5518 struct kmem_cache *c; 5519 5520 mutex_lock(&slab_mutex); 5521 if (s->max_attr_size < len) 5522 s->max_attr_size = len; 5523 5524 /* 5525 * This is a best effort propagation, so this function's return 5526 * value will be determined by the parent cache only. This is 5527 * basically because not all attributes will have a well 5528 * defined semantics for rollbacks - most of the actions will 5529 * have permanent effects. 5530 * 5531 * Returning the error value of any of the children that fail 5532 * is not 100 % defined, in the sense that users seeing the 5533 * error code won't be able to know anything about the state of 5534 * the cache. 5535 * 5536 * Only returning the error code for the parent cache at least 5537 * has well defined semantics. The cache being written to 5538 * directly either failed or succeeded, in which case we loop 5539 * through the descendants with best-effort propagation. 
5540 */ 5541 for_each_memcg_cache(c, s) 5542 attribute->store(c, buf, len); 5543 mutex_unlock(&slab_mutex); 5544 } 5545 #endif 5546 return err; 5547 } 5548 5549 static void memcg_propagate_slab_attrs(struct kmem_cache *s) 5550 { 5551 #ifdef CONFIG_MEMCG 5552 int i; 5553 char *buffer = NULL; 5554 struct kmem_cache *root_cache; 5555 5556 if (is_root_cache(s)) 5557 return; 5558 5559 root_cache = s->memcg_params.root_cache; 5560 5561 /* 5562 * This mean this cache had no attribute written. Therefore, no point 5563 * in copying default values around 5564 */ 5565 if (!root_cache->max_attr_size) 5566 return; 5567 5568 for (i = 0; i < ARRAY_SIZE(slab_attrs); i++) { 5569 char mbuf[64]; 5570 char *buf; 5571 struct slab_attribute *attr = to_slab_attr(slab_attrs[i]); 5572 ssize_t len; 5573 5574 if (!attr || !attr->store || !attr->show) 5575 continue; 5576 5577 /* 5578 * It is really bad that we have to allocate here, so we will 5579 * do it only as a fallback. If we actually allocate, though, 5580 * we can just use the allocated buffer until the end. 5581 * 5582 * Most of the slub attributes will tend to be very small in 5583 * size, but sysfs allows buffers up to a page, so they can 5584 * theoretically happen. 5585 */ 5586 if (buffer) 5587 buf = buffer; 5588 else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf)) 5589 buf = mbuf; 5590 else { 5591 buffer = (char *) get_zeroed_page(GFP_KERNEL); 5592 if (WARN_ON(!buffer)) 5593 continue; 5594 buf = buffer; 5595 } 5596 5597 len = attr->show(root_cache, buf); 5598 if (len > 0) 5599 attr->store(s, buf, len); 5600 } 5601 5602 if (buffer) 5603 free_page((unsigned long)buffer); 5604 #endif 5605 } 5606 5607 static void kmem_cache_release(struct kobject *k) 5608 { 5609 slab_kmem_cache_release(to_slab(k)); 5610 } 5611 5612 static const struct sysfs_ops slab_sysfs_ops = { 5613 .show = slab_attr_show, 5614 .store = slab_attr_store, 5615 }; 5616 5617 static struct kobj_type slab_ktype = { 5618 .sysfs_ops = &slab_sysfs_ops, 5619 .release = kmem_cache_release, 5620 }; 5621 5622 static int uevent_filter(struct kset *kset, struct kobject *kobj) 5623 { 5624 struct kobj_type *ktype = get_ktype(kobj); 5625 5626 if (ktype == &slab_ktype) 5627 return 1; 5628 return 0; 5629 } 5630 5631 static const struct kset_uevent_ops slab_uevent_ops = { 5632 .filter = uevent_filter, 5633 }; 5634 5635 static struct kset *slab_kset; 5636 5637 static inline struct kset *cache_kset(struct kmem_cache *s) 5638 { 5639 #ifdef CONFIG_MEMCG 5640 if (!is_root_cache(s)) 5641 return s->memcg_params.root_cache->memcg_kset; 5642 #endif 5643 return slab_kset; 5644 } 5645 5646 #define ID_STR_LENGTH 64 5647 5648 /* Create a unique string id for a slab cache: 5649 * 5650 * Format :[flags-]size 5651 */ 5652 static char *create_unique_id(struct kmem_cache *s) 5653 { 5654 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL); 5655 char *p = name; 5656 5657 BUG_ON(!name); 5658 5659 *p++ = ':'; 5660 /* 5661 * First flags affecting slabcache operations. We will only 5662 * get here for aliasable slabs so we do not need to support 5663 * too many flags. The flags here must cover all flags that 5664 * are matched during merging to guarantee that the id is 5665 * unique. 
5666 */ 5667 if (s->flags & SLAB_CACHE_DMA) 5668 *p++ = 'd'; 5669 if (s->flags & SLAB_RECLAIM_ACCOUNT) 5670 *p++ = 'a'; 5671 if (s->flags & SLAB_CONSISTENCY_CHECKS) 5672 *p++ = 'F'; 5673 if (s->flags & SLAB_ACCOUNT) 5674 *p++ = 'A'; 5675 if (p != name + 1) 5676 *p++ = '-'; 5677 p += sprintf(p, "%07d", s->size); 5678 5679 BUG_ON(p > name + ID_STR_LENGTH - 1); 5680 return name; 5681 } 5682 5683 static void sysfs_slab_remove_workfn(struct work_struct *work) 5684 { 5685 struct kmem_cache *s = 5686 container_of(work, struct kmem_cache, kobj_remove_work); 5687 5688 if (!s->kobj.state_in_sysfs) 5689 /* 5690 * For a memcg cache, this may be called during 5691 * deactivation and again on shutdown. Remove only once. 5692 * A cache is never shut down before deactivation is 5693 * complete, so no need to worry about synchronization. 5694 */ 5695 goto out; 5696 5697 #ifdef CONFIG_MEMCG 5698 kset_unregister(s->memcg_kset); 5699 #endif 5700 kobject_uevent(&s->kobj, KOBJ_REMOVE); 5701 kobject_del(&s->kobj); 5702 out: 5703 kobject_put(&s->kobj); 5704 } 5705 5706 static int sysfs_slab_add(struct kmem_cache *s) 5707 { 5708 int err; 5709 const char *name; 5710 struct kset *kset = cache_kset(s); 5711 int unmergeable = slab_unmergeable(s); 5712 5713 INIT_WORK(&s->kobj_remove_work, sysfs_slab_remove_workfn); 5714 5715 if (!kset) { 5716 kobject_init(&s->kobj, &slab_ktype); 5717 return 0; 5718 } 5719 5720 if (!unmergeable && disable_higher_order_debug && 5721 (slub_debug & DEBUG_METADATA_FLAGS)) 5722 unmergeable = 1; 5723 5724 if (unmergeable) { 5725 /* 5726 * Slabcache can never be merged so we can use the name proper. 5727 * This is typically the case for debug situations. In that 5728 * case we can catch duplicate names easily. 5729 */ 5730 sysfs_remove_link(&slab_kset->kobj, s->name); 5731 name = s->name; 5732 } else { 5733 /* 5734 * Create a unique name for the slab as a target 5735 * for the symlinks. 5736 */ 5737 name = create_unique_id(s); 5738 } 5739 5740 s->kobj.kset = kset; 5741 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name); 5742 if (err) 5743 goto out; 5744 5745 err = sysfs_create_group(&s->kobj, &slab_attr_group); 5746 if (err) 5747 goto out_del_kobj; 5748 5749 #ifdef CONFIG_MEMCG 5750 if (is_root_cache(s) && memcg_sysfs_enabled) { 5751 s->memcg_kset = kset_create_and_add("cgroup", NULL, &s->kobj); 5752 if (!s->memcg_kset) { 5753 err = -ENOMEM; 5754 goto out_del_kobj; 5755 } 5756 } 5757 #endif 5758 5759 kobject_uevent(&s->kobj, KOBJ_ADD); 5760 if (!unmergeable) { 5761 /* Setup first alias */ 5762 sysfs_slab_alias(s, s->name); 5763 } 5764 out: 5765 if (!unmergeable) 5766 kfree(name); 5767 return err; 5768 out_del_kobj: 5769 kobject_del(&s->kobj); 5770 goto out; 5771 } 5772 5773 static void sysfs_slab_remove(struct kmem_cache *s) 5774 { 5775 if (slab_state < FULL) 5776 /* 5777 * Sysfs has not been setup yet so no need to remove the 5778 * cache from sysfs. 5779 */ 5780 return; 5781 5782 kobject_get(&s->kobj); 5783 schedule_work(&s->kobj_remove_work); 5784 } 5785 5786 void sysfs_slab_release(struct kmem_cache *s) 5787 { 5788 if (slab_state >= FULL) 5789 kobject_put(&s->kobj); 5790 } 5791 5792 /* 5793 * Need to buffer aliases during bootup until sysfs becomes 5794 * available lest we lose that information. 
5795 */ 5796 struct saved_alias { 5797 struct kmem_cache *s; 5798 const char *name; 5799 struct saved_alias *next; 5800 }; 5801 5802 static struct saved_alias *alias_list; 5803 5804 static int sysfs_slab_alias(struct kmem_cache *s, const char *name) 5805 { 5806 struct saved_alias *al; 5807 5808 if (slab_state == FULL) { 5809 /* 5810 * If we have a leftover link then remove it. 5811 */ 5812 sysfs_remove_link(&slab_kset->kobj, name); 5813 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name); 5814 } 5815 5816 al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL); 5817 if (!al) 5818 return -ENOMEM; 5819 5820 al->s = s; 5821 al->name = name; 5822 al->next = alias_list; 5823 alias_list = al; 5824 return 0; 5825 } 5826 5827 static int __init slab_sysfs_init(void) 5828 { 5829 struct kmem_cache *s; 5830 int err; 5831 5832 mutex_lock(&slab_mutex); 5833 5834 slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj); 5835 if (!slab_kset) { 5836 mutex_unlock(&slab_mutex); 5837 pr_err("Cannot register slab subsystem.\n"); 5838 return -ENOSYS; 5839 } 5840 5841 slab_state = FULL; 5842 5843 list_for_each_entry(s, &slab_caches, list) { 5844 err = sysfs_slab_add(s); 5845 if (err) 5846 pr_err("SLUB: Unable to add boot slab %s to sysfs\n", 5847 s->name); 5848 } 5849 5850 while (alias_list) { 5851 struct saved_alias *al = alias_list; 5852 5853 alias_list = alias_list->next; 5854 err = sysfs_slab_alias(al->s, al->name); 5855 if (err) 5856 pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n", 5857 al->name); 5858 kfree(al); 5859 } 5860 5861 mutex_unlock(&slab_mutex); 5862 resiliency_test(); 5863 return 0; 5864 } 5865 5866 __initcall(slab_sysfs_init); 5867 #endif /* CONFIG_SYSFS */ 5868 5869 /* 5870 * The /proc/slabinfo ABI 5871 */ 5872 #ifdef CONFIG_SLUB_DEBUG 5873 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo) 5874 { 5875 unsigned long nr_slabs = 0; 5876 unsigned long nr_objs = 0; 5877 unsigned long nr_free = 0; 5878 int node; 5879 struct kmem_cache_node *n; 5880 5881 for_each_kmem_cache_node(s, node, n) { 5882 nr_slabs += node_nr_slabs(n); 5883 nr_objs += node_nr_objs(n); 5884 nr_free += count_partial(n, count_free); 5885 } 5886 5887 sinfo->active_objs = nr_objs - nr_free; 5888 sinfo->num_objs = nr_objs; 5889 sinfo->active_slabs = nr_slabs; 5890 sinfo->num_slabs = nr_slabs; 5891 sinfo->objects_per_slab = oo_objects(s->oo); 5892 sinfo->cache_order = oo_order(s->oo); 5893 } 5894 5895 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s) 5896 { 5897 } 5898 5899 ssize_t slabinfo_write(struct file *file, const char __user *buffer, 5900 size_t count, loff_t *ppos) 5901 { 5902 return -EIO; 5903 } 5904 #endif /* CONFIG_SLUB_DEBUG */ 5905
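The sysfs plumbing above (show_slab_objects(), the SLAB_ATTR()-generated show/store handlers and sysfs_slab_add()) publishes one directory per cache under /sys/kernel/slab/. As a minimal user-space sketch of how those attribute files can be read, and not part of slub.c itself: the cache name "kmalloc-64" and the attribute list below are only examples, and the files exist only on kernels built with CONFIG_SLUB and CONFIG_SYSFS (some attributes further depend on CONFIG_SLUB_DEBUG or CONFIG_SLUB_STATS).

/*
 * Illustrative user-space reader for SLUB's per-cache sysfs attributes.
 * Assumptions: a "kmalloc-64" cache exists and /sys is mounted; adjust
 * the cache name and attribute list as needed.
 */
#include <stdio.h>
#include <string.h>

static int read_attr(const char *cache, const char *attr, char *out, int len)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/kernel/slab/%s/%s", cache, attr);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (!fgets(out, len, f)) {
		fclose(f);
		return -1;
	}
	fclose(f);
	out[strcspn(out, "\n")] = '\0';	/* strip the trailing newline the kernel emits */
	return 0;
}

int main(void)
{
	/* Read-only attributes defined with SLAB_ATTR_RO() in the listing above. */
	const char *attrs[] = { "object_size", "objs_per_slab", "order", "objects", "partial" };
	char val[128];
	unsigned int i;

	for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
		if (read_attr("kmalloc-64", attrs[i], val, (int)sizeof(val)) == 0)
			printf("%-14s %s\n", attrs[i], val);
	}
	return 0;
}

Writing "1" to a cache's shrink or validate attribute (handled by shrink_store() and validate_store() above) triggers kmem_cache_shrink() or a full consistency scan of the cache's slabs, respectively; both writes normally require root.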