/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 *	Cleaned up and restructured to ease the addition of alternative
 *	implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *	Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define	_LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>


/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS	((slab_flags_t __force)0x00000100U)
/* DEBUG: Red zone objs in a cache */
#define SLAB_RED_ZONE		((slab_flags_t __force)0x00000400U)
/* DEBUG: Poison objects */
#define SLAB_POISON		((slab_flags_t __force)0x00000800U)
/* Align objs on cache lines */
#define SLAB_HWCACHE_ALIGN	((slab_flags_t __force)0x00002000U)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA		((slab_flags_t __force)0x00004000U)
/* Use GFP_DMA32 memory */
#define SLAB_CACHE_DMA32	((slab_flags_t __force)0x00008000U)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER		((slab_flags_t __force)0x00010000U)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC		((slab_flags_t __force)0x00040000U)
/*
 * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period; it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid; the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *  rcu_read_lock()
 * again:
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) // might fail for free objects
 *      goto again;
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      goto again;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * only if we can be sure that the memory has not been meanwhile reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
 */
/* Defer freeing slabs to RCU */
#define SLAB_TYPESAFE_BY_RCU	((slab_flags_t __force)0x00080000U)
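/*
 * A brief, hypothetical sketch of the update side matching the lookup
 * pattern above (struct foo, foo_cache, obj->node and foo_release() are
 * illustrative names, not part of this header):
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_TYPESAFE_BY_RCU, NULL);
 *
 *	void foo_release(struct foo *obj)
 *	{
 *		hash_del_rcu(&obj->node);	 // make new lookups miss it
 *		kmem_cache_free(foo_cache, obj); // no call_rcu() needed here;
 *						 // only the backing page is
 *						 // RCU-deferred, see above
 *	}
 *
 * The reader-side validation (try_get_ref()/key check) remains mandatory,
 * since the object slot may be reused for a new "foo" at any time.
 */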
/* Spread some memory over cpuset */
#define SLAB_MEM_SPREAD		((slab_flags_t __force)0x00100000U)
/* Trace allocations and frees */
#define SLAB_TRACE		((slab_flags_t __force)0x00200000U)

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	((slab_flags_t __force)0x00400000U)
#else
# define SLAB_DEBUG_OBJECTS	0
#endif

/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE	((slab_flags_t __force)0x00800000U)

/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		((slab_flags_t __force)0x02000000U)
#else
# define SLAB_FAILSLAB		0
#endif
/* Account to memcg */
#ifdef CONFIG_MEMCG_KMEM
# define SLAB_ACCOUNT		((slab_flags_t __force)0x04000000U)
#else
# define SLAB_ACCOUNT		0
#endif

#ifdef CONFIG_KASAN
#define SLAB_KASAN		((slab_flags_t __force)0x08000000U)
#else
#define SLAB_KASAN		0
#endif

/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000U)
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */

/* Slab deactivation flag */
#define SLAB_DEACTIVATED	((slab_flags_t __force)0x10000000U)

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)

#include <linux/kasan.h>

struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
bool slab_is_available(void);

extern bool usercopy_fallback;

struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
			unsigned int align, slab_flags_t flags,
			void (*ctor)(void *));
struct kmem_cache *kmem_cache_create_usercopy(const char *name,
			unsigned int size, unsigned int align,
			slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize,
			void (*ctor)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);

void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
void memcg_deactivate_kmem_caches(struct mem_cgroup *, struct mem_cgroup *);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * e.g. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags)					\
		kmem_cache_create(#__struct, sizeof(struct __struct),	\
			__alignof__(struct __struct), (__flags), NULL)

/*
 * To whitelist a single field for copying to/from userspace, use this
 * macro instead of KMEM_CACHE() above.
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)			\
		kmem_cache_create_usercopy(#__struct,			\
			sizeof(struct __struct),			\
			__alignof__(struct __struct), (__flags),	\
			offsetof(struct __struct, __field),		\
			sizeof_field(struct __struct, __field), NULL)
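/*
 * A short, hypothetical usage sketch (struct foo, its fields and foo_cachep
 * are illustrative names, not part of this header):
 *
 *	struct foo {
 *		spinlock_t lock;
 *		char name[32];		// the only part exposed to user space
 *	};
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = KMEM_CACHE_USERCOPY(foo,
 *					 SLAB_HWCACHE_ALIGN | SLAB_PANIC,
 *					 name);
 *
 * With hardened usercopy checking enabled, only the whitelisted "name"
 * region of a foo object may be copied to/from user space; a cache created
 * with plain KMEM_CACHE(foo, flags) whitelists nothing.
 */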
/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t __ksize(const void *);
size_t ksize(const void *);

#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
			bool to_user);
#else
static inline void __check_heap_object(const void *ptr, unsigned long n,
				       struct page *page, bool to_user) { }
#endif
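/*
 * A minimal, hypothetical sketch of growing a buffer with krealloc(); the
 * temporary pointer avoids leaking the original allocation when krealloc()
 * fails, since the old buffer is left untouched in that case (buf and
 * new_size are illustrative):
 *
 *	void *tmp = krealloc(buf, new_size, GFP_KERNEL);
 *
 *	if (!tmp)
 *		return -ENOMEM;	// buf is still valid and still owned by us
 *	buf = tmp;
 */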
/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
 * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
 * aligned pointers.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocators is
 * 32 megabyte (2^25) or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It's not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting into an order-1 page
 * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from the byte sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * a byte sized index which can represent 2^8 entries, the size of the object
 * should be equal to or greater than 2^12 / 2^8 = 2^4 = 16.
 * If the minimum size of kmalloc is less than 16, we use it as the minimum
 * object size and give up on using a byte sized index.
 */
#define SLAB_OBJ_MIN_SIZE	(KMALLOC_MIN_SIZE < 16 ? \
				(KMALLOC_MIN_SIZE) : 16)

/*
 * Whenever changing this, make sure that kmalloc_type() and
 * create_kmalloc_caches() still work as intended.
 */
enum kmalloc_cache_type {
	KMALLOC_NORMAL = 0,
	KMALLOC_RECLAIM,
#ifdef CONFIG_ZONE_DMA
	KMALLOC_DMA,
#endif
	NR_KMALLOC_TYPES
};

#ifndef CONFIG_SLOB
extern struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];

static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
{
#ifdef CONFIG_ZONE_DMA
	/*
	 * The most common case is KMALLOC_NORMAL, so test for it
	 * with a single branch for both flags.
	 */
	if (likely((flags & (__GFP_DMA | __GFP_RECLAIMABLE)) == 0))
		return KMALLOC_NORMAL;

	/*
	 * At least one of the flags has to be set. If both are, __GFP_DMA
	 * is more important.
	 */
	return flags & __GFP_DMA ? KMALLOC_DMA : KMALLOC_RECLAIM;
#else
	return flags & __GFP_RECLAIMABLE ? KMALLOC_RECLAIM : KMALLOC_NORMAL;
#endif
}
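/*
 * Purely illustrative examples of the mapping implemented above:
 *
 *	kmalloc_type(GFP_KERNEL)			-> KMALLOC_NORMAL
 *	kmalloc_type(GFP_KERNEL | __GFP_RECLAIMABLE)	-> KMALLOC_RECLAIM
 *	kmalloc_type(GFP_KERNEL | __GFP_DMA)		-> KMALLOC_DMA
 *							(CONFIG_ZONE_DMA only)
 */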
/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 */
static __always_inline unsigned int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	if (size <=  4 * 1024 * 1024) return 22;
	if (size <=  8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;
	if (size <= 64 * 1024 * 1024) return 26;
	BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
#endif /* !CONFIG_SLOB */

void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *, void *);

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

/*
 * Caller must not use kfree_bulk() on memory not originally allocated
 * by kmalloc(), because the SLOB allocator cannot handle this.
 */
static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}
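/*
 * A minimal, hypothetical sketch of the bulk interfaces (foo_cachep, NR_OBJS
 * and objs[] are illustrative). As noted above, interrupts must be enabled:
 *
 *	void *objs[NR_OBJS];
 *
 *	if (kmem_cache_alloc_bulk(foo_cachep, GFP_KERNEL, NR_OBJS, objs)
 *							!= NR_OBJS)
 *		return -ENOMEM;	// on failure no partially allocated
 *				// objects are left behind
 *	...
 *	kmem_cache_free_bulk(foo_cachep, NR_OBJS, objs);
 */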
#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
	return kmem_cache_alloc(s, flags);
}
#endif

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					 gfp_t gfpflags,
					 int node, size_t size) __assume_slab_alignment __malloc;
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */

#else /* CONFIG_TRACING */
static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
		gfp_t flags, size_t size)
{
	void *ret = kmem_cache_alloc(s, flags);

	ret = kasan_kmalloc(s, ret, size, flags);
	return ret;
}

static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	void *ret = kmem_cache_alloc_node(s, gfpflags, node);

	ret = kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
#endif /* CONFIG_TRACING */

extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
#else
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	return kmalloc_order_trace(size, flags, order);
}

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The @flags argument may be one of the GFP flags defined at
 * include/linux/gfp.h and described at
 * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
 *
 * The recommended usage of the @flags is described at
 * :ref:`Documentation/core-api/memory-allocation.rst <memory-allocation>`
 *
 * Below is a brief outline of the most useful GFP flags
 *
 * %GFP_KERNEL
 *	Allocate normal kernel ram. May sleep.
 *
 * %GFP_NOWAIT
 *	Allocation will not sleep.
 *
 * %GFP_ATOMIC
 *	Allocation will not sleep. May use emergency pools.
 *
 * %GFP_HIGHUSER
 *	Allocate memory from high memory on behalf of user.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_HIGH
 *	This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL
 *	Indicate that this allocation is in no way allowed to fail
 *	(think twice before using).
 *
 * %__GFP_NORETRY
 *	If memory is not immediately available,
 *	then give up at once.
 *
 * %__GFP_NOWARN
 *	If allocation fails, don't issue any warnings.
 *
 * %__GFP_RETRY_MAYFAIL
 *	Try really hard to succeed the allocation but fail
 *	eventually.
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
#ifndef CONFIG_SLOB
		unsigned int index;
#endif
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		index = kmalloc_index(size);

		if (!index)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_trace(
				kmalloc_caches[kmalloc_type(flags)][index],
				flags, size);
#endif
	}
	return __kmalloc(size, flags);
}
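/*
 * A short, hypothetical example of the most common pattern (struct foo and
 * the local variable are illustrative):
 *
 *	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);
 *
 *	if (!p)
 *		return -ENOMEM;
 *	...
 *	kfree(p);
 *
 * In atomic context (e.g. under a spinlock or in an interrupt handler)
 * GFP_ATOMIC would be used instead of GFP_KERNEL, since GFP_KERNEL may sleep.
 */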
/*
 * Determine size used for the nth kmalloc cache.
 * Return the size, or 0 if a kmalloc cache for that
 * size does not exist.
 */
static __always_inline unsigned int kmalloc_size(unsigned int n)
{
#ifndef CONFIG_SLOB
	if (n > 2)
		return 1U << n;

	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
		return 96;

	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
		return 192;
#endif
	return 0;
}

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE) {
		unsigned int i = kmalloc_index(size);

		if (!i)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(
				kmalloc_caches[kmalloc_type(flags)][i],
						flags, node, size);
	}
#endif
	return __kmalloc_node(size, flags, node);
}

struct memcg_cache_array {
	struct rcu_head rcu;
	struct kmem_cache *entries[0];
};

/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * Both the root cache and the child caches will have it. For the root cache,
 * this will hold a dynamically allocated array large enough to hold
 * information about the currently limited memcgs in the system. To allow the
 * array to be accessed without taking any locks, on relocation we free the old
 * version only after a grace period.
 *
 * Root and child caches hold different metadata.
 *
 * @root_cache:	Common to root and child caches.  NULL for root, pointer to
 *		the root cache for children.
 *
 * The following fields are specific to root caches.
 *
 * @memcg_caches: kmemcg ID indexed table of child caches.  This table is
 *		used to index child caches during allocation and cleared
 *		early during shutdown.
 *
 * @root_caches_node: List node for slab_root_caches list.
 *
 * @children:	List of all child caches.  While the child caches are also
 *		reachable through @memcg_caches, a child cache remains on
 *		this list until it is actually destroyed.
 *
 * The following fields are specific to child caches.
 *
 * @memcg:	Pointer to the memcg this cache belongs to.
 *
 * @children_node: List node for @root_cache->children list.
 *
 * @kmem_caches_node: List node for @memcg->kmem_caches list.
 */
struct memcg_cache_params {
	struct kmem_cache *root_cache;
	union {
		struct {
			struct memcg_cache_array __rcu *memcg_caches;
			struct list_head __root_caches_node;
			struct list_head children;
			bool dying;
		};
		struct {
			struct mem_cgroup *memcg;
			struct list_head children_node;
			struct list_head kmem_caches_node;
			struct percpu_ref refcnt;

			void (*work_fn)(struct kmem_cache *);
			union {
				struct rcu_head rcu_head;
				struct work_struct work;
			};
		};
	};
};

int memcg_update_all_caches(int num_memcgs);

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc(bytes, flags);
	return __kmalloc(bytes, flags);
}

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}
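/*
 * A brief, hypothetical example: allocating an array of n elements with
 * overflow checking instead of an open-coded kmalloc(n * sizeof(*arr), ...)
 * multiplication (arr and n are illustrative):
 *
 *	struct foo *arr = kcalloc(n, sizeof(*arr), GFP_KERNEL);
 *
 *	if (!arr)
 *		return -ENOMEM;
 *	...
 *	kfree(arr);
 *
 * kcalloc() additionally zeroes the memory; kmalloc_array() is the
 * non-zeroing variant.
 */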
/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)
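/*
 * A minimal, hypothetical sketch of the intended use: a subsystem-wide
 * allocation wrapper (my_subsys_alloc is an illustrative name) whose
 * allocations should be attributed to the wrapper's caller rather than
 * to the wrapper itself:
 *
 *	void *my_subsys_alloc(size_t len)
 *	{
 *		return kmalloc_track_caller(len, GFP_KERNEL);
 *	}
 */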
static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
				       int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_node(bytes, flags, node);
	return __kmalloc_node(bytes, flags, node);
}

static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
}


#ifdef CONFIG_NUMA
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

unsigned int kmem_cache_size(struct kmem_cache *s);
void __init kmem_cache_init_late(void);

#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
int slab_prepare_cpu(unsigned int cpu);
int slab_dead_cpu(unsigned int cpu);
#else
#define slab_prepare_cpu	NULL
#define slab_dead_cpu		NULL
#endif

#endif	/* _LINUX_SLAB_H */