TOMOYO Linux Cross Reference
Linux/mm/util.c

  1 // SPDX-License-Identifier: GPL-2.0-only
  2 #include <linux/mm.h>
  3 #include <linux/slab.h>
  4 #include <linux/string.h>
  5 #include <linux/compiler.h>
  6 #include <linux/export.h>
  7 #include <linux/err.h>
  8 #include <linux/sched.h>
  9 #include <linux/sched/mm.h>
 10 #include <linux/sched/signal.h>
 11 #include <linux/sched/task_stack.h>
 12 #include <linux/security.h>
 13 #include <linux/swap.h>
 14 #include <linux/swapops.h>
 15 #include <linux/mman.h>
 16 #include <linux/hugetlb.h>
 17 #include <linux/vmalloc.h>
 18 #include <linux/userfaultfd_k.h>
 19 #include <linux/elf.h>
 20 #include <linux/elf-randomize.h>
 21 #include <linux/personality.h>
 22 #include <linux/random.h>
 23 #include <linux/processor.h>
 24 #include <linux/sizes.h>
 25 #include <linux/compat.h>
 26 
 27 #include <linux/uaccess.h>
 28 
 29 #include "internal.h"
 30 
 31 /**
 32  * kfree_const - conditionally free memory
 33  * @x: pointer to the memory
 34  *
 35  * This function calls kfree() only if @x is not in the .rodata section.
 36  */
 37 void kfree_const(const void *x)
 38 {
 39         if (!is_kernel_rodata((unsigned long)x))
 40                 kfree(x);
 41 }
 42 EXPORT_SYMBOL(kfree_const);
 43 
 44 /**
 45  * kstrdup - allocate space for and copy an existing string
 46  * @s: the string to duplicate
 47  * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 48  *
 49  * Return: newly allocated copy of @s or %NULL in case of error
 50  */
 51 char *kstrdup(const char *s, gfp_t gfp)
 52 {
 53         size_t len;
 54         char *buf;
 55 
 56         if (!s)
 57                 return NULL;
 58 
 59         len = strlen(s) + 1;
 60         buf = kmalloc_track_caller(len, gfp);
 61         if (buf)
 62                 memcpy(buf, s, len);
 63         return buf;
 64 }
 65 EXPORT_SYMBOL(kstrdup);
 66 
 67 /**
 68  * kstrdup_const - conditionally duplicate an existing const string
 69  * @s: the string to duplicate
 70  * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 71  *
 72  * Note: Strings allocated by kstrdup_const should be freed by kfree_const and
 73  * must not be passed to krealloc().
 74  *
 75  * Return: the source string if it is in the .rodata section, otherwise
 76  * a copy allocated with kstrdup().
 77  */
 78 const char *kstrdup_const(const char *s, gfp_t gfp)
 79 {
 80         if (is_kernel_rodata((unsigned long)s))
 81                 return s;
 82 
 83         return kstrdup(s, gfp);
 84 }
 85 EXPORT_SYMBOL(kstrdup_const);
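
/*
 * Illustrative sketch, not part of mm/util.c: pairing kstrdup_const() with
 * kfree_const() so that names living in .rodata are never actually copied.
 * struct example_attr and the two helpers are hypothetical.
 */
struct example_attr {
        const char *name;
};

static int example_attr_set_name(struct example_attr *attr, const char *name)
{
        attr->name = kstrdup_const(name, GFP_KERNEL);   /* no copy for .rodata */
        return attr->name ? 0 : -ENOMEM;
}

static void example_attr_release(struct example_attr *attr)
{
        kfree_const(attr->name);        /* frees only if a copy was made */
}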
 86 
 87 /**
 88  * kstrndup - allocate space for and copy an existing string
 89  * @s: the string to duplicate
 90  * @max: read at most @max chars from @s
 91  * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 92  *
 93  * Note: Use kmemdup_nul() instead if the size is known exactly.
 94  *
 95  * Return: newly allocated copy of @s or %NULL in case of error
 96  */
 97 char *kstrndup(const char *s, size_t max, gfp_t gfp)
 98 {
 99         size_t len;
100         char *buf;
101 
102         if (!s)
103                 return NULL;
104 
105         len = strnlen(s, max);
106         buf = kmalloc_track_caller(len+1, gfp);
107         if (buf) {
108                 memcpy(buf, s, len);
109                 buf[len] = '\0';
110         }
111         return buf;
112 }
113 EXPORT_SYMBOL(kstrndup);
114 
115 /**
116  * kmemdup - duplicate region of memory
117  *
118  * @src: memory region to duplicate
119  * @len: memory region length
120  * @gfp: GFP mask to use
121  *
122  * Return: newly allocated copy of @src or %NULL in case of error
123  */
124 void *kmemdup(const void *src, size_t len, gfp_t gfp)
125 {
126         void *p;
127 
128         p = kmalloc_track_caller(len, gfp);
129         if (p)
130                 memcpy(p, src, len);
131         return p;
132 }
133 EXPORT_SYMBOL(kmemdup);
134 
135 /**
136  * kmemdup_nul - Create a NUL-terminated string from unterminated data
137  * @s: The data to stringify
138  * @len: The size of the data
139  * @gfp: the GFP mask used in the kmalloc() call when allocating memory
140  *
141  * Return: newly allocated copy of @s with NUL-termination or %NULL in
142  * case of error
143  */
144 char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
145 {
146         char *buf;
147 
148         if (!s)
149                 return NULL;
150 
151         buf = kmalloc_track_caller(len + 1, gfp);
152         if (buf) {
153                 memcpy(buf, s, len);
154                 buf[len] = '\0';
155         }
156         return buf;
157 }
158 EXPORT_SYMBOL(kmemdup_nul);
159 
160 /**
161  * memdup_user - duplicate memory region from user space
162  *
163  * @src: source address in user space
164  * @len: number of bytes to copy
165  *
166  * Return: an ERR_PTR() on failure.  Result is physically
167  * contiguous, to be freed by kfree().
168  */
169 void *memdup_user(const void __user *src, size_t len)
170 {
171         void *p;
172 
173         p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
174         if (!p)
175                 return ERR_PTR(-ENOMEM);
176 
177         if (copy_from_user(p, src, len)) {
178                 kfree(p);
179                 return ERR_PTR(-EFAULT);
180         }
181 
182         return p;
183 }
184 EXPORT_SYMBOL(memdup_user);
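
/*
 * Illustrative sketch, not part of mm/util.c: the usual error-handling
 * pattern for memdup_user() in an ioctl-style handler.  The function name
 * and the PAGE_SIZE sanity limit are arbitrary choices for the example.
 */
static long example_ioctl_copy(const void __user *uarg, size_t len)
{
        void *buf;

        if (len > PAGE_SIZE)
                return -EINVAL;

        buf = memdup_user(uarg, len);
        if (IS_ERR(buf))
                return PTR_ERR(buf);    /* -ENOMEM or -EFAULT */

        /* ... parse buf ... */

        kfree(buf);                     /* result is kmalloc'ed, so plain kfree() */
        return 0;
}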
185 
186 /**
187  * vmemdup_user - duplicate memory region from user space
188  *
189  * @src: source address in user space
190  * @len: number of bytes to copy
191  *
192  * Return: an ERR_PTR() on failure.  The result may not be
193  * physically contiguous.  Use kvfree() to free it.
194  */
195 void *vmemdup_user(const void __user *src, size_t len)
196 {
197         void *p;
198 
199         p = kvmalloc(len, GFP_USER);
200         if (!p)
201                 return ERR_PTR(-ENOMEM);
202 
203         if (copy_from_user(p, src, len)) {
204                 kvfree(p);
205                 return ERR_PTR(-EFAULT);
206         }
207 
208         return p;
209 }
210 EXPORT_SYMBOL(vmemdup_user);
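
/*
 * Illustrative sketch, not part of mm/util.c: unlike memdup_user(), the
 * result of vmemdup_user() may be vmalloc-backed, so it must be released
 * with kvfree().  The helper name is hypothetical.
 */
static void *example_copy_big_blob(const void __user *src, size_t len)
{
        void *blob = vmemdup_user(src, len);

        if (IS_ERR(blob))
                return blob;            /* propagate the ERR_PTR() */

        /* ... validate blob ...; the caller eventually does kvfree(blob) */
        return blob;
}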
211 
212 /**
213  * strndup_user - duplicate an existing string from user space
214  * @s: The string to duplicate
215  * @n: Maximum number of bytes to copy, including the trailing NUL.
216  *
217  * Return: newly allocated copy of @s or an ERR_PTR() in case of error
218  */
219 char *strndup_user(const char __user *s, long n)
220 {
221         char *p;
222         long length;
223 
224         length = strnlen_user(s, n);
225 
226         if (!length)
227                 return ERR_PTR(-EFAULT);
228 
229         if (length > n)
230                 return ERR_PTR(-EINVAL);
231 
232         p = memdup_user(s, length);
233 
234         if (IS_ERR(p))
235                 return p;
236 
237         p[length - 1] = '\0';
238 
239         return p;
240 }
241 EXPORT_SYMBOL(strndup_user);
242 
243 /**
244  * memdup_user_nul - duplicate memory region from user space and NUL-terminate
245  *
246  * @src: source address in user space
247  * @len: number of bytes to copy
248  *
249  * Return: an ERR_PTR() on failure.
250  */
251 void *memdup_user_nul(const void __user *src, size_t len)
252 {
253         char *p;
254 
255         /*
256          * Always use GFP_KERNEL, since copy_from_user() can sleep and
257          * cause a page fault, which makes it pointless to use GFP_NOFS
258          * or GFP_ATOMIC.
259          */
260         p = kmalloc_track_caller(len + 1, GFP_KERNEL);
261         if (!p)
262                 return ERR_PTR(-ENOMEM);
263 
264         if (copy_from_user(p, src, len)) {
265                 kfree(p);
266                 return ERR_PTR(-EFAULT);
267         }
268         p[len] = '\0';
269 
270         return p;
271 }
272 EXPORT_SYMBOL(memdup_user_nul);
273 
274 void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
275                 struct vm_area_struct *prev)
276 {
277         struct vm_area_struct *next;
278 
279         vma->vm_prev = prev;
280         if (prev) {
281                 next = prev->vm_next;
282                 prev->vm_next = vma;
283         } else {
284                 next = mm->mmap;
285                 mm->mmap = vma;
286         }
287         vma->vm_next = next;
288         if (next)
289                 next->vm_prev = vma;
290 }
291 
292 void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma)
293 {
294         struct vm_area_struct *prev, *next;
295 
296         next = vma->vm_next;
297         prev = vma->vm_prev;
298         if (prev)
299                 prev->vm_next = next;
300         else
301                 mm->mmap = next;
302         if (next)
303                 next->vm_prev = prev;
304 }
305 
306 /* Check if the vma is being used as a stack by this task */
307 int vma_is_stack_for_current(struct vm_area_struct *vma)
308 {
309         struct task_struct * __maybe_unused t = current;
310 
311         return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
312 }
313 
314 /*
315  * Change backing file, only valid to use during initial VMA setup.
316  */
317 void vma_set_file(struct vm_area_struct *vma, struct file *file)
318 {
319         /* Changing an anonymous vma with this is illegal */
320         get_file(file);
321         swap(vma->vm_file, file);
322         fput(file);
323 }
324 EXPORT_SYMBOL(vma_set_file);
325 
326 #ifndef STACK_RND_MASK
327 #define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))     /* 8MB of VA */
328 #endif
329 
330 unsigned long randomize_stack_top(unsigned long stack_top)
331 {
332         unsigned long random_variable = 0;
333 
334         if (current->flags & PF_RANDOMIZE) {
335                 random_variable = get_random_long();
336                 random_variable &= STACK_RND_MASK;
337                 random_variable <<= PAGE_SHIFT;
338         }
339 #ifdef CONFIG_STACK_GROWSUP
340         return PAGE_ALIGN(stack_top) + random_variable;
341 #else
342         return PAGE_ALIGN(stack_top) - random_variable;
343 #endif
344 }
345 
346 /**
347  * randomize_page - Generate a random, page aligned address
348  * @start:      The smallest acceptable address the caller will take.
349  * @range:      The size of the area, starting at @start, within which the
350  *              random address must fall.
351  *
352  * If @start + @range would overflow, @range is capped.
353  *
354  * NOTE: Historical use of randomize_range, which this replaces, presumed that
355  * @start was already page aligned.  We now align it regardless.
356  *
357  * Return: A page aligned address within [start, start + range).  On error,
358  * @start is returned.
359  */
360 unsigned long randomize_page(unsigned long start, unsigned long range)
361 {
362         if (!PAGE_ALIGNED(start)) {
363                 range -= PAGE_ALIGN(start) - start;
364                 start = PAGE_ALIGN(start);
365         }
366 
367         if (start > ULONG_MAX - range)
368                 range = ULONG_MAX - start;
369 
370         range >>= PAGE_SHIFT;
371 
372         if (range == 0)
373                 return start;
374 
375         return start + (get_random_long() % range << PAGE_SHIFT);
376 }
377 
378 #ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
379 unsigned long arch_randomize_brk(struct mm_struct *mm)
380 {
381         /* Is the current task 32-bit? */
382         if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
383                 return randomize_page(mm->brk, SZ_32M);
384 
385         return randomize_page(mm->brk, SZ_1G);
386 }
387 
388 unsigned long arch_mmap_rnd(void)
389 {
390         unsigned long rnd;
391 
392 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
393         if (is_compat_task())
394                 rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
395         else
396 #endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
397                 rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
398 
399         return rnd << PAGE_SHIFT;
400 }
401 
402 static int mmap_is_legacy(struct rlimit *rlim_stack)
403 {
404         if (current->personality & ADDR_COMPAT_LAYOUT)
405                 return 1;
406 
407         if (rlim_stack->rlim_cur == RLIM_INFINITY)
408                 return 1;
409 
410         return sysctl_legacy_va_layout;
411 }
412 
413 /*
414  * Leave enough space between the mmap area and the stack to honour ulimit in
415  * the face of randomisation.
416  */
417 #define MIN_GAP         (SZ_128M)
418 #define MAX_GAP         (STACK_TOP / 6 * 5)
419 
420 static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
421 {
422         unsigned long gap = rlim_stack->rlim_cur;
423         unsigned long pad = stack_guard_gap;
424 
425         /* Account for stack randomization if necessary */
426         if (current->flags & PF_RANDOMIZE)
427                 pad += (STACK_RND_MASK << PAGE_SHIFT);
428 
429         /* Values close to RLIM_INFINITY can overflow. */
430         if (gap + pad > gap)
431                 gap += pad;
432 
433         if (gap < MIN_GAP)
434                 gap = MIN_GAP;
435         else if (gap > MAX_GAP)
436                 gap = MAX_GAP;
437 
438         return PAGE_ALIGN(STACK_TOP - gap - rnd);
439 }
440 
441 void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
442 {
443         unsigned long random_factor = 0UL;
444 
445         if (current->flags & PF_RANDOMIZE)
446                 random_factor = arch_mmap_rnd();
447 
448         if (mmap_is_legacy(rlim_stack)) {
449                 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
450                 mm->get_unmapped_area = arch_get_unmapped_area;
451         } else {
452                 mm->mmap_base = mmap_base(random_factor, rlim_stack);
453                 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
454         }
455 }
456 #elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
457 void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
458 {
459         mm->mmap_base = TASK_UNMAPPED_BASE;
460         mm->get_unmapped_area = arch_get_unmapped_area;
461 }
462 #endif
463 
464 /**
465  * __account_locked_vm - account locked pages to an mm's locked_vm
466  * @mm:          mm to account against
467  * @pages:       number of pages to account
468  * @inc:         %true if @pages should be considered positive, %false if not
469  * @task:        task used to check RLIMIT_MEMLOCK
470  * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
471  *
472  * Assumes @task and @mm are valid (i.e. at least one reference on each), and
473  * that mmap_lock is held as writer.
474  *
475  * Return:
476  * * 0       on success
477  * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
478  */
479 int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
480                         struct task_struct *task, bool bypass_rlim)
481 {
482         unsigned long locked_vm, limit;
483         int ret = 0;
484 
485         mmap_assert_write_locked(mm);
486 
487         locked_vm = mm->locked_vm;
488         if (inc) {
489                 if (!bypass_rlim) {
490                         limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
491                         if (locked_vm + pages > limit)
492                                 ret = -ENOMEM;
493                 }
494                 if (!ret)
495                         mm->locked_vm = locked_vm + pages;
496         } else {
497                 WARN_ON_ONCE(pages > locked_vm);
498                 mm->locked_vm = locked_vm - pages;
499         }
500 
501         pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
502                  (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
503                  locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
504                  ret ? " - exceeded" : "");
505 
506         return ret;
507 }
508 EXPORT_SYMBOL_GPL(__account_locked_vm);
509 
510 /**
511  * account_locked_vm - account locked pages to an mm's locked_vm
512  * @mm:          mm to account against, may be NULL
513  * @pages:       number of pages to account
514  * @inc:         %true if @pages should be considered positive, %false if not
515  *
516  * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
517  *
518  * Return:
519  * * 0       on success, or if mm is NULL
520  * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
521  */
522 int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
523 {
524         int ret;
525 
526         if (pages == 0 || !mm)
527                 return 0;
528 
529         mmap_write_lock(mm);
530         ret = __account_locked_vm(mm, pages, inc, current,
531                                   capable(CAP_IPC_LOCK));
532         mmap_write_unlock(mm);
533 
534         return ret;
535 }
536 EXPORT_SYMBOL_GPL(account_locked_vm);
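
/*
 * Illustrative sketch, not part of mm/util.c: a driver that pins user pages
 * might account them against RLIMIT_MEMLOCK like this.  example_pin() and
 * example_unpin() are hypothetical; the pinning itself is elided.
 */
static int example_pin(struct mm_struct *mm, unsigned long npages)
{
        int ret = account_locked_vm(mm, npages, true);

        if (ret)
                return ret;     /* RLIMIT_MEMLOCK would be exceeded */

        /* ... pin the pages ... */
        return 0;
}

static void example_unpin(struct mm_struct *mm, unsigned long npages)
{
        /* ... unpin the pages ... */
        account_locked_vm(mm, npages, false);
}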
537 
538 unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
539         unsigned long len, unsigned long prot,
540         unsigned long flag, unsigned long pgoff)
541 {
542         unsigned long ret;
543         struct mm_struct *mm = current->mm;
544         unsigned long populate;
545         LIST_HEAD(uf);
546 
547         ret = security_mmap_file(file, prot, flag);
548         if (!ret) {
549                 if (mmap_write_lock_killable(mm))
550                         return -EINTR;
551                 ret = do_mmap(file, addr, len, prot, flag, pgoff, &populate,
552                               &uf);
553                 mmap_write_unlock(mm);
554                 userfaultfd_unmap_complete(mm, &uf);
555                 if (populate)
556                         mm_populate(ret, populate);
557         }
558         return ret;
559 }
560 
561 unsigned long vm_mmap(struct file *file, unsigned long addr,
562         unsigned long len, unsigned long prot,
563         unsigned long flag, unsigned long offset)
564 {
565         if (unlikely(offset + PAGE_ALIGN(len) < offset))
566                 return -EINVAL;
567         if (unlikely(offset_in_page(offset)))
568                 return -EINVAL;
569 
570         return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
571 }
572 EXPORT_SYMBOL(vm_mmap);
573 
574 /**
575  * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
576  * failure, fall back to non-contiguous (vmalloc) allocation.
577  * @size: size of the request.
578  * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
579  * @node: numa node to allocate from
580  *
581  * Uses kmalloc to get the memory but if the allocation fails then falls back
582  * to the vmalloc allocator. Use kvfree for freeing the memory.
583  *
584  * GFP_NOWAIT and GFP_ATOMIC are not supported, nor is the __GFP_NORETRY modifier.
585  * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
586  * preferable to the vmalloc fallback, due to visible performance drawbacks.
587  *
588  * Return: pointer to the allocated memory or %NULL in case of failure
589  */
590 void *kvmalloc_node(size_t size, gfp_t flags, int node)
591 {
592         gfp_t kmalloc_flags = flags;
593         void *ret;
594 
595         /*
596          * We want to attempt a large physically contiguous block first because
597          * it is less likely to fragment multiple larger blocks and therefore
598          * contributes less to long-term fragmentation than the vmalloc fallback.
599          * However make sure that larger requests are not too disruptive - no
600          * OOM killer and no allocation failure warnings as we have a fallback.
601          */
602         if (size > PAGE_SIZE) {
603                 kmalloc_flags |= __GFP_NOWARN;
604 
605                 if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
606                         kmalloc_flags |= __GFP_NORETRY;
607 
608                 /* nofail semantic is implemented by the vmalloc fallback */
609                 kmalloc_flags &= ~__GFP_NOFAIL;
610         }
611 
612         ret = kmalloc_node(size, kmalloc_flags, node);
613 
614         /*
615          * It doesn't really make sense to fall back to vmalloc for sub-page
616          * requests.
617          */
618         if (ret || size <= PAGE_SIZE)
619                 return ret;
620 
621         /* Don't even allow crazy sizes */
622         if (unlikely(size > INT_MAX)) {
623                 WARN_ON_ONCE(!(flags & __GFP_NOWARN));
624                 return NULL;
625         }
626 
627         return __vmalloc_node(size, 1, flags, node,
628                         __builtin_return_address(0));
629 }
630 EXPORT_SYMBOL(kvmalloc_node);
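
/*
 * Illustrative sketch, not part of mm/util.c: a large table allocated with
 * kvmalloc_node() and released with kvfree(), as recommended above.  The
 * entry type and helpers are hypothetical; array_size() is the overflow-safe
 * multiplication helper from <linux/overflow.h>.
 */
struct example_entry {
        u64 key;
        u64 value;
};

static struct example_entry *example_table_alloc(size_t nr_entries)
{
        /* served by kmalloc if possible, otherwise falls back to vmalloc */
        return kvmalloc_node(array_size(nr_entries, sizeof(struct example_entry)),
                             GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE);
}

static void example_table_free(struct example_entry *table)
{
        kvfree(table);          /* correct for both kmalloc and vmalloc results */
}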
631 
632 /**
633  * kvfree() - Free memory.
634  * @addr: Pointer to allocated memory.
635  *
636  * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
637  * It is slightly more efficient to use kfree() or vfree() if you are certain
638  * that you know which one to use.
639  *
640  * Context: Either preemptible task context or not-NMI interrupt.
641  */
642 void kvfree(const void *addr)
643 {
644         if (is_vmalloc_addr(addr))
645                 vfree(addr);
646         else
647                 kfree(addr);
648 }
649 EXPORT_SYMBOL(kvfree);
650 
651 /**
652  * kvfree_sensitive - Free a data object containing sensitive information.
653  * @addr: address of the data object to be freed.
654  * @len: length of the data object.
655  *
656  * Use the special memzero_explicit() function to clear the content of a
657  * kvmalloc'ed object containing sensitive data to make sure that the
658  * compiler won't optimize out the data clearing.
659  */
660 void kvfree_sensitive(const void *addr, size_t len)
661 {
662         if (likely(!ZERO_OR_NULL_PTR(addr))) {
663                 memzero_explicit((void *)addr, len);
664                 kvfree(addr);
665         }
666 }
667 EXPORT_SYMBOL(kvfree_sensitive);
668 
669 void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
670 {
671         void *newp;
672 
673         if (oldsize >= newsize)
674                 return (void *)p;
675         newp = kvmalloc(newsize, flags);
676         if (!newp)
677                 return NULL;
678         memcpy(newp, p, oldsize);
679         kvfree(p);
680         return newp;
681 }
682 EXPORT_SYMBOL(kvrealloc);
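
/*
 * Illustrative sketch, not part of mm/util.c: growing a kvmalloc'ed buffer
 * with kvrealloc().  Unlike krealloc(), the caller must pass the old size;
 * on failure the old buffer is left untouched and still owned by the caller.
 * The helper name is hypothetical.
 */
static void *example_grow(void *buf, size_t old_len, size_t new_len)
{
        void *nbuf = kvrealloc(buf, old_len, new_len, GFP_KERNEL);

        if (!nbuf)
                return NULL;    /* buf is still valid; caller decides what to do */

        return nbuf;            /* buf has been kvfree'd if a new buffer was needed */
}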
683 
684 /* Neutral page->mapping pointer to address_space or anon_vma or other */
685 void *page_rmapping(struct page *page)
686 {
687         return folio_raw_mapping(page_folio(page));
688 }
689 
690 /**
691  * folio_mapped - Is this folio mapped into userspace?
692  * @folio: The folio.
693  *
694  * Return: True if any page in this folio is referenced by user page tables.
695  */
696 bool folio_mapped(struct folio *folio)
697 {
698         long i, nr;
699 
700         if (!folio_test_large(folio))
701                 return atomic_read(&folio->_mapcount) >= 0;
702         if (atomic_read(folio_mapcount_ptr(folio)) >= 0)
703                 return true;
704         if (folio_test_hugetlb(folio))
705                 return false;
706 
707         nr = folio_nr_pages(folio);
708         for (i = 0; i < nr; i++) {
709                 if (atomic_read(&folio_page(folio, i)->_mapcount) >= 0)
710                         return true;
711         }
712         return false;
713 }
714 EXPORT_SYMBOL(folio_mapped);
715 
716 struct anon_vma *page_anon_vma(struct page *page)
717 {
718         struct folio *folio = page_folio(page);
719         unsigned long mapping = (unsigned long)folio->mapping;
720 
721         if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
722                 return NULL;
723         return (void *)(mapping - PAGE_MAPPING_ANON);
724 }
725 
726 /**
727  * folio_mapping - Find the mapping where this folio is stored.
728  * @folio: The folio.
729  *
730  * For folios which are in the page cache, return the mapping that this
731  * page belongs to.  Folios in the swap cache return the swap mapping
732  * this page is stored in (which is different from the mapping for the
733  * swap file or swap device where the data is stored).
734  *
735  * You can call this for folios which aren't in the swap cache or page
736  * cache and it will return NULL.
737  */
738 struct address_space *folio_mapping(struct folio *folio)
739 {
740         struct address_space *mapping;
741 
742         /* This happens if someone calls flush_dcache_page on a slab page */
743         if (unlikely(folio_test_slab(folio)))
744                 return NULL;
745 
746         if (unlikely(folio_test_swapcache(folio)))
747                 return swap_address_space(folio_swap_entry(folio));
748 
749         mapping = folio->mapping;
750         if ((unsigned long)mapping & PAGE_MAPPING_ANON)
751                 return NULL;
752 
753         return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
754 }
755 EXPORT_SYMBOL(folio_mapping);
756 
757 /* Slow path of page_mapcount() for compound pages */
758 int __page_mapcount(struct page *page)
759 {
760         int ret;
761 
762         ret = atomic_read(&page->_mapcount) + 1;
763         /*
764          * For file THP, page->_mapcount contains the total number of mappings
765          * of the page: no need to look into compound_mapcount.
766          */
767         if (!PageAnon(page) && !PageHuge(page))
768                 return ret;
769         page = compound_head(page);
770         ret += atomic_read(compound_mapcount_ptr(page)) + 1;
771         if (PageDoubleMap(page))
772                 ret--;
773         return ret;
774 }
775 EXPORT_SYMBOL_GPL(__page_mapcount);
776 
777 /**
778  * folio_copy - Copy the contents of one folio to another.
779  * @dst: Folio to copy to.
780  * @src: Folio to copy from.
781  *
782  * The bytes in the folio represented by @src are copied to @dst.
783  * Assumes the caller has validated that @dst is at least as large as @src.
784  * Can be called in atomic context for order-0 folios, but if the folio is
785  * larger, it may sleep.
786  */
787 void folio_copy(struct folio *dst, struct folio *src)
788 {
789         long i = 0;
790         long nr = folio_nr_pages(src);
791 
792         for (;;) {
793                 copy_highpage(folio_page(dst, i), folio_page(src, i));
794                 if (++i == nr)
795                         break;
796                 cond_resched();
797         }
798 }
799 
800 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
801 int sysctl_overcommit_ratio __read_mostly = 50;
802 unsigned long sysctl_overcommit_kbytes __read_mostly;
803 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
804 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
805 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
806 
807 int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer,
808                 size_t *lenp, loff_t *ppos)
809 {
810         int ret;
811 
812         ret = proc_dointvec(table, write, buffer, lenp, ppos);
813         if (ret == 0 && write)
814                 sysctl_overcommit_kbytes = 0;
815         return ret;
816 }
817 
818 static void sync_overcommit_as(struct work_struct *dummy)
819 {
820         percpu_counter_sync(&vm_committed_as);
821 }
822 
823 int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
824                 size_t *lenp, loff_t *ppos)
825 {
826         struct ctl_table t;
827         int new_policy = -1;
828         int ret;
829 
830          * The deviation of sync_overcommit_as could be big with a loose policy
831          * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing the policy to
832          * the strict OVERCOMMIT_NEVER, we need to reduce the deviation to comply
833          * with the strict "NEVER", and to avoid a possible race condition (even
834          * though users usually won't switch to OVERCOMMIT_NEVER very often),
835          * the switch is done in the following order:
836          * OVERCOMMIT_NEVER), the switch is done in the following order:
837          *      1. changing the batch
838          *      2. sync percpu count on each CPU
839          *      3. switch the policy
840          */
841         if (write) {
842                 t = *table;
843                 t.data = &new_policy;
844                 ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
845                 if (ret || new_policy == -1)
846                         return ret;
847 
848                 mm_compute_batch(new_policy);
849                 if (new_policy == OVERCOMMIT_NEVER)
850                         schedule_on_each_cpu(sync_overcommit_as);
851                 sysctl_overcommit_memory = new_policy;
852         } else {
853                 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
854         }
855 
856         return ret;
857 }
858 
859 int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
860                 size_t *lenp, loff_t *ppos)
861 {
862         int ret;
863 
864         ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
865         if (ret == 0 && write)
866                 sysctl_overcommit_ratio = 0;
867         return ret;
868 }
869 
870 /*
871  * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
872  */
873 unsigned long vm_commit_limit(void)
874 {
875         unsigned long allowed;
876 
877         if (sysctl_overcommit_kbytes)
878                 allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
879         else
880                 allowed = ((totalram_pages() - hugetlb_total_pages())
881                            * sysctl_overcommit_ratio / 100);
882         allowed += total_swap_pages;
883 
884         return allowed;
885 }
886 
887 /*
888  * Make sure vm_committed_as is in its own cacheline and not sharing a
889  * cacheline with other variables, as it can be updated frequently by several CPUs.
890  */
891 struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;
892 
893 /*
894  * The global memory commitment made in the system can be a metric
895  * that can be used to drive ballooning decisions when Linux is hosted
896  * as a guest. On Hyper-V, the host implements a policy engine for dynamically
897  * balancing memory across competing virtual machines that are hosted.
898  * Several metrics drive this policy engine including the guest reported
899  * memory commitment.
900  *
901  * The time cost of this is very low on small platforms; on a big
902  * platform like a 2S/36C/72T Skylake server, in the worst case where
903  * vm_committed_as's spinlock is under severe contention, the time cost
904  * could be about 30~40 microseconds.
905  */
906 unsigned long vm_memory_committed(void)
907 {
908         return percpu_counter_sum_positive(&vm_committed_as);
909 }
910 EXPORT_SYMBOL_GPL(vm_memory_committed);
911 
912 /*
913  * Check that a process has enough memory to allocate a new virtual
914  * mapping. 0 means there is enough memory for the allocation to
915  * succeed and -ENOMEM implies there is not.
916  *
917  * We currently support three overcommit policies, which are set via the
918  * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting.rst
919  *
920  * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
921  * Additional code 2002 Jul 20 by Robert Love.
922  *
923  * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
924  *
925  * Note this is a helper function intended to be used by LSMs which
926  * wish to use this logic.
927  */
928 int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
929 {
930         long allowed;
931 
932         vm_acct_memory(pages);
933 
934         /*
935          * Sometimes we want to use more memory than we have
936          */
937         if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
938                 return 0;
939 
940         if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
941                 if (pages > totalram_pages() + total_swap_pages)
942                         goto error;
943                 return 0;
944         }
945 
946         allowed = vm_commit_limit();
947         /*
948          * Reserve some for root
949          */
950         if (!cap_sys_admin)
951                 allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
952 
953         /*
954          * Don't let a single process grow so big a user can't recover
955          */
956         if (mm) {
957                 long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
958 
959                 allowed -= min_t(long, mm->total_vm / 32, reserve);
960         }
961 
962         if (percpu_counter_read_positive(&vm_committed_as) < allowed)
963                 return 0;
964 error:
965         vm_unacct_memory(pages);
966 
967         return -ENOMEM;
968 }
969 
970 /**
971  * get_cmdline() - copy the cmdline value to a buffer.
972  * @task:     the task whose cmdline value to copy.
973  * @buffer:   the buffer to copy to.
974  * @buflen:   the length of the buffer. Larger cmdline values are truncated
975  *            to this length.
976  *
977  * Return: the size of the cmdline field copied. Note that the copy does
978  * not guarantee a terminating NUL byte.
979  */
980 int get_cmdline(struct task_struct *task, char *buffer, int buflen)
981 {
982         int res = 0;
983         unsigned int len;
984         struct mm_struct *mm = get_task_mm(task);
985         unsigned long arg_start, arg_end, env_start, env_end;
986         if (!mm)
987                 goto out;
988         if (!mm->arg_end)
989                 goto out_mm;    /* Shh! No looking before we're done */
990 
991         spin_lock(&mm->arg_lock);
992         arg_start = mm->arg_start;
993         arg_end = mm->arg_end;
994         env_start = mm->env_start;
995         env_end = mm->env_end;
996         spin_unlock(&mm->arg_lock);
997 
998         len = arg_end - arg_start;
999 
1000         if (len > buflen)
1001                 len = buflen;
1002 
1003         res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);
1004 
1005         /*
1006          * If the nul at the end of args has been overwritten, then
1007          * assume the application is using setproctitle(3).
1008          */
1009         if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
1010                 len = strnlen(buffer, res);
1011                 if (len < res) {
1012                         res = len;
1013                 } else {
1014                         len = env_end - env_start;
1015                         if (len > buflen - res)
1016                                 len = buflen - res;
1017                         res += access_process_vm(task, env_start,
1018                                                  buffer+res, len,
1019                                                  FOLL_FORCE);
1020                         res = strnlen(buffer, res);
1021                 }
1022         }
1023 out_mm:
1024         mmput(mm);
1025 out:
1026         return res;
1027 }
1028 
1029 int __weak memcmp_pages(struct page *page1, struct page *page2)
1030 {
1031         char *addr1, *addr2;
1032         int ret;
1033 
1034         addr1 = kmap_atomic(page1);
1035         addr2 = kmap_atomic(page2);
1036         ret = memcmp(addr1, addr2, PAGE_SIZE);
1037         kunmap_atomic(addr2);
1038         kunmap_atomic(addr1);
1039         return ret;
1040 }
1041 
1042 #ifdef CONFIG_PRINTK
1043 /**
1044  * mem_dump_obj - Print available provenance information
1045  * @object: object for which to find provenance information.
1046  *
1047  * This function uses pr_cont(), so the caller is expected to have
1048  * printed out whatever preamble is appropriate.  The provenance information
1049  * depends on the type of object and on how much debugging is enabled.
1050  * For example, for a slab-cache object, the slab name is printed, and,
1051  * if available, the return address and stack trace from the allocation
1052  * and last free path of that object.
1053  */
1054 void mem_dump_obj(void *object)
1055 {
1056         const char *type;
1057 
1058         if (kmem_valid_obj(object)) {
1059                 kmem_dump_obj(object);
1060                 return;
1061         }
1062 
1063         if (vmalloc_dump_obj(object))
1064                 return;
1065 
1066         if (virt_addr_valid(object))
1067                 type = "non-slab/vmalloc memory";
1068         else if (object == NULL)
1069                 type = "NULL pointer";
1070         else if (object == ZERO_SIZE_PTR)
1071                 type = "zero-size pointer";
1072         else
1073                 type = "non-paged memory";
1074 
1075         pr_cont(" %s\n", type);
1076 }
1077 EXPORT_SYMBOL_GPL(mem_dump_obj);
1078 #endif
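
/*
 * Illustrative sketch, not part of mm/util.c: because mem_dump_obj() prints
 * with pr_cont(), the caller emits its preamble first, without a trailing
 * newline.  The helper name is hypothetical.
 */
static void example_report_obj(void *obj)
{
        pr_info("suspect object %px:", obj);
        mem_dump_obj(obj);      /* appends provenance info and a newline */
}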
1079 
1080 /*
1081  * A driver might set a page logically offline -- PageOffline() -- and
1082  * make the page inaccessible in the hypervisor; after that, access to page
1083  * content can be fatal.
1084  *
1085  * Some special PFN walkers -- i.e., /proc/kcore -- read content of random
1086  * pages after checking PageOffline(); however, these PFN walkers can race
1087  * with drivers that set PageOffline().
1088  *
1089  * page_offline_freeze()/page_offline_thaw() allows for a subsystem to
1090  * synchronize with such drivers, achieving that a page cannot be set
1091  * PageOffline() while frozen.
1092  *
1093  * page_offline_begin()/page_offline_end() is used by drivers that care about
1094  * such races when setting a page PageOffline().
1095  */
1096 static DECLARE_RWSEM(page_offline_rwsem);
1097 
1098 void page_offline_freeze(void)
1099 {
1100         down_read(&page_offline_rwsem);
1101 }
1102 
1103 void page_offline_thaw(void)
1104 {
1105         up_read(&page_offline_rwsem);
1106 }
1107 
1108 void page_offline_begin(void)
1109 {
1110         down_write(&page_offline_rwsem);
1111 }
1112 EXPORT_SYMBOL(page_offline_begin);
1113 
1114 void page_offline_end(void)
1115 {
1116         up_write(&page_offline_rwsem);
1117 }
1118 EXPORT_SYMBOL(page_offline_end);
1119 
1120 #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
1121 void flush_dcache_folio(struct folio *folio)
1122 {
1123         long i, nr = folio_nr_pages(folio);
1124 
1125         for (i = 0; i < nr; i++)
1126                 flush_dcache_page(folio_page(folio, i));
1127 }
1128 EXPORT_SYMBOL(flush_dcache_folio);
1129 #endif
1130 
