
TOMOYO Linux Cross Reference
Linux/mm/internal.h

/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/tracepoint-defs.h>

/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
			__GFP_ATOMIC)

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

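/*
 * Usage sketch for the masks above (illustrative, hypothetical caller;
 * not part of the original header): code that must obey the caller's
 * reclaim constraints while choosing placement itself keeps only the
 * GFP_RECLAIM_MASK bits of the incoming flags, e.g.
 *
 *	gfp_t masked = gfp_mask & GFP_RECLAIM_MASK;
 *
 * and then ORs in its own placement hints before allocating.
 */
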
void page_writeback_init(void);

vm_fault_t do_swap_page(struct vm_fault *vmf);

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);

static inline bool can_madv_dontneed_vma(struct vm_area_struct *vma)
{
	return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));
}

void unmap_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end,
			     struct zap_details *details);

extern unsigned int __do_page_cache_readahead(struct address_space *mapping,
		struct file *filp, pgoff_t offset, unsigned long nr_to_read,
		unsigned long lookahead_size);

/*
 * Submit IO for the read-ahead request in file_ra_state.
 */
static inline unsigned long ra_submit(struct file_ra_state *ra,
		struct address_space *mapping, struct file *filp)
{
	return __do_page_cache_readahead(mapping, filp,
					ra->start, ra->size, ra->async_size);
}

/*
 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}

extern unsigned long highest_memmap_pfn;

/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way forward.
 */
#define MAX_RECLAIM_RETRIES 16

/*
 * in mm/vmscan.c:
 */
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);

/*
 * in mm/rmap.c:
 */
extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/page_alloc.c
 */

/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype and high_zoneidx are initialized only once in
 * __alloc_pages_nodemask() and then never change.
 *
 * zonelist, preferred_zone and classzone_idx are set first in
 * __alloc_pages_nodemask() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * via a const pointer.
 */
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int migratetype;
	enum zone_type high_zoneidx;
	bool spread_dirty_pages;
};

#define ac_classzone_idx(ac) zonelist_zone_idx(ac->preferred_zoneref)

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
	return page_pfn ^ (1 << order);
}

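/*
 * Worked example for equation 2) above (illustrative, not part of the
 * original header): continuing the buddy example, pfn 8 at order 1 has
 *
 *	buddy  = 8 ^ (1 << 1) = 10	(what __find_buddy_pfn(8, 1) returns)
 *	parent = 10 & ~(1 << 1) = 8	(start of the merged order-2 page)
 *
 * so merging #8 and #10 yields an order-2 page starting at pfn 8.
 */
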
extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
					unsigned int order);
extern void prep_compound_page(struct page *page, unsigned int order);
extern void post_alloc_hook(struct page *page, unsigned int order,
					gfp_t gfp_flags);
extern int user_min_free_kbytes;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	struct zone *zone;
	unsigned long nr_freepages;	/* Number of isolated free pages */
	unsigned long nr_migratepages;	/* Number of pages to migrate */
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned long free_pfn;		/* isolate_freepages search base */
	unsigned long migrate_pfn;	/* isolate_migratepages search base */
	unsigned long last_migrated_pfn;/* Not yet flushed page being freed */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	int order;			/* order a direct compactor needs */
	int migratetype;		/* migratetype of direct compactor */
	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
	const int classzone_idx;	/* zone index of a direct compactor */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
	bool direct_compaction;		/* False from kcompactd or /proc/... */
	bool whole_zone;		/* Whole zone should/has been scanned */
	bool contended;			/* Signal lock or sched contention */
	bool finishing_block;		/* Finishing current pageblock */
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
unsigned long
isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);
int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal);

#endif

/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use page_order_unsafe() below.
 */
static inline unsigned int page_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

/*
 * Like page_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define page_order_unsafe(page)		READ_ONCE(page_private(page))

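/*
 * Intended pattern (illustrative sketch, modelled on compaction's
 * free-page scanning): read the order once into a local variable,
 * range-check that local copy, and only then use it:
 *
 *	unsigned int freepage_order = page_order_unsafe(page);
 *
 *	if (freepage_order < MAX_ORDER)
 *		low_pfn += (1UL << freepage_order) - 1;
 */
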
static inline bool is_cow_mapping(vm_flags_t flags)
{
	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}

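/*
 * Illustrative example (not part of the original header): a MAP_PRIVATE
 * mapping has VM_MAYWRITE set but VM_SHARED clear, so
 *
 *	mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0)
 *
 * is a COW mapping even while currently read-only (it may later be
 * mprotect()ed writable); MAP_SHARED mappings are never COW.
 */
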
/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area - automatically grows in one direction
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return (flags & VM_STACK) == VM_STACK;
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}

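/*
 * Illustrative classification of typical mappings (editorial sketch,
 * not part of the original header):
 *
 *	PROT_READ|PROT_EXEC, MAP_PRIVATE file (text)	-> is_exec_mapping()
 *	PROT_READ|PROT_WRITE, MAP_PRIVATE anon (heap)	-> is_data_mapping()
 *	VM_GROWSDOWN private anon (stack)		-> is_stack_mapping()
 */
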
/* mm/util.c */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent);

#ifdef CONFIG_MMU
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *nonblocking);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}

/*
 * must be called with vma's mmap_sem held for read or write, and page locked.
 */
extern void mlock_vma_page(struct page *page);
extern unsigned int munlock_vma_page(struct page *page);

/*
 * Clear the page's PageMlocked().  This can be useful in a situation where
 * we want to unconditionally remove a page from the pagecache -- e.g.,
 * on truncation or freeing.
 *
 * It is legal to call this function for any page, mlocked or not.
 * If called for a page that is still mapped by mlocked vmas, all we do
 * is revert to lazy LRU behaviour -- semantics are not broken.
 */
extern void clear_page_mlock(struct page *page);

/*
 * mlock_migrate_page - called only from migrate_misplaced_transhuge_page()
 * (because that does not go through the full procedure of migration ptes):
 * to migrate the Mlocked page flag; update statistics.
 */
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
	if (TestClearPageMlocked(page)) {
		int nr_pages = hpage_nr_pages(page);

		/* Holding pmd lock, no change in irq context: __mod is safe */
		__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
		SetPageMlocked(newpage);
		__mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
	}
}

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/*
 * At what user virtual address is page expected in @vma?
 */
static inline unsigned long
__vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page_to_pgoff(page);
	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}

static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	unsigned long start, end;

	start = __vma_address(page, vma);
	end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);

	/* page should be within @vma mapping range */
	VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma);

	return max(start, vma->vm_start);
}

#else /* !CONFIG_MMU */
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }

#endif /* !CONFIG_MMU */

/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'.  Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
		return nth_page(base, offset);
	return base + offset;
}

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'.  Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
						struct page *base, int offset)
{
	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + offset;
		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;
}

/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			pr_warn("mminit::" prefix " " fmt, ##arg); \
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)

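/*
 * Usage sketch (illustrative, values hypothetical): early memory-init
 * code emits output gated by mminit_loglevel, e.g.
 *
 *	mminit_dprintk(MMINIT_TRACE, "memmap_init",
 *		"Initialising map node %d zone %lu pfns %lu -> %lu\n",
 *		nid, zone_id, start_pfn, end_pfn);
 */
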
extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */

#define NODE_RECLAIM_NOSCAN	-2
#define NODE_RECLAIM_FULL	-1
#define NODE_RECLAIM_SOME	0
#define NODE_RECLAIM_SUCCESS	1

extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
	unsigned long, unsigned long,
	unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned long reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *page_list);
/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)

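/*
 * Sketch (illustrative): the low bits of alloc_flags select which
 * watermark the allocator checks, roughly:
 *
 *	unsigned long mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
 */
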
/*
 * Only MMU archs have async oom victim reclaim - aka oom_reaper - so
 * we cannot assume that a reduced access to memory reserves is
 * sufficient for !MMU.
 */
#ifdef CONFIG_MMU
#define ALLOC_OOM		0x08
#else
#define ALLOC_OOM		ALLOC_NO_WATERMARKS
#endif

#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
#define ALLOC_CMA		0x80 /* allow allocations from CMA areas */

enum ttu_flags;
struct tlbflush_unmap_batch;

/*
 * only for MM internal work items which do not depend on
 * any allocations or locks which might depend on allocations
 */
extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
	return migratetype == MIGRATE_HIGHATOMIC;
}

static inline bool is_migrate_highatomic_page(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
}

void setup_zone_pageset(struct zone *zone);
extern struct page *alloc_new_node_page(struct page *page, unsigned long node);
#endif	/* __MM_INTERNAL_H */
