  1 /* SPDX-License-Identifier: GPL-2.0-or-later */
  2 /* internal.h: mm/ internal definitions
  3  *
  4  * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
  5  * Written by David Howells (dhowells@redhat.com)
  6  */
  7 #ifndef __MM_INTERNAL_H
  8 #define __MM_INTERNAL_H
  9 
 10 #include <linux/fs.h>
 11 #include <linux/mm.h>
 12 #include <linux/pagemap.h>
 13 #include <linux/tracepoint-defs.h>
 14 
 15 /*
 16  * The set of flags that only affect watermark checking and reclaim
 17  * behaviour. This is used by the MM to obey the caller constraints
 18  * about IO, FS and watermark checking while ignoring placement
 19  * hints such as HIGHMEM usage.
 20  */
 21 #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
 22                         __GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
 23                         __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
 24                         __GFP_ATOMIC)
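
    /*
     * Illustrative note (not part of the original header): masking with
     * GFP_RECLAIM_MASK keeps only the caller's IO/FS/watermark policy bits,
     * for example:
     *
     *     gfp_t reclaim_bits = gfp_mask & GFP_RECLAIM_MASK;
     *
     * Placement hints such as __GFP_HIGHMEM are deliberately dropped.
     */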
 25 
 26 /* The GFP flags allowed during early boot */
 27 #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
 28 
 29 /* Control allocation cpuset and node placement constraints */
 30 #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
 31 
 32 /* Do not use these with a slab allocator */
 33 #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
 34 
 35 void page_writeback_init(void);
 36 
 37 vm_fault_t do_swap_page(struct vm_fault *vmf);
 38 
 39 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
 40                 unsigned long floor, unsigned long ceiling);
 41 
 42 static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
 43 {
 44         return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));
 45 }
 46 
 47 void unmap_page_range(struct mmu_gather *tlb,
 48                              struct vm_area_struct *vma,
 49                              unsigned long addr, unsigned long end,
 50                              struct zap_details *details);
 51 
 52 extern unsigned int __do_page_cache_readahead(struct address_space *mapping,
 53                 struct file *filp, pgoff_t offset, unsigned long nr_to_read,
 54                 unsigned long lookahead_size);
 55 
 56 /*
 57  * Submit IO for the read-ahead request in file_ra_state.
 58  */
 59 static inline unsigned long ra_submit(struct file_ra_state *ra,
 60                 struct address_space *mapping, struct file *filp)
 61 {
 62         return __do_page_cache_readahead(mapping, filp,
 63                                         ra->start, ra->size, ra->async_size);
 64 }
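
    /*
     * Sketch of a typical caller (illustration only; the field values shown
     * are assumptions, not taken from this header): fault-time readahead
     * fills the window in file_ra_state and then submits it:
     *
     *     ra->start = index;
     *     ra->size = ra->ra_pages;
     *     ra->async_size = ra->ra_pages / 4;
     *     ra_submit(ra, mapping, file);
     */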
 65 
 66 /**
 67  * page_evictable - test whether a page is evictable
 68  * @page: the page to test
 69  *
 70  * Test whether page is evictable--i.e., should be placed on active/inactive
 71  * lists vs unevictable list.
 72  *
 73  * Reasons page might not be evictable:
 74  * (1) page's mapping marked unevictable
 75  * (2) page is part of an mlocked VMA
 76  *
 77  */
 78 static inline bool page_evictable(struct page *page)
 79 {
 80         bool ret;
 81 
 82         /* Prevent address_space of inode and swap cache from being freed */
 83         rcu_read_lock();
 84         ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
 85         rcu_read_unlock();
 86         return ret;
 87 }
 88 
 89 /*
 90  * Turn a non-refcounted page (->_refcount == 0) into refcounted with
 91  * a count of one.
 92  */
 93 static inline void set_page_refcounted(struct page *page)
 94 {
 95         VM_BUG_ON_PAGE(PageTail(page), page);
 96         VM_BUG_ON_PAGE(page_ref_count(page), page);
 97         set_page_count(page, 1);
 98 }
 99 
100 extern unsigned long highest_memmap_pfn;
101 
102 /*
103  * Maximum number of reclaim retries without progress before the OOM
104  * killer is considered the only way forward.
105  */
106 #define MAX_RECLAIM_RETRIES 16
107 
108 /*
109  * in mm/vmscan.c:
110  */
111 extern int isolate_lru_page(struct page *page);
112 extern void putback_lru_page(struct page *page);
113 
114 /*
115  * in mm/rmap.c:
116  */
117 extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
118 
119 /*
120  * in mm/page_alloc.c
121  */
122 
123 /*
124  * Structure for holding the mostly immutable allocation parameters passed
125  * between functions involved in allocations, including the alloc_pages*
126  * family of functions.
127  *
128  * nodemask, migratetype and high_zoneidx are initialized only once in
129  * __alloc_pages_nodemask() and then never change.
130  *
131  * zonelist, preferred_zone and classzone_idx are set first in
132  * __alloc_pages_nodemask() for the fast path, and might be later changed
133  * in __alloc_pages_slowpath(). All other functions pass the whole structure
134  * by a const pointer.
135  */
136 struct alloc_context {
137         struct zonelist *zonelist;
138         nodemask_t *nodemask;
139         struct zoneref *preferred_zoneref;
140         int migratetype;
141         enum zone_type high_zoneidx;
142         bool spread_dirty_pages;
143 };
144 
145 #define ac_classzone_idx(ac) zonelist_zone_idx(ac->preferred_zoneref)
146 
147 /*
148  * Locate the struct page for both the matching buddy in our
149  * pair (buddy1) and the combined order O+1 page they form (page).
150  *
151  * 1) Any buddy B1 will have an order O twin B2 which satisfies
152  * the following equation:
153  *     B2 = B1 ^ (1 << O)
154  * For example, if the starting buddy (buddy1) is #8, its order
155  * 1 buddy (buddy2) is #10:
156  *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
157  *
158  * 2) Any buddy B will have an order O+1 parent P which
159  * satisfies the following equation:
160  *     P = B & ~(1 << O)
161  *
162  * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
163  */
164 static inline unsigned long
165 __find_buddy_pfn(unsigned long page_pfn, unsigned int order)
166 {
167         return page_pfn ^ (1 << order);
168 }
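
    /*
     * Worked example (illustration only), continuing the numbers above:
     *
     *     __find_buddy_pfn(8, 1) == 8 ^ (1 << 1) == 10
     *
     * and merging that order-1 pair gives the order-2 parent at
     * 8 & ~(1 << 1) == 8 (equally, 10 & ~(1 << 1) == 8).
     */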
169 
170 extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
171                                 unsigned long end_pfn, struct zone *zone);
172 
173 static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
174                                 unsigned long end_pfn, struct zone *zone)
175 {
176         if (zone->contiguous)
177                 return pfn_to_page(start_pfn);
178 
179         return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
180 }
181 
182 extern int __isolate_free_page(struct page *page, unsigned int order);
183 extern void __putback_isolated_page(struct page *page, unsigned int order,
184                                     int mt);
185 extern void memblock_free_pages(struct page *page, unsigned long pfn,
186                                         unsigned int order);
187 extern void __free_pages_core(struct page *page, unsigned int order);
188 extern void prep_compound_page(struct page *page, unsigned int order);
189 extern void post_alloc_hook(struct page *page, unsigned int order,
190                                         gfp_t gfp_flags);
191 extern int user_min_free_kbytes;
192 
193 extern void zone_pcp_update(struct zone *zone);
194 extern void zone_pcp_reset(struct zone *zone);
195 
196 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
197 
198 /*
199  * in mm/compaction.c
200  */
201 /*
202  * compact_control is used to track pages being migrated and the free pages
203  * they are being migrated to during memory compaction. The free_pfn starts
204  * at the end of a zone and migrate_pfn begins at the start. Movable pages
205  * are moved to the end of a zone during a compaction run and the run
206  * completes when free_pfn <= migrate_pfn
207  */
208 struct compact_control {
209         struct list_head freepages;     /* List of free pages to migrate to */
210         struct list_head migratepages;  /* List of pages being migrated */
211         unsigned int nr_freepages;      /* Number of isolated free pages */
212         unsigned int nr_migratepages;   /* Number of pages to migrate */
213         unsigned long free_pfn;         /* isolate_freepages search base */
214         unsigned long migrate_pfn;      /* isolate_migratepages search base */
215         unsigned long fast_start_pfn;   /* a pfn to start linear scan from */
216         struct zone *zone;
217         unsigned long total_migrate_scanned;
218         unsigned long total_free_scanned;
219         unsigned short fast_search_fail;/* failures to use free list searches */
220         short search_order;             /* order to start a fast search at */
221         const gfp_t gfp_mask;           /* gfp mask of a direct compactor */
222         int order;                      /* order a direct compactor needs */
223         int migratetype;                /* migratetype of direct compactor */
224         const unsigned int alloc_flags; /* alloc flags of a direct compactor */
225         const int classzone_idx;        /* zone index of a direct compactor */
226         enum migrate_mode mode;         /* Async or sync migration mode */
227         bool ignore_skip_hint;          /* Scan blocks even if marked skip */
228         bool no_set_skip_hint;          /* Don't mark blocks for skipping */
229         bool ignore_block_suitable;     /* Scan blocks considered unsuitable */
230         bool direct_compaction;         /* False from kcompactd or /proc/... */
231         bool whole_zone;                /* Whole zone should/has been scanned */
232         bool contended;                 /* Signal lock or sched contention */
233         bool rescan;                    /* Rescanning the same pageblock */
234         bool alloc_contig;              /* alloc_contig_range allocation */
235 };
236 
237 /*
238  * Used in direct compaction when a page should be taken from the freelists
239  * immediately when one is created during the free path.
240  */
241 struct capture_control {
242         struct compact_control *cc;
243         struct page *page;
244 };
245 
246 unsigned long
247 isolate_freepages_range(struct compact_control *cc,
248                         unsigned long start_pfn, unsigned long end_pfn);
249 unsigned long
250 isolate_migratepages_range(struct compact_control *cc,
251                            unsigned long low_pfn, unsigned long end_pfn);
252 int find_suitable_fallback(struct free_area *area, unsigned int order,
253                         int migratetype, bool only_stealable, bool *can_steal);
254 
255 #endif
256 
257 /*
258  * This function returns the order of a free page in the buddy system. In
259  * general, page_zone(page)->lock must be held by the caller to prevent the
260  * page from being allocated in parallel and returning garbage as the order.
261  * If a caller does not hold page_zone(page)->lock, it must guarantee that the
262  * page cannot be allocated or merged in parallel. Alternatively, it must
263  * handle invalid values gracefully, and use page_order_unsafe() below.
264  */
265 static inline unsigned int page_order(struct page *page)
266 {
267         /* PageBuddy() must be checked by the caller */
268         return page_private(page);
269 }
270 
271 /*
272  * Like page_order(), but for callers who cannot afford to hold the zone lock.
273  * PageBuddy() should be checked first by the caller to minimize race window,
274  * and invalid values must be handled gracefully.
275  *
276  * READ_ONCE is used so that if the caller assigns the result into a local
277  * variable and e.g. tests it for valid range before using, the compiler cannot
278  * decide to remove the variable and inline the page_private(page) multiple
279  * times, potentially observing different values in the tests and the actual
280  * use of the result.
281  */
282 #define page_order_unsafe(page)         READ_ONCE(page_private(page))
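
    /*
     * Typical lockless pattern (sketch; the caller shown here is an
     * assumption, not defined in this header):
     *
     *     if (PageBuddy(page)) {
     *             unsigned long order = page_order_unsafe(page);
     *
     *             if (order < MAX_ORDER)
     *                     ... use order, tolerating a stale value ...
     *     }
     */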
283 
284 static inline bool is_cow_mapping(vm_flags_t flags)
285 {
286         return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
287 }
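
    /*
     * Example (illustration only): a MAP_PRIVATE file or anonymous mapping
     * has VM_MAYWRITE but not VM_SHARED, so is_cow_mapping() is true; the
     * same file mapped MAP_SHARED has VM_SHARED set and is not CoW.
     */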
288 
289 /*
290  * These three helpers classify VMAs for virtual memory accounting.
291  */
292 
293 /*
294  * Executable code area - executable, not writable, not stack
295  */
296 static inline bool is_exec_mapping(vm_flags_t flags)
297 {
298         return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
299 }
300 
301 /*
302  * Stack area - automatically grows in one direction
303  *
304  * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
305  * do_mmap() forbids all other combinations.
306  */
307 static inline bool is_stack_mapping(vm_flags_t flags)
308 {
309         return (flags & VM_STACK) == VM_STACK;
310 }
311 
312 /*
313  * Data area - private, writable, not stack
314  */
315 static inline bool is_data_mapping(vm_flags_t flags)
316 {
317         return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
318 }
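
    /*
     * Illustration (not part of the original header): a private, writable
     * anonymous mapping (e.g. a malloc() heap) satisfies is_data_mapping();
     * the process stack carries VM_STACK and satisfies is_stack_mapping()
     * instead; a read-only, executable text mapping satisfies
     * is_exec_mapping().
     */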
319 
320 /* mm/util.c */
321 void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
322                 struct vm_area_struct *prev);
323 void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma);
324 
325 #ifdef CONFIG_MMU
326 extern long populate_vma_page_range(struct vm_area_struct *vma,
327                 unsigned long start, unsigned long end, int *nonblocking);
328 extern void munlock_vma_pages_range(struct vm_area_struct *vma,
329                         unsigned long start, unsigned long end);
330 static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
331 {
332         munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
333 }
334 
335 /*
336  * must be called with vma's mmap_sem held for read or write, and page locked.
337  */
338 extern void mlock_vma_page(struct page *page);
339 extern unsigned int munlock_vma_page(struct page *page);
340 
341 /*
342  * Clear the page's PageMlocked().  This can be useful in a situation where
343  * we want to unconditionally remove a page from the pagecache -- e.g.,
344  * on truncation or freeing.
345  *
346  * It is legal to call this function for any page, mlocked or not.
347  * If called for a page that is still mapped by mlocked vmas, all we do
348  * is revert to lazy LRU behaviour -- semantics are not broken.
349  */
350 extern void clear_page_mlock(struct page *page);
351 
352 /*
353  * mlock_migrate_page - called only from migrate_misplaced_transhuge_page()
354  * (because that does not go through the full procedure of migration ptes):
355  * to migrate the Mlocked page flag; update statistics.
356  */
357 static inline void mlock_migrate_page(struct page *newpage, struct page *page)
358 {
359         if (TestClearPageMlocked(page)) {
360                 int nr_pages = hpage_nr_pages(page);
361 
362                 /* Holding pmd lock, no change in irq context: __mod is safe */
363                 __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
364                 SetPageMlocked(newpage);
365                 __mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
366         }
367 }
368 
369 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
370 
371 /*
372  * At what user virtual address is page expected in @vma?
373  */
374 static inline unsigned long
375 __vma_address(struct page *page, struct vm_area_struct *vma)
376 {
377         pgoff_t pgoff = page_to_pgoff(page);
378         return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
379 }
380 
381 static inline unsigned long
382 vma_address(struct page *page, struct vm_area_struct *vma)
383 {
384         unsigned long start, end;
385 
386         start = __vma_address(page, vma);
387         end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);
388 
389         /* page should be within @vma mapping range */
390         VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma);
391 
392         return max(start, vma->vm_start);
393 }
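
    /*
     * Worked example (illustration only, values assumed): with
     * vma->vm_start == 0x100000, vma->vm_pgoff == 3 and a page whose
     * pgoff is 5, __vma_address() returns
     * 0x100000 + ((5 - 3) << PAGE_SHIFT), i.e. 0x102000 with 4K pages.
     */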
394 
395 static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
396                                                     struct file *fpin)
397 {
398         int flags = vmf->flags;
399 
400         if (fpin)
401                 return fpin;
402 
403         /*
404          * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
405          * anything, so we pin the file and drop the mmap_sem only when
406          * FAULT_FLAG_ALLOW_RETRY is set on the first attempt and NOWAIT is not.
407          */
408         if (fault_flag_allow_retry_first(flags) &&
409             !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
410                 fpin = get_file(vmf->vma->vm_file);
411                 up_read(&vmf->vma->vm_mm->mmap_sem);
412         }
413         return fpin;
414 }
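
    /*
     * Typical caller shape (sketch, mirroring the page-fault readahead
     * paths; not a definition from this header):
     *
     *     fpin = maybe_unlock_mmap_for_io(vmf, fpin);
     *     ... start the IO or wait for the page ...
     *     if (fpin) {
     *             fput(fpin);
     *             return VM_FAULT_RETRY;
     *     }
     */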
415 
416 #else /* !CONFIG_MMU */
417 static inline void clear_page_mlock(struct page *page) { }
418 static inline void mlock_vma_page(struct page *page) { }
419 static inline void mlock_migrate_page(struct page *new, struct page *old) { }
420 
421 #endif /* !CONFIG_MMU */
422 
423 /*
424  * Return the mem_map entry representing the 'offset' subpage within
425  * the maximally aligned gigantic page 'base'.  Handle any discontiguity
426  * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
427  */
428 static inline struct page *mem_map_offset(struct page *base, int offset)
429 {
430         if (unlikely(offset >= MAX_ORDER_NR_PAGES))
431                 return nth_page(base, offset);
432         return base + offset;
433 }
434 
435 /*
436  * Iterator over all subpages within the maximally aligned gigantic
437  * page 'base'.  Handle any discontiguity in the mem_map.
438  */
439 static inline struct page *mem_map_next(struct page *iter,
440                                                 struct page *base, int offset)
441 {
442         if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
443                 unsigned long pfn = page_to_pfn(base) + offset;
444                 if (!pfn_valid(pfn))
445                         return NULL;
446                 return pfn_to_page(pfn);
447         }
448         return iter + 1;
449 }
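
    /*
     * Sketch of the intended iteration (illustration only;
     * pages_per_huge_page is an assumed variable):
     *
     *     struct page *p = base;
     *     for (i = 0; i < pages_per_huge_page;
     *          i++, p = mem_map_next(p, base, i)) {
     *             ... operate on subpage p ...
     *     }
     */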
450 
451 /* Memory initialisation debug and verification */
452 enum mminit_level {
453         MMINIT_WARNING,
454         MMINIT_VERIFY,
455         MMINIT_TRACE
456 };
457 
458 #ifdef CONFIG_DEBUG_MEMORY_INIT
459 
460 extern int mminit_loglevel;
461 
462 #define mminit_dprintk(level, prefix, fmt, arg...) \
463 do { \
464         if (level < mminit_loglevel) { \
465                 if (level <= MMINIT_WARNING) \
466                         pr_warn("mminit::" prefix " " fmt, ##arg);      \
467                 else \
468                         printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
469         } \
470 } while (0)
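
    /*
     * Example invocation (illustrative; the prefix and format string are
     * assumptions, real callers live in the early memory-init code):
     *
     *     mminit_dprintk(MMINIT_VERIFY, "zonelist",
     *                    "pgdat %d zone %s\n", nid, zone->name);
     */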
471 
472 extern void mminit_verify_pageflags_layout(void);
473 extern void mminit_verify_zonelist(void);
474 #else
475 
476 static inline void mminit_dprintk(enum mminit_level level,
477                                 const char *prefix, const char *fmt, ...)
478 {
479 }
480 
481 static inline void mminit_verify_pageflags_layout(void)
482 {
483 }
484 
485 static inline void mminit_verify_zonelist(void)
486 {
487 }
488 #endif /* CONFIG_DEBUG_MEMORY_INIT */
489 
490 /* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
491 #if defined(CONFIG_SPARSEMEM)
492 extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
493                                 unsigned long *end_pfn);
494 #else
495 static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
496                                 unsigned long *end_pfn)
497 {
498 }
499 #endif /* CONFIG_SPARSEMEM */
500 
501 #define NODE_RECLAIM_NOSCAN     -2
502 #define NODE_RECLAIM_FULL       -1
503 #define NODE_RECLAIM_SOME       0
504 #define NODE_RECLAIM_SUCCESS    1
505 
506 #ifdef CONFIG_NUMA
507 extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
508 #else
509 static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
510                                 unsigned int order)
511 {
512         return NODE_RECLAIM_NOSCAN;
513 }
514 #endif
515 
516 extern int hwpoison_filter(struct page *p);
517 
518 extern u32 hwpoison_filter_dev_major;
519 extern u32 hwpoison_filter_dev_minor;
520 extern u64 hwpoison_filter_flags_mask;
521 extern u64 hwpoison_filter_flags_value;
522 extern u64 hwpoison_filter_memcg;
523 extern u32 hwpoison_filter_enable;
524 
525 extern unsigned long  __must_check vm_mmap_pgoff(struct file *, unsigned long,
526         unsigned long, unsigned long,
527         unsigned long, unsigned long);
528 
529 extern void set_pageblock_order(void);
530 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
531                                             struct list_head *page_list);
532 /* The ALLOC_WMARK bits are used as an index to zone->watermark */
533 #define ALLOC_WMARK_MIN         WMARK_MIN
534 #define ALLOC_WMARK_LOW         WMARK_LOW
535 #define ALLOC_WMARK_HIGH        WMARK_HIGH
536 #define ALLOC_NO_WATERMARKS     0x04 /* don't check watermarks at all */
537 
538 /* Mask to get the watermark bits */
539 #define ALLOC_WMARK_MASK        (ALLOC_NO_WATERMARKS-1)
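
    /*
     * Example (illustration): the low bits index the watermark to test,
     * roughly as the allocator fast path does:
     *
     *     unsigned long mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
     *
     * while the remaining ALLOC_* bits modify how the check behaves.
     */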
540 
541 /*
542  * Only MMU archs have async oom victim reclaim - aka oom_reaper - so we
543  * cannot assume that reduced access to memory reserves is sufficient for
544  * !MMU.
545  */
546 #ifdef CONFIG_MMU
547 #define ALLOC_OOM               0x08
548 #else
549 #define ALLOC_OOM               ALLOC_NO_WATERMARKS
550 #endif
551 
552 #define ALLOC_HARDER             0x10 /* try to alloc harder */
553 #define ALLOC_HIGH               0x20 /* __GFP_HIGH set */
554 #define ALLOC_CPUSET             0x40 /* check for correct cpuset */
555 #define ALLOC_CMA                0x80 /* allow allocations from CMA areas */
556 #ifdef CONFIG_ZONE_DMA32
557 #define ALLOC_NOFRAGMENT        0x100 /* avoid mixing pageblock types */
558 #else
559 #define ALLOC_NOFRAGMENT          0x0
560 #endif
561 #define ALLOC_KSWAPD            0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */
562 
563 enum ttu_flags;
564 struct tlbflush_unmap_batch;
565 
566 
567 /*
568  * only for MM internal work items which do not depend on
569  * any allocations or locks which might depend on allocations
570  */
571 extern struct workqueue_struct *mm_percpu_wq;
572 
573 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
574 void try_to_unmap_flush(void);
575 void try_to_unmap_flush_dirty(void);
576 void flush_tlb_batched_pending(struct mm_struct *mm);
577 #else
578 static inline void try_to_unmap_flush(void)
579 {
580 }
581 static inline void try_to_unmap_flush_dirty(void)
582 {
583 }
584 static inline void flush_tlb_batched_pending(struct mm_struct *mm)
585 {
586 }
587 #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
588 
589 extern const struct trace_print_flags pageflag_names[];
590 extern const struct trace_print_flags vmaflag_names[];
591 extern const struct trace_print_flags gfpflag_names[];
592 
593 static inline bool is_migrate_highatomic(enum migratetype migratetype)
594 {
595         return migratetype == MIGRATE_HIGHATOMIC;
596 }
597 
598 static inline bool is_migrate_highatomic_page(struct page *page)
599 {
600         return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
601 }
602 
603 void setup_zone_pageset(struct zone *zone);
604 extern struct page *alloc_new_node_page(struct page *page, unsigned long node);
605 #endif  /* __MM_INTERNAL_H */
606 
