TOMOYO Linux Cross Reference
Linux/mm/internal.h

/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/mm.h>

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
                unsigned long floor, unsigned long ceiling);

static inline void set_page_count(struct page *page, int v)
{
        atomic_set(&page->_count, v);
}

extern int __do_page_cache_readahead(struct address_space *mapping,
                struct file *filp, pgoff_t offset, unsigned long nr_to_read,
                unsigned long lookahead_size);

/*
 * Submit IO for the read-ahead request in file_ra_state.
 */
static inline unsigned long ra_submit(struct file_ra_state *ra,
                struct address_space *mapping, struct file *filp)
{
        return __do_page_cache_readahead(mapping, filp,
                                        ra->start, ra->size, ra->async_size);
}
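
/*
 * Illustrative sketch (not part of the original header): how a readahead
 * path might fill in file_ra_state before submitting it.  The window sizes
 * below are hypothetical; ondemand_readahead() in mm/readahead.c computes
 * them for real.
 */
#if 0
static void example_submit_readahead(struct address_space *mapping,
                                     struct file *filp,
                                     struct file_ra_state *ra, pgoff_t offset)
{
        ra->start = offset;     /* first page to read */
        ra->size = 32;          /* read 32 pages... */
        ra->async_size = 8;     /* ...marking the last 8 to trigger async readahead */
        ra_submit(ra, mapping, filp);
}
#endif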

/*
 * Turn a non-refcounted page (->_count == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
        VM_BUG_ON(PageTail(page));
        VM_BUG_ON(atomic_read(&page->_count));
        set_page_count(page, 1);
}
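
/*
 * Illustrative sketch (not part of the original header): the page allocator
 * hands out pages with a reference count of one; conceptually the tail end
 * of prep_new_page() in mm/page_alloc.c looks like this.
 */
#if 0
static void example_prep_new_page(struct page *page)
{
        /* page just left the buddy free lists with _count == 0 */
        set_page_refcounted(page);      /* caller now owns one reference */
}
#endif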

static inline void __get_page_tail_foll(struct page *page,
                                        bool get_page_head)
{
        /*
         * If we're getting a tail page, the elevated page->_count is
         * required only in the head page and we will elevate the head
         * page->_count and tail page->_mapcount.
         *
         * We elevate page_tail->_mapcount for tail pages to force
         * page_tail->_count to be zero at all times to avoid getting
         * false positives from get_page_unless_zero() with
         * speculative page access (like in
         * page_cache_get_speculative()) on tail pages.
         */
        VM_BUG_ON(atomic_read(&page->first_page->_count) <= 0);
        VM_BUG_ON(atomic_read(&page->_count) != 0);
        VM_BUG_ON(page_mapcount(page) < 0);
        if (get_page_head)
                atomic_inc(&page->first_page->_count);
        atomic_inc(&page->_mapcount);
}

/*
 * This is meant to be called as the FOLL_GET operation of
 * follow_page() and it must be called while holding the proper PT
 * lock while the pte (or pmd_trans_huge) is still mapping the page.
 */
static inline void get_page_foll(struct page *page)
{
        if (unlikely(PageTail(page)))
                /*
                 * This is safe only because
                 * __split_huge_page_refcount() can't run under
                 * get_page_foll() because we hold the proper PT lock.
                 */
                __get_page_tail_foll(page, true);
        else {
                /*
                 * Getting a normal page or the head of a compound page
                 * requires an already elevated page->_count.
                 */
                VM_BUG_ON(atomic_read(&page->_count) <= 0);
                atomic_inc(&page->_count);
        }
}
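
/*
 * Illustrative sketch (not part of the original header): a follow_page()
 * style lookup taking its FOLL_GET reference while the PT lock still pins
 * the mapping.  example_follow_pte() is a hypothetical reduction of the
 * real follow_page() path in mm/memory.c.
 */
#if 0
static struct page *example_follow_pte(struct vm_area_struct *vma,
                                       unsigned long address, pte_t *ptep,
                                       spinlock_t *ptl, unsigned int flags)
{
        struct page *page = vm_normal_page(vma, address, *ptep);

        if (page && (flags & FOLL_GET))
                get_page_foll(page);    /* PT lock held: a THP split can't race */
        spin_unlock(ptl);
        return page;
}
#endif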

extern unsigned long highest_memmap_pfn;

/*
 * in mm/vmscan.c:
 */
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);
extern bool zone_reclaimable(struct zone *zone);

/*
 * in mm/rmap.c:
 */
extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/page_alloc.c
 */
extern void __free_pages_bootmem(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned long order);
#ifdef CONFIG_MEMORY_FAILURE
extern bool is_free_buddy_page(struct page *page);
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn.
 */
struct compact_control {
        struct list_head freepages;     /* List of free pages to migrate to */
        struct list_head migratepages;  /* List of pages being migrated */
        unsigned long nr_freepages;     /* Number of isolated free pages */
        unsigned long nr_migratepages;  /* Number of pages to migrate */
        unsigned long free_pfn;         /* isolate_freepages search base */
        unsigned long migrate_pfn;      /* isolate_migratepages search base */
        enum migrate_mode mode;         /* Async or sync migration mode */
        bool ignore_skip_hint;          /* Scan blocks even if marked skip */
        bool finished_update_free;      /* True when the zone cached pfns are
                                         * no longer being updated
                                         */
        bool finished_update_migrate;

        int order;                      /* order a direct compactor needs */
        int migratetype;                /* MOVABLE, RECLAIMABLE etc */
        struct zone *zone;
        bool contended;                 /* True if a lock was contended, or
                                         * need_resched() true during async
                                         * compaction
                                         */
};
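
/*
 * Illustrative sketch (not part of the original header): the "scanners met"
 * termination test described above; compact_finished() in mm/compaction.c
 * applies the same condition.
 */
#if 0
static bool example_compaction_complete(struct compact_control *cc)
{
        /* migrate scanner advances up the zone, free scanner moves down */
        return cc->free_pfn <= cc->migrate_pfn;
}
#endif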

unsigned long
isolate_freepages_range(struct compact_control *cc,
                        unsigned long start_pfn, unsigned long end_pfn);
unsigned long
isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
        unsigned long low_pfn, unsigned long end_pfn, bool unevictable);

#endif

/*
 * Function for dealing with a page's order in the buddy system.
 * zone->lock is already acquired when we use these, so we don't
 * need atomic page->flags operations here.
 */
static inline unsigned long page_order(struct page *page)
{
        /* PageBuddy() must be checked by the caller */
        return page_private(page);
}
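
/*
 * Illustrative sketch (not part of the original header): reading a buddy
 * page's order safely.  example_buddy_order() is hypothetical; real users
 * such as __free_one_page() already run with zone->lock held.
 */
#if 0
static unsigned long example_buddy_order(struct zone *zone, struct page *page)
{
        unsigned long order = 0;

        spin_lock(&zone->lock);
        if (PageBuddy(page))            /* the check the comment above requires */
                order = page_order(page);
        spin_unlock(&zone->lock);
        return order;
}
#endif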

/* mm/util.c */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev, struct rb_node *rb_parent);

#ifdef CONFIG_MMU
extern long __mlock_vma_pages_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end, int *nonblocking);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
                        unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
        munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}

/*
 * Called only in the fault path, to determine if a new page is being
 * mapped into a LOCKED vma.  If it is, mark the page as mlocked.
 */
static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
                                    struct page *page)
{
        VM_BUG_ON(PageLRU(page));

        if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
                return 0;

        if (!TestSetPageMlocked(page)) {
                mod_zone_page_state(page_zone(page), NR_MLOCK,
                                    hpage_nr_pages(page));
                count_vm_event(UNEVICTABLE_PGMLOCKED);
        }
        return 1;
}
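
/*
 * Illustrative sketch (not part of the original header): how a fault path
 * would use the result, loosely following what page_add_new_anon_rmap() in
 * mm/rmap.c did at the time.  example_add_new_anon() is hypothetical.
 */
#if 0
static void example_add_new_anon(struct vm_area_struct *vma, struct page *page)
{
        if (!mlocked_vma_newpage(vma, page))
                lru_cache_add_anon(page);               /* normal LRU aging */
        else
                add_page_to_unevictable_list(page);     /* keep off the normal LRU */
}
#endif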

/*
 * Must be called with the vma's mmap_sem held for read or write,
 * and the page locked.
 */
extern void mlock_vma_page(struct page *page);
extern unsigned int munlock_vma_page(struct page *page);

/*
 * Clear the page's PageMlocked().  This can be useful in a situation where
 * we want to unconditionally remove a page from the pagecache -- e.g.,
 * on truncation or freeing.
 *
 * It is legal to call this function for any page, mlocked or not.
 * If called for a page that is still mapped by mlocked vmas, all we do
 * is revert to lazy LRU behaviour -- semantics are not broken.
 */
extern void clear_page_mlock(struct page *page);

/*
 * mlock_migrate_page - called only from migrate_page_copy() to
 * migrate the Mlocked page flag; update statistics.
 */
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
        if (TestClearPageMlocked(page)) {
                unsigned long flags;
                int nr_pages = hpage_nr_pages(page);

                local_irq_save(flags);
                __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
                SetPageMlocked(newpage);
                __mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
                local_irq_restore(flags);
        }
}

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern unsigned long vma_address(struct page *page,
                                 struct vm_area_struct *vma);
#endif
#else /* !CONFIG_MMU */
static inline int mlocked_vma_newpage(struct vm_area_struct *v, struct page *p)
{
        return 0;
}
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }

#endif /* !CONFIG_MMU */

/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'.  Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
        if (unlikely(offset >= MAX_ORDER_NR_PAGES))
                return pfn_to_page(page_to_pfn(base) + offset);
        return base + offset;
}

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'.  Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
                                                struct page *base, int offset)
{
        if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
                unsigned long pfn = page_to_pfn(base) + offset;
                if (!pfn_valid(pfn))
                        return NULL;
                return pfn_to_page(pfn);
        }
        return iter + 1;
}
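
/*
 * Illustrative sketch (not part of the original header): visiting every
 * subpage of a gigantic page, in the style of clear_gigantic_page() in
 * mm/memory.c.  example_clear_gigantic() is hypothetical.
 */
#if 0
static void example_clear_gigantic(struct page *base, int nr_pages)
{
        int i;
        struct page *p = base;

        for (i = 0; i < nr_pages; i++, p = mem_map_next(p, base, i)) {
                /* p is subpage i even across mem_map discontiguities */
                clear_highpage(p);
        }
}
#endif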

/*
 * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
 * so all functions starting at paging_init should be marked __init
 * in those cases. SPARSEMEM, however, allows for memory hotplug,
 * and alloc_bootmem_node is not used.
 */
#ifdef CONFIG_SPARSEMEM
#define __paginginit __meminit
#else
#define __paginginit __init
#endif
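
/*
 * Illustrative sketch (not part of the original header): a boot-time
 * initialisation routine tagged with the annotation chosen above, as
 * free_area_init_core() in mm/page_alloc.c is.  On FLATMEM/DISCONTIGMEM
 * the text is discarded after init; on SPARSEMEM it must survive for
 * memory hotplug, hence __meminit.
 */
#if 0
static void __paginginit example_zone_setup(struct zone *zone)
{
        /* runs from the paging_init() call chain */
}
#endif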

/* Memory initialisation debug and verification */
enum mminit_level {
        MMINIT_WARNING,
        MMINIT_VERIFY,
        MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
        if (level < mminit_loglevel) { \
                printk(level <= MMINIT_WARNING ? KERN_WARNING : KERN_DEBUG); \
                printk(KERN_CONT "mminit::" prefix " " fmt, ##arg); \
        } \
} while (0)
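
/*
 * Illustrative sketch (not part of the original header): a typical caller,
 * in the style of mm/mm_init.c.  The message only appears when the chosen
 * level is below mminit_loglevel (raised via the mminit_loglevel= boot
 * parameter).
 */
#if 0
static void example_trace(void)
{
        mminit_dprintk(MMINIT_TRACE, "pageflags_layout",
                        "Section %d Node %d Zone %d\n",
                        SECTIONS_WIDTH, NODES_WIDTH, ZONES_WIDTH);
}
#endif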

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_page_links(struct page *page,
                enum zone_type zone, unsigned long nid, unsigned long pfn);
extern void mminit_verify_zonelist(void);

#else

static inline void mminit_dprintk(enum mminit_level level,
                                const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_page_links(struct page *page,
                enum zone_type zone, unsigned long nid, unsigned long pfn)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
                                unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
                                unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */

#define ZONE_RECLAIM_NOSCAN     -2
#define ZONE_RECLAIM_FULL       -1
#define ZONE_RECLAIM_SOME       0
#define ZONE_RECLAIM_SUCCESS    1

extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
        unsigned long, unsigned long,
        unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned long reclaim_clean_pages_from_list(struct zone *zone,
                                            struct list_head *page_list);
/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN         WMARK_MIN
#define ALLOC_WMARK_LOW         WMARK_LOW
#define ALLOC_WMARK_HIGH        WMARK_HIGH
#define ALLOC_NO_WATERMARKS     0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK        (ALLOC_NO_WATERMARKS-1)

#define ALLOC_HARDER            0x10 /* try to alloc harder */
#define ALLOC_HIGH              0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET            0x40 /* check for correct cpuset */
#define ALLOC_CMA               0x80 /* allow allocations from CMA areas */
#define ALLOC_FAIR              0x100 /* fair zone allocation */

#endif  /* __MM_INTERNAL_H */
