TOMOYO Linux Cross Reference
Linux/include/linux/page-flags.h

  1 /*
  2  * Macros for manipulating and testing page->flags
  3  */
  4 
  5 #ifndef PAGE_FLAGS_H
  6 #define PAGE_FLAGS_H
  7 
  8 #include <linux/types.h>
  9 #include <linux/bug.h>
 10 #include <linux/mmdebug.h>
 11 #ifndef __GENERATING_BOUNDS_H
 12 #include <linux/mm_types.h>
 13 #include <generated/bounds.h>
 14 #endif /* !__GENERATING_BOUNDS_H */
 15 
 16 /*
 17  * Various page->flags bits:
 18  *
 19  * PG_reserved is set for special pages, which can never be swapped out. Some
 20  * of them might not even exist (eg empty_bad_page)...
 21  *
 22  * The PG_private bitflag is set on pagecache pages if they contain filesystem
 23  * specific data (which is normally at page->private). It can be used by
 24  * private allocations for their own purposes.
 25  *
 26  * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 27  * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 28  * is set before writeback starts and cleared when it finishes.
 29  *
 30  * PG_locked also pins a page in pagecache, and blocks truncation of the file
 31  * while it is held.
 32  *
 33  * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 34  * to become unlocked.
 35  *
 36  * PG_uptodate tells whether the page's contents are valid.  When a read
 37  * completes, the page becomes uptodate, unless a disk I/O error happened.
 38  *
 39  * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
 40  * file-backed pagecache (see mm/vmscan.c).
 41  *
 42  * PG_error is set to indicate that an I/O error occurred on this page.
 43  *
 44  * PG_arch_1 is an architecture specific page state bit.  The generic code
 45  * guarantees that this bit is cleared for a page when it first is entered into
 46  * the page cache.
 47  *
 48  * PG_highmem pages are not permanently mapped into the kernel virtual address
 49  * space; they need to be kmapped separately for doing I/O on the pages.  The
 50  * struct page (which holds these flag bits) is always mapped into kernel
 51  * address space...
 52  *
 53  * PG_hwpoison indicates that a page got corrupted in hardware and contains
 54  * data with incorrect ECC bits that triggered a machine check. Accessing it
 55  * is not safe since it may cause another machine check. Don't touch!
 56  */
 57 
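/*
 * Illustrative sketch (not part of the upstream header): a minimal read
 * completion path showing how the bits above interact, assuming
 * unlock_page() from <linux/pagemap.h> and the Page*() accessors defined
 * later in this file:
 *
 *	static void example_read_end_io(struct page *page, int error)
 *	{
 *		if (error)
 *			SetPageError(page);	   (record the I/O error)
 *		else
 *			SetPageUptodate(page);	   (contents are now valid)
 *		unlock_page(page);		   (clear PG_locked, wake waiters)
 *	}
 */
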
 58 /*
 59  * Don't use the *_dontuse flags.  Use the macros.  Otherwise you'll break
 60  * locked- and dirty-page accounting.
 61  *
 62  * The page flags field is split into two parts, the main flags area
 63  * which extends from the low bits upwards, and the fields area which
 64  * extends from the high bits downwards.
 65  *
 66  *  | FIELD | ... | FLAGS |
 67  *  N-1           ^       0
 68  *               (NR_PAGEFLAGS)
 69  *
 70  * The fields area is reserved for fields mapping zone, node (for NUMA) and
 71  * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 72  * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 73  */
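/*
 * Illustrative sketch (not part of the upstream header): the fields area is
 * consumed by helpers such as page_to_nid() in <linux/mm.h>, which, on
 * configurations that keep the node id in page->flags, is roughly
 *
 *	static inline int page_to_nid(const struct page *page)
 *	{
 *		return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
 *	}
 *
 * where NODES_PGSHIFT/NODES_MASK are derived from <linux/page-flags-layout.h>.
 * Shown here only to make the FIELD/FLAGS split concrete.
 */
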
 74 enum pageflags {
 75         PG_locked,              /* Page is locked. Don't touch. */
 76         PG_error,
 77         PG_referenced,
 78         PG_uptodate,
 79         PG_dirty,
 80         PG_lru,
 81         PG_active,
 82         PG_waiters,             /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
 83         PG_slab,
 84         PG_owner_priv_1,        /* Owner use. If pagecache, fs may use */
 85         PG_arch_1,
 86         PG_reserved,
 87         PG_private,             /* If pagecache, has fs-private data */
 88         PG_private_2,           /* If pagecache, has fs aux data */
 89         PG_writeback,           /* Page is under writeback */
 90         PG_head,                /* A head page */
 91         PG_mappedtodisk,        /* Has blocks allocated on-disk */
 92         PG_reclaim,             /* To be reclaimed asap */
 93         PG_swapbacked,          /* Page is backed by RAM/swap */
 94         PG_unevictable,         /* Page is "unevictable"  */
 95 #ifdef CONFIG_MMU
 96         PG_mlocked,             /* Page is vma mlocked */
 97 #endif
 98 #ifdef CONFIG_ARCH_USES_PG_UNCACHED
 99         PG_uncached,            /* Page has been mapped as uncached */
100 #endif
101 #ifdef CONFIG_MEMORY_FAILURE
102         PG_hwpoison,            /* hardware poisoned page. Don't touch */
103 #endif
104 #if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
105         PG_young,
106         PG_idle,
107 #endif
108         __NR_PAGEFLAGS,
109 
110         /* Filesystems */
111         PG_checked = PG_owner_priv_1,
112 
113         /* SwapBacked */
114         PG_swapcache = PG_owner_priv_1, /* Swap page: swp_entry_t in private */
115 
116         /* Two page bits are conscripted by FS-Cache to maintain local caching
117          * state.  These bits are set on pages belonging to the netfs's inodes
118          * when those inodes are being locally cached.
119          */
120         PG_fscache = PG_private_2,      /* page backed by cache */
121 
122         /* XEN */
123         /* Pinned in Xen as a read-only pagetable page. */
124         PG_pinned = PG_owner_priv_1,
125         /* Pinned as part of domain save (see xen_mm_pin_all()). */
126         PG_savepinned = PG_dirty,
127         /* Has a grant mapping of another (foreign) domain's page. */
128         PG_foreign = PG_owner_priv_1,
129 
130         /* SLOB */
131         PG_slob_free = PG_private,
132 
133         /* Compound pages. Stored in first tail page's flags */
134         PG_double_map = PG_private_2,
135 
136         /* non-lru isolated movable page */
137         PG_isolated = PG_reclaim,
138 };
139 
140 #ifndef __GENERATING_BOUNDS_H
141 
142 struct page;    /* forward declaration */
143 
144 static inline struct page *compound_head(struct page *page)
145 {
146         unsigned long head = READ_ONCE(page->compound_head);
147 
148         if (unlikely(head & 1))
149                 return (struct page *) (head - 1);
150         return page;
151 }
152 
153 static __always_inline int PageTail(struct page *page)
154 {
155         return READ_ONCE(page->compound_head) & 1;
156 }
157 
158 static __always_inline int PageCompound(struct page *page)
159 {
160         return test_bit(PG_head, &page->flags) || PageTail(page);
161 }
162 
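/*
 * Illustrative note (not part of the upstream header): a tail page stores a
 * pointer to its head page in page->compound_head with bit 0 set as the
 * "this is a tail page" marker (see set_compound_head() below, which stores
 * head + 1).  compound_head() above masks that bit off again, so
 *
 *	struct page *head = compound_head(page);
 *
 * yields the page itself for small and head pages and the head page for tail
 * pages; the PF_* policies below are built on top of this.
 */
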
163 /*
164  * Page flags policies wrt compound pages
165  *
166  * PF_ANY:
167  *     the page flag is relevant for small, head and tail pages.
168  *
 169  * PF_HEAD:
 170  *     for compound pages, all operations related to the page flag are applied
 171  *     to the head page.
 172  *
 173  * PF_ONLY_HEAD:
 174  *     for compound pages, callers only ever operate on the head page.
 175  *
 176  * PF_NO_TAIL:
 177  *     modifications of the page flag must be done on small or head pages;
 178  *     checks can be done on tail pages too.
179  *
180  * PF_NO_COMPOUND:
181  *     the page flag is not relevant for compound pages.
182  */
183 #define PF_ANY(page, enforce)   page
184 #define PF_HEAD(page, enforce)  compound_head(page)
185 #define PF_ONLY_HEAD(page, enforce) ({                                  \
186                 VM_BUG_ON_PGFLAGS(PageTail(page), page);                \
187                 page;})
188 #define PF_NO_TAIL(page, enforce) ({                                    \
189                 VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);     \
190                 compound_head(page);})
191 #define PF_NO_COMPOUND(page, enforce) ({                                \
192                 VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page); \
193                 page;})
194 
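/*
 * Illustrative sketch (not part of the upstream header): for a tail page
 * "tail" of a compound page, the policy macros above resolve as follows
 * (enforce = 1 for modifications, 0 for tests):
 *
 *	PF_ANY(tail, 1)         -> tail                  (flag lives on the tail page itself)
 *	PF_HEAD(tail, 1)        -> compound_head(tail)   (operation redirected to the head)
 *	PF_ONLY_HEAD(tail, 1)   -> VM_BUG_ON_PGFLAGS()   (callers must pass the head page)
 *	PF_NO_TAIL(tail, 1)     -> VM_BUG_ON_PGFLAGS()   (modifications forbidden on tails)
 *	PF_NO_TAIL(tail, 0)     -> compound_head(tail)   (checks redirected to the head)
 *	PF_NO_COMPOUND(tail, 1) -> VM_BUG_ON_PGFLAGS()   (flag undefined for compound pages)
 */
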
195 /*
196  * Macros to create function definitions for page flags
197  */
198 #define TESTPAGEFLAG(uname, lname, policy)                              \
199 static __always_inline int Page##uname(struct page *page)               \
200         { return test_bit(PG_##lname, &policy(page, 0)->flags); }
201 
202 #define SETPAGEFLAG(uname, lname, policy)                               \
203 static __always_inline void SetPage##uname(struct page *page)           \
204         { set_bit(PG_##lname, &policy(page, 1)->flags); }
205 
206 #define CLEARPAGEFLAG(uname, lname, policy)                             \
207 static __always_inline void ClearPage##uname(struct page *page)         \
208         { clear_bit(PG_##lname, &policy(page, 1)->flags); }
209 
210 #define __SETPAGEFLAG(uname, lname, policy)                             \
211 static __always_inline void __SetPage##uname(struct page *page)         \
212         { __set_bit(PG_##lname, &policy(page, 1)->flags); }
213 
214 #define __CLEARPAGEFLAG(uname, lname, policy)                           \
215 static __always_inline void __ClearPage##uname(struct page *page)       \
216         { __clear_bit(PG_##lname, &policy(page, 1)->flags); }
217 
218 #define TESTSETFLAG(uname, lname, policy)                               \
219 static __always_inline int TestSetPage##uname(struct page *page)        \
220         { return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }
221 
222 #define TESTCLEARFLAG(uname, lname, policy)                             \
223 static __always_inline int TestClearPage##uname(struct page *page)      \
224         { return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }
225 
226 #define PAGEFLAG(uname, lname, policy)                                  \
227         TESTPAGEFLAG(uname, lname, policy)                              \
228         SETPAGEFLAG(uname, lname, policy)                               \
229         CLEARPAGEFLAG(uname, lname, policy)
230 
231 #define __PAGEFLAG(uname, lname, policy)                                \
232         TESTPAGEFLAG(uname, lname, policy)                              \
233         __SETPAGEFLAG(uname, lname, policy)                             \
234         __CLEARPAGEFLAG(uname, lname, policy)
235 
236 #define TESTSCFLAG(uname, lname, policy)                                \
237         TESTSETFLAG(uname, lname, policy)                               \
238         TESTCLEARFLAG(uname, lname, policy)
239 
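/*
 * Illustrative sketch (not part of the upstream header): an invocation such
 * as PAGEFLAG(Dirty, dirty, PF_HEAD) expands, roughly, to
 *
 *	static __always_inline int PageDirty(struct page *page)
 *	{ return test_bit(PG_dirty, &compound_head(page)->flags); }
 *	static __always_inline void SetPageDirty(struct page *page)
 *	{ set_bit(PG_dirty, &compound_head(page)->flags); }
 *	static __always_inline void ClearPageDirty(struct page *page)
 *	{ clear_bit(PG_dirty, &compound_head(page)->flags); }
 *
 * i.e. the chosen policy decides which struct page the bit operations act on.
 */
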
240 #define TESTPAGEFLAG_FALSE(uname)                                       \
241 static inline int Page##uname(const struct page *page) { return 0; }
242 
243 #define SETPAGEFLAG_NOOP(uname)                                         \
244 static inline void SetPage##uname(struct page *page) {  }
245 
246 #define CLEARPAGEFLAG_NOOP(uname)                                       \
247 static inline void ClearPage##uname(struct page *page) {  }
248 
249 #define __CLEARPAGEFLAG_NOOP(uname)                                     \
250 static inline void __ClearPage##uname(struct page *page) {  }
251 
252 #define TESTSETFLAG_FALSE(uname)                                        \
253 static inline int TestSetPage##uname(struct page *page) { return 0; }
254 
255 #define TESTCLEARFLAG_FALSE(uname)                                      \
256 static inline int TestClearPage##uname(struct page *page) { return 0; }
257 
258 #define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname)                 \
259         SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname)
260 
261 #define TESTSCFLAG_FALSE(uname)                                         \
262         TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)
263 
264 __PAGEFLAG(Locked, locked, PF_NO_TAIL)
265 PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
266 PAGEFLAG(Error, error, PF_NO_COMPOUND) TESTCLEARFLAG(Error, error, PF_NO_COMPOUND)
267 PAGEFLAG(Referenced, referenced, PF_HEAD)
268         TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
269         __SETPAGEFLAG(Referenced, referenced, PF_HEAD)
270 PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
271         __CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
272 PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
273 PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
274         TESTCLEARFLAG(Active, active, PF_HEAD)
275 __PAGEFLAG(Slab, slab, PF_NO_TAIL)
276 __PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
277 PAGEFLAG(Checked, checked, PF_NO_COMPOUND)         /* Used by some filesystems */
278 
279 /* Xen */
280 PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
281         TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
282 PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
283 PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
284 
285 PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
286         __CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
287 PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
288         __CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
289         __SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
290 
291 /*
292  * Private page markings that may be used by the filesystem that owns the page
293  * for its own purposes.
294  * - PG_private and PG_private_2 cause releasepage() and co to be invoked
295  */
296 PAGEFLAG(Private, private, PF_ANY) __SETPAGEFLAG(Private, private, PF_ANY)
297         __CLEARPAGEFLAG(Private, private, PF_ANY)
298 PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
299 PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
300         TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
301 
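/*
 * Illustrative sketch (not part of the upstream header): a filesystem
 * attaching its own data to a pagecache page typically does, assuming
 * get_page() and set_page_private() from <linux/mm.h>,
 *
 *	get_page(page);
 *	SetPagePrivate(page);
 *	set_page_private(page, (unsigned long)fs_data);
 *
 * (compare attach_page_buffers() in <linux/buffer_head.h>), so that
 * ->releasepage() / ->invalidatepage() get a chance to tear the data down
 * before the page is freed.  "fs_data" is just a placeholder name here.
 */
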
302 /*
 303  * Only test-and-set operations exist for PG_writeback.  The unconditional
 304  * operators are risky: they bypass page accounting.
305  */
306 TESTPAGEFLAG(Writeback, writeback, PF_NO_COMPOUND)
307         TESTSCFLAG(Writeback, writeback, PF_NO_COMPOUND)
308 PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)
309 
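/*
 * Illustrative sketch (not part of the upstream header): a typical writeback
 * lifecycle, assuming end_page_writeback() from <linux/pagemap.h> and
 * set_page_writeback() defined later in this file:
 *
 *	set_page_writeback(page);     (test_set_page_writeback(): bit + accounting)
 *	... submit the write I/O ...
 *	end_page_writeback(page);     (test_clear_page_writeback(): bit + accounting)
 *
 * which is why no unconditional SetPageWriteback()/ClearPageWriteback()
 * helpers are generated above.
 */
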
310 /* PG_readahead is only used for reads; PG_reclaim is only for writes */
311 PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
312         TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
313 PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
314         TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)
315 
316 #ifdef CONFIG_HIGHMEM
317 /*
318  * Must use a macro here due to header dependency issues. page_zone() is not
319  * available at this point.
320  */
321 #define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
322 #else
323 PAGEFLAG_FALSE(HighMem)
324 #endif
325 
326 #ifdef CONFIG_SWAP
327 static __always_inline int PageSwapCache(struct page *page)
328 {
329         return PageSwapBacked(page) && test_bit(PG_swapcache, &page->flags);
330 
331 }
332 SETPAGEFLAG(SwapCache, swapcache, PF_NO_COMPOUND)
333 CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_COMPOUND)
334 #else
335 PAGEFLAG_FALSE(SwapCache)
336 #endif
337 
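/*
 * Note that PG_swapcache aliases PG_owner_priv_1 (see the enum above), so
 * the bit only means "swap cache" on pages that are also PageSwapBacked();
 * hence the extra PageSwapBacked() check in PageSwapCache().
 */
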
338 PAGEFLAG(Unevictable, unevictable, PF_HEAD)
339         __CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
340         TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)
341 
342 #ifdef CONFIG_MMU
343 PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
344         __CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
345         TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
346 #else
347 PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked)
348         TESTSCFLAG_FALSE(Mlocked)
349 #endif
350 
351 #ifdef CONFIG_ARCH_USES_PG_UNCACHED
352 PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
353 #else
354 PAGEFLAG_FALSE(Uncached)
355 #endif
356 
357 #ifdef CONFIG_MEMORY_FAILURE
358 PAGEFLAG(HWPoison, hwpoison, PF_ANY)
359 TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
360 #define __PG_HWPOISON (1UL << PG_hwpoison)
361 #else
362 PAGEFLAG_FALSE(HWPoison)
363 #define __PG_HWPOISON 0
364 #endif
365 
366 #if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
367 TESTPAGEFLAG(Young, young, PF_ANY)
368 SETPAGEFLAG(Young, young, PF_ANY)
369 TESTCLEARFLAG(Young, young, PF_ANY)
370 PAGEFLAG(Idle, idle, PF_ANY)
371 #endif
372 
373 /*
374  * On an anonymous page mapped into a user virtual memory area,
375  * page->mapping points to its anon_vma, not to a struct address_space;
376  * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
377  *
378  * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
379  * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
380  * bit; and then page->mapping points, not to an anon_vma, but to a private
381  * structure which KSM associates with that merged page.  See ksm.h.
382  *
 383  * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for a non-lru movable
 384  * page, and then page->mapping points to a struct address_space.
385  *
386  * Please note that, confusingly, "page_mapping" refers to the inode
387  * address_space which maps the page from disk; whereas "page_mapped"
388  * refers to user virtual address space into which the page is mapped.
389  */
390 #define PAGE_MAPPING_ANON       0x1
391 #define PAGE_MAPPING_MOVABLE    0x2
392 #define PAGE_MAPPING_KSM        (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
393 #define PAGE_MAPPING_FLAGS      (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
394 
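/*
 * Illustrative sketch (not part of the upstream header): since the low bits
 * of page->mapping are used as tags, the real pointer has to be masked off
 * before use, roughly as page_anon_vma()/__page_rmapping() in mm/util.c do:
 *
 *	unsigned long mapping = (unsigned long)page->mapping;
 *
 *	if ((mapping & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_ANON)
 *		anon_vma = (struct anon_vma *)(mapping & ~PAGE_MAPPING_FLAGS);
 *	else if ((mapping & PAGE_MAPPING_FLAGS) == 0)
 *		inode_mapping = (struct address_space *)mapping;
 *
 * ("anon_vma" and "inode_mapping" are just local names for the sketch.)
 */
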
395 static __always_inline int PageMappingFlags(struct page *page)
396 {
397         return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
398 }
399 
400 static __always_inline int PageAnon(struct page *page)
401 {
402         page = compound_head(page);
403         return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
404 }
405 
406 static __always_inline int __PageMovable(struct page *page)
407 {
408         return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
409                                 PAGE_MAPPING_MOVABLE;
410 }
411 
412 #ifdef CONFIG_KSM
413 /*
414  * A KSM page is one of those write-protected "shared pages" or "merged pages"
415  * which KSM maps into multiple mms, wherever identical anonymous page content
416  * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
417  * anon_vma, but to that page's node of the stable tree.
418  */
419 static __always_inline int PageKsm(struct page *page)
420 {
421         page = compound_head(page);
422         return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
423                                 PAGE_MAPPING_KSM;
424 }
425 #else
426 TESTPAGEFLAG_FALSE(Ksm)
427 #endif
428 
429 u64 stable_page_flags(struct page *page);
430 
431 static inline int PageUptodate(struct page *page)
432 {
433         int ret;
434         page = compound_head(page);
435         ret = test_bit(PG_uptodate, &(page)->flags);
436         /*
437          * Must ensure that the data we read out of the page is loaded
438          * _after_ we've loaded page->flags to check for PageUptodate.
439          * We can skip the barrier if the page is not uptodate, because
440          * we wouldn't be reading anything from it.
441          *
442          * See SetPageUptodate() for the other side of the story.
443          */
444         if (ret)
445                 smp_rmb();
446 
447         return ret;
448 }
449 
450 static __always_inline void __SetPageUptodate(struct page *page)
451 {
452         VM_BUG_ON_PAGE(PageTail(page), page);
453         smp_wmb();
454         __set_bit(PG_uptodate, &page->flags);
455 }
456 
457 static __always_inline void SetPageUptodate(struct page *page)
458 {
459         VM_BUG_ON_PAGE(PageTail(page), page);
460         /*
461          * Memory barrier must be issued before setting the PG_uptodate bit,
462          * so that all previous stores issued in order to bring the page
463          * uptodate are actually visible before PageUptodate becomes true.
464          */
465         smp_wmb();
466         set_bit(PG_uptodate, &page->flags);
467 }
468 
469 CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
470 
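/*
 * Illustrative sketch (not part of the upstream header): the smp_wmb() in
 * SetPageUptodate() pairs with the smp_rmb() in PageUptodate(), so that
 *
 *	writer (I/O completion):	fill the page with data;
 *					SetPageUptodate(page);
 *
 *	reader:				if (PageUptodate(page))
 *						... read the page data ...
 *
 * guarantees the reader sees the data that was written before PG_uptodate
 * was set, without any further locking.
 */
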
471 int test_clear_page_writeback(struct page *page);
472 int __test_set_page_writeback(struct page *page, bool keep_write);
473 
474 #define test_set_page_writeback(page)                   \
475         __test_set_page_writeback(page, false)
476 #define test_set_page_writeback_keepwrite(page) \
477         __test_set_page_writeback(page, true)
478 
479 static inline void set_page_writeback(struct page *page)
480 {
481         test_set_page_writeback(page);
482 }
483 
484 static inline void set_page_writeback_keepwrite(struct page *page)
485 {
486         test_set_page_writeback_keepwrite(page);
487 }
488 
489 __PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)
490 
491 static __always_inline void set_compound_head(struct page *page, struct page *head)
492 {
493         WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
494 }
495 
496 static __always_inline void clear_compound_head(struct page *page)
497 {
498         WRITE_ONCE(page->compound_head, 0);
499 }
500 
501 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
502 static inline void ClearPageCompound(struct page *page)
503 {
504         BUG_ON(!PageHead(page));
505         ClearPageHead(page);
506 }
507 #endif
508 
509 #define PG_head_mask ((1UL << PG_head))
510 
511 #ifdef CONFIG_HUGETLB_PAGE
512 int PageHuge(struct page *page);
513 int PageHeadHuge(struct page *page);
514 bool page_huge_active(struct page *page);
515 #else
516 TESTPAGEFLAG_FALSE(Huge)
517 TESTPAGEFLAG_FALSE(HeadHuge)
518 
519 static inline bool page_huge_active(struct page *page)
520 {
521         return 0;
522 }
523 #endif
524 
525 
526 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
527 /*
528  * PageHuge() only returns true for hugetlbfs pages, but not for
529  * normal or transparent huge pages.
530  *
531  * PageTransHuge() returns true for both transparent huge and
532  * hugetlbfs pages, but not normal pages. PageTransHuge() can only be
 533  * called in the core VM paths where hugetlbfs pages can't exist.
534  */
535 static inline int PageTransHuge(struct page *page)
536 {
537         VM_BUG_ON_PAGE(PageTail(page), page);
538         return PageHead(page);
539 }
540 
541 /*
542  * PageTransCompound returns true for both transparent huge pages
543  * and hugetlbfs pages, so it should only be called when it's known
544  * that hugetlbfs pages aren't involved.
545  */
546 static inline int PageTransCompound(struct page *page)
547 {
548         return PageCompound(page);
549 }
550 
551 /*
552  * PageTransCompoundMap is the same as PageTransCompound, but it also
553  * guarantees the primary MMU has the entire compound page mapped
554  * through pmd_trans_huge, which in turn guarantees the secondary MMUs
555  * can also map the entire compound page. This allows the secondary
556  * MMUs to call get_user_pages() only once for each compound page and
557  * to immediately map the entire compound page with a single secondary
 558  * MMU fault. If the pmd is split later, the secondary MMUs
 559  * will get an update through the MMU notifier invalidation issued by
 560  * split_huge_pmd().
 561  *
 562  * Unlike PageTransCompound, this is only safe to call while
 563  * split_huge_pmd() cannot run from under us, e.g. when protected by the
 564  * MMU notifier; otherwise it may result in page->_mapcount < 0 false
 565  * positives.
566  */
567 static inline int PageTransCompoundMap(struct page *page)
568 {
569         return PageTransCompound(page) && atomic_read(&page->_mapcount) < 0;
570 }
571 
572 /*
573  * PageTransTail returns true for both transparent huge pages
574  * and hugetlbfs pages, so it should only be called when it's known
575  * that hugetlbfs pages aren't involved.
576  */
577 static inline int PageTransTail(struct page *page)
578 {
579         return PageTail(page);
580 }
581 
582 /*
583  * PageDoubleMap indicates that the compound page is mapped with PTEs as well
584  * as PMDs.
585  *
 586  * This is required to optimize rmap operations for THP: we can postpone
 587  * per-small-page mapcount accounting (and its overhead from atomic operations)
 588  * until the first PMD split.
 589  *
 590  * For such a page, PageDoubleMap means ->_mapcount in all sub-pages is offset
 591  * up by one. This reference will go away with the last compound_mapcount.
592  *
593  * See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap().
594  */
595 static inline int PageDoubleMap(struct page *page)
596 {
597         return PageHead(page) && test_bit(PG_double_map, &page[1].flags);
598 }
599 
600 static inline void SetPageDoubleMap(struct page *page)
601 {
602         VM_BUG_ON_PAGE(!PageHead(page), page);
603         set_bit(PG_double_map, &page[1].flags);
604 }
605 
606 static inline void ClearPageDoubleMap(struct page *page)
607 {
608         VM_BUG_ON_PAGE(!PageHead(page), page);
609         clear_bit(PG_double_map, &page[1].flags);
610 }
611 static inline int TestSetPageDoubleMap(struct page *page)
612 {
613         VM_BUG_ON_PAGE(!PageHead(page), page);
614         return test_and_set_bit(PG_double_map, &page[1].flags);
615 }
616 
617 static inline int TestClearPageDoubleMap(struct page *page)
618 {
619         VM_BUG_ON_PAGE(!PageHead(page), page);
620         return test_and_clear_bit(PG_double_map, &page[1].flags);
621 }
622 
623 #else
624 TESTPAGEFLAG_FALSE(TransHuge)
625 TESTPAGEFLAG_FALSE(TransCompound)
626 TESTPAGEFLAG_FALSE(TransCompoundMap)
627 TESTPAGEFLAG_FALSE(TransTail)
628 PAGEFLAG_FALSE(DoubleMap)
629         TESTSETFLAG_FALSE(DoubleMap)
630         TESTCLEARFLAG_FALSE(DoubleMap)
631 #endif
632 
633 /*
 634  * For pages that are never mapped to userspace, page->_mapcount may be
 635  * used for storing extra information about the page type. Any value used
 636  * for this purpose must be <= -2, but it's better to start not too close
 637  * to -2 so that an underflow of page_mapcount() won't be mistaken
 638  * for a special page.
639  */
640 #define PAGE_MAPCOUNT_OPS(uname, lname)                                 \
641 static __always_inline int Page##uname(struct page *page)               \
642 {                                                                       \
643         return atomic_read(&page->_mapcount) ==                         \
644                                 PAGE_##lname##_MAPCOUNT_VALUE;          \
645 }                                                                       \
646 static __always_inline void __SetPage##uname(struct page *page)         \
647 {                                                                       \
648         VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);      \
649         atomic_set(&page->_mapcount, PAGE_##lname##_MAPCOUNT_VALUE);    \
650 }                                                                       \
651 static __always_inline void __ClearPage##uname(struct page *page)       \
652 {                                                                       \
653         VM_BUG_ON_PAGE(!Page##uname(page), page);                       \
654         atomic_set(&page->_mapcount, -1);                               \
655 }
656 
657 /*
 658  * PageBuddy() indicates that the page is free and in the buddy system
659  * (see mm/page_alloc.c).
660  */
661 #define PAGE_BUDDY_MAPCOUNT_VALUE               (-128)
662 PAGE_MAPCOUNT_OPS(Buddy, BUDDY)
663 
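/*
 * Illustrative sketch (not part of the upstream header): the invocation
 * above generates, roughly,
 *
 *	static __always_inline int PageBuddy(struct page *page)
 *	{ return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE; }
 *	static __always_inline void __SetPageBuddy(struct page *page)
 *	{ atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE); }
 *	static __always_inline void __ClearPageBuddy(struct page *page)
 *	{ atomic_set(&page->_mapcount, -1); }
 *
 * i.e. a free buddy page is recognised by the -128 marker in page->_mapcount
 * rather than by a bit in page->flags.
 */
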
664 /*
665  * PageBalloon() is set on pages that are on the balloon page list
666  * (see mm/balloon_compaction.c).
667  */
668 #define PAGE_BALLOON_MAPCOUNT_VALUE             (-256)
669 PAGE_MAPCOUNT_OPS(Balloon, BALLOON)
670 
671 /*
672  * If kmemcg is enabled, the buddy allocator will set PageKmemcg() on
673  * pages allocated with __GFP_ACCOUNT. It gets cleared on page free.
674  */
675 #define PAGE_KMEMCG_MAPCOUNT_VALUE              (-512)
676 PAGE_MAPCOUNT_OPS(Kmemcg, KMEMCG)
677 
678 extern bool is_free_buddy_page(struct page *page);
679 
680 __PAGEFLAG(Isolated, isolated, PF_ANY);
681 
682 /*
683  * If network-based swap is enabled, sl*b must keep track of whether pages
684  * were allocated from pfmemalloc reserves.
685  */
686 static inline int PageSlabPfmemalloc(struct page *page)
687 {
688         VM_BUG_ON_PAGE(!PageSlab(page), page);
689         return PageActive(page);
690 }
691 
692 static inline void SetPageSlabPfmemalloc(struct page *page)
693 {
694         VM_BUG_ON_PAGE(!PageSlab(page), page);
695         SetPageActive(page);
696 }
697 
698 static inline void __ClearPageSlabPfmemalloc(struct page *page)
699 {
700         VM_BUG_ON_PAGE(!PageSlab(page), page);
701         __ClearPageActive(page);
702 }
703 
704 static inline void ClearPageSlabPfmemalloc(struct page *page)
705 {
706         VM_BUG_ON_PAGE(!PageSlab(page), page);
707         ClearPageActive(page);
708 }
709 
710 #ifdef CONFIG_MMU
711 #define __PG_MLOCKED            (1UL << PG_mlocked)
712 #else
713 #define __PG_MLOCKED            0
714 #endif
715 
716 /*
717  * Flags checked when a page is freed.  Pages being freed should not have
 718  * these flags set.  If they are, there is a problem.
719  */
720 #define PAGE_FLAGS_CHECK_AT_FREE                                \
721         (1UL << PG_lru          | 1UL << PG_locked      |       \
722          1UL << PG_private      | 1UL << PG_private_2   |       \
723          1UL << PG_writeback    | 1UL << PG_reserved    |       \
724          1UL << PG_slab         | 1UL << PG_active      |       \
725          1UL << PG_unevictable  | __PG_MLOCKED)
726 
727 /*
728  * Flags checked when a page is prepped for return by the page allocator.
 729  * Pages being prepped should not have these flags set.  If they are set,
730  * there has been a kernel bug or struct page corruption.
731  *
 732  * __PG_HWPOISON is exceptional because it needs to be kept beyond the page's
 733  * alloc-free cycle to prevent the page from being reused.
734  */
735 #define PAGE_FLAGS_CHECK_AT_PREP        \
736         (((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)
737 
738 #define PAGE_FLAGS_PRIVATE                              \
739         (1UL << PG_private | 1UL << PG_private_2)
740 /**
741  * page_has_private - Determine if page has private stuff
742  * @page: The page to be checked
743  *
744  * Determine if a page has private stuff, indicating that release routines
745  * should be invoked upon it.
746  */
747 static inline int page_has_private(struct page *page)
748 {
749         return !!(page->flags & PAGE_FLAGS_PRIVATE);
750 }
751 
752 #undef PF_ANY
753 #undef PF_HEAD
754 #undef PF_ONLY_HEAD
755 #undef PF_NO_TAIL
756 #undef PF_NO_COMPOUND
757 #endif /* !__GENERATING_BOUNDS_H */
758 
759 #endif  /* PAGE_FLAGS_H */
760 
