TOMOYO Linux Cross Reference
Linux/include/linux/mm_types.h

#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H

#include <linux/auxvec.h>
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/uprobes.h>
#include <linux/page-flags-layout.h>
#include <asm/page.h>
#include <asm/mmu.h>

#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
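
/*
 * Illustrative sketch (not part of the header): why AT_VECTOR_SIZE
 * doubles its count. Each ELF auxiliary vector entry is an (id, value)
 * pair of words, and the vector is terminated by an AT_NULL pair,
 * hence 2 * (entries + 1). A walker over the flattened array (as
 * stored in mm->saved_auxv below) looks like this; the helper name is
 * hypothetical:
 */
static inline unsigned long auxv_lookup_sketch(const unsigned long *saved_auxv,
                                               unsigned long type)
{
        const unsigned long *p;

        for (p = saved_auxv; p[0] != AT_NULL; p += 2)   /* (id, value) pairs */
                if (p[0] == type)
                        return p[1];
        return 0;       /* not present */
}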

struct address_space;
struct mem_cgroup;

#define USE_SPLIT_PTE_PTLOCKS   (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
#define USE_SPLIT_PMD_PTLOCKS   (USE_SPLIT_PTE_PTLOCKS && \
                IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
#define ALLOC_SPLIT_PTLOCKS     (SPINLOCK_SIZE > BITS_PER_LONG/8)

typedef void compound_page_dtor(struct page *);

/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 *
 * The objects in struct page are organized in double word blocks in
 * order to allow us to use atomic double word operations on portions
 * of struct page. That is currently only used by slub but the arrangement
 * allows the use of atomic double word operations on the flags/mapping
 * and lru list pointers also.
 */
struct page {
        /* First double word block */
        unsigned long flags;            /* Atomic flags, some possibly
                                         * updated asynchronously */
        union {
                struct address_space *mapping;  /* If low bit clear, points to
                                                 * inode address_space, or NULL.
                                                 * If page mapped as anonymous
                                                 * memory, low bit is set, and
                                                 * it points to anon_vma object:
                                                 * see PAGE_MAPPING_ANON below.
                                                 */
                void *s_mem;                    /* slab first object */
        };

        /* Second double word */
        struct {
                union {
                        pgoff_t index;          /* Our offset within mapping. */
                        void *freelist;         /* sl[aou]b first free object */
                };

                union {
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
        defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
                        /* Used for cmpxchg_double in slub */
                        unsigned long counters;
#else
                        /*
                         * Keep _count separate from slub cmpxchg_double data:
                         * the rest of the double word is protected by
                         * slab_lock, but _count is not.
                         */
                        unsigned counters;
#endif

                        struct {

                                union {
                                        /*
                                         * Count of ptes mapped in
                                         * mms, to show when page is
                                         * mapped & limit reverse map
                                         * searches.
                                         *
                                         * Used also for tail pages
                                         * refcounting instead of
                                         * _count. Tail pages cannot
                                         * be mapped and keeping the
                                         * tail page _count zero at
                                         * all times guarantees
                                         * get_page_unless_zero() will
                                         * never succeed on tail
                                         * pages.
                                         */
                                        atomic_t _mapcount;

                                        struct { /* SLUB */
                                                unsigned inuse:16;
                                                unsigned objects:15;
                                                unsigned frozen:1;
                                        };
                                        int units;      /* SLOB */
                                };
                                atomic_t _count;                /* Usage count, see below. */
                        };
                        unsigned int active;    /* SLAB */
                };
        };

        /* Third double word block */
        union {
                struct list_head lru;   /* Pageout list, eg. active_list
                                         * protected by zone->lru_lock !
                                         * Can be used as a generic list
                                         * by the page owner.
                                         */
                struct {                /* slub per cpu partial pages */
                        struct page *next;      /* Next partial slab */
#ifdef CONFIG_64BIT
                        int pages;      /* Nr of partial slabs left */
                        int pobjects;   /* Approximate # of objects */
#else
                        short int pages;
                        short int pobjects;
#endif
                };

                struct slab *slab_page; /* slab fields */
                struct rcu_head rcu_head;       /* Used by SLAB
                                                 * when destroying via RCU
                                                 */
                /* First tail page of compound page */
                struct {
                        compound_page_dtor *compound_dtor;
                        unsigned long compound_order;
                };

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
                pgtable_t pmd_huge_pte; /* protected by page->ptl */
#endif
        };

        /* Remainder is not double word aligned */
        union {
                unsigned long private;          /* Mapping-private opaque data:
                                                 * usually used for buffer_heads
                                                 * if PagePrivate set; used for
                                                 * swp_entry_t if PageSwapCache;
                                                 * indicates order in the buddy
                                                 * system if PG_buddy is set.
                                                 */
#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
                spinlock_t *ptl;
#else
                spinlock_t ptl;
#endif
#endif
                struct kmem_cache *slab_cache;  /* SL[AU]B: Pointer to slab */
                struct page *first_page;        /* Compound tail pages */
        };

#ifdef CONFIG_MEMCG
        struct mem_cgroup *mem_cgroup;
#endif

        /*
         * On machines where all RAM is mapped into kernel address space,
         * we can simply calculate the virtual address. On machines with
         * highmem some memory is mapped into kernel virtual memory
         * dynamically, so we need a place to store that address.
         * Note that this field could be 16 bits on x86 ... ;)
         *
         * Architectures with slow multiplication can define
         * WANT_PAGE_VIRTUAL in asm/page.h
         */
#if defined(WANT_PAGE_VIRTUAL)
        void *virtual;                  /* Kernel virtual address (NULL if
                                           not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */

#ifdef CONFIG_KMEMCHECK
        /*
         * kmemcheck wants to track the status of each byte in a page; this
         * is a pointer to such a status block. NULL if not tracked.
         */
        void *shadow;
#endif

#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
        int _last_cpupid;
#endif
}
/*
 * The struct page can be forced to be double word aligned so that atomic ops
 * on double words work. The SLUB allocator can make use of such a feature.
 */
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
        __aligned(2 * sizeof(unsigned long))
#endif
;
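
/*
 * Illustrative sketch (not part of the header): decoding the low bit of
 * page->mapping described above. PAGE_MAPPING_ANON and PAGE_MAPPING_FLAGS
 * are defined in linux/mm.h; these helpers only mirror the logic of
 * PageAnon()/page_anon_vma(), they are not the kernel's definitions.
 */
struct anon_vma;

static inline int page_is_anon_sketch(struct page *page)
{
        return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

static inline struct anon_vma *page_anon_vma_sketch(struct page *page)
{
        unsigned long mapping = (unsigned long)page->mapping;

        if (!(mapping & PAGE_MAPPING_ANON))
                return NULL;    /* file-backed: mapping is an address_space */
        return (struct anon_vma *)(mapping & ~PAGE_MAPPING_FLAGS);
}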

struct page_frag {
        struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
        __u32 offset;
        __u32 size;
#else
        __u16 offset;
        __u16 size;
#endif
};

#define PAGE_FRAG_CACHE_MAX_SIZE        __ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER       get_order(PAGE_FRAG_CACHE_MAX_SIZE)

struct page_frag_cache {
        void * va;
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
        __u16 offset;
        __u16 size;
#else
        __u32 offset;
#endif
        /* we maintain a pagecount bias, so that we don't dirty the cache
         * line containing page->_count every time we allocate a fragment.
         */
        unsigned int            pagecnt_bias;
        bool pfmemalloc;
};
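
/*
 * Illustrative sketch (not part of the header): the pagecnt_bias trick.
 * The cache takes many page references with a single atomic operation
 * up front, then pays for each fragment from the plain (non-atomic)
 * pagecnt_bias field; the shared cache line holding page->_count is
 * only dirtied again once the bias runs out. Names and the bias value
 * are hypothetical simplifications of the real allocator logic.
 */
#define FRAG_CACHE_BIAS_SKETCH  0x10000u

static inline void frag_cache_arm_sketch(struct page_frag_cache *nc,
                                         struct page *page)
{
        /* a fresh page starts at refcount 1; one RMW pre-pays the rest */
        atomic_add(FRAG_CACHE_BIAS_SKETCH - 1, &page->_count);
        nc->pagecnt_bias = FRAG_CACHE_BIAS_SKETCH;
}

static inline bool frag_cache_get_sketch(struct page_frag_cache *nc)
{
        /* CPU-local decrement: no shared cache line is touched */
        if (--nc->pagecnt_bias)
                return true;
        return false;   /* pre-paid references exhausted: re-arm/refill */
}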

typedef unsigned long vm_flags_t;

/*
 * A region containing a mapping of a non-memory backed file under NOMMU
 * conditions.  These are held in a global tree and are pinned by the VMAs that
 * map parts of them.
 */
struct vm_region {
        struct rb_node  vm_rb;          /* link in global region tree */
        vm_flags_t      vm_flags;       /* VMA vm_flags */
        unsigned long   vm_start;       /* start address of region */
        unsigned long   vm_end;         /* region initialised to here */
        unsigned long   vm_top;         /* region allocated to here */
        unsigned long   vm_pgoff;       /* the offset in vm_file corresponding to vm_start */
        struct file     *vm_file;       /* the backing file or NULL */

        int             vm_usage;       /* region usage count (access under nommu_region_sem) */
        bool            vm_icache_flushed : 1; /* true if the icache has been flushed for
                                                * this region */
};

#ifdef CONFIG_USERFAULTFD
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
struct vm_userfaultfd_ctx {
        struct userfaultfd_ctx *ctx;
};
#else /* CONFIG_USERFAULTFD */
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
struct vm_userfaultfd_ctx {};
#endif /* CONFIG_USERFAULTFD */

/*
 * This struct defines a VMM memory area. There is one of these
 * per VM-area/task. A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (i.e. a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
        /* The first cache line has the info for VMA tree walking. */

        unsigned long vm_start;         /* Our start address within vm_mm. */
        unsigned long vm_end;           /* The first byte after our end address
                                           within vm_mm. */

        /* linked list of VM areas per task, sorted by address */
        struct vm_area_struct *vm_next, *vm_prev;

        struct rb_node vm_rb;

        /*
         * Largest free memory gap in bytes to the left of this VMA.
         * Either between this VMA and vma->vm_prev, or between one of the
         * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
         * get_unmapped_area find a free area of the right size.
         */
        unsigned long rb_subtree_gap;

        /* Second cache line starts here. */

        struct mm_struct *vm_mm;        /* The address space we belong to. */
        pgprot_t vm_page_prot;          /* Access permissions of this VMA. */
        unsigned long vm_flags;         /* Flags, see mm.h. */

        /*
         * For areas with an address space and backing store,
         * linkage into the address_space->i_mmap interval tree.
         */
        struct {
                struct rb_node rb;
                unsigned long rb_subtree_last;
        } shared;

        /*
         * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
         * list, after a COW of one of the file pages.  A MAP_SHARED vma
         * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
         * or brk vma (with NULL file) can only be in an anon_vma list.
         */
        struct list_head anon_vma_chain; /* Serialized by mmap_sem &
                                          * page_table_lock */
        struct anon_vma *anon_vma;      /* Serialized by page_table_lock */

        /* Function pointers to deal with this struct. */
        const struct vm_operations_struct *vm_ops;

        /* Information about our backing store: */
        unsigned long vm_pgoff;         /* Offset (within vm_file) in PAGE_SIZE
                                           units, *not* PAGE_CACHE_SIZE */
        struct file * vm_file;          /* File we map to (can be NULL). */
        void * vm_private_data;         /* was vm_pte (shared mem) */

#ifndef CONFIG_MMU
        struct vm_region *vm_region;    /* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
        struct mempolicy *vm_policy;    /* NUMA policy for the VMA */
#endif
        struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
};
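
/*
 * Illustrative sketch (not part of the header): how rb_subtree_gap is
 * computed, mirroring the logic of vma_compute_subtree_gap() in
 * mm/mmap.c. Each node records the largest gap at itself or anywhere
 * in its subtree, so get_unmapped_area() can skip whole subtrees that
 * cannot contain a large enough hole.
 */
static unsigned long vma_subtree_gap_sketch(struct vm_area_struct *vma)
{
        unsigned long max, subtree_gap;

        /* gap between this VMA and its predecessor (or address zero) */
        max = vma->vm_start - (vma->vm_prev ? vma->vm_prev->vm_end : 0);

        if (vma->vm_rb.rb_left) {
                subtree_gap = rb_entry(vma->vm_rb.rb_left,
                                struct vm_area_struct, vm_rb)->rb_subtree_gap;
                if (subtree_gap > max)
                        max = subtree_gap;
        }
        if (vma->vm_rb.rb_right) {
                subtree_gap = rb_entry(vma->vm_rb.rb_right,
                                struct vm_area_struct, vm_rb)->rb_subtree_gap;
                if (subtree_gap > max)
                        max = subtree_gap;
        }
        return max;
}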

struct core_thread {
        struct task_struct *task;
        struct core_thread *next;
};

struct core_state {
        atomic_t nr_threads;
        struct core_thread dumper;
        struct completion startup;
};

enum {
        MM_FILEPAGES,
        MM_ANONPAGES,
        MM_SWAPENTS,
        NR_MM_COUNTERS
};

#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU)
#define SPLIT_RSS_COUNTING
/* per-thread cached information */
struct task_rss_stat {
        int events;     /* for synchronization threshold */
        int count[NR_MM_COUNTERS];
};
#endif /* USE_SPLIT_PTE_PTLOCKS */

struct mm_rss_stat {
        atomic_long_t count[NR_MM_COUNTERS];
};
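
#ifdef SPLIT_RSS_COUNTING
/*
 * Illustrative sketch (not part of the header): how split RSS counting
 * batches updates. A thread accumulates deltas in its local
 * task_rss_stat without atomics and folds them into the shared
 * counters only after a threshold of events, roughly as
 * check_sync_rss_stat()/sync_mm_rss() do in mm/memory.c. The helper
 * name and threshold value here are assumptions.
 */
#define RSS_EVENTS_THRESH_SKETCH 64

static inline void rss_account_sketch(struct mm_rss_stat *shared,
                                      struct task_rss_stat *local,
                                      int member, int val)
{
        int i;

        local->count[member] += val;    /* thread-local, no shared traffic */
        if (++local->events < RSS_EVENTS_THRESH_SKETCH)
                return;
        local->events = 0;
        for (i = 0; i < NR_MM_COUNTERS; i++) {  /* fold into the mm */
                if (local->count[i]) {
                        atomic_long_add(local->count[i], &shared->count[i]);
                        local->count[i] = 0;
                }
        }
}
#endif /* SPLIT_RSS_COUNTING */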

struct kioctx_table;
struct mm_struct {
        struct vm_area_struct *mmap;            /* list of VMAs */
        struct rb_root mm_rb;
        u32 vmacache_seqnum;                    /* per-thread vmacache */
#ifdef CONFIG_MMU
        unsigned long (*get_unmapped_area) (struct file *filp,
                                unsigned long addr, unsigned long len,
                                unsigned long pgoff, unsigned long flags);
#endif
        unsigned long mmap_base;                /* base of mmap area */
        unsigned long mmap_legacy_base;         /* base of mmap area in bottom-up allocations */
        unsigned long task_size;                /* size of task vm space */
        unsigned long highest_vm_end;           /* highest vma end address */
        pgd_t * pgd;
        atomic_t mm_users;                      /* How many users with user space? */
        atomic_t mm_count;                      /* How many references to "struct mm_struct" (users count as 1) */
        atomic_long_t nr_ptes;                  /* PTE page table pages */
#if CONFIG_PGTABLE_LEVELS > 2
        atomic_long_t nr_pmds;                  /* PMD page table pages */
#endif
        int map_count;                          /* number of VMAs */

        spinlock_t page_table_lock;             /* Protects page tables and some counters */
        struct rw_semaphore mmap_sem;

        struct list_head mmlist;                /* List of maybe swapped mm's.  These are globally strung
                                                 * together off init_mm.mmlist, and are protected
                                                 * by mmlist_lock
                                                 */

        unsigned long hiwater_rss;      /* High-watermark of RSS usage */
        unsigned long hiwater_vm;       /* High-water virtual memory usage */

        unsigned long total_vm;         /* Total pages mapped */
        unsigned long locked_vm;        /* Pages that have PG_mlocked set */
        unsigned long pinned_vm;        /* Refcount permanently increased */
        unsigned long shared_vm;        /* Shared pages (files) */
        unsigned long exec_vm;          /* VM_EXEC & ~VM_WRITE */
        unsigned long stack_vm;         /* VM_GROWSUP/DOWN */
        unsigned long def_flags;
        unsigned long start_code, end_code, start_data, end_data;
        unsigned long start_brk, brk, start_stack;
        unsigned long arg_start, arg_end, env_start, env_end;

        unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

        /*
         * Special counters, in some configurations protected by the
         * page_table_lock, in other configurations by being atomic.
         */
        struct mm_rss_stat rss_stat;

        struct linux_binfmt *binfmt;

        cpumask_var_t cpu_vm_mask_var;

        /* Architecture-specific MM context */
        mm_context_t context;

        unsigned long flags; /* Must use atomic bitops to access the bits */

        struct core_state *core_state; /* coredumping support */
#ifdef CONFIG_AIO
        spinlock_t                      ioctx_lock;
        struct kioctx_table __rcu       *ioctx_table;
#endif
#ifdef CONFIG_MEMCG
        /*
         * "owner" points to a task that is regarded as the canonical
         * user/owner of this mm. All of the following must be true in
         * order for it to be changed:
         *
         * current == mm->owner
         * current->mm != mm
         * new_owner->mm == mm
         * new_owner->alloc_lock is held
         */
        struct task_struct __rcu *owner;
#endif

        /* store ref to file /proc/<pid>/exe symlink points to */
        struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
        struct mmu_notifier_mm *mmu_notifier_mm;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
        pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
        struct cpumask cpumask_allocation;
#endif
#ifdef CONFIG_NUMA_BALANCING
        /*
         * numa_next_scan is the next time that the PTEs will be marked
         * pte_numa. NUMA hinting faults will gather statistics and migrate
         * pages to new nodes if necessary.
         */
        unsigned long numa_next_scan;

        /* Restart point for scanning and setting pte_numa */
        unsigned long numa_scan_offset;

        /* numa_scan_seq prevents two threads setting pte_numa */
        int numa_scan_seq;
#endif
#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
        /*
         * An operation with batched TLB flushing is going on. Anything that
         * can move process memory needs to flush the TLB when moving a
         * PROT_NONE or PROT_NUMA mapped page.
         */
        bool tlb_flush_pending;
#endif
        struct uprobes_state uprobes_state;
#ifdef CONFIG_X86_INTEL_MPX
        /* address of the bounds directory */
        void __user *bd_addr;
#endif
};
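
/*
 * Illustrative sketch (not part of the header): the two-counter lifetime
 * rule spelled out by the mm_users/mm_count comments above. All address
 * space users together hold exactly one mm_count reference, so the
 * struct itself outlives its mappings. These helpers are hypothetical
 * simplifications of mmput()/mmdrop() in kernel/fork.c.
 */
static inline void mm_struct_free_sketch(struct mm_struct *mm)
{
        /* free_mm()/__mmdrop() territory in the real code */
}

static inline void mm_user_put_sketch(struct mm_struct *mm)
{
        if (!atomic_dec_and_test(&mm->mm_users))
                return;         /* other address space users remain */
        /* last user: VMAs and page tables go now (exit_mmap() for real) */
        if (atomic_dec_and_test(&mm->mm_count))
                mm_struct_free_sketch(mm);      /* no lazy-TLB refs left */
}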

static inline void mm_init_cpumask(struct mm_struct *mm)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
        mm->cpu_vm_mask_var = &mm->cpumask_allocation;
#endif
        cpumask_clear(mm->cpu_vm_mask_var);
}

/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
        return mm->cpu_vm_mask_var;
}

#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
/*
 * Memory barriers to keep this state in sync are graciously provided by
 * the page table locks, outside of which no page table modifications happen.
 * The barriers below prevent the compiler from re-ordering the instructions
 * around the memory barriers that are already present in the code.
 */
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
        barrier();
        return mm->tlb_flush_pending;
}
static inline void set_tlb_flush_pending(struct mm_struct *mm)
{
        mm->tlb_flush_pending = true;

        /*
         * Guarantee that the tlb_flush_pending store does not leak into the
         * critical section updating the page tables
         */
        smp_mb__before_spinlock();
}
/* Clearing is done after a TLB flush, which also provides a barrier. */
static inline void clear_tlb_flush_pending(struct mm_struct *mm)
{
        barrier();
        mm->tlb_flush_pending = false;
}
#else
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
        return false;
}
static inline void set_tlb_flush_pending(struct mm_struct *mm)
{
}
static inline void clear_tlb_flush_pending(struct mm_struct *mm)
{
}
#endif
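
/*
 * Illustrative sketch (not part of the header): how a PTE-modifying path
 * pairs with the helpers above. The flag is raised before PTEs change
 * and dropped after the flush, so a racing path that might move a
 * PROT_NONE/PROT_NUMA page can test mm_tlb_flush_pending() and flush
 * first. flush_tlb_mm() is the usual arch-provided flush; locking is
 * elided here.
 */
static void change_protection_sketch(struct mm_struct *mm)
{
        set_tlb_flush_pending(mm);
        /* ... clear/modify PTEs under the page table lock ... */
        flush_tlb_mm(mm);
        clear_tlb_flush_pending(mm);
}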

struct vm_special_mapping
{
        const char *name;
        struct page **pages;
};

enum tlb_flush_reason {
        TLB_FLUSH_ON_TASK_SWITCH,
        TLB_REMOTE_SHOOTDOWN,
        TLB_LOCAL_SHOOTDOWN,
        TLB_LOCAL_MM_SHOOTDOWN,
        TLB_REMOTE_SEND_IPI,
        NR_TLB_FLUSH_REASONS,
};

 /*
  * A swap entry has to fit into an "unsigned long", as the entry is hidden
  * in the "index" field of the swapper address space.
  */
typedef struct {
        unsigned long val;
} swp_entry_t;
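
/*
 * Illustrative sketch (not part of the header): how a (type, offset)
 * pair packs into the single word, mirroring swp_entry()/swp_type()/
 * swp_offset() from linux/swapops.h. The shift below is an assumption;
 * the real one is derived from MAX_SWAPFILES_SHIFT and the radix tree
 * entry encoding.
 */
#define SWP_TYPE_SHIFT_SKETCH   58

static inline swp_entry_t swp_entry_sketch(unsigned long type,
                                           unsigned long offset)
{
        swp_entry_t entry;

        entry.val = (type << SWP_TYPE_SHIFT_SKETCH) |
                    (offset & ((1UL << SWP_TYPE_SHIFT_SKETCH) - 1));
        return entry;
}

static inline unsigned long swp_type_sketch(swp_entry_t entry)
{
        return entry.val >> SWP_TYPE_SHIFT_SKETCH;
}

static inline unsigned long swp_offset_sketch(swp_entry_t entry)
{
        return entry.val & ((1UL << SWP_TYPE_SHIFT_SKETCH) - 1);
}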

#endif /* _LINUX_MM_TYPES_H */
