
TOMOYO Linux Cross Reference
Linux/include/linux/mm_types.h


#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H

#include <linux/auxvec.h>
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/page-debug-flags.h>
#include <linux/uprobes.h>
#include <linux/page-flags-layout.h>
#include <asm/page.h>
#include <asm/mmu.h>

#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
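
/*
 * Worked example (editorial, illustrative only): each auxv entry is an
 * (id, value) pair, hence the factor of 2, and the "+ 1" leaves room for
 * the terminating AT_NULL pair.  Assuming AT_VECTOR_SIZE_BASE is, say, 19
 * (its value in <linux/auxvec.h> around this era) and no arch extras,
 * AT_VECTOR_SIZE = 2 * (0 + 19 + 1) = 40 unsigned longs, i.e. the
 * saved_auxv[] array in struct mm_struct below holds 20 pairs.
 */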

struct address_space;

#define USE_SPLIT_PTLOCKS       (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
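
/*
 * Illustrative sketch (editorial, not part of this header): when
 * USE_SPLIT_PTLOCKS is true, each page-table page carries its own
 * spinlock in its struct page (the ptl field below), so unrelated page
 * faults need not contend on mm->page_table_lock.  Lock selection is
 * modeled on the pte_lockptr() helpers in <linux/mm.h>; the macro name
 * here is hypothetical and the real definitions may differ.
 */
#if USE_SPLIT_PTLOCKS
#define example_pte_lockptr(mm, pmd)    (&pmd_page(*(pmd))->ptl)
#else
#define example_pte_lockptr(mm, pmd)    ({ (void)(pmd); &(mm)->page_table_lock; })
#endif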

/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 *
 * The objects in struct page are organized in double word blocks in
 * order to allow us to use atomic double word operations on portions
 * of struct page. That is currently only used by slub but the arrangement
 * allows the use of atomic double word operations on the flags/mapping
 * and lru list pointers also.
 */
struct page {
        /* First double word block */
        unsigned long flags;            /* Atomic flags, some possibly
                                         * updated asynchronously */
        struct address_space *mapping;  /* If low bit clear, points to
                                         * inode address_space, or NULL.
                                         * If page mapped as anonymous
                                         * memory, low bit is set, and
                                         * it points to anon_vma object:
                                         * see PAGE_MAPPING_ANON below.
                                         */
        /* Second double word */
        struct {
                union {
                        pgoff_t index;          /* Our offset within mapping. */
                        void *freelist;         /* slub/slob first free object */
                        bool pfmemalloc;        /* If set by the page allocator,
                                                 * ALLOC_NO_WATERMARKS was set
                                                 * and the low watermark was not
                                                 * met, implying that the system
                                                 * is under some pressure. The
                                                 * caller should try to ensure
                                                 * this page is only used to
                                                 * free other pages.
                                                 */
                };

                union {
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
        defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
                        /* Used for cmpxchg_double in slub */
                        unsigned long counters;
#else
                        /*
                         * Keep _count separate from slub cmpxchg_double data.
                         * The rest of the double word is protected by
                         * slab_lock, but _count is not.
                         */
                        unsigned counters;
#endif

                        struct {

                                union {
                                        /*
                                         * Count of ptes mapped in
                                         * mms, to show when page is
                                         * mapped & limit reverse map
                                         * searches.
                                         *
                                         * Used also for tail pages
                                         * refcounting instead of
                                         * _count. Tail pages cannot
                                         * be mapped and keeping the
                                         * tail page _count zero at
                                         * all times guarantees
                                         * get_page_unless_zero() will
                                         * never succeed on tail
                                         * pages.
                                         */
                                        atomic_t _mapcount;

                                        struct { /* SLUB */
                                                unsigned inuse:16;
                                                unsigned objects:15;
                                                unsigned frozen:1;
                                        };
                                        int units;      /* SLOB */
                                };
                                atomic_t _count;                /* Usage count, see below. */
                        };
                };
        };

        /* Third double word block */
        union {
                struct list_head lru;   /* Pageout list, e.g. active_list,
                                         * protected by zone->lru_lock !
                                         */
                struct {                /* slub per cpu partial pages */
                        struct page *next;      /* Next partial slab */
#ifdef CONFIG_64BIT
                        int pages;      /* Nr of partial slabs left */
                        int pobjects;   /* Approximate # of objects */
#else
                        short int pages;
                        short int pobjects;
#endif
                };

                struct list_head list;  /* SLOB's list of pages */
                struct slab *slab_page; /* slab fields */
        };

        /* Remainder is not double word aligned */
        union {
                unsigned long private;          /* Mapping-private opaque data:
                                                 * usually used for buffer_heads
                                                 * if PagePrivate set; used for
                                                 * swp_entry_t if PageSwapCache;
                                                 * indicates order in the buddy
                                                 * system if PG_buddy is set.
                                                 */
#if USE_SPLIT_PTLOCKS
                spinlock_t ptl;
#endif
                struct kmem_cache *slab_cache;  /* SL[AU]B: Pointer to slab */
                struct page *first_page;        /* Compound tail pages */
        };

        /*
         * On machines where all RAM is mapped into kernel address space,
         * we can simply calculate the virtual address. On machines with
         * highmem some memory is mapped into kernel virtual memory
         * dynamically, so we need a place to store that address.
         * Note that this field could be 16 bits on x86 ... ;)
         *
         * Architectures with slow multiplication can define
         * WANT_PAGE_VIRTUAL in asm/page.h
         */
#if defined(WANT_PAGE_VIRTUAL)
        void *virtual;                  /* Kernel virtual address (NULL if
                                           not kmapped, i.e. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
        unsigned long debug_flags;      /* Use atomic bitops on this */
#endif

#ifdef CONFIG_KMEMCHECK
        /*
         * kmemcheck wants to track the status of each byte in a page; this
         * is a pointer to such a status block. NULL if not tracked.
         */
        void *shadow;
#endif

#ifdef LAST_NID_NOT_IN_PAGE_FLAGS
        int _last_nid;
#endif
}
/*
 * The struct page can be forced to be double word aligned so that atomic ops
 * on double words work. The SLUB allocator can make use of such a feature.
 */
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
        __aligned(2 * sizeof(unsigned long))
#endif
;
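
/*
 * Illustrative sketch (editorial, not part of this header): why the
 * double word layout above matters.  SLUB updates a slab page's freelist
 * pointer and its packed counters (inuse/objects/frozen) as one atomic
 * unit; this is modeled on __cmpxchg_double_slab() in mm/slub.c,
 * simplified here.  It relies on freelist and counters being adjacent
 * and on the struct being double word aligned (see __aligned above).
 */
static inline bool example_slab_update(struct page *page,
                void *freelist_old, unsigned long counters_old,
                void *freelist_new, unsigned long counters_new)
{
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
        defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
        return cmpxchg_double(&page->freelist, &page->counters,
                              freelist_old, counters_old,
                              freelist_new, counters_new);
#else
        /* Fallback: real callers take slab_lock() around a plain update. */
        return false;
#endif
}

/*
 * Likewise illustrative: decoding the overloaded mapping field.  For an
 * anonymous page the low bit is set and the pointer is really a struct
 * anon_vma.  The real PAGE_MAPPING_ANON/PAGE_MAPPING_FLAGS masks live in
 * <linux/mm.h> and also cover KSM; this sketch only tests the low bit.
 */
struct anon_vma;
static inline struct anon_vma *example_page_anon_vma(struct page *page)
{
        unsigned long mapping = (unsigned long)page->mapping;

        if (!(mapping & 1))             /* low bit clear: file-backed or NULL */
                return NULL;
        return (struct anon_vma *)(mapping & ~1UL);
}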

struct page_frag {
        struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
        __u32 offset;
        __u32 size;
#else
        __u16 offset;
        __u16 size;
#endif
};
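
/*
 * Illustrative sketch (editorial, not part of this header): a page_frag
 * carves small chunks out of one page, advancing offset until size is
 * exhausted.  The networking code uses this pattern (see sk_frag in
 * struct sock); the helper below is hypothetical and assumes a lowmem
 * page whose size field was set to the usable length at refill time.
 */
static inline void *example_frag_alloc(struct page_frag *pf, unsigned int len)
{
        void *addr;

        if (!pf->page || pf->offset + len > pf->size)
                return NULL;    /* caller must (re)fill with a fresh page */
        addr = page_address(pf->page) + pf->offset;
        pf->offset += len;      /* consume len bytes of the fragment */
        return addr;
}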

typedef unsigned long __nocast vm_flags_t;

/*
 * A region containing a mapping of a non-memory backed file under NOMMU
 * conditions.  These are held in a global tree and are pinned by the VMAs that
 * map parts of them.
 */
struct vm_region {
        struct rb_node  vm_rb;          /* link in global region tree */
        vm_flags_t      vm_flags;       /* VMA vm_flags */
        unsigned long   vm_start;       /* start address of region */
        unsigned long   vm_end;         /* region initialised to here */
        unsigned long   vm_top;         /* region allocated to here */
        unsigned long   vm_pgoff;       /* the offset in vm_file corresponding to vm_start */
        struct file     *vm_file;       /* the backing file or NULL */

        int             vm_usage;       /* region usage count (access under nommu_region_sem) */
        bool            vm_icache_flushed : 1; /* true if the icache has been flushed for
                                                * this region */
};
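
/*
 * Illustrative sketch (editorial, not part of this header): vm_usage is a
 * plain int rather than an atomic_t because every change happens under
 * the global nommu_region_sem; modeled loosely on the region handling in
 * mm/nommu.c (e.g. __put_nommu_region()).  The helper name is hypothetical.
 */
static inline void example_get_nommu_region(struct vm_region *region)
{
        /* caller must hold nommu_region_sem */
        region->vm_usage++;
}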

/*
 * This struct defines a virtual memory area. There is one of these
 * per VM-area/task.  A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (i.e. a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
        /* The first cache line has the info for VMA tree walking. */

        unsigned long vm_start;         /* Our start address within vm_mm. */
        unsigned long vm_end;           /* The first byte after our end address
                                           within vm_mm. */

        /* linked list of VM areas per task, sorted by address */
        struct vm_area_struct *vm_next, *vm_prev;

        struct rb_node vm_rb;

        /*
         * Largest free memory gap in bytes to the left of this VMA.
         * Either between this VMA and vma->vm_prev, or between one of the
         * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
         * get_unmapped_area find a free area of the right size.
         * (See the illustrative sketch after this struct.)
         */
        unsigned long rb_subtree_gap;

        /* Second cache line starts here. */

        struct mm_struct *vm_mm;        /* The address space we belong to. */
        pgprot_t vm_page_prot;          /* Access permissions of this VMA. */
        unsigned long vm_flags;         /* Flags, see mm.h. */

        /*
         * For areas with an address space and backing store,
         * linkage into the address_space->i_mmap interval tree, or
         * linkage of vma in the address_space->i_mmap_nonlinear list.
         */
        union {
                struct {
                        struct rb_node rb;
                        unsigned long rb_subtree_last;
                } linear;
                struct list_head nonlinear;
        } shared;

        /*
         * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
         * list, after a COW of one of the file pages.  A MAP_SHARED vma
         * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
         * or brk vma (with NULL file) can only be in an anon_vma list.
         */
        struct list_head anon_vma_chain; /* Serialized by mmap_sem &
                                          * page_table_lock */
        struct anon_vma *anon_vma;      /* Serialized by page_table_lock */

        /* Function pointers to deal with this struct. */
        const struct vm_operations_struct *vm_ops;

        /* Information about our backing store: */
        unsigned long vm_pgoff;         /* Offset (within vm_file) in PAGE_SIZE
                                           units, *not* PAGE_CACHE_SIZE */
        struct file * vm_file;          /* File we map to (can be NULL). */
        void * vm_private_data;         /* was vm_pte (shared mem) */

#ifndef CONFIG_MMU
        struct vm_region *vm_region;    /* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
        struct mempolicy *vm_policy;    /* NUMA policy for the VMA */
#endif
};
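
/*
 * Illustrative sketch (editorial, not part of this header): how
 * rb_subtree_gap is maintained.  Modeled on vma_compute_subtree_gap() in
 * mm/mmap.c, simplified here.  A node's value is the largest of its own
 * gap down to vm_prev and the cached maxima of its rbtree children, so
 * get_unmapped_area() can skip whole subtrees whose largest gap is too
 * small for the requested size.
 */
static inline unsigned long
example_compute_subtree_gap(struct vm_area_struct *vma)
{
        unsigned long max, subtree_gap;

        max = vma->vm_start;                    /* gap below this VMA... */
        if (vma->vm_prev)
                max -= vma->vm_prev->vm_end;    /* ...down to its predecessor */
        if (vma->vm_rb.rb_left) {
                subtree_gap = rb_entry(vma->vm_rb.rb_left,
                                struct vm_area_struct, vm_rb)->rb_subtree_gap;
                if (subtree_gap > max)
                        max = subtree_gap;
        }
        if (vma->vm_rb.rb_right) {
                subtree_gap = rb_entry(vma->vm_rb.rb_right,
                                struct vm_area_struct, vm_rb)->rb_subtree_gap;
                if (subtree_gap > max)
                        max = subtree_gap;
        }
        return max;
}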

struct core_thread {
        struct task_struct *task;
        struct core_thread *next;
};

struct core_state {
        atomic_t nr_threads;
        struct core_thread dumper;
        struct completion startup;
};

enum {
        MM_FILEPAGES,
        MM_ANONPAGES,
        MM_SWAPENTS,
        NR_MM_COUNTERS
};

#if USE_SPLIT_PTLOCKS && defined(CONFIG_MMU)
#define SPLIT_RSS_COUNTING
/* per-thread cached information */
struct task_rss_stat {
        int events;     /* for synchronization threshold */
        int count[NR_MM_COUNTERS];
};
#endif /* USE_SPLIT_PTLOCKS && CONFIG_MMU */

struct mm_rss_stat {
        atomic_long_t count[NR_MM_COUNTERS];
};
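
/*
 * Illustrative sketch (editorial, not part of this header): with
 * SPLIT_RSS_COUNTING, each thread batches RSS deltas in its task_rss_stat
 * and only folds them into the shared atomic mm_rss_stat once enough
 * events accumulate, avoiding an atomic op per fault.  Modeled loosely on
 * add_mm_counter_fast()/check_sync_rss_stat() in mm/memory.c; the
 * threshold value and helper name are illustrative, and the code is shown
 * as if struct mm_struct (defined below) were already visible.
 */
#ifdef SPLIT_RSS_COUNTING
#define EXAMPLE_RSS_EVENTS_THRESH       64
static inline void example_add_mm_counter(struct mm_struct *mm,
                                          struct task_rss_stat *stat,
                                          int member, int val)
{
        stat->count[member] += val;     /* cheap, thread-local update */
        if (unlikely(++stat->events >= EXAMPLE_RSS_EVENTS_THRESH)) {
                int i;

                stat->events = 0;
                for (i = 0; i < NR_MM_COUNTERS; i++) {  /* flush the batch */
                        if (stat->count[i]) {
                                atomic_long_add(stat->count[i],
                                                &mm->rss_stat.count[i]);
                                stat->count[i] = 0;
                        }
                }
        }
}
#endif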

struct mm_struct {
        struct vm_area_struct * mmap;           /* list of VMAs */
        struct rb_root mm_rb;
        struct vm_area_struct * mmap_cache;     /* last find_vma result */
#ifdef CONFIG_MMU
        unsigned long (*get_unmapped_area) (struct file *filp,
                                unsigned long addr, unsigned long len,
                                unsigned long pgoff, unsigned long flags);
#endif
        unsigned long mmap_base;                /* base of mmap area */
        unsigned long mmap_legacy_base;         /* base of mmap area in bottom-up allocations */
        unsigned long task_size;                /* size of task vm space */
        unsigned long highest_vm_end;           /* highest vma end address */
        pgd_t * pgd;
        atomic_t mm_users;                      /* How many users with user space? */
        atomic_t mm_count;                      /* How many references to "struct mm_struct" (users count as 1) */
        int map_count;                          /* number of VMAs */

        spinlock_t page_table_lock;             /* Protects page tables and some counters */
        struct rw_semaphore mmap_sem;

        struct list_head mmlist;                /* List of maybe swapped mm's.  These are globally strung
                                                 * together off init_mm.mmlist, and are protected
                                                 * by mmlist_lock
                                                 */

        unsigned long hiwater_rss;      /* High-watermark of RSS usage */
        unsigned long hiwater_vm;       /* High-water virtual memory usage */

        unsigned long total_vm;         /* Total pages mapped */
        unsigned long locked_vm;        /* Pages that have PG_mlocked set */
        unsigned long pinned_vm;        /* Refcount permanently increased */
        unsigned long shared_vm;        /* Shared pages (files) */
        unsigned long exec_vm;          /* VM_EXEC & ~VM_WRITE */
        unsigned long stack_vm;         /* VM_GROWSUP/DOWN */
        unsigned long def_flags;
        unsigned long nr_ptes;          /* Page table pages */
        unsigned long start_code, end_code, start_data, end_data;
        unsigned long start_brk, brk, start_stack;
        unsigned long arg_start, arg_end, env_start, env_end;

        unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

        /*
         * Special counters, in some configurations protected by the
         * page_table_lock, in other configurations by being atomic.
         */
        struct mm_rss_stat rss_stat;

        struct linux_binfmt *binfmt;

        cpumask_var_t cpu_vm_mask_var;

        /* Architecture-specific MM context */
        mm_context_t context;

        unsigned long flags; /* Must use atomic bitops to access the bits */

        struct core_state *core_state; /* coredumping support */
#ifdef CONFIG_AIO
        spinlock_t              ioctx_lock;
        struct hlist_head       ioctx_list;
#endif
#ifdef CONFIG_MM_OWNER
        /*
         * "owner" points to a task that is regarded as the canonical
         * user/owner of this mm. All of the following must be true in
         * order for it to be changed:
         *
         * current == mm->owner
         * current->mm != mm
         * new_owner->mm == mm
         * new_owner->alloc_lock is held
         */
        struct task_struct __rcu *owner;
#endif

        /* store ref to file /proc/<pid>/exe symlink points to */
        struct file *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
        struct mmu_notifier_mm *mmu_notifier_mm;
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
        struct cpumask cpumask_allocation;
#endif
#ifdef CONFIG_NUMA_BALANCING
        /*
         * numa_next_scan is the next time that the PTEs will be marked
         * pte_numa. NUMA hinting faults will gather statistics and migrate
         * pages to new nodes if necessary.
         */
        unsigned long numa_next_scan;

        /* numa_next_reset is when the PTE scanner period will be reset */
        unsigned long numa_next_reset;

        /* Restart point for scanning and setting pte_numa */
        unsigned long numa_scan_offset;

        /* numa_scan_seq prevents two threads setting pte_numa */
        int numa_scan_seq;

        /*
         * The first node a task was scheduled on.  PTE scanning is
         * deferred until the task runs on a different node, at which
         * point first_nid is set to NUMA_PTE_SCAN_ACTIVE and the scan
         * proceeds (see the values below).
         */
        int first_nid;
#endif
        struct uprobes_state uprobes_state;
};
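
/*
 * Illustrative sketch (editorial, not part of this header): the two-level
 * refcount above.  mm_users counts users of the address space (threads,
 * get_task_mm() callers); mm_count counts references to the struct
 * itself, with all mm_users collectively holding one.  Modeled on
 * mmput()/mmdrop() in kernel/fork.c and <linux/sched.h>, simplified:
 * exit_mmap_etc() and free_mm_struct() are hypothetical stand-ins for
 * the real teardown paths.
 */
extern void exit_mmap_etc(struct mm_struct *mm);        /* hypothetical */
extern void free_mm_struct(struct mm_struct *mm);       /* hypothetical */

static inline void example_mmdrop(struct mm_struct *mm)
{
        if (atomic_dec_and_test(&mm->mm_count))
                free_mm_struct(mm);     /* no references left at all */
}

static inline void example_mmput(struct mm_struct *mm)
{
        if (atomic_dec_and_test(&mm->mm_users)) {
                exit_mmap_etc(mm);      /* tear down VMAs, page tables, ... */
                example_mmdrop(mm);     /* drop the reference mm_users held */
        }
}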

/* first_nid will either be a valid NID or one of these values */
#define NUMA_PTE_SCAN_INIT      -1
#define NUMA_PTE_SCAN_ACTIVE    -2

static inline void mm_init_cpumask(struct mm_struct *mm)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
        mm->cpu_vm_mask_var = &mm->cpumask_allocation;
#endif
}

/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
        return mm->cpu_vm_mask_var;
}
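
/*
 * Illustrative usage (editorial, not part of this header): mm_cpumask()
 * tracks which CPUs may hold TLB entries for this mm.  A context switch
 * marks the incoming CPU, and a TLB shootdown then only needs to IPI the
 * CPUs in the mask.  The helper below is hypothetical, modeled on the
 * arch switch_mm()/flush_tlb_mm() pattern, simplified.
 */
static inline void example_enter_mm(int cpu, struct mm_struct *mm)
{
        cpumask_set_cpu(cpu, mm_cpumask(mm));   /* this CPU may now cache TLB entries */
}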

#endif /* _LINUX_MM_TYPES_H */
