
TOMOYO Linux Cross Reference
Linux/include/linux/mm_types.h


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H

#include <linux/mm_types_task.h>

#include <linux/auxvec.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/uprobes.h>
#include <linux/page-flags-layout.h>
#include <linux/workqueue.h>

#include <asm/mmu.h>

#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
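
/*
 * Editor's note on the arithmetic above: each auxv entry is an (id, value)
 * pair, hence the factor of two, and the "+ 1" leaves room for the
 * terminating AT_NULL entry.  Assuming AT_VECTOR_SIZE_BASE is 20 (its value
 * in <uapi/linux/auxvec.h> around this kernel version) and no arch extras,
 * this works out to 2 * (0 + 20 + 1) = 42 unsigned longs.
 */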

typedef int vm_fault_t;

struct address_space;
struct mem_cgroup;
struct hmm;

/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 *
 * If you allocate the page using alloc_pages(), you can use some of the
 * space in struct page for your own purposes.  The five words in the main
 * union are available, except for bit 0 of the first word which must be
 * kept clear.  Many users use this word to store a pointer to an object
 * which is guaranteed to be aligned.  If you use the same storage as
 * page->mapping, you must restore it to NULL before freeing the page.
 *
 * If your page will not be mapped to userspace, you can also use the four
 * bytes in the mapcount union, but you must call page_mapcount_reset()
 * before freeing it.
 *
 * If you want to use the refcount field, it must be used in such a way
 * that other CPUs temporarily incrementing and then decrementing the
 * refcount does not cause problems.  On receiving the page from
 * alloc_pages(), the refcount will be positive.
 *
 * If you allocate pages of order > 0, you can use some of the fields
 * in each subpage, but you may need to restore some of their values
 * afterwards.
 *
 * SLUB uses cmpxchg_double() to atomically update its freelist and
 * counters.  That requires that freelist & counters be adjacent and
 * double-word aligned.  We align all struct pages to double-word
 * boundaries, and ensure that 'freelist' is aligned within the
 * struct.
 */
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
#define _struct_page_alignment  __aligned(2 * sizeof(unsigned long))
#else
#define _struct_page_alignment
#endif

struct page {
        unsigned long flags;            /* Atomic flags, some possibly
                                         * updated asynchronously */
        /*
         * Five words (20/40 bytes) are available in this union.
         * WARNING: bit 0 of the first word is used for PageTail(). That
         * means the other users of this union MUST NOT use the bit to
         * avoid collision and a false-positive PageTail().
         */
        union {
                struct {        /* Page cache and anonymous pages */
                        /**
                         * @lru: Pageout list, e.g. active_list protected by
                         * zone_lru_lock.  Sometimes used as a generic list
                         * by the page owner.
                         */
                        struct list_head lru;
                        /* See page-flags.h for PAGE_MAPPING_FLAGS */
                        struct address_space *mapping;
                        pgoff_t index;          /* Our offset within mapping. */
                        /**
                         * @private: Mapping-private opaque data.
                         * Usually used for buffer_heads if PagePrivate.
                         * Used for swp_entry_t if PageSwapCache.
                         * Indicates order in the buddy system if PageBuddy.
                         */
                        unsigned long private;
                };
                struct {        /* slab, slob and slub */
                        union {
                                struct list_head slab_list;     /* uses lru */
                                struct {        /* Partial pages */
                                        struct page *next;
#ifdef CONFIG_64BIT
                                        int pages;      /* Nr of pages left */
                                        int pobjects;   /* Approximate count */
#else
                                        short int pages;
                                        short int pobjects;
#endif
                                };
                        };
                        struct kmem_cache *slab_cache; /* not slob */
                        /* Double-word boundary */
                        void *freelist;         /* first free object */
                        union {
                                void *s_mem;    /* slab: first object */
                                unsigned long counters;         /* SLUB */
                                struct {                        /* SLUB */
                                        unsigned inuse:16;
                                        unsigned objects:15;
                                        unsigned frozen:1;
                                };
                        };
                };
                struct {        /* Tail pages of compound page */
                        unsigned long compound_head;    /* Bit zero is set */

                        /* First tail page only */
                        unsigned char compound_dtor;
                        unsigned char compound_order;
                        atomic_t compound_mapcount;
                };
                struct {        /* Second tail page of compound page */
                        unsigned long _compound_pad_1;  /* compound_head */
                        unsigned long _compound_pad_2;
                        struct list_head deferred_list;
                };
                struct {        /* Page table pages */
                        unsigned long _pt_pad_1;        /* compound_head */
                        pgtable_t pmd_huge_pte; /* protected by page->ptl */
                        unsigned long _pt_pad_2;        /* mapping */
                        union {
                                struct mm_struct *pt_mm; /* x86 pgds only */
                                atomic_t pt_frag_refcount; /* powerpc */
                        };
#if ALLOC_SPLIT_PTLOCKS
                        spinlock_t *ptl;
#else
                        spinlock_t ptl;
#endif
                };
                struct {        /* ZONE_DEVICE pages */
                        /** @pgmap: Points to the hosting device page map. */
                        struct dev_pagemap *pgmap;
                        unsigned long hmm_data;
                        unsigned long _zd_pad_1;        /* uses mapping */
                };

                /** @rcu_head: You can use this to free a page by RCU. */
                struct rcu_head rcu_head;
        };

        union {         /* This union is 4 bytes in size. */
                /*
                 * If the page can be mapped to userspace, encodes the number
                 * of times this page is referenced by a page table.
                 */
                atomic_t _mapcount;

                /*
                 * If the page is neither PageSlab nor mappable to userspace,
                 * the value stored here may help determine what this page
                 * is used for.  See page-flags.h for a list of page types
                 * which are currently stored here.
                 */
                unsigned int page_type;

                unsigned int active;            /* SLAB */
                int units;                      /* SLOB */
        };

        /* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
        atomic_t _refcount;

#ifdef CONFIG_MEMCG
        struct mem_cgroup *mem_cgroup;
#endif

        /*
         * On machines where all RAM is mapped into kernel address space,
         * we can simply calculate the virtual address. On machines with
         * highmem some memory is mapped into kernel virtual memory
         * dynamically, so we need a place to store that address.
         * Note that this field could be 16 bits on x86 ... ;)
         *
         * Architectures with slow multiplication can define
         * WANT_PAGE_VIRTUAL in asm/page.h
         */
#if defined(WANT_PAGE_VIRTUAL)
        void *virtual;                  /* Kernel virtual address (NULL if
                                           not kmapped, i.e. highmem) */
#endif /* WANT_PAGE_VIRTUAL */

#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
        int _last_cpupid;
#endif
} _struct_page_alignment;
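
/*
 * Illustrative sketch (editor's addition, not part of this header): the
 * comment above struct page says alloc_pages() callers may reuse some of
 * its fields.  One common pattern stashes a driver pointer in page->private
 * via set_page_private()/page_private() from <linux/mm.h>, restoring it
 * before the page is freed.  "struct example_ctx" is a hypothetical type
 * used only for illustration.
 */
struct example_ctx;

static inline struct page *example_alloc_tagged_page(struct example_ctx *ctx)
{
        struct page *page = alloc_pages(GFP_KERNEL, 0);

        if (page)
                set_page_private(page, (unsigned long)ctx);
        return page;
}

static inline void example_free_tagged_page(struct page *page)
{
        set_page_private(page, 0);      /* restore before freeing */
        __free_pages(page, 0);
}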

#define PAGE_FRAG_CACHE_MAX_SIZE        __ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER       get_order(PAGE_FRAG_CACHE_MAX_SIZE)

struct page_frag_cache {
        void *va;
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
        __u16 offset;
        __u16 size;
#else
        __u32 offset;
#endif
        /* We maintain a pagecount bias, so that we don't dirty the cache
         * line containing page->_refcount every time we allocate a
         * fragment.
         */
        unsigned int            pagecnt_bias;
        bool pfmemalloc;
};
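
/*
 * Illustrative sketch (editor's addition): networking code typically pairs
 * this cache with page_frag_alloc()/page_frag_free() from <linux/gfp.h>,
 * often with one cache per CPU.  The per-CPU placement and GFP_ATOMIC here
 * are illustrative assumptions, not a fixed requirement.
 */
static DEFINE_PER_CPU(struct page_frag_cache, example_frag_cache);

static void *example_alloc_frag(unsigned int size)
{
        struct page_frag_cache *nc = this_cpu_ptr(&example_frag_cache);

        return page_frag_alloc(nc, size, GFP_ATOMIC);
}
/* The returned fragment is later released with page_frag_free(ptr). */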

typedef unsigned long vm_flags_t;

/*
 * A region containing a mapping of a non-memory backed file under NOMMU
 * conditions.  These are held in a global tree and are pinned by the VMAs that
 * map parts of them.
 */
struct vm_region {
        struct rb_node  vm_rb;          /* link in global region tree */
        vm_flags_t      vm_flags;       /* VMA vm_flags */
        unsigned long   vm_start;       /* start address of region */
        unsigned long   vm_end;         /* region initialised to here */
        unsigned long   vm_top;         /* region allocated to here */
        unsigned long   vm_pgoff;       /* the offset in vm_file corresponding to vm_start */
        struct file     *vm_file;       /* the backing file or NULL */

        int             vm_usage;       /* region usage count (access under nommu_region_sem) */
        bool            vm_icache_flushed : 1; /* true if the icache has been flushed for
                                                * this region */
};

#ifdef CONFIG_USERFAULTFD
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
struct vm_userfaultfd_ctx {
        struct userfaultfd_ctx *ctx;
};
#else /* CONFIG_USERFAULTFD */
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
struct vm_userfaultfd_ctx {};
#endif /* CONFIG_USERFAULTFD */

/*
 * This struct describes a virtual memory area. There is one of these
 * per VM-area/task.  A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (i.e. a shared
 * library, the executable area, etc).
 */
struct vm_area_struct {
        /* The first cache line has the info for VMA tree walking. */

        unsigned long vm_start;         /* Our start address within vm_mm. */
        unsigned long vm_end;           /* The first byte after our end address
                                           within vm_mm. */

        /* linked list of VM areas per task, sorted by address */
        struct vm_area_struct *vm_next, *vm_prev;

        struct rb_node vm_rb;

        /*
         * Largest free memory gap in bytes to the left of this VMA.
         * Either between this VMA and vma->vm_prev, or between one of the
         * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
         * get_unmapped_area find a free area of the right size.
         */
        unsigned long rb_subtree_gap;

        /* Second cache line starts here. */

        struct mm_struct *vm_mm;        /* The address space we belong to. */
        pgprot_t vm_page_prot;          /* Access permissions of this VMA. */
        unsigned long vm_flags;         /* Flags, see mm.h. */

        /*
         * For areas with an address space and backing store,
         * linkage into the address_space->i_mmap interval tree.
         */
        struct {
                struct rb_node rb;
                unsigned long rb_subtree_last;
        } shared;

        /*
         * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
         * list, after a COW of one of the file pages.  A MAP_SHARED vma
         * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
         * or brk vma (with NULL file) can only be in an anon_vma list.
         */
        struct list_head anon_vma_chain; /* Serialized by mmap_sem &
                                          * page_table_lock */
        struct anon_vma *anon_vma;      /* Serialized by page_table_lock */

        /* Function pointers to deal with this struct. */
        const struct vm_operations_struct *vm_ops;

        /* Information about our backing store: */
        unsigned long vm_pgoff;         /* Offset (within vm_file) in PAGE_SIZE
                                           units */
        struct file *vm_file;           /* File we map to (can be NULL). */
        void *vm_private_data;          /* was vm_pte (shared mem) */

        atomic_long_t swap_readahead_info;
#ifndef CONFIG_MMU
        struct vm_region *vm_region;    /* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
        struct mempolicy *vm_policy;    /* NUMA policy for the VMA */
#endif
        struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;
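
/*
 * Illustrative sketch (editor's addition): the vm_next/vm_prev links above
 * form the per-mm, address-sorted VMA list hanging off mm->mmap.  Walking
 * it requires mmap_sem held at least for read, as below.
 */
static void example_dump_vmas(struct mm_struct *mm)
{
        struct vm_area_struct *vma;

        down_read(&mm->mmap_sem);
        for (vma = mm->mmap; vma; vma = vma->vm_next)
                printk(KERN_DEBUG "vma %lx-%lx flags %lx\n",
                       vma->vm_start, vma->vm_end, vma->vm_flags);
        up_read(&mm->mmap_sem);
}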

struct core_thread {
        struct task_struct *task;
        struct core_thread *next;
};

struct core_state {
        atomic_t nr_threads;
        struct core_thread dumper;
        struct completion startup;
};

struct kioctx_table;
struct mm_struct {
        struct {
                struct vm_area_struct *mmap;            /* list of VMAs */
                struct rb_root mm_rb;
                u64 vmacache_seqnum;                   /* per-thread vmacache */
#ifdef CONFIG_MMU
                unsigned long (*get_unmapped_area) (struct file *filp,
                                unsigned long addr, unsigned long len,
                                unsigned long pgoff, unsigned long flags);
#endif
                unsigned long mmap_base;        /* base of mmap area */
                unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
                /* Base addresses for compatible mmap() */
                unsigned long mmap_compat_base;
                unsigned long mmap_compat_legacy_base;
#endif
                unsigned long task_size;        /* size of task vm space */
                unsigned long highest_vm_end;   /* highest vma end address */
                pgd_t *pgd;

                /**
                 * @mm_users: The number of users including userspace.
                 *
                 * Use mmget()/mmget_not_zero()/mmput() to modify. When this
                 * drops to 0 (i.e. when the task exits and there are no other
                 * temporary reference holders), we also release a reference on
                 * @mm_count (which may then free the &struct mm_struct if
                 * @mm_count also drops to 0).
                 */
                atomic_t mm_users;

                /**
                 * @mm_count: The number of references to &struct mm_struct
                 * (@mm_users counts as 1).
                 *
                 * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
                 * &struct mm_struct is freed.
                 */
                atomic_t mm_count;

#ifdef CONFIG_MMU
                atomic_long_t pgtables_bytes;   /* PTE page table pages */
#endif
                int map_count;                  /* number of VMAs */

                spinlock_t page_table_lock; /* Protects page tables and some
                                             * counters
                                             */
                struct rw_semaphore mmap_sem;

                struct list_head mmlist; /* List of maybe swapped mm's. These
                                          * are globally strung together off
                                          * init_mm.mmlist, and are protected
                                          * by mmlist_lock
                                          */

                unsigned long hiwater_rss; /* High-watermark of RSS usage */
                unsigned long hiwater_vm;  /* High-water virtual memory usage */

                unsigned long total_vm;    /* Total pages mapped */
                unsigned long locked_vm;   /* Pages that have PG_mlocked set */
                unsigned long pinned_vm;   /* Refcount permanently increased */
                unsigned long data_vm;     /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
                unsigned long exec_vm;     /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
                unsigned long stack_vm;    /* VM_STACK */
                unsigned long def_flags;

                spinlock_t arg_lock; /* protect the below fields */
                unsigned long start_code, end_code, start_data, end_data;
                unsigned long start_brk, brk, start_stack;
                unsigned long arg_start, arg_end, env_start, env_end;

                unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

                /*
                 * Special counters, in some configurations protected by the
                 * page_table_lock, in other configurations by being atomic.
                 */
                struct mm_rss_stat rss_stat;

                struct linux_binfmt *binfmt;

                /* Architecture-specific MM context */
                mm_context_t context;

                unsigned long flags; /* Must use atomic bitops to access */

                struct core_state *core_state; /* coredumping support */
#ifdef CONFIG_MEMBARRIER
                atomic_t membarrier_state;
#endif
#ifdef CONFIG_AIO
                spinlock_t                      ioctx_lock;
                struct kioctx_table __rcu       *ioctx_table;
#endif
#ifdef CONFIG_MEMCG
                /*
                 * "owner" points to a task that is regarded as the canonical
                 * user/owner of this mm. All of the following must be true in
                 * order for it to be changed:
                 *
                 * current == mm->owner
                 * current->mm != mm
                 * new_owner->mm == mm
                 * new_owner->alloc_lock is held
                 */
                struct task_struct __rcu *owner;
#endif
                struct user_namespace *user_ns;

                /* store ref to file /proc/<pid>/exe symlink points to */
                struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
                struct mmu_notifier_mm *mmu_notifier_mm;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
                pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_NUMA_BALANCING
                /*
                 * numa_next_scan is the next time that the PTEs will be marked
                 * pte_numa. NUMA hinting faults will gather statistics and
                 * migrate pages to new nodes if necessary.
                 */
                unsigned long numa_next_scan;

                /* Restart point for scanning and setting pte_numa */
                unsigned long numa_scan_offset;

                /* numa_scan_seq prevents two threads from setting pte_numa */
                int numa_scan_seq;
#endif
                /*
                 * An operation with batched TLB flushing is going on. Anything
                 * that can move process memory needs to flush the TLB when
                 * moving a PROT_NONE or PROT_NUMA mapped page.
                 */
                atomic_t tlb_flush_pending;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
                /* See flush_tlb_batched_pending() */
                bool tlb_flush_batched;
#endif
                struct uprobes_state uprobes_state;
#ifdef CONFIG_HUGETLB_PAGE
                atomic_long_t hugetlb_usage;
#endif
                struct work_struct async_put_work;

#if IS_ENABLED(CONFIG_HMM)
                /* HMM needs to track a few things per mm */
                struct hmm *hmm;
#endif
        } __randomize_layout;

        /*
         * The mm_cpumask needs to be at the end of mm_struct, because it
         * is dynamically sized based on nr_cpu_ids.
         */
        unsigned long cpu_bitmap[];
};
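
/*
 * Illustrative sketch (editor's addition): the two reference counts above
 * are manipulated through helpers from <linux/sched/mm.h>.  A user that
 * needs the address space (page tables, VMAs) alive takes an @mm_users
 * reference; a user that only needs the struct mm_struct itself pinned
 * takes an @mm_count reference.
 */
static inline void example_pin_mm(struct mm_struct *mm)
{
        mmgrab(mm);     /* pin struct mm_struct itself (@mm_count) */
}

static inline bool example_use_mm(struct mm_struct *mm)
{
        if (!mmget_not_zero(mm))        /* fails once all users are gone */
                return false;
        /* ... operate on the address space ... */
        mmput(mm);
        return true;
}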

extern struct mm_struct init_mm;

/* Pointer magic because the dynamic array size confuses some compilers. */
static inline void mm_init_cpumask(struct mm_struct *mm)
{
        unsigned long cpu_bitmap = (unsigned long)mm;

        cpu_bitmap += offsetof(struct mm_struct, cpu_bitmap);
        cpumask_clear((struct cpumask *)cpu_bitmap);
}

/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
        return (struct cpumask *)&mm->cpu_bitmap;
}
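
/*
 * Illustrative sketch (editor's addition): architectures track which CPUs
 * may hold TLB entries for an mm by marking the current CPU in mm_cpumask()
 * at context-switch time, roughly as below (preemption is disabled on that
 * path, so smp_processor_id() is safe).
 */
static inline void example_note_mm_active(struct mm_struct *mm)
{
        unsigned int cpu = smp_processor_id();

        if (!cpumask_test_cpu(cpu, mm_cpumask(mm)))
                cpumask_set_cpu(cpu, mm_cpumask(mm));
}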

struct mmu_gather;
extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
                                unsigned long start, unsigned long end);
extern void tlb_finish_mmu(struct mmu_gather *tlb,
                                unsigned long start, unsigned long end);

static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
        atomic_set(&mm->tlb_flush_pending, 0);
}

static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
        atomic_inc(&mm->tlb_flush_pending);
        /*
         * The only time this value is relevant is when there are indeed pages
         * to flush. And we'll only flush pages after changing them, which
         * requires the PTL.
         *
         * So the ordering here is:
         *
         *      atomic_inc(&mm->tlb_flush_pending);
         *      spin_lock(&ptl);
         *      ...
         *      set_pte_at();
         *      spin_unlock(&ptl);
         *
         *                              spin_lock(&ptl)
         *                              mm_tlb_flush_pending();
         *                              ....
         *                              spin_unlock(&ptl);
         *
         *      flush_tlb_range();
         *      atomic_dec(&mm->tlb_flush_pending);
         *
         * Because the increment is constrained by the PTL unlock, it thus
         * ensures that the increment is visible if the PTE modification is
         * visible. After all, if there is no PTE modification, nobody cares
         * about TLB flushes either.
         *
         * This very much relies on users (mm_tlb_flush_pending() and
         * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
         * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
         * locks (PPC) the unlock of one doesn't order against the lock of
         * another PTL.
         *
         * The decrement is ordered by the flush_tlb_range(), such that
         * mm_tlb_flush_pending() will not return false unless all flushes
         * have completed.
         */
}

static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
        /*
         * See inc_tlb_flush_pending().
         *
         * This cannot be smp_mb__before_atomic() because smp_mb() simply does
         * not order against TLB invalidate completion, which is what we need.
         *
         * Therefore we must rely on tlb_flush_*() to guarantee order.
         */
        atomic_dec(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
        /*
         * Must be called after having acquired the PTL; orders against that
         * PTL's release, and therefore ensures that if we observe the modified
         * PTE we must also observe the increment from inc_tlb_flush_pending().
         *
         * That is, it only guarantees to return true if there is a flush
         * pending for _this_ PTL.
         */
        return atomic_read(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
        /*
         * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
         * for which there is a TLB flush pending in order to guarantee
         * we've seen both that PTE modification and the increment.
         *
         * (There is no requirement on actually still holding the PTL; that
         * is irrelevant.)
         */
        return atomic_read(&mm->tlb_flush_pending) > 1;
}
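
/*
 * Illustrative sketch (editor's addition): a writer that changes PTEs and
 * then flushes follows the pattern documented in inc_tlb_flush_pending()
 * above.  The PTE modification under the PTL is elided; flush_tlb_range()
 * is the usual arch-provided flush primitive from <asm/tlbflush.h>.
 */
static inline void example_change_protection(struct vm_area_struct *vma,
                                             unsigned long start,
                                             unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        inc_tlb_flush_pending(mm);
        /* ... take the PTL, modify the PTEs, drop the PTL ... */
        flush_tlb_range(vma, start, end);
        dec_tlb_flush_pending(mm);
}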

struct vm_fault;

struct vm_special_mapping {
        const char *name;       /* The name, e.g. "[vdso]". */

        /*
         * If .fault is not provided, this points to a
         * NULL-terminated array of pages that back the special mapping.
         *
         * This must not be NULL unless .fault is provided.
         */
        struct page **pages;

        /*
         * If non-NULL, then this is called to resolve page faults
         * on the special mapping.  If used, .pages is not checked.
         */
        vm_fault_t (*fault)(const struct vm_special_mapping *sm,
                                struct vm_area_struct *vma,
                                struct vm_fault *vmf);

        int (*mremap)(const struct vm_special_mapping *sm,
                     struct vm_area_struct *new_vma);
};
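
/*
 * Illustrative sketch (editor's addition): a vdso-like mapping is usually
 * declared statically and installed with _install_special_mapping() from
 * <linux/mm.h>.  The page array contents, address and vm_flags here are
 * assumptions made for the example.
 */
static struct page *example_pages[2];   /* one page + NULL terminator */

static const struct vm_special_mapping example_mapping = {
        .name  = "[example]",
        .pages = example_pages,
};

/* Typically called from binfmt/arch setup with mmap_sem held for write. */
static int example_install(struct mm_struct *mm, unsigned long addr)
{
        struct vm_area_struct *vma;

        vma = _install_special_mapping(mm, addr, PAGE_SIZE,
                                       VM_READ | VM_MAYREAD,
                                       &example_mapping);
        return PTR_ERR_OR_ZERO(vma);
}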

enum tlb_flush_reason {
        TLB_FLUSH_ON_TASK_SWITCH,
        TLB_REMOTE_SHOOTDOWN,
        TLB_LOCAL_SHOOTDOWN,
        TLB_LOCAL_MM_SHOOTDOWN,
        TLB_REMOTE_SEND_IPI,
        NR_TLB_FLUSH_REASONS,
};

/*
 * A swap entry has to fit into an "unsigned long", as the entry is hidden
 * in the "index" field of the swapper address space.
 */
typedef struct {
        unsigned long val;
} swp_entry_t;
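
/*
 * Illustrative sketch (editor's addition): the type/offset packing for
 * swp_entry_t lives in <linux/swapops.h>; a (type, offset) pair round-trips
 * through the single unsigned long above.
 */
static inline bool example_swp_roundtrip(unsigned int type, pgoff_t offset)
{
        swp_entry_t entry = swp_entry(type, offset);

        return swp_type(entry) == type && swp_offset(entry) == offset;
}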

#endif /* _LINUX_MM_TYPES_H */
