Linux/arch/arm64/include/asm/tlbflush.h

/*
 * Based on arch/arm/include/asm/tlbflush.h
 *
 * Copyright (C) 1999-2003 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H

#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <asm/cputype.h>

extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);

extern struct cpu_tlb_fns cpu_tlb;

/*
 *      TLB Management
 *      ==============
 *
 *      The functions below implement these operations as inline TLBI
 *      instruction sequences.
 *
 *      The TLB-specific code is expected to perform whatever tests it needs
 *      to determine if it should invalidate the TLB for each call.  Start
 *      addresses are inclusive and end addresses are exclusive; it is safe to
 *      round these addresses down.
 *
 *      flush_tlb_all()
 *
 *              Invalidate the entire TLB.
 *
 *      flush_tlb_mm(mm)
 *
 *              Invalidate all TLB entries in a particular address space.
 *              - mm    - mm_struct describing address space
 *
 *      flush_tlb_range(vma,start,end)
 *
 *              Invalidate a range of TLB entries in the specified address
 *              space.
 *              - vma   - vm_area_struct describing the address range
 *              - start - start address (may not be aligned)
 *              - end   - end address (exclusive, may not be aligned)
 *
 *      flush_tlb_page(vma,vaddr)
 *
 *              Invalidate the specified page in the specified address space.
 *              - vma   - vm_area_struct describing the address range
 *              - vaddr - virtual address (may not be aligned)
 *
 *      flush_tlb_kernel_range(start,end)
 *
 *              Invalidate the TLB entries covering the specified range of
 *              kernel virtual addresses.
 *              - start - start address (may not be aligned)
 *              - end   - end address (exclusive, may not be aligned)
 */
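/*
 * Illustrative example (not part of this header): a typical user of this
 * interface updates the page tables first and only then invalidates the
 * TLB, in the style of the generic ptep_clear_flush() helper:
 *
 *      pte_t pte = ptep_get_and_clear(vma->vm_mm, addr, ptep);
 *      flush_tlb_page(vma, addr);
 *
 * The helpers below share a common barrier pattern: dsb(ishst) makes the
 * preceding page table updates visible to the table walkers, the broadcast
 * "...is" TLBI invalidates the stale entries across the Inner Shareable
 * domain, and dsb(ish) waits for the invalidation to complete (with an
 * additional isb() where the local instruction stream may depend on the
 * new mapping).
 */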
static inline void flush_tlb_all(void)
{
        dsb(ishst);
        asm("tlbi       vmalle1is");
        dsb(ish);
        isb();
}

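/*
 * TLBI ASIDE1IS takes the ASID in bits [63:48] of its operand register,
 * which is why only the shifted ASID is passed below; all entries tagged
 * with that ASID are invalidated across the Inner Shareable domain.
 */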
static inline void flush_tlb_mm(struct mm_struct *mm)
{
        unsigned long asid = (unsigned long)ASID(mm) << 48;

        dsb(ishst);
        asm("tlbi       aside1is, %0" : : "r" (asid));
        dsb(ish);
}

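/*
 * TLBI VAE1IS expects the virtual page number (VA[55:12]) in bits [43:0]
 * of its operand and the ASID in bits [63:48], hence the "uaddr >> 12"
 * and "ASID << 48" packing below.
 */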
static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long uaddr)
{
        unsigned long addr = uaddr >> 12 |
                ((unsigned long)ASID(vma->vm_mm) << 48);

        dsb(ishst);
        asm("tlbi       vae1is, %0" : : "r" (addr));
        dsb(ish);
}

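/*
 * start and end are converted to the same "page number | ASID" encoding
 * used for the TLBI operand, so the loops below advance by one page per
 * iteration in that encoded form: 1 << (PAGE_SHIFT - 12), i.e. 1 for 4KB
 * pages and 16 for 64KB pages.
 */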
static inline void __flush_tlb_range(struct vm_area_struct *vma,
                                     unsigned long start, unsigned long end)
{
        unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48;
        unsigned long addr;

        start = asid | (start >> 12);
        end = asid | (end >> 12);

        dsb(ishst);
        for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
                asm("tlbi vae1is, %0" : : "r"(addr));
        dsb(ish);
}

static inline void __flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        unsigned long addr;

        start >>= 12;
        end >>= 12;

        dsb(ishst);
        for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
                asm("tlbi vaae1is, %0" : : "r"(addr));
        dsb(ish);
        isb();
}

/*
 * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
 * necessarily a performance improvement.
 */
#define MAX_TLB_RANGE   (1024UL << PAGE_SHIFT)

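/*
 * With 4KB pages MAX_TLB_RANGE corresponds to 1024 pages (4MB), with 64KB
 * pages to 1024 pages (64MB). Ranges larger than that are handled by the
 * coarser fallbacks below (a full ASID invalidation for user ranges, a
 * full TLB invalidation for kernel ranges) instead of a long stream of
 * per-page TLBI operations.
 */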
static inline void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
{
        if ((end - start) <= MAX_TLB_RANGE)
                __flush_tlb_range(vma, start, end);
        else
                flush_tlb_mm(vma->vm_mm);
}

static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        if ((end - start) <= MAX_TLB_RANGE)
                __flush_tlb_kernel_range(start, end);
        else
                flush_tlb_all();
}
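/*
 * Illustrative example (not part of this header): code that tears down a
 * kernel mapping is expected to invalidate the corresponding TLB entries
 * afterwards, e.g. (clear_kernel_ptes() is a hypothetical helper):
 *
 *      clear_kernel_ptes(start, end);
 *      flush_tlb_kernel_range(start, end);
 *
 * The vmalloc/vmap teardown code in mm/vmalloc.c follows this pattern when
 * unmapping kernel virtual addresses.
 */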
/*
 * Used to invalidate the TLB (walk caches) corresponding to intermediate page
 * table levels (pgd/pud/pmd).
 */
static inline void __flush_tlb_pgtable(struct mm_struct *mm,
                                       unsigned long uaddr)
{
        unsigned long addr = uaddr >> 12 | ((unsigned long)ASID(mm) << 48);

        dsb(ishst);
        asm("tlbi       vae1is, %0" : : "r" (addr));
        dsb(ish);
}
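/*
 * Illustrative example (not part of this header): when a table entry is
 * removed, e.g. a pmd that pointed to a pte table is cleared, the cached
 * walk-cache entry for the old table should be invalidated before the
 * table page is freed or replaced:
 *
 *      pmd_clear(pmdp);
 *      __flush_tlb_pgtable(mm, addr);
 *
 * after which the old pte table page can be freed or remapped.
 */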
/*
 * On AArch64, cache coherency is handled via the set_pte_at() function.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
                                    unsigned long addr, pte_t *ptep)
{
        /*
         * set_pte() does not have a DSB for user mappings, so make sure that
         * the page table write is visible.
         */
        dsb(ishst);
}

#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#endif

#endif