TOMOYO Linux Cross Reference
Linux/arch/xtensa/mm/tlb.c

/*
 * arch/xtensa/mm/tlb.c
 *
 * Logic that manipulates the Xtensa MMU.  Derived from MIPS.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2003 Tensilica Inc.
 *
 * Joe Taylor
 * Chris Zankel <chris@zankel.net>
 * Marc Gauthier
 */

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

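/*
 * Invalidate every entry of the auto-refill (ARF) ways of the instruction
 * and data TLBs.  The entry value passed to the invalidate routines encodes
 * the way number in its low bits and the entry index shifted up by
 * PAGE_SHIFT.  The "no_isync" variants are used inside the loops so that a
 * single isync at the end suffices.
 */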
static inline void __flush_itlb_all (void)
{
        int w, i;

        for (w = 0; w < ITLB_ARF_WAYS; w++) {
                for (i = 0; i < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); i++) {
                        int e = w + (i << PAGE_SHIFT);
                        invalidate_itlb_entry_no_isync(e);
                }
        }
        asm volatile ("isync\n");
}

static inline void __flush_dtlb_all (void)
{
        int w, i;

        for (w = 0; w < DTLB_ARF_WAYS; w++) {
                for (i = 0; i < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); i++) {
                        int e = w + (i << PAGE_SHIFT);
                        invalidate_dtlb_entry_no_isync(e);
                }
        }
        asm volatile ("isync\n");
}

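/* Flush the entire ITLB and DTLB of the local CPU. */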
void local_flush_tlb_all(void)
{
        __flush_itlb_all();
        __flush_dtlb_all();
}

/* If mm is the current mm, we simply assign the current task a new ASID,
 * thereby invalidating all previous TLB entries.  If mm is someone else's
 * user mapping, we invalidate its context, so that a new context will be
 * assigned to it when that mapping is next switched in.
 */

void local_flush_tlb_mm(struct mm_struct *mm)
{
        int cpu = smp_processor_id();

        if (mm == current->active_mm) {
                unsigned long flags;
                local_irq_save(flags);
                mm->context.asid[cpu] = NO_CONTEXT;
                activate_context(mm, cpu);
                local_irq_restore(flags);
        } else {
                mm->context.asid[cpu] = NO_CONTEXT;
                mm->context.cpu = -1;
        }
}

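/*
 * _TLB_ENTRIES is the larger of the two auto-refill TLB sizes (for example,
 * 4 ways with 1 << 2 entries each would give 16 entries).  It is used below
 * as the threshold for deciding whether a range flush is cheaper done page
 * by page or as a flush of the whole context.
 */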
#define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)
#define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)
#if _ITLB_ENTRIES > _DTLB_ENTRIES
# define _TLB_ENTRIES _ITLB_ENTRIES
#else
# define _TLB_ENTRIES _DTLB_ENTRIES
#endif

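/*
 * Flush the TLB entries covering a virtual address range of a user mapping.
 * Nothing needs to be done if the mm has no ASID on this CPU.  If the range
 * spans no more pages than the TLB has entries, temporarily switch RASID to
 * the mm's ASID and invalidate the pages one by one (ITLB and DTLB for
 * executable mappings, DTLB only otherwise); larger ranges are handled by
 * dropping the mm's context altogether via local_flush_tlb_mm().
 */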
void local_flush_tlb_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end)
{
        int cpu = smp_processor_id();
        struct mm_struct *mm = vma->vm_mm;
        unsigned long flags;

        if (mm->context.asid[cpu] == NO_CONTEXT)
                return;

        pr_debug("[tlbrange<%02lx,%08lx,%08lx>]\n",
                 (unsigned long)mm->context.asid[cpu], start, end);
        local_irq_save(flags);

        if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) {
                int oldpid = get_rasid_register();

                set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
                start &= PAGE_MASK;
                if (vma->vm_flags & VM_EXEC)
                        while(start < end) {
                                invalidate_itlb_mapping(start);
                                invalidate_dtlb_mapping(start);
                                start += PAGE_SIZE;
                        }
                else
                        while(start < end) {
                                invalidate_dtlb_mapping(start);
                                start += PAGE_SIZE;
                        }

                set_rasid_register(oldpid);
        } else {
                local_flush_tlb_mm(mm);
        }
        local_irq_restore(flags);
}

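/*
 * Flush the TLB entry for a single page of a user mapping.  As above, the
 * RASID register is temporarily loaded with the mm's ASID so that the
 * per-page invalidation applies to that mm's context.
 */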
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        int cpu = smp_processor_id();
        struct mm_struct* mm = vma->vm_mm;
        unsigned long flags;
        int oldpid;

        if (mm->context.asid[cpu] == NO_CONTEXT)
                return;

        local_irq_save(flags);

        oldpid = get_rasid_register();
        set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));

        if (vma->vm_flags & VM_EXEC)
                invalidate_itlb_mapping(page);
        invalidate_dtlb_mapping(page);

        set_rasid_register(oldpid);

        local_irq_restore(flags);
}

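/*
 * Flush the TLB entries for a range of kernel virtual addresses.  Per-page
 * invalidation is only attempted when the range lies entirely between
 * TASK_SIZE and PAGE_OFFSET and covers fewer pages than the TLB has entries;
 * anything else falls back to a full local flush.
 */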
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        if (end > start && start >= TASK_SIZE && end <= PAGE_OFFSET &&
            end - start < _TLB_ENTRIES << PAGE_SHIFT) {
                start &= PAGE_MASK;
                while (start < end) {
                        invalidate_itlb_mapping(start);
                        invalidate_dtlb_mapping(start);
                        start += PAGE_SIZE;
                }
        } else {
                local_flush_tlb_all();
        }
}

#ifdef CONFIG_DEBUG_TLB_SANITY

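/*
 * Look up the PTE that maps vaddr in the current task's page table (falling
 * back to active_mm when the task has no mm, as for kernel threads) and
 * return its raw value, or 0 if no valid mapping exists at any level of the
 * walk.
 */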
static unsigned get_pte_for_vaddr(unsigned vaddr)
{
        struct task_struct *task = get_current();
        struct mm_struct *mm = task->mm;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;

        if (!mm)
                mm = task->active_mm;
        pgd = pgd_offset(mm, vaddr);
        if (pgd_none_or_clear_bad(pgd))
                return 0;
        pmd = pmd_offset(pgd, vaddr);
        if (pmd_none_or_clear_bad(pmd))
                return 0;
        pte = pte_offset_map(pmd, vaddr);
        if (!pte)
                return 0;
        return pte_val(*pte);
}

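/*
 * Severity levels returned by check_tlb_entry().  check_tlb_sanity() turns
 * TLB_INSANE into a BUG() and TLB_SUSPICIOUS into a WARN().
 */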
enum {
        TLB_SUSPICIOUS  = 1,
        TLB_INSANE      = 2,
};

static void tlb_insane(void)
{
        BUG_ON(1);
}

static void tlb_suspicious(void)
{
        WARN_ON(1);
}

/*
 * Check that TLB entries with the kernel ASID (1) map kernel addresses
 * (>= TASK_SIZE), and that TLB entries with user ASIDs (>= 4) map user
 * addresses (< TASK_SIZE).
 *
 * Also check that valid TLB entries either have the same PA as the PTE, or
 * that the PTE is marked non-present.  A non-present PTE whose page has a
 * non-zero refcount and a zero mapcount is normal for a batched TLB flush
 * operation.  A zero refcount means the page was freed prematurely.  A
 * non-zero mapcount is unusual, but does not necessarily mean an error, so
 * it is only marked as suspicious.
 */
static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
{
        unsigned tlbidx = w | (e << PAGE_SHIFT);
        unsigned r0 = dtlb ?
                read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
        unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
        unsigned pte = get_pte_for_vaddr(vpn);
        unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
        unsigned tlb_asid = r0 & ASID_MASK;
        bool kernel = tlb_asid == 1;
        int rc = 0;

        if (tlb_asid > 0 && ((vpn < TASK_SIZE) == kernel)) {
                pr_err("%cTLB: way: %u, entry: %u, VPN %08x in %s PTE\n",
                                dtlb ? 'D' : 'I', w, e, vpn,
                                kernel ? "kernel" : "user");
                rc |= TLB_INSANE;
        }

        if (tlb_asid == mm_asid) {
                unsigned r1 = dtlb ? read_dtlb_translation(tlbidx) :
                        read_itlb_translation(tlbidx);
                if ((pte ^ r1) & PAGE_MASK) {
                        pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
                                        dtlb ? 'D' : 'I', w, e, r0, r1, pte);
                        if (pte == 0 || !pte_present(__pte(pte))) {
                                struct page *p = pfn_to_page(r1 >> PAGE_SHIFT);
                                pr_err("page refcount: %d, mapcount: %d\n",
                                                page_count(p),
                                                page_mapcount(p));
                                if (!page_count(p))
                                        rc |= TLB_INSANE;
                                else if (page_mapcount(p))
                                        rc |= TLB_SUSPICIOUS;
                        } else {
                                rc |= TLB_INSANE;
                        }
                }
        }
        return rc;
}

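/*
 * Walk every auto-refill way/entry of both TLBs with interrupts disabled,
 * accumulate the per-entry results, and escalate: BUG() if any entry was
 * insane, WARN() if any was merely suspicious.
 */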
void check_tlb_sanity(void)
{
        unsigned long flags;
        unsigned w, e;
        int bug = 0;

        local_irq_save(flags);
        for (w = 0; w < DTLB_ARF_WAYS; ++w)
                for (e = 0; e < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); ++e)
                        bug |= check_tlb_entry(w, e, true);
        for (w = 0; w < ITLB_ARF_WAYS; ++w)
                for (e = 0; e < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); ++e)
                        bug |= check_tlb_entry(w, e, false);
        if (bug & TLB_INSANE)
                tlb_insane();
        if (bug & TLB_SUSPICIOUS)
                tlb_suspicious();
        local_irq_restore(flags);
}

#endif /* CONFIG_DEBUG_TLB_SANITY */
