TOMOYO Linux Cross Reference
Linux/arch/mips64/mm/tlb-r4k.c

/*
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * MIPS64 CPU variant specific MMU routines.
 * These routines are not optimized in any way; they are written generically
 * so that they can be used on all MIPS64 compliant CPUs, and in an attempt
 * not to break anything for the R4xx0 style CPUs.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/system.h>

#undef DEBUG_TLB
#undef DEBUG_TLBUPDATE

extern void except_vec1_r4k(void);

/* CP0 hazard avoidance. */
#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
                                     "nop; nop; nop; nop; nop; nop;\n\t" \
                                     ".set reorder\n\t")

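/*
 * Two notes on what follows:
 *
 * BARRIER: on the R4xx0-style pipelines targeted here, a CP0 write
 * (mtc0/dmtc0) is not immediately visible to nearby instructions that
 * use its result, and the hardware does not interlock, so software must
 * pad the hazard window itself; six nops looks like a conservative
 * bound for these cores.
 *
 * "Impossible VPN2" values: they are built from XKPHYS, an unmapped
 * 64-bit segment, so an entry placed there can never match a translated
 * address.  Each invalidated slot gets a distinct VPN2 (stride 0x2000,
 * one TLB entry mapping an even/odd pair of 4K pages) because duplicate
 * TLB entries raise a machine check on R4000-style CPUs.
 */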
void local_flush_tlb_all(void)
{
        unsigned long flags;
        unsigned long old_ctx;
        int entry;

#ifdef DEBUG_TLB
        printk("[tlball]");
#endif

        local_irq_save(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        write_c0_entryhi(XKPHYS);
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);
        BARRIER;

        entry = read_c0_wired();

        /* Blast 'em all away. */
        while (entry < current_cpu_data.tlbsize) {
                /* Make sure all entries differ. */
                write_c0_entryhi(XKPHYS + entry * 0x2000);
                write_c0_index(entry);
                BARRIER;
                tlb_write_indexed();
                BARRIER;
                entry++;
        }
        BARRIER;
        write_c0_entryhi(old_ctx);
        local_irq_restore(flags);
}

void local_flush_tlb_mm(struct mm_struct *mm)
{
        int cpu = smp_processor_id();

        if (cpu_context(cpu, mm) != 0) {
#ifdef DEBUG_TLB
                printk("[tlbmm<%lu>]", (unsigned long) mm->context);
#endif
                drop_mmu_context(mm, cpu);
        }
}

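/*
 * Strategy: for a sufficiently small range it is cheaper to probe for
 * and invalidate the individual entries; beyond half the TLB it is
 * cheaper to drop the mm's context (new ASID), which invalidates all
 * of its old entries lazily.  The loop below steps by two pages at a
 * time because one TLB entry covers an even/odd page pair.
 */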
void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
                           unsigned long end)
{
        int cpu = smp_processor_id();

        if (cpu_context(cpu, mm) != 0) {
                unsigned long flags;
                int size;

#ifdef DEBUG_TLB
                printk("[tlbrange<%02lx,%08lx,%08lx>]",
                       (mm->context & ASID_MASK), start, end);
#endif
                local_irq_save(flags);
                size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
                size = (size + 1) >> 1;         /* count VPN2 pairs */
                if (size <= current_cpu_data.tlbsize / 2) {
                        int oldpid = read_c0_entryhi();
                        int newpid = cpu_asid(cpu, mm);

                        start &= (PAGE_MASK << 1);
                        end += ((PAGE_SIZE << 1) - 1);
                        end &= (PAGE_MASK << 1);
                        while (start < end) {
                                int idx;

                                write_c0_entryhi(start | newpid);
                                start += (PAGE_SIZE << 1);
                                BARRIER;
                                tlb_probe();
                                BARRIER;
                                idx = read_c0_index();
                                write_c0_entrylo0(0);
                                write_c0_entrylo1(0);
                                if (idx < 0)
                                        continue;
                                /* Make sure all entries differ. */
                                write_c0_entryhi(XKPHYS + idx * 0x2000);
                                BARRIER;
                                tlb_write_indexed();
                                BARRIER;
                        }
                        write_c0_entryhi(oldpid);
                } else {
                        drop_mmu_context(mm, cpu);
                }
                local_irq_restore(flags);
        }
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        int cpu = smp_processor_id();

        if (cpu_context(cpu, vma->vm_mm) != 0) {
                unsigned long flags;
                unsigned long oldpid, newpid;
                int idx;        /* must be signed: a probe miss yields idx < 0 */

#ifdef DEBUG_TLB
                printk("[tlbpage<%lu,%08lx>]",
                       (unsigned long) vma->vm_mm->context, page);
#endif
                newpid = cpu_asid(cpu, vma->vm_mm);
                page &= (PAGE_MASK << 1);
                local_irq_save(flags);
                oldpid = read_c0_entryhi();
                write_c0_entryhi(page | newpid);
                BARRIER;
                tlb_probe();
                BARRIER;
                idx = read_c0_index();
                write_c0_entrylo0(0);
                write_c0_entrylo1(0);
                if (idx < 0)
                        goto finish;
                /* Make sure all entries differ. */
                write_c0_entryhi(XKPHYS + idx * 0x2000);
                BARRIER;
                tlb_write_indexed();
        finish:
                BARRIER;
                write_c0_entryhi(oldpid);
                local_irq_restore(flags);
        }
}

/*
 * Remove one kernel space TLB entry.  This entry is assumed to be marked
 * global so we don't do the ASID thing.
 */
void local_flush_tlb_one(unsigned long page)
{
        unsigned long flags;
        int oldpid, idx;

        page &= (PAGE_MASK << 1);
        oldpid = read_c0_entryhi();

        local_irq_save(flags);
        write_c0_entryhi(page);
        BARRIER;
        tlb_probe();
        BARRIER;
        idx = read_c0_index();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);
        if (idx >= 0) {
                /* Make sure all entries differ. */
                write_c0_entryhi(KSEG0 + idx * 0x2000);
                tlb_write_indexed();
        }
        BARRIER;
        write_c0_entryhi(oldpid);
        local_irq_restore(flags);
}

/*
 * Updates the TLB with the new pte(s).
 */
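/*
 * The two EntryLo writes below load the even and odd halves of one TLB
 * entry from adjacent PTEs.  With this kernel's PTE layout the hardware
 * fields (PFN, cache attribute, dirty, valid, global) sit six bits
 * above their EntryLo positions, hence the ">> 6".
 */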
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
        unsigned long flags;
        pgd_t *pgdp;
        pmd_t *pmdp;
        pte_t *ptep;
        int idx, pid;

        /*
         * Handle debugger faulting in for debuggee.
         */
        if (current->active_mm != vma->vm_mm)
                return;

        pid = read_c0_entryhi() & ASID_MASK;

#ifdef DEBUG_TLB
        if ((pid != (cpu_asid(smp_processor_id(), vma->vm_mm))) ||
            (cpu_context(smp_processor_id(), vma->vm_mm) == 0)) {
                printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%d "
                       "tlbpid=%d\n", (int) (cpu_context(smp_processor_id(),
                       vma->vm_mm) & ASID_MASK), pid);
        }
#endif

        local_irq_save(flags);
        address &= (PAGE_MASK << 1);
        write_c0_entryhi(address | pid);
        pgdp = pgd_offset(vma->vm_mm, address);
        BARRIER;
        tlb_probe();
        BARRIER;
        pmdp = pmd_offset(pgdp, address);
        idx = read_c0_index();
        ptep = pte_offset(pmdp, address);
        BARRIER;
        write_c0_entrylo0(pte_val(*ptep++) >> 6);
        write_c0_entrylo1(pte_val(*ptep) >> 6);
        write_c0_entryhi(address | pid);
        BARRIER;
        if (idx < 0)
                tlb_write_random();
        else
                tlb_write_indexed();
        BARRIER;
        write_c0_entryhi(pid);
        BARRIER;
        local_irq_restore(flags);
}

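/*
 * Slots at indices below the c0_wired register are never chosen by
 * tlb_write_random() and are skipped by local_flush_tlb_all(), which
 * starts flushing at read_c0_wired(); that is what makes the entry
 * added here permanent.
 */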
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
                     unsigned long entryhi, unsigned long pagemask)
{
        unsigned long flags;
        unsigned long wired;
        unsigned long old_pagemask;
        unsigned long old_ctx;

        local_irq_save(flags);
        /* Save old context */
        old_ctx = read_c0_entryhi() & ASID_MASK;
        old_pagemask = read_c0_pagemask();
        wired = read_c0_wired();
        write_c0_wired(wired + 1);
        write_c0_index(wired);
        BARRIER;
        write_c0_pagemask(pagemask);
        write_c0_entryhi(entryhi);
        write_c0_entrylo0(entrylo0);
        write_c0_entrylo1(entrylo1);
        BARRIER;
        tlb_write_indexed();
        BARRIER;

        write_c0_entryhi(old_ctx);
        BARRIER;
        write_c0_pagemask(old_pagemask);
        local_flush_tlb_all();
        local_irq_restore(flags);
}

/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system.
 */

static int temp_tlb_entry __initdata;

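/*
 * Temporary entries are handed out from the top of the TLB downward
 * (r4k_tlb_init() seeds temp_tlb_entry with tlbsize - 1), staying clear
 * of the wired entries that grow upward from index 0; when the two
 * would collide, -ENOSPC is returned.  Since these slots sit above
 * c0_wired they remain ordinary entries, reclaimed by the next flush.
 */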
__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
                               unsigned long entryhi, unsigned long pagemask)
{
        int ret = 0;
        unsigned long flags;
        unsigned long wired;
        unsigned long old_pagemask;
        unsigned long old_ctx;

        local_irq_save(flags);
        /* Save old context */
        old_ctx = read_c0_entryhi() & ASID_MASK;
        old_pagemask = read_c0_pagemask();
        wired = read_c0_wired();
        /* Cast: avoid unsigned promotion if the counter goes negative. */
        if (--temp_tlb_entry < (int) wired) {
                printk(KERN_WARNING
                       "No TLB space left for add_temporary_entry\n");
                ret = -ENOSPC;
                goto out;
        }

        write_c0_index(temp_tlb_entry);
        BARRIER;
        write_c0_pagemask(pagemask);
        write_c0_entryhi(entryhi);
        write_c0_entrylo0(entrylo0);
        write_c0_entrylo1(entrylo1);
        BARRIER;
        tlb_write_indexed();
        BARRIER;

        write_c0_entryhi(old_ctx);
        BARRIER;
        write_c0_pagemask(old_pagemask);
out:
        local_irq_restore(flags);
        return ret;
}

static void __init probe_tlb(unsigned long config)
{
        struct cpuinfo_mips *c = &current_cpu_data;
        unsigned int config1;

        /*
         * If this isn't a MIPS32 / MIPS64 compliant CPU the Config1
         * register is not supported and we assume an R4k style TLB;
         * CPU probing has already figured out the number of TLB entries.
         */
        if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY)
                return;

        config1 = read_c0_config1();
        if (!((config >> 7) & 3))       /* Config.MT field: MMU type */
                panic("No MMU present");

        c->tlbsize = ((config1 >> 25) & 0x3f) + 1;      /* MMUSize = entries - 1 */
}

void __init r4k_tlb_init(void)
{
        unsigned long config = read_c0_config();

        probe_tlb(config);
        write_c0_pagemask(PM_DEFAULT_MASK);
        write_c0_wired(0);
        temp_tlb_entry = current_cpu_data.tlbsize - 1;
        local_flush_tlb_all();

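        /*
         * Install the XTLB refill handler: on MIPS64 the refill vector
         * for TLB misses from a 64-bit address space sits at offset
         * 0x080 from the exception base (KSEG0), one 0x80-byte slot
         * above the 32-bit refill vector at offset 0x000.
         */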
        memcpy((void *)(KSEG0 + 0x80), except_vec1_r4k, 0x80);
        flush_icache_range(KSEG0 + 0x80, KSEG0 + 0x100);
}
