
TOMOYO Linux Cross Reference
Linux/arch/mips/mm/c-sb1.c


/*
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 2001 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2000, 2001 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
#include <linux/config.h>
#include <linux/init.h>
#include <asm/mmu_context.h>
#include <asm/bootinfo.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
extern void sb1_dma_init(void);
extern void sb1_clear_page_dma(void * page);
extern void sb1_copy_page_dma(void * to, void * from);
#else
extern void sb1_clear_page(void * page);
extern void sb1_copy_page(void * to, void * from);
#endif

/* These are probed at ld_mmu time */
static unsigned long icache_size;
static unsigned long dcache_size;

static unsigned long icache_line_size;
static unsigned long dcache_line_size;

static unsigned int icache_index_mask;
static unsigned int dcache_index_mask;

static unsigned long icache_assoc;
static unsigned long dcache_assoc;

static unsigned int icache_sets;
static unsigned int dcache_sets;

static unsigned int icache_range_cutoff;
static unsigned int dcache_range_cutoff;

/*
 * The dcache is fully coherent to the system, with one
 * big caveat: the instruction stream.  In other words,
 * if we miss in the icache and have dirty data in the
 * L1 dcache, then we'll go out to memory (or the L2) and
 * get the not-as-recent data.
 *
 * So the only time we have to flush the dcache is when
 * we're flushing the icache.  Since the L2 is fully
 * coherent to everything, including I/O, we never have
 * to flush it.
 */

#define cache_set_op(op, addr)                                          \
        __asm__ __volatile__(                                           \
        "       .set    noreorder               \n"                     \
        "       .set    mips64                  \n"                     \
        "       cache   %0, (0<<13)(%1)         \n"                     \
        "       cache   %0, (1<<13)(%1)         \n"                     \
        "       cache   %0, (2<<13)(%1)         \n"                     \
        "       cache   %0, (3<<13)(%1)         \n"                     \
        "       .set    mips0                   \n"                     \
        "       .set    reorder"                                        \
        :                                                               \
        : "i" (op), "r" (addr))

#define sync()                                                          \
        __asm__ __volatile__(                                           \
        "       .set    mips64                  \n"                     \
        "       sync                            \n"                     \
        "       .set    mips0")
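
/*
 * A worked example of the macros above, assuming the SB1's 32K,
 * 4-way L1 caches, where each way spans 8K = 1<<13 bytes of index
 * space and the way-select bits sit just above the set index:
 *
 *      cache_set_op(Index_Writeback_Inv_D, 0x40);
 *
 * issues four index ops, one per way, all at set-index 0x40:
 *
 *      cache   Index_Writeback_Inv_D, 0x0040(reg)      # way 0
 *      cache   Index_Writeback_Inv_D, 0x2040(reg)      # way 1
 *      cache   Index_Writeback_Inv_D, 0x4040(reg)      # way 2
 *      cache   Index_Writeback_Inv_D, 0x6040(reg)      # way 3
 *
 * sync() then ensures any writebacks those ops produced have reached
 * the memory system before we depend on them.
 */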

/*
 * Writeback and invalidate the entire dcache
 */
static inline void __sb1_writeback_inv_dcache_all(void)
{
        unsigned long addr = 0;

        while (addr < dcache_line_size * dcache_sets) {
                cache_set_op(Index_Writeback_Inv_D, addr);
                addr += dcache_line_size;
        }
}
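
/*
 * Loop-bound arithmetic, with assumed stock SB1 numbers (256 sets,
 * 32-byte lines): dcache_line_size * dcache_sets = 32 * 256 = 8K,
 * i.e. exactly one way's worth of index values.  Since cache_set_op()
 * hits every way at each index, 256 iterations clean all 32K.
 */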

/*
 * Writeback and invalidate a range of the dcache.  The addresses are
 * virtual, and since we're using index ops and bit 12 is part of both
 * the virtual frame and physical index, we have to clear both sets
 * (bit 12 set and cleared).
 */
static inline void __sb1_writeback_inv_dcache_range(unsigned long start,
        unsigned long end)
{
        start &= ~(dcache_line_size - 1);
        end = (end + dcache_line_size - 1) & ~(dcache_line_size - 1);

        while (start != end) {
                cache_set_op(Index_Writeback_Inv_D, start);
                cache_set_op(Index_Writeback_Inv_D, start ^ (1<<12));
                start += dcache_line_size;
        }
}
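
/*
 * Why both settings of bit 12: with 4K pages, a virtual address only
 * pins down index bits 11:0 (the page offset); index bit 12 comes
 * from the physical frame number, which we don't know here.  So a
 * line at page offset 0x40 may sit at index 0x0040 or 0x1040, and
 * flushing both start and start ^ (1<<12) covers either case.
 */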

/*
 * Writeback and invalidate a range of the dcache.  With physical
 * addresses, we don't have to worry about possible bit 12 aliasing.
 * XXXKW is it worth turning on KX and using hit ops with xkphys?
 */
static inline void __sb1_writeback_inv_dcache_phys_range(unsigned long start,
        unsigned long end)
{
        start &= ~(dcache_line_size - 1);
        end = (end + dcache_line_size - 1) & ~(dcache_line_size - 1);

        while (start != end) {
                cache_set_op(Index_Writeback_Inv_D, start & dcache_index_mask);
                start += dcache_line_size;
        }
        sync();
}
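
/*
 * Here the mask does the work: dcache_index_mask is
 * (dcache_sets - 1) * dcache_line_size, i.e. 255 * 32 = 0x1fe0 for
 * the assumed SB1 geometry, which keeps exactly index bits 12:5 of
 * the physical address, including the real bit 12.  One index op per
 * line is therefore enough.
 */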

/*
 * Invalidate the entire icache
 */
static inline void __sb1_flush_icache_all(void)
{
        unsigned long addr = 0;

        while (addr < icache_line_size * icache_sets) {
                cache_set_op(Index_Invalidate_I, addr);
                addr += icache_line_size;
        }
}

/*
 * Flush the icache for a given physical page.  Need to writeback the
 * dcache first, then invalidate the icache.  If the page isn't
 * executable, nothing is required.
 */
static void local_sb1_flush_cache_page(struct vm_area_struct *vma,
        unsigned long addr)
{
        int cpu = smp_processor_id();

#ifndef CONFIG_SMP
        if (!(vma->vm_flags & VM_EXEC))
                return;
#endif

        __sb1_writeback_inv_dcache_range(addr, addr + PAGE_SIZE);

        /*
         * Bumping the ASID is probably cheaper than the flush ...
         */
        if (cpu_context(cpu, vma->vm_mm) != 0)
                drop_mmu_context(vma->vm_mm, cpu);
}
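
/*
 * Why bump the ASID instead of invalidating the icache lines
 * directly?  The icache is virtually indexed and tagged (presumably
 * including the ASID), so retiring the old ASID makes stale lines
 * unhittable.  A hit-op loop along the lines of the sketch below
 * would only work when the current ASID matches the page's mm, which
 * we can't assume here (sketch only, borrowing the cache_op()/
 * Hit_Invalidate_I spelling from the generic MIPS r4k cache code):
 *
 *      for (a = addr; a < addr + PAGE_SIZE; a += icache_line_size)
 *              cache_op(Hit_Invalidate_I, a);
 */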

#ifdef CONFIG_SMP
struct flush_cache_page_args {
        struct vm_area_struct *vma;
        unsigned long addr;
};

static void sb1_flush_cache_page_ipi(void *info)
{
        struct flush_cache_page_args *args = info;

        local_sb1_flush_cache_page(args->vma, args->addr);
}

/* Dirty dcache could be on another CPU, so do the IPIs */
static void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr)
{
        struct flush_cache_page_args args;

        if (!(vma->vm_flags & VM_EXEC))
                return;

        args.vma = vma;
        args.addr = addr;
        on_each_cpu(sb1_flush_cache_page_ipi, (void *) &args, 1, 1);
}
#else
void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr);
asm("sb1_flush_cache_page = local_sb1_flush_cache_page");
#endif
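
/*
 * The UP half of the #ifdef uses an assembler symbol alias rather
 * than a C wrapper: "sb1_flush_cache_page = local_sb1_flush_cache_page"
 * makes both names resolve to the same address, so callers reach the
 * local version with no extra call.  Roughly equivalent to this
 * sketch, minus the indirection:
 *
 *      void sb1_flush_cache_page(struct vm_area_struct *vma,
 *              unsigned long addr)
 *      {
 *              local_sb1_flush_cache_page(vma, addr);
 *      }
 */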

/*
 * Invalidate a range of the icache.  The addresses are virtual, and
 * the cache is virtually indexed and tagged.  However, we don't
 * necessarily have the right ASID context, so use index ops instead
 * of hit ops.
 */
static inline void __sb1_flush_icache_range(unsigned long start,
        unsigned long end)
{
        start &= ~(icache_line_size - 1);
        end = (end + icache_line_size - 1) & ~(icache_line_size - 1);

        while (start != end) {
                cache_set_op(Index_Invalidate_I, start & icache_index_mask);
                start += icache_line_size;
        }

        __asm__ __volatile__(
        "       bnezl  $0, 1f           \n" /* Force mispredict */
        "1:                             \n");

        sync();
}
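
/*
 * About the bnezl: $0 is hardwired to zero, so the branch is never
 * taken, but branch-likely instructions are statically predicted
 * taken.  The guaranteed misprediction forces the fetch pipeline to
 * refill, so instructions prefetched before the invalidation can't
 * execute stale.
 */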

/*
 * Invalidate all caches on this CPU
 */
static void local_sb1___flush_cache_all(void)
{
        __sb1_writeback_inv_dcache_all();
        __sb1_flush_icache_all();
}

#ifdef CONFIG_SMP
extern void sb1___flush_cache_all_ipi(void *ignored);
asm("sb1___flush_cache_all_ipi = local_sb1___flush_cache_all");

static void sb1___flush_cache_all(void)
{
        on_each_cpu(sb1___flush_cache_all_ipi, 0, 1, 1);
}
#else
extern void sb1___flush_cache_all(void);
asm("sb1___flush_cache_all = local_sb1___flush_cache_all");
#endif

/*
 * When flushing a range in the icache, we have to first writeback
 * the dcache for the same range, so new ifetches will see any
 * data that was dirty in the dcache.
 *
 * The start/end arguments are Kseg addresses (possibly mapped Kseg).
 */
static void local_sb1_flush_icache_range(unsigned long start,
        unsigned long end)
{
        /* Just wb-inv the whole dcache if the range is big enough */
        if ((end - start) > dcache_range_cutoff)
                __sb1_writeback_inv_dcache_all();
        else
                __sb1_writeback_inv_dcache_range(start, end);

        /* Just flush the whole icache if the range is big enough */
        if ((end - start) > icache_range_cutoff)
                __sb1_flush_icache_all();
        else
                __sb1_flush_icache_range(start, end);
}
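
/*
 * Cutoff arithmetic, using the assumed stock SB1 geometry: a full
 * icache flush costs icache_sets = 256 cache_set_op() calls, and a
 * range flush costs one per line, so ranges beyond 256 * 32 = 8K
 * (icache_range_cutoff) are cheaper to handle as a full flush.  The
 * dcache cutoff is halved to 4K because the range variant issues two
 * index ops per line (both settings of bit 12).
 */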

#ifdef CONFIG_SMP
struct flush_icache_range_args {
        unsigned long start;
        unsigned long end;
};

static void sb1_flush_icache_range_ipi(void *info)
{
        struct flush_icache_range_args *args = info;

        local_sb1_flush_icache_range(args->start, args->end);
}

void sb1_flush_icache_range(unsigned long start, unsigned long end)
{
        struct flush_icache_range_args args;

        args.start = start;
        args.end = end;
        on_each_cpu(sb1_flush_icache_range_ipi, &args, 1, 1);
}
#else
void sb1_flush_icache_range(unsigned long start, unsigned long end);
asm("sb1_flush_icache_range = local_sb1_flush_icache_range");
#endif

/*
 * Flush the icache for a given physical page.  Need to writeback the
 * dcache first, then invalidate the icache.  If the page isn't
 * executable, nothing is required.
 */
static void local_sb1_flush_icache_page(struct vm_area_struct *vma,
        struct page *page)
{
        unsigned long start;
        int cpu = smp_processor_id();

#ifndef CONFIG_SMP
        if (!(vma->vm_flags & VM_EXEC))
                return;
#endif

        /* Need to writeback any dirty data for that page, we have the PA */
        start = (unsigned long)(page - mem_map) << PAGE_SHIFT;
        __sb1_writeback_inv_dcache_phys_range(start, start + PAGE_SIZE);
        /*
         * If there's a context, bump the ASID (cheaper than a flush,
         * since we don't know VAs!)
         */
        if (cpu_context(cpu, vma->vm_mm) != 0) {
                drop_mmu_context(vma->vm_mm, cpu);
        }
}
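
/*
 * (page - mem_map) is the page frame number, so shifting left by
 * PAGE_SHIFT yields the physical address: with 4K pages, pfn 0x1234
 * becomes physical 0x1234000.  This is the same computation that the
 * page_to_phys() helper expresses on most platforms.
 */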

#ifdef CONFIG_SMP
struct flush_icache_page_args {
        struct vm_area_struct *vma;
        struct page *page;
};

static void sb1_flush_icache_page_ipi(void *info)
{
        struct flush_icache_page_args *args = info;

        local_sb1_flush_icache_page(args->vma, args->page);
}

/* Dirty dcache could be on another CPU, so do the IPIs */
static void sb1_flush_icache_page(struct vm_area_struct *vma,
        struct page *page)
{
        struct flush_icache_page_args args;

        if (!(vma->vm_flags & VM_EXEC))
                return;

        args.vma = vma;
        args.page = page;
        on_each_cpu(sb1_flush_icache_page_ipi, (void *) &args, 1, 1);
}
#else
void sb1_flush_icache_page(struct vm_area_struct *vma, struct page *page);
asm("sb1_flush_icache_page = local_sb1_flush_icache_page");
#endif

/*
 * A signal trampoline must fit into a single cacheline.
 */
static void local_sb1_flush_cache_sigtramp(unsigned long addr)
{
        __asm__ __volatile__(
        "       .set    push            \n"
        "       .set    noreorder       \n"
        "       .set    noat            \n"
        "       .set    mips4           \n"
        "       cache   %2, (0<<13)(%0) \n" /* Index-inval this address */
        "       cache   %2, (1<<13)(%0) \n" /* Index-inval this address */
        "       cache   %2, (2<<13)(%0) \n" /* Index-inval this address */
        "       cache   %2, (3<<13)(%0) \n" /* Index-inval this address */
        "       xori    $1, %0, 1<<12   \n" /* Flip index bit 12        */
        "       cache   %2, (0<<13)($1) \n" /* Index-inval this address */
        "       cache   %2, (1<<13)($1) \n" /* Index-inval this address */
        "       cache   %2, (2<<13)($1) \n" /* Index-inval this address */
        "       cache   %2, (3<<13)($1) \n" /* Index-inval this address */
        "       cache   %3, (0<<13)(%1) \n" /* Index-inval this address */
        "       cache   %3, (1<<13)(%1) \n" /* Index-inval this address */
        "       cache   %3, (2<<13)(%1) \n" /* Index-inval this address */
        "       cache   %3, (3<<13)(%1) \n" /* Index-inval this address */
        "       bnezl   $0, 1f          \n" /* Force mispredict */
        "        nop                    \n"
        "1:                             \n"
        "       .set    pop             \n"
        :
        : "r" (addr & dcache_index_mask), "r" (addr & icache_index_mask),
          "i" (Index_Writeback_Inv_D), "i" (Index_Invalidate_I));
}
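
/*
 * Mapping the operands: %0 is the trampoline's dcache index, %1 its
 * icache index, and %2/%3 the two cache ops.  The sequence is an
 * unrolled, single-line version of local_sb1_flush_icache_range():
 * write back the dcache line at both bit-12 colors (4 ways x 2
 * indexes), invalidate the icache line in all 4 ways, then force a
 * mispredict so the new trampoline instructions are fetched fresh.
 */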

#ifdef CONFIG_SMP
static void sb1_flush_cache_sigtramp_ipi(void *info)
{
        unsigned long iaddr = (unsigned long) info;

        local_sb1_flush_cache_sigtramp(iaddr);
}

static void sb1_flush_cache_sigtramp(unsigned long addr)
{
        on_each_cpu(sb1_flush_cache_sigtramp_ipi, (void *) addr, 1, 1);
}
#else
void sb1_flush_cache_sigtramp(unsigned long addr);
asm("sb1_flush_cache_sigtramp = local_sb1_flush_cache_sigtramp");
#endif

/*
 * Anything that just flushes dcache state can be ignored, as we're
 * always coherent in dcache space.  This is just a dummy function
 * that all the nop'ed routines point to.
 */
static void sb1_nop(void)
{
}

/*
 * Cache set values (from the mips64 spec):
 * 0 - 64
 * 1 - 128
 * 2 - 256
 * 3 - 512
 * 4 - 1024
 * 5 - 2048
 * 6 - 4096
 * 7 - Reserved
 */
static unsigned int decode_cache_sets(unsigned int config_field)
{
        if (config_field == 7) {
                /* JDCXXX - Find a graceful way to abort. */
                return 0;
        }
        return 1 << (config_field + 6);
}

/*
 * Cache line size values (from the mips64 spec):
 * 0 - No cache present
 * 1 - 4 bytes
 * 2 - 8 bytes
 * 3 - 16 bytes
 * 4 - 32 bytes
 * 5 - 64 bytes
 * 6 - 128 bytes
 * 7 - Reserved
 */
static unsigned int decode_cache_line_size(unsigned int config_field)
{
        if (config_field == 0) {
                return 0;
        } else if (config_field == 7) {
                /* JDCXXX - Find a graceful way to abort. */
                return 0;
        }
        return 1 << (config_field + 1);
}

/*
 * Relevant bits of the config1 register format (from the MIPS32/MIPS64
 * specs):
 *
 * 24:22 Icache sets per way
 * 21:19 Icache line size
 * 18:16 Icache associativity
 * 15:13 Dcache sets per way
 * 12:10 Dcache line size
 * 9:7   Dcache associativity
 */
static __init void probe_cache_sizes(void)
{
        u32 config1;

        config1 = read_c0_config1();
        icache_line_size = decode_cache_line_size((config1 >> 19) & 0x7);
        dcache_line_size = decode_cache_line_size((config1 >> 10) & 0x7);
        icache_sets = decode_cache_sets((config1 >> 22) & 0x7);
        dcache_sets = decode_cache_sets((config1 >> 13) & 0x7);
        icache_assoc = ((config1 >> 16) & 0x7) + 1;
        dcache_assoc = ((config1 >> 7) & 0x7) + 1;
        icache_size = icache_line_size * icache_sets * icache_assoc;
        dcache_size = dcache_line_size * dcache_sets * dcache_assoc;
        /* Need to remove non-index bits for index ops */
        icache_index_mask = (icache_sets - 1) * icache_line_size;
        dcache_index_mask = (dcache_sets - 1) * dcache_line_size;
        /*
         * These are for choosing range (index ops) versus all.
         * icache flushes all ways for each set, so drop icache_assoc.
         * dcache flushes all ways and each setting of bit 12 for each
         * index, so drop dcache_assoc and halve the dcache_sets.
         */
        icache_range_cutoff = icache_sets * icache_line_size;
        dcache_range_cutoff = (dcache_sets / 2) * dcache_line_size;
}
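
/*
 * Worked example, assuming the stock SB1 core's 32K, 4-way,
 * 32-byte-line L1 caches.  Each config1 field then decodes as:
 *
 *      line size field = 4  ->  1 << (4 + 1) = 32 bytes
 *      sets field      = 2  ->  1 << (2 + 6) = 256 sets per way
 *      assoc field     = 3  ->  3 + 1        = 4 ways
 *
 * giving icache_size = dcache_size = 32 * 256 * 4 = 32K, index masks
 * of 255 * 32 = 0x1fe0 (index bits 12:5), and range cutoffs of 8K
 * (icache) and 4K (dcache).
 */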

/*
 * This is called from loadmmu.c.  We have to set up all the
 * memory management function pointers, as well as initialize
 * the caches and tlbs.
 */
void ld_mmu_sb1(void)
{
        extern char except_vec2_sb1;

        /* Special cache error handler for SB1 */
        memcpy((void *)(KSEG0 + 0x100), &except_vec2_sb1, 0x80);
        memcpy((void *)(KSEG1 + 0x100), &except_vec2_sb1, 0x80);

        probe_cache_sizes();

#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
        _clear_page = sb1_clear_page_dma;
        _copy_page = sb1_copy_page_dma;
        sb1_dma_init();
#else
        _clear_page = sb1_clear_page;
        _copy_page = sb1_copy_page;
#endif

        /*
         * None of these are needed for the SB1 - the Dcache is
         * physically indexed and tagged, so no virtual aliasing can
         * occur.
         */
        flush_cache_range = (void *) sb1_nop;
        flush_cache_page = sb1_flush_cache_page;
        flush_cache_mm = (void (*)(struct mm_struct *))sb1_nop;
        flush_cache_all = sb1_nop;

        /* These routines are for Icache coherence with the Dcache */
        flush_icache_range = sb1_flush_icache_range;
        flush_icache_page = sb1_flush_icache_page;
        flush_icache_all = __sb1_flush_icache_all; /* local only */

        flush_cache_sigtramp = sb1_flush_cache_sigtramp;
        flush_data_cache_page = (void *) sb1_nop;

        /* Full flush */
        __flush_cache_all = sb1___flush_cache_all;

        change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);

        /*
         * This is the only way to force the update of K0 to complete
         * before subsequent instruction fetch.
         */
        write_c0_epc(&&here);
here:
        __asm__ __volatile__(
        "       .set    noreorder               \n"
        "       .set    mips3                   \n"
        "       eret                            \n"
        "       .set    mips0                   \n"
        "       .set    reorder"
        :
        :
        : "memory");

        flush_cache_all();
}
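
/*
 * The eret trick above: change_c0_config() switches the K0
 * cacheability attribute, and instruction fetch must not depend on
 * the old setting.  Loading EPC with the address of the very next
 * line (via the GCC &&label extension) and executing eret acts as a
 * fully serialized jump to "here", so fetch resumes with the new K0
 * setting in effect.  Sketch of the sequence:
 *
 *      write_c0_epc(&&here);   // EPC = address of the label below
 *      eret;                   // serializing return to EPC
 * here:                        // fetch restarts under the new config
 */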
