/*
 * srmmu.c: SRMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org)
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/fs.h>
#include <linux/seq_file.h>

#include <asm/bitext.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/kdebug.h>
#include <asm/vaddrs.h>
#include <asm/traps.h>
#include <asm/smp.h>
#include <asm/mbus.h>
#include <asm/cache.h>
#include <asm/oplib.h>
#include <asm/sbus.h>
#include <asm/asi.h>
#include <asm/msi.h>
#include <asm/a.out.h>
#include <asm/mmu_context.h>
#include <asm/io-unit.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/* Now the cpu specific definitions. */
#include <asm/viking.h>
#include <asm/mxcc.h>
#include <asm/ross.h>
#include <asm/tsunami.h>
#include <asm/swift.h>
#include <asm/turbosparc.h>

#include <asm/btfixup.h>

/*
 * To support pagetables in highmem, Linux introduces APIs which
 * return struct page* and generally manipulate page tables when
 * they are not mapped into kernel space. Our hardware page tables
 * are smaller than pages. We lump hardware tables into big, page sized
 * software tables.
 *
 * PMD_SHIFT determines the size of the area a second-level page table entry
 * can map, and our pmd_t is 16 times larger than normal.
 */
#define SRMMU_PTRS_PER_PMD_SOFT	0x4	/* Each pmd_t contains 16 hard PTPs */
#define SRMMU_PTRS_PER_PTE_SOFT	0x400	/* 16 hard tables per 4K page */
#define SRMMU_PTE_SZ_SOFT	0x1000	/* same as above, in bytes */

#define SRMMU_PMD_SHIFT_SOFT	22
#define SRMMU_PMD_SIZE_SOFT	(1UL << SRMMU_PMD_SHIFT_SOFT)
#define SRMMU_PMD_MASK_SOFT	(~(SRMMU_PMD_SIZE_SOFT-1))
// #define SRMMU_PMD_ALIGN(addr) (((addr)+SRMMU_PMD_SIZE-1)&SRMMU_PMD_MASK)


enum mbus_module srmmu_modtype;
unsigned int hwbug_bitmask;
int vac_cache_size;
int vac_line_size;

extern struct resource sparc_iomap;

extern unsigned long last_valid_pfn;

extern unsigned long page_kernel;

pgd_t *srmmu_swapper_pg_dir;

#ifdef CONFIG_SMP
#define FLUSH_BEGIN(mm)
#define FLUSH_END
#else
#define FLUSH_BEGIN(mm) if((mm)->context != NO_CONTEXT) {
#define FLUSH_END	}
#endif

BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)

int flush_page_for_dma_global = 1;

#ifdef CONFIG_SMP
BTFIXUPDEF_CALL(void, local_flush_page_for_dma, unsigned long)
#define local_flush_page_for_dma(page) BTFIXUP_CALL(local_flush_page_for_dma)(page)
#endif

char *srmmu_name;

ctxd_t *srmmu_ctx_table_phys;
ctxd_t *srmmu_context_table;

int viking_mxcc_present;
static spinlock_t srmmu_context_spinlock = SPIN_LOCK_UNLOCKED;

int is_hypersparc;

/*
 * In general all page table modifications should use the V8 atomic
 * swap instruction. This ensures the mmu and the cpu are in sync
 * with respect to ref/mod bits in the page tables.
 */
static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
{
	__asm__ __volatile__("swap [%2], %0" : "=&r" (value) : "0" (value), "r" (addr));
	return value;
}

static inline void srmmu_set_pte(pte_t *ptep, pte_t pteval)
{
	srmmu_swap((unsigned long *)ptep, pte_val(pteval));
}

/* The very generic SRMMU page table operations. */
static inline int srmmu_device_memory(unsigned long x)
{
	return ((x & 0xF0000000) != 0);
}

int srmmu_cache_pagetables;

/* these will be initialized in srmmu_nocache_calcsize() */
unsigned long srmmu_nocache_size;
unsigned long srmmu_nocache_end;
unsigned long pkmap_base;
unsigned long pkmap_base_end;
extern unsigned long fix_kmap_begin;
extern unsigned long fix_kmap_end;

/* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */
#define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)

/* The context table is a nocache user with the biggest alignment needs. */
#define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS)

void *srmmu_nocache_pool;
void *srmmu_nocache_bitmap;
static struct bit_map srmmu_nocache_map;

/* This makes sense. Honest it does - Anton */
#define __nocache_pa(VADDR) (((unsigned long)VADDR) - SRMMU_NOCACHE_VADDR + __pa((unsigned long)srmmu_nocache_pool))
#define __nocache_va(PADDR) (__va((unsigned long)PADDR) - (unsigned long)srmmu_nocache_pool + SRMMU_NOCACHE_VADDR)
#define __nocache_fix(VADDR) __va(__nocache_pa(VADDR))

static unsigned long srmmu_pte_pfn(pte_t pte)
{
	if (srmmu_device_memory(pte_val(pte))) {
		/* XXX Anton obviously had something in mind when he did this.
		 * But what?
		 */
		/* return (struct page *)~0; */
		BUG();
	}
	return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
}

static struct page *srmmu_pmd_page(pmd_t pmd)
{

	if (srmmu_device_memory(pmd_val(pmd)))
		BUG();
	return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
}

static inline unsigned long srmmu_pgd_page(pgd_t pgd)
{ return srmmu_device_memory(pgd_val(pgd))?~0:(unsigned long)__nocache_va((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }


static inline int srmmu_pte_none(pte_t pte)
{ return !(pte_val(pte) & 0xFFFFFFF); }

static inline int srmmu_pte_present(pte_t pte)
{ return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE); }

static inline void srmmu_pte_clear(pte_t *ptep)
{ srmmu_set_pte(ptep, __pte(0)); }

static inline int srmmu_pmd_none(pmd_t pmd)
{ return !(pmd_val(pmd) & 0xFFFFFFF); }

static inline int srmmu_pmd_bad(pmd_t pmd)
{ return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }

static inline int srmmu_pmd_present(pmd_t pmd)
{ return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }

static inline void srmmu_pmd_clear(pmd_t *pmdp) {
	int i;
	for (i = 0; i < SRMMU_PTRS_PER_PTE_SOFT/SRMMU_PTRS_PER_PTE; i++)
		srmmu_set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
}

static inline int srmmu_pgd_none(pgd_t pgd)
{ return !(pgd_val(pgd) & 0xFFFFFFF); }

static inline int srmmu_pgd_bad(pgd_t pgd)
{ return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }

static inline int srmmu_pgd_present(pgd_t pgd)
{ return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }

static inline void srmmu_pgd_clear(pgd_t * pgdp)
{ srmmu_set_pte((pte_t *)pgdp, __pte(0)); }

static inline pte_t srmmu_pte_wrprotect(pte_t pte)
{ return __pte(pte_val(pte) & ~SRMMU_WRITE);}

static inline pte_t srmmu_pte_mkclean(pte_t pte)
{ return __pte(pte_val(pte) & ~SRMMU_DIRTY);}

static inline pte_t srmmu_pte_mkold(pte_t pte)
{ return __pte(pte_val(pte) & ~SRMMU_REF);}

static inline pte_t srmmu_pte_mkwrite(pte_t pte)
{ return __pte(pte_val(pte) | SRMMU_WRITE);}

static inline pte_t srmmu_pte_mkdirty(pte_t pte)
{ return __pte(pte_val(pte) | SRMMU_DIRTY);}

static inline pte_t srmmu_pte_mkyoung(pte_t pte)
{ return __pte(pte_val(pte) | SRMMU_REF);}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static pte_t srmmu_mk_pte(struct page *page, pgprot_t pgprot)
{ return __pte(((page - mem_map) << (PAGE_SHIFT-4)) | pgprot_val(pgprot)); }

static pte_t srmmu_mk_pte_phys(unsigned long page, pgprot_t pgprot)
{ return __pte(((page) >> 4) | pgprot_val(pgprot)); }

static pte_t srmmu_mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{ return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot)); }

/* XXX should we hyper_flush_whole_icache here - Anton */
static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
{ srmmu_set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); }

static inline void srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{ srmmu_set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pmdp) >> 4))); }

static void srmmu_pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	unsigned long ptp;	/* Physical address, shifted right by 4 */
	int i;

	ptp = __nocache_pa((unsigned long) ptep) >> 4;
	for (i = 0; i < SRMMU_PTRS_PER_PTE_SOFT/SRMMU_PTRS_PER_PTE; i++) {
		srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
		ptp += (SRMMU_PTRS_PER_PTE*sizeof(pte_t) >> 4);
	}
}

static void srmmu_pmd_populate(pmd_t *pmdp, struct page *ptep)
{
	unsigned long ptp;	/* Physical address, shifted right by 4 */
	int i;

	ptp = (ptep - mem_map) << (PAGE_SHIFT-4);	/* watch for overflow */
	for (i = 0; i < SRMMU_PTRS_PER_PTE_SOFT/SRMMU_PTRS_PER_PTE; i++) {
		srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
		ptp += (SRMMU_PTRS_PER_PTE*sizeof(pte_t) >> 4);
	}
}

static inline pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
{ return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot)); }

/* to find an entry in a top-level page table... */
extern inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
{ return mm->pgd + (address >> SRMMU_PGDIR_SHIFT); }

/* Find an entry in the second-level page table.. */
static inline pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) srmmu_pgd_page(*dir) +
	    ((address >> SRMMU_PMD_SHIFT_SOFT) & (SRMMU_PTRS_PER_PMD_SOFT - 1));
}

/* Find an entry in the third-level page table.. */
static inline pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address)
{
	void *pte;

	pte = __nocache_va((dir->pmdv[0] & SRMMU_PTD_PMASK) << 4);
	return (pte_t *) pte +
	    ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE_SOFT - 1));
}

/*
 * size: bytes to allocate in the nocache area.
 * align: bytes, number to align at.
 * Returns the virtual address of the allocated area.
 */
static unsigned long __srmmu_get_nocache(int size, int align)
{
	int offset;

	if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
		printk("Size 0x%x too small for nocache request\n", size);
		size = SRMMU_NOCACHE_BITMAP_SHIFT;
	}
	if (size & (SRMMU_NOCACHE_BITMAP_SHIFT-1)) {
		printk("Size 0x%x unaligned in nocache request\n", size);
		size += SRMMU_NOCACHE_BITMAP_SHIFT-1;
	}
	if (align > SRMMU_NOCACHE_ALIGN_MAX) {
		BUG();
		return 0;
	}

	offset = bit_map_string_get(&srmmu_nocache_map,
				    size >> SRMMU_NOCACHE_BITMAP_SHIFT,
				    align >> SRMMU_NOCACHE_BITMAP_SHIFT);
	if (offset == -1) {
		printk("srmmu: out of nocache %d: %d/%d\n",
		    size, (int) srmmu_nocache_size,
		    srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
		return 0;
	}

	return (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT));
}

inline unsigned long srmmu_get_nocache(int size, int align)
{
	unsigned long tmp;

	tmp = __srmmu_get_nocache(size, align);

	if (tmp)
		memset((void *)tmp, 0, size);

	return tmp;
}

void srmmu_free_nocache(unsigned long vaddr, int size)
{
	int offset;

	if (vaddr < SRMMU_NOCACHE_VADDR) {
		printk("Vaddr %lx is smaller than nocache base 0x%lx\n",
		    vaddr, (unsigned long)SRMMU_NOCACHE_VADDR);
		BUG();
	}
	if (vaddr >= srmmu_nocache_end) {
		printk("Vaddr %lx is bigger than nocache end 0x%lx\n",
		    vaddr, srmmu_nocache_end);
		BUG();
	}
	if (size & (size-1)) {
		printk("Size 0x%x is not a power of 2\n", size);
		BUG();
	}
	if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
		printk("Size 0x%x is too small\n", size);
		BUG();
	}
	if (vaddr & (size-1)) {
		printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size);
		BUG();
	}

	offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT;
	size = size >> SRMMU_NOCACHE_BITMAP_SHIFT;

	bit_map_clear(&srmmu_nocache_map, offset, size);
}

void srmmu_early_allocate_ptable_skeleton(unsigned long start, unsigned long end);

extern unsigned long probe_memory(void);	/* in fault.c */

/*
 * Reserve nocache dynamically proportionally to the amount of
 * system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002
 */
void srmmu_nocache_calcsize(void)
{
	unsigned long sysmemavail = probe_memory() / 1024;
	int srmmu_nocache_npages;

	srmmu_nocache_npages =
		sysmemavail / SRMMU_NOCACHE_ALCRATIO / 1024 * 256;

	/* P3 XXX The 4x overuse: corroborated by /proc/meminfo. */
	// if (srmmu_nocache_npages < 256) srmmu_nocache_npages = 256;
	if (srmmu_nocache_npages < 550) srmmu_nocache_npages = 550;

	/* anything above 1280 blows up */
	if (srmmu_nocache_npages > 1280) srmmu_nocache_npages = 1280;

	srmmu_nocache_size = srmmu_nocache_npages * PAGE_SIZE;
	srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size;
	fix_kmap_begin = srmmu_nocache_end;
	fix_kmap_end = fix_kmap_begin + (KM_TYPE_NR * NR_CPUS - 1) * PAGE_SIZE;
	pkmap_base = SRMMU_NOCACHE_VADDR + srmmu_nocache_size + 0x40000;
	pkmap_base_end = pkmap_base + LAST_PKMAP * PAGE_SIZE;
}

void srmmu_nocache_init(void)
{
	unsigned int bitmap_bits;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long paddr, vaddr;
	unsigned long pteval;

	bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;

	srmmu_nocache_pool = __alloc_bootmem(srmmu_nocache_size,
		SRMMU_NOCACHE_ALIGN_MAX, 0UL);
	memset(srmmu_nocache_pool, 0, srmmu_nocache_size);

	srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL);
	bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);

	srmmu_swapper_pg_dir = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
	memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE);
	init_mm.pgd = srmmu_swapper_pg_dir;

	srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, srmmu_nocache_end);

	paddr = __pa((unsigned long)srmmu_nocache_pool);
	vaddr = SRMMU_NOCACHE_VADDR;

	while (vaddr < srmmu_nocache_end) {
		pgd = pgd_offset_k(vaddr);
		pmd = srmmu_pmd_offset(__nocache_fix(pgd), vaddr);
		pte = srmmu_pte_offset(__nocache_fix(pmd), vaddr);

		pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV);

		if (srmmu_cache_pagetables)
			pteval |= SRMMU_CACHE;

		srmmu_set_pte(__nocache_fix(pte), pteval);

		vaddr += PAGE_SIZE;
		paddr += PAGE_SIZE;
	}

	flush_cache_all();
	flush_tlb_all();
}

static inline pgd_t *srmmu_get_pgd_fast(void)
{
	pgd_t *pgd = NULL;

	pgd = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
	if (pgd) {
		pgd_t *init = pgd_offset_k(0);
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}

	return pgd;
}

static void srmmu_free_pgd_fast(pgd_t *pgd)
{
	srmmu_free_nocache((unsigned long)pgd, SRMMU_PGD_TABLE_SIZE);
}

static pmd_t *srmmu_pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	return (pmd_t *)srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
}

static void srmmu_pmd_free(pmd_t * pmd)
{
	srmmu_free_nocache((unsigned long)pmd, SRMMU_PMD_TABLE_SIZE);
}

/*
 * Hardware needs alignment to 256 only, but we align to whole page size
 * to reduce fragmentation problems due to the buddy principle.
 * XXX Provide actual fragmentation statistics in /proc.
 *
 * Alignments up to the page size are the same for physical and virtual
 * addresses of the nocache area.
 */
static pte_t *
srmmu_pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)srmmu_get_nocache(SRMMU_PTE_SZ_SOFT, SRMMU_PTE_SZ_SOFT);
}

static struct page *
srmmu_pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long pte;

	if ((pte = (unsigned long)srmmu_pte_alloc_one_kernel(mm, address)) == 0)
		return NULL;
	return mem_map + (__nocache_pa(pte) >> PAGE_SHIFT);
}

static void srmmu_free_pte_fast(pte_t *pte)
{
	srmmu_free_nocache((unsigned long)pte, SRMMU_PTE_SZ_SOFT);
}

static void srmmu_pte_free(struct page *pte)
{
	unsigned long p;

	p = (unsigned long)page_address(pte);	/* Cached address (for test) */
	if (p == 0)
		BUG();
	p = ((pte - mem_map) << PAGE_SHIFT);	/* Physical address */
	p = (unsigned long) __nocache_va(p);	/* Nocached virtual */
	srmmu_free_nocache(p, SRMMU_PTE_SZ_SOFT);
}

/*
 * Grab a free context for the mm; if none are left, steal the oldest used one.
 */
static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
	struct ctx_list *ctxp;

	ctxp = ctx_free.next;
	if(ctxp != &ctx_free) {
		remove_from_ctx_list(ctxp);
		add_to_used_ctxlist(ctxp);
		mm->context = ctxp->ctx_number;
		ctxp->ctx_mm = mm;
		return;
	}
	ctxp = ctx_used.next;
	if(ctxp->ctx_mm == old_mm)
		ctxp = ctxp->next;
	if(ctxp == &ctx_used)
		panic("out of mmu contexts");
	flush_cache_mm(ctxp->ctx_mm);
	flush_tlb_mm(ctxp->ctx_mm);
	remove_from_ctx_list(ctxp);
	add_to_used_ctxlist(ctxp);
	ctxp->ctx_mm->context = NO_CONTEXT;
	ctxp->ctx_mm = mm;
	mm->context = ctxp->ctx_number;
}

static inline void free_context(int context)
{
	struct ctx_list *ctx_old;

	ctx_old = ctx_list_pool + context;
	remove_from_ctx_list(ctx_old);
	add_to_free_ctxlist(ctx_old);
}


static void srmmu_switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
    struct task_struct *tsk, int cpu)
{
	if(mm->context == NO_CONTEXT) {
		spin_lock(&srmmu_context_spinlock);
		alloc_context(old_mm, mm);
		spin_unlock(&srmmu_context_spinlock);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
	}

	if (is_hypersparc)
		hyper_flush_whole_icache();

	srmmu_set_context(mm->context);
}

/* Low level IO area allocation on the SRMMU. */
static inline void srmmu_mapioaddr(unsigned long physaddr,
    unsigned long virt_addr, int bus_type)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	unsigned long tmp;

	physaddr &= PAGE_MASK;
	pgdp = pgd_offset_k(virt_addr);
	pmdp = srmmu_pmd_offset(pgdp, virt_addr);
	ptep = srmmu_pte_offset(pmdp, virt_addr);
	tmp = (physaddr >> 4) | SRMMU_ET_PTE;

	/*
	 * I need to test whether this is consistent over all
	 * sun4m's. The bus_type represents the upper 4 bits of
	 * 36-bit physical address on the I/O space lines...
	 */
	tmp |= (bus_type << 28);
	tmp |= SRMMU_PRIV;
	__flush_page_to_ram(virt_addr);
	srmmu_set_pte(ptep, __pte(tmp));
}

static void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
    unsigned long xva, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		srmmu_mapioaddr(xpa, xva, bus);
		xva += PAGE_SIZE;
		xpa += PAGE_SIZE;
	}
	flush_tlb_all();
}

static inline void srmmu_unmapioaddr(unsigned long virt_addr)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(virt_addr);
	pmdp = srmmu_pmd_offset(pgdp, virt_addr);
	ptep = srmmu_pte_offset(pmdp, virt_addr);

	/* No need to flush uncacheable page. */
	srmmu_pte_clear(ptep);
}

static void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		srmmu_unmapioaddr(virt_addr);
		virt_addr += PAGE_SIZE;
	}
	flush_tlb_all();
}

/*
 * On the SRMMU we do not have the problems with limited tlb entries
 * for mapping kernel pages, so we just take things from the free page
 * pool. As a side effect we are putting a little too much pressure
 * on the gfp() subsystem. This setup also makes the logic of the
 * iommu mapping code a lot easier as we can transparently handle
 * mappings on the kernel stack without any special code as we did
 * need on the sun4c.
 */
struct thread_info *srmmu_alloc_thread_info(void)
{
	return (struct thread_info *)
	    __get_free_pages(GFP_KERNEL, THREAD_INFO_ORDER);
}

static void srmmu_free_thread_info(struct thread_info *ti)
{
	free_pages((unsigned long)ti, THREAD_INFO_ORDER);
}

/* tsunami.S */
extern void tsunami_flush_cache_all(void);
extern void tsunami_flush_cache_mm(struct mm_struct *mm);
extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_flush_page_to_ram(unsigned long page);
extern void tsunami_flush_page_for_dma(unsigned long page);
extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void tsunami_flush_tlb_all(void);
extern void tsunami_flush_tlb_mm(struct mm_struct *mm);
extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_setup_blockops(void);

/*
 * Workaround, until we find what's going on with Swift. When low on memory,
 * it sometimes loops in fault/handle_mm_fault (incl. flush_tlb_page), finds
 * the entry already present in the page tables, and then faults again on the
 * same instruction.
 * I really don't understand it; I have checked it, the contexts
 * are right, flush_tlb_all is done as well, and it faults again...
 * Strange. -jj
 *
 * The following code is deadwood that may be necessary when
 * we start to make precise page flushes again. --zaitcev
 */
static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
#if 0
	static unsigned long last;
	unsigned int val;
	/* unsigned int n; */

	if (address == last) {
		val = srmmu_hwprobe(address);
		if (val != 0 && pte_val(pte) != val) {
			printk("swift_update_mmu_cache: "
			    "addr %lx put %08x probed %08x from %p\n",
			    address, pte_val(pte), val,
			    __builtin_return_address(0));
			srmmu_flush_whole_tlb();
		}
	}
	last = address;
#endif
}

/* swift.S */
extern void swift_flush_cache_all(void);
extern void swift_flush_cache_mm(struct mm_struct *mm);
extern void swift_flush_cache_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end);
extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void swift_flush_page_to_ram(unsigned long page);
extern void swift_flush_page_for_dma(unsigned long page);
extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void swift_flush_tlb_all(void);
extern void swift_flush_tlb_mm(struct mm_struct *mm);
extern void swift_flush_tlb_range(struct vm_area_struct *vma,
				  unsigned long start, unsigned long end);
extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);

#if 0	/* P3: deadwood to debug precise flushes on Swift. */
void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cctx, ctx1;

	page &= PAGE_MASK;
	if ((ctx1 = vma->vm_mm->context) != -1) {
		cctx = srmmu_get_context();
		/* Is context # ever different from current context? P3 */
		if (cctx != ctx1) {
			printk("flush ctx %02x curr %02x\n", ctx1, cctx);
			srmmu_set_context(ctx1);
			swift_flush_page(page);
			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
					"r" (page), "i" (ASI_M_FLUSH_PROBE));
			srmmu_set_context(cctx);
		} else {
			/* Rm. prot. bits from virt. c. */
			/* swift_flush_cache_all(); */
			/* swift_flush_cache_page(vma, page); */
			swift_flush_page(page);

			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
				"r" (page), "i" (ASI_M_FLUSH_PROBE));
			/* same as above: srmmu_flush_tlb_page() */
		}
	}
}
#endif

/*
 * The following are all MBUS based SRMMU modules, and therefore could
 * be found in a multiprocessor configuration. On the whole, these
 * chips seem to be much more touchy about DVMA and page tables
 * with respect to cache coherency.
 */

/* Cypress flushes. */
static void cypress_flush_cache_all(void)
{
	volatile unsigned long cypress_sucks;
	unsigned long faddr, tagval;

	flush_user_windows();
	for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
		__asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
				     "=r" (tagval) :
				     "r" (faddr), "r" (0x40000),
				     "i" (ASI_M_DATAC_TAG));

		/* If modified and valid, kick it. */
		if((tagval & 0x60) == 0x60)
			cypress_sucks = *(unsigned long *)(0xf0020000 + faddr);
	}
}

static void cypress_flush_cache_mm(struct mm_struct *mm)
{
	register unsigned long a, b, c, d, e, f, g;
	unsigned long flags, faddr;
	int octx;

	FLUSH_BEGIN(mm)
	flush_user_windows();
	local_irq_save(flags);
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	a = 0x20; b = 0x40; c = 0x60;
	d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;

	faddr = (0x10000 - 0x100);
	goto inside;
	do {
		faddr -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (faddr), "i" (ASI_M_FLUSH_CTX),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(faddr);
	srmmu_set_context(octx);
	local_irq_restore(flags);
	FLUSH_END
}

static void cypress_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	register unsigned long a, b, c, d, e, f, g;
	unsigned long flags, faddr;
	int octx;

	FLUSH_BEGIN(mm)
	flush_user_windows();
	local_irq_save(flags);
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	a = 0x20; b = 0x40; c = 0x60;
	d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;

	start &= SRMMU_PMD_MASK;
	while(start < end) {
		faddr = (start + (0x10000 - 0x100));
		goto inside;
		do {
			faddr -= 0x100;
		inside:
			__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
					     "sta %%g0, [%0 + %2] %1\n\t"
					     "sta %%g0, [%0 + %3] %1\n\t"
					     "sta %%g0, [%0 + %4] %1\n\t"
					     "sta %%g0, [%0 + %5] %1\n\t"
					     "sta %%g0, [%0 + %6] %1\n\t"
					     "sta %%g0, [%0 + %7] %1\n\t"
					     "sta %%g0, [%0 + %8] %1\n\t" : :
					     "r" (faddr),
					     "i" (ASI_M_FLUSH_SEG),
					     "r" (a), "r" (b), "r" (c), "r" (d),
					     "r" (e), "r" (f), "r" (g));
		} while (faddr != start);
		start += SRMMU_PMD_SIZE;
	}
	srmmu_set_context(octx);
	local_irq_restore(flags);
	FLUSH_END
}

static void cypress_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	register unsigned long a, b, c, d, e, f, g;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags, line;
	int octx;

	FLUSH_BEGIN(mm)
	flush_user_windows();
	local_irq_save(flags);
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	a = 0x20; b = 0x40; c = 0x60;
	d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;

	page &= PAGE_MASK;
	line = (page + PAGE_SIZE) - 0x100;
	goto inside;
	do {
		line -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (line),
				     "i" (ASI_M_FLUSH_PAGE),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(line != page);
	srmmu_set_context(octx);
	local_irq_restore(flags);
	FLUSH_END
}

/* Cypress is copy-back, at least that is how we configure it. */
static void cypress_flush_page_to_ram(unsigned long page)
{
	register unsigned long a, b, c, d, e, f, g;
	unsigned long line;

	a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
	page &= PAGE_MASK;
	line = (page + PAGE_SIZE) - 0x100;
	goto inside;
	do {
		line -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (line),
				     "i" (ASI_M_FLUSH_PAGE),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(line != page);
}

/* Cypress is also IO cache coherent. */
static void cypress_flush_page_for_dma(unsigned long page)
{
}

/* Cypress has a unified L2 VIPT cache, in which both instructions and data
 * are stored. It does not have an onboard icache of any sort, therefore
 * no flush is necessary.
 */
static void cypress_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}

static void cypress_flush_tlb_all(void)
{
	srmmu_flush_whole_tlb();
}

static void cypress_flush_tlb_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	__asm__ __volatile__(
	"lda [%0] %3, %%g5\n\t"
	"sta %2, [%0] %3\n\t"
	"sta %%g0, [%1] %4\n\t"
	"sta %%g5, [%0] %3\n"
	: /* no outputs */
	: "r" (SRMMU_CTX_REG), "r" (0x300), "r" (mm->context),
	  "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
	: "g5");
	FLUSH_END
}

static void cypress_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long size;

	FLUSH_BEGIN(mm)
	start &= SRMMU_PGDIR_MASK;
	size = SRMMU_PGDIR_ALIGN(end) - start;
	__asm__ __volatile__(
		"lda [%0] %5, %%g5\n\t"
		"sta %1, [%0] %5\n"
		"1:\n\t"
		"subcc %3, %4, %3\n\t"
		"bne 1b\n\t"
		" sta %%g0, [%2 + %3] %6\n\t"
		"sta %%g5, [%0] %5\n"
	: /* no outputs */
	: "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (start | 0x200),
	  "r" (size), "r" (SRMMU_PGDIR_SIZE), "i" (ASI_M_MMUREGS),
	  "i" (ASI_M_FLUSH_PROBE)
	: "g5", "cc");
	FLUSH_END
}

static void cypress_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	FLUSH_BEGIN(mm)
	__asm__ __volatile__(
	"lda [%0] %3, %%g5\n\t"
	"sta %1, [%0] %3\n\t"
	"sta %%g0, [%2] %4\n\t"
	"sta %%g5, [%0] %3\n"
	: /* no outputs */
	: "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (page & PAGE_MASK),
	  "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
	: "g5");
	FLUSH_END
}

/* viking.S */
extern void viking_flush_cache_all(void);
extern void viking_flush_cache_mm(struct mm_struct *mm);
extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void viking_flush_cache_page(struct vm_area_struct *vma,
				    unsigned long page);
extern void viking_flush_page_to_ram(unsigned long page);
extern void viking_flush_page_for_dma(unsigned long page);
extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);
extern void viking_flush_tlb_all(void);
extern void viking_flush_tlb_mm(struct mm_struct *mm);
extern void viking_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				   unsigned long end);
extern void viking_flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long page);
extern void sun4dsmp_flush_tlb_all(void);
extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm);
extern void sun4dsmp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma,
				    unsigned long page);

/* hypersparc.S */
extern void hypersparc_flush_cache_all(void);
extern void hypersparc_flush_cache_mm(struct mm_struct *mm);
extern void hypersparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_flush_page_to_ram(unsigned long page);
extern void hypersparc_flush_page_for_dma(unsigned long page);
extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void hypersparc_flush_tlb_all(void);
extern void hypersparc_flush_tlb_mm(struct mm_struct *mm);
extern void hypersparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_setup_blockops(void);

/*
 * NOTE: All of this startup code assumes the low 16mb (approx.) of
 *       kernel mappings are done with one single contiguous chunk of
 *       ram. On small ram machines (classics mainly) we only get
 *       around 8mb mapped for us.
 */

void __init early_pgtable_allocfail(char *type)
{
	prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
	prom_halt();
}

void __init srmmu_early_allocate_ptable_skeleton(unsigned long start, unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while(start < end) {
		pgdp = pgd_offset_k(start);
		if(srmmu_pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			pmdp = (pmd_t *) __srmmu_get_nocache(
			    SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			srmmu_pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
		if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = (pte_t *)__srmmu_get_nocache(SRMMU_PTE_SZ_SOFT,
			    SRMMU_PTE_SZ_SOFT);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, SRMMU_PTE_SZ_SOFT);
			srmmu_pmd_set(__nocache_fix(pmdp), ptep);
		}
		start = (start + SRMMU_PMD_SIZE) & SRMMU_PMD_MASK;
	}
}

void __init srmmu_allocate_ptable_skeleton(unsigned long start, unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while(start < end) {
		pgdp = pgd_offset_k(start);
		if(srmmu_pgd_none(*pgdp)) {
			pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
			srmmu_pgd_set(pgdp, pmdp);
		}
		pmdp = srmmu_pmd_offset(pgdp, start);
		if(srmmu_pmd_none(*pmdp)) {
			ptep = (pte_t *) __srmmu_get_nocache(SRMMU_PTE_SZ_SOFT,
			    SRMMU_PTE_SZ_SOFT);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(ptep, 0, SRMMU_PTE_SZ_SOFT);
			srmmu_pmd_set(pmdp, ptep);
		}
		start = (start + SRMMU_PMD_SIZE) & SRMMU_PMD_MASK;
	}
}

/*
 * This is much cleaner than poking around physical address space
 * looking at the prom's page table directly which is what most
 * other OS's do. Yuck... this is much better.
 */
void __init srmmu_inherit_prom_mappings(unsigned long start,unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int what = 0; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
	unsigned long prompte;

	while(start <= end) {
		if (start == 0)
			break; /* probably wrap around */
		if(start == 0xfef00000)
			start = KADB_DEBUGGER_BEGVM;
		if(!(prompte = srmmu_hwprobe(start))) {
			start += PAGE_SIZE;
			continue;
		}

		/* A red snapper, see what it really is. */
		what = 0;

		if(!(start & ~(SRMMU_PMD_MASK))) {
			if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PMD_SIZE) == prompte)
				what = 1;
		}

		if(!(start & ~(SRMMU_PGDIR_MASK))) {
			if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
			   prompte)
				what = 2;
		}

		pgdp = pgd_offset_k(start);
		if(what == 2) {
			*(pgd_t *)__nocache_fix(pgdp) = __pgd(prompte);
			start += SRMMU_PGDIR_SIZE;
			continue;
		}
		if(srmmu_pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			srmmu_pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
		if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = (pte_t *) __srmmu_get_nocache(SRMMU_PTE_SZ_SOFT,
			    SRMMU_PTE_SZ_SOFT);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, SRMMU_PTE_SZ_SOFT);
			srmmu_pmd_set(__nocache_fix(pmdp), ptep);
		}
		if(what == 1) {
			/*
			 * We bend the rule where all 16 PTPs in a pmd_t point
			 * inside the same PTE page, and we leak a perfectly
			 * good hardware PTE piece. Alternatives seem worse.
			 */
			unsigned int x;	/* Index of HW PMD in soft cluster */
			x = (start >> SRMMU_PMD_SHIFT) & 15;
			*(unsigned long *)__nocache_fix(&pmdp->pmdv[x]) = prompte;
			start += SRMMU_PMD_SIZE;
			continue;
		}
		ptep = srmmu_pte_offset(__nocache_fix(pmdp), start);
		*(pte_t *)__nocache_fix(ptep) = __pte(prompte);
		start += PAGE_SIZE;
	}
}

#define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)

/* Create a third-level SRMMU 16MB page mapping. */
static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base)
{
	pgd_t *pgdp = pgd_offset_k(vaddr);
	unsigned long big_pte;

	big_pte = KERNEL_PTE(phys_base >> 4);
	*(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte);
}

/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
{
	unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK);
	unsigned long vstart = (vbase & SRMMU_PGDIR_MASK);
	unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
	/* Map "low" memory only */
	const unsigned long min_vaddr = PAGE_OFFSET;
	const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM;

	if (vstart < min_vaddr || vstart >= max_vaddr)
		return vstart;

	if (vend > max_vaddr || vend < min_vaddr)
		vend = max_vaddr;

	while(vstart < vend) {
		do_large_mapping(vstart, pstart);
		vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE;
	}
	return vstart;
}

static inline void memprobe_error(char *msg)
{
	prom_printf(msg);
	prom_printf("Halting now...\n");
	prom_halt();
}

static inline void map_kernel(void)
{
	int i;

	if (phys_base > 0) {
		do_large_mapping(PAGE_OFFSET, phys_base);
	}

	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		map_spbank((unsigned long)__va(sp_banks[i].base_addr), i);
	}

	BTFIXUPSET_SIMM13(user_ptrs_per_pgd, PAGE_OFFSET / SRMMU_PGDIR_SIZE);
}

/* Paging initialization on the Sparc Reference MMU. */
extern void sparc_context_init(int);

extern int linux_num_cpus;

void (*poke_srmmu)(void) __initdata = NULL;

extern unsigned long bootmem_init(unsigned long *pages_avail);

void __init srmmu_paging_init(void)
{
	int i, cpunode;
	char node_str[128];
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long pages_avail;

	sparc_iomap.start = SUN4M_IOBASE_VADDR;	/* 16MB of IOSPACE on all sun4m's. */

	if (sparc_cpu_model == sun4d)
		num_contexts = 65536; /* We know it is Viking */
	else {
		/* Find the number of contexts on the srmmu. */
		cpunode = prom_getchild(prom_root_node);
		num_contexts = 0;
		while(cpunode != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if(!strcmp(node_str, "cpu")) {
				num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
				break;
			}
			cpunode = prom_getsibling(cpunode);
		}
	}

	if(!num_contexts) {
		prom_printf("Something wrong, can't find cpu node in paging_init.\n");
		prom_halt();
	}

	pages_avail = 0;
	last_valid_pfn = bootmem_init(&pages_avail);

	srmmu_nocache_calcsize();
	srmmu_nocache_init();
	srmmu_inherit_prom_mappings(0xfe400000,(LINUX_OPPROM_ENDVM-PAGE_SIZE));
	map_kernel();

	/* ctx table has to be physically aligned to its size */
	srmmu_context_table = (ctxd_t *)__srmmu_get_nocache(num_contexts*sizeof(ctxd_t), num_contexts*sizeof(ctxd_t));
	srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table);

	for(i = 0; i < num_contexts; i++)
		srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);

	flush_cache_all();
	srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
	flush_tlb_all();
	poke_srmmu();

#ifdef CONFIG_SUN_IO
	srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END);
	srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);
#endif

	srmmu_allocate_ptable_skeleton(fix_kmap_begin, fix_kmap_end);
	srmmu_allocate_ptable_skeleton(pkmap_base, pkmap_base_end);

	pgd = pgd_offset_k(pkmap_base);
	pmd = srmmu_pmd_offset(pgd, pkmap_base);
	pte = srmmu_pte_offset(pmd, pkmap_base);
	pkmap_page_table = pte;

	flush_cache_all();
	flush_tlb_all();

	sparc_context_init(num_contexts);

	kmap_init();

	{
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long zholes_size[MAX_NR_ZONES];
		unsigned long npages;
		int znum;

		for (znum = 0; znum < MAX_NR_ZONES; znum++)
			zones_size[znum] = zholes_size[znum] = 0;

		npages = max_low_pfn - (phys_base >> PAGE_SHIFT);

		zones_size[ZONE_DMA] = npages;
		zholes_size[ZONE_DMA] = npages - pages_avail;

		npages = highend_pfn - max_low_pfn;
		zones_size[ZONE_HIGHMEM] = npages;
		zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();

		free_area_init_node(0, &contig_page_data, NULL, zones_size,
				    phys_base >> PAGE_SHIFT, zholes_size);
		mem_map = contig_page_data.node_mem_map;
	}

	/* P3: easy to fix, todo. Current code is utterly broken, though. */
	if (phys_base != 0)
		panic("phys_base nonzero");
}

static void srmmu_mmu_info(struct seq_file *m)
{
	seq_printf(m,
		   "MMU type\t: %s\n"
		   "contexts\t: %d\n"
		   "nocache total\t: %ld\n"
		   "nocache used\t: %d\n",
		   srmmu_name,
		   num_contexts,
		   srmmu_nocache_size,
		   srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
}

static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
}

static void srmmu_destroy_context(struct mm_struct *mm)
{

	if(mm->context != NO_CONTEXT) {
		flush_cache_mm(mm);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
		flush_tlb_mm(mm);
		spin_lock(&srmmu_context_spinlock);
		free_context(mm->context);
		spin_unlock(&srmmu_context_spinlock);
		mm->context = NO_CONTEXT;
	}
}

/* Init various srmmu chip types. */
static void __init srmmu_is_bad(void)
{
	prom_printf("Could not determine SRMMU chip type.\n");
	prom_halt();
}

static void __init init_vac_layout(void)
{
	int nd, cache_lines;
	char node_str[128];
#ifdef CONFIG_SMP
	int cpu = 0;
	unsigned long max_size = 0;
	unsigned long min_line_size = 0x10000000;
#endif

	nd = prom_getchild(prom_root_node);
	while((nd = prom_getsibling(nd)) != 0) {
		prom_getstring(nd, "device_type", node_str, sizeof(node_str));
		if(!strcmp(node_str, "cpu")) {
			vac_line_size = prom_getint(nd, "cache-line-size");
			if (vac_line_size == -1) {
				prom_printf("can't determine cache-line-size, "
					    "halting.\n");
				prom_halt();
			}
			cache_lines = prom_getint(nd, "cache-nlines");
			if (cache_lines == -1) {
				prom_printf("can't determine cache-nlines, halting.\n");
				prom_halt();
			}

			vac_cache_size = cache_lines * vac_line_size;
#ifdef CONFIG_SMP
			if(vac_cache_size > max_size)
				max_size = vac_cache_size;
			if(vac_line_size < min_line_size)
				min_line_size = vac_line_size;
			cpu++;
			if (cpu >= NR_CPUS || !cpu_online(cpu))
				break;
#else
			break;
#endif
		}
	}
	if(nd == 0) {
		prom_printf("No CPU nodes found, halting.\n");
		prom_halt();
	}
#ifdef CONFIG_SMP
	vac_cache_size = max_size;
	vac_line_size = min_line_size;
#endif
	printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n",
	       (int)vac_cache_size, (int)vac_line_size);
}

static void __init poke_hypersparc(void)
{
	volatile unsigned long clear;
	unsigned long mreg = srmmu_get_mmureg();

	hyper_flush_unconditional_combined();

	mreg &= ~(HYPERSPARC_CWENABLE);
	mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
	mreg |= (HYPERSPARC_CMODE);

	srmmu_set_mmureg(mreg);

#if 0 /* XXX I think this is bad news... -DaveM */
	hyper_clear_all_tags();
#endif

	put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
	hyper_flush_whole_icache();
	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();
}

static void __init init_hypersparc(void)
{
	srmmu_name = "ROSS HyperSparc";

	init_vac_layout();

	is_hypersparc = 1;

	BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_all, hypersparc_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, hypersparc_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, hypersparc_flush_cache_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, hypersparc_flush_cache_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, hypersparc_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, hypersparc_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, hypersparc_flush_tlb_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, hypersparc_flush_tlb_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, hypersparc_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, hypersparc_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, hypersparc_flush_page_for_dma, BTFIXUPCALL_NOP);


	poke_srmmu = poke_hypersparc;

	hypersparc_setup_blockops();
}

static void __init poke_cypress(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	unsigned long faddr, tagval;
	volatile unsigned long cypress_sucks;
	volatile unsigned long clear;

	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();

	if (!(mreg & CYPRESS_CENABLE)) {
		for(faddr = 0x0; faddr < 0x10000; faddr += 20) {
			__asm__ __volatile__("sta %%g0, [%0 + %1] %2\n\t"
					     "sta %%g0, [%0] %2\n\t" : :
					     "r" (faddr), "r" (0x40000),
					     "i" (ASI_M_DATAC_TAG));
		}
	} else {
		for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
			__asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
					     "=r" (tagval) :
					     "r" (faddr), "r" (0x40000),
					     "i" (ASI_M_DATAC_TAG));

			/* If modified and valid, kick it. */
			if((tagval & 0x60) == 0x60)
				cypress_sucks = *(unsigned long *)
							(0xf0020000 + faddr);
		}
	}

	/* And one more, for our good neighbor, Mr. Broken Cypress. */
	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();

	mreg |= (CYPRESS_CENABLE | CYPRESS_CMODE);
	srmmu_set_mmureg(mreg);
}

static void __init init_cypress_common(void)
{
	init_vac_layout();

	BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_all, cypress_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, cypress_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, cypress_flush_cache_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, cypress_flush_cache_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, cypress_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, cypress_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, cypress_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, cypress_flush_tlb_range, BTFIXUPCALL_NORM);


	BTFIXUPSET_CALL(__flush_page_to_ram, cypress_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, cypress_flush_sig_insns, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_page_for_dma, cypress_flush_page_for_dma, BTFIXUPCALL_NOP);

	poke_srmmu = poke_cypress;
}

static void __init init_cypress_604(void)
{
	srmmu_name = "ROSS Cypress-604(UP)";
	srmmu_modtype = Cypress;
	init_cypress_common();
}

static void __init init_cypress_605(unsigned long mrev)
{
	srmmu_name = "ROSS Cypress-605(MP)";
	if(mrev == 0xe) {
		srmmu_modtype = Cypress_vE;
		hwbug_bitmask |= HWBUG_COPYBACK_BROKEN;
	} else {
		if(mrev == 0xd) {
			srmmu_modtype = Cypress_vD;
			hwbug_bitmask |= HWBUG_ASIFLUSH_BROKEN;
		} else {
			srmmu_modtype = Cypress;
		}
	}
	init_cypress_common();
}

static void __init poke_swift(void)
{
	unsigned long mreg;

	/* Clear any crap from the cache or else... */
	swift_flush_cache_all();

	/* Enable I & D caches */
	mreg = srmmu_get_mmureg();
	mreg |= (SWIFT_IE | SWIFT_DE);
	/*
	 * The Swift branch folding logic is completely broken. At
	 * trap time, if things are just right, it can mistakenly
	 * think that a trap is coming from kernel mode when in fact
	 * it is coming from user mode (it mis-executes the branch in
	 * the trap code). So you see things like crashme completely
	 * hosing your machine which is completely unacceptable. Turn
	 * this shit off... nice job Fujitsu.
	 */
	mreg &= ~(SWIFT_BF);
	srmmu_set_mmureg(mreg);
}

#define SWIFT_MASKID_ADDR 0x10003018
static void __init init_swift(void)
{
	unsigned long swift_rev;

	__asm__ __volatile__("lda [%1] %2, %0\n\t"
			     "srl %0, 0x18, %0\n\t" :
			     "=r" (swift_rev) :
			     "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
	srmmu_name = "Fujitsu Swift";
	switch(swift_rev) {
	case 0x11:
	case 0x20:
	case 0x23:
	case 0x30:
		srmmu_modtype = Swift_lots_o_bugs;
		hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
		/*
		 * Gee george, I wonder why Sun is so hush hush about
		 * this hardware bug... really braindamage stuff going
		 * on here. However I think we can find a way to avoid
		 * all of the workaround overhead under Linux. Basically,
		 * any page fault can cause kernel pages to become user
		 * accessible (the mmu gets confused and clears some of
		 * the ACC bits in kernel ptes). Aha, sounds pretty
		 * horrible eh? But wait, after extensive testing it appears
		 * that if you use pgd_t level large kernel pte's (like the
		 * 4MB pages on the Pentium) the bug does not get tripped
		 * at all. This avoids almost all of the major overhead.
		 * Welcome to a world where your vendor tells you to,
		 * "apply this kernel patch" instead of "sorry for the
		 * broken hardware, send it back and we'll give you
		 * properly functioning parts"
		 */
		break;
	case 0x25:
	case 0x31:
		srmmu_modtype = Swift_bad_c;
		hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
		/*
		 * You see Sun allude to this hardware bug but never
		 * admit things directly, they'll say things like,
		 * "the Swift chip cache problems" or similar.
		 */
		break;
	default:
		srmmu_modtype = Swift_ok;
		break;
	};

	BTFIXUPSET_CALL(flush_cache_all, swift_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, swift_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, swift_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, swift_flush_cache_range, BTFIXUPCALL_NORM);


	BTFIXUPSET_CALL(flush_tlb_all, swift_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, swift_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, swift_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, swift_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, swift_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, swift_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, swift_flush_page_for_dma, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(update_mmu_cache, swift_update_mmu_cache, BTFIXUPCALL_NORM);

	flush_page_for_dma_global = 0;

	/*
	 * Are you now convinced that the Swift is one of the
	 * biggest VLSI abortions of all time? Bravo Fujitsu!
	 * Fujitsu, the !#?!%$'d up processor people. I bet if
	 * you examined the microcode of the Swift you'd find
	 * XXX's all over the place.
	 */
	poke_srmmu = poke_swift;
}

static void turbosparc_flush_cache_all(void)
{
	flush_user_windows();
	turbosparc_idflash_clear();
}

static void turbosparc_flush_cache_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	FLUSH_BEGIN(mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	if (vma->vm_flags & VM_EXEC)
		turbosparc_flush_icache();
	turbosparc_flush_dcache();
	FLUSH_END
}

/* TurboSparc is copy-back, if we turn it on, but this does not work. */
static void turbosparc_flush_page_to_ram(unsigned long page)
{
#ifdef TURBOSPARC_WRITEBACK
	volatile unsigned long clear;

	if (srmmu_hwprobe(page))
		turbosparc_flush_page_cache(page);
	clear = srmmu_get_fstatus();
#endif
}

static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}

static void turbosparc_flush_page_for_dma(unsigned long page)
{
	turbosparc_flush_dcache();
}

static void turbosparc_flush_tlb_all(void)
{
	srmmu_flush_whole_tlb();
}

static void turbosparc_flush_tlb_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	FLUSH_BEGIN(mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}


static void __init poke_turbosparc(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	unsigned long ccreg;

	/* Clear any crap from the cache or else... */
	turbosparc_flush_cache_all();
	mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* Temporarily disable I & D caches */
	mreg &= ~(TURBOSPARC_PCENABLE);		/* Don't check parity */
	srmmu_set_mmureg(mreg);

	ccreg = turbosparc_get_ccreg();

#ifdef TURBOSPARC_WRITEBACK
	ccreg |= (TURBOSPARC_SNENABLE);		/* Do DVMA snooping in Dcache */
	ccreg &= ~(TURBOSPARC_uS2 | TURBOSPARC_WTENABLE);
			/* Write-back D-cache, emulate VLSI
			 * abortion number three, not number one */
#else
	/* For now let's play safe, optimize later */
	ccreg |= (TURBOSPARC_SNENABLE | TURBOSPARC_WTENABLE);
			/* Do DVMA snooping in Dcache, Write-thru D-cache */
	ccreg &= ~(TURBOSPARC_uS2);
			/* Emulate VLSI abortion number three, not number one */
#endif

	switch (ccreg & 7) {
	case 0: /* No SE cache */
	case 7: /* Test mode */
		break;
	default:
		ccreg |= (TURBOSPARC_SCENABLE);
	}
	turbosparc_set_ccreg (ccreg);

	mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */
	mreg |= (TURBOSPARC_ICSNOOP);		/* Icache snooping on */
	srmmu_set_mmureg(mreg);
}

static void __init init_turbosparc(void)
{
	srmmu_name = "Fujitsu TurboSparc";
	srmmu_modtype = TurboSparc;

	BTFIXUPSET_CALL(flush_cache_all, turbosparc_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, turbosparc_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, turbosparc_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, turbosparc_flush_cache_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, turbosparc_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, turbosparc_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, turbosparc_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, turbosparc_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, turbosparc_flush_page_to_ram, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_sig_insns, turbosparc_flush_sig_insns, BTFIXUPCALL_NOP);
static void __init poke_turbosparc(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	unsigned long ccreg;

	/* Clear any crap from the cache or else... */
	turbosparc_flush_cache_all();
	mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* Temporarily disable I & D caches */
	mreg &= ~(TURBOSPARC_PCENABLE);		/* Don't check parity */
	srmmu_set_mmureg(mreg);

	ccreg = turbosparc_get_ccreg();

#ifdef TURBOSPARC_WRITEBACK
	ccreg |= (TURBOSPARC_SNENABLE);		/* Do DVMA snooping in Dcache */
	ccreg &= ~(TURBOSPARC_uS2 | TURBOSPARC_WTENABLE);
			/* Write-back D-cache, emulate VLSI
			 * abortion number three, not number one */
#else
	/* For now let's play safe, optimize later */
	ccreg |= (TURBOSPARC_SNENABLE | TURBOSPARC_WTENABLE);
			/* Do DVMA snooping in Dcache, write-thru D-cache */
	ccreg &= ~(TURBOSPARC_uS2);
			/* Emulate VLSI abortion number three, not number one */
#endif

	switch (ccreg & 7) {
	case 0: /* No SE cache */
	case 7: /* Test mode */
		break;
	default:
		ccreg |= (TURBOSPARC_SCENABLE);
	}
	turbosparc_set_ccreg(ccreg);

	mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */
	mreg |= (TURBOSPARC_ICSNOOP);		/* Icache snooping on */
	srmmu_set_mmureg(mreg);
}

static void __init init_turbosparc(void)
{
	srmmu_name = "Fujitsu TurboSparc";
	srmmu_modtype = TurboSparc;

	BTFIXUPSET_CALL(flush_cache_all, turbosparc_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, turbosparc_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, turbosparc_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, turbosparc_flush_cache_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, turbosparc_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, turbosparc_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, turbosparc_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, turbosparc_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, turbosparc_flush_page_to_ram, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_sig_insns, turbosparc_flush_sig_insns, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_page_for_dma, turbosparc_flush_page_for_dma, BTFIXUPCALL_NORM);

	poke_srmmu = poke_turbosparc;
}

static void __init poke_tsunami(void)
{
	unsigned long mreg = srmmu_get_mmureg();

	tsunami_flush_icache();
	tsunami_flush_dcache();
	mreg &= ~TSUNAMI_ITD;
	mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
	srmmu_set_mmureg(mreg);
}

static void __init init_tsunami(void)
{
	/*
	 * Tsunami's pretty sane, Sun and TI actually got it
	 * somewhat right this time.  Fujitsu should have
	 * taken some lessons from them.
	 */

	srmmu_name = "TI Tsunami";
	srmmu_modtype = Tsunami;

	BTFIXUPSET_CALL(flush_cache_all, tsunami_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, tsunami_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, tsunami_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, tsunami_flush_cache_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, tsunami_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, tsunami_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, tsunami_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, tsunami_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, tsunami_flush_page_to_ram, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_sig_insns, tsunami_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, tsunami_flush_page_for_dma, BTFIXUPCALL_NORM);

	poke_srmmu = poke_tsunami;

	tsunami_setup_blockops();
}

static void __init poke_viking(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	static int smp_catch;

	if (viking_mxcc_present) {
		unsigned long mxcc_control = mxcc_get_creg();

		mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
		mxcc_control &= ~(MXCC_CTL_RRC);
		mxcc_set_creg(mxcc_control);

		/*
		 * We don't need memory parity checks.
		 * XXX This is a mess, have to dig out later. ecd.
		viking_mxcc_turn_off_parity(&mreg, &mxcc_control);
		 */

		/* We do cache ptables on MXCC. */
		mreg |= VIKING_TCENABLE;
	} else {
		unsigned long bpreg;

		mreg &= ~(VIKING_TCENABLE);
		if (smp_catch++) {
			/* Must disable mixed-cmd mode here for the other CPUs. */
			bpreg = viking_get_bpreg();
			bpreg &= ~(VIKING_ACTION_MIX);
			viking_set_bpreg(bpreg);

			/* Just in case PROM does something funny. */
			msi_set_sync();
		}
	}

	mreg |= VIKING_SPENABLE;
	mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
	mreg |= VIKING_SBENABLE;
	mreg &= ~(VIKING_ACENABLE);
	srmmu_set_mmureg(mreg);

#ifdef CONFIG_SMP
	/* Avoid unnecessary cross calls. */
	BTFIXUPCOPY_CALL(flush_cache_all, local_flush_cache_all);
	BTFIXUPCOPY_CALL(flush_cache_mm, local_flush_cache_mm);
	BTFIXUPCOPY_CALL(flush_cache_range, local_flush_cache_range);
	BTFIXUPCOPY_CALL(flush_cache_page, local_flush_cache_page);
	BTFIXUPCOPY_CALL(__flush_page_to_ram, local_flush_page_to_ram);
	BTFIXUPCOPY_CALL(flush_sig_insns, local_flush_sig_insns);
	BTFIXUPCOPY_CALL(flush_page_for_dma, local_flush_page_for_dma);
	btfixup();
#endif
}
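/*
 * Two flavours of Viking: a plain MBus module, which needs the DMA snoop
 * workaround below and does not get its page tables cached, and an MXCC
 * module, which snoops DVMA properly and can therefore keep page tables
 * in its cache.
 */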
static void __init init_viking(void)
{
	unsigned long mreg = srmmu_get_mmureg();

	/* Ahhh, the viking.  SRMMU VLSI abortion number two... */
	if (mreg & VIKING_MMODE) {
		srmmu_name = "TI Viking";
		viking_mxcc_present = 0;
		msi_set_sync();

		BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);

		/*
		 * We need this to make sure the old Viking takes no hits
		 * on its cache for DMA snoops, to work around the
		 * "load from non-cacheable memory" interrupt bug.
		 * This is only necessary because of the new way in
		 * which we use the IOMMU.
		 */
		BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page, BTFIXUPCALL_NORM);

		flush_page_for_dma_global = 0;
	} else {
		srmmu_name = "TI Viking/MXCC";
		viking_mxcc_present = 1;

		srmmu_cache_pagetables = 1;

		/* MXCC vikings lack the DMA snooping bug. */
		BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page_for_dma, BTFIXUPCALL_NOP);
	}

	BTFIXUPSET_CALL(flush_cache_all, viking_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, viking_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, viking_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, viking_flush_cache_range, BTFIXUPCALL_NORM);

#ifdef CONFIG_SMP
	if (sparc_cpu_model == sun4d) {
		BTFIXUPSET_CALL(flush_tlb_all, sun4dsmp_flush_tlb_all, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_mm, sun4dsmp_flush_tlb_mm, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_page, sun4dsmp_flush_tlb_page, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_range, sun4dsmp_flush_tlb_range, BTFIXUPCALL_NORM);
	} else
#endif
	{
		BTFIXUPSET_CALL(flush_tlb_all, viking_flush_tlb_all, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_mm, viking_flush_tlb_mm, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_page, viking_flush_tlb_page, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_range, viking_flush_tlb_range, BTFIXUPCALL_NORM);
	}

	BTFIXUPSET_CALL(__flush_page_to_ram, viking_flush_page_to_ram, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_sig_insns, viking_flush_sig_insns, BTFIXUPCALL_NOP);

	poke_srmmu = poke_viking;
}
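/*
 * The probe below keys off two 4-bit fields in the MMU control register
 * and in the PSR: bits 31:28 give the implementation and bits 27:24 the
 * version/revision.  A minimal, illustrative decode (the sample value is
 * made up; this block is not compiled):
 */
#if 0
static void srmmu_probe_example(void)
{
	unsigned long psr = 0x04000000;			/* hypothetical PSR */
	unsigned long impl = (psr >> 28) & 0xf;		/* 0x0: Fujitsu */
	unsigned long vers = (psr >> 24) & 0xf;		/* 0x4: Swift, 0x5: TurboSparc */

	(void) impl;
	(void) vers;
}
#endif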
/* Probe for the srmmu chip version. */
static void __init get_srmmu_type(void)
{
	unsigned long mreg, psr;
	unsigned long mod_typ, mod_rev, psr_typ, psr_vers;

	srmmu_modtype = SRMMU_INVAL_MOD;
	hwbug_bitmask = 0;

	mreg = srmmu_get_mmureg(); psr = get_psr();
	mod_typ = (mreg & 0xf0000000) >> 28;
	mod_rev = (mreg & 0x0f000000) >> 24;
	psr_typ = (psr >> 28) & 0xf;
	psr_vers = (psr >> 24) & 0xf;

	/* First, check for HyperSparc or Cypress. */
	if (mod_typ == 1) {
		switch (mod_rev) {
		case 7:
			/* UP or MP Hypersparc */
			init_hypersparc();
			break;
		case 0:
		case 2:
			/* Uniprocessor Cypress */
			init_cypress_604();
			break;
		case 10:
		case 11:
		case 12:
			/* _REALLY OLD_ Cypress MP chips... */
		case 13:
		case 14:
		case 15:
			/* MP Cypress mmu/cache-controller */
			init_cypress_605(mod_rev);
			break;
		default:
			/* Some other Cypress revision, assume a 605. */
			init_cypress_605(mod_rev);
			break;
		}
		return;
	}

	/*
	 * Now Fujitsu TurboSparc.  It might happen that it is
	 * in Swift emulation mode, so we will check for that later...
	 */
	if (psr_typ == 0 && psr_vers == 5) {
		init_turbosparc();
		return;
	}

	/* Next check for Fujitsu Swift. */
	if (psr_typ == 0 && psr_vers == 4) {
		int cpunode;
		char node_str[128];

		/* Check that it is not a TurboSparc emulating Swift... */
		cpunode = prom_getchild(prom_root_node);
		while ((cpunode = prom_getsibling(cpunode)) != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if (!strcmp(node_str, "cpu")) {
				if (!prom_getintdefault(cpunode, "psr-implementation", 1) &&
				    prom_getintdefault(cpunode, "psr-version", 1) == 5) {
					init_turbosparc();
					return;
				}
				break;
			}
		}

		init_swift();
		return;
	}

	/* Now the Viking family of srmmu. */
	if (psr_typ == 4 &&
	    ((psr_vers == 0) ||
	     ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
		init_viking();
		return;
	}

	/* Finally the Tsunami. */
	if (psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
		init_tsunami();
		return;
	}

	/* Oh well */
	srmmu_is_bad();
}

/* Don't laugh, static pagetables. */
static void srmmu_check_pgt_cache(int low, int high)
{
}

extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme,
	tsetup_mmu_patchme, rtrap_mmu_patchme;

extern unsigned long spwin_srmmu_stackchk, srmmu_fwin_stackchk,
	tsetup_srmmu_stackchk, srmmu_rett_stackchk;

extern unsigned long srmmu_fault;

#define PATCH_BRANCH(insn, dest) do { \
	iaddr = &(insn); \
	daddr = &(dest); \
	*iaddr = SPARC_BRANCH((unsigned long) daddr, (unsigned long) iaddr); \
} while(0)

static void __init patch_window_trap_handlers(void)
{
	unsigned long *iaddr, *daddr;

	PATCH_BRANCH(spwin_mmu_patchme, spwin_srmmu_stackchk);
	PATCH_BRANCH(fwin_mmu_patchme, srmmu_fwin_stackchk);
	PATCH_BRANCH(tsetup_mmu_patchme, tsetup_srmmu_stackchk);
	PATCH_BRANCH(rtrap_mmu_patchme, srmmu_rett_stackchk);
	PATCH_BRANCH(sparc_ttable[SP_TRAP_TFLT].inst_three, srmmu_fault);
	PATCH_BRANCH(sparc_ttable[SP_TRAP_DFLT].inst_three, srmmu_fault);
	PATCH_BRANCH(sparc_ttable[SP_TRAP_DACC].inst_three, srmmu_fault);
}
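/*
 * On SMP every CPU has to see the DMA flush: smp_flush_page_for_dma()
 * broadcasts the chip's local handler to the other CPUs via xc1() and
 * then runs it locally as well.
 */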
#ifdef CONFIG_SMP
/* Local cross-calls. */
static void smp_flush_page_for_dma(unsigned long page)
{
	xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_for_dma), page);
	local_flush_page_for_dma(page);
}

#endif

static pte_t srmmu_pgoff_to_pte(unsigned long pgoff)
{
	return __pte((pgoff << SRMMU_PTE_FILE_SHIFT) | SRMMU_FILE);
}

static unsigned long srmmu_pte_to_pgoff(pte_t pte)
{
	return pte_val(pte) >> SRMMU_PTE_FILE_SHIFT;
}
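/*
 * The two helpers above implement the nonlinear (file) pte encoding:
 * the page offset lives above SRMMU_PTE_FILE_SHIFT and SRMMU_FILE tags
 * the entry as a file pte.  A minimal sketch of the intended round trip
 * (illustrative only, not compiled):
 */
#if 0
static void srmmu_file_pte_example(void)
{
	unsigned long pgoff = 0x1234;		/* arbitrary file offset, in pages */
	pte_t pte = srmmu_pgoff_to_pte(pgoff);	/* shift up and tag with SRMMU_FILE */

	/* srmmu_pte_to_pgoff() shifts the offset back down again. */
	BUG_ON(srmmu_pte_to_pgoff(pte) != pgoff);
}
#endif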
/* Load up routines and constants for sun4m and sun4d mmu */
void __init ld_mmu_srmmu(void)
{
	extern void ld_mmu_iommu(void);
	extern void ld_mmu_iounit(void);
	extern void ___xchg32_sun4md(void);

	BTFIXUPSET_SIMM13(pmd_shift, SRMMU_PMD_SHIFT_SOFT);
	BTFIXUPSET_SETHI(pmd_size, SRMMU_PMD_SIZE_SOFT);
	BTFIXUPSET_SETHI(pmd_mask, SRMMU_PMD_MASK_SOFT);

	BTFIXUPSET_SIMM13(pgdir_shift, SRMMU_PGDIR_SHIFT);
	BTFIXUPSET_SETHI(pgdir_size, SRMMU_PGDIR_SIZE);
	BTFIXUPSET_SETHI(pgdir_mask, SRMMU_PGDIR_MASK);

	BTFIXUPSET_SIMM13(ptrs_per_pte, SRMMU_PTRS_PER_PTE_SOFT);
	BTFIXUPSET_SIMM13(ptrs_per_pmd, SRMMU_PTRS_PER_PMD_SOFT);
	BTFIXUPSET_SIMM13(ptrs_per_pgd, SRMMU_PTRS_PER_PGD);

	BTFIXUPSET_INT(page_none, pgprot_val(SRMMU_PAGE_NONE));
	BTFIXUPSET_INT(page_shared, pgprot_val(SRMMU_PAGE_SHARED));
	BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
	BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
	BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
	page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
	pg_iobits = SRMMU_VALID | SRMMU_WRITE | SRMMU_REF;

	/* Functions */
#ifndef CONFIG_SMP
	BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4md, BTFIXUPCALL_SWAPG1G2);
#endif
	BTFIXUPSET_CALL(do_check_pgt_cache, srmmu_check_pgt_cache, BTFIXUPCALL_NOP);

	BTFIXUPSET_CALL(set_pte, srmmu_set_pte, BTFIXUPCALL_SWAPO0O1);
	BTFIXUPSET_CALL(switch_mm, srmmu_switch_mm, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(pte_pfn, srmmu_pte_pfn, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_page, srmmu_pmd_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_page, srmmu_pgd_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_SETHI(none_mask, 0xF0000000);

	BTFIXUPSET_CALL(pte_present, srmmu_pte_present, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_SWAPO0G0);

	BTFIXUPSET_CALL(pmd_bad, srmmu_pmd_bad, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_present, srmmu_pmd_present, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_SWAPO0G0);

	BTFIXUPSET_CALL(pgd_none, srmmu_pgd_none, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_bad, srmmu_pgd_bad, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_present, srmmu_pgd_present, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_SWAPO0G0);

	BTFIXUPSET_CALL(mk_pte, srmmu_mk_pte, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mk_pte_phys, srmmu_mk_pte_phys, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mk_pte_io, srmmu_mk_pte_io, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_set, srmmu_pgd_set, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_set, srmmu_pmd_set, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_populate, srmmu_pmd_populate, BTFIXUPCALL_NORM);

	BTFIXUPSET_INT(pte_modify_mask, SRMMU_CHG_MASK);
	BTFIXUPSET_CALL(pmd_offset, srmmu_pmd_offset, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_offset_kernel, srmmu_pte_offset, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(free_pte_fast, srmmu_free_pte_fast, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_free, srmmu_pte_free, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_alloc_one_kernel, srmmu_pte_alloc_one_kernel, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_alloc_one, srmmu_pte_alloc_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(free_pmd_fast, srmmu_pmd_free, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_alloc_one, srmmu_pmd_alloc_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(free_pgd_fast, srmmu_free_pgd_fast, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(get_pgd_fast, srmmu_get_pgd_fast, BTFIXUPCALL_NORM);

	BTFIXUPSET_HALF(pte_writei, SRMMU_WRITE);
	BTFIXUPSET_HALF(pte_dirtyi, SRMMU_DIRTY);
	BTFIXUPSET_HALF(pte_youngi, SRMMU_REF);
	BTFIXUPSET_HALF(pte_filei, SRMMU_FILE);
	BTFIXUPSET_HALF(pte_wrprotecti, SRMMU_WRITE);
	BTFIXUPSET_HALF(pte_mkcleani, SRMMU_DIRTY);
	BTFIXUPSET_HALF(pte_mkoldi, SRMMU_REF);
	BTFIXUPSET_CALL(pte_mkwrite, srmmu_pte_mkwrite, BTFIXUPCALL_ORINT(SRMMU_WRITE));
	BTFIXUPSET_CALL(pte_mkdirty, srmmu_pte_mkdirty, BTFIXUPCALL_ORINT(SRMMU_DIRTY));
	BTFIXUPSET_CALL(pte_mkyoung, srmmu_pte_mkyoung, BTFIXUPCALL_ORINT(SRMMU_REF));
	BTFIXUPSET_CALL(update_mmu_cache, srmmu_update_mmu_cache, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(destroy_context, srmmu_destroy_context, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(sparc_mapiorange, srmmu_mapiorange, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(sparc_unmapiorange, srmmu_unmapiorange, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(mmu_info, srmmu_mmu_info, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(alloc_thread_info, srmmu_alloc_thread_info, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(free_thread_info, srmmu_free_thread_info, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(pte_to_pgoff, srmmu_pte_to_pgoff, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgoff_to_pte, srmmu_pgoff_to_pte, BTFIXUPCALL_NORM);

	get_srmmu_type();
	patch_window_trap_handlers();
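	/*
	 * SMP: preserve the chip-specific routines chosen above as the
	 * local_* variants, then point the global entry points at the
	 * smp_* wrappers so the flushes can be broadcast to the other
	 * CPUs as well.
	 */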
#ifdef CONFIG_SMP
	/* El switcheroo... */

	BTFIXUPCOPY_CALL(local_flush_cache_all, flush_cache_all);
	BTFIXUPCOPY_CALL(local_flush_cache_mm, flush_cache_mm);
	BTFIXUPCOPY_CALL(local_flush_cache_range, flush_cache_range);
	BTFIXUPCOPY_CALL(local_flush_cache_page, flush_cache_page);
	BTFIXUPCOPY_CALL(local_flush_tlb_all, flush_tlb_all);
	BTFIXUPCOPY_CALL(local_flush_tlb_mm, flush_tlb_mm);
	BTFIXUPCOPY_CALL(local_flush_tlb_range, flush_tlb_range);
	BTFIXUPCOPY_CALL(local_flush_tlb_page, flush_tlb_page);
	BTFIXUPCOPY_CALL(local_flush_page_to_ram, __flush_page_to_ram);
	BTFIXUPCOPY_CALL(local_flush_sig_insns, flush_sig_insns);
	BTFIXUPCOPY_CALL(local_flush_page_for_dma, flush_page_for_dma);

	BTFIXUPSET_CALL(flush_cache_all, smp_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, smp_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, smp_flush_cache_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, smp_flush_cache_page, BTFIXUPCALL_NORM);
	if (sparc_cpu_model != sun4d) {
		BTFIXUPSET_CALL(flush_tlb_all, smp_flush_tlb_all, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_mm, smp_flush_tlb_mm, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_range, smp_flush_tlb_range, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_page, smp_flush_tlb_page, BTFIXUPCALL_NORM);
	}
	BTFIXUPSET_CALL(__flush_page_to_ram, smp_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, smp_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, smp_flush_page_for_dma, BTFIXUPCALL_NORM);
#endif

	if (sparc_cpu_model == sun4d)
		ld_mmu_iounit();
	else
		ld_mmu_iommu();
#ifdef CONFIG_SMP
	if (sparc_cpu_model == sun4d)
		sun4d_init_smp();
	else
		sun4m_init_smp();
#endif
}
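/*
 * Summary: ld_mmu_srmmu() installs the generic SRMMU page table and MMU
 * operations, probes the chip with get_srmmu_type() (which also selects
 * the chip-specific flush routines and a poke_* setup hook), patches the
 * window trap handlers, and finally hooks up the sun4d IO-unit or the
 * sun4m IOMMU code plus the SMP-specific bits.
 */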