/*
 * Port on Texas Instruments TMS320C6x architecture
 *
 * Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
 * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dma-mapping.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/clkdev.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_fdt.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/fs.h>
#include <linux/of.h>
#include <linux/console.h>
#include <linux/screen_info.h>

#include <asm/sections.h>
#include <asm/div64.h>
#include <asm/setup.h>
#include <asm/dscr.h>
#include <asm/clock.h>
#include <asm/soc.h>
#include <asm/special_insns.h>

static const char *c6x_soc_name;

struct screen_info screen_info;

int c6x_num_cores;
EXPORT_SYMBOL_GPL(c6x_num_cores);

unsigned int c6x_silicon_rev;
EXPORT_SYMBOL_GPL(c6x_silicon_rev);

/*
 * Device status register. This holds information
 * about device configuration needed by some drivers.
 */
unsigned int c6x_devstat;
EXPORT_SYMBOL_GPL(c6x_devstat);

/*
 * Some SoCs have fuse registers holding a unique MAC
 * address. This is parsed out of the device tree with
 * the resulting MAC being held here.
 */
unsigned char c6x_fuse_mac[6];

unsigned long memory_start;
unsigned long memory_end;
EXPORT_SYMBOL(memory_end);

unsigned long ram_start;
unsigned long ram_end;

/* Uncached memory for DMA consistent use (memdma=) */
static unsigned long dma_start __initdata;
static unsigned long dma_size __initdata;

struct cpuinfo_c6x {
	const char *cpu_name;
	const char *cpu_voltage;
	const char *mmu;
	const char *fpu;
	char *cpu_rev;
	unsigned int core_id;
	char __cpu_rev[5];
};

static DEFINE_PER_CPU(struct cpuinfo_c6x, cpu_data);

unsigned int ticks_per_ns_scaled;
EXPORT_SYMBOL(ticks_per_ns_scaled);

unsigned int c6x_core_freq;

static void __init get_cpuinfo(void)
{
	unsigned cpu_id, rev_id, csr;
	struct clk *coreclk = clk_get_sys(NULL, "core");
	unsigned long core_khz;
	u64 tmp;
	struct cpuinfo_c6x *p;
	struct device_node *node, *np;

	p = &per_cpu(cpu_data, smp_processor_id());

	if (!IS_ERR(coreclk))
		c6x_core_freq = clk_get_rate(coreclk);
	else {
		printk(KERN_WARNING
		       "Cannot find core clock frequency. Using 700MHz\n");
		c6x_core_freq = 700000000;
	}

	core_khz = c6x_core_freq / 1000;

	tmp = (uint64_t)core_khz << C6X_NDELAY_SCALE;
	do_div(tmp, 1000000);
	ticks_per_ns_scaled = tmp;

	csr = get_creg(CSR);
	cpu_id = csr >> 24;		/* CSR[31:24]: CPU ID */
	rev_id = (csr >> 16) & 0xff;	/* CSR[23:16]: silicon revision */

	p->mmu = "none";
	p->fpu = "none";
	p->cpu_voltage = "unknown";

	switch (cpu_id) {
	case 0:
		p->cpu_name = "C67x";
		p->fpu = "yes";
		break;
	case 2:
		p->cpu_name = "C62x";
		break;
	case 8:
		p->cpu_name = "C64x";
		break;
	case 12:
		p->cpu_name = "C64x";
		break;
	case 16:
		p->cpu_name = "C64x+";
		p->cpu_voltage = "1.2";
		break;
	case 21:
		p->cpu_name = "C66X";
		p->cpu_voltage = "1.2";
		break;
	default:
		p->cpu_name = "unknown";
		break;
	}

	if (cpu_id < 16) {
		switch (rev_id) {
		case 0x1:
			if (cpu_id > 8) {
				p->cpu_rev = "DM640/DM641/DM642/DM643";
				p->cpu_voltage = "1.2 - 1.4";
			} else {
				p->cpu_rev = "C6201";
				p->cpu_voltage = "2.5";
			}
			break;
		case 0x2:
			p->cpu_rev = "C6201B/C6202/C6211";
			p->cpu_voltage = "1.8";
			break;
		case 0x3:
			p->cpu_rev = "C6202B/C6203/C6204/C6205";
			p->cpu_voltage = "1.5";
			break;
		case 0x201:
			p->cpu_rev = "C6701 revision 0 (early CPU)";
			p->cpu_voltage = "1.8";
			break;
		case 0x202:
			p->cpu_rev = "C6701/C6711/C6712";
			p->cpu_voltage = "1.8";
			break;
		case 0x801:
			p->cpu_rev = "C64x";
			p->cpu_voltage = "1.5";
			break;
		default:
			p->cpu_rev = "unknown";
		}
	} else {
		p->cpu_rev = p->__cpu_rev;
		snprintf(p->__cpu_rev, sizeof(p->__cpu_rev), "0x%x", cpu_id);
	}

	p->core_id = get_coreid();

	node = of_find_node_by_name(NULL, "cpus");
	if (node) {
		for_each_child_of_node(node, np)
			if (!strcmp("cpu", np->name))
				++c6x_num_cores;
		of_node_put(node);
	}

	node = of_find_node_by_name(NULL, "soc");
	if (node) {
		if (of_property_read_string(node, "model", &c6x_soc_name))
			c6x_soc_name = "unknown";
		of_node_put(node);
	} else
		c6x_soc_name = "unknown";

	printk(KERN_INFO "CPU%d: %s rev %s, %s volts, %uMHz\n",
	       p->core_id, p->cpu_name, p->cpu_rev,
	       p->cpu_voltage, c6x_core_freq / 1000000);
}

/*
 * Early parsing of the command line
 */
static u32 mem_size __initdata;

/* "mem=" parsing. */
static int __init early_mem(char *p)
{
	if (!p)
		return -EINVAL;

	mem_size = memparse(p, &p);
	/* don't remove all of memory when handling "mem={invalid}" */
	if (mem_size == 0)
		return -EINVAL;

	return 0;
}
early_param("mem", early_mem);

/* "memdma=<size>[@<address>]" parsing. */
static int __init early_memdma(char *p)
{
	if (!p)
		return -EINVAL;

	dma_size = memparse(p, &p);
	if (*p == '@')
		dma_start = memparse(p + 1, &p); /* skip '@' before parsing the address */

	return 0;
}
early_param("memdma", early_memdma);

int __init c6x_add_memory(phys_addr_t start, unsigned long size)
{
	static int ram_found __initdata;

	/* We only handle one bank (the one with PAGE_OFFSET) for now */
	if (ram_found)
		return -EINVAL;

	if (start > PAGE_OFFSET || PAGE_OFFSET >= (start + size))
		return 0;

	ram_start = start;
	ram_end = start + size;

	ram_found = 1;
	return 0;
}

/*
 * Do early machine setup and device tree parsing. This is called very
 * early in the boot process.
 */
notrace void __init machine_init(unsigned long dt_ptr)
{
	void *dtb = __va(dt_ptr);
	void *fdt = _fdt_start;

	/* interrupts must be masked */
	set_creg(IER, 2);

	/*
	 * Set the Interrupt Service Table (IST) to the beginning of the
	 * vector table.
	 */
	set_ist(_vectors_start);

	/*
	 * dtb is passed in from bootloader.
	 * fdt is linked in blob.
	 */
	if (dtb && dtb != fdt)
		fdt = dtb;

	/* Do some early initialization based on the flat device tree */
	early_init_dt_scan(fdt);

	parse_early_param();
}

void __init setup_arch(char **cmdline_p)
{
	int bootmap_size;
	struct memblock_region *reg;

	printk(KERN_INFO "Initializing kernel\n");

	/* Initialize command line */
	*cmdline_p = boot_command_line;

	memory_end = ram_end;
	memory_end &= ~(PAGE_SIZE - 1);

	if (mem_size && (PAGE_OFFSET + PAGE_ALIGN(mem_size)) < memory_end)
		memory_end = PAGE_OFFSET + PAGE_ALIGN(mem_size);

	/* add block that this kernel can use */
	memblock_add(PAGE_OFFSET, memory_end - PAGE_OFFSET);

	/* reserve kernel text/data/bss */
	memblock_reserve(PAGE_OFFSET,
			 PAGE_ALIGN((unsigned long)&_end - PAGE_OFFSET));

	if (dma_size) {
		/* align to cacheability granularity */
		dma_size = CACHE_REGION_END(dma_size);

		if (!dma_start)
			dma_start = memory_end - dma_size;

		/* align to cacheability granularity */
		dma_start = CACHE_REGION_START(dma_start);

		/* reserve DMA memory taken from kernel memory */
		if (memblock_is_region_memory(dma_start, dma_size))
			memblock_reserve(dma_start, dma_size);
	}

	memory_start = PAGE_ALIGN((unsigned int) &_end);

	printk(KERN_INFO "Memory Start=%08lx, Memory End=%08lx\n",
	       memory_start, memory_end);

#ifdef CONFIG_BLK_DEV_INITRD
	/*
	 * Reserve initrd memory if in kernel memory.
	 */
	if (initrd_start < initrd_end)
		if (memblock_is_region_memory(initrd_start,
					      initrd_end - initrd_start))
			memblock_reserve(initrd_start,
					 initrd_end - initrd_start);
#endif

	init_mm.start_code = (unsigned long) &_stext;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = memory_start;
	init_mm.brk = memory_start;

	/*
	 * Give all the memory to the bootmap allocator, tell it to put the
	 * boot mem_map at the start of memory
	 */
	bootmap_size = init_bootmem_node(NODE_DATA(0),
					 memory_start >> PAGE_SHIFT,
					 PAGE_OFFSET >> PAGE_SHIFT,
					 memory_end >> PAGE_SHIFT);
	memblock_reserve(memory_start, bootmap_size);

	unflatten_device_tree();

	c6x_cache_init();

	/* Set the whole external memory as non-cacheable */
	disable_caching(ram_start, ram_end - 1);

	/* Set caching of external RAM used by Linux */
	for_each_memblock(memory, reg)
		enable_caching(CACHE_REGION_START(reg->base),
			       CACHE_REGION_START(reg->base + reg->size - 1));

#ifdef CONFIG_BLK_DEV_INITRD
	/*
	 * Enable caching for initrd which falls outside kernel memory.
	 */
	if (initrd_start < initrd_end) {
		if (!memblock_is_region_memory(initrd_start,
					       initrd_end - initrd_start))
			enable_caching(CACHE_REGION_START(initrd_start),
				       CACHE_REGION_START(initrd_end - 1));
	}
#endif

	/*
	 * Disable caching for dma coherent memory taken from kernel memory.
	 */
	if (dma_size && memblock_is_region_memory(dma_start, dma_size))
		disable_caching(dma_start,
				CACHE_REGION_START(dma_start + dma_size - 1));

	/* Initialize the coherent memory allocator */
	coherent_mem_init(dma_start, dma_size);

	/*
	 * Free all memory as a starting point.
	 */
	free_bootmem(PAGE_OFFSET, memory_end - PAGE_OFFSET);

	/*
	 * Then reserve memory which is already being used.
	 */
	for_each_memblock(reserved, reg) {
		pr_debug("reserved - 0x%08x-0x%08x\n",
			 (u32) reg->base, (u32) reg->size);
		reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
	}

	max_low_pfn = PFN_DOWN(memory_end);
	min_low_pfn = PFN_UP(memory_start);
	max_mapnr = max_low_pfn - min_low_pfn;

	/* Get kmalloc into gear */
	paging_init();

	/*
	 * Probe for Device State Configuration Registers.
	 * We have to do this early in case timer needs to be enabled
	 * through DSCR.
	 */
	dscr_probe();

	/* We do this early for timer and core clock frequency */
	c64x_setup_clocks();

	/* Get CPU info */
	get_cpuinfo();

#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
}

/*
 * Offset CPU numbers by one so that CPU 0 does not become a NULL
 * pointer: seq_file treats a NULL return from ->start() as the end
 * of iteration.
 */
#define cpu_to_ptr(n) ((void *)((long)(n)+1))
#define ptr_to_cpu(p) ((long)(p) - 1)

static int show_cpuinfo(struct seq_file *m, void *v)
{
	int n = ptr_to_cpu(v);
	struct cpuinfo_c6x *p = &per_cpu(cpu_data, n);

	if (n == 0) {
		seq_printf(m,
			   "soc\t\t: %s\n"
			   "soc revision\t: 0x%x\n"
			   "soc cores\t: %d\n",
			   c6x_soc_name, c6x_silicon_rev, c6x_num_cores);
	}

	seq_printf(m,
		   "\n"
		   "processor\t: %d\n"
		   "cpu\t\t: %s\n"
		   "core revision\t: %s\n"
		   "core voltage\t: %s\n"
		   "core id\t\t: %d\n"
		   "mmu\t\t: %s\n"
		   "fpu\t\t: %s\n"
		   "cpu MHz\t\t: %u\n"
		   "bogomips\t: %lu.%02lu\n\n",
		   n,
		   p->cpu_name, p->cpu_rev, p->cpu_voltage,
		   p->core_id, p->mmu, p->fpu,
		   (c6x_core_freq + 500000) / 1000000,
		   (loops_per_jiffy/(500000/HZ)),
		   (loops_per_jiffy/(5000/HZ))%100);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}
static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.stop	= c_stop,
	.next	= c_next,
	.show	= show_cpuinfo,
};

static struct cpu cpu_devices[NR_CPUS];

static int __init topology_init(void)
{
	int i;

	for_each_present_cpu(i)
		register_cpu(&cpu_devices[i], i);

	return 0;
}

subsys_initcall(topology_init);