/*
 * SMP initialisation and IPI support
 * Based on arch/arm/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/irq_work.h>

#include <asm/alternative.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/smp_plat.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/virt.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;
/* Number of CPUs which aren't online, but looping in kernel text. */
int cpus_stuck_in_kernel;

enum ipi_msg_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_TIMER,
	IPI_IRQ_WORK,
	IPI_WAKEUP
};

#ifdef CONFIG_ARM64_VHE

/* Whether the boot CPU is running in HYP mode or not */
static bool boot_cpu_hyp_mode;

static inline void save_boot_cpu_run_el(void)
{
	boot_cpu_hyp_mode = is_kernel_in_hyp_mode();
}

static inline bool is_boot_cpu_in_hyp_mode(void)
{
	return boot_cpu_hyp_mode;
}

/*
 * Verify that a secondary CPU is running the kernel at the same
 * EL as that of the boot CPU.
 */
void verify_cpu_run_el(void)
{
	bool in_el2 = is_kernel_in_hyp_mode();
	bool boot_cpu_el2 = is_boot_cpu_in_hyp_mode();

	if (in_el2 ^ boot_cpu_el2) {
		pr_crit("CPU%d: mismatched Exception Level(EL%d) with boot CPU(EL%d)\n",
			smp_processor_id(),
			in_el2 ? 2 : 1,
			boot_cpu_el2 ? 2 : 1);
		cpu_panic_kernel();
	}
}

#else
static inline void save_boot_cpu_run_el(void) {}
#endif

#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_kill(unsigned int cpu);
#else
static inline int op_cpu_kill(unsigned int cpu)
{
	return -ENOSYS;
}
#endif


/*
 * Boot a secondary CPU, and assign it the specified idle task.
 * This also gives us the initial stack to use for this CPU.
 */
static int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	if (cpu_ops[cpu]->cpu_boot)
		return cpu_ops[cpu]->cpu_boot(cpu);

	return -EOPNOTSUPP;
}

static DECLARE_COMPLETION(cpu_running);

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;
	long status;

	/*
	 * We need to tell the secondary core where to find its stack and the
	 * page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	update_cpu_boot_status(CPU_MMU_OFF);
	__flush_dcache_area(&secondary_data, sizeof(secondary_data));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it to come online or
		 * time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	secondary_data.stack = NULL;
	status = READ_ONCE(secondary_data.status);
	if (ret && status) {

		if (status == CPU_MMU_OFF)
			status = READ_ONCE(__early_cpu_boot_status);

		switch (status) {
		default:
			pr_err("CPU%u: failed in unknown state : 0x%lx\n",
			       cpu, status);
			break;
		case CPU_KILL_ME:
			if (!op_cpu_kill(cpu)) {
				pr_crit("CPU%u: died during early boot\n", cpu);
				break;
			}
			/* Fall through */
			pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
		case CPU_STUCK_IN_KERNEL:
			pr_crit("CPU%u: is stuck in kernel\n", cpu);
			cpus_stuck_in_kernel++;
			break;
		case CPU_PANIC_KERNEL:
			panic("CPU%u detected unsupported configuration\n", cpu);
		}
	}

	return ret;
}

/*
 * This is the secondary CPU boot entry. We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;

	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_uninstall_idmap();

	preempt_disable();
	trace_hardirqs_off();

	/*
	 * If the system has established the capabilities, make sure
	 * this CPU ticks all of those. If it doesn't, the CPU will
	 * fail to come online.
	 */
	verify_local_cpu_capabilities();

	if (cpu_ops[cpu]->cpu_postboot)
		cpu_ops[cpu]->cpu_postboot();

	/*
	 * Log the CPU info before it is marked online and might get read.
	 */
	cpuinfo_store_cpu();

	/*
	 * Enable GIC and timers.
	 */
	notify_cpu_starting(cpu);

	store_cpu_topology(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue. Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue.
	 */
	pr_info("CPU%u: Booted secondary processor [%08x]\n",
		cpu, read_cpuid_id());
	update_cpu_boot_status(CPU_BOOT_SUCCESS);
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	local_irq_enable();
	local_async_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_disable(unsigned int cpu)
{
	/*
	 * If we don't have a cpu_die method, abort before we reach the point
	 * of no return. CPU0 may not have a cpu_ops, so test for it.
	 */
	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
		return -EOPNOTSUPP;

	/*
	 * We may need to abort a hot unplug for some other mechanism-specific
	 * reason.
	 */
	if (cpu_ops[cpu]->cpu_disable)
		return cpu_ops[cpu]->cpu_disable(cpu);

	return 0;
}

/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = op_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline. Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	irq_migrate_all_off_this_cpu();

	return 0;
}

static int op_cpu_kill(unsigned int cpu)
{
	/*
	 * If we have no means of synchronising with the dying CPU, then assume
	 * that it is really dead. We can only wait for an arbitrary length of
	 * time and hope that it's dead, so let's skip the wait and just hope.
	 */
	if (!cpu_ops[cpu]->cpu_kill)
		return 0;

	return cpu_ops[cpu]->cpu_kill(cpu);
}

/*
 * Called on the thread which is asking for a CPU to be shut down -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	int err;

	if (!cpu_wait_death(cpu, 5)) {
		pr_crit("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	pr_notice("CPU%u: shutdown\n", cpu);

	/*
	 * Now that the dying CPU is beyond the point of no return w.r.t.
	 * in-kernel synchronisation, try to get the firmware to help us to
	 * verify that it has really left the kernel before we consider
	 * clobbering anything it might still be using.
	 */
	err = op_cpu_kill(cpu);
	if (err)
		pr_warn("CPU%d may not have shut down cleanly: %d\n",
			cpu, err);
}

/*
 * Called from the idle thread for the CPU which has been shut down.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();

	/* Tell __cpu_die() that this CPU is now safe to dispose of */
	(void)cpu_report_death();

	/*
	 * Actually shut down the CPU. This must never fail. The specific hotplug
	 * mechanism must perform all required cache maintenance to ensure that
	 * no dirty lines are lost in the process of shutting down the CPU.
	 */
	cpu_ops[cpu]->cpu_die(cpu);

	BUG();
}
#endif

/*
 * Kill the calling secondary CPU, early in bringup before it is turned
 * online.
 */
void cpu_die_early(void)
{
	int cpu = smp_processor_id();

	pr_crit("CPU%d: will not boot\n", cpu);

	/* Mark this CPU absent */
	set_cpu_present(cpu, 0);

#ifdef CONFIG_HOTPLUG_CPU
	update_cpu_boot_status(CPU_KILL_ME);
	/* Check if we can park ourselves */
	if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_die)
		cpu_ops[cpu]->cpu_die(cpu);
#endif
	update_cpu_boot_status(CPU_STUCK_IN_KERNEL);

	cpu_park_loop();
}

static void __init hyp_mode_check(void)
{
	if (is_hyp_mode_available())
		pr_info("CPU: All CPU(s) started at EL2\n");
	else if (is_hyp_mode_mismatched())
		WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
			   "CPU: CPUs started in inconsistent modes");
	else
		pr_info("CPU: All CPU(s) started at EL1\n");
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
	setup_cpu_features();
	hyp_mode_check();
	apply_alternatives_all();
}

void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
	cpuinfo_store_boot_cpu();
	save_boot_cpu_run_el();
}

static u64 __init of_get_cpu_mpidr(struct device_node *dn)
{
	const __be32 *cell;
	u64 hwid;

	/*
	 * A cpu node with missing "reg" property is
	 * considered invalid to build a cpu_logical_map
	 * entry.
	 */
	cell = of_get_property(dn, "reg", NULL);
	if (!cell) {
		pr_err("%s: missing reg property\n", dn->full_name);
		return INVALID_HWID;
	}

	hwid = of_read_number(cell, of_n_addr_cells(dn));
	/*
	 * Non affinity bits must be set to 0 in the DT
	 */
	if (hwid & ~MPIDR_HWID_BITMASK) {
		pr_err("%s: invalid reg property\n", dn->full_name);
		return INVALID_HWID;
	}
	return hwid;
}

/*
 * Duplicate MPIDRs are a recipe for disaster. Scan all initialized
 * entries and check for duplicates. If any is found just ignore the
 * cpu. cpu_logical_map was initialized to INVALID_HWID to avoid
 * matching valid MPIDR values.
 */
static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
{
	unsigned int i;

	for (i = 1; (i < cpu) && (i < NR_CPUS); i++)
		if (cpu_logical_map(i) == hwid)
			return true;
	return false;
}

/*
 * Initialize cpu operations for a logical cpu and
 * set it in the possible mask on success
 */
static int __init smp_cpu_setup(int cpu)
{
	if (cpu_read_ops(cpu))
		return -ENODEV;

	if (cpu_ops[cpu]->cpu_init(cpu))
		return -ENODEV;

	set_cpu_possible(cpu, true);

	return 0;
}

static bool bootcpu_valid __initdata;
static unsigned int cpu_count = 1;

#ifdef CONFIG_ACPI
/*
 * acpi_map_gic_cpu_interface - parse processor MADT entry
 *
 * Carry out sanity checks on MADT processor entry and initialize
 * cpu_logical_map on success
 */
static void __init
acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
{
	u64 hwid = processor->arm_mpidr;

	if (!(processor->flags & ACPI_MADT_ENABLED)) {
		pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
		return;
	}

	if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
		pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
		return;
	}

	if (is_mpidr_duplicate(cpu_count, hwid)) {
		pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid);
		return;
	}

	/* Check if GICC structure of boot CPU is available in the MADT */
	if (cpu_logical_map(0) == hwid) {
		if (bootcpu_valid) {
			pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n",
			       hwid);
			return;
		}
		bootcpu_valid = true;
		return;
	}

	if (cpu_count >= NR_CPUS)
		return;

	/* map the logical cpu id to cpu MPIDR */
	cpu_logical_map(cpu_count) = hwid;

	/*
	 * Set-up the ACPI parking protocol cpu entries
	 * while initializing the cpu_logical_map to
	 * avoid parsing MADT entries multiple times for
	 * nothing (ie a valid cpu_logical_map entry should
	 * contain a valid parking protocol data set to
	 * initialize the cpu if the parking protocol is
	 * the only available enable method).
	 */
	acpi_set_mailbox_entry(cpu_count, processor);

	early_map_cpu_to_node(cpu_count, acpi_numa_get_nid(cpu_count, hwid));

	cpu_count++;
}

static int __init
acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
			     const unsigned long end)
{
	struct acpi_madt_generic_interrupt *processor;

	processor = (struct acpi_madt_generic_interrupt *)header;
	if (BAD_MADT_GICC_ENTRY(processor, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	acpi_map_gic_cpu_interface(processor);

	return 0;
}
#else
#define acpi_table_parse_madt(...)	do { } while (0)
#endif

/*
 * Enumerate the possible CPU set from the device tree and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
static void __init of_parse_and_init_cpus(void)
{
	struct device_node *dn = NULL;

	while ((dn = of_find_node_by_type(dn, "cpu"))) {
		u64 hwid = of_get_cpu_mpidr(dn);

		if (hwid == INVALID_HWID)
			goto next;

		if (is_mpidr_duplicate(cpu_count, hwid)) {
			pr_err("%s: duplicate cpu reg properties in the DT\n",
				dn->full_name);
			goto next;
		}

		/*
		 * The numbering scheme requires that the boot CPU
		 * must be assigned logical id 0. Record it so that
		 * the logical map built from DT is validated and can
		 * be used.
		 */
		if (hwid == cpu_logical_map(0)) {
			if (bootcpu_valid) {
				pr_err("%s: duplicate boot cpu reg property in DT\n",
					dn->full_name);
				goto next;
			}

			bootcpu_valid = true;

			/*
			 * cpu_logical_map has already been
			 * initialized and the boot cpu doesn't need
			 * the enable-method so continue without
			 * incrementing cpu.
			 */
			continue;
		}

		if (cpu_count >= NR_CPUS)
			goto next;

		pr_debug("cpu logical map 0x%llx\n", hwid);
		cpu_logical_map(cpu_count) = hwid;

		early_map_cpu_to_node(cpu_count, of_node_to_nid(dn));
next:
		cpu_count++;
	}
}

/*
 * Enumerate the possible CPU set from the device tree or ACPI and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
void __init smp_init_cpus(void)
{
	int i;

	if (acpi_disabled)
		of_parse_and_init_cpus();
	else
		/*
		 * do a walk of MADT to determine how many CPUs
		 * we have including disabled CPUs, and get information
		 * we need for SMP init
		 */
		acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      acpi_parse_gic_cpu_interface, 0);

	if (cpu_count > nr_cpu_ids)
		pr_warn("Number of cores (%d) exceeds configured maximum of %d - clipping\n",
			cpu_count, nr_cpu_ids);

	if (!bootcpu_valid) {
		pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
		return;
	}

	/*
	 * We need to set the cpu_logical_map entries before enabling
	 * the cpus so that cpu processor description entries (DT cpu nodes
	 * and ACPI MADT entries) can be retrieved by matching the cpu hwid
	 * with entries in cpu_logical_map while initializing the cpus.
	 * If the cpu set-up fails, invalidate the cpu_logical_map entry.
	 */
	for (i = 1; i < nr_cpu_ids; i++) {
		if (cpu_logical_map(i) != INVALID_HWID) {
			if (smp_cpu_setup(i))
				cpu_logical_map(i) = INVALID_HWID;
		}
	}
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int err;
	unsigned int cpu;
	unsigned int this_cpu;

	init_cpu_topology();

	this_cpu = smp_processor_id();
	store_cpu_topology(this_cpu);
	numa_store_cpu_info(this_cpu);

	/*
	 * If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set
	 * secondary CPUs present.
	 */
	if (max_cpus == 0)
		return;

	/*
	 * Initialise the present map (which describes the set of CPUs
	 * actually populated at the present time) and release the
	 * secondaries from the bootloader.
	 */
	for_each_possible_cpu(cpu) {

		if (cpu == smp_processor_id())
			continue;

		if (!cpu_ops[cpu])
			continue;

		err = cpu_ops[cpu]->cpu_prepare(cpu);
		if (err)
			continue;

		set_cpu_present(cpu, true);
		numa_store_cpu_info(cpu);
	}
}

void (*__smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	__smp_cross_call = fn;
}

static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)	[x] = s
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_IRQ_WORK, "IRQ work interrupts"),
	S(IPI_WAKEUP, "CPU wake-up interrupts"),
};

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
	trace_ipi_raise(target, ipi_types[ipinr]);
	__smp_cross_call(target, ipinr);
}

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));
		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}
#endif

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	if (__smp_cross_call)
		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	set_cpu_online(cpu, false);

	local_irq_disable();

	while (1)
		cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if ((unsigned)ipinr < NR_IPI) {
		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
	}

	switch (ipinr) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		tick_receive_broadcast();
		irq_exit();
		break;
#endif

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_enter();
		irq_work_run();
		irq_exit();
		break;
#endif

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
	case IPI_WAKEUP:
		WARN_ONCE(!acpi_parking_protocol_valid(cpu),
			  "CPU%u: Wake-up IPI outside the ACPI parking protocol\n",
			  cpu);
		break;
#endif

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif

void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_online_cpus() > 1) {
		cpumask_t mask;

		cpumask_copy(&mask, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &mask);

		if (system_state == SYSTEM_BOOTING ||
		    system_state == SYSTEM_RUNNING)
			pr_crit("SMP: stopping secondary CPUs\n");
		smp_cross_call(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warning("SMP: failed to stop secondary CPUs %*pbl\n",
			   cpumask_pr_args(cpu_online_mask));
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

static bool have_cpu_die(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	int any_cpu = raw_smp_processor_id();

	if (cpu_ops[any_cpu]->cpu_die)
		return true;
#endif
	return false;
}

bool cpus_are_stuck_in_kernel(void)
{
	bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die());

	return !!cpus_stuck_in_kernel || smp_spin_tables;
}