TOMOYO Linux Cross Reference
Linux/arch/arm64/kernel/smp.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * SMP initialisation and IPI support
 * Based on arch/arm/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/arm_sdei.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/irq_work.h>
#include <linux/kernel_stat.h>
#include <linux/kexec.h>
#include <linux/kvm_host.h>

#include <asm/alternative.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/daifflags.h>
#include <asm/kvm_mmu.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
#include <asm/processor.h>
#include <asm/smp_plat.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/virt.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

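/*
 * Logical CPU number of each CPU, kept in a per-cpu variable for cheap
 * lookup; its address also serves as the per-cpu dev_id cookie when the
 * IPI IRQs are requested in set_smp_ipi_range() below.
 */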
DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

/*
 * As of 2.5, kernels no longer have an init_tasks structure, so we need
 * some other way of telling a new secondary core where to place its SVC
 * stack.
 */
struct secondary_data secondary_data;
/* Number of CPUs which aren't online, but looping in kernel text. */
static int cpus_stuck_in_kernel;

enum ipi_msg_type {
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CPU_STOP,
        IPI_CPU_CRASH_STOP,
        IPI_TIMER,
        IPI_IRQ_WORK,
        IPI_WAKEUP,
        NR_IPI
};

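/*
 * IPIs are backed by a contiguous range of per-cpu IRQs handed to us by
 * the interrupt controller via set_smp_ipi_range(); ipi_irq_base records
 * the first IRQ of that range.
 */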
static int ipi_irq_base __read_mostly;
static int nr_ipi __read_mostly = NR_IPI;
static struct irq_desc *ipi_desc[NR_IPI] __read_mostly;

static void ipi_setup(int cpu);

#ifdef CONFIG_HOTPLUG_CPU
static void ipi_teardown(int cpu);
static int op_cpu_kill(unsigned int cpu);
#else
static inline int op_cpu_kill(unsigned int cpu)
{
        return -ENOSYS;
}
#endif


/*
 * Boot a secondary CPU, and assign it the specified idle task.
 * This also gives us the initial stack to use for this CPU.
 */
static int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
        const struct cpu_operations *ops = get_cpu_ops(cpu);

        if (ops->cpu_boot)
                return ops->cpu_boot(cpu);

        return -EOPNOTSUPP;
}

static DECLARE_COMPLETION(cpu_running);

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
        int ret;
        long status;

        /*
         * We need to tell the secondary core where to find its stack and the
         * page tables.
         */
        secondary_data.task = idle;
        secondary_data.stack = task_stack_page(idle) + THREAD_SIZE;
        update_cpu_boot_status(CPU_MMU_OFF);
        __flush_dcache_area(&secondary_data, sizeof(secondary_data));

        /* Now bring the CPU into our world */
        ret = boot_secondary(cpu, idle);
        if (ret) {
                pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
                return ret;
        }

        /*
         * CPU was successfully started, wait for it to come online or
         * time out.
         */
        wait_for_completion_timeout(&cpu_running,
                                    msecs_to_jiffies(5000));
        if (cpu_online(cpu))
                return 0;

        pr_crit("CPU%u: failed to come online\n", cpu);
        secondary_data.task = NULL;
        secondary_data.stack = NULL;
        __flush_dcache_area(&secondary_data, sizeof(secondary_data));
        status = READ_ONCE(secondary_data.status);
        if (status == CPU_MMU_OFF)
                status = READ_ONCE(__early_cpu_boot_status);

        switch (status & CPU_BOOT_STATUS_MASK) {
        default:
                pr_err("CPU%u: failed in unknown state : 0x%lx\n",
                       cpu, status);
                cpus_stuck_in_kernel++;
                break;
        case CPU_KILL_ME:
                if (!op_cpu_kill(cpu)) {
                        pr_crit("CPU%u: died during early boot\n", cpu);
                        break;
                }
                pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
                fallthrough;
        case CPU_STUCK_IN_KERNEL:
                pr_crit("CPU%u: is stuck in kernel\n", cpu);
                if (status & CPU_STUCK_REASON_52_BIT_VA)
                        pr_crit("CPU%u: does not support 52-bit VAs\n", cpu);
                if (status & CPU_STUCK_REASON_NO_GRAN) {
                        pr_crit("CPU%u: does not support %luK granule\n",
                                cpu, PAGE_SIZE / SZ_1K);
                }
                cpus_stuck_in_kernel++;
                break;
        case CPU_PANIC_KERNEL:
                panic("CPU%u detected unsupported configuration\n", cpu);
        }

        return -EIO;
}

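/*
 * When pseudo-NMIs are in use, regular interrupts are masked via the GIC
 * priority mask (PMR) rather than PSTATE.I; prime the PMR before this CPU
 * starts taking interrupts.
 */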
static void init_gic_priority_masking(void)
{
        u32 cpuflags;

        if (WARN_ON(!gic_enable_sre()))
                return;

        cpuflags = read_sysreg(daif);

        WARN_ON(!(cpuflags & PSR_I_BIT));

        gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
}

/*
 * This is the secondary CPU boot entry. We're using this CPU's idle
 * thread stack, but a set of temporary page tables.
 */
asmlinkage notrace void secondary_start_kernel(void)
{
        u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
        struct mm_struct *mm = &init_mm;
        const struct cpu_operations *ops;
        unsigned int cpu;

        cpu = task_cpu(current);
        set_my_cpu_offset(per_cpu_offset(cpu));

        /*
         * All kernel threads share the same mm context; grab a
         * reference and switch to it.
         */
        mmgrab(mm);
        current->active_mm = mm;

        /*
         * TTBR0 is only used for the identity mapping at this stage. Make it
         * point to zero page to avoid speculatively fetching new entries.
         */
        cpu_uninstall_idmap();

        if (system_uses_irq_prio_masking())
                init_gic_priority_masking();

        rcu_cpu_starting(cpu);
        preempt_disable();
        trace_hardirqs_off();

        /*
         * If the system has established the capabilities, make sure
         * this CPU ticks all of those. If it doesn't, the CPU will
         * fail to come online.
         */
        check_local_cpu_capabilities();

        ops = get_cpu_ops(cpu);
        if (ops->cpu_postboot)
                ops->cpu_postboot();

        /*
         * Log the CPU info before it is marked online and might get read.
         */
        cpuinfo_store_cpu();

        /*
         * Enable GIC and timers.
         */
        notify_cpu_starting(cpu);

        ipi_setup(cpu);

        store_cpu_topology(cpu);
        numa_add_cpu(cpu);

        /*
         * OK, now it's safe to let the boot CPU continue.  Wait for
         * the CPU migration code to notice that the CPU is online
         * before we continue.
         */
        pr_info("CPU%u: Booted secondary processor 0x%010lx [0x%08x]\n",
                                         cpu, (unsigned long)mpidr,
                                         read_cpuid_id());
        update_cpu_boot_status(CPU_BOOT_SUCCESS);
        set_cpu_online(cpu, true);
        complete(&cpu_running);

        local_daif_restore(DAIF_PROCCTX);

        /*
         * OK, it's off to the idle thread for us
         */
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_disable(unsigned int cpu)
{
        const struct cpu_operations *ops = get_cpu_ops(cpu);

        /*
         * If we don't have a cpu_die method, abort before we reach the point
         * of no return. CPU0 may not have a cpu_ops structure, so test for it.
         */
        if (!ops || !ops->cpu_die)
                return -EOPNOTSUPP;

        /*
         * We may need to abort a hot unplug for some other mechanism-specific
         * reason.
         */
        if (ops->cpu_disable)
                return ops->cpu_disable(cpu);

        return 0;
}

/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        int ret;

        ret = op_cpu_disable(cpu);
        if (ret)
                return ret;

        remove_cpu_topology(cpu);
        numa_remove_cpu(cpu);

        /*
         * Take this CPU offline.  Once we clear this, we can't return,
         * and we must not schedule until we're ready to give up the cpu.
         */
        set_cpu_online(cpu, false);
        ipi_teardown(cpu);

        /*
         * OK - migrate IRQs away from this CPU
         */
        irq_migrate_all_off_this_cpu();

        return 0;
}

static int op_cpu_kill(unsigned int cpu)
{
        const struct cpu_operations *ops = get_cpu_ops(cpu);

        /*
         * If we have no means of synchronising with the dying CPU, then assume
         * that it is really dead. We can only wait for an arbitrary length of
         * time and hope that it's dead, so let's skip the wait and just hope.
         */
        if (!ops->cpu_kill)
                return 0;

        return ops->cpu_kill(cpu);
}

/*
 * Called on the thread which is asking for a CPU to be shut down; waits
 * until shutdown has completed, or times out.
 */
void __cpu_die(unsigned int cpu)
{
        int err;

        if (!cpu_wait_death(cpu, 5)) {
                pr_crit("CPU%u: cpu didn't die\n", cpu);
                return;
        }
        pr_notice("CPU%u: shutdown\n", cpu);

        /*
         * Now that the dying CPU is beyond the point of no return w.r.t.
         * in-kernel synchronisation, try to get the firmware to help us to
         * verify that it has really left the kernel before we consider
         * clobbering anything it might still be using.
         */
        err = op_cpu_kill(cpu);
        if (err)
                pr_warn("CPU%d may not have shut down cleanly: %d\n", cpu, err);
}

/*
 * Called from the idle thread for the CPU which has been shut down.
 */
void cpu_die(void)
{
        unsigned int cpu = smp_processor_id();
        const struct cpu_operations *ops = get_cpu_ops(cpu);

        idle_task_exit();

        local_daif_mask();

        /* Tell __cpu_die() that this CPU is now safe to dispose of */
        (void)cpu_report_death();

        /*
         * Actually shut down the CPU. This must never fail. The specific hotplug
         * mechanism must perform all required cache maintenance to ensure that
         * no dirty lines are lost in the process of shutting down the CPU.
         */
        ops->cpu_die(cpu);

        BUG();
}
#endif

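/* Ask the hotplug mechanism to kill the calling CPU, if one is available. */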
static void __cpu_try_die(int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
        const struct cpu_operations *ops = get_cpu_ops(cpu);

        if (ops && ops->cpu_die)
                ops->cpu_die(cpu);
#endif
}

/*
 * Kill the calling secondary CPU, early in bringup before it is turned
 * online.
 */
void cpu_die_early(void)
{
        int cpu = smp_processor_id();

        pr_crit("CPU%d: will not boot\n", cpu);

        /* Mark this CPU absent */
        set_cpu_present(cpu, 0);
        rcu_report_dead(cpu);

        if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
                update_cpu_boot_status(CPU_KILL_ME);
                __cpu_try_die(cpu);
        }

        update_cpu_boot_status(CPU_STUCK_IN_KERNEL);

        cpu_park_loop();
}

static void __init hyp_mode_check(void)
{
        if (is_hyp_mode_available())
                pr_info("CPU: All CPU(s) started at EL2\n");
        else if (is_hyp_mode_mismatched())
                WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
                           "CPU: CPUs started in inconsistent modes");
        else
                pr_info("CPU: All CPU(s) started at EL1\n");
        if (IS_ENABLED(CONFIG_KVM))
                kvm_compute_layout();
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
        setup_cpu_features();
        hyp_mode_check();
        apply_alternatives_all();
        mark_linear_text_alias_ro();
}

void __init smp_prepare_boot_cpu(void)
{
        set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
        cpuinfo_store_boot_cpu();

        /*
         * We now know enough about the boot CPU to apply the
         * alternatives that cannot wait until interrupt handling
         * and/or scheduling is enabled.
         */
        apply_boot_alternatives();

        /* Conditionally switch to GIC PMR for interrupt masking */
        if (system_uses_irq_prio_masking())
                init_gic_priority_masking();
}

static u64 __init of_get_cpu_mpidr(struct device_node *dn)
{
        const __be32 *cell;
        u64 hwid;

        /*
         * A cpu node with a missing "reg" property is considered invalid
         * and cannot be used to build a cpu_logical_map entry.
         */
        cell = of_get_property(dn, "reg", NULL);
        if (!cell) {
                pr_err("%pOF: missing reg property\n", dn);
                return INVALID_HWID;
        }

        hwid = of_read_number(cell, of_n_addr_cells(dn));
        /*
         * Non-affinity bits must be set to 0 in the DT
         */
        if (hwid & ~MPIDR_HWID_BITMASK) {
                pr_err("%pOF: invalid reg property\n", dn);
                return INVALID_HWID;
        }
        return hwid;
}

/*
 * Duplicate MPIDRs are a recipe for disaster. Scan all initialized
 * entries and check for duplicates. If any is found just ignore the
 * cpu. cpu_logical_map was initialized to INVALID_HWID to avoid
 * matching valid MPIDR values.
 */
static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
{
        unsigned int i;

        for (i = 1; (i < cpu) && (i < NR_CPUS); i++)
                if (cpu_logical_map(i) == hwid)
                        return true;
        return false;
}

/*
 * Initialize cpu operations for a logical cpu and
 * set it in the possible mask on success
 */
static int __init smp_cpu_setup(int cpu)
{
        const struct cpu_operations *ops;

        if (init_cpu_ops(cpu))
                return -ENODEV;

        ops = get_cpu_ops(cpu);
        if (ops->cpu_init(cpu))
                return -ENODEV;

        set_cpu_possible(cpu, true);

        return 0;
}

static bool bootcpu_valid __initdata;
static unsigned int cpu_count = 1;

#ifdef CONFIG_ACPI
static struct acpi_madt_generic_interrupt cpu_madt_gicc[NR_CPUS];

struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu)
{
        return &cpu_madt_gicc[cpu];
}

/*
 * acpi_map_gic_cpu_interface - parse processor MADT entry
 *
 * Carry out sanity checks on MADT processor entry and initialize
 * cpu_logical_map on success
 */
static void __init
acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
{
        u64 hwid = processor->arm_mpidr;

        if (!(processor->flags & ACPI_MADT_ENABLED)) {
                pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
                return;
        }

        if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
                pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
                return;
        }

        if (is_mpidr_duplicate(cpu_count, hwid)) {
                pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid);
                return;
        }

        /* Check if GICC structure of boot CPU is available in the MADT */
        if (cpu_logical_map(0) == hwid) {
                if (bootcpu_valid) {
                        pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n",
                               hwid);
                        return;
                }
                bootcpu_valid = true;
                cpu_madt_gicc[0] = *processor;
                return;
        }

        if (cpu_count >= NR_CPUS)
                return;

        /* map the logical cpu id to cpu MPIDR */
        set_cpu_logical_map(cpu_count, hwid);

        cpu_madt_gicc[cpu_count] = *processor;

        /*
         * Set up the ACPI parking protocol cpu entries while initializing
         * the cpu_logical_map, to avoid parsing MADT entries multiple
         * times for nothing (i.e. a valid cpu_logical_map entry should
         * contain a valid parking protocol data set to initialize the cpu
         * if the parking protocol is the only available enable method).
         */
        acpi_set_mailbox_entry(cpu_count, processor);

        cpu_count++;
}

static int __init
acpi_parse_gic_cpu_interface(union acpi_subtable_headers *header,
                             const unsigned long end)
{
        struct acpi_madt_generic_interrupt *processor;

        processor = (struct acpi_madt_generic_interrupt *)header;
        if (BAD_MADT_GICC_ENTRY(processor, end))
                return -EINVAL;

        acpi_table_print_madt_entry(&header->common);

        acpi_map_gic_cpu_interface(processor);

        return 0;
}

static void __init acpi_parse_and_init_cpus(void)
{
        int i;

        /*
         * Do a walk of the MADT to determine how many CPUs we have,
         * including disabled CPUs, and to gather the information we
         * need for SMP init.
         */
        acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
                                      acpi_parse_gic_cpu_interface, 0);

        /*
         * In ACPI, SMP and CPU NUMA information is provided in separate
         * static tables, namely the MADT and the SRAT.
         *
         * Thus, it is simpler to first create the cpu logical map through
         * an MADT walk and then map the logical cpus to their node ids
         * as separate steps.
         */
        acpi_map_cpus_to_nodes();

        for (i = 0; i < nr_cpu_ids; i++)
                early_map_cpu_to_node(i, acpi_numa_get_nid(i));
}
#else
#define acpi_parse_and_init_cpus(...)   do { } while (0)
#endif

/*
 * Enumerate the possible CPU set from the device tree and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
static void __init of_parse_and_init_cpus(void)
{
        struct device_node *dn;

        for_each_of_cpu_node(dn) {
                u64 hwid = of_get_cpu_mpidr(dn);

                if (hwid == INVALID_HWID)
                        goto next;

                if (is_mpidr_duplicate(cpu_count, hwid)) {
                        pr_err("%pOF: duplicate cpu reg properties in the DT\n",
                                dn);
                        goto next;
                }

                /*
                 * The numbering scheme requires that the boot CPU
                 * must be assigned logical id 0. Record it so that
                 * the logical map built from DT is validated and can
                 * be used.
                 */
                if (hwid == cpu_logical_map(0)) {
                        if (bootcpu_valid) {
                                pr_err("%pOF: duplicate boot cpu reg property in DT\n",
                                        dn);
                                goto next;
                        }

                        bootcpu_valid = true;
                        early_map_cpu_to_node(0, of_node_to_nid(dn));

                        /*
                         * cpu_logical_map has already been
                         * initialized and the boot cpu doesn't need
                         * the enable-method so continue without
                         * incrementing cpu.
                         */
                        continue;
                }

                if (cpu_count >= NR_CPUS)
                        goto next;

                pr_debug("cpu logical map 0x%llx\n", hwid);
                set_cpu_logical_map(cpu_count, hwid);

                early_map_cpu_to_node(cpu_count, of_node_to_nid(dn));
next:
                cpu_count++;
        }
}

/*
 * Enumerate the possible CPU set from the device tree or ACPI and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
void __init smp_init_cpus(void)
{
        int i;

        if (acpi_disabled)
                of_parse_and_init_cpus();
        else
                acpi_parse_and_init_cpus();

        if (cpu_count > nr_cpu_ids)
                pr_warn("Number of cores (%d) exceeds configured maximum of %u - clipping\n",
                        cpu_count, nr_cpu_ids);

        if (!bootcpu_valid) {
                pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
                return;
        }

        /*
         * We need to set the cpu_logical_map entries before enabling
         * the cpus so that cpu processor description entries (DT cpu nodes
         * and ACPI MADT entries) can be retrieved by matching the cpu hwid
         * with entries in cpu_logical_map while initializing the cpus.
         * If the cpu set-up fails, invalidate the cpu_logical_map entry.
         */
        for (i = 1; i < nr_cpu_ids; i++) {
                if (cpu_logical_map(i) != INVALID_HWID) {
                        if (smp_cpu_setup(i))
                                set_cpu_logical_map(i, INVALID_HWID);
                }
        }
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        const struct cpu_operations *ops;
        int err;
        unsigned int cpu;
        unsigned int this_cpu;

        init_cpu_topology();

        this_cpu = smp_processor_id();
        store_cpu_topology(this_cpu);
        numa_store_cpu_info(this_cpu);
        numa_add_cpu(this_cpu);

        /*
         * If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set
         * secondary CPUs present.
         */
        if (max_cpus == 0)
                return;

        /*
         * Initialise the present map (which describes the set of CPUs
         * actually populated at the present time) and release the
         * secondaries from the bootloader.
         */
        for_each_possible_cpu(cpu) {

                per_cpu(cpu_number, cpu) = cpu;

                if (cpu == smp_processor_id())
                        continue;

                ops = get_cpu_ops(cpu);
                if (!ops)
                        continue;

                err = ops->cpu_prepare(cpu);
                if (err)
                        continue;

                set_cpu_present(cpu, true);
                numa_store_cpu_info(cpu);
        }
}

static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)  [x] = s
        S(IPI_RESCHEDULE, "Rescheduling interrupts"),
        S(IPI_CALL_FUNC, "Function call interrupts"),
        S(IPI_CPU_STOP, "CPU stop interrupts"),
        S(IPI_CPU_CRASH_STOP, "CPU stop (for crash dump) interrupts"),
        S(IPI_TIMER, "Timer broadcast interrupts"),
        S(IPI_IRQ_WORK, "IRQ work interrupts"),
        S(IPI_WAKEUP, "CPU wake-up interrupts"),
};

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);

unsigned long irq_err_count;

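/* Print the per-cpu IPI counts (and the error count) for /proc/interrupts. */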
int arch_show_interrupts(struct seq_file *p, int prec)
{
        unsigned int cpu, i;

        for (i = 0; i < NR_IPI; i++) {
                unsigned int irq = irq_desc_get_irq(ipi_desc[i]);
                seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
                           prec >= 4 ? " " : "");
                for_each_online_cpu(cpu)
                        seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
                seq_printf(p, "      %s\n", ipi_types[i]);
        }

        seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
        return 0;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_WAKEUP);
}
#endif

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
        smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

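/* Mark the calling CPU offline and park it; this never returns. */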
static void local_cpu_stop(void)
{
        set_cpu_online(smp_processor_id(), false);

        local_daif_mask();
        sdei_mask_local_cpu();
        cpu_park_loop();
}

/*
 * We need to implement panic_smp_self_stop() for parallel panic() calls, so
 * that cpu_online_mask gets correctly updated and smp_send_stop() can skip
 * CPUs that have already stopped themselves.
 */
void panic_smp_self_stop(void)
{
        local_cpu_stop();
}

#ifdef CONFIG_KEXEC_CORE
static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0);
#endif

static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_KEXEC_CORE
        crash_save_cpu(regs, cpu);

        atomic_dec(&waiting_for_crash_ipi);

        local_irq_disable();
        sdei_mask_local_cpu();

        if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
                __cpu_try_die(cpu);

        /* just in case */
        cpu_park_loop();
#endif
}

/*
 * Main handler for inter-processor interrupts
 */
static void do_handle_IPI(int ipinr)
{
        unsigned int cpu = smp_processor_id();

        if ((unsigned)ipinr < NR_IPI)
                trace_ipi_entry_rcuidle(ipi_types[ipinr]);

        switch (ipinr) {
        case IPI_RESCHEDULE:
                scheduler_ipi();
                break;

        case IPI_CALL_FUNC:
                generic_smp_call_function_interrupt();
                break;

        case IPI_CPU_STOP:
                local_cpu_stop();
                break;

        case IPI_CPU_CRASH_STOP:
                if (IS_ENABLED(CONFIG_KEXEC_CORE)) {
                        ipi_cpu_crash_stop(cpu, get_irq_regs());

                        unreachable();
                }
                break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
        case IPI_TIMER:
                tick_receive_broadcast();
                break;
#endif

#ifdef CONFIG_IRQ_WORK
        case IPI_IRQ_WORK:
                irq_work_run();
                break;
#endif

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
        case IPI_WAKEUP:
                WARN_ONCE(!acpi_parking_protocol_valid(cpu),
                          "CPU%u: Wake-up IPI outside the ACPI parking protocol\n",
                          cpu);
                break;
#endif

        default:
                pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
                break;
        }

        if ((unsigned)ipinr < NR_IPI)
                trace_ipi_exit_rcuidle(ipi_types[ipinr]);
}

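/*
 * All IPIs share this handler; the IPI type is recovered from the offset
 * of the IRQ number within the IPI range.
 */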
static irqreturn_t ipi_handler(int irq, void *data)
{
        do_handle_IPI(irq - ipi_irq_base);
        return IRQ_HANDLED;
}

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
        trace_ipi_raise(target, ipi_types[ipinr]);
        __ipi_send_mask(ipi_desc[ipinr], target);
}

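/* Enable the per-cpu IPI IRQs on the calling CPU as it comes online. */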
static void ipi_setup(int cpu)
{
        int i;

        if (WARN_ON_ONCE(!ipi_irq_base))
                return;

        for (i = 0; i < nr_ipi; i++)
                enable_percpu_irq(ipi_irq_base + i, 0);
}

#ifdef CONFIG_HOTPLUG_CPU
static void ipi_teardown(int cpu)
{
        int i;

        if (WARN_ON_ONCE(!ipi_irq_base))
                return;

        for (i = 0; i < nr_ipi; i++)
                disable_percpu_irq(ipi_irq_base + i);
}
#endif

void __init set_smp_ipi_range(int ipi_base, int n)
{
        int i;

        WARN_ON(n < NR_IPI);
        nr_ipi = min(n, NR_IPI);

        for (i = 0; i < nr_ipi; i++) {
                int err;

                err = request_percpu_irq(ipi_base + i, ipi_handler,
                                         "IPI", &cpu_number);
                WARN_ON(err);

                ipi_desc[i] = irq_to_desc(ipi_base + i);
                irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
        }

        ipi_irq_base = ipi_base;

        /* Set up the boot CPU immediately */
        ipi_setup(smp_processor_id());
}

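/* Kick a remote CPU so that it reschedules at the next opportunity. */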
void smp_send_reschedule(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_TIMER);
}
#endif

/*
 * The number of CPUs online, not counting this CPU (which may not be
 * fully online and so not counted in num_online_cpus()).
 */
static inline unsigned int num_other_online_cpus(void)
{
        unsigned int this_cpu_online = cpu_online(smp_processor_id());

        return num_online_cpus() - this_cpu_online;
}

void smp_send_stop(void)
{
        unsigned long timeout;

        if (num_other_online_cpus()) {
                cpumask_t mask;

                cpumask_copy(&mask, cpu_online_mask);
                cpumask_clear_cpu(smp_processor_id(), &mask);

                if (system_state <= SYSTEM_RUNNING)
                        pr_crit("SMP: stopping secondary CPUs\n");
                smp_cross_call(&mask, IPI_CPU_STOP);
        }

        /* Wait up to one second for other CPUs to stop */
        timeout = USEC_PER_SEC;
        while (num_other_online_cpus() && timeout--)
                udelay(1);

        if (num_other_online_cpus())
                pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
                        cpumask_pr_args(cpu_online_mask));

        sdei_mask_local_cpu();
}

#ifdef CONFIG_KEXEC_CORE
void crash_smp_send_stop(void)
{
        static int cpus_stopped;
        cpumask_t mask;
        unsigned long timeout;

        /*
         * This function can be called twice in the panic path, but we
         * must only execute it once.
         */
        if (cpus_stopped)
                return;

        cpus_stopped = 1;

        /*
         * If this cpu is the only one alive at this point in time, online or
         * not, there are no stop messages to be sent around, so just back out.
         */
        if (num_other_online_cpus() == 0) {
                sdei_mask_local_cpu();
                return;
        }

        cpumask_copy(&mask, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), &mask);

        atomic_set(&waiting_for_crash_ipi, num_other_online_cpus());

        pr_crit("SMP: stopping secondary CPUs\n");
        smp_cross_call(&mask, IPI_CPU_CRASH_STOP);

        /* Wait up to one second for other CPUs to stop */
        timeout = USEC_PER_SEC;
        while ((atomic_read(&waiting_for_crash_ipi) > 0) && timeout--)
                udelay(1);

        if (atomic_read(&waiting_for_crash_ipi) > 0)
                pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
                        cpumask_pr_args(&mask));

        sdei_mask_local_cpu();
}

bool smp_crash_stop_failed(void)
{
        return (atomic_read(&waiting_for_crash_ipi) > 0);
}
#endif

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}

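/*
 * Does the calling CPU's enable method provide a cpu_die hook, i.e. can
 * CPUs be hot-unplugged on this system?
 */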
static bool have_cpu_die(void)
{
#ifdef CONFIG_HOTPLUG_CPU
        int any_cpu = raw_smp_processor_id();
        const struct cpu_operations *ops = get_cpu_ops(any_cpu);

        if (ops && ops->cpu_die)
                return true;
#endif
        return false;
}

bool cpus_are_stuck_in_kernel(void)
{
        bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die());

        return !!cpus_stuck_in_kernel || smp_spin_tables;
}