Linux/arch/arm64/kernel/smp.c

/*
 * SMP initialisation and IPI support
 * Based on arch/arm/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/irq_work.h>

#include <asm/alternative.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/smp_plat.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/virt.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

/*
 * As of 2.5, kernels no longer have an init_tasks structure, so we
 * need some other way of telling a new secondary core where to place
 * its SVC stack.
 */
struct secondary_data secondary_data;

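/*
 * The IPI numbers passed to smp_cross_call(); each value indexes both
 * the ipi_types[] name table and the per-cpu ipi_irqs[] statistics.
 */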
enum ipi_msg_type {
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CPU_STOP,
        IPI_TIMER,
        IPI_IRQ_WORK,
};

/*
 * Boot a secondary CPU, and assign it the specified idle task.
 * This also gives us the initial stack to use for this CPU.
 */
static int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
        if (cpu_ops[cpu]->cpu_boot)
                return cpu_ops[cpu]->cpu_boot(cpu);

        return -EOPNOTSUPP;
}

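/*
 * Signalled by a newly booted secondary CPU from secondary_start_kernel()
 * once it has marked itself online; __cpu_up() waits on it with a one
 * second timeout.
 */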
static DECLARE_COMPLETION(cpu_running);

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
        int ret;

        /*
         * We need to tell the secondary core where to find its stack and the
         * page tables.
         */
        secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
        __flush_dcache_area(&secondary_data, sizeof(secondary_data));

        /*
         * Now bring the CPU into our world.
         */
        ret = boot_secondary(cpu, idle);
        if (ret == 0) {
                /*
                 * CPU was successfully started, wait for it to come online or
                 * time out.
                 */
                wait_for_completion_timeout(&cpu_running,
                                            msecs_to_jiffies(1000));

                if (!cpu_online(cpu)) {
                        pr_crit("CPU%u: failed to come online\n", cpu);
                        ret = -EIO;
                }
        } else {
                pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
                return ret;
        }

        secondary_data.stack = NULL;

        return ret;
}

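/*
 * Record this CPU's topology information; currently this just
 * delegates to store_cpu_topology().
 */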
static void smp_store_cpu_info(unsigned int cpuid)
{
        store_cpu_topology(cpuid);
}

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage notrace void secondary_start_kernel(void)
{
        struct mm_struct *mm = &init_mm;
        unsigned int cpu = smp_processor_id();

        /*
         * All kernel threads share the same mm context; grab a
         * reference and switch to it.
         */
        atomic_inc(&mm->mm_count);
        current->active_mm = mm;

        set_my_cpu_offset(per_cpu_offset(smp_processor_id()));

        /*
         * TTBR0 is only used for the identity mapping at this stage. Make it
         * point to zero page to avoid speculatively fetching new entries.
         */
        cpu_set_reserved_ttbr0();
        local_flush_tlb_all();
        cpu_set_default_tcr_t0sz();

        preempt_disable();
        trace_hardirqs_off();

        /*
         * If the system has established the capabilities, make sure
         * this CPU ticks all of those. If it doesn't, the CPU will
         * fail to come online.
         */
        verify_local_cpu_capabilities();

        if (cpu_ops[cpu]->cpu_postboot)
                cpu_ops[cpu]->cpu_postboot();

        /*
         * Log the CPU info before it is marked online and might get read.
         */
        cpuinfo_store_cpu();

        /*
         * Enable GIC and timers.
         */
        notify_cpu_starting(cpu);

        smp_store_cpu_info(cpu);

        /*
         * OK, now it's safe to let the boot CPU continue.  Wait for
         * the CPU migration code to notice that the CPU is online
         * before we continue.
         */
        pr_info("CPU%u: Booted secondary processor [%08x]\n",
                                         cpu, read_cpuid_id());
        set_cpu_online(cpu, true);
        complete(&cpu_running);

        local_irq_enable();
        local_async_enable();

        /*
         * OK, it's off to the idle thread for us
         */
        cpu_startup_entry(CPUHP_ONLINE);
}

#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_disable(unsigned int cpu)
{
        /*
         * If we don't have a cpu_die method, abort before we reach the point
         * of no return. CPU0 may not have a cpu_ops, so test for it.
         */
        if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
                return -EOPNOTSUPP;

        /*
         * We may need to abort a hot unplug for some other mechanism-specific
         * reason.
         */
        if (cpu_ops[cpu]->cpu_disable)
                return cpu_ops[cpu]->cpu_disable(cpu);

        return 0;
}

/*
 * __cpu_disable runs on the processor that is being shut down.
 */
int __cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        int ret;

        ret = op_cpu_disable(cpu);
        if (ret)
                return ret;

        /*
         * Take this CPU offline.  Once we clear this, we can't return,
         * and we must not schedule until we're ready to give up the cpu.
         */
        set_cpu_online(cpu, false);

        /*
         * OK - migrate IRQs away from this CPU
         */
        irq_migrate_all_off_this_cpu();

        return 0;
}

static int op_cpu_kill(unsigned int cpu)
{
        /*
         * If we have no means of synchronising with the dying CPU, then assume
         * that it is really dead. We can only wait for an arbitrary length of
         * time and hope that it's dead, so let's skip the wait and just hope.
         */
        if (!cpu_ops[cpu]->cpu_kill)
                return 0;

        return cpu_ops[cpu]->cpu_kill(cpu);
}

/*
 * Called on the thread that is asking for a CPU to be shut down; waits
 * until the shutdown has completed, or times out.
 */
void __cpu_die(unsigned int cpu)
{
        int err;

        if (!cpu_wait_death(cpu, 5)) {
                pr_crit("CPU%u: cpu didn't die\n", cpu);
                return;
        }
        pr_notice("CPU%u: shutdown\n", cpu);

        /*
         * Now that the dying CPU is beyond the point of no return w.r.t.
         * in-kernel synchronisation, try to get the firmware to help us
         * verify that it has really left the kernel before we consider
         * clobbering anything it might still be using.
         */
        err = op_cpu_kill(cpu);
        if (err)
                pr_warn("CPU%d may not have shut down cleanly: %d\n",
                        cpu, err);
}

/*
 * Called from the idle thread for the CPU which has been shut down.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void cpu_die(void)
{
        unsigned int cpu = smp_processor_id();

        idle_task_exit();

        local_irq_disable();

        /* Tell __cpu_die() that this CPU is now safe to dispose of */
        (void)cpu_report_death();

        /*
         * Actually shut down the CPU. This must never fail. The specific
         * hotplug mechanism must perform all required cache maintenance to
         * ensure that no dirty lines are lost in the process of shutting
         * down the CPU.
         */
        cpu_ops[cpu]->cpu_die(cpu);

        BUG();
}
#endif

static void __init hyp_mode_check(void)
{
        if (is_hyp_mode_available())
                pr_info("CPU: All CPU(s) started at EL2\n");
        else if (is_hyp_mode_mismatched())
                WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
                           "CPU: CPUs started in inconsistent modes");
        else
                pr_info("CPU: All CPU(s) started at EL1\n");
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
        setup_cpu_features();
        hyp_mode_check();
        apply_alternatives_all();
}

void __init smp_prepare_boot_cpu(void)
{
        set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
        cpuinfo_store_boot_cpu();
}

static u64 __init of_get_cpu_mpidr(struct device_node *dn)
{
        const __be32 *cell;
        u64 hwid;

        /*
         * A cpu node with a missing "reg" property is considered
         * invalid: it cannot contribute a cpu_logical_map entry.
         */
        cell = of_get_property(dn, "reg", NULL);
        if (!cell) {
                pr_err("%s: missing reg property\n", dn->full_name);
                return INVALID_HWID;
        }

        hwid = of_read_number(cell, of_n_addr_cells(dn));
        /*
         * Non-affinity bits must be set to 0 in the DT.
         */
        if (hwid & ~MPIDR_HWID_BITMASK) {
                pr_err("%s: invalid reg property\n", dn->full_name);
                return INVALID_HWID;
        }
        return hwid;
}

/*
 * Duplicate MPIDRs are a recipe for disaster. Scan all initialized
 * entries and check for duplicates. If any is found just ignore the
 * cpu. cpu_logical_map was initialized to INVALID_HWID to avoid
 * matching valid MPIDR values.
 */
static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
{
        unsigned int i;

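        /*
         * Start from logical id 1: entry 0 is the boot CPU, whose
         * MPIDR the callers match separately.
         */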
        for (i = 1; (i < cpu) && (i < NR_CPUS); i++)
                if (cpu_logical_map(i) == hwid)
                        return true;
        return false;
}

/*
 * Initialize cpu operations for a logical cpu and
 * set it in the possible mask on success
 */
static int __init smp_cpu_setup(int cpu)
{
        if (cpu_read_ops(cpu))
                return -ENODEV;

        if (cpu_ops[cpu]->cpu_init(cpu))
                return -ENODEV;

        set_cpu_possible(cpu, true);

        return 0;
}

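/*
 * Logical cpu 0 is reserved for the boot CPU, so cpu_count starts at 1;
 * bootcpu_valid records whether the boot CPU's MPIDR was found in the
 * firmware tables (DT or ACPI MADT).
 */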
static bool bootcpu_valid __initdata;
static unsigned int cpu_count = 1;

#ifdef CONFIG_ACPI
/*
 * acpi_map_gic_cpu_interface - parse processor MADT entry
 *
 * Carry out sanity checks on MADT processor entry and initialize
 * cpu_logical_map on success
 */
static void __init
acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
{
        u64 hwid = processor->arm_mpidr;

        if (!(processor->flags & ACPI_MADT_ENABLED)) {
                pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
                return;
        }

        if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
                pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
                return;
        }

        if (is_mpidr_duplicate(cpu_count, hwid)) {
                pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid);
                return;
        }

        /* Check if GICC structure of boot CPU is available in the MADT */
        if (cpu_logical_map(0) == hwid) {
                if (bootcpu_valid) {
                        pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n",
                               hwid);
                        return;
                }
                bootcpu_valid = true;
                return;
        }

        if (cpu_count >= NR_CPUS)
                return;

        /* map the logical cpu id to cpu MPIDR */
        cpu_logical_map(cpu_count) = hwid;

        cpu_count++;
}

static int __init
acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
                             const unsigned long end)
{
        struct acpi_madt_generic_interrupt *processor;

        processor = (struct acpi_madt_generic_interrupt *)header;
        if (BAD_MADT_GICC_ENTRY(processor, end))
                return -EINVAL;

        acpi_table_print_madt_entry(header);

        acpi_map_gic_cpu_interface(processor);

        return 0;
}
#else
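/* With ACPI disabled, make the MADT walk in smp_init_cpus() a no-op. */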
#define acpi_table_parse_madt(...)      do { } while (0)
#endif

/*
 * Enumerate the possible CPU set from the device tree and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
static void __init of_parse_and_init_cpus(void)
{
        struct device_node *dn = NULL;

        while ((dn = of_find_node_by_type(dn, "cpu"))) {
                u64 hwid = of_get_cpu_mpidr(dn);

                if (hwid == INVALID_HWID)
                        goto next;

                if (is_mpidr_duplicate(cpu_count, hwid)) {
                        pr_err("%s: duplicate cpu reg properties in the DT\n",
                                dn->full_name);
                        goto next;
                }

                /*
                 * The numbering scheme requires that the boot CPU
                 * must be assigned logical id 0. Record it so that
                 * the logical map built from DT is validated and can
                 * be used.
                 */
                if (hwid == cpu_logical_map(0)) {
                        if (bootcpu_valid) {
                                pr_err("%s: duplicate boot cpu reg property in DT\n",
                                        dn->full_name);
                                goto next;
                        }

                        bootcpu_valid = true;

                        /*
                         * cpu_logical_map has already been
                         * initialized and the boot cpu doesn't need
                         * the enable-method so continue without
                         * incrementing cpu.
                         */
                        continue;
                }

                if (cpu_count >= NR_CPUS)
                        goto next;

                pr_debug("cpu logical map 0x%llx\n", hwid);
                cpu_logical_map(cpu_count) = hwid;
next:
                cpu_count++;
        }
}

/*
 * Enumerate the possible CPU set from the device tree or ACPI and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
void __init smp_init_cpus(void)
{
        int i;

        if (acpi_disabled)
                of_parse_and_init_cpus();
        else
                /*
                 * Walk the MADT to determine how many CPUs we have,
                 * including disabled CPUs, and gather the information
                 * we need for SMP init.
                 */
                acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
                                      acpi_parse_gic_cpu_interface, 0);

        if (cpu_count > NR_CPUS)
                pr_warn("no. of cores (%d) greater than configured maximum of %d - clipping\n",
                        cpu_count, NR_CPUS);

        if (!bootcpu_valid) {
                pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
                return;
        }

        /*
         * We need to set the cpu_logical_map entries before enabling
         * the cpus so that cpu processor description entries (DT cpu nodes
         * and ACPI MADT entries) can be retrieved by matching the cpu hwid
         * with entries in cpu_logical_map while initializing the cpus.
         * If the cpu set-up fails, invalidate the cpu_logical_map entry.
         */
        for (i = 1; i < NR_CPUS; i++) {
                if (cpu_logical_map(i) != INVALID_HWID) {
                        if (smp_cpu_setup(i))
                                cpu_logical_map(i) = INVALID_HWID;
                }
        }
}

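/*
 * Runs on the boot CPU: record its topology info, then mark as present
 * every possible secondary whose enable method's cpu_prepare() hook
 * succeeds, limited to (max_cpus - 1) additional CPUs.
 */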
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        int err;
        unsigned int cpu, ncores = num_possible_cpus();

        init_cpu_topology();

        smp_store_cpu_info(smp_processor_id());

        /*
         * are we trying to boot more cores than exist?
         */
        if (max_cpus > ncores)
                max_cpus = ncores;

        /* Don't bother if we're effectively UP */
        if (max_cpus <= 1)
                return;

        /*
         * Initialise the present map (which describes the set of CPUs
         * actually populated at the present time) and release the
         * secondaries from the bootloader.
         *
         * Make sure we online at most (max_cpus - 1) additional CPUs.
         */
        max_cpus--;
        for_each_possible_cpu(cpu) {
                if (max_cpus == 0)
                        break;

                if (cpu == smp_processor_id())
                        continue;

                if (!cpu_ops[cpu])
                        continue;

                err = cpu_ops[cpu]->cpu_prepare(cpu);
                if (err)
                        continue;

                set_cpu_present(cpu, true);
                max_cpus--;
        }
}

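/*
 * The hook through which IPIs are actually raised. The interrupt
 * controller driver (typically the GIC driver on arm64) registers its
 * callback here via set_smp_cross_call() during interrupt controller
 * initialisation.
 */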
void (*__smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
        __smp_cross_call = fn;
}

static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)  [x] = s
        S(IPI_RESCHEDULE, "Rescheduling interrupts"),
        S(IPI_CALL_FUNC, "Function call interrupts"),
        S(IPI_CPU_STOP, "CPU stop interrupts"),
        S(IPI_TIMER, "Timer broadcast interrupts"),
        S(IPI_IRQ_WORK, "IRQ work interrupts"),
};

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
        trace_ipi_raise(target, ipi_types[ipinr]);
        __smp_cross_call(target, ipinr);
}

void show_ipi_list(struct seq_file *p, int prec)
{
        unsigned int cpu, i;

        for (i = 0; i < NR_IPI; i++) {
                seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
                           prec >= 4 ? " " : "");
                for_each_online_cpu(cpu)
                        seq_printf(p, "%10u ",
                                   __get_irq_stat(cpu, ipi_irqs[i]));
                seq_printf(p, "      %s\n", ipi_types[i]);
        }
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = 0;
        int i;

        for (i = 0; i < NR_IPI; i++)
                sum += __get_irq_stat(cpu, ipi_irqs[i]);

        return sum;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
        if (__smp_cross_call)
                smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

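/* Serialises the diagnostic output printed from ipi_cpu_stop(). */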
static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
        if (system_state == SYSTEM_BOOTING ||
            system_state == SYSTEM_RUNNING) {
                raw_spin_lock(&stop_lock);
                pr_crit("CPU%u: stopping\n", cpu);
                dump_stack();
                raw_spin_unlock(&stop_lock);
        }

        set_cpu_online(cpu, false);

        local_irq_disable();

        while (1)
                cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
        unsigned int cpu = smp_processor_id();
        struct pt_regs *old_regs = set_irq_regs(regs);

        if ((unsigned)ipinr < NR_IPI) {
                trace_ipi_entry_rcuidle(ipi_types[ipinr]);
                __inc_irq_stat(cpu, ipi_irqs[ipinr]);
        }

        switch (ipinr) {
        case IPI_RESCHEDULE:
                scheduler_ipi();
                break;

        case IPI_CALL_FUNC:
                irq_enter();
                generic_smp_call_function_interrupt();
                irq_exit();
                break;

        case IPI_CPU_STOP:
                irq_enter();
                ipi_cpu_stop(cpu);
                irq_exit();
                break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
        case IPI_TIMER:
                irq_enter();
                tick_receive_broadcast();
                irq_exit();
                break;
#endif

#ifdef CONFIG_IRQ_WORK
        case IPI_IRQ_WORK:
                irq_enter();
                irq_work_run();
                irq_exit();
                break;
#endif

        default:
                pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
                break;
        }

        if ((unsigned)ipinr < NR_IPI)
                trace_ipi_exit_rcuidle(ipi_types[ipinr]);
        set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_TIMER);
}
#endif

/*
 * The number of CPUs online, not counting this CPU (which may not be
 * fully online and so not counted in num_online_cpus()).
 */
static inline unsigned int num_other_online_cpus(void)
{
        unsigned int this_cpu_online = cpu_online(smp_processor_id());

        return num_online_cpus() - this_cpu_online;
}

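/*
 * Stop all other CPUs (e.g. on reboot or panic): send IPI_CPU_STOP to
 * every other online CPU, then busy-wait up to one second for them to
 * take themselves offline in ipi_cpu_stop().
 */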
void smp_send_stop(void)
{
        unsigned long timeout;

        if (num_other_online_cpus()) {
                cpumask_t mask;

                cpumask_copy(&mask, cpu_online_mask);
                cpumask_clear_cpu(smp_processor_id(), &mask);

                smp_cross_call(&mask, IPI_CPU_STOP);
        }

        /* Wait up to one second for other CPUs to stop */
        timeout = USEC_PER_SEC;
        while (num_other_online_cpus() && timeout--)
                udelay(1);

        if (num_other_online_cpus())
                pr_warning("SMP: failed to stop secondary CPUs\n");
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}
