TOMOYO Linux Cross Reference
Linux/kernel/smp.c

/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#include "smpboot.h"

enum {
        CSD_FLAG_LOCK           = 0x01,
        CSD_FLAG_SYNCHRONOUS    = 0x02,
};

struct call_function_data {
        struct call_single_data __percpu *csd;
        cpumask_var_t           cpumask;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void flush_smp_call_function_queue(bool warn_cpu_offline);

static int
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;
        struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
                                cpu_to_node(cpu)))
                        return notifier_from_errno(-ENOMEM);
                cfd->csd = alloc_percpu(struct call_single_data);
                if (!cfd->csd) {
                        free_cpumask_var(cfd->cpumask);
                        return notifier_from_errno(-ENOMEM);
                }
                break;

#ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                /* Fall-through to the CPU_DEAD[_FROZEN] case. */

        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                free_cpumask_var(cfd->cpumask);
                free_percpu(cfd->csd);
                break;

        case CPU_DYING:
        case CPU_DYING_FROZEN:
                /*
                 * The IPIs for the smp-call-function callbacks queued by other
                 * CPUs might arrive late, either due to hardware latencies or
                 * because this CPU disabled interrupts (inside stop-machine)
                 * before the IPIs were sent. So flush out any pending callbacks
                 * explicitly (without waiting for the IPIs to arrive), to
                 * ensure that the outgoing CPU doesn't go offline with work
                 * still pending.
                 */
                flush_smp_call_function_queue(false);
                break;
#endif
        }

        return NOTIFY_OK;
}

static struct notifier_block hotplug_cfd_notifier = {
        .notifier_call          = hotplug_cfd,
};

void __init call_function_init(void)
{
        void *cpu = (void *)(long)smp_processor_id();
        int i;

        for_each_possible_cpu(i)
                init_llist_head(&per_cpu(call_single_queue, i));

        hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
        register_cpu_notifier(&hotplug_cfd_notifier);
}

/*
 * csd_lock/csd_unlock are used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static void csd_lock_wait(struct call_single_data *csd)
{
        while (smp_load_acquire(&csd->flags) & CSD_FLAG_LOCK)
                cpu_relax();
}

static void csd_lock(struct call_single_data *csd)
{
        csd_lock_wait(csd);
        csd->flags |= CSD_FLAG_LOCK;

        /*
         * prevent CPU from reordering the above assignment
         * to ->flags with any subsequent assignments to other
         * fields of the specified call_single_data structure:
         */
        smp_wmb();
}

static void csd_unlock(struct call_single_data *csd)
{
        WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

        /*
         * ensure we're all done before releasing data:
         */
        smp_store_release(&csd->flags, 0);
}

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);

/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. The csd must already be
 * locked (CSD_FLAG_LOCK set in ->flags); ->func and ->info
 * are filled in here from @func and @info.
 */
static int generic_exec_single(int cpu, struct call_single_data *csd,
                               smp_call_func_t func, void *info)
{
        if (cpu == smp_processor_id()) {
                unsigned long flags;

                /*
                 * We can unlock early even for the synchronous on-stack case,
                 * since we're doing this from the same CPU.
                 */
                csd_unlock(csd);
                local_irq_save(flags);
                func(info);
                local_irq_restore(flags);
                return 0;
        }

        if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
                csd_unlock(csd);
                return -ENXIO;
        }

        csd->func = func;
        csd->info = info;

        /*
         * The list addition should be visible before sending the IPI:
         * by the time the handler locks the list to pull the entry off,
         * the normal cache coherency rules implied by spinlocks make
         * the addition visible.
         *
         * If IPIs can go out of order with respect to the cache coherency
         * protocol in an architecture, sufficient synchronisation should
         * be added to arch code to make it appear to obey cache coherency
         * WRT locking and barrier primitives. Generic code isn't really
         * equipped to do the right thing...
         */
        if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
                arch_send_call_function_single_ipi(cpu);

        return 0;
}

/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
        flush_smp_call_function_queue(true);
}

/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *                    offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
        struct llist_head *head;
        struct llist_node *entry;
        struct call_single_data *csd, *csd_next;
        static bool warned;

        WARN_ON(!irqs_disabled());

        head = this_cpu_ptr(&call_single_queue);
        entry = llist_del_all(head);
        entry = llist_reverse_order(entry);

        /* There shouldn't be any pending callbacks on an offline CPU. */
        if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
                     !warned && !llist_empty(head))) {
                warned = true;
                WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

                /*
                 * We don't have to use the _safe() variant here
                 * because we are not invoking the IPI handlers yet.
                 */
                llist_for_each_entry(csd, entry, llist)
                        pr_warn("IPI callback %pS sent to offline CPU\n",
                                csd->func);
        }

        llist_for_each_entry_safe(csd, csd_next, entry, llist) {
                smp_call_func_t func = csd->func;
                void *info = csd->info;

                /* Do we wait until *after* callback? */
                if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
                        func(info);
                        csd_unlock(csd);
                } else {
                        csd_unlock(csd);
                        func(info);
                }
        }

        /*
         * Handle irq works queued remotely by irq_work_queue_on().
         * The smp functions above are typically synchronous, so they
         * should run first, since some other CPUs may be busy waiting
         * for them.
         */
        irq_work_run();
}

/**
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run @func on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on @cpu.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
                             int wait)
{
        struct call_single_data *csd;
        struct call_single_data csd_stack = { .flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS };
        int this_cpu;
        int err;

        /*
         * prevent preemption and reschedule on another processor,
         * as well as CPU removal
         */
        this_cpu = get_cpu();

        /*
         * Can deadlock when called with interrupts disabled.
         * We allow cpu's that are not yet online though, as no one else can
         * send smp call function interrupt to this cpu and as such deadlocks
         * can't happen.
         */
        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
                     && !oops_in_progress);

        csd = &csd_stack;
        if (!wait) {
                csd = this_cpu_ptr(&csd_data);
                csd_lock(csd);
        }

        err = generic_exec_single(cpu, csd, func, info);

        if (wait)
                csd_lock_wait(csd);

        put_cpu();

        return err;
}
EXPORT_SYMBOL(smp_call_function_single);

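/*
 * Illustrative usage sketch; not part of this file, and all names below
 * are hypothetical. The callback runs with interrupts disabled on the
 * target CPU, so it must be fast and must not sleep:
 *
 *      static void example_func(void *info)
 *      {
 *              atomic_t *counter = info;
 *
 *              atomic_inc(counter);            // runs on the target CPU
 *      }
 *
 *      static int example_poke(int cpu)
 *      {
 *              static atomic_t counter = ATOMIC_INIT(0);
 *
 *              // wait=1: return only after example_func() has completed
 *              return smp_call_function_single(cpu, example_func,
 *                                              &counter, 1);
 *      }
 */
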
/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 *                               specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, struct call_single_data *csd)
{
        int err = 0;

        preempt_disable();

        /* We could deadlock if we have to wait here with interrupts disabled! */
        if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK))
                csd_lock_wait(csd);

        csd->flags = CSD_FLAG_LOCK;
        smp_wmb();

        err = generic_exec_single(cpu, csd, csd->func, csd->info);
        preempt_enable();

        return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);

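/*
 * Illustrative sketch of the "embedded csd" pattern described above;
 * the names are hypothetical. The csd lives in the caller's object, and
 * the caller must not reissue it until the previous call has finished:
 *
 *      struct example_dev {
 *              struct call_single_data csd;
 *              unsigned long pending_events;
 *      };
 *
 *      static void example_handle_events(void *info)
 *      {
 *              struct example_dev *dev = info;
 *
 *              // process dev->pending_events on the target CPU
 *      }
 *
 *      static void example_kick(struct example_dev *dev, int cpu)
 *      {
 *              dev->csd.func = example_handle_events;
 *              dev->csd.info = dev;
 *              // returns immediately; usable with interrupts disabled
 *              smp_call_function_single_async(cpu, &dev->csd);
 *      }
 */
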
/**
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *      1) current cpu if in @mask
 *      2) any cpu of current node if in @mask
 *      3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
                          smp_call_func_t func, void *info, int wait)
{
        unsigned int cpu;
        const struct cpumask *nodemask;
        int ret;

        /* Try for same CPU (cheapest) */
        cpu = get_cpu();
        if (cpumask_test_cpu(cpu, mask))
                goto call;

        /* Try for same node. */
        nodemask = cpumask_of_node(cpu_to_node(cpu));
        for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
             cpu = cpumask_next_and(cpu, nodemask, mask)) {
                if (cpu_online(cpu))
                        goto call;
        }

        /* Any online will do: smp_call_function_single handles nr_cpu_ids. */
        cpu = cpumask_any_and(mask, cpu_online_mask);
call:
        ret = smp_call_function_single(cpu, func, info, wait);
        put_cpu();
        return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);

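/*
 * Illustrative sketch (hypothetical name): run a callback on whichever
 * CPU of a given NUMA node is cheapest to reach, following the selection
 * order documented above:
 *
 *      static int example_run_on_node(int node, smp_call_func_t func,
 *                                     void *info)
 *      {
 *              // wait=1: block until func has run on the chosen CPU
 *              return smp_call_function_any(cpumask_of_node(node),
 *                                           func, info, 1);
 *      }
 */
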
/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
                            smp_call_func_t func, void *info, bool wait)
{
        struct call_function_data *cfd;
        int cpu, next_cpu, this_cpu = smp_processor_id();

        /*
         * Can deadlock when called with interrupts disabled.
         * We allow cpu's that are not yet online though, as no one else can
         * send smp call function interrupt to this cpu and as such deadlocks
         * can't happen.
         */
        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
                     && !oops_in_progress && !early_boot_irqs_disabled);

        /* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
        cpu = cpumask_first_and(mask, cpu_online_mask);
        if (cpu == this_cpu)
                cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

        /* No online cpus?  We're done. */
        if (cpu >= nr_cpu_ids)
                return;

        /* Do we have another CPU which isn't us? */
        next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
        if (next_cpu == this_cpu)
                next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

        /* Fastpath: do that cpu by itself. */
        if (next_cpu >= nr_cpu_ids) {
                smp_call_function_single(cpu, func, info, wait);
                return;
        }

        cfd = this_cpu_ptr(&cfd_data);

        cpumask_and(cfd->cpumask, mask, cpu_online_mask);
        cpumask_clear_cpu(this_cpu, cfd->cpumask);

        /* Some callers race with other cpus changing the passed mask */
        if (unlikely(!cpumask_weight(cfd->cpumask)))
                return;

        for_each_cpu(cpu, cfd->cpumask) {
                struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);

                csd_lock(csd);
                if (wait)
                        csd->flags |= CSD_FLAG_SYNCHRONOUS;
                csd->func = func;
                csd->info = info;
                llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
        }

        /* Send a message to all CPUs in the map */
        arch_send_call_function_ipi_mask(cfd->cpumask);

        if (wait) {
                for_each_cpu(cpu, cfd->cpumask) {
                        struct call_single_data *csd;

                        csd = per_cpu_ptr(cfd->csd, cpu);
                        csd_lock_wait(csd);
                }
        }
}
EXPORT_SYMBOL(smp_call_function_many);

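/*
 * Illustrative sketch (hypothetical name): preemption must be disabled
 * across the call, e.g. via get_cpu()/put_cpu(), and the calling CPU
 * itself is never sent an IPI:
 *
 *      static void example_call_others(const struct cpumask *mask,
 *                                      smp_call_func_t func, void *info)
 *      {
 *              get_cpu();              // disables preemption
 *              smp_call_function_many(mask, func, info, true);
 *              put_cpu();
 *      }
 */
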
/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
        preempt_disable();
        smp_call_function_many(cpu_online_mask, func, info, wait);
        preempt_enable();

        return 0;
}
EXPORT_SYMBOL(smp_call_function);

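/*
 * Illustrative sketch (hypothetical names): a typical use is forcing
 * every other CPU through a fast callback after a global state change;
 * note that the calling CPU is skipped:
 *
 *      static void example_drain(void *unused)
 *      {
 *              // flush this CPU's private buffers; must not sleep
 *      }
 *
 *      static void example_global_sync(void)
 *      {
 *              smp_call_function(example_drain, NULL, 1); // wait for all
 *      }
 */
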
/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);

/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
        setup_max_cpus = 0;
        arch_disable_smp_support();

        return 0;
}

early_param("nosmp", nosmp);

/* this is the hard limit */
static int __init nrcpus(char *str)
{
        int nr_cpus;

        get_option(&str, &nr_cpus);
        if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
                nr_cpu_ids = nr_cpus;

        return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
        get_option(&str, &setup_max_cpus);
        if (setup_max_cpus == 0)
                arch_disable_smp_support();

        return 0;
}

early_param("maxcpus", maxcpus);

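/*
 * Illustrative kernel command lines for the parameters above (the
 * values are examples only):
 *
 *      nosmp           boot with a single CPU and disable SMP support
 *      maxcpus=4       bring up at most 4 CPUs at boot (more may be
 *                      onlined later via CPU hotplug)
 *      nr_cpus=8       hard-cap nr_cpu_ids at 8; CPUs beyond this can
 *                      never be brought up
 */
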
/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
        nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

void __weak smp_announce(void)
{
        printk(KERN_INFO "Brought up %d CPUs\n", num_online_cpus());
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
        unsigned int cpu;

        idle_threads_init();

        /* FIXME: This should be done in userspace --RR */
        for_each_present_cpu(cpu) {
                if (num_online_cpus() >= setup_max_cpus)
                        break;
                if (!cpu_online(cpu))
                        cpu_up(cpu);
        }

        /* Any cleanup work */
        smp_announce();
        smp_cpus_done(setup_max_cpus);
}

/*
 * Call a function on all processors.  May be used during early boot while
 * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
        unsigned long flags;
        int ret = 0;

        preempt_disable();
        ret = smp_call_function(func, info, wait);
        local_irq_save(flags);
        func(info);
        local_irq_restore(flags);
        preempt_enable();
        return ret;
}
EXPORT_SYMBOL(on_each_cpu);

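/*
 * Illustrative sketch (hypothetical names): on_each_cpu() is the usual
 * way to run a function on every online CPU, the calling CPU included:
 *
 *      static DEFINE_PER_CPU(unsigned long, example_stat);
 *
 *      static void example_clear_stat(void *unused)
 *      {
 *              this_cpu_write(example_stat, 0);  // clears this CPU's copy
 *      }
 *
 *      ...
 *      on_each_cpu(example_clear_stat, NULL, 1); // wait=1: all done here
 */
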
/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.  The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
                        void *info, bool wait)
{
        int cpu = get_cpu();

        smp_call_function_many(mask, func, info, wait);
        if (cpumask_test_cpu(cpu, mask)) {
                unsigned long flags;
                local_irq_save(flags);
                func(info);
                local_irq_restore(flags);
        }
        put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);

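/*
 * Illustrative sketch (hypothetical name): run a callback on all online
 * CPUs of one NUMA node, the local CPU included if it is in the mask:
 *
 *      static void example_sync_node(int node, smp_call_func_t func,
 *                                    void *info)
 *      {
 *              on_each_cpu_mask(cpumask_of_node(node), func, info, true);
 *      }
 */
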
/**
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:  A callback function that is passed a cpu id and
 *              the info parameter. The function is called
 *              with preemption disabled. The function should
 *              return a boolean value indicating whether to IPI
 *              the specified CPU.
 * @func:       The function to run on all applicable CPUs.
 *              This must be fast and non-blocking.
 * @info:       An arbitrary pointer to pass to both functions.
 * @wait:       If true, wait (atomically) until function has
 *              completed on other CPUs.
 * @gfp_flags:  GFP flags to use when allocating the cpumask
 *              used internally by the function.
 *
 * The function might sleep if the GFP flags indicate that a
 * non-atomic allocation is allowed.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
                        smp_call_func_t func, void *info, bool wait,
                        gfp_t gfp_flags)
{
        cpumask_var_t cpus;
        int cpu, ret;

        might_sleep_if(gfp_flags & __GFP_WAIT);

        if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
                preempt_disable();
                for_each_online_cpu(cpu)
                        if (cond_func(cpu, info))
                                cpumask_set_cpu(cpu, cpus);
                on_each_cpu_mask(cpus, func, info, wait);
                preempt_enable();
                free_cpumask_var(cpus);
        } else {
                /*
                 * No free cpumask, bother. No matter, we'll
                 * just have to IPI them one by one.
                 */
                preempt_disable();
                for_each_online_cpu(cpu)
                        if (cond_func(cpu, info)) {
                                ret = smp_call_function_single(cpu, func,
                                                                info, wait);
                                WARN_ON_ONCE(ret);
                        }
                preempt_enable();
        }
}
EXPORT_SYMBOL(on_each_cpu_cond);

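/*
 * Illustrative sketch (hypothetical names): IPI only those CPUs whose
 * per-cpu state needs attention and skip the rest:
 *
 *      static DEFINE_PER_CPU(bool, example_dirty);
 *
 *      static bool example_needs_flush(int cpu, void *info)
 *      {
 *              return per_cpu(example_dirty, cpu); // IPI only dirty CPUs
 *      }
 *
 *      static void example_flush(void *info)
 *      {
 *              this_cpu_write(example_dirty, false);
 *      }
 *
 *      ...
 *      on_each_cpu_cond(example_needs_flush, example_flush, NULL,
 *                       true, GFP_KERNEL);
 */
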
static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
        /* Make sure the change is visible before we kick the cpus */
        smp_mb();
        smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);

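/*
 * Illustrative sketch (hypothetical names) of the pointer-update pattern
 * described above: once the kick returns, no CPU can still be using the
 * old callback:
 *
 *      static void (*example_idle_fn)(void);
 *
 *      static void example_set_idle_fn(void (*fn)(void))
 *      {
 *              example_idle_fn = fn;   // smp_mb() is done inside the kick
 *              kick_all_cpus_sync();   // old fn now unused everywhere
 *      }
 */
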
/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 *
 * Tries to break out of idle all cpus that are currently idle, including
 * cpus that are idle-polling; cpus that are not idle are left alone.
 */
void wake_up_all_idle_cpus(void)
{
        int cpu;

        preempt_disable();
        for_each_online_cpu(cpu) {
                if (cpu == smp_processor_id())
                        continue;

                wake_up_if_idle(cpu);
        }
        preempt_enable();
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);
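
/*
 * Illustrative sketch (hypothetical name): wake the idle CPUs so they
 * re-evaluate state consulted by the idle loop, e.g. after a latency
 * constraint has changed:
 *
 *      static void example_latency_changed(void)
 *      {
 *              // update whatever the idle loop consults, then ...
 *              wake_up_all_idle_cpus(); // ... force a re-read everywhere
 *      }
 */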