TOMOYO Linux Cross Reference
Linux/kernel/smp.c

  1 /*
  2  * Generic helpers for smp ipi calls
  3  *
  4  * (C) Jens Axboe <jens.axboe@oracle.com> 2008
  5  */
  6 #include <linux/rcupdate.h>
  7 #include <linux/rculist.h>
  8 #include <linux/kernel.h>
  9 #include <linux/module.h>
 10 #include <linux/percpu.h>
 11 #include <linux/init.h>
 12 #include <linux/gfp.h>
 13 #include <linux/smp.h>
 14 #include <linux/cpu.h>
 15 
 16 #ifdef CONFIG_USE_GENERIC_SMP_HELPERS
 17 static struct {
 18         struct list_head        queue;
 19         raw_spinlock_t          lock;
 20 } call_function __cacheline_aligned_in_smp =
 21         {
 22                 .queue          = LIST_HEAD_INIT(call_function.queue),
 23                 .lock           = __RAW_SPIN_LOCK_UNLOCKED(call_function.lock),
 24         };
 25 
 26 enum {
 27         CSD_FLAG_LOCK           = 0x01,
 28 };
 29 
 30 struct call_function_data {
 31         struct call_single_data csd;
 32         atomic_t                refs;
 33         cpumask_var_t           cpumask;
 34         cpumask_var_t           cpumask_ipi;
 35 };
 36 
 37 static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
 38 
 39 struct call_single_queue {
 40         struct list_head        list;
 41         raw_spinlock_t          lock;
 42 };
 43 
 44 static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_queue, call_single_queue);
 45 
 46 static int
 47 hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 48 {
 49         long cpu = (long)hcpu;
 50         struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
 51 
 52         switch (action) {
 53         case CPU_UP_PREPARE:
 54         case CPU_UP_PREPARE_FROZEN:
 55                 if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
 56                                 cpu_to_node(cpu)))
 57                         return notifier_from_errno(-ENOMEM);
 58                 if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
 59                                 cpu_to_node(cpu)))
 60                         return notifier_from_errno(-ENOMEM);
 61                 break;
 62 
 63 #ifdef CONFIG_HOTPLUG_CPU
 64         case CPU_UP_CANCELED:
 65         case CPU_UP_CANCELED_FROZEN:
 66 
 67         case CPU_DEAD:
 68         case CPU_DEAD_FROZEN:
 69                 free_cpumask_var(cfd->cpumask);
 70                 free_cpumask_var(cfd->cpumask_ipi);
 71                 break;
 72 #endif
 73         };
 74 
 75         return NOTIFY_OK;
 76 }
 77 
 78 static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
 79         .notifier_call          = hotplug_cfd,
 80 };
 81 
 82 void __init call_function_init(void)
 83 {
 84         void *cpu = (void *)(long)smp_processor_id();
 85         int i;
 86 
 87         for_each_possible_cpu(i) {
 88                 struct call_single_queue *q = &per_cpu(call_single_queue, i);
 89 
 90                 raw_spin_lock_init(&q->lock);
 91                 INIT_LIST_HEAD(&q->list);
 92         }
 93 
 94         hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
 95         register_cpu_notifier(&hotplug_cfd_notifier);
 96 }
 97 
 98 /*
 99  * csd_lock/csd_unlock are used to serialize access to per-cpu csd resources
100  *
101  * For non-synchronous ipi calls the csd can still be in use by the
102  * previous function call. For multi-cpu calls it's even more interesting
103  * as we'll have to ensure no other cpu is observing our csd.
104  */
105 static void csd_lock_wait(struct call_single_data *data)
106 {
107         while (data->flags & CSD_FLAG_LOCK)
108                 cpu_relax();
109 }
110 
111 static void csd_lock(struct call_single_data *data)
112 {
113         csd_lock_wait(data);
114         data->flags = CSD_FLAG_LOCK;
115 
116         /*
117          * prevent CPU from reordering the above assignment
118          * to ->flags with any subsequent assignments to other
119          * fields of the specified call_single_data structure:
120          */
121         smp_mb();
122 }
123 
124 static void csd_unlock(struct call_single_data *data)
125 {
126         WARN_ON(!(data->flags & CSD_FLAG_LOCK));
127 
128         /*
129          * ensure we're all done before releasing data:
130          */
131         smp_mb();
132 
133         data->flags &= ~CSD_FLAG_LOCK;
134 }
135 
136 /*
137  * Insert a previously allocated call_single_data element
138  * for execution on the given CPU. data must already have
139  * ->func, ->info, and ->flags set.
140  */
141 static
142 void generic_exec_single(int cpu, struct call_single_data *data, int wait)
143 {
144         struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
145         unsigned long flags;
146         int ipi;
147 
148         raw_spin_lock_irqsave(&dst->lock, flags);
149         ipi = list_empty(&dst->list);
150         list_add_tail(&data->list, &dst->list);
151         raw_spin_unlock_irqrestore(&dst->lock, flags);
152 
153         /*
154  * The list addition should be visible, before the IPI is sent, to the
155  * handler that locks the list to pull the entry off it; this ordering
156  * follows from the normal cache coherency rules implied by spinlocks.
157          *
158  * If IPIs can go out of order with respect to the cache coherency
159  * protocol in an architecture, sufficient synchronisation should be added
160          * to arch code to make it appear to obey cache coherency WRT
161          * locking and barrier primitives. Generic code isn't really
162          * equipped to do the right thing...
163          */
164         if (ipi)
165                 arch_send_call_function_single_ipi(cpu);
166 
167         if (wait)
168                 csd_lock_wait(data);
169 }
170 
171 /*
172  * Invoked by arch to handle an IPI for call function. Must be called with
173  * interrupts disabled.
174  */
175 void generic_smp_call_function_interrupt(void)
176 {
177         struct call_function_data *data;
178         int cpu = smp_processor_id();
179 
180         /*
181          * Shouldn't receive this interrupt on a cpu that is not yet online.
182          */
183         WARN_ON_ONCE(!cpu_online(cpu));
184 
185         /*
186          * Ensure entry is visible on call_function_queue after we have
187          * entered the IPI. See comment in smp_call_function_many.
188          * If we don't have this, then we may miss an entry on the list
189          * and never get another IPI to process it.
190          */
191         smp_mb();
192 
193         /*
194  * It's ok to use list_for_each_entry_rcu() here even though we may
195          * delete 'pos', since list_del_rcu() doesn't clear ->next
196          */
197         list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
198                 int refs;
199                 smp_call_func_t func;
200 
201                 /*
202                  * Since we walk the list without any locks, we might
203                  * see an entry that was completed, removed from the
204                  * list and is in the process of being reused.
205                  *
206                  * We must check that the cpu is in the cpumask before
207                  * checking the refs, and both must be set before
208                  * executing the callback on this cpu.
209                  */
210 
211                 if (!cpumask_test_cpu(cpu, data->cpumask))
212                         continue;
213 
214                 smp_rmb();
215 
216                 if (atomic_read(&data->refs) == 0)
217                         continue;
218 
219                 func = data->csd.func;          /* save for later warn */
220                 func(data->csd.info);
221 
222                 /*
223                  * If the cpu mask is not still set then func enabled
224                  * interrupts (BUG), and this cpu took another smp call
225                  * function interrupt and executed func(info) twice
226                  * on this cpu.  That nested execution decremented refs.
227                  */
228                 if (!cpumask_test_and_clear_cpu(cpu, data->cpumask)) {
229                         WARN(1, "%pf enabled interrupts and double executed\n", func);
230                         continue;
231                 }
232 
233                 refs = atomic_dec_return(&data->refs);
234                 WARN_ON(refs < 0);
235 
236                 if (refs)
237                         continue;
238 
239                 WARN_ON(!cpumask_empty(data->cpumask));
240 
241                 raw_spin_lock(&call_function.lock);
242                 list_del_rcu(&data->csd.list);
243                 raw_spin_unlock(&call_function.lock);
244 
245                 csd_unlock(&data->csd);
246         }
247 
248 }
249 
250 /*
251  * Invoked by arch to handle an IPI for call function single. Must be
252  * called from the arch with interrupts disabled.
253  */
254 void generic_smp_call_function_single_interrupt(void)
255 {
256         struct call_single_queue *q = &__get_cpu_var(call_single_queue);
257         unsigned int data_flags;
258         LIST_HEAD(list);
259 
260         /*
261          * Shouldn't receive this interrupt on a cpu that is not yet online.
262          */
263         WARN_ON_ONCE(!cpu_online(smp_processor_id()));
264 
265         raw_spin_lock(&q->lock);
266         list_replace_init(&q->list, &list);
267         raw_spin_unlock(&q->lock);
268 
269         while (!list_empty(&list)) {
270                 struct call_single_data *data;
271 
272                 data = list_entry(list.next, struct call_single_data, list);
273                 list_del(&data->list);
274 
275                 /*
276                  * 'data' can be invalid after this call if flags == 0
277                  * (when called through generic_exec_single()),
278                  * so save them away before making the call:
279                  */
280                 data_flags = data->flags;
281 
282                 data->func(data->info);
283 
284                 /*
285                  * Unlocked CSDs are valid through generic_exec_single():
286                  */
287                 if (data_flags & CSD_FLAG_LOCK)
288                         csd_unlock(data);
289         }
290 }
291 
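The two handlers above are the generic halves of the cross-CPU call path; each architecture's low-level IPI code dispatches into them with interrupts disabled. A minimal sketch of that glue, assuming a hypothetical handler name and illustrative IPI numbers (real architectures define their own and differ in detail):

    #include <linux/smp.h>

    /* Illustrative IPI numbers; not taken from any particular architecture. */
    enum { IPI_CALL_FUNC = 2, IPI_CALL_FUNC_SINGLE = 3 };

    /* Called from the low-level IPI entry code, with interrupts disabled. */
    static void handle_ipi(int ipinr)
    {
            switch (ipinr) {
            case IPI_CALL_FUNC:
                    generic_smp_call_function_interrupt();
                    break;
            case IPI_CALL_FUNC_SINGLE:
                    generic_smp_call_function_single_interrupt();
                    break;
            }
    }
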
292 static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
293 
294 /*
295  * smp_call_function_single - Run a function on a specific CPU
296  * @func: The function to run. This must be fast and non-blocking.
297  * @info: An arbitrary pointer to pass to the function.
298  * @wait: If true, wait until function has completed on other CPUs.
299  *
300  * Returns 0 on success, else a negative status code.
301  */
302 int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
303                              int wait)
304 {
305         struct call_single_data d = {
306                 .flags = 0,
307         };
308         unsigned long flags;
309         int this_cpu;
310         int err = 0;
311 
312         /*
313          * prevent preemption and reschedule on another processor,
314          * as well as CPU removal
315          */
316         this_cpu = get_cpu();
317 
318         /*
319          * Can deadlock when called with interrupts disabled.
320  * We allow cpus that are not yet online though, as no one else can
321          * send smp call function interrupt to this cpu and as such deadlocks
322          * can't happen.
323          */
324         WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
325                      && !oops_in_progress);
326 
327         if (cpu == this_cpu) {
328                 local_irq_save(flags);
329                 func(info);
330                 local_irq_restore(flags);
331         } else {
332                 if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
333                         struct call_single_data *data = &d;
334 
335                         if (!wait)
336                                 data = &__get_cpu_var(csd_data);
337 
338                         csd_lock(data);
339 
340                         data->func = func;
341                         data->info = info;
342                         generic_exec_single(cpu, data, wait);
343                 } else {
344                         err = -ENXIO;   /* CPU not online */
345                 }
346         }
347 
348         put_cpu();
349 
350         return err;
351 }
352 EXPORT_SYMBOL(smp_call_function_single);
353 
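A minimal usage sketch of smp_call_function_single() (module-style code invented for illustration, not part of this file): run a callback on CPU 1 and wait for it to finish.

    #include <linux/smp.h>
    #include <linux/printk.h>

    /* Runs on the target CPU with interrupts disabled (remotely, from the
     * IPI handler), so it must be fast and must not sleep. */
    static void record_cpu(void *info)
    {
            *(int *)info = smp_processor_id();
    }

    static void example_single(void)
    {
            int where = -1;
            int err;

            /* wait == 1: block until record_cpu() has run on CPU 1 */
            err = smp_call_function_single(1, record_cpu, &where, 1);
            if (err)
                    pr_warn("cpu 1 not online (%d)\n", err);
            else
                    pr_info("callback ran on cpu %d\n", where);
    }
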
354 /*
355  * smp_call_function_any - Run a function on any of the given cpus
356  * @mask: The mask of cpus it can run on.
357  * @func: The function to run. This must be fast and non-blocking.
358  * @info: An arbitrary pointer to pass to the function.
359  * @wait: If true, wait until function has completed.
360  *
361  * Returns 0 on success, else a negative status code (if no cpus were online).
362  * Note that @wait will be implicitly turned on in case of allocation failures,
363  * since we fall back to on-stack allocation.
364  *
365  * Selection preference:
366  *      1) current cpu if in @mask
367  *      2) any cpu of current node if in @mask
368  *      3) any other online cpu in @mask
369  */
370 int smp_call_function_any(const struct cpumask *mask,
371                           smp_call_func_t func, void *info, int wait)
372 {
373         unsigned int cpu;
374         const struct cpumask *nodemask;
375         int ret;
376 
377         /* Try for same CPU (cheapest) */
378         cpu = get_cpu();
379         if (cpumask_test_cpu(cpu, mask))
380                 goto call;
381 
382         /* Try for same node. */
383         nodemask = cpumask_of_node(cpu_to_node(cpu));
384         for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
385              cpu = cpumask_next_and(cpu, nodemask, mask)) {
386                 if (cpu_online(cpu))
387                         goto call;
388         }
389 
390         /* Any online will do: smp_call_function_single handles nr_cpu_ids. */
391         cpu = cpumask_any_and(mask, cpu_online_mask);
392 call:
393         ret = smp_call_function_single(cpu, func, info, wait);
394         put_cpu();
395         return ret;
396 }
397 EXPORT_SYMBOL_GPL(smp_call_function_any);
398 
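A short sketch of the selection order documented above, with an invented caller: run a fast callback on whichever CPU of a given NUMA node is cheapest to reach.

    #include <linux/smp.h>
    #include <linux/topology.h>

    static void poke_node(void *info)
    {
            /* fast, non-blocking work on the chosen CPU */
    }

    static int example_any(int node)
    {
            /* Prefers the current CPU if it is in the mask, then a CPU on
             * the same node, then any other online CPU in the mask. */
            return smp_call_function_any(cpumask_of_node(node),
                                         poke_node, NULL, 1);
    }
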
399 /**
400  * __smp_call_function_single(): Run a function on a specific CPU
401  * @cpu: The CPU to run on.
402  * @data: Pre-allocated and setup data structure
403  * @wait: If true, wait until function has completed on specified CPU.
404  *
405  * Like smp_call_function_single(), but allow caller to pass in a
406  * pre-allocated data structure. Useful for embedding @data inside
407  * other structures, for instance.
408  */
409 void __smp_call_function_single(int cpu, struct call_single_data *data,
410                                 int wait)
411 {
412         unsigned int this_cpu;
413         unsigned long flags;
414 
415         this_cpu = get_cpu();
416         /*
417          * Can deadlock when called with interrupts disabled.
418  * We allow cpus that are not yet online though, as no one else can
419          * send smp call function interrupt to this cpu and as such deadlocks
420          * can't happen.
421          */
422         WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
423                      && !oops_in_progress);
424 
425         if (cpu == this_cpu) {
426                 local_irq_save(flags);
427                 data->func(data->info);
428                 local_irq_restore(flags);
429         } else {
430                 csd_lock(data);
431                 generic_exec_single(cpu, data, wait);
432         }
433         put_cpu();
434 }
435 
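A hedged sketch of the embedding pattern the comment above suggests (struct, field, and function names are invented): the call_single_data lives inside a caller-owned structure and the call is made with wait == 0, so the caller must keep the structure alive until the callback has run.

    #include <linux/smp.h>

    struct remote_work {
            struct call_single_data csd;    /* queued by the helper below */
            int payload;
    };

    static void remote_work_fn(void *info)
    {
            struct remote_work *rw = info;
            /* consume rw->payload on the remote CPU, in IPI context */
    }

    static void queue_remote_work(struct remote_work *rw, int cpu)
    {
            rw->csd.func  = remote_work_fn;
            rw->csd.info  = rw;
            rw->csd.flags = 0;
            /* wait == 0: returns once the csd is queued and the IPI sent */
            __smp_call_function_single(cpu, &rw->csd, 0);
    }
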
436 /**
437  * smp_call_function_many(): Run a function on a set of other CPUs.
438  * @mask: The set of cpus to run on (only runs on online subset).
439  * @func: The function to run. This must be fast and non-blocking.
440  * @info: An arbitrary pointer to pass to the function.
441  * @wait: If true, wait (atomically) until function has completed
442  *        on other CPUs.
443  *
444  * If @wait is true, then returns once @func has returned.
445  *
446  * You must not call this function with disabled interrupts or from a
447  * hardware interrupt handler or from a bottom half handler. Preemption
448  * must be disabled when calling this function.
449  */
450 void smp_call_function_many(const struct cpumask *mask,
451                             smp_call_func_t func, void *info, bool wait)
452 {
453         struct call_function_data *data;
454         unsigned long flags;
455         int refs, cpu, next_cpu, this_cpu = smp_processor_id();
456 
457         /*
458          * Can deadlock when called with interrupts disabled.
459  * We allow cpus that are not yet online though, as no one else can
460          * send smp call function interrupt to this cpu and as such deadlocks
461          * can't happen.
462          */
463         WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
464                      && !oops_in_progress && !early_boot_irqs_disabled);
465 
466         /* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
467         cpu = cpumask_first_and(mask, cpu_online_mask);
468         if (cpu == this_cpu)
469                 cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
470 
471         /* No online cpus?  We're done. */
472         if (cpu >= nr_cpu_ids)
473                 return;
474 
475         /* Do we have another CPU which isn't us? */
476         next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
477         if (next_cpu == this_cpu)
478                 next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);
479 
480         /* Fastpath: do that cpu by itself. */
481         if (next_cpu >= nr_cpu_ids) {
482                 smp_call_function_single(cpu, func, info, wait);
483                 return;
484         }
485 
486         data = &__get_cpu_var(cfd_data);
487         csd_lock(&data->csd);
488 
489         /* This BUG_ON verifies our reuse assertions and can be removed */
490         BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask));
491 
492         /*
493          * The global call function queue list add and delete are protected
494          * by a lock, but the list is traversed without any lock, relying
495          * on the rcu list add and delete to allow safe concurrent traversal.
496          * We reuse the call function data without waiting for any grace
497          * period after some other cpu removes it from the global queue.
498          * This means a cpu might find our data block as it is being
499          * filled out.
500          *
501          * We hold off the interrupt handler on the other cpu by
502          * ordering our writes to the cpu mask vs our setting of the
503          * refs counter.  We assert only the cpu owning the data block
504          * will set a bit in cpumask, and each bit will only be cleared
505          * by the subject cpu.  Each cpu must first find its bit is
506          * set and then check that refs is set indicating the element is
507          * ready to be processed, otherwise it must skip the entry.
508          *
509          * On the previous iteration refs was set to 0 by another cpu.
510          * To avoid the use of transitivity, set the counter to 0 here
511          * so the wmb will pair with the rmb in the interrupt handler.
512          */
513         atomic_set(&data->refs, 0);     /* convert 3rd to 1st party write */
514 
515         data->csd.func = func;
516         data->csd.info = info;
517 
518         /* Ensure 0 refs is visible before mask.  Also orders func and info */
519         smp_wmb();
520 
521         /* We rely on the "and" being processed before the store */
522         cpumask_and(data->cpumask, mask, cpu_online_mask);
523         cpumask_clear_cpu(this_cpu, data->cpumask);
524         refs = cpumask_weight(data->cpumask);
525 
526         /* Some callers race with other cpus changing the passed mask */
527         if (unlikely(!refs)) {
528                 csd_unlock(&data->csd);
529                 return;
530         }
531 
532         /*
533          * After we put an entry into the list, data->cpumask
534          * may be cleared again when another CPU sends another IPI for
535  * an SMP function call, so data->cpumask will be zero.
536          */
537         cpumask_copy(data->cpumask_ipi, data->cpumask);
538         raw_spin_lock_irqsave(&call_function.lock, flags);
539         /*
540          * Place entry at the _HEAD_ of the list, so that any cpu still
541          * observing the entry in generic_smp_call_function_interrupt()
542          * will not miss any other list entries:
543          */
544         list_add_rcu(&data->csd.list, &call_function.queue);
545         /*
546          * We rely on the wmb() in list_add_rcu to complete our writes
547          * to the cpumask before this write to refs, which indicates
548          * data is on the list and is ready to be processed.
549          */
550         atomic_set(&data->refs, refs);
551         raw_spin_unlock_irqrestore(&call_function.lock, flags);
552 
553         /*
554          * Make the list addition visible before sending the ipi.
555          * (IPIs must obey or appear to obey normal Linux cache
556          * coherency rules -- see comment in generic_exec_single).
557          */
558         smp_mb();
559 
560         /* Send a message to all CPUs in the map */
561         arch_send_call_function_ipi_mask(data->cpumask_ipi);
562 
563         /* Optionally wait for the CPUs to complete */
564         if (wait)
565                 csd_lock_wait(&data->csd);
566 }
567 EXPORT_SYMBOL(smp_call_function_many);
568 
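An illustrative caller of smp_call_function_many() (callback and wrapper names invented): have every other online CPU in a mask run a flush, then cover the calling CPU directly, with preemption disabled across the whole sequence as required above.

    #include <linux/smp.h>
    #include <linux/cpumask.h>
    #include <linux/preempt.h>

    static void flush_local(void *info)
    {
            /* per-CPU work; runs remotely in IPI context */
    }

    static void example_many(const struct cpumask *mask)
    {
            preempt_disable();
            /* The calling CPU is always skipped, even if it is in @mask... */
            smp_call_function_many(mask, flush_local, NULL, true);
            /* ...so do the local share here when the mask includes it. */
            if (cpumask_test_cpu(smp_processor_id(), mask))
                    flush_local(NULL);
            preempt_enable();
    }
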
569 /**
570  * smp_call_function(): Run a function on all other CPUs.
571  * @func: The function to run. This must be fast and non-blocking.
572  * @info: An arbitrary pointer to pass to the function.
573  * @wait: If true, wait (atomically) until function has completed
574  *        on other CPUs.
575  *
576  * Returns 0.
577  *
578  * If @wait is true, then returns once @func has returned; otherwise
579  * it returns just before the target cpu calls @func.
580  *
581  * You must not call this function with disabled interrupts or from a
582  * hardware interrupt handler or from a bottom half handler.
583  */
584 int smp_call_function(smp_call_func_t func, void *info, int wait)
585 {
586         preempt_disable();
587         smp_call_function_many(cpu_online_mask, func, info, wait);
588         preempt_enable();
589 
590         return 0;
591 }
592 EXPORT_SYMBOL(smp_call_function);
593 
594 void ipi_call_lock(void)
595 {
596         raw_spin_lock(&call_function.lock);
597 }
598 
599 void ipi_call_unlock(void)
600 {
601         raw_spin_unlock(&call_function.lock);
602 }
603 
604 void ipi_call_lock_irq(void)
605 {
606         raw_spin_lock_irq(&call_function.lock);
607 }
608 
609 void ipi_call_unlock_irq(void)
610 {
611         raw_spin_unlock_irq(&call_function.lock);
612 }
613 #endif /* USE_GENERIC_SMP_HELPERS */
614 
615 /* Setup configured maximum number of CPUs to activate */
616 unsigned int setup_max_cpus = NR_CPUS;
617 EXPORT_SYMBOL(setup_max_cpus);
618 
619 
620 /*
621  * Setup routine for controlling SMP activation
622  *
623  * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
624  * activation entirely (the MPS table probe still happens, though).
625  *
626  * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
627  * greater than 0, limits the maximum number of CPUs activated in
628  * SMP mode to <NUM>.
629  */
630 
631 void __weak arch_disable_smp_support(void) { }
632 
633 static int __init nosmp(char *str)
634 {
635         setup_max_cpus = 0;
636         arch_disable_smp_support();
637 
638         return 0;
639 }
640 
641 early_param("nosmp", nosmp);
642 
643 /* this is the hard limit */
644 static int __init nrcpus(char *str)
645 {
646         int nr_cpus;
647 
648         get_option(&str, &nr_cpus);
649         if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
650                 nr_cpu_ids = nr_cpus;
651 
652         return 0;
653 }
654 
655 early_param("nr_cpus", nrcpus);
656 
657 static int __init maxcpus(char *str)
658 {
659         get_option(&str, &setup_max_cpus);
660         if (setup_max_cpus == 0)
661                 arch_disable_smp_support();
662 
663         return 0;
664 }
665 
666 early_param("maxcpus", maxcpus);
667 
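The three boot parameters handled above combine on the kernel command line roughly as follows (values are illustrative):

    nr_cpus=8 maxcpus=4    limit possible CPU ids to 8, bring up at most 4 at boot
    nosmp                  same effect as maxcpus=0: SMP activation disabled
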
668 /* Setup number of possible processor ids */
669 int nr_cpu_ids __read_mostly = NR_CPUS;
670 EXPORT_SYMBOL(nr_cpu_ids);
671 
672 /* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
673 void __init setup_nr_cpu_ids(void)
674 {
675         nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
676 }
677 
678 /* Called by boot processor to activate the rest. */
679 void __init smp_init(void)
680 {
681         unsigned int cpu;
682 
683         /* FIXME: This should be done in userspace --RR */
684         for_each_present_cpu(cpu) {
685                 if (num_online_cpus() >= setup_max_cpus)
686                         break;
687                 if (!cpu_online(cpu))
688                         cpu_up(cpu);
689         }
690 
691         /* Any cleanup work */
692         printk(KERN_INFO "Brought up %ld CPUs\n", (long)num_online_cpus());
693         smp_cpus_done(setup_max_cpus);
694 }
695 
696 /*
697  * Call a function on all processors.  May be used during early boot while
698  * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
699  * of local_irq_disable/enable().
700  */
701 int on_each_cpu(void (*func) (void *info), void *info, int wait)
702 {
703         unsigned long flags;
704         int ret = 0;
705 
706         preempt_disable();
707         ret = smp_call_function(func, info, wait);
708         local_irq_save(flags);
709         func(info);
710         local_irq_restore(flags);
711         preempt_enable();
712         return ret;
713 }
714 EXPORT_SYMBOL(on_each_cpu);
715 
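A small sketch of on_each_cpu() with an invented callback: run the same invalidation on every online CPU, including the caller, and wait for all of them to finish.

    #include <linux/smp.h>

    static void invalidate_local(void *info)
    {
            /* runs locally with interrupts off, remotely in IPI context */
    }

    static void example_all_cpus(void)
    {
            /* wait == 1: returns only after every CPU has run the callback */
            on_each_cpu(invalidate_local, NULL, 1);
    }
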
