TOMOYO Linux Cross Reference
Linux/kernel/cpu.c

/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <trace/events/power.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
        mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
        mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);
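/*
 * Note: the two EXPORT_SYMBOL() lines above are not a mismatch.  In this
 * kernel series, include/linux/cpu.h aliases the callback-registration
 * API to the map-update API, roughly:
 *
 *      #define cpu_notifier_register_begin     cpu_maps_update_begin
 *      #define cpu_notifier_register_done      cpu_maps_update_done
 *
 * so each EXPORT_SYMBOL() above expands to an export of the function it
 * follows.
 */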

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock.
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
        struct task_struct *active_writer;
        /* wait queue to wake up the active_writer */
        wait_queue_head_t wq;
        /* verifies that no writer will get active while readers are active */
        struct mutex lock;
        /*
         * Also blocks the new readers during
         * an ongoing cpu hotplug operation.
         */
        atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
        .active_writer = NULL,
        .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
        .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        .dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
                                  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)

void get_online_cpus(void)
{
        might_sleep();
        if (cpu_hotplug.active_writer == current)
                return;
        cpuhp_lock_acquire_read();
        mutex_lock(&cpu_hotplug.lock);
        atomic_inc(&cpu_hotplug.refcount);
        mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

bool try_get_online_cpus(void)
{
        if (cpu_hotplug.active_writer == current)
                return true;
        if (!mutex_trylock(&cpu_hotplug.lock))
                return false;
        cpuhp_lock_acquire_tryread();
        atomic_inc(&cpu_hotplug.refcount);
        mutex_unlock(&cpu_hotplug.lock);
        return true;
}
EXPORT_SYMBOL_GPL(try_get_online_cpus);

void put_online_cpus(void)
{
        int refcount;

        if (cpu_hotplug.active_writer == current)
                return;

        refcount = atomic_dec_return(&cpu_hotplug.refcount);
        if (WARN_ON(refcount < 0)) /* try to fix things up */
                atomic_inc(&cpu_hotplug.refcount);

        if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
                wake_up(&cpu_hotplug.wq);

        cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);
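/*
 * A minimal reader-side sketch (hypothetical caller, not part of this
 * file): code that walks cpu_online_mask and must not race with a
 * concurrent hotplug operation brackets the walk with
 * get_online_cpus()/put_online_cpus().
 */
static void example_count_online_cpus(void)
{
        unsigned int cpu, n = 0;

        get_online_cpus();      /* may sleep; holds off a new writer */
        for_each_online_cpu(cpu)
                n++;
        put_online_cpus();      /* drop the refcount, wake a waiting writer */
        pr_info("saw %u online CPUs\n", n);
}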

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice, since
 * get_online_cpus() is not an API that is called all that often.
 */
void cpu_hotplug_begin(void)
{
        DEFINE_WAIT(wait);

        cpu_hotplug.active_writer = current;
        cpuhp_lock_acquire();

        for (;;) {
                mutex_lock(&cpu_hotplug.lock);
                prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
                if (likely(!atomic_read(&cpu_hotplug.refcount)))
                        break;
                mutex_unlock(&cpu_hotplug.lock);
                schedule();
        }
        finish_wait(&cpu_hotplug.wq, &wait);
}

void cpu_hotplug_done(void)
{
        cpu_hotplug.active_writer = NULL;
        mutex_unlock(&cpu_hotplug.lock);
        cpuhp_lock_release();
}
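/*
 * For reference, the writer side is always used in the fixed pattern
 * below; this is a simplified sketch of what cpu_down()/_cpu_down() and
 * cpu_up()/_cpu_up() later in this file actually do:
 *
 *      cpu_maps_update_begin();        // one writer at a time
 *      cpu_hotplug_begin();            // wait for refcount to hit zero
 *      ... modify cpu_online_mask, run the notifier chain ...
 *      cpu_hotplug_done();
 *      cpu_maps_update_done();
 */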

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 1;
        cpu_maps_update_done();
}

void cpu_hotplug_enable(void)
{
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 0;
        cpu_maps_update_done();
}

#endif  /* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
        int ret;

        cpu_maps_update_begin();
        ret = raw_notifier_chain_register(&cpu_chain, nb);
        cpu_maps_update_done();
        return ret;
}

int __ref __register_cpu_notifier(struct notifier_block *nb)
{
        return raw_notifier_chain_register(&cpu_chain, nb);
}
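/*
 * A minimal sketch of a notifier client (hypothetical names, not part of
 * this file).  The hcpu argument carries the CPU number cast through a
 * pointer, matching the (void *)(long)cpu casts below, and CPU_TASKS_FROZEN
 * must be masked off to also catch the _FROZEN variant of each event.
 */
static int example_cpu_callback(struct notifier_block *nb,
                                unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
                pr_info("example: cpu %u is up\n", cpu);
                break;
        case CPU_DOWN_PREPARE:
                /* returning notifier_from_errno(-EAGAIN) would veto the down */
                break;
        case CPU_DEAD:
                pr_info("example: cpu %u is gone\n", cpu);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block example_cpu_nb = {
        .notifier_call = example_cpu_callback,
};

/* in init code: register_cpu_notifier(&example_cpu_nb); */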

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
                        int *nr_calls)
{
        int ret;

        ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
                                        nr_calls);

        return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
        return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
        BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
        cpu_maps_update_begin();
        raw_notifier_chain_unregister(&cpu_chain, nb);
        cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __ref __unregister_cpu_notifier(struct notifier_block *nb)
{
        raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
        struct task_struct *p;

        /*
         * This function is called after the cpu is taken down and marked
         * offline, so it's not like new tasks will ever get this cpu set in
         * their mm mask. -- Peter Zijlstra
         * Thus, we may use rcu_read_lock() here, instead of grabbing
         * full-fledged tasklist_lock.
         */
        WARN_ON(cpu_online(cpu));
        rcu_read_lock();
        for_each_process(p) {
                struct task_struct *t;

                /*
                 * Main thread might exit, but other threads may still have
                 * a valid mm. Find one.
                 */
                t = find_lock_task_mm(p);
                if (!t)
                        continue;
                cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
                task_unlock(t);
        }
        rcu_read_unlock();
}

static inline void check_for_tasks(int dead_cpu)
{
        struct task_struct *g, *p;

        read_lock_irq(&tasklist_lock);
        do_each_thread(g, p) {
                if (!p->on_rq)
                        continue;
                /*
                 * We do the check with unlocked task_rq(p)->lock.
                 * Order the reading so that we do not warn about a task
                 * which was running on this cpu in the past and has just
                 * been woken on another cpu.
                 */
                rmb();
                if (task_cpu(p) != dead_cpu)
                        continue;

                pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
                        p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
        } while_each_thread(g, p);
        read_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
        unsigned long mod;
        void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
        struct take_cpu_down_param *param = _param;
        int err;

        /* Ensure this CPU doesn't handle any more interrupts. */
        err = __cpu_disable();
        if (err < 0)
                return err;

        cpu_notify(CPU_DYING | param->mod, param->hcpu);
        /* Park the stopper thread */
        kthread_park(current);
        return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
        int err, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
        struct take_cpu_down_param tcd_param = {
                .mod = mod,
                .hcpu = hcpu,
        };

        if (num_online_cpus() == 1)
                return -EBUSY;

        if (!cpu_online(cpu))
                return -EINVAL;

        cpu_hotplug_begin();

        err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
        if (err) {
                nr_calls--;
                __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
                pr_warn("%s: attempt to take down CPU %u failed\n",
                        __func__, cpu);
                goto out_release;
        }

        /*
         * By now we've cleared cpu_active_mask, so wait for all
         * preempt-disabled and RCU users of this state to go away, such that
         * all new such users will observe it.
         *
         * For CONFIG_PREEMPT we have preemptible RCU and its synchronize_rcu()
         * might not imply synchronize_sched(), so explicitly call both.
         *
         * Do the sync before parking the smpboot threads to take care of the
         * RCU boost case.
         */
#ifdef CONFIG_PREEMPT
        synchronize_sched();
#endif
        synchronize_rcu();

        smpboot_park_threads(cpu);

        /*
         * So now all preempt/rcu users must observe !cpu_active().
         */

        err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
        if (err) {
                /* CPU didn't die: tell everyone.  Can't complain. */
                smpboot_unpark_threads(cpu);
                cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
                goto out_release;
        }
        BUG_ON(cpu_online(cpu));

        /*
         * The migration_call() CPU_DYING callback will have removed all
         * runnable tasks from the cpu, there's only the idle task left now
         * that the migration thread is done doing the stop_machine thing.
         *
         * Wait for the stop thread to go away.
         */
        while (!idle_cpu(cpu))
                cpu_relax();

        /* This actually kills the CPU. */
        __cpu_die(cpu);

        /* CPU is completely dead: tell everyone.  Too late to complain. */
        cpu_notify_nofail(CPU_DEAD | mod, hcpu);

        check_for_tasks(cpu);

out_release:
        cpu_hotplug_done();
        if (!err)
                cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
        return err;
}

int __ref cpu_down(unsigned int cpu)
{
        int err;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_down(cpu, 0);

out:
        cpu_maps_update_done();
        return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /* CONFIG_HOTPLUG_CPU */

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
        int ret, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
        struct task_struct *idle;

        cpu_hotplug_begin();

        if (cpu_online(cpu) || !cpu_present(cpu)) {
                ret = -EINVAL;
                goto out;
        }

        idle = idle_thread_get(cpu);
        if (IS_ERR(idle)) {
                ret = PTR_ERR(idle);
                goto out;
        }

        ret = smpboot_create_threads(cpu);
        if (ret)
                goto out;

        ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
        if (ret) {
                nr_calls--;
                pr_warn("%s: attempt to bring up CPU %u failed\n",
                        __func__, cpu);
                goto out_notify;
        }

        /* Arch-specific enabling code. */
        ret = __cpu_up(cpu, idle);
        if (ret != 0)
                goto out_notify;
        BUG_ON(!cpu_online(cpu));

        /* Wake the per-cpu threads */
        smpboot_unpark_threads(cpu);

        /* Tell everyone the CPU is online. */
        cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
        if (ret != 0)
                __cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
        cpu_hotplug_done();

        return ret;
}

int cpu_up(unsigned int cpu)
{
        int err = 0;

        if (!cpu_possible(cpu)) {
                pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
                       cpu);
#if defined(CONFIG_IA64)
                pr_err("please check additional_cpus= boot parameter\n");
#endif
                return -EINVAL;
        }

        err = try_online_node(cpu_to_node(cpu));
        if (err)
                return err;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_up(cpu, 0);

out:
        cpu_maps_update_done();
        return err;
}
EXPORT_SYMBOL_GPL(cpu_up);
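/*
 * cpu_up() and cpu_down() are the entry points behind the sysfs "online"
 * attribute (writing 0 or 1 to /sys/devices/system/cpu/cpuN/online).  A
 * simplified sketch of a hypothetical in-kernel caller:
 */
static int example_cycle_cpu(unsigned int cpu)
{
        int err;

        err = cpu_down(cpu);    /* -EBUSY if hotplug is disabled or last CPU */
        if (err)
                return err;
        return cpu_up(cpu);
}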

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
        int cpu, first_cpu, error = 0;

        cpu_maps_update_begin();
        first_cpu = cpumask_first(cpu_online_mask);
        /*
         * We take down all of the non-boot CPUs in one shot to avoid races
         * with userspace trying to use CPU hotplug at the same time.
         */
        cpumask_clear(frozen_cpus);

        pr_info("Disabling non-boot CPUs ...\n");
        for_each_online_cpu(cpu) {
                if (cpu == first_cpu)
                        continue;
                trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
                error = _cpu_down(cpu, 1);
                trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
                if (!error)
                        cpumask_set_cpu(cpu, frozen_cpus);
                else {
                        pr_err("Error taking CPU%d down: %d\n", cpu, error);
                        break;
                }
        }

        if (!error) {
                BUG_ON(num_online_cpus() > 1);
                /* Make sure the CPUs won't be enabled by someone else */
                cpu_hotplug_disabled = 1;
        } else {
                pr_err("Non-boot CPUs are not disabled\n");
        }
        cpu_maps_update_done();
        return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
        int cpu, error;

        /* Allow everyone to use the CPU hotplug again */
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 0;
        if (cpumask_empty(frozen_cpus))
                goto out;

        pr_info("Enabling non-boot CPUs ...\n");

        arch_enable_nonboot_cpus_begin();

        for_each_cpu(cpu, frozen_cpus) {
                trace_suspend_resume(TPS("CPU_ON"), cpu, true);
                error = _cpu_up(cpu, 1);
                trace_suspend_resume(TPS("CPU_ON"), cpu, false);
                if (!error) {
                        pr_info("CPU%d is up\n", cpu);
                        continue;
                }
                pr_warn("Error taking CPU%d up: %d\n", cpu, error);
        }

        arch_enable_nonboot_cpus_end();

        cpumask_clear(frozen_cpus);
out:
        cpu_maps_update_done();
}
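/*
 * A simplified sketch of how the suspend path (see kernel/power/) pairs
 * these two calls; the real sequencing carries additional error handling:
 *
 *      error = disable_nonboot_cpus();
 *      if (!error) {
 *              ... enter the sleep state and resume ...
 *              enable_nonboot_cpus();
 *      }
 */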

static int __init alloc_frozen_cpus(void)
{
        if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
                return -ENOMEM;
        return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
                        unsigned long action, void *ptr)
{
        switch (action) {

        case PM_SUSPEND_PREPARE:
        case PM_HIBERNATION_PREPARE:
                cpu_hotplug_disable();
                break;

        case PM_POST_SUSPEND:
        case PM_POST_HIBERNATION:
                cpu_hotplug_enable();
                break;

        default:
                return NOTIFY_DONE;
        }

        return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
        /*
         * cpu_hotplug_pm_callback runs at a higher priority than the x86
         * bsp_pm_callback, which relies on cpu_hotplug_pm_callback having
         * already disabled CPU hotplug, to avoid hotplug races.
         */
        pm_notifier(cpu_hotplug_pm_callback, 0);
        return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
        unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
        if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
                val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
        cpu_notify(val, (void *)(long)cpu);
}
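/*
 * An illustrative sketch (names hypothetical) of where arch code calls
 * notify_cpu_starting(), modeled on typical start_secondary() paths:
 */
static void example_start_secondary(void)
{
        /* ... low-level per-cpu setup, interrupts still disabled ... */
        notify_cpu_starting(smp_processor_id());
        set_cpu_online(smp_processor_id(), true);
        local_irq_enable();
        /* ... enter the idle loop ... */
}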

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents the NR_CPUS-bit value 1<<nr for every bit number nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)       [x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)       MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)       MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)       MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

        MASK_DECLARE_8(0),      MASK_DECLARE_8(8),
        MASK_DECLARE_8(16),     MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
        MASK_DECLARE_8(32),     MASK_DECLARE_8(40),
        MASK_DECLARE_8(48),     MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
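/*
 * A sketch mirroring get_cpu_mask() in include/linux/cpumask.h, which is
 * what cpumask_of(cpu) uses to return a pointer into the table above: the
 * row is picked by the bit position within a word, and the pointer then
 * steps back by the word index.  The words it backs into are all zero,
 * and the empty row 0 exists precisely so that row 1 has zeros behind it.
 */
static inline const struct cpumask *example_cpumask_of(unsigned int cpu)
{
        const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];

        p -= cpu / BITS_PER_LONG;       /* back into the preceding zero words */
        return to_cpumask(p);
}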

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
        = CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
        if (possible)
                cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
        if (present)
                cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
        if (online) {
                cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
                cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
        } else {
                cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
        }
}

void set_cpu_active(unsigned int cpu, bool active)
{
        if (active)
                cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_online_bits), src);
}
