TOMOYO Linux Cross Reference
Linux/kernel/sched/idle.c

  1 /*
  2  * Generic entry points for the idle threads and
  3  * implementation of the idle task scheduling class.
  4  *
  5  * (NOTE: these are not related to SCHED_IDLE batch-scheduled
  6  *        tasks, which are handled in sched/fair.c)
  7  */
  8 #include "sched.h"
  9 
 10 #include <trace/events/power.h>
 11 
 12 /* Linker adds these: start and end of __cpuidle functions */
 13 extern char __cpuidle_text_start[], __cpuidle_text_end[];
 14 
 15 /**
 16  * sched_idle_set_state - Record idle state for the current CPU.
 17  * @idle_state: State to record.
 18  */
 19 void sched_idle_set_state(struct cpuidle_state *idle_state)
 20 {
 21         idle_set_state(this_rq(), idle_state);
 22 }
 23 
 24 static int __read_mostly cpu_idle_force_poll;
 25 
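    /*
     * cpu_idle_poll_ctrl - reference-counted control of forced idle polling.
     * Each enable increments cpu_idle_force_poll and each disable decrements
     * it; while the count is positive the idle loop polls instead of entering
     * a cpuidle state.
     */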
 26 void cpu_idle_poll_ctrl(bool enable)
 27 {
 28         if (enable) {
 29                 cpu_idle_force_poll++;
 30         } else {
 31                 cpu_idle_force_poll--;
 32                 WARN_ON_ONCE(cpu_idle_force_poll < 0);
 33         }
 34 }
 35 
 36 #ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
 37 static int __init cpu_idle_poll_setup(char *__unused)
 38 {
 39         cpu_idle_force_poll = 1;
 40 
 41         return 1;
 42 }
 43 __setup("nohlt", cpu_idle_poll_setup);
 44 
 45 static int __init cpu_idle_nopoll_setup(char *__unused)
 46 {
 47         cpu_idle_force_poll = 0;
 48 
 49         return 1;
 50 }
 51 __setup("hlt", cpu_idle_nopoll_setup);
 52 #endif
 53 
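    /*
     * Poll-based idle: spin on cpu_relax() with interrupts enabled until a
     * reschedule is pending or neither forced polling nor an expired tick
     * broadcast requires us to keep polling.
     */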
 54 static noinline int __cpuidle cpu_idle_poll(void)
 55 {
 56         rcu_idle_enter();
 57         trace_cpu_idle_rcuidle(0, smp_processor_id());
 58         local_irq_enable();
 59         stop_critical_timings();
 60 
 61         while (!tif_need_resched() &&
 62                 (cpu_idle_force_poll || tick_check_broadcast_expired()))
 63                 cpu_relax();
 64         start_critical_timings();
 65         trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 66         rcu_idle_exit();
 67 
 68         return 1;
 69 }
 70 
 71 /* Weak implementations for optional arch-specific functions */
 72 void __weak arch_cpu_idle_prepare(void) { }
 73 void __weak arch_cpu_idle_enter(void) { }
 74 void __weak arch_cpu_idle_exit(void) { }
 75 void __weak arch_cpu_idle_dead(void) { }
 76 void __weak arch_cpu_idle(void)
 77 {
 78         cpu_idle_force_poll = 1;
 79         local_irq_enable();
 80 }
 81 
 82 /**
 83  * default_idle_call - Default CPU idle routine.
 84  *
 85  * To use when the cpuidle framework cannot be used.
 86  */
 87 void __cpuidle default_idle_call(void)
 88 {
 89         if (current_clr_polling_and_test()) {
 90                 local_irq_enable();
 91         } else {
 92                 stop_critical_timings();
 93                 arch_cpu_idle();
 94                 start_critical_timings();
 95         }
 96 }
 97 
 98 static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 99                       int next_state)
100 {
101         /*
102          * The idle task must be rescheduled, so it is pointless to go idle;
103          * just record a zero idle residency and return.
104          */
105         if (current_clr_polling_and_test()) {
106                 dev->last_residency = 0;
107                 local_irq_enable();
108                 return -EBUSY;
109         }
110 
111         /*
112          * Enter the idle state previously chosen by the governor.
113          * This function will block until an interrupt occurs and will take
114          * care of re-enabling the local interrupts.
115          */
116         return cpuidle_enter(drv, dev, next_state);
117 }
118 
119 /**
120  * cpuidle_idle_call - the main idle function
121  *
122  * NOTE: no locks or semaphores should be used here
123  *
124  * On archs that support TIF_POLLING_NRFLAG, this function is called
125  * with polling set, and it returns with polling set.  If it ever stops
126  * polling, it must clear the polling bit.
127  */
128 static void cpuidle_idle_call(void)
129 {
130         struct cpuidle_device *dev = cpuidle_get_device();
131         struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
132         int next_state, entered_state;
133 
134         /*
135          * Check if the idle task must be rescheduled. If it is the
136          * case, exit the function after re-enabling the local irq.
137          */
138         if (need_resched()) {
139                 local_irq_enable();
140                 return;
141         }
142 
143         /*
144          * The RCU framework needs to be told that we are entering an idle
145          * section, so no more RCU read-side critical sections may run and
146          * the grace period can advance one more step.
147          */
148 
149         if (cpuidle_not_available(drv, dev)) {
150                 tick_nohz_idle_stop_tick();
151                 rcu_idle_enter();
152 
153                 default_idle_call();
154                 goto exit_idle;
155         }
156 
157         /*
158          * Suspend-to-idle ("s2idle") is a system state in which all user space
159          * has been frozen, all I/O devices have been suspended and the only
160          * activity happens here and in interrupts (if any).  In that case bypass
161          * the cpuidle governor and go straight for the deepest idle state
162          * available.  Possibly also suspend the local tick and the entire
163          * timekeeping to prevent timer interrupts from kicking us out of idle
164          * until a proper wakeup interrupt happens.
165          */
166 
167         if (idle_should_enter_s2idle() || dev->use_deepest_state) {
168                 if (idle_should_enter_s2idle()) {
169                         rcu_idle_enter();
170 
171                         entered_state = cpuidle_enter_s2idle(drv, dev);
172                         if (entered_state > 0) {
173                                 local_irq_enable();
174                                 goto exit_idle;
175                         }
176 
177                         rcu_idle_exit();
178                 }
179 
180                 tick_nohz_idle_stop_tick();
181                 rcu_idle_enter();
182 
183                 next_state = cpuidle_find_deepest_state(drv, dev);
184                 call_cpuidle(drv, dev, next_state);
185         } else {
186                 bool stop_tick = true;
187 
188                 /*
189                  * Ask the cpuidle framework to choose a convenient idle state.
190                  */
191                 next_state = cpuidle_select(drv, dev, &stop_tick);
192 
193                 if (stop_tick || tick_nohz_tick_stopped())
194                         tick_nohz_idle_stop_tick();
195                 else
196                         tick_nohz_idle_retain_tick();
197 
198                 rcu_idle_enter();
199 
200                 entered_state = call_cpuidle(drv, dev, next_state);
201                 /*
202                  * Give the governor an opportunity to reflect on the outcome
203                  */
204                 cpuidle_reflect(dev, entered_state);
205         }
206 
207 exit_idle:
208         __current_set_polling();
209 
210         /*
211          * It is up to the idle functions to reenable local interrupts
212          */
213         if (WARN_ON_ONCE(irqs_disabled()))
214                 local_irq_enable();
215 
216         rcu_idle_exit();
217 }
218 
219 /*
220  * Generic idle loop implementation
221  *
222  * Called with polling cleared.
223  */
224 static void do_idle(void)
225 {
226         int cpu = smp_processor_id();
227         /*
228          * If the arch has a polling bit, we maintain an invariant:
229          *
230          * Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
231          * rq->idle). This means that, if rq->idle has the polling bit set,
232          * then setting need_resched is guaranteed to cause the CPU to
233          * reschedule.
234          */
235 
236         __current_set_polling();
237         tick_nohz_idle_enter();
238 
239         while (!need_resched()) {
240                 check_pgt_cache();
241                 rmb();
242 
243                 if (cpu_is_offline(cpu)) {
244                         tick_nohz_idle_stop_tick_protected();
245                         cpuhp_report_idle_dead();
246                         arch_cpu_idle_dead();
247                 }
248 
249                 local_irq_disable();
250                 arch_cpu_idle_enter();
251 
252                 /*
253                  * In poll mode we reenable interrupts and spin. Also, if the
254                  * wakeup-from-idle path detected that the tick broadcast
255                  * device expired for us, we don't want to go into deep idle,
256                  * as we know that the IPI is going to arrive right away.
257                  */
258                 if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
259                         tick_nohz_idle_restart_tick();
260                         cpu_idle_poll();
261                 } else {
262                         cpuidle_idle_call();
263                 }
264                 arch_cpu_idle_exit();
265         }
266 
267         /*
268          * Since we fell out of the loop above, we know TIF_NEED_RESCHED must
269          * be set, propagate it into PREEMPT_NEED_RESCHED.
270          *
271          * This is required because for polling idle loops we will not have had
272          * an IPI to fold the state for us.
273          */
274         preempt_set_need_resched();
275         tick_nohz_idle_exit();
276         __current_clr_polling();
277 
278         /*
279          * We promise to call sched_ttwu_pending() and reschedule if
280          * need_resched() is set while polling is set. That means that clearing
281          * polling needs to be visible before doing these things.
282          */
283         smp_mb__after_atomic();
284 
285         sched_ttwu_pending();
286         schedule_idle();
287 
288         if (unlikely(klp_patch_pending(current)))
289                 klp_update_patch_state(current);
290 }
291 
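    /*
     * cpu_in_idle - report whether @pc lies within the __cpuidle text section
     * delimited by the linker symbols declared at the top of this file.
     */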
292 bool cpu_in_idle(unsigned long pc)
293 {
294         return pc >= (unsigned long)__cpuidle_text_start &&
295                 pc < (unsigned long)__cpuidle_text_end;
296 }
297 
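    /*
     * Bookkeeping for play_idle(): the hrtimer callback below ends the idle
     * injection period by setting @done and flagging the current task for
     * reschedule.
     */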
298 struct idle_timer {
299         struct hrtimer timer;
300         int done;
301 };
302 
303 static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
304 {
305         struct idle_timer *it = container_of(timer, struct idle_timer, timer);
306 
307         WRITE_ONCE(it->done, 1);
308         set_tsk_need_resched(current);
309 
310         return HRTIMER_NORESTART;
311 }
312 
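    /*
     * play_idle - inject @duration_ms of idle time on the current CPU by
     * looping in do_idle(), with the deepest available idle state forced,
     * until the pinned hrtimer armed below fires.
     */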
313 void play_idle(unsigned long duration_ms)
314 {
315         struct idle_timer it;
316 
317         /*
318          * Only FIFO tasks can disable the tick since they don't need the forced
319          * preemption.
320          */
321         WARN_ON_ONCE(current->policy != SCHED_FIFO);
322         WARN_ON_ONCE(current->nr_cpus_allowed != 1);
323         WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
324         WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
325         WARN_ON_ONCE(!duration_ms);
326 
327         rcu_sleep_check();
328         preempt_disable();
329         current->flags |= PF_IDLE;
330         cpuidle_use_deepest_state(true);
331 
332         it.done = 0;
333         hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
334         it.timer.function = idle_inject_timer_fn;
335         hrtimer_start(&it.timer, ms_to_ktime(duration_ms), HRTIMER_MODE_REL_PINNED);
336 
337         while (!READ_ONCE(it.done))
338                 do_idle();
339 
340         cpuidle_use_deepest_state(false);
341         current->flags &= ~PF_IDLE;
342 
343         preempt_fold_need_resched();
344         preempt_enable();
345 }
346 EXPORT_SYMBOL_GPL(play_idle);
347 
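    /*
     * cpu_startup_entry - idle entry point for a CPU coming online: run the
     * arch preparation hook, report the hotplug state, then loop in
     * do_idle() forever.
     */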
348 void cpu_startup_entry(enum cpuhp_state state)
349 {
350         arch_cpu_idle_prepare();
351         cpuhp_online_idle(state);
352         while (1)
353                 do_idle();
354 }
355 
356 /*
357  * idle-task scheduling class.
358  */
359 
360 #ifdef CONFIG_SMP
361 static int
362 select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
363 {
364         return task_cpu(p); /* IDLE tasks are never migrated */
365 }
366 #endif
367 
368 /*
369  * Idle tasks are unconditionally rescheduled:
370  */
371 static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
372 {
373         resched_curr(rq);
374 }
375 
376 static struct task_struct *
377 pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
378 {
379         put_prev_task(rq, prev);
380         update_idle_core(rq);
381         schedstat_inc(rq->sched_goidle);
382 
383         return rq->idle;
384 }
385 
386 /*
387  * It is not legal to sleep in the idle task - print a warning
388  * message if some code attempts to do it:
389  */
390 static void
391 dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
392 {
393         raw_spin_unlock_irq(&rq->lock);
394         printk(KERN_ERR "bad: scheduling from the idle thread!\n");
395         dump_stack();
396         raw_spin_lock_irq(&rq->lock);
397 }
398 
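    /* Nothing to do when the idle task is switched out. */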
399 static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
400 {
401 }
402 
403 /*
404  * scheduler tick hitting a task of our scheduling class.
405  *
406  * NOTE: This function can be called remotely by the tick offload that
407  * goes along with full dynticks. Therefore no local assumption can be
408  * made and everything must be accessed through the @rq and @curr
409  * parameters passed in.
410  */
411 static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
412 {
413 }
414 
415 static void set_curr_task_idle(struct rq *rq)
416 {
417 }
418 
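    /*
     * Tasks are never switched to the idle class and its priority never
     * changes, so these handlers only trap misuse.
     */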
419 static void switched_to_idle(struct rq *rq, struct task_struct *p)
420 {
421         BUG();
422 }
423 
424 static void
425 prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
426 {
427         BUG();
428 }
429 
430 static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
431 {
432         return 0;
433 }
434 
435 static void update_curr_idle(struct rq *rq)
436 {
437 }
438 
439 /*
440  * Simple, special scheduling class for the per-CPU idle tasks:
441  */
442 const struct sched_class idle_sched_class = {
443         /* .next is NULL */
444         /* no enqueue/yield_task for idle tasks */
445 
446         /* dequeue is not valid, we print a debug message there: */
447         .dequeue_task           = dequeue_task_idle,
448 
449         .check_preempt_curr     = check_preempt_curr_idle,
450 
451         .pick_next_task         = pick_next_task_idle,
452         .put_prev_task          = put_prev_task_idle,
453 
454 #ifdef CONFIG_SMP
455         .select_task_rq         = select_task_rq_idle,
456         .set_cpus_allowed       = set_cpus_allowed_common,
457 #endif
458 
459         .set_curr_task          = set_curr_task_idle,
460         .task_tick              = task_tick_idle,
461 
462         .get_rr_interval        = get_rr_interval_idle,
463 
464         .prio_changed           = prio_changed_idle,
465         .switched_to            = switched_to_idle,
466         .update_curr            = update_curr_idle,
467 };
468 
