
TOMOYO Linux Cross Reference
Linux/kernel/rcu/tree_plugin.h


  1 /*
  2  * Read-Copy Update mechanism for mutual exclusion (tree-based version)
  3  * Internal non-public definitions that provide either classic
  4  * or preemptible semantics.
  5  *
  6  * This program is free software; you can redistribute it and/or modify
  7  * it under the terms of the GNU General Public License as published by
  8  * the Free Software Foundation; either version 2 of the License, or
  9  * (at your option) any later version.
 10  *
 11  * This program is distributed in the hope that it will be useful,
 12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
 13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 14  * GNU General Public License for more details.
 15  *
 16  * You should have received a copy of the GNU General Public License
 17  * along with this program; if not, you can access it online at
 18  * http://www.gnu.org/licenses/gpl-2.0.html.
 19  *
 20  * Copyright Red Hat, 2009
 21  * Copyright IBM Corporation, 2009
 22  *
 23  * Author: Ingo Molnar <mingo@elte.hu>
 24  *         Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 25  */
 26 
 27 #include <linux/delay.h>
 28 #include <linux/gfp.h>
 29 #include <linux/oom.h>
 30 #include <linux/sched/debug.h>
 31 #include <linux/smpboot.h>
 32 #include <uapi/linux/sched/types.h>
 33 #include "../time/tick-internal.h"
 34 
 35 #ifdef CONFIG_RCU_BOOST
 36 
 37 #include "../locking/rtmutex_common.h"
 38 
 39 /*
 40  * Control variables for per-CPU and per-rcu_node kthreads.  These
 41  * handle all flavors of RCU.
 42  */
 43 static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
 44 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
 45 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
 46 DEFINE_PER_CPU(char, rcu_cpu_has_work);
 47 
 48 #else /* #ifdef CONFIG_RCU_BOOST */
 49 
 50 /*
 51  * Some architectures do not define rt_mutexes, but if !CONFIG_RCU_BOOST,
 52  * all uses are in dead code.  Provide a definition to keep the compiler
 53  * happy, but add WARN_ON_ONCE() to complain if used in the wrong place.
 54  * This probably needs to be excluded from -rt builds.
 55  */
 56 #define rt_mutex_owner(a) ({ WARN_ON_ONCE(1); NULL; })
 57 
 58 #endif /* #else #ifdef CONFIG_RCU_BOOST */
 59 
 60 #ifdef CONFIG_RCU_NOCB_CPU
 61 static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
 62 static bool have_rcu_nocb_mask;     /* Was rcu_nocb_mask allocated? */
 63 static bool __read_mostly rcu_nocb_poll;    /* Offload kthreads are to poll. */
 64 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
 65 
 66 /*
 67  * Check the RCU kernel configuration parameters and print informative
 68  * messages about anything out of the ordinary.
 69  */
 70 static void __init rcu_bootup_announce_oddness(void)
 71 {
 72         if (IS_ENABLED(CONFIG_RCU_TRACE))
 73                 pr_info("\tRCU event tracing is enabled.\n");
 74         if ((IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 64) ||
 75             (!IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 32))
 76                 pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
 77                        RCU_FANOUT);
 78         if (rcu_fanout_exact)
 79                 pr_info("\tHierarchical RCU autobalancing is disabled.\n");
 80         if (IS_ENABLED(CONFIG_RCU_FAST_NO_HZ))
 81                 pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n");
 82         if (IS_ENABLED(CONFIG_PROVE_RCU))
 83                 pr_info("\tRCU lockdep checking is enabled.\n");
 84         if (RCU_NUM_LVLS >= 4)
 85                 pr_info("\tFour(or more)-level hierarchy is enabled.\n");
 86         if (RCU_FANOUT_LEAF != 16)
 87                 pr_info("\tBuild-time adjustment of leaf fanout to %d.\n",
 88                         RCU_FANOUT_LEAF);
 89         if (rcu_fanout_leaf != RCU_FANOUT_LEAF)
 90                 pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
 91         if (nr_cpu_ids != NR_CPUS)
 92                 pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%u.\n", NR_CPUS, nr_cpu_ids);
 93 #ifdef CONFIG_RCU_BOOST
 94         pr_info("\tRCU priority boosting: priority %d delay %d ms.\n", kthread_prio, CONFIG_RCU_BOOST_DELAY);
 95 #endif
 96         if (blimit != DEFAULT_RCU_BLIMIT)
 97                 pr_info("\tBoot-time adjustment of callback invocation limit to %ld.\n", blimit);
 98         if (qhimark != DEFAULT_RCU_QHIMARK)
 99                 pr_info("\tBoot-time adjustment of callback high-water mark to %ld.\n", qhimark);
100         if (qlowmark != DEFAULT_RCU_QLOMARK)
101                 pr_info("\tBoot-time adjustment of callback low-water mark to %ld.\n", qlowmark);
102         if (jiffies_till_first_fqs != ULONG_MAX)
103                 pr_info("\tBoot-time adjustment of first FQS scan delay to %ld jiffies.\n", jiffies_till_first_fqs);
104         if (jiffies_till_next_fqs != ULONG_MAX)
105                 pr_info("\tBoot-time adjustment of subsequent FQS scan delay to %ld jiffies.\n", jiffies_till_next_fqs);
106         if (rcu_kick_kthreads)
107                 pr_info("\tKick kthreads if too-long grace period.\n");
108         if (IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD))
109                 pr_info("\tRCU callback double-/use-after-free debug enabled.\n");
110         if (gp_preinit_delay)
111                 pr_info("\tRCU debug GP pre-init slowdown %d jiffies.\n", gp_preinit_delay);
112         if (gp_init_delay)
113                 pr_info("\tRCU debug GP init slowdown %d jiffies.\n", gp_init_delay);
114         if (gp_cleanup_delay)
115                 pr_info("\tRCU debug GP cleanup slowdown %d jiffies.\n", gp_cleanup_delay);
116         if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG))
117                 pr_info("\tRCU debug extended QS entry/exit.\n");
118         rcupdate_announce_bootup_oddness();
119 }
120 
121 #ifdef CONFIG_PREEMPT_RCU
122 
123 RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
124 static struct rcu_state *const rcu_state_p = &rcu_preempt_state;
125 static struct rcu_data __percpu *const rcu_data_p = &rcu_preempt_data;
126 
127 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
128                                bool wake);
129 
130 /*
131  * Tell them what RCU they are running.
132  */
133 static void __init rcu_bootup_announce(void)
134 {
135         pr_info("Preemptible hierarchical RCU implementation.\n");
136         rcu_bootup_announce_oddness();
137 }
138 
139 /* Flags for rcu_preempt_ctxt_queue() decision table. */
140 #define RCU_GP_TASKS    0x8
141 #define RCU_EXP_TASKS   0x4
142 #define RCU_GP_BLKD     0x2
143 #define RCU_EXP_BLKD    0x1
144 
145 /*
146  * Queues a task preempted within an RCU-preempt read-side critical
147  * section into the appropriate location within the ->blkd_tasks list,
148  * depending on the states of any ongoing normal and expedited grace
149  * periods.  The ->gp_tasks pointer indicates which element the normal
150  * grace period is waiting on (NULL if none), and the ->exp_tasks pointer
151  * indicates which element the expedited grace period is waiting on (again,
152  * NULL if none).  If a grace period is waiting on a given element in the
153  * ->blkd_tasks list, it also waits on all subsequent elements.  Thus,
154  * adding a task to the tail of the list blocks any grace period that is
155  * already waiting on one of the elements.  In contrast, adding a task
156  * to the head of the list won't block any grace period that is already
157  * waiting on one of the elements.
158  *
159  * This queuing is imprecise, and can sometimes make an ongoing grace
160  * period wait for a task that is not strictly speaking blocking it.
161  * Given the choice, we needlessly block a normal grace period rather than
162  * blocking an expedited grace period.
163  *
164  * Note that an endless sequence of expedited grace periods still cannot
165  * indefinitely postpone a normal grace period.  Eventually, all of the
166  * fixed number of preempted tasks blocking the normal grace period that are
167  * not also blocking the expedited grace period will resume and complete
168  * their RCU read-side critical sections.  At that point, the ->gp_tasks
169  * pointer will equal the ->exp_tasks pointer, at which point the end of
170  * the corresponding expedited grace period will also be the end of the
171  * normal grace period.
172  */
173 static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
 174         __releases(rnp->lock) /* But leaves interrupts disabled. */
175 {
176         int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) +
177                          (rnp->exp_tasks ? RCU_EXP_TASKS : 0) +
178                          (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) +
179                          (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0);
180         struct task_struct *t = current;
181 
182         lockdep_assert_held(&rnp->lock);
183         WARN_ON_ONCE(rdp->mynode != rnp);
184         WARN_ON_ONCE(rnp->level != rcu_num_lvls - 1);
185 
186         /*
187          * Decide where to queue the newly blocked task.  In theory,
188          * this could be an if-statement.  In practice, when I tried
189          * that, it was quite messy.
190          */
191         switch (blkd_state) {
192         case 0:
193         case                RCU_EXP_TASKS:
194         case                RCU_EXP_TASKS + RCU_GP_BLKD:
195         case RCU_GP_TASKS:
196         case RCU_GP_TASKS + RCU_EXP_TASKS:
197 
198                 /*
199                  * Blocking neither GP, or first task blocking the normal
200                  * GP but not blocking the already-waiting expedited GP.
201                  * Queue at the head of the list to avoid unnecessarily
202                  * blocking the already-waiting GPs.
203                  */
204                 list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
205                 break;
206 
207         case                                              RCU_EXP_BLKD:
208         case                                RCU_GP_BLKD:
209         case                                RCU_GP_BLKD + RCU_EXP_BLKD:
210         case RCU_GP_TASKS +                               RCU_EXP_BLKD:
211         case RCU_GP_TASKS +                 RCU_GP_BLKD + RCU_EXP_BLKD:
212         case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
213 
214                 /*
215                  * First task arriving that blocks either GP, or first task
216                  * arriving that blocks the expedited GP (with the normal
217                  * GP already waiting), or a task arriving that blocks
218                  * both GPs with both GPs already waiting.  Queue at the
219                  * tail of the list to avoid any GP waiting on any of the
220                  * already queued tasks that are not blocking it.
221                  */
222                 list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks);
223                 break;
224 
225         case                RCU_EXP_TASKS +               RCU_EXP_BLKD:
226         case                RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
227         case RCU_GP_TASKS + RCU_EXP_TASKS +               RCU_EXP_BLKD:
228 
229                 /*
230                  * Second or subsequent task blocking the expedited GP.
231                  * The task either does not block the normal GP, or is the
232                  * first task blocking the normal GP.  Queue just after
233                  * the first task blocking the expedited GP.
234                  */
235                 list_add(&t->rcu_node_entry, rnp->exp_tasks);
236                 break;
237 
238         case RCU_GP_TASKS +                 RCU_GP_BLKD:
239         case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD:
240 
241                 /*
242                  * Second or subsequent task blocking the normal GP.
243                  * The task does not block the expedited GP. Queue just
244                  * after the first task blocking the normal GP.
245                  */
246                 list_add(&t->rcu_node_entry, rnp->gp_tasks);
247                 break;
248 
249         default:
250 
251                 /* Yet another exercise in excessive paranoia. */
252                 WARN_ON_ONCE(1);
253                 break;
254         }
255 
256         /*
257          * We have now queued the task.  If it was the first one to
258          * block either grace period, update the ->gp_tasks and/or
259          * ->exp_tasks pointers, respectively, to reference the newly
260          * blocked tasks.
261          */
262         if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD))
263                 rnp->gp_tasks = &t->rcu_node_entry;
264         if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
265                 rnp->exp_tasks = &t->rcu_node_entry;
266         WARN_ON_ONCE(!(blkd_state & RCU_GP_BLKD) !=
267                      !(rnp->qsmask & rdp->grpmask));
268         WARN_ON_ONCE(!(blkd_state & RCU_EXP_BLKD) !=
269                      !(rnp->expmask & rdp->grpmask));
270         raw_spin_unlock_rcu_node(rnp); /* interrupts remain disabled. */
271 
272         /*
273          * Report the quiescent state for the expedited GP.  This expedited
274          * GP should not be able to end until we report, so there should be
275          * no need to check for a subsequent expedited GP.  (Though we are
276          * still in a quiescent state in any case.)
277          */
278         if (blkd_state & RCU_EXP_BLKD &&
279             t->rcu_read_unlock_special.b.exp_need_qs) {
280                 t->rcu_read_unlock_special.b.exp_need_qs = false;
281                 rcu_report_exp_rdp(rdp->rsp, rdp, true);
282         } else {
283                 WARN_ON_ONCE(t->rcu_read_unlock_special.b.exp_need_qs);
284         }
285 }
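
As a rough illustration of the decision table above, here is a minimal userspace sketch (not kernel code; the rcu_node/rcu_data context is omitted) that composes the same four flag bits and reports which of the four queuing actions each state selects. The flag values and case groupings mirror the switch statement in rcu_preempt_ctxt_queue(); everything else is illustrative only.

    #include <stdio.h>

    #define RCU_GP_TASKS    0x8
    #define RCU_EXP_TASKS   0x4
    #define RCU_GP_BLKD     0x2
    #define RCU_EXP_BLKD    0x1

    /* Map a blkd_state value to the queuing action chosen above. */
    static const char *queue_position(int blkd_state)
    {
            switch (blkd_state) {
            case 0:
            case RCU_EXP_TASKS:
            case RCU_EXP_TASKS + RCU_GP_BLKD:
            case RCU_GP_TASKS:
            case RCU_GP_TASKS + RCU_EXP_TASKS:
                    return "head of ->blkd_tasks";
            case RCU_EXP_BLKD:
            case RCU_GP_BLKD:
            case RCU_GP_BLKD + RCU_EXP_BLKD:
            case RCU_GP_TASKS + RCU_EXP_BLKD:
            case RCU_GP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
            case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
                    return "tail of ->blkd_tasks";
            case RCU_EXP_TASKS + RCU_EXP_BLKD:
            case RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
            case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_EXP_BLKD:
                    return "just after ->exp_tasks";
            case RCU_GP_TASKS + RCU_GP_BLKD:
            case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD:
                    return "just after ->gp_tasks";
            default:
                    return "unreachable (WARN_ON_ONCE)";
            }
    }

    int main(void)
    {
            for (int state = 0; state <= 0xf; state++)
                    printf("%#x -> %s\n", state, queue_position(state));
            return 0;
    }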
286 
287 /*
288  * Record a preemptible-RCU quiescent state for the specified CPU.  Note
289  * that this just means that the task currently running on the CPU is
290  * not in a quiescent state.  There might be any number of tasks blocked
291  * while in an RCU read-side critical section.
292  *
293  * As with the other rcu_*_qs() functions, callers to this function
294  * must disable preemption.
295  */
296 static void rcu_preempt_qs(void)
297 {
298         RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_qs() invoked with preemption enabled!!!\n");
299         if (__this_cpu_read(rcu_data_p->cpu_no_qs.s)) {
300                 trace_rcu_grace_period(TPS("rcu_preempt"),
301                                        __this_cpu_read(rcu_data_p->gpnum),
302                                        TPS("cpuqs"));
303                 __this_cpu_write(rcu_data_p->cpu_no_qs.b.norm, false);
304                 barrier(); /* Coordinate with rcu_preempt_check_callbacks(). */
305                 current->rcu_read_unlock_special.b.need_qs = false;
306         }
307 }
308 
309 /*
310  * We have entered the scheduler, and the current task might soon be
311  * context-switched away from.  If this task is in an RCU read-side
312  * critical section, we will no longer be able to rely on the CPU to
313  * record that fact, so we enqueue the task on the blkd_tasks list.
314  * The task will dequeue itself when it exits the outermost enclosing
315  * RCU read-side critical section.  Therefore, the current grace period
316  * cannot be permitted to complete until the blkd_tasks list entries
317  * predating the current grace period drain, in other words, until
318  * rnp->gp_tasks becomes NULL.
319  *
320  * Caller must disable interrupts.
321  */
322 static void rcu_preempt_note_context_switch(bool preempt)
323 {
324         struct task_struct *t = current;
325         struct rcu_data *rdp;
326         struct rcu_node *rnp;
327 
328         RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_preempt_note_context_switch() invoked with interrupts enabled!!!\n");
329         WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0);
330         if (t->rcu_read_lock_nesting > 0 &&
331             !t->rcu_read_unlock_special.b.blocked) {
332 
333                 /* Possibly blocking in an RCU read-side critical section. */
334                 rdp = this_cpu_ptr(rcu_state_p->rda);
335                 rnp = rdp->mynode;
336                 raw_spin_lock_rcu_node(rnp);
337                 t->rcu_read_unlock_special.b.blocked = true;
338                 t->rcu_blocked_node = rnp;
339 
340                 /*
341                  * Verify the CPU's sanity, trace the preemption, and
342                  * then queue the task as required based on the states
343                  * of any ongoing and expedited grace periods.
344                  */
345                 WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0);
346                 WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
347                 trace_rcu_preempt_task(rdp->rsp->name,
348                                        t->pid,
349                                        (rnp->qsmask & rdp->grpmask)
350                                        ? rnp->gpnum
351                                        : rnp->gpnum + 1);
352                 rcu_preempt_ctxt_queue(rnp, rdp);
353         } else if (t->rcu_read_lock_nesting < 0 &&
354                    t->rcu_read_unlock_special.s) {
355 
356                 /*
357                  * Complete exit from RCU read-side critical section on
358                  * behalf of preempted instance of __rcu_read_unlock().
359                  */
360                 rcu_read_unlock_special(t);
361         }
362 
363         /*
364          * Either we were not in an RCU read-side critical section to
365          * begin with, or we have now recorded that critical section
366          * globally.  Either way, we can now note a quiescent state
367          * for this CPU.  Again, if we were in an RCU read-side critical
368          * section, and if that critical section was blocking the current
369          * grace period, then the fact that the task has been enqueued
370          * means that we continue to block the current grace period.
371          */
372         rcu_preempt_qs();
373 }
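
For context, the queuing path above only matters when a task is preempted while inside an RCU read-side critical section. A minimal sketch of such a reader follows (assuming the usual <linux/rcupdate.h> include); "struct gizmo" and "gp" are hypothetical names, but the rcu_read_lock()/rcu_dereference()/rcu_read_unlock() pattern is the standard one that, under CONFIG_PREEMPT_RCU, can be preempted and thereby reach rcu_preempt_note_context_switch().

    /* Hypothetical reader; names are illustrative only. */
    struct gizmo {
            int a;
            int b;
    };

    static struct gizmo __rcu *gp;

    static int read_gizmo(void)
    {
            struct gizmo *p;
            int ret = -1;

            rcu_read_lock();                /* rcu_read_lock_nesting becomes 1. */
            p = rcu_dereference(gp);
            if (p)
                    ret = p->a + p->b;      /* Preemption here queues the task on
                                             * its rcu_node's ->blkd_tasks list. */
            rcu_read_unlock();              /* Dequeues via rcu_read_unlock_special(). */
            return ret;
    }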
374 
375 /*
376  * Check for preempted RCU readers blocking the current grace period
377  * for the specified rcu_node structure.  If the caller needs a reliable
378  * answer, it must hold the rcu_node's ->lock.
379  */
380 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
381 {
382         return rnp->gp_tasks != NULL;
383 }
384 
385 /*
 386  * Advance a ->blkd_tasks-list pointer to the next entry, returning
 387  * NULL instead if at the end of the list.
388  */
389 static struct list_head *rcu_next_node_entry(struct task_struct *t,
390                                              struct rcu_node *rnp)
391 {
392         struct list_head *np;
393 
394         np = t->rcu_node_entry.next;
395         if (np == &rnp->blkd_tasks)
396                 np = NULL;
397         return np;
398 }
399 
400 /*
401  * Return true if the specified rcu_node structure has tasks that were
402  * preempted within an RCU read-side critical section.
403  */
404 static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
405 {
406         return !list_empty(&rnp->blkd_tasks);
407 }
408 
409 /*
410  * Handle special cases during rcu_read_unlock(), such as needing to
 411  * notify RCU core processing or the task having blocked during the RCU
412  * read-side critical section.
413  */
414 void rcu_read_unlock_special(struct task_struct *t)
415 {
416         bool empty_exp;
417         bool empty_norm;
418         bool empty_exp_now;
419         unsigned long flags;
420         struct list_head *np;
421         bool drop_boost_mutex = false;
422         struct rcu_data *rdp;
423         struct rcu_node *rnp;
424         union rcu_special special;
425 
426         /* NMI handlers cannot block and cannot safely manipulate state. */
427         if (in_nmi())
428                 return;
429 
430         local_irq_save(flags);
431 
432         /*
433          * If RCU core is waiting for this CPU to exit its critical section,
434          * report the fact that it has exited.  Because irqs are disabled,
435          * t->rcu_read_unlock_special cannot change.
436          */
437         special = t->rcu_read_unlock_special;
438         if (special.b.need_qs) {
439                 rcu_preempt_qs();
440                 t->rcu_read_unlock_special.b.need_qs = false;
441                 if (!t->rcu_read_unlock_special.s) {
442                         local_irq_restore(flags);
443                         return;
444                 }
445         }
446 
447         /*
448          * Respond to a request for an expedited grace period, but only if
449          * we were not preempted, meaning that we were running on the same
450          * CPU throughout.  If we were preempted, the exp_need_qs flag
451          * would have been cleared at the time of the first preemption,
452          * and the quiescent state would be reported when we were dequeued.
453          */
454         if (special.b.exp_need_qs) {
455                 WARN_ON_ONCE(special.b.blocked);
456                 t->rcu_read_unlock_special.b.exp_need_qs = false;
457                 rdp = this_cpu_ptr(rcu_state_p->rda);
458                 rcu_report_exp_rdp(rcu_state_p, rdp, true);
459                 if (!t->rcu_read_unlock_special.s) {
460                         local_irq_restore(flags);
461                         return;
462                 }
463         }
464 
465         /* Hardware IRQ handlers cannot block, complain if they get here. */
466         if (in_irq() || in_serving_softirq()) {
467                 lockdep_rcu_suspicious(__FILE__, __LINE__,
468                                        "rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n");
469                 pr_alert("->rcu_read_unlock_special: %#x (b: %d, enq: %d nq: %d)\n",
470                          t->rcu_read_unlock_special.s,
471                          t->rcu_read_unlock_special.b.blocked,
472                          t->rcu_read_unlock_special.b.exp_need_qs,
473                          t->rcu_read_unlock_special.b.need_qs);
474                 local_irq_restore(flags);
475                 return;
476         }
477 
478         /* Clean up if blocked during RCU read-side critical section. */
479         if (special.b.blocked) {
480                 t->rcu_read_unlock_special.b.blocked = false;
481 
482                 /*
483                  * Remove this task from the list it blocked on.  The task
484                  * now remains queued on the rcu_node corresponding to the
485                  * CPU it first blocked on, so there is no longer any need
486                  * to loop.  Retain a WARN_ON_ONCE() out of sheer paranoia.
487                  */
488                 rnp = t->rcu_blocked_node;
489                 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
490                 WARN_ON_ONCE(rnp != t->rcu_blocked_node);
491                 WARN_ON_ONCE(rnp->level != rcu_num_lvls - 1);
492                 empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
493                 empty_exp = sync_rcu_preempt_exp_done(rnp);
494                 smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
495                 np = rcu_next_node_entry(t, rnp);
496                 list_del_init(&t->rcu_node_entry);
497                 t->rcu_blocked_node = NULL;
498                 trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
499                                                 rnp->gpnum, t->pid);
500                 if (&t->rcu_node_entry == rnp->gp_tasks)
501                         rnp->gp_tasks = np;
502                 if (&t->rcu_node_entry == rnp->exp_tasks)
503                         rnp->exp_tasks = np;
504                 if (IS_ENABLED(CONFIG_RCU_BOOST)) {
505                         /* Snapshot ->boost_mtx ownership w/rnp->lock held. */
506                         drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
507                         if (&t->rcu_node_entry == rnp->boost_tasks)
508                                 rnp->boost_tasks = np;
509                 }
510 
511                 /*
512                  * If this was the last task on the current list, and if
513                  * we aren't waiting on any CPUs, report the quiescent state.
514                  * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
515                  * so we must take a snapshot of the expedited state.
516                  */
517                 empty_exp_now = sync_rcu_preempt_exp_done(rnp);
518                 if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
519                         trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
520                                                          rnp->gpnum,
521                                                          0, rnp->qsmask,
522                                                          rnp->level,
523                                                          rnp->grplo,
524                                                          rnp->grphi,
525                                                          !!rnp->gp_tasks);
526                         rcu_report_unblock_qs_rnp(rcu_state_p, rnp, flags);
527                 } else {
528                         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
529                 }
530 
531                 /* Unboost if we were boosted. */
532                 if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
533                         rt_mutex_unlock(&rnp->boost_mtx);
534 
535                 /*
536                  * If this was the last task on the expedited lists,
537                  * then we need to report up the rcu_node hierarchy.
538                  */
539                 if (!empty_exp && empty_exp_now)
540                         rcu_report_exp_rnp(rcu_state_p, rnp, true);
541         } else {
542                 local_irq_restore(flags);
543         }
544 }
545 
546 /*
547  * Dump detailed information for all tasks blocking the current RCU
548  * grace period on the specified rcu_node structure.
549  */
550 static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
551 {
552         unsigned long flags;
553         struct task_struct *t;
554 
555         raw_spin_lock_irqsave_rcu_node(rnp, flags);
556         if (!rcu_preempt_blocked_readers_cgp(rnp)) {
557                 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
558                 return;
559         }
560         t = list_entry(rnp->gp_tasks->prev,
561                        struct task_struct, rcu_node_entry);
562         list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
563                 sched_show_task(t);
564         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
565 }
566 
567 /*
568  * Dump detailed information for all tasks blocking the current RCU
569  * grace period.
570  */
571 static void rcu_print_detail_task_stall(struct rcu_state *rsp)
572 {
573         struct rcu_node *rnp = rcu_get_root(rsp);
574 
575         rcu_print_detail_task_stall_rnp(rnp);
576         rcu_for_each_leaf_node(rsp, rnp)
577                 rcu_print_detail_task_stall_rnp(rnp);
578 }
579 
580 static void rcu_print_task_stall_begin(struct rcu_node *rnp)
581 {
582         pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
583                rnp->level, rnp->grplo, rnp->grphi);
584 }
585 
586 static void rcu_print_task_stall_end(void)
587 {
588         pr_cont("\n");
589 }
590 
591 /*
592  * Scan the current list of tasks blocked within RCU read-side critical
593  * sections, printing out the tid of each.
594  */
595 static int rcu_print_task_stall(struct rcu_node *rnp)
596 {
597         struct task_struct *t;
598         int ndetected = 0;
599 
600         if (!rcu_preempt_blocked_readers_cgp(rnp))
601                 return 0;
602         rcu_print_task_stall_begin(rnp);
603         t = list_entry(rnp->gp_tasks->prev,
604                        struct task_struct, rcu_node_entry);
605         list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
606                 pr_cont(" P%d", t->pid);
607                 ndetected++;
608         }
609         rcu_print_task_stall_end();
610         return ndetected;
611 }
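
Taken together with rcu_print_task_stall_begin() and rcu_print_task_stall_end(), the output produced here is a single console line built from the format strings above, roughly of the form below (the level, CPU range, and PIDs are of course illustrative):

    Tasks blocked on level-0 rcu_node (CPUs 0-15): P1250 P1308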
612 
613 /*
614  * Scan the current list of tasks blocked within RCU read-side critical
615  * sections, printing out the tid of each that is blocking the current
616  * expedited grace period.
617  */
618 static int rcu_print_task_exp_stall(struct rcu_node *rnp)
619 {
620         struct task_struct *t;
621         int ndetected = 0;
622 
623         if (!rnp->exp_tasks)
624                 return 0;
625         t = list_entry(rnp->exp_tasks->prev,
626                        struct task_struct, rcu_node_entry);
627         list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
628                 pr_cont(" P%d", t->pid);
629                 ndetected++;
630         }
631         return ndetected;
632 }
633 
634 /*
635  * Check that the list of blocked tasks for the newly completed grace
636  * period is in fact empty.  It is a serious bug to complete a grace
637  * period that still has RCU readers blocked!  This function must be
638  * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
639  * must be held by the caller.
640  *
641  * Also, if there are blocked tasks on the list, they automatically
642  * block the newly created grace period, so set up ->gp_tasks accordingly.
643  */
644 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
645 {
646         struct task_struct *t;
647 
648         RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n");
649         WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
650         if (rcu_preempt_has_tasks(rnp)) {
651                 rnp->gp_tasks = rnp->blkd_tasks.next;
652                 t = container_of(rnp->gp_tasks, struct task_struct,
653                                  rcu_node_entry);
654                 trace_rcu_unlock_preempted_task(TPS("rcu_preempt-GPS"),
655                                                 rnp->gpnum, t->pid);
656         }
657         WARN_ON_ONCE(rnp->qsmask);
658 }
659 
660 /*
661  * Check for a quiescent state from the current CPU.  When a task blocks,
662  * the task is recorded in the corresponding CPU's rcu_node structure,
663  * which is checked elsewhere.
664  *
665  * Caller must disable hard irqs.
666  */
667 static void rcu_preempt_check_callbacks(void)
668 {
669         struct task_struct *t = current;
670 
671         if (t->rcu_read_lock_nesting == 0) {
672                 rcu_preempt_qs();
673                 return;
674         }
675         if (t->rcu_read_lock_nesting > 0 &&
676             __this_cpu_read(rcu_data_p->core_needs_qs) &&
677             __this_cpu_read(rcu_data_p->cpu_no_qs.b.norm))
678                 t->rcu_read_unlock_special.b.need_qs = true;
679 }
680 
681 #ifdef CONFIG_RCU_BOOST
682 
683 static void rcu_preempt_do_callbacks(void)
684 {
685         rcu_do_batch(rcu_state_p, this_cpu_ptr(rcu_data_p));
686 }
687 
688 #endif /* #ifdef CONFIG_RCU_BOOST */
689 
690 /**
691  * call_rcu() - Queue an RCU callback for invocation after a grace period.
692  * @head: structure to be used for queueing the RCU updates.
693  * @func: actual callback function to be invoked after the grace period
694  *
695  * The callback function will be invoked some time after a full grace
696  * period elapses, in other words after all pre-existing RCU read-side
697  * critical sections have completed.  However, the callback function
698  * might well execute concurrently with RCU read-side critical sections
699  * that started after call_rcu() was invoked.  RCU read-side critical
700  * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
701  * and may be nested.
702  *
703  * Note that all CPUs must agree that the grace period extended beyond
 704  * all pre-existing RCU read-side critical sections.  On systems with more
705  * than one CPU, this means that when "func()" is invoked, each CPU is
706  * guaranteed to have executed a full memory barrier since the end of its
707  * last RCU read-side critical section whose beginning preceded the call
708  * to call_rcu().  It also means that each CPU executing an RCU read-side
709  * critical section that continues beyond the start of "func()" must have
710  * executed a memory barrier after the call_rcu() but before the beginning
711  * of that RCU read-side critical section.  Note that these guarantees
712  * include CPUs that are offline, idle, or executing in user mode, as
713  * well as CPUs that are executing in the kernel.
714  *
715  * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
716  * resulting RCU callback function "func()", then both CPU A and CPU B are
717  * guaranteed to execute a full memory barrier during the time interval
718  * between the call to call_rcu() and the invocation of "func()" -- even
719  * if CPU A and CPU B are the same CPU (but again only if the system has
720  * more than one CPU).
721  */
722 void call_rcu(struct rcu_head *head, rcu_callback_t func)
723 {
724         __call_rcu(head, func, rcu_state_p, -1, 0);
725 }
726 EXPORT_SYMBOL_GPL(call_rcu);
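
As a usage sketch of the pattern this kernel-doc describes (hypothetical names throughout; "struct gizmo", "gp", "gp_lock", and gizmo_free_cb() are not part of this file, and the usual <linux/slab.h>, <linux/spinlock.h>, and <linux/rcupdate.h> includes are assumed): an updater publishes a new version of an RCU-protected structure and hands the old one to call_rcu(), whose callback reclaims it only after a full grace period.

    /* Hypothetical updater using call_rcu(); illustrative only. */
    struct gizmo {
            int a;
            struct rcu_head rh;
    };

    static struct gizmo __rcu *gp;
    static DEFINE_SPINLOCK(gp_lock);

    static void gizmo_free_cb(struct rcu_head *rh)
    {
            kfree(container_of(rh, struct gizmo, rh));
    }

    static void update_gizmo(int a)
    {
            struct gizmo *newp = kmalloc(sizeof(*newp), GFP_KERNEL);
            struct gizmo *oldp;

            if (!newp)
                    return;
            newp->a = a;
            spin_lock(&gp_lock);
            oldp = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
            rcu_assign_pointer(gp, newp);
            spin_unlock(&gp_lock);
            if (oldp)
                    call_rcu(&oldp->rh, gizmo_free_cb); /* Freed after a grace period. */
    }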
727 
728 /**
729  * synchronize_rcu - wait until a grace period has elapsed.
730  *
731  * Control will return to the caller some time after a full grace
732  * period has elapsed, in other words after all currently executing RCU
733  * read-side critical sections have completed.  Note, however, that
734  * upon return from synchronize_rcu(), the caller might well be executing
735  * concurrently with new RCU read-side critical sections that began while
736  * synchronize_rcu() was waiting.  RCU read-side critical sections are
737  * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
738  *
739  * See the description of synchronize_sched() for more detailed
740  * information on memory-ordering guarantees.  However, please note
741  * that -only- the memory-ordering guarantees apply.  For example,
742  * synchronize_rcu() is -not- guaranteed to wait on things like code
 743  * protected by preempt_disable(); instead, synchronize_rcu() is -only-
744  * guaranteed to wait on RCU read-side critical sections, that is, sections
745  * of code protected by rcu_read_lock().
746  */
747 void synchronize_rcu(void)
748 {
749         RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
750                          lock_is_held(&rcu_lock_map) ||
751                          lock_is_held(&rcu_sched_lock_map),
752                          "Illegal synchronize_rcu() in RCU read-side critical section");
753         if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
754                 return;
755         if (rcu_gp_is_expedited())
756                 synchronize_rcu_expedited();
757         else
758                 wait_rcu_gp(call_rcu);
759 }
760 EXPORT_SYMBOL_GPL(synchronize_rcu);
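
A common synchronous counterpart to call_rcu() is shown below as a hedged sketch ("struct item", "item_list", and "item_mutex" are hypothetical, and <linux/rculist.h>, <linux/mutex.h>, and <linux/slab.h> are assumed): the updater unlinks an element from an RCU-protected list, waits for a grace period, and only then frees it, so no pre-existing reader can still hold a reference.

    /* Hypothetical updater using synchronize_rcu(); illustrative only. */
    struct item {
            struct list_head link;
            int key;
    };

    static LIST_HEAD(item_list);
    static DEFINE_MUTEX(item_mutex);

    static void remove_item(struct item *it)
    {
            mutex_lock(&item_mutex);
            list_del_rcu(&it->link);        /* Pre-existing readers may still see it. */
            mutex_unlock(&item_mutex);
            synchronize_rcu();              /* Wait for those readers to finish. */
            kfree(it);                      /* Now safe to reclaim. */
    }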
761 
762 /**
763  * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
764  *
765  * Note that this primitive does not necessarily wait for an RCU grace period
766  * to complete.  For example, if there are no RCU callbacks queued anywhere
767  * in the system, then rcu_barrier() is within its rights to return
768  * immediately, without waiting for anything, much less an RCU grace period.
769  */
770 void rcu_barrier(void)
771 {
772         _rcu_barrier(rcu_state_p);
773 }
774 EXPORT_SYMBOL_GPL(rcu_barrier);
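
The typical caller of rcu_barrier() is module-unload code: any callbacks already queued by call_rcu() must run before the callback function's text and data disappear. A hedged sketch with hypothetical my_driver_* helpers (assuming <linux/module.h>):

    /* Hypothetical module-exit sequence; my_driver_* names are illustrative. */
    static void __exit my_driver_exit(void)
    {
            my_driver_disable();    /* Hypothetical: stop queueing new callbacks. */
            rcu_barrier();          /* Wait for already-queued callbacks to run. */
            my_driver_cleanup();    /* Hypothetical: free remaining resources. */
    }
    module_exit(my_driver_exit);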
775 
776 /*
777  * Initialize preemptible RCU's state structures.
778  */
779 static void __init __rcu_init_preempt(void)
780 {
781         rcu_init_one(rcu_state_p);
782 }
783 
784 /*
785  * Check for a task exiting while in a preemptible-RCU read-side
786  * critical section, clean up if so.  No need to issue warnings,
787  * as debug_check_no_locks_held() already does this if lockdep
788  * is enabled.
789  */
790 void exit_rcu(void)
791 {
792         struct task_struct *t = current;
793 
794         if (likely(list_empty(&current->rcu_node_entry)))
795                 return;
796         t->rcu_read_lock_nesting = 1;
797         barrier();
798         t->rcu_read_unlock_special.b.blocked = true;
799         __rcu_read_unlock();
800 }
801 
802 #else /* #ifdef CONFIG_PREEMPT_RCU */
803 
804 static struct rcu_state *const rcu_state_p = &rcu_sched_state;
805 
806 /*
807  * Tell them what RCU they are running.
808  */
809 static void __init rcu_bootup_announce(void)
810 {
811         pr_info("Hierarchical RCU implementation.\n");
812         rcu_bootup_announce_oddness();
813 }
814 
815 /*
816  * Because preemptible RCU does not exist, we never have to check for
817  * CPUs being in quiescent states.
818  */
819 static void rcu_preempt_note_context_switch(bool preempt)
820 {
821 }
822 
823 /*
824  * Because preemptible RCU does not exist, there are never any preempted
825  * RCU readers.
826  */
827 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
828 {
829         return 0;
830 }
831 
832 /*
833  * Because there is no preemptible RCU, there can be no readers blocked.
834  */
835 static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
836 {
837         return false;
838 }
839 
840 /*
841  * Because preemptible RCU does not exist, we never have to check for
842  * tasks blocked within RCU read-side critical sections.
843  */
844 static void rcu_print_detail_task_stall(struct rcu_state *rsp)
845 {
846 }
847 
848 /*
849  * Because preemptible RCU does not exist, we never have to check for
850  * tasks blocked within RCU read-side critical sections.
851  */
852 static int rcu_print_task_stall(struct rcu_node *rnp)
853 {
854         return 0;
855 }
856 
857 /*
858  * Because preemptible RCU does not exist, we never have to check for
859  * tasks blocked within RCU read-side critical sections that are
860  * blocking the current expedited grace period.
861  */
862 static int rcu_print_task_exp_stall(struct rcu_node *rnp)
863 {
864         return 0;
865 }
866 
867 /*
868  * Because there is no preemptible RCU, there can be no readers blocked,
869  * so there is no need to check for blocked tasks.  So check only for
870  * bogus qsmask values.
871  */
872 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
873 {
874         WARN_ON_ONCE(rnp->qsmask);
875 }
876 
877 /*
878  * Because preemptible RCU does not exist, it never has any callbacks
879  * to check.
880  */
881 static void rcu_preempt_check_callbacks(void)
882 {
883 }
884 
885 /*
886  * Because preemptible RCU does not exist, rcu_barrier() is just
887  * another name for rcu_barrier_sched().
888  */
889 void rcu_barrier(void)
890 {
891         rcu_barrier_sched();
892 }
893 EXPORT_SYMBOL_GPL(rcu_barrier);
894 
895 /*
896  * Because preemptible RCU does not exist, it need not be initialized.
897  */
898 static void __init __rcu_init_preempt(void)
899 {
900 }
901 
902 /*
903  * Because preemptible RCU does not exist, tasks cannot possibly exit
904  * while in preemptible RCU read-side critical sections.
905  */
906 void exit_rcu(void)
907 {
908 }
909 
910 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
911 
912 #ifdef CONFIG_RCU_BOOST
913 
914 #include "../locking/rtmutex_common.h"
915 
916 static void rcu_wake_cond(struct task_struct *t, int status)
917 {
918         /*
919          * If the thread is yielding, only wake it when this
920          * is invoked from idle
921          */
922         if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
923                 wake_up_process(t);
924 }
925 
926 /*
927  * Carry out RCU priority boosting on the task indicated by ->exp_tasks
928  * or ->boost_tasks, advancing the pointer to the next task in the
929  * ->blkd_tasks list.
930  *
931  * Note that irqs must be enabled: boosting the task can block.
932  * Returns 1 if there are more tasks needing to be boosted.
933  */
934 static int rcu_boost(struct rcu_node *rnp)
935 {
936         unsigned long flags;
937         struct task_struct *t;
938         struct list_head *tb;
939 
940         if (READ_ONCE(rnp->exp_tasks) == NULL &&
941             READ_ONCE(rnp->boost_tasks) == NULL)
942                 return 0;  /* Nothing left to boost. */
943 
944         raw_spin_lock_irqsave_rcu_node(rnp, flags);
945 
946         /*
947          * Recheck under the lock: all tasks in need of boosting
948          * might exit their RCU read-side critical sections on their own.
949          */
950         if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
951                 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
952                 return 0;
953         }
954 
955         /*
956          * Preferentially boost tasks blocking expedited grace periods.
957          * This cannot starve the normal grace periods because a second
958          * expedited grace period must boost all blocked tasks, including
959          * those blocking the pre-existing normal grace period.
960          */
961         if (rnp->exp_tasks != NULL) {
962                 tb = rnp->exp_tasks;
963                 rnp->n_exp_boosts++;
964         } else {
965                 tb = rnp->boost_tasks;
966                 rnp->n_normal_boosts++;
967         }
968         rnp->n_tasks_boosted++;
969 
970         /*
971          * We boost task t by manufacturing an rt_mutex that appears to
972          * be held by task t.  We leave a pointer to that rt_mutex where
973          * task t can find it, and task t will release the mutex when it
974          * exits its outermost RCU read-side critical section.  Then
975          * simply acquiring this artificial rt_mutex will boost task
976          * t's priority.  (Thanks to tglx for suggesting this approach!)
977          *
978          * Note that task t must acquire rnp->lock to remove itself from
979          * the ->blkd_tasks list, which it will do from exit() if from
980          * nowhere else.  We therefore are guaranteed that task t will
981          * stay around at least until we drop rnp->lock.  Note that
982          * rnp->lock also resolves races between our priority boosting
983          * and task t's exiting its outermost RCU read-side critical
984          * section.
985          */
986         t = container_of(tb, struct task_struct, rcu_node_entry);
987         rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
988         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
989         /* Lock only for side effect: boosts task t's priority. */
990         rt_mutex_lock(&rnp->boost_mtx);
991         rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */
992 
993         return READ_ONCE(rnp->exp_tasks) != NULL ||
994                READ_ONCE(rnp->boost_tasks) != NULL;
995 }
996 
997 /*
998  * Priority-boosting kthread, one per leaf rcu_node.
999  */
1000 static int rcu_boost_kthread(void *arg)
1001 {
1002         struct rcu_node *rnp = (struct rcu_node *)arg;
1003         int spincnt = 0;
1004         int more2boost;
1005 
1006         trace_rcu_utilization(TPS("Start boost kthread@init"));
1007         for (;;) {
1008                 rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
1009                 trace_rcu_utilization(TPS("End boost kthread@rcu_wait"));
1010                 rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
1011                 trace_rcu_utilization(TPS("Start boost kthread@rcu_wait"));
1012                 rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
1013                 more2boost = rcu_boost(rnp);
1014                 if (more2boost)
1015                         spincnt++;
1016                 else
1017                         spincnt = 0;
1018                 if (spincnt > 10) {
1019                         rnp->boost_kthread_status = RCU_KTHREAD_YIELDING;
1020                         trace_rcu_utilization(TPS("End boost kthread@rcu_yield"));
1021                         schedule_timeout_interruptible(2);
1022                         trace_rcu_utilization(TPS("Start boost kthread@rcu_yield"));
1023                         spincnt = 0;
1024                 }
1025         }
1026         /* NOTREACHED */
1027         trace_rcu_utilization(TPS("End boost kthread@notreached"));
1028         return 0;
1029 }
1030 
1031 /*
1032  * Check to see if it is time to start boosting RCU readers that are
1033  * blocking the current grace period, and, if so, tell the per-rcu_node
1034  * kthread to start boosting them.  If there is an expedited grace
1035  * period in progress, it is always time to boost.
1036  *
1037  * The caller must hold rnp->lock, which this function releases.
1038  * The ->boost_kthread_task is immortal, so we don't need to worry
1039  * about it going away.
1040  */
1041 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1042         __releases(rnp->lock)
1043 {
1044         struct task_struct *t;
1045 
1046         lockdep_assert_held(&rnp->lock);
1047         if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
1048                 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1049                 return;
1050         }
1051         if (rnp->exp_tasks != NULL ||
1052             (rnp->gp_tasks != NULL &&
1053              rnp->boost_tasks == NULL &&
1054              rnp->qsmask == 0 &&
1055              ULONG_CMP_GE(jiffies, rnp->boost_time))) {
1056                 if (rnp->exp_tasks == NULL)
1057                         rnp->boost_tasks = rnp->gp_tasks;
1058                 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1059                 t = rnp->boost_kthread_task;
1060                 if (t)
1061                         rcu_wake_cond(t, rnp->boost_kthread_status);
1062         } else {
1063                 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1064         }
1065 }
1066 
1067 /*
1068  * Wake up the per-CPU kthread to invoke RCU callbacks.
1069  */
1070 static void invoke_rcu_callbacks_kthread(void)
1071 {
1072         unsigned long flags;
1073 
1074         local_irq_save(flags);
1075         __this_cpu_write(rcu_cpu_has_work, 1);
1076         if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
1077             current != __this_cpu_read(rcu_cpu_kthread_task)) {
1078                 rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
1079                               __this_cpu_read(rcu_cpu_kthread_status));
1080         }
1081         local_irq_restore(flags);
1082 }
1083 
1084 /*
1085  * Is the current CPU running the RCU-callbacks kthread?
1086  * Caller must have preemption disabled.
1087  */
1088 static bool rcu_is_callbacks_kthread(void)
1089 {
1090         return __this_cpu_read(rcu_cpu_kthread_task) == current;
1091 }
1092 
1093 #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
1094 
1095 /*
1096  * Do priority-boost accounting for the start of a new grace period.
1097  */
1098 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1099 {
1100         rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
1101 }
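
For a concrete feel for the deferral above: RCU_BOOST_DELAY_JIFFIES is just the millisecond Kconfig value converted to jiffies, rounded up. For example, with CONFIG_RCU_BOOST_DELAY=500 and HZ=250, DIV_ROUND_UP(500 * 250, 1000) = 125 jiffies, so boosting is considered only once the grace period has been running for roughly half a second.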
1102 
1103 /*
1104  * Create an RCU-boost kthread for the specified node if one does not
1105  * already exist.  We only create this kthread for preemptible RCU.
1106  * Returns zero if all is well, a negated errno otherwise.
1107  */
1108 static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
1109                                        struct rcu_node *rnp)
1110 {
1111         int rnp_index = rnp - &rsp->node[0];
1112         unsigned long flags;
1113         struct sched_param sp;
1114         struct task_struct *t;
1115 
1116         if (rcu_state_p != rsp)
1117                 return 0;
1118 
1119         if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0)
1120                 return 0;
1121 
1122         rsp->boost = 1;
1123         if (rnp->boost_kthread_task != NULL)
1124                 return 0;
1125         t = kthread_create(rcu_boost_kthread, (void *)rnp,
1126                            "rcub/%d", rnp_index);
1127         if (IS_ERR(t))
1128                 return PTR_ERR(t);
1129         raw_spin_lock_irqsave_rcu_node(rnp, flags);
1130         rnp->boost_kthread_task = t;
1131         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1132         sp.sched_priority = kthread_prio;
1133         sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1134         wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
1135         return 0;
1136 }
1137 
1138 static void rcu_kthread_do_work(void)
1139 {
1140         rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data));
1141         rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data));
1142         rcu_preempt_do_callbacks();
1143 }
1144 
1145 static void rcu_cpu_kthread_setup(unsigned int cpu)
1146 {
1147         struct sched_param sp;
1148 
1149         sp.sched_priority = kthread_prio;
1150         sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
1151 }
1152 
1153 static void rcu_cpu_kthread_park(unsigned int cpu)
1154 {
1155         per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
1156 }
1157 
1158 static int rcu_cpu_kthread_should_run(unsigned int cpu)
1159 {
1160         return __this_cpu_read(rcu_cpu_has_work);
1161 }
1162 
1163 /*
1164  * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
1165  * RCU softirq used in flavors and configurations of RCU that do not
1166  * support RCU priority boosting.
1167  */
1168 static void rcu_cpu_kthread(unsigned int cpu)
1169 {
1170         unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
1171         char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
1172         int spincnt;
1173 
1174         for (spincnt = 0; spincnt < 10; spincnt++) {
1175                 trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
1176                 local_bh_disable();
1177                 *statusp = RCU_KTHREAD_RUNNING;
1178                 this_cpu_inc(rcu_cpu_kthread_loops);
1179                 local_irq_disable();
1180                 work = *workp;
1181                 *workp = 0;
1182                 local_irq_enable();
1183                 if (work)
1184                         rcu_kthread_do_work();
1185                 local_bh_enable();
1186                 if (*workp == 0) {
1187                         trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
1188                         *statusp = RCU_KTHREAD_WAITING;
1189                         return;
1190                 }
1191         }
1192         *statusp = RCU_KTHREAD_YIELDING;
1193         trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
1194         schedule_timeout_interruptible(2);
1195         trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
1196         *statusp = RCU_KTHREAD_WAITING;
1197 }
1198 
1199 /*
1200  * Set the per-rcu_node kthread's affinity to cover all CPUs that are
1201  * served by the rcu_node in question.  The CPU hotplug lock is still
1202  * held, so the value of rnp->qsmaskinit will be stable.
1203  *
1204  * We don't include outgoingcpu in the affinity set; use -1 if there is
1205  * no outgoing CPU.  If there are no CPUs left in the affinity set,
1206  * this function allows the kthread to execute on any CPU.
1207  */
1208 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1209 {
1210         struct task_struct *t = rnp->boost_kthread_task;
1211         unsigned long mask = rcu_rnp_online_cpus(rnp);
1212         cpumask_var_t cm;
1213         int cpu;
1214 
1215         if (!t)
1216                 return;
1217         if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
1218                 return;
1219         for_each_leaf_node_possible_cpu(rnp, cpu)
1220                 if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
1221                     cpu != outgoingcpu)
1222                         cpumask_set_cpu(cpu, cm);
1223         if (cpumask_weight(cm) == 0)
1224                 cpumask_setall(cm);
1225         set_cpus_allowed_ptr(t, cm);
1226         free_cpumask_var(cm);
1227 }
1228 
1229 static struct smp_hotplug_thread rcu_cpu_thread_spec = {
1230         .store                  = &rcu_cpu_kthread_task,
1231         .thread_should_run      = rcu_cpu_kthread_should_run,
1232         .thread_fn              = rcu_cpu_kthread,
1233         .thread_comm            = "rcuc/%u",
1234         .setup                  = rcu_cpu_kthread_setup,
1235         .park                   = rcu_cpu_kthread_park,
1236 };
1237 
1238 /*
1239  * Spawn boost kthreads -- called as soon as the scheduler is running.
1240  */
1241 static void __init rcu_spawn_boost_kthreads(void)
1242 {
1243         struct rcu_node *rnp;
1244         int cpu;
1245 
1246         for_each_possible_cpu(cpu)
1247                 per_cpu(rcu_cpu_has_work, cpu) = 0;
1248         BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
1249         rcu_for_each_leaf_node(rcu_state_p, rnp)
1250                 (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
1251 }
1252 
1253 static void rcu_prepare_kthreads(int cpu)
1254 {
1255         struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
1256         struct rcu_node *rnp = rdp->mynode;
1257 
1258         /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
1259         if (rcu_scheduler_fully_active)
1260                 (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
1261 }
1262 
1263 #else /* #ifdef CONFIG_RCU_BOOST */
1264 
1265 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1266         __releases(rnp->lock)
1267 {
1268         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1269 }
1270 
1271 static void invoke_rcu_callbacks_kthread(void)
1272 {
1273         WARN_ON_ONCE(1);
1274 }
1275 
1276 static bool rcu_is_callbacks_kthread(void)
1277 {
1278         return false;
1279 }
1280 
1281 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1282 {
1283 }
1284 
1285 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1286 {
1287 }
1288 
1289 static void __init rcu_spawn_boost_kthreads(void)
1290 {
1291 }
1292 
1293 static void rcu_prepare_kthreads(int cpu)
1294 {
1295 }
1296 
1297 #endif /* #else #ifdef CONFIG_RCU_BOOST */
1298 
1299 #if !defined(CONFIG_RCU_FAST_NO_HZ)
1300 
1301 /*
1302  * Check to see if any future RCU-related work will need to be done
1303  * by the current CPU, even if none need be done immediately, returning
1304  * 1 if so.  This function is part of the RCU implementation; it is -not-
1305  * an exported member of the RCU API.
1306  *
1307  * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs
1308  * any flavor of RCU.
1309  */
1310 int rcu_needs_cpu(u64 basemono, u64 *nextevt)
1311 {
1312         *nextevt = KTIME_MAX;
1313         return rcu_cpu_has_callbacks(NULL);
1314 }
1315 
1316 /*
1317  * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
1318  * after it.
1319  */
1320 static void rcu_cleanup_after_idle(void)
1321 {
1322 }
1323 
1324 /*
1325  * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
1326  * is nothing.
1327  */
1328 static void rcu_prepare_for_idle(void)
1329 {
1330 }
1331 
1332 /*
1333  * Don't bother keeping a running count of the number of RCU callbacks
1334  * posted because CONFIG_RCU_FAST_NO_HZ=n.
1335  */
1336 static void rcu_idle_count_callbacks_posted(void)
1337 {
1338 }
1339 
1340 #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
1341 
1342 /*
1343  * This code is invoked when a CPU goes idle, at which point we want
1344  * to have the CPU do everything required for RCU so that it can enter
1345  * the energy-efficient dyntick-idle mode.  This is handled by a
1346  * state machine implemented by rcu_prepare_for_idle() below.
1347  *
1348  * The following two preprocessor symbols control this state machine:
1349  *
1350  * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
1351  *      to sleep in dyntick-idle mode with RCU callbacks pending.  This
1352  *      is sized to be roughly one RCU grace period.  Those energy-efficiency
1353  *      benchmarkers who might otherwise be tempted to set this to a large
1354  *      number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
1355  *      system.  And if you are -that- concerned about energy efficiency,
1356  *      just power the system down and be done with it!
1357  * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is
1358  *      permitted to sleep in dyntick-idle mode with only lazy RCU
1359  *      callbacks pending.  Setting this too high can OOM your system.
1360  *
1361  * The values below work well in practice.  If future workloads require
1362  * adjustment, they can be converted into kernel config parameters, though
1363  * making the state machine smarter might be a better option.
1364  */
1365 #define RCU_IDLE_GP_DELAY 4             /* Roughly one grace period. */
1366 #define RCU_IDLE_LAZY_GP_DELAY (6 * HZ) /* Roughly six seconds. */
1367 
1368 static int rcu_idle_gp_delay = RCU_IDLE_GP_DELAY;
1369 module_param(rcu_idle_gp_delay, int, 0644);
1370 static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY;
1371 module_param(rcu_idle_lazy_gp_delay, int, 0644);
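
/*
 * Worked example of how these delays are used by rcu_needs_cpu() below
 * (illustrative numbers only): with the default rcu_idle_gp_delay of 4
 * and jiffies == 1003, a CPU with non-lazy callbacks pending requests
 * dj = round_up(1003 + 4, 4) - 1003 = 5 jiffies, so idle CPUs tend to
 * wake up together on the same 4-jiffy boundary.  A CPU with only lazy
 * callbacks instead rounds its roughly six-second delay to a
 * whole-second boundary via round_jiffies().
 */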
1372 
1373 /*
1374  * Try to advance callbacks for all flavors of RCU on the current CPU, but
1375  * only if it has been a while since the last time we did so.  Afterwards,
1376  * if there are any callbacks ready for immediate invocation, return true.
1377  */
1378 static bool __maybe_unused rcu_try_advance_all_cbs(void)
1379 {
1380         bool cbs_ready = false;
1381         struct rcu_data *rdp;
1382         struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
1383         struct rcu_node *rnp;
1384         struct rcu_state *rsp;
1385 
1386         /* Exit early if we advanced recently. */
1387         if (jiffies == rdtp->last_advance_all)
1388                 return false;
1389         rdtp->last_advance_all = jiffies;
1390 
1391         for_each_rcu_flavor(rsp) {
1392                 rdp = this_cpu_ptr(rsp->rda);
1393                 rnp = rdp->mynode;
1394 
1395                 /*
1396                  * Don't bother checking unless a grace period has
1397                  * completed since we last checked and there are
1398                  * callbacks not yet ready to invoke.
1399                  */
1400                 if ((rdp->completed != rnp->completed ||
1401                      unlikely(READ_ONCE(rdp->gpwrap))) &&
1402                     rcu_segcblist_pend_cbs(&rdp->cblist))
1403                         note_gp_changes(rsp, rdp);
1404 
1405                 if (rcu_segcblist_ready_cbs(&rdp->cblist))
1406                         cbs_ready = true;
1407         }
1408         return cbs_ready;
1409 }
1410 
1411 /*
1412  * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
1413  * to invoke.  If the CPU has callbacks, try to advance them.  Tell the
1414  * caller to set the timeout based on whether or not there are non-lazy
1415  * callbacks.
1416  *
1417  * The caller must have disabled interrupts.
1418  */
1419 int rcu_needs_cpu(u64 basemono, u64 *nextevt)
1420 {
1421         struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
1422         unsigned long dj;
1423 
1424         RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_needs_cpu() invoked with irqs enabled!!!");
1425 
1426         /* Snapshot to detect later posting of non-lazy callback. */
1427         rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
1428 
1429         /* If no callbacks, RCU doesn't need the CPU. */
1430         if (!rcu_cpu_has_callbacks(&rdtp->all_lazy)) {
1431                 *nextevt = KTIME_MAX;
1432                 return 0;
1433         }
1434 
1435         /* Attempt to advance callbacks. */
1436         if (rcu_try_advance_all_cbs()) {
1437                 /* Some ready to invoke, so initiate later invocation. */
1438                 invoke_rcu_core();
1439                 return 1;
1440         }
1441         rdtp->last_accelerate = jiffies;
1442 
1443         /* Request timer delay depending on laziness, and round. */
1444         if (!rdtp->all_lazy) {
1445                 dj = round_up(rcu_idle_gp_delay + jiffies,
1446                                rcu_idle_gp_delay) - jiffies;
1447         } else {
1448                 dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies;
1449         }
1450         *nextevt = basemono + dj * TICK_NSEC;
1451         return 0;
1452 }
1453 
1454 /*
1455  * Prepare a CPU for idle from an RCU perspective.  The first major task
1456  * is to sense whether nohz mode has been enabled or disabled via sysfs.
1457  * The second major task is to check to see if a non-lazy callback has
1458  * arrived at a CPU that previously had only lazy callbacks.  The third
1459  * major task is to accelerate (that is, assign grace-period numbers to)
1460  * any recently arrived callbacks.
1461  *
1462  * The caller must have disabled interrupts.
1463  */
1464 static void rcu_prepare_for_idle(void)
1465 {
1466         bool needwake;
1467         struct rcu_data *rdp;
1468         struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
1469         struct rcu_node *rnp;
1470         struct rcu_state *rsp;
1471         int tne;
1472 
1473         RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_prepare_for_idle() invoked with irqs enabled!!!");
1474         if (rcu_is_nocb_cpu(smp_processor_id()))
1475                 return;
1476 
1477         /* Handle nohz enablement switches conservatively. */
1478         tne = READ_ONCE(tick_nohz_active);
1479         if (tne != rdtp->tick_nohz_enabled_snap) {
1480                 if (rcu_cpu_has_callbacks(NULL))
1481                         invoke_rcu_core(); /* force nohz to see update. */
1482                 rdtp->tick_nohz_enabled_snap = tne;
1483                 return;
1484         }
1485         if (!tne)
1486                 return;
1487 
1488         /*
1489          * If a non-lazy callback arrived at a CPU having only lazy
1490          * callbacks, invoke RCU core for the side-effect of recalculating
1491          * idle duration on re-entry to idle.
1492          */
1493         if (rdtp->all_lazy &&
1494             rdtp->nonlazy_posted != rdtp->nonlazy_posted_snap) {
1495                 rdtp->all_lazy = false;
1496                 rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
1497                 invoke_rcu_core();
1498                 return;
1499         }
1500 
1501         /*
1502          * If we have not yet accelerated this jiffy, accelerate all
1503          * callbacks on this CPU.
1504          */
1505         if (rdtp->last_accelerate == jiffies)
1506                 return;
1507         rdtp->last_accelerate = jiffies;
1508         for_each_rcu_flavor(rsp) {
1509                 rdp = this_cpu_ptr(rsp->rda);
1510                 if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1511                         continue;
1512                 rnp = rdp->mynode;
1513                 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
1514                 needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
1515                 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
1516                 if (needwake)
1517                         rcu_gp_kthread_wake(rsp);
1518         }
1519 }
1520 
1521 /*
1522  * Clean up for exit from idle.  Attempt to advance callbacks based on
1523  * any grace periods that elapsed while the CPU was idle, and if any
1524  * callbacks are now ready to invoke, initiate invocation.
1525  */
1526 static void rcu_cleanup_after_idle(void)
1527 {
1528         RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_cleanup_after_idle() invoked with irqs enabled!!!");
1529         if (rcu_is_nocb_cpu(smp_processor_id()))
1530                 return;
1531         if (rcu_try_advance_all_cbs())
1532                 invoke_rcu_core();
1533 }
1534 
1535 /*
1536  * Keep a running count of the number of non-lazy callbacks posted
1537  * on this CPU.  This running counter (which is never decremented) allows
1538  * rcu_prepare_for_idle() to detect when something out of the idle loop
1539  * posts a callback, even if an equal number of callbacks are invoked.
1540  * Of course, callbacks should only be posted from within a trace event
1541  * designed to be called from idle or from within RCU_NONIDLE().
1542  */
1543 static void rcu_idle_count_callbacks_posted(void)
1544 {
1545         __this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
1546 }
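
/*
 * For example, if a non-lazy callback is both posted and invoked while
 * the snapshot taken in rcu_needs_cpu() is outstanding, a queue-length
 * check would see no net change, but ->nonlazy_posted still advances,
 * so the comparison against ->nonlazy_posted_snap in
 * rcu_prepare_for_idle() still detects the posting.
 */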
1547 
1548 /*
1549  * Data for flushing lazy RCU callbacks at OOM time.
1550  */
1551 static atomic_t oom_callback_count;
1552 static DECLARE_WAIT_QUEUE_HEAD(oom_callback_wq);
1553 
1554 /*
1555  * RCU OOM callback -- decrement the outstanding count and deliver the
1556  * wake-up if we are the last one.
1557  */
1558 static void rcu_oom_callback(struct rcu_head *rhp)
1559 {
1560         if (atomic_dec_and_test(&oom_callback_count))
1561                 wake_up(&oom_callback_wq);
1562 }
1563 
1564 /*
1565  * Post an rcu_oom_notify callback on the current CPU if it has at
1566  * least one lazy callback.  This will unnecessarily post callbacks
1567  * to CPUs that already have a non-lazy callback at the end of their
1568  * callback list, but this is an infrequent operation, so accept some
1569  * extra overhead to keep things simple.
1570  */
1571 static void rcu_oom_notify_cpu(void *unused)
1572 {
1573         struct rcu_state *rsp;
1574         struct rcu_data *rdp;
1575 
1576         for_each_rcu_flavor(rsp) {
1577                 rdp = raw_cpu_ptr(rsp->rda);
1578                 if (rcu_segcblist_n_lazy_cbs(&rdp->cblist)) {
1579                         atomic_inc(&oom_callback_count);
1580                         rsp->call(&rdp->oom_head, rcu_oom_callback);
1581                 }
1582         }
1583 }
1584 
1585 /*
1586  * If low on memory, ensure that each CPU has a non-lazy callback.
1587  * This will wake up CPUs that have only lazy callbacks, in turn
1588  * ensuring that they free up the corresponding memory in a timely manner.
1589  * Because an uncertain amount of memory will be freed in some uncertain
1590  * timeframe, we do not claim to have freed anything.
1591  */
1592 static int rcu_oom_notify(struct notifier_block *self,
1593                           unsigned long notused, void *nfreed)
1594 {
1595         int cpu;
1596 
1597         /* Wait for callbacks from earlier instance to complete. */
1598         wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0);
1599         smp_mb(); /* Ensure callback reuse happens after callback invocation. */
1600 
1601         /*
1602          * Prevent premature wakeup: ensure that all increments happen
1603          * before there is a chance of the counter reaching zero.
1604          */
1605         atomic_set(&oom_callback_count, 1);
1606 
1607         for_each_online_cpu(cpu) {
1608                 smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1);
1609                 cond_resched_rcu_qs();
1610         }
1611 
1612         /* Unconditionally decrement: no need to wake ourselves up. */
1613         atomic_dec(&oom_callback_count);
1614 
1615         return NOTIFY_OK;
1616 }
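
/*
 * Note the counting pattern above: starting the count at one keeps early
 * rcu_oom_callback() invocations from driving it to zero while callbacks
 * are still being posted, and the final atomic_dec() drops that initial
 * reference so that, once all callbacks have run, the next invocation's
 * wait_event() sees a count of zero.
 */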
1617 
1618 static struct notifier_block rcu_oom_nb = {
1619         .notifier_call = rcu_oom_notify
1620 };
1621 
1622 static int __init rcu_register_oom_notifier(void)
1623 {
1624         register_oom_notifier(&rcu_oom_nb);
1625         return 0;
1626 }
1627 early_initcall(rcu_register_oom_notifier);
1628 
1629 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
1630 
1631 #ifdef CONFIG_RCU_FAST_NO_HZ
1632 
1633 static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
1634 {
1635         struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
1636         unsigned long nlpd = rdtp->nonlazy_posted - rdtp->nonlazy_posted_snap;
1637 
1638         sprintf(cp, "last_accelerate: %04lx/%04lx, nonlazy_posted: %ld, %c%c",
1639                 rdtp->last_accelerate & 0xffff, jiffies & 0xffff,
1640                 ulong2long(nlpd),
1641                 rdtp->all_lazy ? 'L' : '.',
1642                 rdtp->tick_nohz_enabled_snap ? '.' : 'D');
1643 }
1644 
1645 #else /* #ifdef CONFIG_RCU_FAST_NO_HZ */
1646 
1647 static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
1648 {
1649         *cp = '\0';
1650 }
1651 
1652 #endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
1653 
1654 /* Initiate the stall-info list. */
1655 static void print_cpu_stall_info_begin(void)
1656 {
1657         pr_cont("\n");
1658 }
1659 
1660 /*
1661  * Print out diagnostic information for the specified stalled CPU.
1662  *
1663  * If the specified CPU is aware of the current RCU grace period
1664  * (flavor specified by rsp), then print the number of scheduling
1665  * clock interrupts the CPU has taken during the time that it has
1666  * been aware.  Otherwise, print the number of RCU grace periods
1667  * that this CPU is ignorant of, for example, "1" if the CPU was
1668  * aware of the previous grace period.
1669  *
1670  * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
1671  */
1672 static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
1673 {
1674         char fast_no_hz[72];
1675         struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
1676         struct rcu_dynticks *rdtp = rdp->dynticks;
1677         char *ticks_title;
1678         unsigned long ticks_value;
1679 
1680         if (rsp->gpnum == rdp->gpnum) {
1681                 ticks_title = "ticks this GP";
1682                 ticks_value = rdp->ticks_this_gp;
1683         } else {
1684                 ticks_title = "GPs behind";
1685                 ticks_value = rsp->gpnum - rdp->gpnum;
1686         }
1687         print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
1688         pr_err("\t%d-%c%c%c: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u fqs=%ld %s\n",
1689                cpu,
1690                "O."[!!cpu_online(cpu)],
1691                "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
1692                "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
1693                ticks_value, ticks_title,
1694                rcu_dynticks_snap(rdtp) & 0xfff,
1695                rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
1696                rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
1697                READ_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart,
1698                fast_no_hz);
1699 }
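
/*
 * For an online CPU that is aware of the current grace period, the line
 * printed above might look like the following (field values are made up
 * for illustration):
 *
 *	3-...: (21 ticks this GP) idle=f01/1/0 softirq=1723/1825 fqs=21
 *
 * An "O", "o", or "N" replaces the corresponding dot if the CPU is
 * offline or its bit is clear in ->qsmaskinit or ->qsmaskinitnext, and
 * the CONFIG_RCU_FAST_NO_HZ summary, if any, is appended at the end.
 */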
1700 
1701 /* Terminate the stall-info list. */
1702 static void print_cpu_stall_info_end(void)
1703 {
1704         pr_err("\t");
1705 }
1706 
1707 /* Zero ->ticks_this_gp for all flavors of RCU. */
1708 static void zero_cpu_stall_ticks(struct rcu_data *rdp)
1709 {
1710         rdp->ticks_this_gp = 0;
1711         rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
1712 }
1713 
1714 /* Increment ->ticks_this_gp for all flavors of RCU. */
1715 static void increment_cpu_stall_ticks(void)
1716 {
1717         struct rcu_state *rsp;
1718 
1719         for_each_rcu_flavor(rsp)
1720                 raw_cpu_inc(rsp->rda->ticks_this_gp);
1721 }
1722 
1723 #ifdef CONFIG_RCU_NOCB_CPU
1724 
1725 /*
1726  * Offload callback processing from the boot-time-specified set of CPUs
1727  * specified by rcu_nocb_mask.  For each CPU in the set, there is a
1728  * kthread created that pulls the callbacks from the corresponding CPU,
1729  * waits for a grace period to elapse, and invokes the callbacks.
1730  * The no-CBs CPUs do a wake_up() on their kthread when they insert
1731  * a callback into any empty list, unless the rcu_nocb_poll boot parameter
1732  * has been specified, in which case each kthread actively polls its
1733  * CPU.  (Which isn't so great for energy efficiency, but which does
1734  * reduce RCU's overhead on that CPU.)
1735  *
1736  * This is intended to be used in conjunction with Frederic Weisbecker's
1737  * adaptive-idle work, which would seriously reduce OS jitter on CPUs
1738  * running CPU-bound user-mode computations.
1739  *
1740  * Offloading of callback processing could also in theory be used as
1741  * an energy-efficiency measure because CPUs with no RCU callbacks
1742  * queued are more aggressive about entering dyntick-idle mode.
1743  */
1744 
1745 
1746 /* Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters. */
1747 static int __init rcu_nocb_setup(char *str)
1748 {
1749         alloc_bootmem_cpumask_var(&rcu_nocb_mask);
1750         have_rcu_nocb_mask = true;
1751         cpulist_parse(str, rcu_nocb_mask);
1752         return 1;
1753 }
1754 __setup("rcu_nocbs=", rcu_nocb_setup);
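
/*
 * For example, booting a CONFIG_RCU_NOCB_CPU=y kernel with "rcu_nocbs=1-7"
 * offloads callback invocation for CPUs 1 through 7 onto rcuo kthreads,
 * and adding "rcu_nocb_poll" (parsed below) makes those kthreads poll
 * instead of waiting to be awakened by the corresponding CPUs.
 */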
1755 
1756 static int __init parse_rcu_nocb_poll(char *arg)
1757 {
1758         rcu_nocb_poll = true;
1759         return 0;
1760 }
1761 early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
1762 
1763 /*
1764  * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
1765  * grace period.
1766  */
1767 static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
1768 {
1769         swake_up_all(sq);
1770 }
1771 
1772 /*
1773  * Set the root rcu_node structure's ->need_future_gp field
1774  * based on the sum of those of all rcu_node structures.  This does
1775  * double-count the root rcu_node structure's requests, but this
1776  * is necessary to handle the possibility of a rcu_nocb_kthread()
1777  * having awakened during the time that the rcu_node structures
1778  * were being updated for the end of the previous grace period.
1779  */
1780 static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
1781 {
1782         rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq;
1783 }
1784 
1785 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
1786 {
1787         return &rnp->nocb_gp_wq[rnp->completed & 0x1];
1788 }
1789 
1790 static void rcu_init_one_nocb(struct rcu_node *rnp)
1791 {
1792         init_swait_queue_head(&rnp->nocb_gp_wq[0]);
1793         init_swait_queue_head(&rnp->nocb_gp_wq[1]);
1794 }
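
/*
 * Two wait queues are needed because a no-CBs kthread can already be
 * waiting for the grace period after the one now ending: the low-order
 * bit of the needed ->completed value selects the queue used in
 * rcu_nocb_wait_gp(), so the end-of-grace-period wakeup does not disturb
 * kthreads that must wait for a later grace period.
 */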
1795 
1796 /* Is the specified CPU a no-CBs CPU? */
1797 bool rcu_is_nocb_cpu(int cpu)
1798 {
1799         if (have_rcu_nocb_mask)
1800                 return cpumask_test_cpu(cpu, rcu_nocb_mask);
1801         return false;
1802 }
1803 
1804 /*
1805  * Kick the leader kthread for this NOCB group.  Caller holds ->nocb_lock
1806  * and this function releases it.
1807  */
1808 static void __wake_nocb_leader(struct rcu_data *rdp, bool force,
1809                                unsigned long flags)
1810         __releases(rdp->nocb_lock)
1811 {
1812         struct rcu_data *rdp_leader = rdp->nocb_leader;
1813 
1814         lockdep_assert_held(&rdp->nocb_lock);
1815         if (!READ_ONCE(rdp_leader->nocb_kthread)) {
1816                 raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
1817                 return;
1818         }
1819         if (rdp_leader->nocb_leader_sleep || force) {
1820                 /* Prior smp_mb__after_atomic() orders against prior enqueue. */
1821                 WRITE_ONCE(rdp_leader->nocb_leader_sleep, false);
1822                 del_timer(&rdp->nocb_timer);
1823                 raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
1824                 smp_mb(); /* ->nocb_leader_sleep before swake_up(). */
1825                 swake_up(&rdp_leader->nocb_wq);
1826         } else {
1827                 raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
1828         }
1829 }
1830 
1831 /*
1832  * Kick the leader kthread for this NOCB group, but caller has not
1833  * acquired locks.
1834  */
1835 static void wake_nocb_leader(struct rcu_data *rdp, bool force)
1836 {
1837         unsigned long flags;
1838 
1839         raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
1840         __wake_nocb_leader(rdp, force, flags);
1841 }
1842 
1843 /*
1844  * Arrange to wake the leader kthread for this NOCB group at some
1845  * future time when it is safe to do so.
1846  */
1847 static void wake_nocb_leader_defer(struct rcu_data *rdp, int waketype,
1848                                    const char *reason)
1849 {
1850         unsigned long flags;
1851 
1852         raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
1853         if (rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT)
1854                 mod_timer(&rdp->nocb_timer, jiffies + 1);
1855         WRITE_ONCE(rdp->nocb_defer_wakeup, waketype);
1856         trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, reason);
1857         raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
1858 }
1859 
1860 /*
1861  * Does the specified CPU need an RCU callback for the specified flavor
1862  * of rcu_barrier()?
1863  */
1864 static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
1865 {
1866         struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
1867         unsigned long ret;
1868 #ifdef CONFIG_PROVE_RCU
1869         struct rcu_head *rhp;
1870 #endif /* #ifdef CONFIG_PROVE_RCU */
1871 
1872         /*
1873          * Check count of all no-CBs callbacks awaiting invocation.
1874          * There needs to be a barrier before this function is called,
1875          * but associated with a prior determination that no more
1876          * callbacks would be posted.  In the worst case, the first
1877          * barrier in _rcu_barrier() suffices (but the caller cannot
1878          * necessarily rely on this, which is not a substitute for the caller
1879          * getting the concurrency design right!).  There must also be
1880          * a barrier between the following load and posting of a callback
1881          * (if a callback is in fact needed).  This is associated with an
1882          * atomic_inc() in the caller.
1883          */
1884         ret = atomic_long_read(&rdp->nocb_q_count);
1885 
1886 #ifdef CONFIG_PROVE_RCU
1887         rhp = READ_ONCE(rdp->nocb_head);
1888         if (!rhp)
1889                 rhp = READ_ONCE(rdp->nocb_gp_head);
1890         if (!rhp)
1891                 rhp = READ_ONCE(rdp->nocb_follower_head);
1892 
1893         /* Having no rcuo kthread but CBs after scheduler starts is bad! */
1894         if (!READ_ONCE(rdp->nocb_kthread) && rhp &&
1895             rcu_scheduler_fully_active) {
1896                 /* RCU callback enqueued before CPU first came online??? */
1897                 pr_err("RCU: Never-onlined no-CBs CPU %d has CB %p\n",
1898                        cpu, rhp->func);
1899                 WARN_ON_ONCE(1);
1900         }
1901 #endif /* #ifdef CONFIG_PROVE_RCU */
1902 
1903         return !!ret;
1904 }
1905 
1906 /*
1907  * Enqueue the specified string of rcu_head structures onto the specified
1908  * CPU's no-CBs lists.  The CPU is specified by rdp, the head of the
1909  * string by rhp, and the tail of the string by rhtp.  The non-lazy/lazy
1910  * counts are supplied by rhcount and rhcount_lazy.
1911  *
1912  * If warranted, also wake up the kthread servicing this CPU's queues.
1913  */
1914 static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
1915                                     struct rcu_head *rhp,
1916                                     struct rcu_head **rhtp,
1917                                     int rhcount, int rhcount_lazy,
1918                                     unsigned long flags)
1919 {
1920         int len;
1921         struct rcu_head **old_rhpp;
1922         struct task_struct *t;
1923 
1924         /* Enqueue the callback on the nocb list and update counts. */
1925         atomic_long_add(rhcount, &rdp->nocb_q_count);
1926         /* rcu_barrier() relies on ->nocb_q_count add before xchg. */
1927         old_rhpp = xchg(&rdp->nocb_tail, rhtp);
1928         WRITE_ONCE(*old_rhpp, rhp);
1929         atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
1930         smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */
1931 
1932         /* If we are not being polled and there is a kthread, awaken it ... */
1933         t = READ_ONCE(rdp->nocb_kthread);
1934         if (rcu_nocb_poll || !t) {
1935                 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
1936                                     TPS("WakeNotPoll"));
1937                 return;
1938         }
1939         len = atomic_long_read(&rdp->nocb_q_count);
1940         if (old_rhpp == &rdp->nocb_head) {
1941                 if (!irqs_disabled_flags(flags)) {
1942                         /* ... if queue was empty ... */
1943                         wake_nocb_leader(rdp, false);
1944                         trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
1945                                             TPS("WakeEmpty"));
1946                 } else {
1947                         wake_nocb_leader_defer(rdp, RCU_NOCB_WAKE,
1948                                                TPS("WakeEmptyIsDeferred"));
1949                 }
1950                 rdp->qlen_last_fqs_check = 0;
1951         } else if (len > rdp->qlen_last_fqs_check + qhimark) {
1952                 /* ... or if many callbacks queued. */
1953                 if (!irqs_disabled_flags(flags)) {
1954                         wake_nocb_leader(rdp, true);
1955                         trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
1956                                             TPS("WakeOvf"));
1957                 } else {
1958                         wake_nocb_leader_defer(rdp, RCU_NOCB_WAKE,
1959                                                TPS("WakeOvfIsDeferred"));
1960                 }
1961                 rdp->qlen_last_fqs_check = LONG_MAX / 2;
1962         } else {
1963                 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeNot"));
1964         }
1965         return;
1966 }
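
/*
 * In short, the enqueue path above wakes the leader only when the queue
 * goes from empty to non-empty or when the backlog exceeds
 * ->qlen_last_fqs_check + qhimark, and if interrupts are disabled the
 * wakeup is deferred to the ->nocb_timer instead of being done directly.
 */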
1967 
1968 /*
1969  * This is a helper for __call_rcu(), which invokes this when the normal
1970  * callback queue is inoperable.  If this is not a no-CBs CPU, this
1971  * function returns failure back to __call_rcu(), which can complain
1972  * appropriately.
1973  *
1974  * Otherwise, this function queues the callback where the corresponding
1975  * "rcuo" kthread can find it.
1976  */
1977 static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
1978                             bool lazy, unsigned long flags)
1979 {
1980 
1981         if (!rcu_is_nocb_cpu(rdp->cpu))
1982                 return false;
1983         __call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy, flags);
1984         if (__is_kfree_rcu_offset((unsigned long)rhp->func))
1985                 trace_rcu_kfree_callback(rdp->rsp->name, rhp,
1986                                          (unsigned long)rhp->func,
1987                                          -atomic_long_read(&rdp->nocb_q_count_lazy),
1988                                          -atomic_long_read(&rdp->nocb_q_count));
1989         else
1990                 trace_rcu_callback(rdp->rsp->name, rhp,
1991                                    -atomic_long_read(&rdp->nocb_q_count_lazy),
1992                                    -atomic_long_read(&rdp->nocb_q_count));
1993 
1994         /*
1995          * If called from an extended quiescent state with interrupts
1996          * disabled, invoke the RCU core in order to allow the idle-entry
1997          * deferred-wakeup check to function.
1998          */
1999         if (irqs_disabled_flags(flags) &&
2000             !rcu_is_watching() &&
2001             cpu_online(smp_processor_id()))
2002                 invoke_rcu_core();
2003 
2004         return true;
2005 }
2006 
2007 /*
2008  * Adopt orphaned callbacks on a no-CBs CPU, or return 0 if this is
2009  * not a no-CBs CPU.
2010  */
2011 static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_data *my_rdp,
2012                                                      struct rcu_data *rdp,
2013                                                      unsigned long flags)
2014 {
2015         RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_nocb_adopt_orphan_cbs() invoked with irqs enabled!!!");
2016         if (!rcu_is_nocb_cpu(smp_processor_id()))
2017                 return false; /* Not NOCBs CPU, caller must migrate CBs. */
2018         __call_rcu_nocb_enqueue(my_rdp, rcu_segcblist_head(&rdp->cblist),
2019                                 rcu_segcblist_tail(&rdp->cblist),
2020                                 rcu_segcblist_n_cbs(&rdp->cblist),
2021                                 rcu_segcblist_n_lazy_cbs(&rdp->cblist), flags);
2022         rcu_segcblist_init(&rdp->cblist);
2023         rcu_segcblist_disable(&rdp->cblist);
2024         return true;
2025 }
2026 
2027 /*
2028  * If necessary, kick off a new grace period, and either way wait
2029  * for a subsequent grace period to complete.
2030  */
2031 static void rcu_nocb_wait_gp(struct rcu_data *rdp)
2032 {
2033         unsigned long c;
2034         bool d;
2035         unsigned long flags;
2036         bool needwake;
2037         struct rcu_node *rnp = rdp->mynode;
2038 
2039         raw_spin_lock_irqsave_rcu_node(rnp, flags);
2040         needwake = rcu_start_future_gp(rnp, rdp, &c);
2041         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2042         if (needwake)
2043                 rcu_gp_kthread_wake(rdp->rsp);
2044 
2045         /*
2046          * Wait for the grace period.  Do so interruptibly to avoid messing
2047          * up the load average.
2048          */
2049         trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait"));
2050         for (;;) {
2051                 swait_event_interruptible(
2052                         rnp->nocb_gp_wq[c & 0x1],
2053                         (d = ULONG_CMP_GE(READ_ONCE(rnp->completed), c)));
2054                 if (likely(d))
2055                         break;
2056                 WARN_ON(signal_pending(current));
2057                 trace_rcu_future_gp(rnp, rdp, c, TPS("ResumeWait"));
2058         }
2059         trace_rcu_future_gp(rnp, rdp, c, TPS("EndWait"));
2060         smp_mb(); /* Ensure that CB invocation happens after GP end. */
2061 }
2062 
2063 /*
2064  * Leaders come here to wait for additional callbacks to show up.
2065  * This function does not return until callbacks appear.
2066  */
2067 static void nocb_leader_wait(struct rcu_data *my_rdp)
2068 {
2069         bool firsttime = true;
2070         unsigned long flags;
2071         bool gotcbs;
2072         struct rcu_data *rdp;
2073         struct rcu_head **tail;
2074 
2075 wait_again:
2076 
2077         /* Wait for callbacks to appear. */
2078         if (!rcu_nocb_poll) {
2079                 trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, TPS("Sleep"));
2080                 swait_event_interruptible(my_rdp->nocb_wq,
2081                                 !READ_ONCE(my_rdp->nocb_leader_sleep));
2082                 raw_spin_lock_irqsave(&my_rdp->nocb_lock, flags);
2083                 my_rdp->nocb_leader_sleep = true;
2084                 WRITE_ONCE(my_rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
2085                 del_timer(&my_rdp->nocb_timer);
2086                 raw_spin_unlock_irqrestore(&my_rdp->nocb_lock, flags);
2087         } else if (firsttime) {
2088                 firsttime = false; /* Don't drown trace log with "Poll"! */
2089                 trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, TPS("Poll"));
2090         }
2091 
2092         /*
2093          * Each pass through the following loop checks a follower for CBs.
2094          * We are our own first follower.  Any CBs found are moved to
2095          * nocb_gp_head, where they await a grace period.
2096          */
2097         gotcbs = false;
2098         smp_mb(); /* wakeup and _sleep before ->nocb_head reads. */
2099         for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
2100                 rdp->nocb_gp_head = READ_ONCE(rdp->nocb_head);
2101                 if (!rdp->nocb_gp_head)
2102                         continue;  /* No CBs here, try next follower. */
2103 
2104                 /* Move callbacks to wait-for-GP list, which is empty. */
2105                 WRITE_ONCE(rdp->nocb_head, NULL);
2106                 rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
2107                 gotcbs = true;
2108         }
2109 
2110         /* No callbacks?  Sleep a bit if polling, and go retry.  */
2111         if (unlikely(!gotcbs)) {
2112                 WARN_ON(signal_pending(current));
2113                 if (rcu_nocb_poll) {
2114                         schedule_timeout_interruptible(1);
2115                 } else {
2116                         trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu,
2117                                             TPS("WokeEmpty"));
2118                 }
2119                 goto wait_again;
2120         }
2121 
2122         /* Wait for one grace period. */
2123         rcu_nocb_wait_gp(my_rdp);
2124 
2125         /* Each pass through the following loop wakes a follower, if needed. */
2126         for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
2127                 if (!rcu_nocb_poll &&
2128                     READ_ONCE(rdp->nocb_head) &&
2129                     READ_ONCE(my_rdp->nocb_leader_sleep)) {
2130                         raw_spin_lock_irqsave(&my_rdp->nocb_lock, flags);
2131                         my_rdp->nocb_leader_sleep = false;/* No need to sleep.*/
2132                         raw_spin_unlock_irqrestore(&my_rdp->nocb_lock, flags);
2133                 }
2134                 if (!rdp->nocb_gp_head)
2135                         continue; /* No CBs, so no need to wake follower. */
2136 
2137                 /* Append callbacks to follower's "done" list. */
2138                 raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
2139                 tail = rdp->nocb_follower_tail;
2140                 rdp->nocb_follower_tail = rdp->nocb_gp_tail;
2141                 *tail = rdp->nocb_gp_head;
2142                 raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
2143                 if (rdp != my_rdp && tail == &rdp->nocb_follower_head) {
2144                         /* List was empty, so wake up the follower.  */
2145                         swake_up(&rdp->nocb_wq);
2146                 }
2147         }
2148 
2149         /* If we (the leader) don't have CBs, go wait some more. */
2150         if (!my_rdp->nocb_follower_head)
2151                 goto wait_again;
2152 }
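
/*
 * To summarize the leader loop above: gather callbacks from the leader
 * and each of its followers onto their ->nocb_gp_head lists, wait for
 * one grace period, then splice each list onto the corresponding
 * follower's "done" list, waking any follower whose done list had been
 * empty, and finally go back to waiting if the leader itself has no
 * ready callbacks.
 */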
2153 
2154 /*
2155  * Followers come here to wait for additional callbacks to show up.
2156  * This function does not return until callbacks appear.
2157  */
2158 static void nocb_follower_wait(struct rcu_data *rdp)
2159 {
2160         for (;;) {
2161                 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("FollowerSleep"));
2162                 swait_event_interruptible(rdp->nocb_wq,
2163                                          READ_ONCE(rdp->nocb_follower_head));
2164                 if (smp_load_acquire(&rdp->nocb_follower_head)) {
2165                         /* ^^^ Ensure CB invocation follows _head test. */
2166                         return;
2167                 }
2168                 WARN_ON(signal_pending(current));
2169                 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WokeEmpty"));
2170         }
2171 }
2172 
2173 /*
2174  * Per-rcu_data kthread, but only for no-CBs CPUs.  Each kthread invokes
2175  * callbacks queued by the corresponding no-CBs CPU; however, there is
2176  * an optional leader-follower relationship so that the grace-period
2177  * kthreads don't have to do quite so many wakeups.
2178  */
2179 static int rcu_nocb_kthread(void *arg)
2180 {
2181         int c, cl;
2182         unsigned long flags;
2183         struct rcu_head *list;
2184         struct rcu_head *next;
2185         struct rcu_head **tail;
2186         struct rcu_data *rdp = arg;
2187 
2188         /* Each pass through this loop invokes one batch of callbacks */
2189         for (;;) {
2190                 /* Wait for callbacks. */
2191                 if (rdp->nocb_leader == rdp)
2192                         nocb_leader_wait(rdp);
2193                 else
2194                         nocb_follower_wait(rdp);
2195 
2196                 /* Pull the ready-to-invoke callbacks onto local list. */
2197                 raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
2198                 list = rdp->nocb_follower_head;
2199                 rdp->nocb_follower_head = NULL;
2200                 tail = rdp->nocb_follower_tail;
2201                 rdp->nocb_follower_tail = &rdp->nocb_follower_head;
2202                 raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
2203                 BUG_ON(!list);
2204                 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WokeNonEmpty"));
2205 
2206                 /* Each pass through the following loop invokes a callback. */
2207                 trace_rcu_batch_start(rdp->rsp->name,
2208                                       atomic_long_read(&rdp->nocb_q_count_lazy),
2209                                       atomic_long_read(&rdp->nocb_q_count), -1);
2210                 c = cl = 0;
2211                 while (list) {
2212                         next = list->next;
2213                         /* Wait for enqueuing to complete, if needed. */
2214                         while (next == NULL && &list->next != tail) {
2215                                 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2216                                                     TPS("WaitQueue"));
2217                                 schedule_timeout_interruptible(1);
2218                                 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2219                                                     TPS("WokeQueue"));
2220                                 next = list->next;
2221                         }
2222                         debug_rcu_head_unqueue(list);
2223                         local_bh_disable();
2224                         if (__rcu_reclaim(rdp->rsp->name, list))
2225                                 cl++;
2226                         c++;
2227                         local_bh_enable();
2228                         cond_resched_rcu_qs();
2229                         list = next;
2230                 }
2231                 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
2232                 smp_mb__before_atomic();  /* _add after CB invocation. */
2233                 atomic_long_add(-c, &rdp->nocb_q_count);
2234                 atomic_long_add(-cl, &rdp->nocb_q_count_lazy);
2235                 rdp->n_nocbs_invoked += c;
2236         }
2237         return 0;
2238 }
2239 
2240 /* Is a deferred wakeup of rcu_nocb_kthread() required? */
2241 static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
2242 {
2243         return READ_ONCE(rdp->nocb_defer_wakeup);
2244 }
2245 
2246 /* Do a deferred wakeup of rcu_nocb_kthread(). */
2247 static void do_nocb_deferred_wakeup_common(struct rcu_data *rdp)
2248 {
2249         unsigned long flags;
2250         int ndw;
2251 
2252         raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
2253         if (!rcu_nocb_need_deferred_wakeup(rdp)) {
2254                 raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
2255                 return;
2256         }
2257         ndw = READ_ONCE(rdp->nocb_defer_wakeup);
2258         WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
2259         __wake_nocb_leader(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
2260         trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake"));
2261 }
2262 
2263 /* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */
2264 static void do_nocb_deferred_wakeup_timer(unsigned long x)
2265 {
2266         do_nocb_deferred_wakeup_common((struct rcu_data *)x);
2267 }
2268 
2269 /*
2270  * Do a deferred wakeup of rcu_nocb_kthread() from fastpath.
2271  * This means we do an inexact common-case check.  Note that if
2272  * we miss, ->nocb_timer will eventually clean things up.
2273  */
2274 static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
2275 {
2276         if (rcu_nocb_need_deferred_wakeup(rdp))
2277                 do_nocb_deferred_wakeup_common(rdp);
2278 }
2279 
2280 void __init rcu_init_nohz(void)
2281 {
2282         int cpu;
2283         bool need_rcu_nocb_mask = false;
2284         struct rcu_state *rsp;
2285 
2286 #if defined(CONFIG_NO_HZ_FULL)
2287         if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
2288                 need_rcu_nocb_mask = true;
2289 #endif /* #if defined(CONFIG_NO_HZ_FULL) */
2290 
2291         if (!have_rcu_nocb_mask && need_rcu_nocb_mask) {
2292                 if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
2293                         pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
2294                         return;
2295                 }
2296                 have_rcu_nocb_mask = true;
2297         }
2298         if (!have_rcu_nocb_mask)
2299                 return;
2300 
2301 #if defined(CONFIG_NO_HZ_FULL)
2302         if (tick_nohz_full_running)
2303                 cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
2304 #endif /* #if defined(CONFIG_NO_HZ_FULL) */
2305 
2306         if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
2307                 pr_info("\tNote: kernel parameter 'rcu_nocbs=' contains nonexistent CPUs.\n");
2308                 cpumask_and(rcu_nocb_mask, cpu_possible_mask,
2309                             rcu_nocb_mask);
2310         }
2311         pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n",
2312                 cpumask_pr_args(rcu_nocb_mask));
2313         if (rcu_nocb_poll)
2314                 pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
2315 
2316         for_each_rcu_flavor(rsp) {
2317                 for_each_cpu(cpu, rcu_nocb_mask)
2318                         init_nocb_callback_list(per_cpu_ptr(rsp->rda, cpu));
2319                 rcu_organize_nocb_kthreads(rsp);
2320         }
2321 }
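
/*
 * For example, booting with "nohz_full=2-5" and no "rcu_nocbs=" at all
 * still offloads CPUs 2-5, because tick_nohz_full_mask is ORed into
 * rcu_nocb_mask above before the kthreads are organized.
 */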
2322 
2323 /* Initialize per-rcu_data variables for no-CBs CPUs. */
2324 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
2325 {
2326         rdp->nocb_tail = &rdp->nocb_head;
2327         init_swait_queue_head(&rdp->nocb_wq);
2328         rdp->nocb_follower_tail = &rdp->nocb_follower_head;
2329         raw_spin_lock_init(&rdp->nocb_lock);
2330         setup_timer(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer,
2331                     (unsigned long)rdp);
2332 }
2333 
2334 /*
2335  * If the specified CPU is a no-CBs CPU that does not already have its
2336  * rcuo kthread for the specified RCU flavor, spawn it.  If the CPUs are
2337  * brought online out of order, this can require re-organizing the
2338  * leader-follower relationships.
2339  */
2340 static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
2341 {
2342         struct rcu_data *rdp;
2343         struct rcu_data *rdp_last;
2344         struct rcu_data *rdp_old_leader;
2345         struct rcu_data *rdp_spawn = per_cpu_ptr(rsp->rda, cpu);
2346         struct task_struct *t;
2347 
2348         /*
2349          * If this isn't a no-CBs CPU or if it already has an rcuo kthread,
2350          * then nothing to do.
2351          */
2352         if (!rcu_is_nocb_cpu(cpu) || rdp_spawn->nocb_kthread)
2353                 return;
2354 
2355         /* If we didn't spawn the leader first, reorganize! */
2356         rdp_old_leader = rdp_spawn->nocb_leader;
2357         if (rdp_old_leader != rdp_spawn && !rdp_old_leader->nocb_kthread) {
2358                 rdp_last = NULL;
2359                 rdp = rdp_old_leader;
2360                 do {
2361                         rdp->nocb_leader = rdp_spawn;
2362                         if (rdp_last && rdp != rdp_spawn)
2363                                 rdp_last->nocb_next_follower = rdp;
2364                         if (rdp == rdp_spawn) {
2365                                 rdp = rdp->nocb_next_follower;
2366                         } else {
2367                                 rdp_last = rdp;
2368                                 rdp = rdp->nocb_next_follower;
2369                                 rdp_last->nocb_next_follower = NULL;
2370                         }
2371                 } while (rdp);
2372                 rdp_spawn->nocb_next_follower = rdp_old_leader;
2373         }
2374 
2375         /* Spawn the kthread for this CPU and RCU flavor. */
2376         t = kthread_run(rcu_nocb_kthread, rdp_spawn,
2377                         "rcuo%c/%d", rsp->abbr, cpu);
2378         BUG_ON(IS_ERR(t));
2379         WRITE_ONCE(rdp_spawn->nocb_kthread, t);
2380 }
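
/*
 * The resulting kthreads are named after the flavor's ->abbr character,
 * for example "rcuos/3", "rcuob/3", and (on preemptible kernels)
 * "rcuop/3" for CPU 3's rcu_sched, rcu_bh, and rcu_preempt flavors.
 */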
2381 
2382 /*
2383  * If the specified CPU is a no-CBs CPU that does not already have its
2384  * rcuo kthreads, spawn them.
2385  */
2386 static void rcu_spawn_all_nocb_kthreads(int cpu)
2387 {
2388         struct rcu_state *rsp;
2389 
2390         if (rcu_scheduler_fully_active)
2391                 for_each_rcu_flavor(rsp)
2392                         rcu_spawn_one_nocb_kthread(rsp, cpu);
2393 }
2394 
2395 /*
2396  * Once the scheduler is running, spawn rcuo kthreads for all online
2397  * no-CBs CPUs.  This assumes that the early_initcall()s happen before
2398  * non-boot CPUs come online -- if this changes, we will need to add
2399  * some mutual exclusion.
2400  */
2401 static void __init rcu_spawn_nocb_kthreads(void)
2402 {
2403         int cpu;
2404 
2405         for_each_online_cpu(cpu)
2406                 rcu_spawn_all_nocb_kthreads(cpu);
2407 }
2408 
2409 /* How many follower CPU IDs per leader?  Default of -1 for sqrt(nr_cpu_ids). */
2410 static int rcu_nocb_leader_stride = -1;
2411 module_param(rcu_nocb_leader_stride, int, 0444);
2412 
2413 /*
2414  * Initialize leader-follower relationships for all no-CBs CPUs.
2415  */
2416 static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp)
2417 {
2418         int cpu;
2419         int ls = rcu_nocb_leader_stride;
2420         int nl = 0;  /* Next leader. */
2421         struct rcu_data *rdp;
2422         struct rcu_data *rdp_leader = NULL;  /* Suppress misguided gcc warn. */
2423         struct rcu_data *rdp_prev = NULL;
2424 
2425         if (!have_rcu_nocb_mask)
2426                 return;
2427         if (ls == -1) {
2428                 ls = int_sqrt(nr_cpu_ids);
2429                 rcu_nocb_leader_stride = ls;
2430         }
2431 
2432         /*
2433          * Each pass through this loop sets up one rcu_data structure.
2434          * Should the corresponding CPU come online in the future, then
2435          * we will spawn the needed set of rcu_nocb_kthread() kthreads.
2436          */
2437         for_each_cpu(cpu, rcu_nocb_mask) {
2438                 rdp = per_cpu_ptr(rsp->rda, cpu);
2439                 if (rdp->cpu >= nl) {
2440                         /* New leader, set up for followers & next leader. */
2441                         nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
2442                         rdp->nocb_leader = rdp;
2443                         rdp_leader = rdp;
2444                 } else {
2445                         /* Another follower, link to previous leader. */
2446                         rdp->nocb_leader = rdp_leader;
2447                         rdp_prev->nocb_next_follower = rdp;
2448                 }
2449                 rdp_prev = rdp;
2450         }
2451 }
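
/*
 * Example of the resulting grouping: with 64 possible CPUs, all of them
 * no-CBs CPUs, and the default stride of int_sqrt(64) = 8, CPU 0 leads
 * CPUs 0-7, CPU 8 leads CPUs 8-15, and so on, yielding eight groups of
 * eight rcuo kthreads each.
 */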
2452 
2453 /* Prevent __call_rcu() from enqueuing callbacks on no-CBs CPUs */
2454 static bool init_nocb_callback_list(struct rcu_data *rdp)
2455 {
2456         if (!rcu_is_nocb_cpu(rdp->cpu))
2457                 return false;
2458 
2459         /* If there are early-boot callbacks, move them to nocb lists. */
2460         if (!rcu_segcblist_empty(&rdp->cblist)) {
2461                 rdp->nocb_head = rcu_segcblist_head(&rdp->cblist);
2462                 rdp->nocb_tail = rcu_segcblist_tail(&rdp->cblist);
2463                 atomic_long_set(&rdp->nocb_q_count,
2464                                 rcu_segcblist_n_cbs(&rdp->cblist));
2465                 atomic_long_set(&rdp->nocb_q_count_lazy,
2466                                 rcu_segcblist_n_lazy_cbs(&rdp->cblist));
2467                 rcu_segcblist_init(&rdp->cblist);
2468         }
2469         rcu_segcblist_disable(&rdp->cblist);
2470         return true;
2471 }
2472 
2473 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
2474 
2475 static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
2476 {
2477         WARN_ON_ONCE(1); /* Should be dead code. */
2478         return false;
2479 }
2480 
2481 static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
2482 {
2483 }
2484 
2485 static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
2486 {
2487 }
2488 
2489 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
2490 {
2491         return NULL;
2492 }
2493 
2494 static void rcu_init_one_nocb(struct rcu_node *rnp)
2495 {
2496 }
2497 
2498 static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
2499                             bool lazy, unsigned long flags)
2500 {
2501         return false;
2502 }
2503 
2504 static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_data *my_rdp,
2505                                                      struct rcu_data *rdp,
2506                                                      unsigned long flags)
2507 {
2508         return false;
2509 }
2510 
2511 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
2512 {
2513 }
2514 
2515 static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
2516 {
2517         return false;
2518 }
2519 
2520 static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
2521 {
2522 }
2523 
2524 static void rcu_spawn_all_nocb_kthreads(int cpu)
2525 {
2526 }
2527 
2528 static void __init rcu_spawn_nocb_kthreads(void)
2529 {
2530 }
2531 
2532 static bool init_nocb_callback_list(struct rcu_data *rdp)
2533 {
2534         return false;
2535 }
2536 
2537 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
2538 
2539 /*
2540  * An adaptive-ticks CPU can potentially execute in kernel mode for an
2541  * arbitrarily long period of time with the scheduling-clock tick turned
2542  * off.  RCU will be paying attention to this CPU because it is in the
2543  * kernel, but the CPU cannot be guaranteed to be executing the RCU state
2544  * machine because the scheduling-clock tick has been disabled.  Therefore,
2545  * if an adaptive-ticks CPU is failing to respond to the current grace
2546  * period and has not been idle from an RCU perspective, kick it.
2547  */
2548 static void __maybe_unused rcu_kick_nohz_cpu(int cpu)
2549 {
2550 #ifdef CONFIG_NO_HZ_FULL
2551         if (tick_nohz_full_cpu(cpu))
2552                 smp_send_reschedule(cpu);
2553 #endif /* #ifdef CONFIG_NO_HZ_FULL */
2554 }
2555 
2556 /*
2557  * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
2558  * grace-period kthread will do force_quiescent_state() processing?
2559  * The idea is to avoid waking up RCU core processing on such a
2560  * CPU unless the grace period has extended for too long.
2561  *
2562  * This code relies on the fact that all NO_HZ_FULL CPUs are also
2563  * CONFIG_RCU_NOCB_CPU CPUs.
2564  */
2565 static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
2566 {
2567 #ifdef CONFIG_NO_HZ_FULL
2568         if (tick_nohz_full_cpu(smp_processor_id()) &&
2569             (!rcu_gp_in_progress(rsp) ||
2570              ULONG_CMP_LT(jiffies, READ_ONCE(rsp->gp_start) + HZ)))
2571                 return true;
2572 #endif /* #ifdef CONFIG_NO_HZ_FULL */
2573         return false;
2574 }
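
/*
 * In other words, a NO_HZ_FULL CPU is left alone unless the current
 * grace period has been running for at least about one second (HZ
 * jiffies), at which point normal RCU core processing is again allowed
 * to run there.
 */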
2575 
2576 /*
2577  * Bind the grace-period kthread for the sysidle flavor of RCU to the
2578  * timekeeping CPU.
2579  */
2580 static void rcu_bind_gp_kthread(void)
2581 {
2582         int __maybe_unused cpu;
2583 
2584         if (!tick_nohz_full_enabled())
2585                 return;
2586         housekeeping_affine(current);
2587 }
2588 
2589 /* Record the current task on dyntick-idle entry. */
2590 static void rcu_dynticks_task_enter(void)
2591 {
2592 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
2593         WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id());
2594 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
2595 }
2596 
2597 /* Record no current task on dyntick-idle exit. */
2598 static void rcu_dynticks_task_exit(void)
2599 {
2600 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
2601         WRITE_ONCE(current->rcu_tasks_idle_cpu, -1);
2602 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
2603 }
2604 
