TOMOYO Linux Cross Reference
Linux/kernel/rcu/tree.h


/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *         Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/stop_machine.h>

/*
 * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
 * CONFIG_RCU_FANOUT_LEAF.
 * In theory, it should be possible to add more levels straightforwardly.
 * In practice, this did work well going from three levels to four.
 * Of course, your mileage may vary.
 */

#ifdef CONFIG_RCU_FANOUT
#define RCU_FANOUT CONFIG_RCU_FANOUT
#else /* #ifdef CONFIG_RCU_FANOUT */
# ifdef CONFIG_64BIT
# define RCU_FANOUT 64
# else
# define RCU_FANOUT 32
# endif
#endif /* #else #ifdef CONFIG_RCU_FANOUT */

#ifdef CONFIG_RCU_FANOUT_LEAF
#define RCU_FANOUT_LEAF CONFIG_RCU_FANOUT_LEAF
#else /* #ifdef CONFIG_RCU_FANOUT_LEAF */
# ifdef CONFIG_64BIT
# define RCU_FANOUT_LEAF 64
# else
# define RCU_FANOUT_LEAF 32
# endif
#endif /* #else #ifdef CONFIG_RCU_FANOUT_LEAF */

#define RCU_FANOUT_1          (RCU_FANOUT_LEAF)
#define RCU_FANOUT_2          (RCU_FANOUT_1 * RCU_FANOUT)
#define RCU_FANOUT_3          (RCU_FANOUT_2 * RCU_FANOUT)
#define RCU_FANOUT_4          (RCU_FANOUT_3 * RCU_FANOUT)

#if NR_CPUS <= RCU_FANOUT_1
#  define RCU_NUM_LVLS        1
#  define NUM_RCU_LVL_0       1
#  define NUM_RCU_NODES       NUM_RCU_LVL_0
#  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0 }
#  define RCU_NODE_NAME_INIT  { "rcu_node_0" }
#  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0" }
#  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0" }
#  define RCU_EXP_SCHED_NAME_INIT \
                              { "rcu_node_exp_sched_0" }
#elif NR_CPUS <= RCU_FANOUT_2
#  define RCU_NUM_LVLS        2
#  define NUM_RCU_LVL_0       1
#  define NUM_RCU_LVL_1       DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
#  define NUM_RCU_NODES       (NUM_RCU_LVL_0 + NUM_RCU_LVL_1)
#  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1 }
#  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1" }
#  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1" }
#  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0", "rcu_node_exp_1" }
#  define RCU_EXP_SCHED_NAME_INIT \
                              { "rcu_node_exp_sched_0", "rcu_node_exp_sched_1" }
#elif NR_CPUS <= RCU_FANOUT_3
#  define RCU_NUM_LVLS        3
#  define NUM_RCU_LVL_0       1
#  define NUM_RCU_LVL_1       DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
#  define NUM_RCU_LVL_2       DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
#  define NUM_RCU_NODES       (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2)
#  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2 }
#  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1", "rcu_node_2" }
#  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2" }
#  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0", "rcu_node_exp_1", "rcu_node_exp_2" }
#  define RCU_EXP_SCHED_NAME_INIT \
                              { "rcu_node_exp_sched_0", "rcu_node_exp_sched_1", "rcu_node_exp_sched_2" }
#elif NR_CPUS <= RCU_FANOUT_4
#  define RCU_NUM_LVLS        4
#  define NUM_RCU_LVL_0       1
#  define NUM_RCU_LVL_1       DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
#  define NUM_RCU_LVL_2       DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
#  define NUM_RCU_LVL_3       DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
#  define NUM_RCU_NODES       (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
#  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2, NUM_RCU_LVL_3 }
#  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1", "rcu_node_2", "rcu_node_3" }
#  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2", "rcu_node_fqs_3" }
#  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0", "rcu_node_exp_1", "rcu_node_exp_2", "rcu_node_exp_3" }
#  define RCU_EXP_SCHED_NAME_INIT \
                              { "rcu_node_exp_sched_0", "rcu_node_exp_sched_1", "rcu_node_exp_sched_2", "rcu_node_exp_sched_3" }
#else
# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
#endif /* #if (NR_CPUS) <= RCU_FANOUT_1 */

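/*
 * Worked example, assuming the 64-bit fallback values above (RCU_FANOUT
 * and RCU_FANOUT_LEAF both 64): for NR_CPUS = 4096, NR_CPUS equals
 * RCU_FANOUT_2 (64 * 64), so the two-level branch applies and yields
 * NUM_RCU_LVL_0 = 1 root node plus NUM_RCU_LVL_1 =
 * DIV_ROUND_UP(4096, 64) = 64 leaf nodes, for NUM_RCU_NODES = 65.
 */
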
extern int rcu_num_lvls;
extern int rcu_num_nodes;

/*
 * Dynticks per-CPU state.
 */
struct rcu_dynticks {
        long long dynticks_nesting; /* Track irq/process nesting level. */
                                    /* Process level is worth LLONG_MAX/2. */
        int dynticks_nmi_nesting;   /* Track NMI nesting level. */
        atomic_t dynticks;          /* Even value for idle, else odd. */
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
        long long dynticks_idle_nesting;
                                    /* irq/process nesting level from idle. */
        atomic_t dynticks_idle;     /* Even value for idle, else odd. */
                                    /*  "Idle" excludes userspace execution. */
        unsigned long dynticks_idle_jiffies;
                                    /* End of last non-NMI non-idle period. */
#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
#ifdef CONFIG_RCU_FAST_NO_HZ
        bool all_lazy;              /* Are all CPU's CBs lazy? */
        unsigned long nonlazy_posted;
                                    /* # times non-lazy CBs posted to CPU. */
        unsigned long nonlazy_posted_snap;
                                    /* idle-period nonlazy_posted snapshot. */
        unsigned long last_accelerate;
                                    /* Last jiffy CBs were accelerated. */
        unsigned long last_advance_all;
                                    /* Last jiffy CBs were all advanced. */
        int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
};

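/*
 * Minimal sketch (hypothetical helper, not part of this header) of how
 * the even/odd ->dynticks protocol is consumed: a grace-period detector
 * snapshots the counter and treats an even snapshot, or any later
 * movement away from an odd snapshot, as evidence of an idle sojourn
 * (counter wrap is ignored here for simplicity).
 */
static inline bool rcu_dynticks_in_qs_sketch(struct rcu_dynticks *rdtp,
                                             int snap)
{
        /* An even snapshot means the CPU was idle when sampled. */
        if (!(snap & 0x1))
                return true;
        /* An odd snapshot that has since changed implies an idle period. */
        return atomic_read(&rdtp->dynticks) != snap;
}
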
/* RCU's kthread states for tracing. */
#define RCU_KTHREAD_STOPPED  0
#define RCU_KTHREAD_RUNNING  1
#define RCU_KTHREAD_WAITING  2
#define RCU_KTHREAD_OFFCPU   3
#define RCU_KTHREAD_YIELDING 4
#define RCU_KTHREAD_MAX      4

/*
 * Definition for node within the RCU grace-period-detection hierarchy.
 */
struct rcu_node {
        raw_spinlock_t lock;    /* Root rcu_node's lock protects some */
                                /*  rcu_state fields as well as following. */
        unsigned long gpnum;    /* Current grace period for this node. */
                                /*  This will either be equal to or one */
                                /*  behind the root rcu_node's gpnum. */
        unsigned long completed; /* Last GP completed for this node. */
                                /*  This will either be equal to or one */
                                /*  behind the root rcu_node's completed. */
        unsigned long qsmask;   /* CPUs or groups that need to switch in */
                                /*  order for current grace period to proceed.*/
                                /*  In leaf rcu_node, each bit corresponds to */
                                /*  an rcu_data structure, otherwise, each */
                                /*  bit corresponds to a child rcu_node */
                                /*  structure. */
        unsigned long expmask;  /* Groups that have ->blkd_tasks */
                                /*  elements that need to drain to allow the */
                                /*  current expedited grace period to */
                                /*  complete (only for PREEMPT_RCU). */
        unsigned long qsmaskinit;
                                /* Per-GP initial value for qsmask & expmask. */
                                /*  Initialized from ->qsmaskinitnext at the */
                                /*  beginning of each grace period. */
        unsigned long qsmaskinitnext;
                                /* Online CPUs for next grace period. */
        unsigned long grpmask;  /* Mask to apply to parent qsmask. */
                                /*  Only one bit will be set in this mask. */
        int     grplo;          /* lowest-numbered CPU or group here. */
        int     grphi;          /* highest-numbered CPU or group here. */
        u8      grpnum;         /* CPU/group number for next level up. */
        u8      level;          /* root is at level 0. */
        bool    wait_blkd_tasks;/* Necessary to wait for blocked tasks to */
                                /*  exit RCU read-side critical sections */
                                /*  before propagating offline up the */
                                /*  rcu_node tree? */
        struct rcu_node *parent;
        struct list_head blkd_tasks;
                                /* Tasks blocked in RCU read-side critical */
                                /*  section.  Tasks are placed at the head */
                                /*  of this list and age towards the tail. */
        struct list_head *gp_tasks;
                                /* Pointer to the first task blocking the */
                                /*  current grace period, or NULL if there */
                                /*  is no such task. */
        struct list_head *exp_tasks;
                                /* Pointer to the first task blocking the */
                                /*  current expedited grace period, or NULL */
                                /*  if there is no such task.  If there */
                                /*  is no current expedited grace period, */
                                /*  then there cannot be any such task. */
        struct list_head *boost_tasks;
                                /* Pointer to first task that needs to be */
                                /*  priority boosted, or NULL if no priority */
                                /*  boosting is needed for this rcu_node */
                                /*  structure.  If there are no tasks */
                                /*  queued on this rcu_node structure that */
                                /*  are blocking the current grace period, */
                                /*  there can be no such task. */
        struct rt_mutex boost_mtx;
                                /* Used only for the priority-boosting */
                                /*  side effect, not as a lock. */
        unsigned long boost_time;
                                /* When to start boosting (jiffies). */
        struct task_struct *boost_kthread_task;
                                /* kthread that takes care of priority */
                                /*  boosting for this rcu_node structure. */
        unsigned int boost_kthread_status;
                                /* State of boost_kthread_task for tracing. */
        unsigned long n_tasks_boosted;
                                /* Total number of tasks boosted. */
        unsigned long n_exp_boosts;
                                /* Number of tasks boosted for expedited GP. */
        unsigned long n_normal_boosts;
                                /* Number of tasks boosted for normal GP. */
        unsigned long n_balk_blkd_tasks;
                                /* Refused to boost: no blocked tasks. */
        unsigned long n_balk_exp_gp_tasks;
                                /* Refused to boost: nothing blocking GP. */
        unsigned long n_balk_boost_tasks;
                                /* Refused to boost: already boosting. */
        unsigned long n_balk_notblocked;
                                /* Refused to boost: RCU RS CS still running. */
        unsigned long n_balk_notyet;
                                /* Refused to boost: not yet time. */
        unsigned long n_balk_nos;
                                /* Refused to boost: not sure why, though. */
                                /*  This can happen due to race conditions. */
#ifdef CONFIG_RCU_NOCB_CPU
        wait_queue_head_t nocb_gp_wq[2];
                                /* Place for rcu_nocb_kthread() to wait GP. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
        int need_future_gp[2];
                                /* Counts of upcoming no-CB GP requests. */
        raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;

        struct mutex exp_funnel_mutex ____cacheline_internodealigned_in_smp;
} ____cacheline_internodealigned_in_smp;

/*
 * Do a full breadth-first scan of the rcu_node structures for the
 * specified rcu_state structure.
 */
#define rcu_for_each_node_breadth_first(rsp, rnp) \
        for ((rnp) = &(rsp)->node[0]; \
             (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)

/*
 * Do a breadth-first scan of the non-leaf rcu_node structures for the
 * specified rcu_state structure.  Note that if there is a singleton
 * rcu_node tree with but one rcu_node structure, this loop is a no-op.
 */
#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
        for ((rnp) = &(rsp)->node[0]; \
             (rnp) < (rsp)->level[rcu_num_lvls - 1]; (rnp)++)

/*
 * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
 * structure.  Note that if there is a singleton rcu_node tree with but
 * one rcu_node structure, this loop -will- visit the rcu_node structure.
 * It is still a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rsp, rnp) \
        for ((rnp) = (rsp)->level[rcu_num_lvls - 1]; \
             (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)

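/*
 * For example, the leaf scan can be used to total the CPUs covered by
 * the leaf level (a usage sketch; struct rcu_state is defined further
 * below, so this is shown in comment form):
 *
 *      struct rcu_node *rnp;
 *      int n = 0;
 *
 *      rcu_for_each_leaf_node(rsp, rnp)
 *              n += rnp->grphi - rnp->grplo + 1;
 */
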
/* Index values for nxttail array in struct rcu_data. */
#define RCU_DONE_TAIL           0       /* Also RCU_WAIT head. */
#define RCU_WAIT_TAIL           1       /* Also RCU_NEXT_READY head. */
#define RCU_NEXT_READY_TAIL     2       /* Also RCU_NEXT head. */
#define RCU_NEXT_TAIL           3
#define RCU_NEXT_SIZE           4

/* Per-CPU data for read-copy update. */
struct rcu_data {
        /* 1) quiescent-state and grace-period handling : */
        unsigned long   completed;      /* Track rsp->completed gp number */
                                        /*  in order to detect GP end. */
        unsigned long   gpnum;          /* Highest gp number that this CPU */
                                        /*  is aware of having started. */
        unsigned long   rcu_qs_ctr_snap;/* Snapshot of rcu_qs_ctr to check */
                                        /*  for rcu_all_qs() invocations. */
        bool            passed_quiesce; /* User-mode/idle loop etc. */
        bool            qs_pending;     /* Core waits for quiesc state. */
        bool            beenonline;     /* CPU online at least once. */
        bool            gpwrap;         /* Possible gpnum/completed wrap. */
        struct rcu_node *mynode;        /* This CPU's leaf of hierarchy */
        unsigned long grpmask;          /* Mask to apply to leaf qsmask. */
        unsigned long   ticks_this_gp;  /* The number of scheduling-clock */
                                        /*  ticks this CPU has handled */
                                        /*  during and after the last grace */
                                        /* period it is aware of. */
        struct cpu_stop_work exp_stop_work;
                                        /* Expedited grace-period control */
                                        /*  for CPU stopping. */

        /* 2) batch handling */
        /*
         * If nxtlist is not NULL, it is partitioned as follows.
         * Any of the partitions might be empty, in which case the
         * pointer to that partition will be equal to the pointer for
         * the following partition.  When the list is empty, all of
         * the nxttail elements point to the ->nxtlist pointer itself,
         * which in that case is NULL.
         *
         * [nxtlist, *nxttail[RCU_DONE_TAIL]):
         *      Entries that batch # <= ->completed
         *      The grace period for these entries has completed, and
         *      the other grace-period-completed entries may be moved
         *      here temporarily in rcu_process_callbacks().
         * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
         *      Entries that batch # <= ->completed - 1: waiting for current GP
         * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
         *      Entries known to have arrived before current GP ended
         * [*nxttail[RCU_NEXT_READY_TAIL], *nxttail[RCU_NEXT_TAIL]):
         *      Entries that might have arrived after current GP ended
         *      Note that the value of *nxttail[RCU_NEXT_TAIL] will
         *      always be NULL, as this is the end of the list.
         */
        struct rcu_head *nxtlist;
        struct rcu_head **nxttail[RCU_NEXT_SIZE];
        unsigned long   nxtcompleted[RCU_NEXT_SIZE];
                                        /* grace periods for sublists. */
        long            qlen_lazy;      /* # of lazy queued callbacks */
        long            qlen;           /* # of queued callbacks, incl lazy */
        long            qlen_last_fqs_check;
                                        /* qlen at last check for QS forcing */
        unsigned long   n_cbs_invoked;  /* count of RCU cbs invoked. */
        unsigned long   n_nocbs_invoked; /* count of no-CBs RCU cbs invoked. */
        unsigned long   n_cbs_orphaned; /* RCU cbs orphaned by dying CPU */
        unsigned long   n_cbs_adopted;  /* RCU cbs adopted from dying CPU */
        unsigned long   n_force_qs_snap;
                                        /* did other CPU force QS recently? */
        long            blimit;         /* Upper limit on a processed batch */

        /* 3) dynticks interface. */
        struct rcu_dynticks *dynticks;  /* Shared per-CPU dynticks state. */
        int dynticks_snap;              /* Per-GP tracking for dynticks. */

        /* 4) reasons this CPU needed to be kicked by force_quiescent_state */
        unsigned long dynticks_fqs;     /* Kicked due to dynticks idle. */
        unsigned long offline_fqs;      /* Kicked due to being offline. */
        unsigned long cond_resched_completed;
                                        /* Grace period that needs help */
                                        /*  from cond_resched(). */

        /* 5) __rcu_pending() statistics. */
        unsigned long n_rcu_pending;    /* rcu_pending() calls since boot. */
        unsigned long n_rp_qs_pending;
        unsigned long n_rp_report_qs;
        unsigned long n_rp_cb_ready;
        unsigned long n_rp_cpu_needs_gp;
        unsigned long n_rp_gp_completed;
        unsigned long n_rp_gp_started;
        unsigned long n_rp_nocb_defer_wakeup;
        unsigned long n_rp_need_nothing;

        /* 6) _rcu_barrier(), OOM callbacks, and expediting. */
        struct rcu_head barrier_head;
#ifdef CONFIG_RCU_FAST_NO_HZ
        struct rcu_head oom_head;
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
        struct mutex exp_funnel_mutex;
        bool exp_done;                  /* Expedited QS for this CPU? */

        /* 7) Callback offloading. */
#ifdef CONFIG_RCU_NOCB_CPU
        struct rcu_head *nocb_head;     /* CBs waiting for kthread. */
        struct rcu_head **nocb_tail;
        atomic_long_t nocb_q_count;     /* # CBs waiting for nocb */
        atomic_long_t nocb_q_count_lazy; /*  invocation (all stages). */
        struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */
        struct rcu_head **nocb_follower_tail;
        wait_queue_head_t nocb_wq;      /* For nocb kthreads to sleep on. */
        struct task_struct *nocb_kthread;
        int nocb_defer_wakeup;          /* Defer wakeup of nocb_kthread. */

        /* The following fields are used by the leader, hence own cacheline. */
        struct rcu_head *nocb_gp_head ____cacheline_internodealigned_in_smp;
                                        /* CBs waiting for GP. */
        struct rcu_head **nocb_gp_tail;
        bool nocb_leader_sleep;         /* Is the nocb leader thread asleep? */
        struct rcu_data *nocb_next_follower;
                                        /* Next follower in wakeup chain. */

        /* The following fields are used by the follower, hence new cacheline. */
        struct rcu_data *nocb_leader ____cacheline_internodealigned_in_smp;
                                        /* Leader CPU takes GP-end wakeups. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */

        /* 8) RCU CPU stall data. */
        unsigned int softirq_snap;      /* Snapshot of softirq activity. */

        int cpu;
        struct rcu_state *rsp;
};

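/*
 * Minimal sketch (hypothetical helper, not part of this header): given
 * the partitioning described above, the done-callbacks sublist is
 * non-empty exactly when ->nxttail[RCU_DONE_TAIL] no longer points at
 * ->nxtlist itself.
 */
static inline bool rcu_have_done_cbs_sketch(struct rcu_data *rdp)
{
        return rdp->nxttail[RCU_DONE_TAIL] != &rdp->nxtlist;
}
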
/* Values for fqs_state field in struct rcu_state. */
#define RCU_GP_IDLE             0       /* No grace period in progress. */
#define RCU_GP_INIT             1       /* Grace period being initialized. */
#define RCU_SAVE_DYNTICK        2       /* Need to scan dyntick state. */
#define RCU_FORCE_QS            3       /* Need to force quiescent state. */
#define RCU_SIGNAL_INIT         RCU_SAVE_DYNTICK

/* Values for nocb_defer_wakeup field in struct rcu_data. */
#define RCU_NOGP_WAKE_NOT       0
#define RCU_NOGP_WAKE           1
#define RCU_NOGP_WAKE_FORCE     2

#define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500))
                                        /* For jiffies_till_first_fqs and */
                                        /*  jiffies_till_next_fqs. */

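/*
 * For example, with HZ = 1000 this evaluates to 1 + 1 + 1 = 3 jiffies,
 * while with HZ = 100 it evaluates to 1 + 0 + 0 = 1 jiffy, so the
 * wall-clock delay before forcing quiescent states stays in the same
 * rough range regardless of the tick frequency.
 */
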
#define RCU_JIFFIES_FQS_DIV     256     /* Very large systems need more */
                                        /*  delay between bouts of */
                                        /*  quiescent-state forcing. */

#define RCU_STALL_RAT_DELAY     2       /* Allow other CPUs time to take */
                                        /*  at least one scheduling clock */
                                        /*  irq before ratting on them. */

#define rcu_wait(cond)                                                  \
do {                                                                    \
        for (;;) {                                                      \
                set_current_state(TASK_INTERRUPTIBLE);                  \
                if (cond)                                               \
                        break;                                          \
                schedule();                                             \
        }                                                               \
        __set_current_state(TASK_RUNNING);                              \
} while (0)

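/*
 * For example, a per-rcu_node boost kthread can use rcu_wait() to sleep
 * until some task actually needs boosting (a usage sketch):
 *
 *      rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
 */
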
/*
 * RCU global state, including node hierarchy.  This hierarchy is
 * represented in "heap" form in a dense array.  The root (first level)
 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
 * and the third level in ->node[m+1] and following (->node[m+1] referenced
 * by ->level[2]).  The number of levels is determined by the number of
 * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
 * consisting of a single rcu_node.
 */
struct rcu_state {
        struct rcu_node node[NUM_RCU_NODES];    /* Hierarchy. */
        struct rcu_node *level[RCU_NUM_LVLS + 1];
                                                /* Hierarchy levels (+1 to */
                                                /*  shut bogus gcc warning) */
        u8 flavor_mask;                         /* bit in flavor mask. */
        struct rcu_data __percpu *rda;          /* Pointer to per-CPU rcu_data. */
        void (*call)(struct rcu_head *head,     /* call_rcu() flavor. */
                     void (*func)(struct rcu_head *head));

        /* The following fields are guarded by the root rcu_node's lock. */

        u8      fqs_state ____cacheline_internodealigned_in_smp;
                                                /* Force QS state. */
        u8      boost;                          /* Subject to priority boost. */
        unsigned long gpnum;                    /* Current gp number. */
        unsigned long completed;                /* # of last completed gp. */
        struct task_struct *gp_kthread;         /* Task for grace periods. */
        wait_queue_head_t gp_wq;                /* Where GP task waits. */
        short gp_flags;                         /* Commands for GP task. */
        short gp_state;                         /* GP kthread sleep state. */

        /* End of fields guarded by root rcu_node's lock. */

        raw_spinlock_t orphan_lock ____cacheline_internodealigned_in_smp;
                                                /* Protect following fields. */
        struct rcu_head *orphan_nxtlist;        /* Orphaned callbacks that */
                                                /*  need a grace period. */
        struct rcu_head **orphan_nxttail;       /* Tail of above. */
        struct rcu_head *orphan_donelist;       /* Orphaned callbacks that */
                                                /*  are ready to invoke. */
        struct rcu_head **orphan_donetail;      /* Tail of above. */
        long qlen_lazy;                         /* Number of lazy callbacks. */
        long qlen;                              /* Total number of callbacks. */
        /* End of fields guarded by orphan_lock. */

        struct mutex barrier_mutex;             /* Guards barrier fields. */
        atomic_t barrier_cpu_count;             /* # CPUs waiting on. */
        struct completion barrier_completion;   /* Wake at barrier end. */
        unsigned long barrier_sequence;         /* ++ at start and end of */
                                                /*  _rcu_barrier(). */
        /* End of fields guarded by barrier_mutex. */

        unsigned long expedited_sequence;       /* Take a ticket. */
        atomic_long_t expedited_workdone0;      /* # done by others #0. */
        atomic_long_t expedited_workdone1;      /* # done by others #1. */
        atomic_long_t expedited_workdone2;      /* # done by others #2. */
        atomic_long_t expedited_workdone3;      /* # done by others #3. */
        atomic_long_t expedited_normal;         /* # fallbacks to normal. */
        atomic_t expedited_need_qs;             /* # CPUs left to check in. */
        wait_queue_head_t expedited_wq;         /* Wait for check-ins. */

        unsigned long jiffies_force_qs;         /* Time at which to invoke */
                                                /*  force_quiescent_state(). */
        unsigned long n_force_qs;               /* Number of calls to */
                                                /*  force_quiescent_state(). */
        unsigned long n_force_qs_lh;            /* ~Number of calls leaving */
                                                /*  due to lock unavailable. */
        unsigned long n_force_qs_ngp;           /* Number of calls leaving */
                                                /*  due to no GP active. */
        unsigned long gp_start;                 /* Time at which GP started, */
                                                /*  but in jiffies. */
        unsigned long gp_activity;              /* Time of last GP kthread */
                                                /*  activity in jiffies. */
        unsigned long jiffies_stall;            /* Time at which to check */
                                                /*  for CPU stalls. */
        unsigned long jiffies_resched;          /* Time at which to resched */
                                                /*  a reluctant CPU. */
        unsigned long n_force_qs_gpstart;       /* Snapshot of n_force_qs at */
                                                /*  GP start. */
        unsigned long gp_max;                   /* Maximum GP duration in */
                                                /*  jiffies. */
        const char *name;                       /* Name of structure. */
        char abbr;                              /* Abbreviated name. */
        struct list_head flavors;               /* List of RCU flavors. */
};

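/*
 * For example (a sketch assuming the two-level, 64-leaf shape worked
 * out earlier): ->node[0] is the root, ->node[1] through ->node[64] are
 * the leaves, ->level[0] == &->node[0], and ->level[1] == &->node[1],
 * so the breadth-first scans above reduce to linear walks of ->node[].
 */
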
/* Values for rcu_state structure's gp_flags field. */
#define RCU_GP_FLAG_INIT 0x1    /* Need grace-period initialization. */
#define RCU_GP_FLAG_FQS  0x2    /* Need grace-period quiescent-state forcing. */

/* Values for rcu_state structure's gp_state field. */
#define RCU_GP_WAIT_INIT 0      /* Initial state. */
#define RCU_GP_WAIT_GPS  1      /* Wait for grace-period start. */
#define RCU_GP_DONE_GPS  2      /* Wait done for grace-period start. */
#define RCU_GP_WAIT_FQS  3      /* Wait for force-quiescent-state time. */
#define RCU_GP_DOING_FQS 4      /* Wait done for force-quiescent-state time. */
#define RCU_GP_CLEANUP   5      /* Grace-period cleanup started. */
#define RCU_GP_CLEANED   6      /* Grace-period cleanup complete. */

extern struct list_head rcu_struct_flavors;

/* Sequence through rcu_state structures for each RCU flavor. */
#define for_each_rcu_flavor(rsp) \
        list_for_each_entry((rsp), &rcu_struct_flavors, flavors)

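/*
 * Minimal sketch (hypothetical helper, not part of this header) of the
 * flavor iterator: total the callbacks queued across every flavor.
 */
static inline long rcu_total_qlen_sketch(void)
{
        struct rcu_state *rsp;
        long qlen = 0;

        for_each_rcu_flavor(rsp)
                qlen += rsp->qlen;
        return qlen;
}
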
/*
 * RCU implementation internal declarations:
 */
extern struct rcu_state rcu_sched_state;

extern struct rcu_state rcu_bh_state;

#ifdef CONFIG_PREEMPT_RCU
extern struct rcu_state rcu_preempt_state;
#endif /* #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu);
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DECLARE_PER_CPU(char, rcu_cpu_has_work);
#endif /* #ifdef CONFIG_RCU_BOOST */

#ifndef RCU_TREE_NONCORE

/* Forward declarations for rcutree_plugin.h */
static void rcu_bootup_announce(void);
static void rcu_preempt_note_context_switch(void);
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_print_detail_task_stall(struct rcu_state *rsp);
static int rcu_print_task_stall(struct rcu_node *rnp);
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
static void rcu_preempt_check_callbacks(void);
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
static void __init __rcu_init_preempt(void);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static void invoke_rcu_callbacks_kthread(void);
static bool rcu_is_callbacks_kthread(void);
#ifdef CONFIG_RCU_BOOST
static void rcu_preempt_do_callbacks(void);
static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
                                                 struct rcu_node *rnp);
#endif /* #ifdef CONFIG_RCU_BOOST */
static void __init rcu_spawn_boost_kthreads(void);
static void rcu_prepare_kthreads(int cpu);
static void rcu_cleanup_after_idle(void);
static void rcu_prepare_for_idle(void);
static void rcu_idle_count_callbacks_posted(void);
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
static void print_cpu_stall_info_begin(void);
static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
static void print_cpu_stall_info_end(void);
static void zero_cpu_stall_ticks(struct rcu_data *rdp);
static void increment_cpu_stall_ticks(void);
static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu);
static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq);
static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp);
static void rcu_init_one_nocb(struct rcu_node *rnp);
static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
                            bool lazy, unsigned long flags);
static bool rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
                                      struct rcu_data *rdp,
                                      unsigned long flags);
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
static void rcu_spawn_all_nocb_kthreads(int cpu);
static void __init rcu_spawn_nocb_kthreads(void);
#ifdef CONFIG_RCU_NOCB_CPU
static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
static bool init_nocb_callback_list(struct rcu_data *rdp);
static void rcu_sysidle_enter(int irq);
static void rcu_sysidle_exit(int irq);
static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
                                  unsigned long *maxj);
static bool is_sysidle_rcu_state(struct rcu_state *rsp);
static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
                                  unsigned long maxj);
static void rcu_bind_gp_kthread(void);
static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp);
static bool rcu_nohz_full_cpu(struct rcu_state *rsp);
static void rcu_dynticks_task_enter(void);
static void rcu_dynticks_task_exit(void);

#endif /* #ifndef RCU_TREE_NONCORE */

#ifdef CONFIG_RCU_TRACE
/* Read out queue lengths for tracing. */
static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
{
#ifdef CONFIG_RCU_NOCB_CPU
        *ql = atomic_long_read(&rdp->nocb_q_count);
        *qll = atomic_long_read(&rdp->nocb_q_count_lazy);
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
        *ql = 0;
        *qll = 0;
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
}
#endif /* #ifdef CONFIG_RCU_TRACE */

/*
 * Place this after a lock-acquisition primitive to guarantee that
 * an UNLOCK+LOCK pair acts as a full barrier.  This guarantee applies
 * if the UNLOCK and LOCK are executed by the same CPU or if the
 * UNLOCK and LOCK operate on the same lock variable.
 */
#ifdef CONFIG_PPC
#define smp_mb__after_unlock_lock()     smp_mb()  /* Full ordering for lock. */
#else /* #ifdef CONFIG_PPC */
#define smp_mb__after_unlock_lock()     do { } while (0)
#endif /* #else #ifdef CONFIG_PPC */

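/*
 * Example usage (a sketch of the typical pattern): acquire an rcu_node
 * lock, then upgrade the prior UNLOCK plus this LOCK to a full memory
 * barrier.
 *
 *      raw_spin_lock_irqsave(&rnp->lock, flags);
 *      smp_mb__after_unlock_lock();
 */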
