TOMOYO Linux Cross Reference
Linux/kernel/rcu/tree_exp.h

/*
 * RCU expedited grace periods
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2016
 *
 * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/lockdep.h>

/*
 * Record the start of an expedited grace period.
 */
static void rcu_exp_gp_seq_start(struct rcu_state *rsp)
{
	rcu_seq_start(&rsp->expedited_sequence);
}

/*
 * Return the value that the expedited-grace-period counter will have
 * at the end of the current grace period.
 */
static __maybe_unused unsigned long rcu_exp_gp_seq_endval(struct rcu_state *rsp)
{
	return rcu_seq_endval(&rsp->expedited_sequence);
}

/*
 * Record the end of an expedited grace period.
 */
static void rcu_exp_gp_seq_end(struct rcu_state *rsp)
{
	rcu_seq_end(&rsp->expedited_sequence);
	smp_mb(); /* Ensure that consecutive grace periods serialize. */
}

/*
 * Take a snapshot of the expedited-grace-period counter.
 */
static unsigned long rcu_exp_gp_seq_snap(struct rcu_state *rsp)
{
	unsigned long s;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	s = rcu_seq_snap(&rsp->expedited_sequence);
	trace_rcu_exp_grace_period(rsp->name, s, TPS("snap"));
	return s;
}

/*
 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
 * if a full expedited grace period has elapsed since that snapshot
 * was taken.
 */
static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
{
	return rcu_seq_done(&rsp->expedited_sequence, s);
}
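
/*
 * Illustrative sketch, not part of the original file: the snap/done pair
 * above supports the usual snapshot-then-poll pattern.  A hypothetical
 * caller wanting to wait out a full expedited grace period could do:
 *
 *	unsigned long s = rcu_exp_gp_seq_snap(rsp);
 *
 *	while (!rcu_exp_gp_seq_done(rsp, s))
 *		schedule_timeout_uninterruptible(1);
 *
 * The real callers below instead sleep on the ->exp_wq wait queues via
 * sync_exp_work_done(), which wraps rcu_exp_gp_seq_done().
 */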

/*
 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 * recent CPU-online activity.  Note that these masks are not cleared
 * when CPUs go offline, so they reflect the union of all CPUs that have
 * ever been online.  This means that this function normally takes its
 * no-work-to-do fastpath.
 */
static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
{
	bool done;
	unsigned long flags;
	unsigned long mask;
	unsigned long oldmask;
	int ncpus = smp_load_acquire(&rsp->ncpus); /* Order against locking. */
	struct rcu_node *rnp;
	struct rcu_node *rnp_up;

	/* If no new CPUs onlined since last time, nothing to do. */
	if (likely(ncpus == rsp->ncpus_snap))
		return;
	rsp->ncpus_snap = ncpus;

	/*
	 * Each pass through the following loop propagates newly onlined
	 * CPUs for the current rcu_node structure up the rcu_node tree.
	 */
	rcu_for_each_leaf_node(rsp, rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmaskinit == rnp->expmaskinitnext) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue;  /* No new CPUs, nothing to do. */
		}

		/* Update this node's mask, track old value for propagation. */
		oldmask = rnp->expmaskinit;
		rnp->expmaskinit = rnp->expmaskinitnext;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

		/* If it was already nonzero, nothing to propagate. */
		if (oldmask)
			continue;

		/* Propagate the new CPU up the tree. */
		mask = rnp->grpmask;
		rnp_up = rnp->parent;
		done = false;
		while (rnp_up) {
			raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
			if (rnp_up->expmaskinit)
				done = true;
			rnp_up->expmaskinit |= mask;
			raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
			if (done)
				break;
			mask = rnp_up->grpmask;
			rnp_up = rnp_up->parent;
		}
	}
}
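
/*
 * Illustrative example, not from the original file: assuming a two-level
 * rcu_node tree with a leaf fanout of 16, onlining CPU 17 for the first
 * time sets bit 1 in the second leaf's ->expmaskinitnext.  Because that
 * leaf's old ->expmaskinit was zero, the loop above ORs the leaf's
 * ->grpmask into the root's ->expmaskinit, stopping at the first level
 * that already knew about this leaf.
 */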

/*
 * Reset the ->expmask values in the rcu_node tree in preparation for
 * a new expedited grace period.
 */
static void __maybe_unused sync_exp_reset_tree(struct rcu_state *rsp)
{
	unsigned long flags;
	struct rcu_node *rnp;

	sync_exp_reset_tree_hotplug(rsp);
	rcu_for_each_node_breadth_first(rsp, rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		WARN_ON_ONCE(rnp->expmask);
		rnp->expmask = rnp->expmaskinit;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

/*
 * Return true if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold the specified rcu_node structure's ->lock.
 */
static bool sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	raw_lockdep_assert_held_rcu_node(rnp);

	return rnp->exp_tasks == NULL &&
	       READ_ONCE(rnp->expmask) == 0;
}

/*
 * Like sync_rcu_preempt_exp_done(), but this function assumes the caller
 * doesn't hold the rcu_node's ->lock, and will acquire and release the
 * lock itself.
 */
static bool sync_rcu_preempt_exp_done_unlocked(struct rcu_node *rnp)
{
	unsigned long flags;
	bool ret;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	ret = sync_rcu_preempt_exp_done(rnp);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	return ret;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (The recursion is actually implemented
 * iteratively, as the loop below shows.)
 *
 * Caller must hold the specified rcu_node structure's ->lock.
 */
static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
				 bool wake, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;

	for (;;) {
		if (!sync_rcu_preempt_exp_done(rnp)) {
			if (!rnp->expmask)
				rcu_initiate_boost(rnp, flags);
			else
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			if (wake) {
				smp_mb(); /* EGP done before wake_up(). */
				swake_up(&rsp->expedited_wq);
			}
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
		WARN_ON_ONCE(!(rnp->expmask & mask));
		rnp->expmask &= ~mask;
	}
}

/*
 * Report expedited quiescent state for specified node.  This is a
 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
 */
static void __maybe_unused rcu_report_exp_rnp(struct rcu_state *rsp,
					      struct rcu_node *rnp, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	__rcu_report_exp_rnp(rsp, rnp, wake, flags);
}

/*
 * Report expedited quiescent state for multiple CPUs, all covered by the
 * specified leaf rcu_node structure.
 */
static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
				    unsigned long mask, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!(rnp->expmask & mask)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	rnp->expmask &= ~mask;
	__rcu_report_exp_rnp(rsp, rnp, wake, flags); /* Releases rnp->lock. */
}

/*
 * Report expedited quiescent state for specified rcu_data (CPU).
 */
static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp,
			       bool wake)
{
	rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, wake);
}

/* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */
static bool sync_exp_work_done(struct rcu_state *rsp, unsigned long s)
{
	if (rcu_exp_gp_seq_done(rsp, s)) {
		trace_rcu_exp_grace_period(rsp->name, s, TPS("done"));
		/* Ensure test happens before caller kfree(). */
		smp_mb__before_atomic(); /* ^^^ */
		return true;
	}
	return false;
}

/*
 * Funnel-lock acquisition for expedited grace periods.  Returns true
 * if some other task completed an expedited grace period that this task
 * can piggy-back on, and with no mutex held.  Otherwise, returns false
 * with the mutex held, indicating that the caller must actually do the
 * expedited grace period.
 */
static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
{
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
	struct rcu_node *rnp = rdp->mynode;
	struct rcu_node *rnp_root = rcu_get_root(rsp);

	/* Low-contention fastpath. */
	if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
	    (rnp == rnp_root ||
	     ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
	    mutex_trylock(&rsp->exp_mutex))
		goto fastpath;

	/*
	 * Each pass through the following loop works its way up
	 * the rcu_node tree, returning if others have done the work or
	 * otherwise falls through to acquire rsp->exp_mutex.  The mapping
	 * from CPU to rcu_node structure can be inexact, as it is just
	 * promoting locality and is not strictly needed for correctness.
	 */
	for (; rnp != NULL; rnp = rnp->parent) {
		if (sync_exp_work_done(rsp, s))
			return true;

		/* Work not done, either wait here or go up. */
		spin_lock(&rnp->exp_lock);
		if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {

			/* Someone else doing GP, so wait for them. */
			spin_unlock(&rnp->exp_lock);
			trace_rcu_exp_funnel_lock(rsp->name, rnp->level,
						  rnp->grplo, rnp->grphi,
						  TPS("wait"));
			wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
				   sync_exp_work_done(rsp, s));
			return true;
		}
		rnp->exp_seq_rq = s; /* Followers can wait on us. */
		spin_unlock(&rnp->exp_lock);
		trace_rcu_exp_funnel_lock(rsp->name, rnp->level, rnp->grplo,
					  rnp->grphi, TPS("nxtlvl"));
	}
	mutex_lock(&rsp->exp_mutex);
fastpath:
	if (sync_exp_work_done(rsp, s)) {
		mutex_unlock(&rsp->exp_mutex);
		return true;
	}
	rcu_exp_gp_seq_start(rsp);
	trace_rcu_exp_grace_period(rsp->name, s, TPS("start"));
	return false;
}
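
/*
 * Illustrative sketch, not part of the original file, of how a caller
 * uses the funnel lock, modeled on _synchronize_rcu_expedited() below:
 *
 *	s = rcu_exp_gp_seq_snap(rsp);
 *	if (exp_funnel_lock(rsp, s))
 *		return;  // Someone else did our work for us.
 *	// ... drive the expedited grace period to completion ...
 *	mutex_unlock(&rsp->exp_mutex);  // Let the next one start.
 *
 * Note that ->exp_wq[] is indexed by rcu_seq_ctr(s) & 0x3, so tasks
 * piggybacking on different recent grace periods hash onto four
 * distinct wait queues per rcu_node structure.
 */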

/* Invoked on each online non-idle CPU for expedited quiescent state. */
static void sync_sched_exp_handler(void *data)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct rcu_state *rsp = data;

	rdp = this_cpu_ptr(rsp->rda);
	rnp = rdp->mynode;
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    __this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
		return;
	if (rcu_is_cpu_rrupt_from_idle()) {
		rcu_report_exp_rdp(&rcu_sched_state,
				   this_cpu_ptr(&rcu_sched_data), true);
		return;
	}
	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true);
	/* Store .exp before .rcu_urgent_qs. */
	smp_store_release(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs), true);
	resched_cpu(smp_processor_id());
}

/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
static void sync_sched_exp_online_cleanup(int cpu)
{
	struct rcu_data *rdp;
	int ret;
	struct rcu_node *rnp;
	struct rcu_state *rsp = &rcu_sched_state;

	rdp = per_cpu_ptr(rsp->rda, cpu);
	rnp = rdp->mynode;
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
		return;
	ret = smp_call_function_single(cpu, sync_sched_exp_handler, rsp, 0);
	WARN_ON_ONCE(ret);
}

/*
 * Select the CPUs within the specified rcu_node that the upcoming
 * expedited grace period needs to wait for.
 */
static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
{
	int cpu;
	unsigned long flags;
	smp_call_func_t func;
	unsigned long mask_ofl_test;
	unsigned long mask_ofl_ipi;
	int ret;
	struct rcu_exp_work *rewp =
		container_of(wp, struct rcu_exp_work, rew_work);
	struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);
	struct rcu_state *rsp = rewp->rew_rsp;

	func = rewp->rew_func;
	raw_spin_lock_irqsave_rcu_node(rnp, flags);

	/* Each pass checks a CPU for identity, offline, and idle. */
	mask_ofl_test = 0;
	for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
		unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
		struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
		struct rcu_dynticks *rdtp = per_cpu_ptr(&rcu_dynticks, cpu);
		int snap;

		if (raw_smp_processor_id() == cpu ||
		    !(rnp->qsmaskinitnext & mask)) {
			mask_ofl_test |= mask;
		} else {
			snap = rcu_dynticks_snap(rdtp);
			if (rcu_dynticks_in_eqs(snap))
				mask_ofl_test |= mask;
			else
				rdp->exp_dynticks_snap = snap;
		}
	}
	mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;

	/*
	 * Need to wait for any blocked tasks as well.  Note that
	 * additional blocking tasks will also block the expedited GP
	 * until such time as the ->expmask bits are cleared.
	 */
	if (rcu_preempt_has_tasks(rnp))
		rnp->exp_tasks = rnp->blkd_tasks.next;
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	/* IPI the remaining CPUs for expedited quiescent state. */
	for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
		unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
		struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);

		if (!(mask_ofl_ipi & mask))
			continue;
retry_ipi:
		if (rcu_dynticks_in_eqs_since(rdp->dynticks,
					      rdp->exp_dynticks_snap)) {
			mask_ofl_test |= mask;
			continue;
		}
		ret = smp_call_function_single(cpu, func, rsp, 0);
		if (!ret) {
			mask_ofl_ipi &= ~mask;
			continue;
		}
		/* Failed, raced with CPU hotplug operation. */
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if ((rnp->qsmaskinitnext & mask) &&
		    (rnp->expmask & mask)) {
			/* Online, so delay for a bit and try again. */
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("selectofl"));
			schedule_timeout_uninterruptible(1);
			goto retry_ipi;
		}
		/* CPU really is offline, so we can ignore it. */
		if (!(rnp->expmask & mask))
			mask_ofl_ipi &= ~mask;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
	/* Report quiescent states for those that went offline. */
	mask_ofl_test |= mask_ofl_ipi;
	if (mask_ofl_test)
		rcu_report_exp_cpu_mult(rsp, rnp, mask_ofl_test, false);
}

/*
 * Select the nodes that the upcoming expedited grace period needs
 * to wait for.
 */
static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
				     smp_call_func_t func)
{
	int cpu;
	struct rcu_node *rnp;

	trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("reset"));
	sync_exp_reset_tree(rsp);
	trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("select"));

	/* Schedule work for each leaf rcu_node structure. */
	rcu_for_each_leaf_node(rsp, rnp) {
		rnp->exp_need_flush = false;
		if (!READ_ONCE(rnp->expmask))
			continue; /* Avoid early boot non-existent wq. */
		rnp->rew.rew_func = func;
		rnp->rew.rew_rsp = rsp;
		if (!READ_ONCE(rcu_par_gp_wq) ||
		    rcu_scheduler_active != RCU_SCHEDULER_RUNNING) {
			/* No workqueues yet. */
			sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
			continue;
		}
		INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
		preempt_disable();
		cpu = cpumask_next(rnp->grplo - 1, cpu_online_mask);
		/* If all offline, queue the work on an unbound CPU. */
		if (unlikely(cpu > rnp->grphi))
			cpu = WORK_CPU_UNBOUND;
		queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
		preempt_enable();
		rnp->exp_need_flush = true;
	}

	/* Wait for workqueue jobs (if any) to complete. */
	rcu_for_each_leaf_node(rsp, rnp)
		if (rnp->exp_need_flush)
			flush_work(&rnp->rew.rew_work);
}

static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
{
	int cpu;
	unsigned long jiffies_stall;
	unsigned long jiffies_start;
	unsigned long mask;
	int ndetected;
	struct rcu_node *rnp;
	struct rcu_node *rnp_root = rcu_get_root(rsp);
	int ret;

	trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("startwait"));
	jiffies_stall = rcu_jiffies_till_stall_check();
	jiffies_start = jiffies;

	for (;;) {
		ret = swait_event_timeout(
				rsp->expedited_wq,
				sync_rcu_preempt_exp_done_unlocked(rnp_root),
				jiffies_stall);
		if (ret > 0 || sync_rcu_preempt_exp_done_unlocked(rnp_root))
			return;
		WARN_ON(ret < 0);  /* workqueues should not be signaled. */
		if (rcu_cpu_stall_suppress)
			continue;
		panic_on_rcu_stall();
		pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
		       rsp->name);
		ndetected = 0;
		rcu_for_each_leaf_node(rsp, rnp) {
			ndetected += rcu_print_task_exp_stall(rnp);
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				struct rcu_data *rdp;

				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(rnp->expmask & mask))
					continue;
				ndetected++;
				rdp = per_cpu_ptr(rsp->rda, cpu);
				pr_cont(" %d-%c%c%c", cpu,
					"O."[!!cpu_online(cpu)],
					"o."[!!(rdp->grpmask & rnp->expmaskinit)],
					"N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
			}
		}
		pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
			jiffies - jiffies_start, rsp->expedited_sequence,
			rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
		if (ndetected) {
			pr_err("blocking rcu_node structures:");
			rcu_for_each_node_breadth_first(rsp, rnp) {
				if (rnp == rnp_root)
					continue; /* printed unconditionally */
				if (sync_rcu_preempt_exp_done_unlocked(rnp))
					continue;
				pr_cont(" l=%u:%d-%d:%#lx/%c",
					rnp->level, rnp->grplo, rnp->grphi,
					rnp->expmask,
					".T"[!!rnp->exp_tasks]);
			}
			pr_cont("\n");
		}
		rcu_for_each_leaf_node(rsp, rnp) {
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(rnp->expmask & mask))
					continue;
				dump_cpu_task(cpu);
			}
		}
		jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
	}
}
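
/*
 * For illustration only (reconstructed from the format strings above,
 * not taken from the original file), a stall report for an online CPU 7
 * that has not yet passed through a quiescent state might look like:
 *
 *	INFO: rcu_sched detected expedited stalls on CPUs/tasks: { 7-... } 26 jiffies s: 1620 root: 0x80/.
 *
 * The three flag characters after the CPU number are 'O' if the CPU is
 * offline, 'o' if its bit is clear in ->expmaskinit, and 'N' if its bit
 * is clear in ->expmaskinitnext, with '.' standing in for each flag
 * that does not apply.
 */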

/*
 * Wait for the current expedited grace period to complete, and then
 * wake up everyone who piggybacked on the just-completed expedited
 * grace period.  Also update all the ->exp_seq_rq counters as needed
 * in order to avoid counter-wrap problems.
 */
static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
{
	struct rcu_node *rnp;

	synchronize_sched_expedited_wait(rsp);
	rcu_exp_gp_seq_end(rsp);
	trace_rcu_exp_grace_period(rsp->name, s, TPS("end"));

	/*
	 * Switch over to wakeup mode, allowing the next GP, but -only- the
	 * next GP, to proceed.
	 */
	mutex_lock(&rsp->exp_wake_mutex);

	rcu_for_each_node_breadth_first(rsp, rnp) {
		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
			spin_lock(&rnp->exp_lock);
			/* Recheck, avoid hang in case someone just arrived. */
			if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
				rnp->exp_seq_rq = s;
			spin_unlock(&rnp->exp_lock);
		}
		smp_mb(); /* All above changes before wakeup. */
		wake_up_all(&rnp->exp_wq[rcu_seq_ctr(rsp->expedited_sequence) & 0x3]);
	}
	trace_rcu_exp_grace_period(rsp->name, s, TPS("endwake"));
	mutex_unlock(&rsp->exp_wake_mutex);
}

/*
 * Common code to drive an expedited grace period forward, used by
 * workqueues and mid-boot-time tasks.
 */
static void rcu_exp_sel_wait_wake(struct rcu_state *rsp,
				  smp_call_func_t func, unsigned long s)
{
	/* Initialize the rcu_node tree in preparation for the wait. */
	sync_rcu_exp_select_cpus(rsp, func);

	/* Wait and clean up, including waking everyone. */
	rcu_exp_wait_wake(rsp, s);
}

/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct work_struct *wp)
{
	struct rcu_exp_work *rewp;

	rewp = container_of(wp, struct rcu_exp_work, rew_work);
	rcu_exp_sel_wait_wake(rewp->rew_rsp, rewp->rew_func, rewp->rew_s);
}

/*
 * Given an rcu_state pointer and a smp_call_function() handler, kick
 * off the specified flavor of expedited grace period.
 */
static void _synchronize_rcu_expedited(struct rcu_state *rsp,
				       smp_call_func_t func)
{
	struct rcu_data *rdp;
	struct rcu_exp_work rew;
	struct rcu_node *rnp;
	unsigned long s;

	/* If expedited grace periods are prohibited, fall back to normal. */
	if (rcu_gp_is_normal()) {
		wait_rcu_gp(rsp->call);
		return;
	}

	/* Take a snapshot of the sequence number. */
	s = rcu_exp_gp_seq_snap(rsp);
	if (exp_funnel_lock(rsp, s))
		return;  /* Someone else did our work for us. */

	/* Ensure that load happens before action based on it. */
	if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
		/* Direct call during scheduler init and early_initcalls(). */
		rcu_exp_sel_wait_wake(rsp, func, s);
	} else {
		/* Marshall arguments & schedule the expedited grace period. */
		rew.rew_func = func;
		rew.rew_rsp = rsp;
		rew.rew_s = s;
		INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
		queue_work(rcu_gp_wq, &rew.rew_work);
	}

	/* Wait for expedited grace period to complete. */
	rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
	rnp = rcu_get_root(rsp);
	wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
		   sync_exp_work_done(rsp, s));
	smp_mb(); /* Workqueue actions happen before return. */

	/* Let the next expedited grace period start. */
	mutex_unlock(&rsp->exp_mutex);
}

/**
 * synchronize_sched_expedited - Brute-force RCU-sched grace period
 *
 * Wait for an RCU-sched grace period to elapse, but use a "big hammer"
 * approach to force the grace period to end quickly.  This consumes
 * significant time on all CPUs and is unfriendly to real-time workloads,
 * and is thus not recommended for any sort of common-case code.  In fact,
 * if you are using synchronize_sched_expedited() in a loop, please
 * restructure your code to batch your updates, and then use a single
 * synchronize_sched() instead.
 *
 * This implementation can be thought of as an application of sequence
 * locking to expedited grace periods, but using the sequence counter to
 * determine when someone else has already done the work instead of for
 * retrying readers.
 */
void synchronize_sched_expedited(void)
{
	struct rcu_state *rsp = &rcu_sched_state;

	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_sched_expedited() in RCU read-side critical section");

	/* If only one CPU, this is automatically a grace period. */
	if (rcu_blocking_is_gp())
		return;

	_synchronize_rcu_expedited(rsp, sync_sched_exp_handler);
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
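
/*
 * Illustrative sketch, not part of the original file, of the batching
 * advice above.  Rather than paying for one expedited grace period per
 * update, as in:
 *
 *	list_for_each_entry(p, &mylist, node) {    // hypothetical list
 *		update_one(p);                     // hypothetical helper
 *		synchronize_sched_expedited();
 *	}
 *
 * apply all updates first and wait once, with no need to expedite:
 *
 *	list_for_each_entry(p, &mylist, node)
 *		update_one(p);
 *	synchronize_sched();
 */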

#ifdef CONFIG_PREEMPT_RCU

/*
 * Remote handler for smp_call_function_single().  If there is an
 * RCU read-side critical section in effect, request that the
 * next rcu_read_unlock() record the quiescent state up the
 * ->expmask fields in the rcu_node tree.  Otherwise, immediately
 * report the quiescent state.
 */
static void sync_rcu_exp_handler(void *info)
{
	struct rcu_data *rdp;
	struct rcu_state *rsp = info;
	struct task_struct *t = current;

	/*
	 * Within an RCU read-side critical section, request that the next
	 * rcu_read_unlock() report.  Unless this RCU read-side critical
	 * section has already blocked, in which case it is already set
	 * up for the expedited grace period to wait on it.
	 */
	if (t->rcu_read_lock_nesting > 0 &&
	    !t->rcu_read_unlock_special.b.blocked) {
		t->rcu_read_unlock_special.b.exp_need_qs = true;
		return;
	}

	/*
	 * We are either exiting an RCU read-side critical section (negative
	 * values of t->rcu_read_lock_nesting) or are not in one at all
	 * (zero value of t->rcu_read_lock_nesting).  Or we are in an RCU
	 * read-side critical section that blocked before this expedited
	 * grace period started.  Either way, we can immediately report
	 * the quiescent state.
	 */
	rdp = this_cpu_ptr(rsp->rda);
	rcu_report_exp_rdp(rsp, rdp, true);
}
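
/*
 * Illustrative sequence, not part of the original file: if the IPI
 * above interrupts a task inside an RCU read-side critical section:
 *
 *	rcu_read_lock();
 *	// ... IPI arrives: handler sets .exp_need_qs and returns ...
 *	do_something();     // hypothetical reader work
 *	rcu_read_unlock();  // outermost unlock reports the expedited QS
 *
 * the quiescent state is reported by rcu_read_unlock() rather than by
 * the handler itself, so the reader runs undisturbed to completion.
 */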

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU-preempt grace period, but expedite it.  The basic
 * idea is to IPI all non-idle non-nohz online CPUs.  The IPI handler
 * checks whether the CPU is in an RCU-preempt critical section, and
 * if so, it sets a flag that causes the outermost rcu_read_unlock()
 * to report the quiescent state.  On the other hand, if the CPU is
 * not in an RCU read-side critical section, the IPI handler reports
 * the quiescent state immediately.
 *
 * Although this is a great improvement over previous expedited
 * implementations, it is still unfriendly to real-time workloads, and
 * is thus not recommended for any sort of common-case code.  In fact,
 * if you are using synchronize_rcu_expedited() in a loop, please
 * restructure your code to batch your updates, and then use a single
 * synchronize_rcu() instead.
 */
void synchronize_rcu_expedited(void)
{
	struct rcu_state *rsp = rcu_state_p;

	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu_expedited() in RCU read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	_synchronize_rcu_expedited(rsp, sync_rcu_exp_handler);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#else /* #ifdef CONFIG_PREEMPT_RCU */

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptible RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
	synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
