1 /* 2 * Read-Copy Update module-based torture test facility 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of the GNU General Public License as published by 6 * the Free Software Foundation; either version 2 of the License, or 7 * (at your option) any later version. 8 * 9 * This program is distributed in the hope that it will be useful, 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * GNU General Public License for more details. 13 * 14 * You should have received a copy of the GNU General Public License 15 * along with this program; if not, write to the Free Software 16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 17 * 18 * Copyright (C) IBM Corporation, 2005, 2006 19 * 20 * Authors: Paul E. McKenney <paulmck@us.ibm.com> 21 * Josh Triplett <josh@freedesktop.org> 22 * 23 * See also: Documentation/RCU/torture.txt 24 */ 25 #include <linux/types.h> 26 #include <linux/kernel.h> 27 #include <linux/init.h> 28 #include <linux/module.h> 29 #include <linux/kthread.h> 30 #include <linux/err.h> 31 #include <linux/spinlock.h> 32 #include <linux/smp.h> 33 #include <linux/rcupdate.h> 34 #include <linux/interrupt.h> 35 #include <linux/sched.h> 36 #include <linux/atomic.h> 37 #include <linux/bitops.h> 38 #include <linux/completion.h> 39 #include <linux/moduleparam.h> 40 #include <linux/percpu.h> 41 #include <linux/notifier.h> 42 #include <linux/reboot.h> 43 #include <linux/freezer.h> 44 #include <linux/cpu.h> 45 #include <linux/delay.h> 46 #include <linux/stat.h> 47 #include <linux/srcu.h> 48 #include <linux/slab.h> 49 #include <linux/trace_clock.h> 50 #include <asm/byteorder.h> 51 52 MODULE_LICENSE("GPL"); 53 MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@freedesktop.org>"); 54 55 MODULE_ALIAS("rcutorture"); 56 #ifdef MODULE_PARAM_PREFIX 57 #undef MODULE_PARAM_PREFIX 58 #endif 59 #define MODULE_PARAM_PREFIX "rcutorture." 60 61 static int fqs_duration; 62 module_param(fqs_duration, int, 0444); 63 MODULE_PARM_DESC(fqs_duration, "Duration of fqs bursts (us), 0 to disable"); 64 static int fqs_holdoff; 65 module_param(fqs_holdoff, int, 0444); 66 MODULE_PARM_DESC(fqs_holdoff, "Holdoff time within fqs bursts (us)"); 67 static int fqs_stutter = 3; 68 module_param(fqs_stutter, int, 0444); 69 MODULE_PARM_DESC(fqs_stutter, "Wait time between fqs bursts (s)"); 70 static bool gp_exp; 71 module_param(gp_exp, bool, 0444); 72 MODULE_PARM_DESC(gp_exp, "Use expedited GP wait primitives"); 73 static bool gp_normal; 74 module_param(gp_normal, bool, 0444); 75 MODULE_PARM_DESC(gp_normal, "Use normal (non-expedited) GP wait primitives"); 76 static int irqreader = 1; 77 module_param(irqreader, int, 0444); 78 MODULE_PARM_DESC(irqreader, "Allow RCU readers from irq handlers"); 79 static int n_barrier_cbs; 80 module_param(n_barrier_cbs, int, 0444); 81 MODULE_PARM_DESC(n_barrier_cbs, "# of callbacks/kthreads for barrier testing"); 82 static int nfakewriters = 4; 83 module_param(nfakewriters, int, 0444); 84 MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads"); 85 static int nreaders = -1; 86 module_param(nreaders, int, 0444); 87 MODULE_PARM_DESC(nreaders, "Number of RCU reader threads"); 88 static int object_debug; 89 module_param(object_debug, int, 0444); 90 MODULE_PARM_DESC(object_debug, "Enable debug-object double call_rcu() testing"); 91 static int onoff_holdoff; 92 module_param(onoff_holdoff, int, 0444); 93 MODULE_PARM_DESC(onoff_holdoff, "Time after boot before CPU hotplugs (s)"); 94 static int onoff_interval; 95 module_param(onoff_interval, int, 0444); 96 MODULE_PARM_DESC(onoff_interval, "Time between CPU hotplugs (s), 0=disable"); 97 static int shuffle_interval = 3; 98 module_param(shuffle_interval, int, 0444); 99 MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles"); 100 static int shutdown_secs; 101 module_param(shutdown_secs, int, 0444); 102 MODULE_PARM_DESC(shutdown_secs, "Shutdown time (s), <= zero to disable."); 103 static int stall_cpu; 104 module_param(stall_cpu, int, 0444); 105 MODULE_PARM_DESC(stall_cpu, "Stall duration (s), zero to disable."); 106 static int stall_cpu_holdoff = 10; 107 module_param(stall_cpu_holdoff, int, 0444); 108 MODULE_PARM_DESC(stall_cpu_holdoff, "Time to wait before starting stall (s)."); 109 static int stat_interval = 60; 110 module_param(stat_interval, int, 0644); 111 MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s"); 112 static int stutter = 5; 113 module_param(stutter, int, 0444); 114 MODULE_PARM_DESC(stutter, "Number of seconds to run/halt test"); 115 static int test_boost = 1; 116 module_param(test_boost, int, 0444); 117 MODULE_PARM_DESC(test_boost, "Test RCU prio boost: 0=no, 1=maybe, 2=yes."); 118 static int test_boost_duration = 4; 119 module_param(test_boost_duration, int, 0444); 120 MODULE_PARM_DESC(test_boost_duration, "Duration of each boost test, seconds."); 121 static int test_boost_interval = 7; 122 module_param(test_boost_interval, int, 0444); 123 MODULE_PARM_DESC(test_boost_interval, "Interval between boost tests, seconds."); 124 static bool test_no_idle_hz = true; 125 module_param(test_no_idle_hz, bool, 0444); 126 MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs"); 127 static char *torture_type = "rcu"; 128 module_param(torture_type, charp, 0444); 129 MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, ...)"); 130 static bool verbose; 131 module_param(verbose, bool, 0444); 132 MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s"); 133 134 #define TORTURE_FLAG "-torture:" 135 #define PRINTK_STRING(s) \ 136 do { pr_alert("%s" TORTURE_FLAG s "\n", torture_type); } while (0) 137 #define VERBOSE_PRINTK_STRING(s) \ 138 do { if (verbose) pr_alert("%s" TORTURE_FLAG s "\n", torture_type); } while (0) 139 #define VERBOSE_PRINTK_ERRSTRING(s) \ 140 do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0) 141 142 static char printk_buf[4096]; 143 144 static int nrealreaders; 145 static struct task_struct *writer_task; 146 static struct task_struct **fakewriter_tasks; 147 static struct task_struct **reader_tasks; 148 static struct task_struct *stats_task; 149 static struct task_struct *shuffler_task; 150 static struct task_struct *stutter_task; 151 static struct task_struct *fqs_task; 152 static struct task_struct *boost_tasks[NR_CPUS]; 153 static struct task_struct *shutdown_task; 154 #ifdef CONFIG_HOTPLUG_CPU 155 static struct task_struct *onoff_task; 156 #endif /* #ifdef CONFIG_HOTPLUG_CPU */ 157 static struct task_struct *stall_task; 158 static struct task_struct **barrier_cbs_tasks; 159 static struct task_struct *barrier_task; 160 161 #define RCU_TORTURE_PIPE_LEN 10 162 163 struct rcu_torture { 164 struct rcu_head rtort_rcu; 165 int rtort_pipe_count; 166 struct list_head rtort_free; 167 int rtort_mbtest; 168 }; 169 170 static LIST_HEAD(rcu_torture_freelist); 171 static struct rcu_torture __rcu *rcu_torture_current; 172 static unsigned long rcu_torture_current_version; 173 static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN]; 174 static DEFINE_SPINLOCK(rcu_torture_lock); 175 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) = 176 { 0 }; 177 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) = 178 { 0 }; 179 static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1]; 180 static atomic_t n_rcu_torture_alloc; 181 static atomic_t n_rcu_torture_alloc_fail; 182 static atomic_t n_rcu_torture_free; 183 static atomic_t n_rcu_torture_mberror; 184 static atomic_t n_rcu_torture_error; 185 static long n_rcu_torture_barrier_error; 186 static long n_rcu_torture_boost_ktrerror; 187 static long n_rcu_torture_boost_rterror; 188 static long n_rcu_torture_boost_failure; 189 static long n_rcu_torture_boosts; 190 static long n_rcu_torture_timers; 191 static long n_offline_attempts; 192 static long n_offline_successes; 193 static unsigned long sum_offline; 194 static int min_offline = -1; 195 static int max_offline; 196 static long n_online_attempts; 197 static long n_online_successes; 198 static unsigned long sum_online; 199 static int min_online = -1; 200 static int max_online; 201 static long n_barrier_attempts; 202 static long n_barrier_successes; 203 static struct list_head rcu_torture_removed; 204 static cpumask_var_t shuffle_tmp_mask; 205 206 static int stutter_pause_test; 207 208 #if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE) 209 #define RCUTORTURE_RUNNABLE_INIT 1 210 #else 211 #define RCUTORTURE_RUNNABLE_INIT 0 212 #endif 213 int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT; 214 module_param(rcutorture_runnable, int, 0444); 215 MODULE_PARM_DESC(rcutorture_runnable, "Start rcutorture at boot"); 216 217 #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) 218 #define rcu_can_boost() 1 219 #else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */ 220 #define rcu_can_boost() 0 221 #endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */ 222 223 #ifdef CONFIG_RCU_TRACE 224 static u64 notrace rcu_trace_clock_local(void) 225 { 226 u64 ts = trace_clock_local(); 227 unsigned long __maybe_unused ts_rem = do_div(ts, NSEC_PER_USEC); 228 return ts; 229 } 230 #else /* #ifdef CONFIG_RCU_TRACE */ 231 static u64 notrace rcu_trace_clock_local(void) 232 { 233 return 0ULL; 234 } 235 #endif /* #else #ifdef CONFIG_RCU_TRACE */ 236 237 static unsigned long shutdown_time; /* jiffies to system shutdown. */ 238 static unsigned long boost_starttime; /* jiffies of next boost test start. */ 239 DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */ 240 /* and boost task create/destroy. */ 241 static atomic_t barrier_cbs_count; /* Barrier callbacks registered. */ 242 static bool barrier_phase; /* Test phase. */ 243 static atomic_t barrier_cbs_invoked; /* Barrier callbacks invoked. */ 244 static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */ 245 static DECLARE_WAIT_QUEUE_HEAD(barrier_wq); 246 247 /* Mediate rmmod and system shutdown. Concurrent rmmod & shutdown illegal! */ 248 249 #define FULLSTOP_DONTSTOP 0 /* Normal operation. */ 250 #define FULLSTOP_SHUTDOWN 1 /* System shutdown with rcutorture running. */ 251 #define FULLSTOP_RMMOD 2 /* Normal rmmod of rcutorture. */ 252 static int fullstop = FULLSTOP_RMMOD; 253 /* 254 * Protect fullstop transitions and spawning of kthreads. 255 */ 256 static DEFINE_MUTEX(fullstop_mutex); 257 258 /* Forward reference. */ 259 static void rcu_torture_cleanup(void); 260 261 /* 262 * Detect and respond to a system shutdown. 263 */ 264 static int 265 rcutorture_shutdown_notify(struct notifier_block *unused1, 266 unsigned long unused2, void *unused3) 267 { 268 mutex_lock(&fullstop_mutex); 269 if (fullstop == FULLSTOP_DONTSTOP) 270 fullstop = FULLSTOP_SHUTDOWN; 271 else 272 pr_warn(/* but going down anyway, so... */ 273 "Concurrent 'rmmod rcutorture' and shutdown illegal!\n"); 274 mutex_unlock(&fullstop_mutex); 275 return NOTIFY_DONE; 276 } 277 278 /* 279 * Absorb kthreads into a kernel function that won't return, so that 280 * they won't ever access module text or data again. 281 */ 282 static void rcutorture_shutdown_absorb(const char *title) 283 { 284 if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) { 285 pr_notice( 286 "rcutorture thread %s parking due to system shutdown\n", 287 title); 288 schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT); 289 } 290 } 291 292 /* 293 * Allocate an element from the rcu_tortures pool. 294 */ 295 static struct rcu_torture * 296 rcu_torture_alloc(void) 297 { 298 struct list_head *p; 299 300 spin_lock_bh(&rcu_torture_lock); 301 if (list_empty(&rcu_torture_freelist)) { 302 atomic_inc(&n_rcu_torture_alloc_fail); 303 spin_unlock_bh(&rcu_torture_lock); 304 return NULL; 305 } 306 atomic_inc(&n_rcu_torture_alloc); 307 p = rcu_torture_freelist.next; 308 list_del_init(p); 309 spin_unlock_bh(&rcu_torture_lock); 310 return container_of(p, struct rcu_torture, rtort_free); 311 } 312 313 /* 314 * Free an element to the rcu_tortures pool. 315 */ 316 static void 317 rcu_torture_free(struct rcu_torture *p) 318 { 319 atomic_inc(&n_rcu_torture_free); 320 spin_lock_bh(&rcu_torture_lock); 321 list_add_tail(&p->rtort_free, &rcu_torture_freelist); 322 spin_unlock_bh(&rcu_torture_lock); 323 } 324 325 struct rcu_random_state { 326 unsigned long rrs_state; 327 long rrs_count; 328 }; 329 330 #define RCU_RANDOM_MULT 39916801 /* prime */ 331 #define RCU_RANDOM_ADD 479001701 /* prime */ 332 #define RCU_RANDOM_REFRESH 10000 333 334 #define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 } 335 336 /* 337 * Crude but fast random-number generator. Uses a linear congruential 338 * generator, with occasional help from cpu_clock(). 339 */ 340 static unsigned long 341 rcu_random(struct rcu_random_state *rrsp) 342 { 343 if (--rrsp->rrs_count < 0) { 344 rrsp->rrs_state += (unsigned long)local_clock(); 345 rrsp->rrs_count = RCU_RANDOM_REFRESH; 346 } 347 rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD; 348 return swahw32(rrsp->rrs_state); 349 } 350 351 static void 352 rcu_stutter_wait(const char *title) 353 { 354 while (stutter_pause_test || !rcutorture_runnable) { 355 if (rcutorture_runnable) 356 schedule_timeout_interruptible(1); 357 else 358 schedule_timeout_interruptible(round_jiffies_relative(HZ)); 359 rcutorture_shutdown_absorb(title); 360 } 361 } 362 363 /* 364 * Operations vector for selecting different types of tests. 365 */ 366 367 struct rcu_torture_ops { 368 void (*init)(void); 369 int (*readlock)(void); 370 void (*read_delay)(struct rcu_random_state *rrsp); 371 void (*readunlock)(int idx); 372 int (*completed)(void); 373 void (*deferred_free)(struct rcu_torture *p); 374 void (*sync)(void); 375 void (*exp_sync)(void); 376 void (*call)(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); 377 void (*cb_barrier)(void); 378 void (*fqs)(void); 379 int (*stats)(char *page); 380 int irq_capable; 381 int can_boost; 382 const char *name; 383 }; 384 385 static struct rcu_torture_ops *cur_ops; 386 387 /* 388 * Definitions for rcu torture testing. 389 */ 390 391 static int rcu_torture_read_lock(void) __acquires(RCU) 392 { 393 rcu_read_lock(); 394 return 0; 395 } 396 397 static void rcu_read_delay(struct rcu_random_state *rrsp) 398 { 399 const unsigned long shortdelay_us = 200; 400 const unsigned long longdelay_ms = 50; 401 402 /* We want a short delay sometimes to make a reader delay the grace 403 * period, and we want a long delay occasionally to trigger 404 * force_quiescent_state. */ 405 406 if (!(rcu_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) 407 mdelay(longdelay_ms); 408 if (!(rcu_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) 409 udelay(shortdelay_us); 410 #ifdef CONFIG_PREEMPT 411 if (!preempt_count() && !(rcu_random(rrsp) % (nrealreaders * 20000))) 412 preempt_schedule(); /* No QS if preempt_disable() in effect */ 413 #endif 414 } 415 416 static void rcu_torture_read_unlock(int idx) __releases(RCU) 417 { 418 rcu_read_unlock(); 419 } 420 421 static int rcu_torture_completed(void) 422 { 423 return rcu_batches_completed(); 424 } 425 426 static void 427 rcu_torture_cb(struct rcu_head *p) 428 { 429 int i; 430 struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu); 431 432 if (fullstop != FULLSTOP_DONTSTOP) { 433 /* Test is ending, just drop callbacks on the floor. */ 434 /* The next initialization will pick up the pieces. */ 435 return; 436 } 437 i = rp->rtort_pipe_count; 438 if (i > RCU_TORTURE_PIPE_LEN) 439 i = RCU_TORTURE_PIPE_LEN; 440 atomic_inc(&rcu_torture_wcount[i]); 441 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) { 442 rp->rtort_mbtest = 0; 443 rcu_torture_free(rp); 444 } else { 445 cur_ops->deferred_free(rp); 446 } 447 } 448 449 static int rcu_no_completed(void) 450 { 451 return 0; 452 } 453 454 static void rcu_torture_deferred_free(struct rcu_torture *p) 455 { 456 call_rcu(&p->rtort_rcu, rcu_torture_cb); 457 } 458 459 static void rcu_sync_torture_init(void) 460 { 461 INIT_LIST_HEAD(&rcu_torture_removed); 462 } 463 464 static struct rcu_torture_ops rcu_ops = { 465 .init = rcu_sync_torture_init, 466 .readlock = rcu_torture_read_lock, 467 .read_delay = rcu_read_delay, 468 .readunlock = rcu_torture_read_unlock, 469 .completed = rcu_torture_completed, 470 .deferred_free = rcu_torture_deferred_free, 471 .sync = synchronize_rcu, 472 .exp_sync = synchronize_rcu_expedited, 473 .call = call_rcu, 474 .cb_barrier = rcu_barrier, 475 .fqs = rcu_force_quiescent_state, 476 .stats = NULL, 477 .irq_capable = 1, 478 .can_boost = rcu_can_boost(), 479 .name = "rcu" 480 }; 481 482 /* 483 * Definitions for rcu_bh torture testing. 484 */ 485 486 static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH) 487 { 488 rcu_read_lock_bh(); 489 return 0; 490 } 491 492 static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH) 493 { 494 rcu_read_unlock_bh(); 495 } 496 497 static int rcu_bh_torture_completed(void) 498 { 499 return rcu_batches_completed_bh(); 500 } 501 502 static void rcu_bh_torture_deferred_free(struct rcu_torture *p) 503 { 504 call_rcu_bh(&p->rtort_rcu, rcu_torture_cb); 505 } 506 507 static struct rcu_torture_ops rcu_bh_ops = { 508 .init = rcu_sync_torture_init, 509 .readlock = rcu_bh_torture_read_lock, 510 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 511 .readunlock = rcu_bh_torture_read_unlock, 512 .completed = rcu_bh_torture_completed, 513 .deferred_free = rcu_bh_torture_deferred_free, 514 .sync = synchronize_rcu_bh, 515 .exp_sync = synchronize_rcu_bh_expedited, 516 .call = call_rcu_bh, 517 .cb_barrier = rcu_barrier_bh, 518 .fqs = rcu_bh_force_quiescent_state, 519 .stats = NULL, 520 .irq_capable = 1, 521 .name = "rcu_bh" 522 }; 523 524 /* 525 * Definitions for srcu torture testing. 526 */ 527 528 DEFINE_STATIC_SRCU(srcu_ctl); 529 530 static int srcu_torture_read_lock(void) __acquires(&srcu_ctl) 531 { 532 return srcu_read_lock(&srcu_ctl); 533 } 534 535 static void srcu_read_delay(struct rcu_random_state *rrsp) 536 { 537 long delay; 538 const long uspertick = 1000000 / HZ; 539 const long longdelay = 10; 540 541 /* We want there to be long-running readers, but not all the time. */ 542 543 delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick); 544 if (!delay) 545 schedule_timeout_interruptible(longdelay); 546 else 547 rcu_read_delay(rrsp); 548 } 549 550 static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl) 551 { 552 srcu_read_unlock(&srcu_ctl, idx); 553 } 554 555 static int srcu_torture_completed(void) 556 { 557 return srcu_batches_completed(&srcu_ctl); 558 } 559 560 static void srcu_torture_deferred_free(struct rcu_torture *rp) 561 { 562 call_srcu(&srcu_ctl, &rp->rtort_rcu, rcu_torture_cb); 563 } 564 565 static void srcu_torture_synchronize(void) 566 { 567 synchronize_srcu(&srcu_ctl); 568 } 569 570 static void srcu_torture_call(struct rcu_head *head, 571 void (*func)(struct rcu_head *head)) 572 { 573 call_srcu(&srcu_ctl, head, func); 574 } 575 576 static void srcu_torture_barrier(void) 577 { 578 srcu_barrier(&srcu_ctl); 579 } 580 581 static int srcu_torture_stats(char *page) 582 { 583 int cnt = 0; 584 int cpu; 585 int idx = srcu_ctl.completed & 0x1; 586 587 cnt += sprintf(&page[cnt], "%s%s per-CPU(idx=%d):", 588 torture_type, TORTURE_FLAG, idx); 589 for_each_possible_cpu(cpu) { 590 cnt += sprintf(&page[cnt], " %d(%lu,%lu)", cpu, 591 per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx], 592 per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]); 593 } 594 cnt += sprintf(&page[cnt], "\n"); 595 return cnt; 596 } 597 598 static void srcu_torture_synchronize_expedited(void) 599 { 600 synchronize_srcu_expedited(&srcu_ctl); 601 } 602 603 static struct rcu_torture_ops srcu_ops = { 604 .init = rcu_sync_torture_init, 605 .readlock = srcu_torture_read_lock, 606 .read_delay = srcu_read_delay, 607 .readunlock = srcu_torture_read_unlock, 608 .completed = srcu_torture_completed, 609 .deferred_free = srcu_torture_deferred_free, 610 .sync = srcu_torture_synchronize, 611 .exp_sync = srcu_torture_synchronize_expedited, 612 .call = srcu_torture_call, 613 .cb_barrier = srcu_torture_barrier, 614 .stats = srcu_torture_stats, 615 .name = "srcu" 616 }; 617 618 /* 619 * Definitions for sched torture testing. 620 */ 621 622 static int sched_torture_read_lock(void) 623 { 624 preempt_disable(); 625 return 0; 626 } 627 628 static void sched_torture_read_unlock(int idx) 629 { 630 preempt_enable(); 631 } 632 633 static void rcu_sched_torture_deferred_free(struct rcu_torture *p) 634 { 635 call_rcu_sched(&p->rtort_rcu, rcu_torture_cb); 636 } 637 638 static struct rcu_torture_ops sched_ops = { 639 .init = rcu_sync_torture_init, 640 .readlock = sched_torture_read_lock, 641 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 642 .readunlock = sched_torture_read_unlock, 643 .completed = rcu_no_completed, 644 .deferred_free = rcu_sched_torture_deferred_free, 645 .sync = synchronize_sched, 646 .exp_sync = synchronize_sched_expedited, 647 .call = call_rcu_sched, 648 .cb_barrier = rcu_barrier_sched, 649 .fqs = rcu_sched_force_quiescent_state, 650 .stats = NULL, 651 .irq_capable = 1, 652 .name = "sched" 653 }; 654 655 /* 656 * RCU torture priority-boost testing. Runs one real-time thread per 657 * CPU for moderate bursts, repeatedly registering RCU callbacks and 658 * spinning waiting for them to be invoked. If a given callback takes 659 * too long to be invoked, we assume that priority inversion has occurred. 660 */ 661 662 struct rcu_boost_inflight { 663 struct rcu_head rcu; 664 int inflight; 665 }; 666 667 static void rcu_torture_boost_cb(struct rcu_head *head) 668 { 669 struct rcu_boost_inflight *rbip = 670 container_of(head, struct rcu_boost_inflight, rcu); 671 672 smp_mb(); /* Ensure RCU-core accesses precede clearing ->inflight */ 673 rbip->inflight = 0; 674 } 675 676 static int rcu_torture_boost(void *arg) 677 { 678 unsigned long call_rcu_time; 679 unsigned long endtime; 680 unsigned long oldstarttime; 681 struct rcu_boost_inflight rbi = { .inflight = 0 }; 682 struct sched_param sp; 683 684 VERBOSE_PRINTK_STRING("rcu_torture_boost started"); 685 686 /* Set real-time priority. */ 687 sp.sched_priority = 1; 688 if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) { 689 VERBOSE_PRINTK_STRING("rcu_torture_boost RT prio failed!"); 690 n_rcu_torture_boost_rterror++; 691 } 692 693 init_rcu_head_on_stack(&rbi.rcu); 694 /* Each pass through the following loop does one boost-test cycle. */ 695 do { 696 /* Wait for the next test interval. */ 697 oldstarttime = boost_starttime; 698 while (ULONG_CMP_LT(jiffies, oldstarttime)) { 699 schedule_timeout_interruptible(oldstarttime - jiffies); 700 rcu_stutter_wait("rcu_torture_boost"); 701 if (kthread_should_stop() || 702 fullstop != FULLSTOP_DONTSTOP) 703 goto checkwait; 704 } 705 706 /* Do one boost-test interval. */ 707 endtime = oldstarttime + test_boost_duration * HZ; 708 call_rcu_time = jiffies; 709 while (ULONG_CMP_LT(jiffies, endtime)) { 710 /* If we don't have a callback in flight, post one. */ 711 if (!rbi.inflight) { 712 smp_mb(); /* RCU core before ->inflight = 1. */ 713 rbi.inflight = 1; 714 call_rcu(&rbi.rcu, rcu_torture_boost_cb); 715 if (jiffies - call_rcu_time > 716 test_boost_duration * HZ - HZ / 2) { 717 VERBOSE_PRINTK_STRING("rcu_torture_boost boosting failed"); 718 n_rcu_torture_boost_failure++; 719 } 720 call_rcu_time = jiffies; 721 } 722 cond_resched(); 723 rcu_stutter_wait("rcu_torture_boost"); 724 if (kthread_should_stop() || 725 fullstop != FULLSTOP_DONTSTOP) 726 goto checkwait; 727 } 728 729 /* 730 * Set the start time of the next test interval. 731 * Yes, this is vulnerable to long delays, but such 732 * delays simply cause a false negative for the next 733 * interval. Besides, we are running at RT priority, 734 * so delays should be relatively rare. 735 */ 736 while (oldstarttime == boost_starttime && 737 !kthread_should_stop()) { 738 if (mutex_trylock(&boost_mutex)) { 739 boost_starttime = jiffies + 740 test_boost_interval * HZ; 741 n_rcu_torture_boosts++; 742 mutex_unlock(&boost_mutex); 743 break; 744 } 745 schedule_timeout_uninterruptible(1); 746 } 747 748 /* Go do the stutter. */ 749 checkwait: rcu_stutter_wait("rcu_torture_boost"); 750 } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP); 751 752 /* Clean up and exit. */ 753 VERBOSE_PRINTK_STRING("rcu_torture_boost task stopping"); 754 rcutorture_shutdown_absorb("rcu_torture_boost"); 755 while (!kthread_should_stop() || rbi.inflight) 756 schedule_timeout_uninterruptible(1); 757 smp_mb(); /* order accesses to ->inflight before stack-frame death. */ 758 destroy_rcu_head_on_stack(&rbi.rcu); 759 return 0; 760 } 761 762 /* 763 * RCU torture force-quiescent-state kthread. Repeatedly induces 764 * bursts of calls to force_quiescent_state(), increasing the probability 765 * of occurrence of some important types of race conditions. 766 */ 767 static int 768 rcu_torture_fqs(void *arg) 769 { 770 unsigned long fqs_resume_time; 771 int fqs_burst_remaining; 772 773 VERBOSE_PRINTK_STRING("rcu_torture_fqs task started"); 774 do { 775 fqs_resume_time = jiffies + fqs_stutter * HZ; 776 while (ULONG_CMP_LT(jiffies, fqs_resume_time) && 777 !kthread_should_stop()) { 778 schedule_timeout_interruptible(1); 779 } 780 fqs_burst_remaining = fqs_duration; 781 while (fqs_burst_remaining > 0 && 782 !kthread_should_stop()) { 783 cur_ops->fqs(); 784 udelay(fqs_holdoff); 785 fqs_burst_remaining -= fqs_holdoff; 786 } 787 rcu_stutter_wait("rcu_torture_fqs"); 788 } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP); 789 VERBOSE_PRINTK_STRING("rcu_torture_fqs task stopping"); 790 rcutorture_shutdown_absorb("rcu_torture_fqs"); 791 while (!kthread_should_stop()) 792 schedule_timeout_uninterruptible(1); 793 return 0; 794 } 795 796 /* 797 * RCU torture writer kthread. Repeatedly substitutes a new structure 798 * for that pointed to by rcu_torture_current, freeing the old structure 799 * after a series of grace periods (the "pipeline"). 800 */ 801 static int 802 rcu_torture_writer(void *arg) 803 { 804 bool exp; 805 int i; 806 struct rcu_torture *rp; 807 struct rcu_torture *rp1; 808 struct rcu_torture *old_rp; 809 static DEFINE_RCU_RANDOM(rand); 810 811 VERBOSE_PRINTK_STRING("rcu_torture_writer task started"); 812 set_user_nice(current, 19); 813 814 do { 815 schedule_timeout_uninterruptible(1); 816 rp = rcu_torture_alloc(); 817 if (rp == NULL) 818 continue; 819 rp->rtort_pipe_count = 0; 820 udelay(rcu_random(&rand) & 0x3ff); 821 old_rp = rcu_dereference_check(rcu_torture_current, 822 current == writer_task); 823 rp->rtort_mbtest = 1; 824 rcu_assign_pointer(rcu_torture_current, rp); 825 smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */ 826 if (old_rp) { 827 i = old_rp->rtort_pipe_count; 828 if (i > RCU_TORTURE_PIPE_LEN) 829 i = RCU_TORTURE_PIPE_LEN; 830 atomic_inc(&rcu_torture_wcount[i]); 831 old_rp->rtort_pipe_count++; 832 if (gp_normal == gp_exp) 833 exp = !!(rcu_random(&rand) & 0x80); 834 else 835 exp = gp_exp; 836 if (!exp) { 837 cur_ops->deferred_free(old_rp); 838 } else { 839 cur_ops->exp_sync(); 840 list_add(&old_rp->rtort_free, 841 &rcu_torture_removed); 842 list_for_each_entry_safe(rp, rp1, 843 &rcu_torture_removed, 844 rtort_free) { 845 i = rp->rtort_pipe_count; 846 if (i > RCU_TORTURE_PIPE_LEN) 847 i = RCU_TORTURE_PIPE_LEN; 848 atomic_inc(&rcu_torture_wcount[i]); 849 if (++rp->rtort_pipe_count >= 850 RCU_TORTURE_PIPE_LEN) { 851 rp->rtort_mbtest = 0; 852 list_del(&rp->rtort_free); 853 rcu_torture_free(rp); 854 } 855 } 856 } 857 } 858 rcutorture_record_progress(++rcu_torture_current_version); 859 rcu_stutter_wait("rcu_torture_writer"); 860 } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP); 861 VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping"); 862 rcutorture_shutdown_absorb("rcu_torture_writer"); 863 while (!kthread_should_stop()) 864 schedule_timeout_uninterruptible(1); 865 return 0; 866 } 867 868 /* 869 * RCU torture fake writer kthread. Repeatedly calls sync, with a random 870 * delay between calls. 871 */ 872 static int 873 rcu_torture_fakewriter(void *arg) 874 { 875 DEFINE_RCU_RANDOM(rand); 876 877 VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started"); 878 set_user_nice(current, 19); 879 880 do { 881 schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10); 882 udelay(rcu_random(&rand) & 0x3ff); 883 if (cur_ops->cb_barrier != NULL && 884 rcu_random(&rand) % (nfakewriters * 8) == 0) { 885 cur_ops->cb_barrier(); 886 } else if (gp_normal == gp_exp) { 887 if (rcu_random(&rand) & 0x80) 888 cur_ops->sync(); 889 else 890 cur_ops->exp_sync(); 891 } else if (gp_normal) { 892 cur_ops->sync(); 893 } else { 894 cur_ops->exp_sync(); 895 } 896 rcu_stutter_wait("rcu_torture_fakewriter"); 897 } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP); 898 899 VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping"); 900 rcutorture_shutdown_absorb("rcu_torture_fakewriter"); 901 while (!kthread_should_stop()) 902 schedule_timeout_uninterruptible(1); 903 return 0; 904 } 905 906 void rcutorture_trace_dump(void) 907 { 908 static atomic_t beenhere = ATOMIC_INIT(0); 909 910 if (atomic_read(&beenhere)) 911 return; 912 if (atomic_xchg(&beenhere, 1) != 0) 913 return; 914 ftrace_dump(DUMP_ALL); 915 } 916 917 /* 918 * RCU torture reader from timer handler. Dereferences rcu_torture_current, 919 * incrementing the corresponding element of the pipeline array. The 920 * counter in the element should never be greater than 1, otherwise, the 921 * RCU implementation is broken. 922 */ 923 static void rcu_torture_timer(unsigned long unused) 924 { 925 int idx; 926 int completed; 927 int completed_end; 928 static DEFINE_RCU_RANDOM(rand); 929 static DEFINE_SPINLOCK(rand_lock); 930 struct rcu_torture *p; 931 int pipe_count; 932 unsigned long long ts; 933 934 idx = cur_ops->readlock(); 935 completed = cur_ops->completed(); 936 ts = rcu_trace_clock_local(); 937 p = rcu_dereference_check(rcu_torture_current, 938 rcu_read_lock_bh_held() || 939 rcu_read_lock_sched_held() || 940 srcu_read_lock_held(&srcu_ctl)); 941 if (p == NULL) { 942 /* Leave because rcu_torture_writer is not yet underway */ 943 cur_ops->readunlock(idx); 944 return; 945 } 946 if (p->rtort_mbtest == 0) 947 atomic_inc(&n_rcu_torture_mberror); 948 spin_lock(&rand_lock); 949 cur_ops->read_delay(&rand); 950 n_rcu_torture_timers++; 951 spin_unlock(&rand_lock); 952 preempt_disable(); 953 pipe_count = p->rtort_pipe_count; 954 if (pipe_count > RCU_TORTURE_PIPE_LEN) { 955 /* Should not happen, but... */ 956 pipe_count = RCU_TORTURE_PIPE_LEN; 957 } 958 completed_end = cur_ops->completed(); 959 if (pipe_count > 1) { 960 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts, 961 completed, completed_end); 962 rcutorture_trace_dump(); 963 } 964 __this_cpu_inc(rcu_torture_count[pipe_count]); 965 completed = completed_end - completed; 966 if (completed > RCU_TORTURE_PIPE_LEN) { 967 /* Should not happen, but... */ 968 completed = RCU_TORTURE_PIPE_LEN; 969 } 970 __this_cpu_inc(rcu_torture_batch[completed]); 971 preempt_enable(); 972 cur_ops->readunlock(idx); 973 } 974 975 /* 976 * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current, 977 * incrementing the corresponding element of the pipeline array. The 978 * counter in the element should never be greater than 1, otherwise, the 979 * RCU implementation is broken. 980 */ 981 static int 982 rcu_torture_reader(void *arg) 983 { 984 int completed; 985 int completed_end; 986 int idx; 987 DEFINE_RCU_RANDOM(rand); 988 struct rcu_torture *p; 989 int pipe_count; 990 struct timer_list t; 991 unsigned long long ts; 992 993 VERBOSE_PRINTK_STRING("rcu_torture_reader task started"); 994 set_user_nice(current, 19); 995 if (irqreader && cur_ops->irq_capable) 996 setup_timer_on_stack(&t, rcu_torture_timer, 0); 997 998 do { 999 if (irqreader && cur_ops->irq_capable) { 1000 if (!timer_pending(&t)) 1001 mod_timer(&t, jiffies + 1); 1002 } 1003 idx = cur_ops->readlock(); 1004 completed = cur_ops->completed(); 1005 ts = rcu_trace_clock_local(); 1006 p = rcu_dereference_check(rcu_torture_current, 1007 rcu_read_lock_bh_held() || 1008 rcu_read_lock_sched_held() || 1009 srcu_read_lock_held(&srcu_ctl)); 1010 if (p == NULL) { 1011 /* Wait for rcu_torture_writer to get underway */ 1012 cur_ops->readunlock(idx); 1013 schedule_timeout_interruptible(HZ); 1014 continue; 1015 } 1016 if (p->rtort_mbtest == 0) 1017 atomic_inc(&n_rcu_torture_mberror); 1018 cur_ops->read_delay(&rand); 1019 preempt_disable(); 1020 pipe_count = p->rtort_pipe_count; 1021 if (pipe_count > RCU_TORTURE_PIPE_LEN) { 1022 /* Should not happen, but... */ 1023 pipe_count = RCU_TORTURE_PIPE_LEN; 1024 } 1025 completed_end = cur_ops->completed(); 1026 if (pipe_count > 1) { 1027 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, 1028 ts, completed, completed_end); 1029 rcutorture_trace_dump(); 1030 } 1031 __this_cpu_inc(rcu_torture_count[pipe_count]); 1032 completed = completed_end - completed; 1033 if (completed > RCU_TORTURE_PIPE_LEN) { 1034 /* Should not happen, but... */ 1035 completed = RCU_TORTURE_PIPE_LEN; 1036 } 1037 __this_cpu_inc(rcu_torture_batch[completed]); 1038 preempt_enable(); 1039 cur_ops->readunlock(idx); 1040 schedule(); 1041 rcu_stutter_wait("rcu_torture_reader"); 1042 } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP); 1043 VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping"); 1044 rcutorture_shutdown_absorb("rcu_torture_reader"); 1045 if (irqreader && cur_ops->irq_capable) 1046 del_timer_sync(&t); 1047 while (!kthread_should_stop()) 1048 schedule_timeout_uninterruptible(1); 1049 return 0; 1050 } 1051 1052 /* 1053 * Create an RCU-torture statistics message in the specified buffer. 1054 */ 1055 static int 1056 rcu_torture_printk(char *page) 1057 { 1058 int cnt = 0; 1059 int cpu; 1060 int i; 1061 long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; 1062 long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; 1063 1064 for_each_possible_cpu(cpu) { 1065 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 1066 pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i]; 1067 batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i]; 1068 } 1069 } 1070 for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) { 1071 if (pipesummary[i] != 0) 1072 break; 1073 } 1074 cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG); 1075 cnt += sprintf(&page[cnt], 1076 "rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ", 1077 rcu_torture_current, 1078 rcu_torture_current_version, 1079 list_empty(&rcu_torture_freelist), 1080 atomic_read(&n_rcu_torture_alloc), 1081 atomic_read(&n_rcu_torture_alloc_fail), 1082 atomic_read(&n_rcu_torture_free)); 1083 cnt += sprintf(&page[cnt], "rtmbe: %d rtbke: %ld rtbre: %ld ", 1084 atomic_read(&n_rcu_torture_mberror), 1085 n_rcu_torture_boost_ktrerror, 1086 n_rcu_torture_boost_rterror); 1087 cnt += sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ", 1088 n_rcu_torture_boost_failure, 1089 n_rcu_torture_boosts, 1090 n_rcu_torture_timers); 1091 cnt += sprintf(&page[cnt], 1092 "onoff: %ld/%ld:%ld/%ld %d,%d:%d,%d %lu:%lu (HZ=%d) ", 1093 n_online_successes, n_online_attempts, 1094 n_offline_successes, n_offline_attempts, 1095 min_online, max_online, 1096 min_offline, max_offline, 1097 sum_online, sum_offline, HZ); 1098 cnt += sprintf(&page[cnt], "barrier: %ld/%ld:%ld", 1099 n_barrier_successes, 1100 n_barrier_attempts, 1101 n_rcu_torture_barrier_error); 1102 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG); 1103 if (atomic_read(&n_rcu_torture_mberror) != 0 || 1104 n_rcu_torture_barrier_error != 0 || 1105 n_rcu_torture_boost_ktrerror != 0 || 1106 n_rcu_torture_boost_rterror != 0 || 1107 n_rcu_torture_boost_failure != 0 || 1108 i > 1) { 1109 cnt += sprintf(&page[cnt], "!!! "); 1110 atomic_inc(&n_rcu_torture_error); 1111 WARN_ON_ONCE(1); 1112 } 1113 cnt += sprintf(&page[cnt], "Reader Pipe: "); 1114 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 1115 cnt += sprintf(&page[cnt], " %ld", pipesummary[i]); 1116 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG); 1117 cnt += sprintf(&page[cnt], "Reader Batch: "); 1118 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 1119 cnt += sprintf(&page[cnt], " %ld", batchsummary[i]); 1120 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG); 1121 cnt += sprintf(&page[cnt], "Free-Block Circulation: "); 1122 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 1123 cnt += sprintf(&page[cnt], " %d", 1124 atomic_read(&rcu_torture_wcount[i])); 1125 } 1126 cnt += sprintf(&page[cnt], "\n"); 1127 if (cur_ops->stats) 1128 cnt += cur_ops->stats(&page[cnt]); 1129 return cnt; 1130 } 1131 1132 /* 1133 * Print torture statistics. Caller must ensure that there is only 1134 * one call to this function at a given time!!! This is normally 1135 * accomplished by relying on the module system to only have one copy 1136 * of the module loaded, and then by giving the rcu_torture_stats 1137 * kthread full control (or the init/cleanup functions when rcu_torture_stats 1138 * thread is not running). 1139 */ 1140 static void 1141 rcu_torture_stats_print(void) 1142 { 1143 int cnt; 1144 1145 cnt = rcu_torture_printk(printk_buf); 1146 pr_alert("%s", printk_buf); 1147 } 1148 1149 /* 1150 * Periodically prints torture statistics, if periodic statistics printing 1151 * was specified via the stat_interval module parameter. 1152 * 1153 * No need to worry about fullstop here, since this one doesn't reference 1154 * volatile state or register callbacks. 1155 */ 1156 static int 1157 rcu_torture_stats(void *arg) 1158 { 1159 VERBOSE_PRINTK_STRING("rcu_torture_stats task started"); 1160 do { 1161 schedule_timeout_interruptible(stat_interval * HZ); 1162 rcu_torture_stats_print(); 1163 rcutorture_shutdown_absorb("rcu_torture_stats"); 1164 } while (!kthread_should_stop()); 1165 VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping"); 1166 return 0; 1167 } 1168 1169 static int rcu_idle_cpu; /* Force all torture tasks off this CPU */ 1170 1171 /* Shuffle tasks such that we allow @rcu_idle_cpu to become idle. A special case 1172 * is when @rcu_idle_cpu = -1, when we allow the tasks to run on all CPUs. 1173 */ 1174 static void rcu_torture_shuffle_tasks(void) 1175 { 1176 int i; 1177 1178 cpumask_setall(shuffle_tmp_mask); 1179 get_online_cpus(); 1180 1181 /* No point in shuffling if there is only one online CPU (ex: UP) */ 1182 if (num_online_cpus() == 1) { 1183 put_online_cpus(); 1184 return; 1185 } 1186 1187 if (rcu_idle_cpu != -1) 1188 cpumask_clear_cpu(rcu_idle_cpu, shuffle_tmp_mask); 1189 1190 set_cpus_allowed_ptr(current, shuffle_tmp_mask); 1191 1192 if (reader_tasks) { 1193 for (i = 0; i < nrealreaders; i++) 1194 if (reader_tasks[i]) 1195 set_cpus_allowed_ptr(reader_tasks[i], 1196 shuffle_tmp_mask); 1197 } 1198 if (fakewriter_tasks) { 1199 for (i = 0; i < nfakewriters; i++) 1200 if (fakewriter_tasks[i]) 1201 set_cpus_allowed_ptr(fakewriter_tasks[i], 1202 shuffle_tmp_mask); 1203 } 1204 if (writer_task) 1205 set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask); 1206 if (stats_task) 1207 set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask); 1208 if (stutter_task) 1209 set_cpus_allowed_ptr(stutter_task, shuffle_tmp_mask); 1210 if (fqs_task) 1211 set_cpus_allowed_ptr(fqs_task, shuffle_tmp_mask); 1212 if (shutdown_task) 1213 set_cpus_allowed_ptr(shutdown_task, shuffle_tmp_mask); 1214 #ifdef CONFIG_HOTPLUG_CPU 1215 if (onoff_task) 1216 set_cpus_allowed_ptr(onoff_task, shuffle_tmp_mask); 1217 #endif /* #ifdef CONFIG_HOTPLUG_CPU */ 1218 if (stall_task) 1219 set_cpus_allowed_ptr(stall_task, shuffle_tmp_mask); 1220 if (barrier_cbs_tasks) 1221 for (i = 0; i < n_barrier_cbs; i++) 1222 if (barrier_cbs_tasks[i]) 1223 set_cpus_allowed_ptr(barrier_cbs_tasks[i], 1224 shuffle_tmp_mask); 1225 if (barrier_task) 1226 set_cpus_allowed_ptr(barrier_task, shuffle_tmp_mask); 1227 1228 if (rcu_idle_cpu == -1) 1229 rcu_idle_cpu = num_online_cpus() - 1; 1230 else 1231 rcu_idle_cpu--; 1232 1233 put_online_cpus(); 1234 } 1235 1236 /* Shuffle tasks across CPUs, with the intent of allowing each CPU in the 1237 * system to become idle at a time and cut off its timer ticks. This is meant 1238 * to test the support for such tickless idle CPU in RCU. 1239 */ 1240 static int 1241 rcu_torture_shuffle(void *arg) 1242 { 1243 VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started"); 1244 do { 1245 schedule_timeout_interruptible(shuffle_interval * HZ); 1246 rcu_torture_shuffle_tasks(); 1247 rcutorture_shutdown_absorb("rcu_torture_shuffle"); 1248 } while (!kthread_should_stop()); 1249 VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping"); 1250 return 0; 1251 } 1252 1253 /* Cause the rcutorture test to "stutter", starting and stopping all 1254 * threads periodically. 1255 */ 1256 static int 1257 rcu_torture_stutter(void *arg) 1258 { 1259 VERBOSE_PRINTK_STRING("rcu_torture_stutter task started"); 1260 do { 1261 schedule_timeout_interruptible(stutter * HZ); 1262 stutter_pause_test = 1; 1263 if (!kthread_should_stop()) 1264 schedule_timeout_interruptible(stutter * HZ); 1265 stutter_pause_test = 0; 1266 rcutorture_shutdown_absorb("rcu_torture_stutter"); 1267 } while (!kthread_should_stop()); 1268 VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping"); 1269 return 0; 1270 } 1271 1272 static inline void 1273 rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag) 1274 { 1275 pr_alert("%s" TORTURE_FLAG 1276 "--- %s: nreaders=%d nfakewriters=%d " 1277 "stat_interval=%d verbose=%d test_no_idle_hz=%d " 1278 "shuffle_interval=%d stutter=%d irqreader=%d " 1279 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d " 1280 "test_boost=%d/%d test_boost_interval=%d " 1281 "test_boost_duration=%d shutdown_secs=%d " 1282 "stall_cpu=%d stall_cpu_holdoff=%d " 1283 "n_barrier_cbs=%d " 1284 "onoff_interval=%d onoff_holdoff=%d\n", 1285 torture_type, tag, nrealreaders, nfakewriters, 1286 stat_interval, verbose, test_no_idle_hz, shuffle_interval, 1287 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter, 1288 test_boost, cur_ops->can_boost, 1289 test_boost_interval, test_boost_duration, shutdown_secs, 1290 stall_cpu, stall_cpu_holdoff, 1291 n_barrier_cbs, 1292 onoff_interval, onoff_holdoff); 1293 } 1294 1295 static struct notifier_block rcutorture_shutdown_nb = { 1296 .notifier_call = rcutorture_shutdown_notify, 1297 }; 1298 1299 static void rcutorture_booster_cleanup(int cpu) 1300 { 1301 struct task_struct *t; 1302 1303 if (boost_tasks[cpu] == NULL) 1304 return; 1305 mutex_lock(&boost_mutex); 1306 VERBOSE_PRINTK_STRING("Stopping rcu_torture_boost task"); 1307 t = boost_tasks[cpu]; 1308 boost_tasks[cpu] = NULL; 1309 mutex_unlock(&boost_mutex); 1310 1311 /* This must be outside of the mutex, otherwise deadlock! */ 1312 kthread_stop(t); 1313 boost_tasks[cpu] = NULL; 1314 } 1315 1316 static int rcutorture_booster_init(int cpu) 1317 { 1318 int retval; 1319 1320 if (boost_tasks[cpu] != NULL) 1321 return 0; /* Already created, nothing more to do. */ 1322 1323 /* Don't allow time recalculation while creating a new task. */ 1324 mutex_lock(&boost_mutex); 1325 VERBOSE_PRINTK_STRING("Creating rcu_torture_boost task"); 1326 boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL, 1327 cpu_to_node(cpu), 1328 "rcu_torture_boost"); 1329 if (IS_ERR(boost_tasks[cpu])) { 1330 retval = PTR_ERR(boost_tasks[cpu]); 1331 VERBOSE_PRINTK_STRING("rcu_torture_boost task create failed"); 1332 n_rcu_torture_boost_ktrerror++; 1333 boost_tasks[cpu] = NULL; 1334 mutex_unlock(&boost_mutex); 1335 return retval; 1336 } 1337 kthread_bind(boost_tasks[cpu], cpu); 1338 wake_up_process(boost_tasks[cpu]); 1339 mutex_unlock(&boost_mutex); 1340 return 0; 1341 } 1342 1343 /* 1344 * Cause the rcutorture test to shutdown the system after the test has 1345 * run for the time specified by the shutdown_secs module parameter. 1346 */ 1347 static int 1348 rcu_torture_shutdown(void *arg) 1349 { 1350 long delta; 1351 unsigned long jiffies_snap; 1352 1353 VERBOSE_PRINTK_STRING("rcu_torture_shutdown task started"); 1354 jiffies_snap = ACCESS_ONCE(jiffies); 1355 while (ULONG_CMP_LT(jiffies_snap, shutdown_time) && 1356 !kthread_should_stop()) { 1357 delta = shutdown_time - jiffies_snap; 1358 if (verbose) 1359 pr_alert("%s" TORTURE_FLAG 1360 "rcu_torture_shutdown task: %lu jiffies remaining\n", 1361 torture_type, delta); 1362 schedule_timeout_interruptible(delta); 1363 jiffies_snap = ACCESS_ONCE(jiffies); 1364 } 1365 if (kthread_should_stop()) { 1366 VERBOSE_PRINTK_STRING("rcu_torture_shutdown task stopping"); 1367 return 0; 1368 } 1369 1370 /* OK, shut down the system. */ 1371 1372 VERBOSE_PRINTK_STRING("rcu_torture_shutdown task shutting down system"); 1373 shutdown_task = NULL; /* Avoid self-kill deadlock. */ 1374 rcu_torture_cleanup(); /* Get the success/failure message. */ 1375 kernel_power_off(); /* Shut down the system. */ 1376 return 0; 1377 } 1378 1379 #ifdef CONFIG_HOTPLUG_CPU 1380 1381 /* 1382 * Execute random CPU-hotplug operations at the interval specified 1383 * by the onoff_interval. 1384 */ 1385 static int 1386 rcu_torture_onoff(void *arg) 1387 { 1388 int cpu; 1389 unsigned long delta; 1390 int maxcpu = -1; 1391 DEFINE_RCU_RANDOM(rand); 1392 int ret; 1393 unsigned long starttime; 1394 1395 VERBOSE_PRINTK_STRING("rcu_torture_onoff task started"); 1396 for_each_online_cpu(cpu) 1397 maxcpu = cpu; 1398 WARN_ON(maxcpu < 0); 1399 if (onoff_holdoff > 0) { 1400 VERBOSE_PRINTK_STRING("rcu_torture_onoff begin holdoff"); 1401 schedule_timeout_interruptible(onoff_holdoff * HZ); 1402 VERBOSE_PRINTK_STRING("rcu_torture_onoff end holdoff"); 1403 } 1404 while (!kthread_should_stop()) { 1405 cpu = (rcu_random(&rand) >> 4) % (maxcpu + 1); 1406 if (cpu_online(cpu) && cpu_is_hotpluggable(cpu)) { 1407 if (verbose) 1408 pr_alert("%s" TORTURE_FLAG 1409 "rcu_torture_onoff task: offlining %d\n", 1410 torture_type, cpu); 1411 starttime = jiffies; 1412 n_offline_attempts++; 1413 ret = cpu_down(cpu); 1414 if (ret) { 1415 if (verbose) 1416 pr_alert("%s" TORTURE_FLAG 1417 "rcu_torture_onoff task: offline %d failed: errno %d\n", 1418 torture_type, cpu, ret); 1419 } else { 1420 if (verbose) 1421 pr_alert("%s" TORTURE_FLAG 1422 "rcu_torture_onoff task: offlined %d\n", 1423 torture_type, cpu); 1424 n_offline_successes++; 1425 delta = jiffies - starttime; 1426 sum_offline += delta; 1427 if (min_offline < 0) { 1428 min_offline = delta; 1429 max_offline = delta; 1430 } 1431 if (min_offline > delta) 1432 min_offline = delta; 1433 if (max_offline < delta) 1434 max_offline = delta; 1435 } 1436 } else if (cpu_is_hotpluggable(cpu)) { 1437 if (verbose) 1438 pr_alert("%s" TORTURE_FLAG 1439 "rcu_torture_onoff task: onlining %d\n", 1440 torture_type, cpu); 1441 starttime = jiffies; 1442 n_online_attempts++; 1443 ret = cpu_up(cpu); 1444 if (ret) { 1445 if (verbose) 1446 pr_alert("%s" TORTURE_FLAG 1447 "rcu_torture_onoff task: online %d failed: errno %d\n", 1448 torture_type, cpu, ret); 1449 } else { 1450 if (verbose) 1451 pr_alert("%s" TORTURE_FLAG 1452 "rcu_torture_onoff task: onlined %d\n", 1453 torture_type, cpu); 1454 n_online_successes++; 1455 delta = jiffies - starttime; 1456 sum_online += delta; 1457 if (min_online < 0) { 1458 min_online = delta; 1459 max_online = delta; 1460 } 1461 if (min_online > delta) 1462 min_online = delta; 1463 if (max_online < delta) 1464 max_online = delta; 1465 } 1466 } 1467 schedule_timeout_interruptible(onoff_interval * HZ); 1468 } 1469 VERBOSE_PRINTK_STRING("rcu_torture_onoff task stopping"); 1470 return 0; 1471 } 1472 1473 static int 1474 rcu_torture_onoff_init(void) 1475 { 1476 int ret; 1477 1478 if (onoff_interval <= 0) 1479 return 0; 1480 onoff_task = kthread_run(rcu_torture_onoff, NULL, "rcu_torture_onoff"); 1481 if (IS_ERR(onoff_task)) { 1482 ret = PTR_ERR(onoff_task); 1483 onoff_task = NULL; 1484 return ret; 1485 } 1486 return 0; 1487 } 1488 1489 static void rcu_torture_onoff_cleanup(void) 1490 { 1491 if (onoff_task == NULL) 1492 return; 1493 VERBOSE_PRINTK_STRING("Stopping rcu_torture_onoff task"); 1494 kthread_stop(onoff_task); 1495 onoff_task = NULL; 1496 } 1497 1498 #else /* #ifdef CONFIG_HOTPLUG_CPU */ 1499 1500 static int 1501 rcu_torture_onoff_init(void) 1502 { 1503 return 0; 1504 } 1505 1506 static void rcu_torture_onoff_cleanup(void) 1507 { 1508 } 1509 1510 #endif /* #else #ifdef CONFIG_HOTPLUG_CPU */ 1511 1512 /* 1513 * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then 1514 * induces a CPU stall for the time specified by stall_cpu. 1515 */ 1516 static int rcu_torture_stall(void *args) 1517 { 1518 unsigned long stop_at; 1519 1520 VERBOSE_PRINTK_STRING("rcu_torture_stall task started"); 1521 if (stall_cpu_holdoff > 0) { 1522 VERBOSE_PRINTK_STRING("rcu_torture_stall begin holdoff"); 1523 schedule_timeout_interruptible(stall_cpu_holdoff * HZ); 1524 VERBOSE_PRINTK_STRING("rcu_torture_stall end holdoff"); 1525 } 1526 if (!kthread_should_stop()) { 1527 stop_at = get_seconds() + stall_cpu; 1528 /* RCU CPU stall is expected behavior in following code. */ 1529 pr_alert("rcu_torture_stall start.\n"); 1530 rcu_read_lock(); 1531 preempt_disable(); 1532 while (ULONG_CMP_LT(get_seconds(), stop_at)) 1533 continue; /* Induce RCU CPU stall warning. */ 1534 preempt_enable(); 1535 rcu_read_unlock(); 1536 pr_alert("rcu_torture_stall end.\n"); 1537 } 1538 rcutorture_shutdown_absorb("rcu_torture_stall"); 1539 while (!kthread_should_stop()) 1540 schedule_timeout_interruptible(10 * HZ); 1541 return 0; 1542 } 1543 1544 /* Spawn CPU-stall kthread, if stall_cpu specified. */ 1545 static int __init rcu_torture_stall_init(void) 1546 { 1547 int ret; 1548 1549 if (stall_cpu <= 0) 1550 return 0; 1551 stall_task = kthread_run(rcu_torture_stall, NULL, "rcu_torture_stall"); 1552 if (IS_ERR(stall_task)) { 1553 ret = PTR_ERR(stall_task); 1554 stall_task = NULL; 1555 return ret; 1556 } 1557 return 0; 1558 } 1559 1560 /* Clean up after the CPU-stall kthread, if one was spawned. */ 1561 static void rcu_torture_stall_cleanup(void) 1562 { 1563 if (stall_task == NULL) 1564 return; 1565 VERBOSE_PRINTK_STRING("Stopping rcu_torture_stall_task."); 1566 kthread_stop(stall_task); 1567 stall_task = NULL; 1568 } 1569 1570 /* Callback function for RCU barrier testing. */ 1571 void rcu_torture_barrier_cbf(struct rcu_head *rcu) 1572 { 1573 atomic_inc(&barrier_cbs_invoked); 1574 } 1575 1576 /* kthread function to register callbacks used to test RCU barriers. */ 1577 static int rcu_torture_barrier_cbs(void *arg) 1578 { 1579 long myid = (long)arg; 1580 bool lastphase = 0; 1581 struct rcu_head rcu; 1582 1583 init_rcu_head_on_stack(&rcu); 1584 VERBOSE_PRINTK_STRING("rcu_torture_barrier_cbs task started"); 1585 set_user_nice(current, 19); 1586 do { 1587 wait_event(barrier_cbs_wq[myid], 1588 barrier_phase != lastphase || 1589 kthread_should_stop() || 1590 fullstop != FULLSTOP_DONTSTOP); 1591 lastphase = barrier_phase; 1592 smp_mb(); /* ensure barrier_phase load before ->call(). */ 1593 if (kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP) 1594 break; 1595 cur_ops->call(&rcu, rcu_torture_barrier_cbf); 1596 if (atomic_dec_and_test(&barrier_cbs_count)) 1597 wake_up(&barrier_wq); 1598 } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP); 1599 VERBOSE_PRINTK_STRING("rcu_torture_barrier_cbs task stopping"); 1600 rcutorture_shutdown_absorb("rcu_torture_barrier_cbs"); 1601 while (!kthread_should_stop()) 1602 schedule_timeout_interruptible(1); 1603 cur_ops->cb_barrier(); 1604 destroy_rcu_head_on_stack(&rcu); 1605 return 0; 1606 } 1607 1608 /* kthread function to drive and coordinate RCU barrier testing. */ 1609 static int rcu_torture_barrier(void *arg) 1610 { 1611 int i; 1612 1613 VERBOSE_PRINTK_STRING("rcu_torture_barrier task starting"); 1614 do { 1615 atomic_set(&barrier_cbs_invoked, 0); 1616 atomic_set(&barrier_cbs_count, n_barrier_cbs); 1617 smp_mb(); /* Ensure barrier_phase after prior assignments. */ 1618 barrier_phase = !barrier_phase; 1619 for (i = 0; i < n_barrier_cbs; i++) 1620 wake_up(&barrier_cbs_wq[i]); 1621 wait_event(barrier_wq, 1622 atomic_read(&barrier_cbs_count) == 0 || 1623 kthread_should_stop() || 1624 fullstop != FULLSTOP_DONTSTOP); 1625 if (kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP) 1626 break; 1627 n_barrier_attempts++; 1628 cur_ops->cb_barrier(); 1629 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) { 1630 n_rcu_torture_barrier_error++; 1631 WARN_ON_ONCE(1); 1632 } 1633 n_barrier_successes++; 1634 schedule_timeout_interruptible(HZ / 10); 1635 } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP); 1636 VERBOSE_PRINTK_STRING("rcu_torture_barrier task stopping"); 1637 rcutorture_shutdown_absorb("rcu_torture_barrier"); 1638 while (!kthread_should_stop()) 1639 schedule_timeout_interruptible(1); 1640 return 0; 1641 } 1642 1643 /* Initialize RCU barrier testing. */ 1644 static int rcu_torture_barrier_init(void) 1645 { 1646 int i; 1647 int ret; 1648 1649 if (n_barrier_cbs == 0) 1650 return 0; 1651 if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) { 1652 pr_alert("%s" TORTURE_FLAG 1653 " Call or barrier ops missing for %s,\n", 1654 torture_type, cur_ops->name); 1655 pr_alert("%s" TORTURE_FLAG 1656 " RCU barrier testing omitted from run.\n", 1657 torture_type); 1658 return 0; 1659 } 1660 atomic_set(&barrier_cbs_count, 0); 1661 atomic_set(&barrier_cbs_invoked, 0); 1662 barrier_cbs_tasks = 1663 kzalloc(n_barrier_cbs * sizeof(barrier_cbs_tasks[0]), 1664 GFP_KERNEL); 1665 barrier_cbs_wq = 1666 kzalloc(n_barrier_cbs * sizeof(barrier_cbs_wq[0]), 1667 GFP_KERNEL); 1668 if (barrier_cbs_tasks == NULL || !barrier_cbs_wq) 1669 return -ENOMEM; 1670 for (i = 0; i < n_barrier_cbs; i++) { 1671 init_waitqueue_head(&barrier_cbs_wq[i]); 1672 barrier_cbs_tasks[i] = kthread_run(rcu_torture_barrier_cbs, 1673 (void *)(long)i, 1674 "rcu_torture_barrier_cbs"); 1675 if (IS_ERR(barrier_cbs_tasks[i])) { 1676 ret = PTR_ERR(barrier_cbs_tasks[i]); 1677 VERBOSE_PRINTK_ERRSTRING("Failed to create rcu_torture_barrier_cbs"); 1678 barrier_cbs_tasks[i] = NULL; 1679 return ret; 1680 } 1681 } 1682 barrier_task = kthread_run(rcu_torture_barrier, NULL, 1683 "rcu_torture_barrier"); 1684 if (IS_ERR(barrier_task)) { 1685 ret = PTR_ERR(barrier_task); 1686 VERBOSE_PRINTK_ERRSTRING("Failed to create rcu_torture_barrier"); 1687 barrier_task = NULL; 1688 } 1689 return 0; 1690 } 1691 1692 /* Clean up after RCU barrier testing. */ 1693 static void rcu_torture_barrier_cleanup(void) 1694 { 1695 int i; 1696 1697 if (barrier_task != NULL) { 1698 VERBOSE_PRINTK_STRING("Stopping rcu_torture_barrier task"); 1699 kthread_stop(barrier_task); 1700 barrier_task = NULL; 1701 } 1702 if (barrier_cbs_tasks != NULL) { 1703 for (i = 0; i < n_barrier_cbs; i++) { 1704 if (barrier_cbs_tasks[i] != NULL) { 1705 VERBOSE_PRINTK_STRING("Stopping rcu_torture_barrier_cbs task"); 1706 kthread_stop(barrier_cbs_tasks[i]); 1707 barrier_cbs_tasks[i] = NULL; 1708 } 1709 } 1710 kfree(barrier_cbs_tasks); 1711 barrier_cbs_tasks = NULL; 1712 } 1713 if (barrier_cbs_wq != NULL) { 1714 kfree(barrier_cbs_wq); 1715 barrier_cbs_wq = NULL; 1716 } 1717 } 1718 1719 static int rcutorture_cpu_notify(struct notifier_block *self, 1720 unsigned long action, void *hcpu) 1721 { 1722 long cpu = (long)hcpu; 1723 1724 switch (action) { 1725 case CPU_ONLINE: 1726 case CPU_DOWN_FAILED: 1727 (void)rcutorture_booster_init(cpu); 1728 break; 1729 case CPU_DOWN_PREPARE: 1730 rcutorture_booster_cleanup(cpu); 1731 break; 1732 default: 1733 break; 1734 } 1735 return NOTIFY_OK; 1736 } 1737 1738 static struct notifier_block rcutorture_cpu_nb = { 1739 .notifier_call = rcutorture_cpu_notify, 1740 }; 1741 1742 static void 1743 rcu_torture_cleanup(void) 1744 { 1745 int i; 1746 1747 mutex_lock(&fullstop_mutex); 1748 rcutorture_record_test_transition(); 1749 if (fullstop == FULLSTOP_SHUTDOWN) { 1750 pr_warn(/* but going down anyway, so... */ 1751 "Concurrent 'rmmod rcutorture' and shutdown illegal!\n"); 1752 mutex_unlock(&fullstop_mutex); 1753 schedule_timeout_uninterruptible(10); 1754 if (cur_ops->cb_barrier != NULL) 1755 cur_ops->cb_barrier(); 1756 return; 1757 } 1758 fullstop = FULLSTOP_RMMOD; 1759 mutex_unlock(&fullstop_mutex); 1760 unregister_reboot_notifier(&rcutorture_shutdown_nb); 1761 rcu_torture_barrier_cleanup(); 1762 rcu_torture_stall_cleanup(); 1763 if (stutter_task) { 1764 VERBOSE_PRINTK_STRING("Stopping rcu_torture_stutter task"); 1765 kthread_stop(stutter_task); 1766 } 1767 stutter_task = NULL; 1768 if (shuffler_task) { 1769 VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task"); 1770 kthread_stop(shuffler_task); 1771 free_cpumask_var(shuffle_tmp_mask); 1772 } 1773 shuffler_task = NULL; 1774 1775 if (writer_task) { 1776 VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task"); 1777 kthread_stop(writer_task); 1778 } 1779 writer_task = NULL; 1780 1781 if (reader_tasks) { 1782 for (i = 0; i < nrealreaders; i++) { 1783 if (reader_tasks[i]) { 1784 VERBOSE_PRINTK_STRING( 1785 "Stopping rcu_torture_reader task"); 1786 kthread_stop(reader_tasks[i]); 1787 } 1788 reader_tasks[i] = NULL; 1789 } 1790 kfree(reader_tasks); 1791 reader_tasks = NULL; 1792 } 1793 rcu_torture_current = NULL; 1794 1795 if (fakewriter_tasks) { 1796 for (i = 0; i < nfakewriters; i++) { 1797 if (fakewriter_tasks[i]) { 1798 VERBOSE_PRINTK_STRING( 1799 "Stopping rcu_torture_fakewriter task"); 1800 kthread_stop(fakewriter_tasks[i]); 1801 } 1802 fakewriter_tasks[i] = NULL; 1803 } 1804 kfree(fakewriter_tasks); 1805 fakewriter_tasks = NULL; 1806 } 1807 1808 if (stats_task) { 1809 VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task"); 1810 kthread_stop(stats_task); 1811 } 1812 stats_task = NULL; 1813 1814 if (fqs_task) { 1815 VERBOSE_PRINTK_STRING("Stopping rcu_torture_fqs task"); 1816 kthread_stop(fqs_task); 1817 } 1818 fqs_task = NULL; 1819 if ((test_boost == 1 && cur_ops->can_boost) || 1820 test_boost == 2) { 1821 unregister_cpu_notifier(&rcutorture_cpu_nb); 1822 for_each_possible_cpu(i) 1823 rcutorture_booster_cleanup(i); 1824 } 1825 if (shutdown_task != NULL) { 1826 VERBOSE_PRINTK_STRING("Stopping rcu_torture_shutdown task"); 1827 kthread_stop(shutdown_task); 1828 } 1829 shutdown_task = NULL; 1830 rcu_torture_onoff_cleanup(); 1831 1832 /* Wait for all RCU callbacks to fire. */ 1833 1834 if (cur_ops->cb_barrier != NULL) 1835 cur_ops->cb_barrier(); 1836 1837 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */ 1838 1839 if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error) 1840 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE"); 1841 else if (n_online_successes != n_online_attempts || 1842 n_offline_successes != n_offline_attempts) 1843 rcu_torture_print_module_parms(cur_ops, 1844 "End of test: RCU_HOTPLUG"); 1845 else 1846 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS"); 1847 } 1848 1849 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 1850 static void rcu_torture_leak_cb(struct rcu_head *rhp) 1851 { 1852 } 1853 1854 static void rcu_torture_err_cb(struct rcu_head *rhp) 1855 { 1856 /* 1857 * This -might- happen due to race conditions, but is unlikely. 1858 * The scenario that leads to this happening is that the 1859 * first of the pair of duplicate callbacks is queued, 1860 * someone else starts a grace period that includes that 1861 * callback, then the second of the pair must wait for the 1862 * next grace period. Unlikely, but can happen. If it 1863 * does happen, the debug-objects subsystem won't have splatted. 1864 */ 1865 pr_alert("rcutorture: duplicated callback was invoked.\n"); 1866 } 1867 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 1868 1869 /* 1870 * Verify that double-free causes debug-objects to complain, but only 1871 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test 1872 * cannot be carried out. 1873 */ 1874 static void rcu_test_debug_objects(void) 1875 { 1876 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 1877 struct rcu_head rh1; 1878 struct rcu_head rh2; 1879 1880 init_rcu_head_on_stack(&rh1); 1881 init_rcu_head_on_stack(&rh2); 1882 pr_alert("rcutorture: WARN: Duplicate call_rcu() test starting.\n"); 1883 1884 /* Try to queue the rh2 pair of callbacks for the same grace period. */ 1885 preempt_disable(); /* Prevent preemption from interrupting test. */ 1886 rcu_read_lock(); /* Make it impossible to finish a grace period. */ 1887 call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */ 1888 local_irq_disable(); /* Make it harder to start a new grace period. */ 1889 call_rcu(&rh2, rcu_torture_leak_cb); 1890 call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */ 1891 local_irq_enable(); 1892 rcu_read_unlock(); 1893 preempt_enable(); 1894 1895 /* Wait for them all to get done so we can safely return. */ 1896 rcu_barrier(); 1897 pr_alert("rcutorture: WARN: Duplicate call_rcu() test complete.\n"); 1898 destroy_rcu_head_on_stack(&rh1); 1899 destroy_rcu_head_on_stack(&rh2); 1900 #else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 1901 pr_alert("rcutorture: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n"); 1902 #endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 1903 } 1904 1905 static int __init 1906 rcu_torture_init(void) 1907 { 1908 int i; 1909 int cpu; 1910 int firsterr = 0; 1911 int retval; 1912 static struct rcu_torture_ops *torture_ops[] = { 1913 &rcu_ops, &rcu_bh_ops, &srcu_ops, &sched_ops, 1914 }; 1915 1916 mutex_lock(&fullstop_mutex); 1917 1918 /* Process args and tell the world that the torturer is on the job. */ 1919 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { 1920 cur_ops = torture_ops[i]; 1921 if (strcmp(torture_type, cur_ops->name) == 0) 1922 break; 1923 } 1924 if (i == ARRAY_SIZE(torture_ops)) { 1925 pr_alert("rcu-torture: invalid torture type: \"%s\"\n", 1926 torture_type); 1927 pr_alert("rcu-torture types:"); 1928 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) 1929 pr_alert(" %s", torture_ops[i]->name); 1930 pr_alert("\n"); 1931 mutex_unlock(&fullstop_mutex); 1932 return -EINVAL; 1933 } 1934 if (cur_ops->fqs == NULL && fqs_duration != 0) { 1935 pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n"); 1936 fqs_duration = 0; 1937 } 1938 if (cur_ops->init) 1939 cur_ops->init(); /* no "goto unwind" prior to this point!!! */ 1940 1941 if (nreaders >= 0) 1942 nrealreaders = nreaders; 1943 else 1944 nrealreaders = 2 * num_online_cpus(); 1945 rcu_torture_print_module_parms(cur_ops, "Start of test"); 1946 fullstop = FULLSTOP_DONTSTOP; 1947 1948 /* Set up the freelist. */ 1949 1950 INIT_LIST_HEAD(&rcu_torture_freelist); 1951 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) { 1952 rcu_tortures[i].rtort_mbtest = 0; 1953 list_add_tail(&rcu_tortures[i].rtort_free, 1954 &rcu_torture_freelist); 1955 } 1956 1957 /* Initialize the statistics so that each run gets its own numbers. */ 1958 1959 rcu_torture_current = NULL; 1960 rcu_torture_current_version = 0; 1961 atomic_set(&n_rcu_torture_alloc, 0); 1962 atomic_set(&n_rcu_torture_alloc_fail, 0); 1963 atomic_set(&n_rcu_torture_free, 0); 1964 atomic_set(&n_rcu_torture_mberror, 0); 1965 atomic_set(&n_rcu_torture_error, 0); 1966 n_rcu_torture_barrier_error = 0; 1967 n_rcu_torture_boost_ktrerror = 0; 1968 n_rcu_torture_boost_rterror = 0; 1969 n_rcu_torture_boost_failure = 0; 1970 n_rcu_torture_boosts = 0; 1971 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 1972 atomic_set(&rcu_torture_wcount[i], 0); 1973 for_each_possible_cpu(cpu) { 1974 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 1975 per_cpu(rcu_torture_count, cpu)[i] = 0; 1976 per_cpu(rcu_torture_batch, cpu)[i] = 0; 1977 } 1978 } 1979 1980 /* Start up the kthreads. */ 1981 1982 VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task"); 1983 writer_task = kthread_create(rcu_torture_writer, NULL, 1984 "rcu_torture_writer"); 1985 if (IS_ERR(writer_task)) { 1986 firsterr = PTR_ERR(writer_task); 1987 VERBOSE_PRINTK_ERRSTRING("Failed to create writer"); 1988 writer_task = NULL; 1989 goto unwind; 1990 } 1991 wake_up_process(writer_task); 1992 fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]), 1993 GFP_KERNEL); 1994 if (fakewriter_tasks == NULL) { 1995 VERBOSE_PRINTK_ERRSTRING("out of memory"); 1996 firsterr = -ENOMEM; 1997 goto unwind; 1998 } 1999 for (i = 0; i < nfakewriters; i++) { 2000 VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task"); 2001 fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL, 2002 "rcu_torture_fakewriter"); 2003 if (IS_ERR(fakewriter_tasks[i])) { 2004 firsterr = PTR_ERR(fakewriter_tasks[i]); 2005 VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter"); 2006 fakewriter_tasks[i] = NULL; 2007 goto unwind; 2008 } 2009 } 2010 reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]), 2011 GFP_KERNEL); 2012 if (reader_tasks == NULL) { 2013 VERBOSE_PRINTK_ERRSTRING("out of memory"); 2014 firsterr = -ENOMEM; 2015 goto unwind; 2016 } 2017 for (i = 0; i < nrealreaders; i++) { 2018 VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task"); 2019 reader_tasks[i] = kthread_run(rcu_torture_reader, NULL, 2020 "rcu_torture_reader"); 2021 if (IS_ERR(reader_tasks[i])) { 2022 firsterr = PTR_ERR(reader_tasks[i]); 2023 VERBOSE_PRINTK_ERRSTRING("Failed to create reader"); 2024 reader_tasks[i] = NULL; 2025 goto unwind; 2026 } 2027 } 2028 if (stat_interval > 0) { 2029 VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task"); 2030 stats_task = kthread_run(rcu_torture_stats, NULL, 2031 "rcu_torture_stats"); 2032 if (IS_ERR(stats_task)) { 2033 firsterr = PTR_ERR(stats_task); 2034 VERBOSE_PRINTK_ERRSTRING("Failed to create stats"); 2035 stats_task = NULL; 2036 goto unwind; 2037 } 2038 } 2039 if (test_no_idle_hz) { 2040 rcu_idle_cpu = num_online_cpus() - 1; 2041 2042 if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) { 2043 firsterr = -ENOMEM; 2044 VERBOSE_PRINTK_ERRSTRING("Failed to alloc mask"); 2045 goto unwind; 2046 } 2047 2048 /* Create the shuffler thread */ 2049 shuffler_task = kthread_run(rcu_torture_shuffle, NULL, 2050 "rcu_torture_shuffle"); 2051 if (IS_ERR(shuffler_task)) { 2052 free_cpumask_var(shuffle_tmp_mask); 2053 firsterr = PTR_ERR(shuffler_task); 2054 VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler"); 2055 shuffler_task = NULL; 2056 goto unwind; 2057 } 2058 } 2059 if (stutter < 0) 2060 stutter = 0; 2061 if (stutter) { 2062 /* Create the stutter thread */ 2063 stutter_task = kthread_run(rcu_torture_stutter, NULL, 2064 "rcu_torture_stutter"); 2065 if (IS_ERR(stutter_task)) { 2066 firsterr = PTR_ERR(stutter_task); 2067 VERBOSE_PRINTK_ERRSTRING("Failed to create stutter"); 2068 stutter_task = NULL; 2069 goto unwind; 2070 } 2071 } 2072 if (fqs_duration < 0) 2073 fqs_duration = 0; 2074 if (fqs_duration) { 2075 /* Create the stutter thread */ 2076 fqs_task = kthread_run(rcu_torture_fqs, NULL, 2077 "rcu_torture_fqs"); 2078 if (IS_ERR(fqs_task)) { 2079 firsterr = PTR_ERR(fqs_task); 2080 VERBOSE_PRINTK_ERRSTRING("Failed to create fqs"); 2081 fqs_task = NULL; 2082 goto unwind; 2083 } 2084 } 2085 if (test_boost_interval < 1) 2086 test_boost_interval = 1; 2087 if (test_boost_duration < 2) 2088 test_boost_duration = 2; 2089 if ((test_boost == 1 && cur_ops->can_boost) || 2090 test_boost == 2) { 2091 2092 boost_starttime = jiffies + test_boost_interval * HZ; 2093 register_cpu_notifier(&rcutorture_cpu_nb); 2094 for_each_possible_cpu(i) { 2095 if (cpu_is_offline(i)) 2096 continue; /* Heuristic: CPU can go offline. */ 2097 retval = rcutorture_booster_init(i); 2098 if (retval < 0) { 2099 firsterr = retval; 2100 goto unwind; 2101 } 2102 } 2103 } 2104 if (shutdown_secs > 0) { 2105 shutdown_time = jiffies + shutdown_secs * HZ; 2106 shutdown_task = kthread_create(rcu_torture_shutdown, NULL, 2107 "rcu_torture_shutdown"); 2108 if (IS_ERR(shutdown_task)) { 2109 firsterr = PTR_ERR(shutdown_task); 2110 VERBOSE_PRINTK_ERRSTRING("Failed to create shutdown"); 2111 shutdown_task = NULL; 2112 goto unwind; 2113 } 2114 wake_up_process(shutdown_task); 2115 } 2116 i = rcu_torture_onoff_init(); 2117 if (i != 0) { 2118 firsterr = i; 2119 goto unwind; 2120 } 2121 register_reboot_notifier(&rcutorture_shutdown_nb); 2122 i = rcu_torture_stall_init(); 2123 if (i != 0) { 2124 firsterr = i; 2125 goto unwind; 2126 } 2127 retval = rcu_torture_barrier_init(); 2128 if (retval != 0) { 2129 firsterr = retval; 2130 goto unwind; 2131 } 2132 if (object_debug) 2133 rcu_test_debug_objects(); 2134 rcutorture_record_test_transition(); 2135 mutex_unlock(&fullstop_mutex); 2136 return 0; 2137 2138 unwind: 2139 mutex_unlock(&fullstop_mutex); 2140 rcu_torture_cleanup(); 2141 return firsterr; 2142 } 2143 2144 module_init(rcu_torture_init); 2145 module_exit(rcu_torture_cleanup); 2146
Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.