~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/kernel/trace/trace_selftest.c

Version: ~ [ linux-4.18 ] ~ [ linux-4.17.14 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.62 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.119 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.147 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.19.8 ] ~ [ linux-3.18.118 ] ~ [ linux-3.17.8 ] ~ [ linux-3.16.57 ] ~ [ linux-3.15.10 ] ~ [ linux-3.14.79 ] ~ [ linux-3.13.11 ] ~ [ linux-3.12.74 ] ~ [ linux-3.11.10 ] ~ [ linux-3.10.108 ] ~ [ linux-3.9.11 ] ~ [ linux-3.8.13 ] ~ [ linux-3.7.10 ] ~ [ linux-3.6.11 ] ~ [ linux-3.5.7 ] ~ [ linux-3.4.113 ] ~ [ linux-3.3.8 ] ~ [ linux-3.2.102 ] ~ [ linux-3.1.10 ] ~ [ linux-3.0.101 ] ~ [ linux-2.6.39.4 ] ~ [ linux-2.6.38.8 ] ~ [ linux-2.6.37.6 ] ~ [ linux-2.6.36.4 ] ~ [ linux-2.6.35.14 ] ~ [ linux-2.6.34.15 ] ~ [ linux-2.6.33.20 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.27.62 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~ [ linux-next-20180810 ] ~ [ linux-next-20180813 ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 // SPDX-License-Identifier: GPL-2.0
  2 /* Include in trace.c */
  3 
  4 #include <uapi/linux/sched/types.h>
  5 #include <linux/stringify.h>
  6 #include <linux/kthread.h>
  7 #include <linux/delay.h>
  8 #include <linux/slab.h>
  9 
 10 static inline int trace_valid_entry(struct trace_entry *entry)
 11 {
 12         switch (entry->type) {
 13         case TRACE_FN:
 14         case TRACE_CTX:
 15         case TRACE_WAKE:
 16         case TRACE_STACK:
 17         case TRACE_PRINT:
 18         case TRACE_BRANCH:
 19         case TRACE_GRAPH_ENT:
 20         case TRACE_GRAPH_RET:
 21                 return 1;
 22         }
 23         return 0;
 24 }
 25 
 26 static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
 27 {
 28         struct ring_buffer_event *event;
 29         struct trace_entry *entry;
 30         unsigned int loops = 0;
 31 
 32         while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
 33                 entry = ring_buffer_event_data(event);
 34 
 35                 /*
 36                  * The ring buffer is a size of trace_buf_size, if
 37                  * we loop more than the size, there's something wrong
 38                  * with the ring buffer.
 39                  */
 40                 if (loops++ > trace_buf_size) {
 41                         printk(KERN_CONT ".. bad ring buffer ");
 42                         goto failed;
 43                 }
 44                 if (!trace_valid_entry(entry)) {
 45                         printk(KERN_CONT ".. invalid entry %d ",
 46                                 entry->type);
 47                         goto failed;
 48                 }
 49         }
 50         return 0;
 51 
 52  failed:
 53         /* disable tracing */
 54         tracing_disabled = 1;
 55         printk(KERN_CONT ".. corrupted trace buffer .. ");
 56         return -1;
 57 }
 58 
/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 *
 * Takes tr->max_lock with IRQs off so the max buffer cannot be
 * swapped out from under us, temporarily turns the ring buffer off
 * (see below), and validates every entry on every possible CPU.
 *
 * If @count is non-NULL it receives the number of entries that were
 * in the buffer before consumption.  Returns 0 when all entries are
 * valid, -1 on corruption (propagated from trace_test_buffer_cpu()).
 */
static int __maybe_unused trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&buf->tr->max_lock);

	/* Read the count up front: consuming below empties the buffer */
	cnt = ring_buffer_entries(buf->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(buf, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&buf->tr->max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}
 96 
/* Log a warning when a tracer's init() callback fails during a selftest. */
static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
		trace->name, init_ret);
}
102 #ifdef CONFIG_FUNCTION_TRACER
103 
104 #ifdef CONFIG_DYNAMIC_FTRACE
105 
/* Number of times probe1 fired; its filter selects function 1 only. */
static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe1_cnt += 1;
}
114 
/* Number of times probe2 fired; its filter selects function 2 only. */
static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe2_cnt += 1;
}
123 
/* Number of times probe3 fired; its filter selects both test functions. */
static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe3_cnt += 1;
}
132 
/* Number of times the trace_array's own (unfiltered) callback fired. */
static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_global_cnt += 1;
}
141 
/* Number of times the dynamically allocated (unfiltered) ops fired. */
static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip,
					 struct ftrace_ops *op,
					 struct pt_regs *pt_regs)
{
	trace_selftest_test_dyn_cnt += 1;
}
150 
/*
 * Ops used by trace_selftest_ops().  The filters set there make probe1
 * trace DYN_FTRACE_TEST_NAME, probe2 trace DYN_FTRACE_TEST_NAME2, and
 * probe3 trace both.
 */
static struct ftrace_ops test_probe1 = {
	.func			= trace_selftest_test_probe1_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe2 = {
	.func			= trace_selftest_test_probe2_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe3 = {
	.func			= trace_selftest_test_probe3_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};
165 
166 static void print_counts(void)
167 {
168         printk("(%d %d %d %d %d) ",
169                trace_selftest_test_probe1_cnt,
170                trace_selftest_test_probe2_cnt,
171                trace_selftest_test_probe3_cnt,
172                trace_selftest_test_global_cnt,
173                trace_selftest_test_dyn_cnt);
174 }
175 
176 static void reset_counts(void)
177 {
178         trace_selftest_test_probe1_cnt = 0;
179         trace_selftest_test_probe2_cnt = 0;
180         trace_selftest_test_probe3_cnt = 0;
181         trace_selftest_test_global_cnt = 0;
182         trace_selftest_test_dyn_cnt = 0;
183 }
184 
/*
 * Exercise multiple ftrace_ops registered at once with dynamic ftrace.
 *
 * Registers three static probes with different function filters, plus
 * (optionally) the trace_array's own ops and a dynamically allocated
 * ops, then calls the two test functions and verifies each probe fired
 * exactly the expected number of times.
 *
 * @tr:  trace array whose ops are used when cnt > 1
 * @cnt: pass number; when > 1 the tr->ops "global" callback is also
 *       registered and checked
 *
 * Returns 0 on success, -1 on any miscount or allocation failure.
 */
static int trace_selftest_ops(struct trace_array *tr, int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

	ftrace_enabled = 1;
	reset_counts();

	/* Handle PPC64 '.' name */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	/* reset=0 appends func2 to probe3's filter instead of replacing it */
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);

	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	/* First time we are running with main function */
	if (cnt > 1) {
		ftrace_init_array_ops(tr, trace_selftest_test_global_func);
		register_ftrace_function(tr->ops);
	}

	DYN_FTRACE_TEST_NAME();

	print_counts();

	/* Only probes 1 and 3 (and the global ops) should see function 1 */
	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out;
	}

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	/* Now probes 2 and 3 should have seen function 2 as well */
	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);

	trace_selftest_test_global_cnt = 0;

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out_free;
	}
	/* dyn_ops has no filter set, so it traces everything */
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;

	ret = 0;
 out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

 out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	if (cnt > 1)
		unregister_ftrace_function(tr->ops);
	ftrace_reset_array_ops(tr);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	/* Nothing should fire now that every ops is unregistered */
	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
323 
/*
 * Test dynamic code modification and ftrace filters.
 *
 * Enables function tracing filtered to DYN_FTRACE_TEST_NAME only,
 * verifies nothing is recorded before @func runs and exactly one
 * entry is recorded after, then runs the multi-ops tests.
 *
 * @trace: tracer under test (its init/reset callbacks are used)
 * @tr:    trace array to record into
 * @func:  pointer to DYN_FTRACE_TEST_NAME, passed indirectly so the
 *         compiler cannot optimize the call away
 *
 * Returns 0 on success, negative on failure.
 */
static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
						  struct trace_array *tr,
						  int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* passed in by parameter to fool gcc from optimizing */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer (func was called pre-filter) */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);

	ftrace_enabled = 1;
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(tr, 1);
	trace->reset(tr);

 out:
	ftrace_enabled = save_ftrace_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(tr, 2);

	return ret;
}
415 
/* Shared invocation counter for both recursion selftest callbacks. */
static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
					       unsigned long pip,
					       struct ftrace_ops *op,
					       struct pt_regs *pt_regs)
{
	/*
	 * This callback is registered without the recursion-safe flag,
	 * so the ftrace core itself must provide recursion protection.
	 * If it fails to, the DYN_FTRACE_TEST_NAME() call below recurses
	 * straight back here; the depth cap keeps that from crashing
	 * the kernel outright.
	 */
	if (trace_selftest_recursion_cnt++ <= 10)
		DYN_FTRACE_TEST_NAME();
}
431 
432 static void trace_selftest_test_recursion_safe_func(unsigned long ip,
433                                                     unsigned long pip,
434                                                     struct ftrace_ops *op,
435                                                     struct pt_regs *pt_regs)
436 {
437         /*
438          * We said we would provide our own recursion. By calling
439          * this function again, we should recurse back into this function
440          * and count again. But this only happens if the arch supports
441          * all of ftrace features and nothing else is using the function
442          * tracing utility.
443          */
444         if (trace_selftest_recursion_cnt++)
445                 return;
446         DYN_FTRACE_TEST_NAME();
447 }
448 
/* Deliberately lacks RECURSION_SAFE: the ftrace core must guard it. */
static struct ftrace_ops test_rec_probe = {
	.func			= trace_selftest_test_recursion_func,
};

/* Declares itself recursion safe: the callback handles recursion. */
static struct ftrace_ops test_recsafe_probe = {
	.func			= trace_selftest_test_recursion_safe_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};
457 
/*
 * Verify ftrace's recursion protection: first with a probe that relies
 * on the core to prevent self-recursion, then with a probe that
 * declares itself recursion safe.
 *
 * Returns 0 on success, negative on failure.
 */
static int
trace_selftest_function_recursion(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion: ");


	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_rec_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_rec_probe);

	ret = -1;
	/*
	 * The unsafe probe called DYN_FTRACE_TEST_NAME() from inside
	 * itself; if the core's recursion protection worked, the
	 * callback ran exactly once.
	 */
	if (trace_selftest_recursion_cnt != 1) {
		pr_cont("*callback not called once (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	/*
	 * NOTE(review): starting the count at 1 makes the recursion-safe
	 * callback bail out on its very first invocation (its cnt++ test
	 * sees a non-zero value), so the "!= 2" check below passes
	 * without ever exercising the recursive path.  Starting at 0
	 * would actually test self-recursion — confirm intended behavior.
	 */
	trace_selftest_recursion_cnt = 1;

	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion safe: ");

	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_recsafe_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_recsafe_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called expected 2 times (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	ret = 0;
out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
535 #else
536 # define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
537 # define trace_selftest_function_recursion() ({ 0; })
538 #endif /* CONFIG_DYNAMIC_FTRACE */
539 
/*
 * Records whether the regs-test callback below received a pt_regs
 * pointer.  START means the callback never ran at all.
 */
static enum {
	TRACE_SELFTEST_REGS_START,
	TRACE_SELFTEST_REGS_FOUND,
	TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;
545 
546 static void trace_selftest_test_regs_func(unsigned long ip,
547                                           unsigned long pip,
548                                           struct ftrace_ops *op,
549                                           struct pt_regs *pt_regs)
550 {
551         if (pt_regs)
552                 trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
553         else
554                 trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
555 }
556 
/* Requests a full pt_regs snapshot on every callback invocation. */
static struct ftrace_ops test_regs_probe = {
	.func		= trace_selftest_test_regs_func,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
};
561 
/*
 * Verify FTRACE_OPS_FL_SAVE_REGS: with CONFIG_DYNAMIC_FTRACE_WITH_REGS
 * the callback must receive a pt_regs pointer; on other arches
 * registering a SAVE_REGS ops must fail, and the SAVE_REGS_IF_SUPPORTED
 * fallback must deliver NULL regs.
 *
 * Returns 0 on success, negative on failure.
 */
static int
trace_selftest_function_regs(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;
	int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	supported = 1;
#endif

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace regs%s: ",
		!supported ? "(no arch support)" : "");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
	/*
	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
	 * This test really doesn't care.
	 */
	if (ret && ret != -ENODEV) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_regs_probe);
	/*
	 * Now if the arch does not support passing regs, then this should
	 * have failed.
	 */
	if (!supported) {
		if (!ret) {
			pr_cont("*registered save-regs without arch support* ");
			goto out;
		}
		/* retry with best-effort regs, which must succeed anywhere */
		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
		ret = register_ftrace_function(&test_regs_probe);
	}
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}


	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_regs_probe);

	ret = -1;

	/* Check what the callback recorded against arch capability */
	switch (trace_selftest_regs_stat) {
	case TRACE_SELFTEST_REGS_START:
		pr_cont("*callback never called* ");
		goto out;

	case TRACE_SELFTEST_REGS_FOUND:
		if (supported)
			break;
		pr_cont("*callback received regs without arch support* ");
		goto out;

	case TRACE_SELFTEST_REGS_NOT_FOUND:
		if (!supported)
			break;
		pr_cont("*callback received NULL regs* ");
		goto out;
	}

	ret = 0;
out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
646 
647 /*
648  * Simple verification test of ftrace function tracer.
649  * Enable ftrace, sleep 1/10 second, and then read the trace
650  * buffer to see if all is in order.
651  */
__init int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE
	/*
	 * A boot-time ftrace filter would skew the entry counts below,
	 * so skip the test rather than report a bogus failure.
	 */
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);

	ftrace_enabled = 1;
	trace->reset(tr);
	tracing_start();

	/* tracing everything for 100ms must have recorded something */
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);
	if (ret)
		goto out;

	ret = trace_selftest_function_recursion();
	if (ret)
		goto out;

	ret = trace_selftest_function_regs();
 out:
	ftrace_enabled = save_ftrace_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
716 #endif /* CONFIG_FUNCTION_TRACER */
717 
718 
719 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
720 
721 /* Maximum number of functions to trace before diagnosing a hang */
722 #define GRAPH_MAX_FUNC_TEST     100000000
723 
724 static unsigned int graph_hang_thresh;
725 
726 /* Wrap the real function entry probe to avoid possible hanging */
727 static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
728 {
729         /* This is harmlessly racy, we want to approximately detect a hang */
730         if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
731                 ftrace_graph_stop();
732                 printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
733                 if (ftrace_dump_on_oops) {
734                         ftrace_dump(DUMP_ALL);
735                         /* ftrace_dump() disables tracing */
736                         tracing_on();
737                 }
738                 return 0;
739         }
740 
741         return trace_graph_entry(trace);
742 }
743 
/*
 * Pretty much the same as the function tracer selftest, from which this
 * has been borrowed, except the entry probe is wrapped in a watchdog so
 * a hung graph tracer can be detected and recovered from.
 */
__init int
trace_selftest_startup_function_graph(struct tracer *trace,
					struct trace_array *tr)
{
	int ret;
	unsigned long count;

#ifdef CONFIG_DYNAMIC_FTRACE
	/* A boot-time filter would skew the counts; force a pass instead */
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		/* the watchdog already stopped the tracer; don't retest */
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
809 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
810 
811 
812 #ifdef CONFIG_IRQSOFF_TRACER
/*
 * Verify the irqsoff latency tracer: disable interrupts for ~100us and
 * check that the max-latency snapshot buffer recorded the section.
 * Returns 0 on success, negative on failure.
 */
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallels max irqs off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	/* count is only read when the max_buffer check above ran (!ret) */
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
859 #endif /* CONFIG_IRQSOFF_TRACER */
860 
861 #ifdef CONFIG_PREEMPT_TRACER
/*
 * Verify the preemptoff latency tracer: disable preemption for ~100us
 * and check that the max-latency snapshot buffer recorded the section.
 * Returns 0 on success, negative on failure.
 */
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptable,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallels max preempt off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	/* count is only read when the max_buffer check above ran (!ret) */
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
921 #endif /* CONFIG_PREEMPT_TRACER */
922 
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
/*
 * Selftest for the combined preempt/irqs-off tracer: run the critical
 * section twice, once releasing preemption before interrupts and once
 * the other way around, verifying after each pass that the tracer
 * recorded entries in the max-latency snapshot buffer.
 * Returns 0 on success, negative on failure.
 */
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptable,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		/* tracing was never started; skip the tracing_start() path */
		goto out_no_start;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallels max irqs/preempt off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);
	if (ret)
		goto out;

	/* ret is known to be zero here, so only count needs checking */
	if (!count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tr->max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

out:
	tracing_start();
out_no_start:
	trace->reset(tr);
	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
1023 
#ifdef CONFIG_NOP_TRACER
/*
 * The nop tracer does nothing, so there is nothing to verify;
 * its selftest trivially succeeds.
 */
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	return 0;
}
#endif
1032 
1033 #ifdef CONFIG_SCHED_TRACER
1034 
/* Shared state between the wakeup selftest and its -deadline kthread. */
struct wakeup_test_data {
	struct completion	is_ready;	/* signaled by the thread at each sync point */
	int			go;		/* set by the test to release the sleeping thread */
};
1039 
/*
 * Kthread body for the wakeup-latency selftest.  It promotes itself to
 * SCHED_DEADLINE (the scheduling class the wakeup tracer tracks), then
 * sleeps until the test sets data->go, signalling the test through
 * data->is_ready at each synchronization point.  Always returns 0.
 */
static int trace_wakeup_test_thread(void *data)
{
	/* Make this a -deadline thread */
	static const struct sched_attr attr = {
		.sched_policy = SCHED_DEADLINE,
		.sched_runtime = 100000ULL,
		.sched_deadline = 10000000ULL,
		.sched_period = 10000000ULL
	};
	struct wakeup_test_data *x = data;

	sched_setattr(current, &attr);

	/* Make it know we have a new prio */
	complete(&x->is_ready);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!x->go) {
		schedule();
		/* re-arm the sleep state before re-checking go (lost-wakeup safe) */
		set_current_state(TASK_INTERRUPTIBLE);
	}

	/* second sync point: tell the test the traced wakeup happened */
	complete(&x->is_ready);

	set_current_state(TASK_INTERRUPTIBLE);

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

	__set_current_state(TASK_RUNNING);

	return 0;
}
1077 int
1078 trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
1079 {
1080         unsigned long save_max = tr->max_latency;
1081         struct task_struct *p;
1082         struct wakeup_test_data data;
1083         unsigned long count;
1084         int ret;
1085 
1086         memset(&data, 0, sizeof(data));
1087 
1088         init_completion(&data.is_ready);
1089 
1090         /* create a -deadline thread */
1091         p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
1092         if (IS_ERR(p)) {
1093                 printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
1094                 return -1;
1095         }
1096 
1097         /* make sure the thread is running at -deadline policy */
1098         wait_for_completion(&data.is_ready);
1099 
1100         /* start the tracing */
1101         ret = tracer_init(trace, tr);
1102         if (ret) {
1103                 warn_failed_init_tracer(trace, ret);
1104                 return ret;
1105         }
1106 
1107         /* reset the max latency */
1108         tr->max_latency = 0;
1109 
1110         while (p->on_rq) {
1111                 /*
1112                  * Sleep to make sure the -deadline thread is asleep too.
1113                  * On virtual machines we can't rely on timings,
1114                  * but we want to make sure this test still works.
1115                  */
1116                 msleep(100);
1117         }
1118 
1119         init_completion(&data.is_ready);
1120 
1121         data.go = 1;
1122         /* memory barrier is in the wake_up_process() */
1123 
1124         wake_up_process(p);
1125 
1126         /* Wait for the task to wake up */
1127         wait_for_completion(&data.is_ready);
1128 
1129         /* stop the tracing. */
1130         tracing_stop();
1131         /* check both trace buffers */
1132         ret = trace_test_buffer(&tr->trace_buffer, NULL);
1133         if (!ret)
1134                 ret = trace_test_buffer(&tr->max_buffer, &count);
1135 
1136 
1137         trace->reset(tr);
1138         tracing_start();
1139 
1140         tr->max_latency = save_max;
1141 
1142         /* kill the thread */
1143         kthread_stop(p);
1144 
1145         if (!ret && !count) {
1146                 printk(KERN_CONT ".. no entries found ..");
1147                 ret = -1;
1148         }
1149 
1150         return ret;
1151 }
1152 #endif /* CONFIG_SCHED_TRACER */
1153 
#ifdef CONFIG_BRANCH_TRACER
/*
 * Selftest for the branch tracer: let it run briefly and make sure
 * branch events actually landed in the trace buffer.
 * Returns 0 on success, negative on failure.
 */
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long entries;
	int status;

	/* Bring the tracer up on this trace array. */
	status = tracer_init(trace, tr);
	if (status) {
		warn_failed_init_tracer(trace, status);
		return status;
	}

	/* Give the kernel 100ms to take some branches. */
	msleep(100);

	/* Stop tracing and validate what was collected. */
	tracing_stop();
	status = trace_test_buffer(&tr->trace_buffer, &entries);
	trace->reset(tr);
	tracing_start();

	if (!status && !entries) {
		printk(KERN_CONT ".. no entries found ..");
		status = -1;
	}

	return status;
}
#endif /* CONFIG_BRANCH_TRACER */
1185 
1186 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp