Linux/kernel/trace/trace_events.c

/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_common_fields);

#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)

static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;

#define SYSTEM_FL_FREE_NAME             (1 << 31)

static inline int system_refcount(struct event_subsystem *system)
{
        return system->ref_count & ~SYSTEM_FL_FREE_NAME;
}

static int system_refcount_inc(struct event_subsystem *system)
{
        return (system->ref_count++) & ~SYSTEM_FL_FREE_NAME;
}

static int system_refcount_dec(struct event_subsystem *system)
{
        return (--system->ref_count) & ~SYSTEM_FL_FREE_NAME;
}
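
/*
 * Editorial note: bit 31 of ref_count doubles as the SYSTEM_FL_FREE_NAME
 * flag ("the name was kstrdup()ed and must be kfree()d"), which is why
 * the helpers above mask it out of every count they return; a ref_count
 * of (SYSTEM_FL_FREE_NAME | 2), for example, still reports a count of 2.
 */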

/* Double loops, do not use break, only gotos work */
#define do_for_each_event_file(tr, file)                        \
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {   \
                list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)                   \
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {   \
                struct ftrace_event_file *___n;                         \
                list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()             \
        }
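
/*
 * Editorial usage sketch (target_call is illustrative): these macros
 * expand to a nested loop, so a plain "break" only leaves the inner
 * per-tracer list, which event_remove() below exploits to skip to the
 * next trace_array. Exiting both loops early requires a goto:
 *
 *      do_for_each_event_file(tr, file) {
 *              if (file->event_call == target_call)
 *                      goto out;       // leaves both loops
 *      } while_for_each_event_file();
 *  out:
 */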

static struct list_head *
trace_get_fields(struct ftrace_event_call *event_call)
{
        if (!event_call->class->get_fields)
                return &event_call->class->fields;
        return event_call->class->get_fields(event_call);
}

static struct ftrace_event_field *
__find_event_field(struct list_head *head, char *name)
{
        struct ftrace_event_field *field;

        list_for_each_entry(field, head, link) {
                if (!strcmp(field->name, name))
                        return field;
        }

        return NULL;
}

struct ftrace_event_field *
trace_find_event_field(struct ftrace_event_call *call, char *name)
{
        struct ftrace_event_field *field;
        struct list_head *head;

        field = __find_event_field(&ftrace_common_fields, name);
        if (field)
                return field;

        head = trace_get_fields(call);
        return __find_event_field(head, name);
}

static int __trace_define_field(struct list_head *head, const char *type,
                                const char *name, int offset, int size,
                                int is_signed, int filter_type)
{
        struct ftrace_event_field *field;

        field = kmem_cache_alloc(field_cachep, GFP_TRACE);
        if (!field)
                return -ENOMEM;

        field->name = name;
        field->type = type;

        if (filter_type == FILTER_OTHER)
                field->filter_type = filter_assign_type(type);
        else
                field->filter_type = filter_type;

        field->offset = offset;
        field->size = size;
        field->is_signed = is_signed;

        list_add(&field->link, head);

        return 0;
}

int trace_define_field(struct ftrace_event_call *call, const char *type,
                       const char *name, int offset, int size, int is_signed,
                       int filter_type)
{
        struct list_head *head;

        if (WARN_ON(!call->class))
                return 0;

        head = trace_get_fields(call);
        return __trace_define_field(head, type, name, offset, size,
                                    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);
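
/*
 * Editorial sketch (the struct and field names are illustrative only):
 * an event whose records look like
 *
 *      struct my_entry {
 *              struct trace_entry      ent;
 *              int                     my_val;
 *      };
 *
 * would describe its one custom field as
 *
 *      trace_define_field(call, "int", "my_val",
 *                         offsetof(struct my_entry, my_val),
 *                         sizeof(int), is_signed_type(int), FILTER_OTHER);
 */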

#define __common_field(type, item)                                      \
        ret = __trace_define_field(&ftrace_common_fields, #type,        \
                                   "common_" #item,                     \
                                   offsetof(typeof(ent), item),         \
                                   sizeof(ent.item),                    \
                                   is_signed_type(type), FILTER_OTHER); \
        if (ret)                                                        \
                return ret;

static int trace_define_common_fields(void)
{
        int ret;
        struct trace_entry ent;

        __common_field(unsigned short, type);
        __common_field(unsigned char, flags);
        __common_field(unsigned char, preempt_count);
        __common_field(int, pid);

        return ret;
}

static void trace_destroy_fields(struct ftrace_event_call *call)
{
        struct ftrace_event_field *field, *next;
        struct list_head *head;

        head = trace_get_fields(call);
        list_for_each_entry_safe(field, next, head, link) {
                list_del(&field->link);
                kmem_cache_free(field_cachep, field);
        }
}

int trace_event_raw_init(struct ftrace_event_call *call)
{
        int id;

        id = register_ftrace_event(&call->event);
        if (!id)
                return -ENODEV;

        return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer,
                                  struct ftrace_event_file *ftrace_file,
                                  unsigned long len)
{
        struct ftrace_event_call *event_call = ftrace_file->event_call;

        local_save_flags(fbuffer->flags);
        fbuffer->pc = preempt_count();
        fbuffer->ftrace_file = ftrace_file;

        fbuffer->event =
                trace_event_buffer_lock_reserve(&fbuffer->buffer, ftrace_file,
                                                event_call->event.type, len,
                                                fbuffer->flags, fbuffer->pc);
        if (!fbuffer->event)
                return NULL;

        fbuffer->entry = ring_buffer_event_data(fbuffer->event);
        return fbuffer->entry;
}
EXPORT_SYMBOL_GPL(ftrace_event_buffer_reserve);

void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer)
{
        event_trigger_unlock_commit(fbuffer->ftrace_file, fbuffer->buffer,
                                    fbuffer->event, fbuffer->entry,
                                    fbuffer->flags, fbuffer->pc);
}
EXPORT_SYMBOL_GPL(ftrace_event_buffer_commit);
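
/*
 * Editorial sketch of the reserve/commit pairing as a generated event
 * probe would use it (my_entry and my_val are illustrative):
 *
 *      struct ftrace_event_buffer fbuffer;
 *      struct my_entry *entry;
 *
 *      entry = ftrace_event_buffer_reserve(&fbuffer, ftrace_file,
 *                                          sizeof(*entry));
 *      if (!entry)
 *              return;
 *      entry->my_val = val;
 *      ftrace_event_buffer_commit(&fbuffer);
 */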

int ftrace_event_reg(struct ftrace_event_call *call,
                     enum trace_reg type, void *data)
{
        struct ftrace_event_file *file = data;

        WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
        switch (type) {
        case TRACE_REG_REGISTER:
                return tracepoint_probe_register(call->tp,
                                                 call->class->probe,
                                                 file);
        case TRACE_REG_UNREGISTER:
                tracepoint_probe_unregister(call->tp,
                                            call->class->probe,
                                            file);
                return 0;

#ifdef CONFIG_PERF_EVENTS
        case TRACE_REG_PERF_REGISTER:
                return tracepoint_probe_register(call->tp,
                                                 call->class->perf_probe,
                                                 call);
        case TRACE_REG_PERF_UNREGISTER:
                tracepoint_probe_unregister(call->tp,
                                            call->class->perf_probe,
                                            call);
                return 0;
        case TRACE_REG_PERF_OPEN:
        case TRACE_REG_PERF_CLOSE:
        case TRACE_REG_PERF_ADD:
        case TRACE_REG_PERF_DEL:
                return 0;
#endif
        }
        return 0;
}
EXPORT_SYMBOL_GPL(ftrace_event_reg);

void trace_event_enable_cmd_record(bool enable)
{
        struct ftrace_event_file *file;
        struct trace_array *tr;

        mutex_lock(&event_mutex);
        do_for_each_event_file(tr, file) {

                if (!(file->flags & FTRACE_EVENT_FL_ENABLED))
                        continue;

                if (enable) {
                        tracing_start_cmdline_record();
                        set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
                } else {
                        tracing_stop_cmdline_record();
                        clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
                }
        } while_for_each_event_file();
        mutex_unlock(&event_mutex);
}

static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
                                         int enable, int soft_disable)
{
        struct ftrace_event_call *call = file->event_call;
        int ret = 0;
        int disable;

        switch (enable) {
        case 0:
                /*
                 * When soft_disable is set and enable is cleared, the sm_ref
                 * reference counter is decremented. If it reaches 0, we want
                 * to clear the SOFT_DISABLED flag but leave the event in the
                 * state that it was. That is, if the event was enabled and
                 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
                 * is set we do not want the event to be enabled before we
                 * clear the bit.
                 *
                 * When soft_disable is not set but the SOFT_MODE flag is,
                 * we do nothing. Do not disable the tracepoint, otherwise
                 * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
                 */
                if (soft_disable) {
                        if (atomic_dec_return(&file->sm_ref) > 0)
                                break;
                        disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
                        clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
                } else
                        disable = !(file->flags & FTRACE_EVENT_FL_SOFT_MODE);

                if (disable && (file->flags & FTRACE_EVENT_FL_ENABLED)) {
                        clear_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
                        if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) {
                                tracing_stop_cmdline_record();
                                clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
                        }
                        call->class->reg(call, TRACE_REG_UNREGISTER, file);
                }
                /* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */
                if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
                        set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
                else
                        clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
                break;
        case 1:
                /*
                 * When soft_disable is set and enable is set, we want to
                 * register the tracepoint for the event, but leave the event
                 * as is. That means, if the event was already enabled, we do
                 * nothing (but set SOFT_MODE). If the event is disabled, we
                 * set SOFT_DISABLED before enabling the event tracepoint, so
                 * it still seems to be disabled.
                 */
                if (!soft_disable)
                        clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
                else {
                        if (atomic_inc_return(&file->sm_ref) > 1)
                                break;
                        set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
                }

                if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {

                        /* Keep the event disabled, when going to SOFT_MODE. */
                        if (soft_disable)
                                set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);

                        if (trace_flags & TRACE_ITER_RECORD_CMD) {
                                tracing_start_cmdline_record();
                                set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
                        }
                        ret = call->class->reg(call, TRACE_REG_REGISTER, file);
                        if (ret) {
                                tracing_stop_cmdline_record();
                                pr_info("event trace: Could not enable event "
                                        "%s\n", ftrace_event_name(call));
                                break;
                        }
                        set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);

                        /* WAS_ENABLED gets set but never cleared. */
                        call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
                }
                break;
        }

        return ret;
}

int trace_event_enable_disable(struct ftrace_event_file *file,
                               int enable, int soft_disable)
{
        return __ftrace_event_enable_disable(file, enable, soft_disable);
}

static int ftrace_event_enable_disable(struct ftrace_event_file *file,
                                       int enable)
{
        return __ftrace_event_enable_disable(file, enable, 0);
}

static void ftrace_clear_events(struct trace_array *tr)
{
        struct ftrace_event_file *file;

        mutex_lock(&event_mutex);
        list_for_each_entry(file, &tr->events, list) {
                ftrace_event_enable_disable(file, 0);
        }
        mutex_unlock(&event_mutex);
}

static void __put_system(struct event_subsystem *system)
{
        struct event_filter *filter = system->filter;

        WARN_ON_ONCE(system_refcount(system) == 0);
        if (system_refcount_dec(system))
                return;

        list_del(&system->list);

        if (filter) {
                kfree(filter->filter_string);
                kfree(filter);
        }
        if (system->ref_count & SYSTEM_FL_FREE_NAME)
                kfree(system->name);
        kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
        WARN_ON_ONCE(system_refcount(system) == 0);
        system_refcount_inc(system);
}

static void __get_system_dir(struct ftrace_subsystem_dir *dir)
{
        WARN_ON_ONCE(dir->ref_count == 0);
        dir->ref_count++;
        __get_system(dir->subsystem);
}

static void __put_system_dir(struct ftrace_subsystem_dir *dir)
{
        WARN_ON_ONCE(dir->ref_count == 0);
        /* If the subsystem is about to be freed, the dir must be too */
        WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);

        __put_system(dir->subsystem);
        if (!--dir->ref_count)
                kfree(dir);
}

static void put_system(struct ftrace_subsystem_dir *dir)
{
        mutex_lock(&event_mutex);
        __put_system_dir(dir);
        mutex_unlock(&event_mutex);
}

static void remove_subsystem(struct ftrace_subsystem_dir *dir)
{
        if (!dir)
                return;

        if (!--dir->nr_events) {
                debugfs_remove_recursive(dir->entry);
                list_del(&dir->list);
                __put_system_dir(dir);
        }
}

static void remove_event_file_dir(struct ftrace_event_file *file)
{
        struct dentry *dir = file->dir;
        struct dentry *child;

        if (dir) {
                spin_lock(&dir->d_lock);        /* probably unneeded */
                list_for_each_entry(child, &dir->d_subdirs, d_child) {
                        if (child->d_inode)     /* probably unneeded */
                                child->d_inode->i_private = NULL;
                }
                spin_unlock(&dir->d_lock);

                debugfs_remove_recursive(dir);
        }

        list_del(&file->list);
        remove_subsystem(file->system);
        free_event_filter(file->filter);
        kmem_cache_free(file_cachep, file);
}

/*
 * Passing NULL for match, sub and event will set/unset all events
 * in the given trace array.
 */
static int
__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
                              const char *sub, const char *event, int set)
{
        struct ftrace_event_file *file;
        struct ftrace_event_call *call;
        const char *name;
        int ret = -EINVAL;

        list_for_each_entry(file, &tr->events, list) {

                call = file->event_call;
                name = ftrace_event_name(call);

                if (!name || !call->class || !call->class->reg)
                        continue;

                if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
                        continue;

                if (match &&
                    strcmp(match, name) != 0 &&
                    strcmp(match, call->class->system) != 0)
                        continue;

                if (sub && strcmp(sub, call->class->system) != 0)
                        continue;

                if (event && strcmp(event, name) != 0)
                        continue;

                ftrace_event_enable_disable(file, set);

                ret = 0;
        }

        return ret;
}

static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
                                  const char *sub, const char *event, int set)
{
        int ret;

        mutex_lock(&event_mutex);
        ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
        mutex_unlock(&event_mutex);

        return ret;
}

static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
        char *event = NULL, *sub = NULL, *match;

        /*
         * The buf format can be <subsystem>:<event-name>
         *  *:<event-name> means any event by that name.
         *  :<event-name> is the same.
         *
         *  <subsystem>:* means all events in that subsystem
         *  <subsystem>: means the same.
         *
         *  <name> (no ':') means all events in a subsystem with
         *  the name <name> or any event that matches <name>
         */

        match = strsep(&buf, ":");
        if (buf) {
                sub = match;
                event = buf;
                match = NULL;

                if (!strlen(sub) || strcmp(sub, "*") == 0)
                        sub = NULL;
                if (!strlen(event) || strcmp(event, "*") == 0)
                        event = NULL;
        }

        return __ftrace_set_clr_event(tr, match, sub, event, set);
}
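
/*
 * Editorial examples of the accepted syntax as written to the debugfs
 * "set_event" file (the '!' prefix that clears events is stripped by
 * ftrace_event_write() before this function is reached):
 *
 *      # echo sched:sched_switch > set_event     one event
 *      # echo 'sched:*' > set_event              a whole subsystem
 *      # echo sched_switch > set_event           match in any subsystem
 */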

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
        struct trace_array *tr = top_trace_array();

        if (!tr)
                return -ENODEV;

        return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);
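
/*
 * Editorial sketch of in-kernel usage (the event names are illustrative):
 *
 *      if (trace_set_clr_event("sched", "sched_switch", 1))
 *              pr_warn("could not enable sched_switch events\n");
 */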

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE          127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos)
{
        struct trace_parser parser;
        struct seq_file *m = file->private_data;
        struct trace_array *tr = m->private;
        ssize_t read, ret;

        if (!cnt)
                return 0;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
                return -ENOMEM;

        read = trace_get_user(&parser, ubuf, cnt, ppos);

        if (read >= 0 && trace_parser_loaded(&parser)) {
                int set = 1;

                if (*parser.buffer == '!')
                        set = 0;

                parser.buffer[parser.idx] = 0;

                ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
                if (ret)
                        goto out_put;
        }

        ret = read;

 out_put:
        trace_parser_put(&parser);

        return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_event_file *file = v;
        struct ftrace_event_call *call;
        struct trace_array *tr = m->private;

        (*pos)++;

        list_for_each_entry_continue(file, &tr->events, list) {
                call = file->event_call;
                /*
                 * The ftrace subsystem is for showing formats only.
                 * They can not be enabled or disabled via the event files.
                 */
                if (call->class && call->class->reg &&
                    !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
                        return file;
        }

        return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_event_file *file;
        struct trace_array *tr = m->private;
        loff_t l;

        mutex_lock(&event_mutex);

        /* Start at the list head itself; t_next() steps to the first entry */
        file = list_entry(&tr->events, struct ftrace_event_file, list);
        for (l = 0; l <= *pos; ) {
                file = t_next(m, file, &l);
                if (!file)
                        break;
        }
        return file;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_event_file *file = v;
        struct trace_array *tr = m->private;

        (*pos)++;

        list_for_each_entry_continue(file, &tr->events, list) {
                if (file->flags & FTRACE_EVENT_FL_ENABLED)
                        return file;
        }

        return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_event_file *file;
        struct trace_array *tr = m->private;
        loff_t l;

        mutex_lock(&event_mutex);

        /* Start at the list head itself; s_next() steps to the first entry */
        file = list_entry(&tr->events, struct ftrace_event_file, list);
        for (l = 0; l <= *pos; ) {
                file = s_next(m, file, &l);
                if (!file)
                        break;
        }
        return file;
}

static int t_show(struct seq_file *m, void *v)
{
        struct ftrace_event_file *file = v;
        struct ftrace_event_call *call = file->event_call;

        if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
                seq_printf(m, "%s:", call->class->system);
        seq_printf(m, "%s\n", ftrace_event_name(call));

        return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
        mutex_unlock(&event_mutex);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        struct ftrace_event_file *file;
        unsigned long flags;
        char buf[4] = "";

        mutex_lock(&event_mutex);
        file = event_file_data(filp);
        if (likely(file))
                flags = file->flags;
        mutex_unlock(&event_mutex);

        if (!file)
                return -ENODEV;

        if (flags & FTRACE_EVENT_FL_ENABLED &&
            !(flags & FTRACE_EVENT_FL_SOFT_DISABLED))
                strcpy(buf, "1");

        if (flags & FTRACE_EVENT_FL_SOFT_DISABLED ||
            flags & FTRACE_EVENT_FL_SOFT_MODE)
                strcat(buf, "*");

        strcat(buf, "\n");

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
}
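
/*
 * Editorial note: a read of the per-event "enable" file thus returns
 * "0", "1", "0*" or "1*", the '*' marking an event held in SOFT_MODE
 * (e.g. by a trigger):
 *
 *      # cat events/sched/sched_switch/enable
 *      0*
 */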

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        struct ftrace_event_file *file;
        unsigned long val;
        int ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        switch (val) {
        case 0:
        case 1:
                ret = -ENODEV;
                mutex_lock(&event_mutex);
                file = event_file_data(filp);
                if (likely(file))
                        ret = ftrace_event_enable_disable(file, val);
                mutex_unlock(&event_mutex);
                break;

        default:
                return -EINVAL;
        }

        *ppos += cnt;

        return ret ? ret : cnt;
}

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        const char set_to_char[4] = { '?', '0', '1', 'X' };
        struct ftrace_subsystem_dir *dir = filp->private_data;
        struct event_subsystem *system = dir->subsystem;
        struct ftrace_event_call *call;
        struct ftrace_event_file *file;
        struct trace_array *tr = dir->tr;
        char buf[2];
        int set = 0;
        int ret;

        mutex_lock(&event_mutex);
        list_for_each_entry(file, &tr->events, list) {
                call = file->event_call;
                if (!ftrace_event_name(call) || !call->class || !call->class->reg)
                        continue;

                if (system && strcmp(call->class->system, system->name) != 0)
                        continue;

                /*
                 * We need to find out if all the events are set,
                 * if all events are cleared, or if we have a mixture.
                 */
                set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED));

                /*
                 * If we have a mixture, no need to look further.
                 */
                if (set == 3)
                        break;
        }
        mutex_unlock(&event_mutex);

        buf[0] = set_to_char[set];
        buf[1] = '\n';

        ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

        return ret;
}
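
/*
 * Editorial example: '0' means every event in the subsystem is disabled,
 * '1' that all are enabled, and 'X' a mixture; '?' (no matching events
 * at all) should not be seen in practice:
 *
 *      # cat events/sched/enable
 *      X
 */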

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
                    loff_t *ppos)
{
        struct ftrace_subsystem_dir *dir = filp->private_data;
        struct event_subsystem *system = dir->subsystem;
        const char *name = NULL;
        unsigned long val;
        ssize_t ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        if (val != 0 && val != 1)
                return -EINVAL;

        /*
         * Opening of "enable" adds a ref count to system,
         * so the name is safe to use.
         */
        if (system)
                name = system->name;

        ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
        if (ret)
                goto out;

        ret = cnt;

out:
        *ppos += cnt;

        return ret;
}

enum {
        FORMAT_HEADER           = 1,
        FORMAT_FIELD_SEPERATOR  = 2,
        FORMAT_PRINTFMT         = 3,
};

static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_event_call *call = event_file_data(m->private);
        struct list_head *common_head = &ftrace_common_fields;
        struct list_head *head = trace_get_fields(call);
        struct list_head *node = v;

        (*pos)++;

        switch ((unsigned long)v) {
        case FORMAT_HEADER:
                node = common_head;
                break;

        case FORMAT_FIELD_SEPERATOR:
                node = head;
                break;

        case FORMAT_PRINTFMT:
                /* all done */
                return NULL;
        }

        node = node->prev;
        if (node == common_head)
                return (void *)FORMAT_FIELD_SEPERATOR;
        else if (node == head)
                return (void *)FORMAT_PRINTFMT;
        else
                return node;
}

static int f_show(struct seq_file *m, void *v)
{
        struct ftrace_event_call *call = event_file_data(m->private);
        struct ftrace_event_field *field;
        const char *array_descriptor;

        switch ((unsigned long)v) {
        case FORMAT_HEADER:
                seq_printf(m, "name: %s\n", ftrace_event_name(call));
                seq_printf(m, "ID: %d\n", call->event.type);
                seq_printf(m, "format:\n");
                return 0;

        case FORMAT_FIELD_SEPERATOR:
                seq_putc(m, '\n');
                return 0;

        case FORMAT_PRINTFMT:
                seq_printf(m, "\nprint fmt: %s\n",
                           call->print_fmt);
                return 0;
        }

        field = list_entry(v, struct ftrace_event_field, link);
        /*
         * Smartly shows the array type (except for dynamic arrays).
         * Normal:
         *      field:TYPE VAR
         * If TYPE := TYPE[LEN], it is shown:
         *      field:TYPE VAR[LEN]
         */
        array_descriptor = strchr(field->type, '[');

        if (!strncmp(field->type, "__data_loc", 10))
                array_descriptor = NULL;

        if (!array_descriptor)
                seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
                           field->type, field->name, field->offset,
                           field->size, !!field->is_signed);
        else
                seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
                           (int)(array_descriptor - field->type),
                           field->type, field->name,
                           array_descriptor, field->offset,
                           field->size, !!field->is_signed);

        return 0;
}
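
/*
 * Editorial example of the resulting "format" file (abridged; the ID
 * value and field list vary by event and kernel):
 *
 *      name: sched_switch
 *      ID: 51
 *      format:
 *              field:unsigned short common_type;  offset:0;  size:2;  signed:0;
 *              ...
 *              field:char prev_comm[16];  offset:8;  size:16;  signed:1;
 *
 *      print fmt: "prev_comm=%s ...", REC->prev_comm, ...
 */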

static void *f_start(struct seq_file *m, loff_t *pos)
{
        void *p = (void *)FORMAT_HEADER;
        loff_t l = 0;

        /* ->stop() is called even if ->start() fails */
        mutex_lock(&event_mutex);
        if (!event_file_data(m->private))
                return ERR_PTR(-ENODEV);

        while (l < *pos && p)
                p = f_next(m, p, &l);

        return p;
}

static void f_stop(struct seq_file *m, void *p)
{
        mutex_unlock(&event_mutex);
}

static const struct seq_operations trace_format_seq_ops = {
        .start          = f_start,
        .next           = f_next,
        .stop           = f_stop,
        .show           = f_show,
};

static int trace_format_open(struct inode *inode, struct file *file)
{
        struct seq_file *m;
        int ret;

        ret = seq_open(file, &trace_format_seq_ops);
        if (ret < 0)
                return ret;

        m = file->private_data;
        m->private = file;

        return 0;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
        int id = (long)event_file_data(filp);
        char buf[32];
        int len;

        if (unlikely(!id))
                return -ENODEV;

        len = sprintf(buf, "%d\n", id);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        struct ftrace_event_file *file;
        struct trace_seq *s;
        int r = -ENODEV;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);

        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        mutex_lock(&event_mutex);
        file = event_file_data(filp);
        if (file)
                print_event_filter(file, s);
        mutex_unlock(&event_mutex);

        if (file)
                r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

        kfree(s);

        return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        struct ftrace_event_file *file;
        char *buf;
        int err = -ENODEV;

        if (cnt >= PAGE_SIZE)
                return -EINVAL;

        buf = (char *)__get_free_page(GFP_TEMPORARY);
        if (!buf)
                return -ENOMEM;

        if (copy_from_user(buf, ubuf, cnt)) {
                free_page((unsigned long) buf);
                return -EFAULT;
        }
        buf[cnt] = '\0';

        mutex_lock(&event_mutex);
        file = event_file_data(filp);
        if (file)
                err = apply_event_filter(file, buf);
        mutex_unlock(&event_mutex);

        free_page((unsigned long) buf);
        if (err < 0)
                return err;

        *ppos += cnt;

        return cnt;
}
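
/*
 * Editorial example: filters are plain predicate strings written to the
 * per-event "filter" file, and writing "0" clears the filter:
 *
 *      # echo 'common_pid == 1' > events/sched/sched_switch/filter
 *      # echo 0 > events/sched/sched_switch/filter
 */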

static LIST_HEAD(event_subsystems);

static int subsystem_open(struct inode *inode, struct file *filp)
{
        struct event_subsystem *system = NULL;
        struct ftrace_subsystem_dir *dir = NULL; /* Initialize for gcc */
        struct trace_array *tr;
        int ret;

        if (tracing_is_disabled())
                return -ENODEV;

        /* Make sure the system still exists */
        mutex_lock(&trace_types_lock);
        mutex_lock(&event_mutex);
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
                list_for_each_entry(dir, &tr->systems, list) {
                        if (dir == inode->i_private) {
                                /* Don't open systems with no events */
                                if (dir->nr_events) {
                                        __get_system_dir(dir);
                                        system = dir->subsystem;
                                }
                                goto exit_loop;
                        }
                }
        }
 exit_loop:
        mutex_unlock(&event_mutex);
        mutex_unlock(&trace_types_lock);

        if (!system)
                return -ENODEV;

        /* Some versions of gcc think dir can be uninitialized here */
        WARN_ON(!dir);

        /* Still need to increment the ref count of the system */
        if (trace_array_get(tr) < 0) {
                put_system(dir);
                return -ENODEV;
        }

        ret = tracing_open_generic(inode, filp);
        if (ret < 0) {
                trace_array_put(tr);
                put_system(dir);
        }

        return ret;
}

static int system_tr_open(struct inode *inode, struct file *filp)
{
        struct ftrace_subsystem_dir *dir;
        struct trace_array *tr = inode->i_private;
        int ret;

        if (tracing_is_disabled())
                return -ENODEV;

        if (trace_array_get(tr) < 0)
                return -ENODEV;

        /* Make a temporary dir that has no system but points to tr */
        dir = kzalloc(sizeof(*dir), GFP_KERNEL);
        if (!dir) {
                trace_array_put(tr);
                return -ENOMEM;
        }

        dir->tr = tr;

        ret = tracing_open_generic(inode, filp);
        if (ret < 0) {
                trace_array_put(tr);
                kfree(dir);
                return ret;
        }

        filp->private_data = dir;

        return 0;
}

static int subsystem_release(struct inode *inode, struct file *file)
{
        struct ftrace_subsystem_dir *dir = file->private_data;

        trace_array_put(dir->tr);

        /*
         * If dir->subsystem is NULL, then this is a temporary
         * descriptor that was made for a trace_array to enable
         * all subsystems.
         */
        if (dir->subsystem)
                put_system(dir);
        else
                kfree(dir);

        return 0;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
                      loff_t *ppos)
{
        struct ftrace_subsystem_dir *dir = filp->private_data;
        struct event_subsystem *system = dir->subsystem;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        print_subsystem_event_filter(system, s);
        r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

        kfree(s);

        return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                       loff_t *ppos)
{
        struct ftrace_subsystem_dir *dir = filp->private_data;
        char *buf;
        int err;

        if (cnt >= PAGE_SIZE)
                return -EINVAL;

        buf = (char *)__get_free_page(GFP_TEMPORARY);
        if (!buf)
                return -ENOMEM;

        if (copy_from_user(buf, ubuf, cnt)) {
                free_page((unsigned long) buf);
                return -EFAULT;
        }
        buf[cnt] = '\0';

        err = apply_subsystem_event_filter(dir, buf);
        free_page((unsigned long) buf);
        if (err < 0)
                return err;

        *ppos += cnt;

        return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
        int (*func)(struct trace_seq *s) = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        func(s);
        r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

        kfree(s);

        return r;
}

static int ftrace_event_avail_open(struct inode *inode, struct file *file);
static int ftrace_event_set_open(struct inode *inode, struct file *file);
static int ftrace_event_release(struct inode *inode, struct file *file);

static const struct seq_operations show_event_seq_ops = {
        .start = t_start,
        .next = t_next,
        .show = t_show,
        .stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
        .start = s_start,
        .next = s_next,
        .show = t_show,
        .stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
        .open = ftrace_event_avail_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
        .open = ftrace_event_set_open,
        .read = seq_read,
        .write = ftrace_event_write,
        .llseek = seq_lseek,
        .release = ftrace_event_release,
};

static const struct file_operations ftrace_enable_fops = {
        .open = tracing_open_generic,
        .read = event_enable_read,
        .write = event_enable_write,
        .llseek = default_llseek,
};

static const struct file_operations ftrace_event_format_fops = {
        .open = trace_format_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

static const struct file_operations ftrace_event_id_fops = {
        .read = event_id_read,
        .llseek = default_llseek,
};

static const struct file_operations ftrace_event_filter_fops = {
        .open = tracing_open_generic,
        .read = event_filter_read,
        .write = event_filter_write,
        .llseek = default_llseek,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
        .open = subsystem_open,
        .read = subsystem_filter_read,
        .write = subsystem_filter_write,
        .llseek = default_llseek,
        .release = subsystem_release,
};

static const struct file_operations ftrace_system_enable_fops = {
        .open = subsystem_open,
        .read = system_enable_read,
        .write = system_enable_write,
        .llseek = default_llseek,
        .release = subsystem_release,
};

static const struct file_operations ftrace_tr_enable_fops = {
        .open = system_tr_open,
        .read = system_enable_read,
        .write = system_enable_write,
        .llseek = default_llseek,
        .release = subsystem_release,
};

static const struct file_operations ftrace_show_header_fops = {
        .open = tracing_open_generic,
        .read = show_header,
        .llseek = default_llseek,
};

static int
ftrace_event_open(struct inode *inode, struct file *file,
                  const struct seq_operations *seq_ops)
{
        struct seq_file *m;
        int ret;

        ret = seq_open(file, seq_ops);
        if (ret < 0)
                return ret;
        m = file->private_data;
        /* copy tr over to seq ops */
        m->private = inode->i_private;

        return ret;
}

static int ftrace_event_release(struct inode *inode, struct file *file)
{
        struct trace_array *tr = inode->i_private;

        trace_array_put(tr);

        return seq_release(inode, file);
}

static int
ftrace_event_avail_open(struct inode *inode, struct file *file)
{
        const struct seq_operations *seq_ops = &show_event_seq_ops;

        return ftrace_event_open(inode, file, seq_ops);
}

static int
ftrace_event_set_open(struct inode *inode, struct file *file)
{
        const struct seq_operations *seq_ops = &show_set_event_seq_ops;
        struct trace_array *tr = inode->i_private;
        int ret;

        if (trace_array_get(tr) < 0)
                return -ENODEV;

        if ((file->f_mode & FMODE_WRITE) &&
            (file->f_flags & O_TRUNC))
                ftrace_clear_events(tr);

        ret = ftrace_event_open(inode, file, seq_ops);
        if (ret < 0)
                trace_array_put(tr);
        return ret;
}

static struct event_subsystem *
create_new_subsystem(const char *name)
{
        struct event_subsystem *system;

        /* need to create new entry */
        system = kmalloc(sizeof(*system), GFP_KERNEL);
        if (!system)
                return NULL;

        system->ref_count = 1;

        /* Only allocate if dynamic (kprobes and modules) */
        if (!core_kernel_data((unsigned long)name)) {
                system->ref_count |= SYSTEM_FL_FREE_NAME;
                system->name = kstrdup(name, GFP_KERNEL);
                if (!system->name)
                        goto out_free;
        } else
                system->name = name;

        system->filter = NULL;

        system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
        if (!system->filter)
                goto out_free;

        list_add(&system->list, &event_subsystems);

        return system;

 out_free:
        if (system->ref_count & SYSTEM_FL_FREE_NAME)
                kfree(system->name);
        kfree(system);
        return NULL;
}

static struct dentry *
event_subsystem_dir(struct trace_array *tr, const char *name,
                    struct ftrace_event_file *file, struct dentry *parent)
{
        struct ftrace_subsystem_dir *dir;
        struct event_subsystem *system;
        struct dentry *entry;

        /* First see if we did not already create this dir */
        list_for_each_entry(dir, &tr->systems, list) {
                system = dir->subsystem;
                if (strcmp(system->name, name) == 0) {
                        dir->nr_events++;
                        file->system = dir;
                        return dir->entry;
                }
        }

        /* Now see if the system itself exists. */
        list_for_each_entry(system, &event_subsystems, list) {
                if (strcmp(system->name, name) == 0)
                        break;
        }
        /* Reset system variable when not found */
        if (&system->list == &event_subsystems)
                system = NULL;

        dir = kmalloc(sizeof(*dir), GFP_KERNEL);
        if (!dir)
                goto out_fail;

        if (!system) {
                system = create_new_subsystem(name);
                if (!system)
                        goto out_free;
        } else
                __get_system(system);

        dir->entry = debugfs_create_dir(name, parent);
        if (!dir->entry) {
                pr_warning("Failed to create system directory %s\n", name);
                __put_system(system);
                goto out_free;
        }

        dir->tr = tr;
        dir->ref_count = 1;
        dir->nr_events = 1;
        dir->subsystem = system;
        file->system = dir;

        entry = debugfs_create_file("filter", 0644, dir->entry, dir,
                                    &ftrace_subsystem_filter_fops);
        if (!entry) {
                kfree(system->filter);
                system->filter = NULL;
                pr_warning("Could not create debugfs '%s/filter' entry\n", name);
        }

        trace_create_file("enable", 0644, dir->entry, dir,
                          &ftrace_system_enable_fops);

        list_add(&dir->list, &tr->systems);

        return dir->entry;

 out_free:
        kfree(dir);
 out_fail:
        /* Only print this message if failed on memory allocation */
        if (!dir || !system)
                pr_warning("No memory to create event subsystem %s\n",
                           name);
        return NULL;
}

static int
event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
{
        struct ftrace_event_call *call = file->event_call;
        struct trace_array *tr = file->tr;
        struct list_head *head;
        struct dentry *d_events;
        const char *name;
        int ret;

        /*
         * If the trace point header did not define TRACE_SYSTEM
         * then the system would be called "TRACE_SYSTEM".
         */
        if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
                d_events = event_subsystem_dir(tr, call->class->system, file, parent);
                if (!d_events)
                        return -ENOMEM;
        } else
                d_events = parent;

        name = ftrace_event_name(call);
        file->dir = debugfs_create_dir(name, d_events);
        if (!file->dir) {
                pr_warning("Could not create debugfs '%s' directory\n",
                           name);
                return -1;
        }

        if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
                trace_create_file("enable", 0644, file->dir, file,
                                  &ftrace_enable_fops);

#ifdef CONFIG_PERF_EVENTS
        if (call->event.type && call->class->reg)
                trace_create_file("id", 0444, file->dir,
                                  (void *)(long)call->event.type,
                                  &ftrace_event_id_fops);
#endif

        /*
         * Other events may have the same class. Only update
         * the fields if they are not already defined.
         */
        head = trace_get_fields(call);
        if (list_empty(head)) {
                ret = call->class->define_fields(call);
                if (ret < 0) {
                        pr_warning("Could not initialize trace point"
                                   " events/%s\n", name);
                        return -1;
                }
        }
        trace_create_file("filter", 0644, file->dir, file,
                          &ftrace_event_filter_fops);

        /*
         * Only event directories that can be enabled should have
         * triggers.
         */
        if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
                trace_create_file("trigger", 0644, file->dir, file,
                                  &event_trigger_fops);

        trace_create_file("format", 0444, file->dir, call,
                          &ftrace_event_format_fops);

        return 0;
}

static void remove_event_from_tracers(struct ftrace_event_call *call)
{
        struct ftrace_event_file *file;
        struct trace_array *tr;

        do_for_each_event_file_safe(tr, file) {
                if (file->event_call != call)
                        continue;

                remove_event_file_dir(file);
                /*
                 * The do_for_each_event_file_safe() is
                 * a double loop. After finding the call for this
                 * trace_array, we use break to jump to the next
                 * trace_array.
                 */
                break;
        } while_for_each_event_file();
}

static void event_remove(struct ftrace_event_call *call)
{
        struct trace_array *tr;
        struct ftrace_event_file *file;

        do_for_each_event_file(tr, file) {
                if (file->event_call != call)
                        continue;
                ftrace_event_enable_disable(file, 0);
                destroy_preds(file);
                /*
                 * The do_for_each_event_file() is
                 * a double loop. After finding the call for this
                 * trace_array, we use break to jump to the next
                 * trace_array.
                 */
                break;
        } while_for_each_event_file();

        if (call->event.funcs)
                __unregister_ftrace_event(&call->event);
        remove_event_from_tracers(call);
        list_del(&call->list);
}

static int event_init(struct ftrace_event_call *call)
{
        int ret = 0;
        const char *name;

        name = ftrace_event_name(call);
        if (WARN_ON(!name))
                return -EINVAL;

        if (call->class->raw_init) {
                ret = call->class->raw_init(call);
                if (ret < 0 && ret != -ENOSYS)
                        pr_warn("Could not initialize trace events/%s\n",
                                name);
        }

        return ret;
}

static int
__register_event(struct ftrace_event_call *call, struct module *mod)
{
        int ret;

        ret = event_init(call);
        if (ret < 0)
                return ret;

        list_add(&call->list, &ftrace_events);
        call->mod = mod;

        return 0;
}

static struct ftrace_event_file *
trace_create_new_event(struct ftrace_event_call *call,
                       struct trace_array *tr)
{
        struct ftrace_event_file *file;

        file = kmem_cache_alloc(file_cachep, GFP_TRACE);
        if (!file)
                return NULL;

        file->event_call = call;
        file->tr = tr;
        atomic_set(&file->sm_ref, 0);
        atomic_set(&file->tm_ref, 0);
        INIT_LIST_HEAD(&file->triggers);
        list_add(&file->list, &tr->events);

        return file;
}

/* Add an event to a trace directory */
static int
__trace_add_new_event(struct ftrace_event_call *call, struct trace_array *tr)
{
        struct ftrace_event_file *file;

        file = trace_create_new_event(call, tr);
        if (!file)
                return -ENOMEM;

        return event_create_dir(tr->event_dir, file);
}

1710 /*
1711  * Just create a descriptor for early init. A descriptor is required
1712  * for enabling events at boot. We want to enable events before
1713  * the filesystem is initialized.
1714  */
1715 static __init int
1716 __trace_early_add_new_event(struct ftrace_event_call *call,
1717                             struct trace_array *tr)
1718 {
1719         struct ftrace_event_file *file;
1720 
1721         file = trace_create_new_event(call, tr);
1722         if (!file)
1723                 return -ENOMEM;
1724 
1725         return 0;
1726 }
1727 
1728 struct ftrace_module_file_ops;
1729 static void __add_event_to_tracers(struct ftrace_event_call *call);
1730 
1731 /* Add an additional event_call dynamically */
1732 int trace_add_event_call(struct ftrace_event_call *call)
1733 {
1734         int ret;
1735         mutex_lock(&trace_types_lock);
1736         mutex_lock(&event_mutex);
1737 
1738         ret = __register_event(call, NULL);
1739         if (ret >= 0)
1740                 __add_event_to_tracers(call);
1741 
1742         mutex_unlock(&event_mutex);
1743         mutex_unlock(&trace_types_lock);
1744         return ret;
1745 }
1746 
1747 /*
1748  * Must be called with trace_types_lock, event_mutex and
1749  * trace_event_sem held.
1750  */
1751 static void __trace_remove_event_call(struct ftrace_event_call *call)
1752 {
1753         event_remove(call);
1754         trace_destroy_fields(call);
1755         destroy_call_preds(call);
1756 }
1757 
1758 static int probe_remove_event_call(struct ftrace_event_call *call)
1759 {
1760         struct trace_array *tr;
1761         struct ftrace_event_file *file;
1762 
1763 #ifdef CONFIG_PERF_EVENTS
1764         if (call->perf_refcount)
1765                 return -EBUSY;
1766 #endif
1767         do_for_each_event_file(tr, file) {
1768                 if (file->event_call != call)
1769                         continue;
1770                 /*
1771                  * We can't rely on ftrace_event_enable_disable(enable => 0)
1772                  * we are going to do, FTRACE_EVENT_FL_SOFT_MODE can suppress
1773                  * TRACE_REG_UNREGISTER.
1774                  */
1775                 if (file->flags & FTRACE_EVENT_FL_ENABLED)
1776                         return -EBUSY;
1777                 /*
1778                  * The do_for_each_event_file_safe() is
1779                  * The do_for_each_event_file() is
1780                  * trace_array, we use break to jump to the next
1781                  * trace_array.
1782                  */
1783                 break;
1784         } while_for_each_event_file();
1785 
1786         __trace_remove_event_call(call);
1787 
1788         return 0;
1789 }
1790 
1791 /* Remove an event_call */
1792 int trace_remove_event_call(struct ftrace_event_call *call)
1793 {
1794         int ret;
1795 
1796         mutex_lock(&trace_types_lock);
1797         mutex_lock(&event_mutex);
1798         down_write(&trace_event_sem);
1799         ret = probe_remove_event_call(call);
1800         up_write(&trace_event_sem);
1801         mutex_unlock(&event_mutex);
1802         mutex_unlock(&trace_types_lock);
1803 
1804         return ret;
1805 }
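
/*
 * A minimal, hypothetical usage sketch of the add/remove pair above
 * (my_event_call stands for a fully initialized ftrace_event_call
 * owned by the caller; it is not a symbol in this file):
 *
 *	err = trace_add_event_call(&my_event_call);
 *	if (err)
 *		return err;
 *	...
 *	err = trace_remove_event_call(&my_event_call);
 *
 * The remove side can fail with -EBUSY while the event is still
 * enabled or holds perf references, as checked in
 * probe_remove_event_call() above.
 */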
1806 
1807 #define for_each_event(event, start, end)                       \
1808         for (event = start;                                     \
1809              (unsigned long)event < (unsigned long)end;         \
1810              event++)
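
/*
 * for_each_event() is just a bounds-checked walk over an array of
 * event pointers; an equivalent open-coded sketch:
 *
 *	for (event = start; event < end; event++)
 *		...
 */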
1811 
1812 #ifdef CONFIG_MODULES
1813 
1814 static void trace_module_add_events(struct module *mod)
1815 {
1816         struct ftrace_event_call **call, **start, **end;
1817 
1818         if (!mod->num_trace_events)
1819                 return;
1820 
1821         /* Don't create events for mods with a bad taint */
1822         if (trace_module_has_bad_taint(mod)) {
1823                 pr_err("%s: module has bad taint, not creating trace events\n",
1824                        mod->name);
1825                 return;
1826         }
1827 
1828         start = mod->trace_events;
1829         end = mod->trace_events + mod->num_trace_events;
1830 
1831         for_each_event(call, start, end) {
1832                 __register_event(*call, mod);
1833                 __add_event_to_tracers(*call);
1834         }
1835 }
1836 
1837 static void trace_module_remove_events(struct module *mod)
1838 {
1839         struct ftrace_event_call *call, *p;
1840         bool clear_trace = false;
1841 
1842         down_write(&trace_event_sem);
1843         list_for_each_entry_safe(call, p, &ftrace_events, list) {
1844                 if (call->mod == mod) {
1845                         if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
1846                                 clear_trace = true;
1847                         __trace_remove_event_call(call);
1848                 }
1849         }
1850         up_write(&trace_event_sem);
1851 
1852         /*
1853          * It is safest to reset the ring buffer if the module being unloaded
1854          * registered any events that were used. The only worry is if
1855          * a new module gets loaded, and takes on the same id as the events
1856          * of this module. When printing out the buffer, traced events left
1857          * over from this module may be passed to the new module events and
1858          * unexpected results may occur.
1859          */
1860         if (clear_trace)
1861                 tracing_reset_all_online_cpus();
1862 }
1863 
1864 static int trace_module_notify(struct notifier_block *self,
1865                                unsigned long val, void *data)
1866 {
1867         struct module *mod = data;
1868 
1869         mutex_lock(&trace_types_lock);
1870         mutex_lock(&event_mutex);
1871         switch (val) {
1872         case MODULE_STATE_COMING:
1873                 trace_module_add_events(mod);
1874                 break;
1875         case MODULE_STATE_GOING:
1876                 trace_module_remove_events(mod);
1877                 break;
1878         }
1879         mutex_unlock(&event_mutex);
1880         mutex_unlock(&trace_types_lock);
1881 
1882         return 0;
1883 }
1884 
1885 static struct notifier_block trace_module_nb = {
1886         .notifier_call = trace_module_notify,
1887         .priority = 0,
1888 };
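
/*
 * trace_module_nb is hooked up in event_trace_init() below via
 * register_module_notifier(); from then on, a module's trace events
 * are added on MODULE_STATE_COMING and torn down again on
 * MODULE_STATE_GOING.
 */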
1889 #endif /* CONFIG_MODULES */
1890 
1891 /* Create a new event directory structure for a trace directory. */
1892 static void
1893 __trace_add_event_dirs(struct trace_array *tr)
1894 {
1895         struct ftrace_event_call *call;
1896         int ret;
1897 
1898         list_for_each_entry(call, &ftrace_events, list) {
1899                 ret = __trace_add_new_event(call, tr);
1900                 if (ret < 0)
1901                         pr_warning("Could not create directory for event %s\n",
1902                                    ftrace_event_name(call));
1903         }
1904 }
1905 
1906 struct ftrace_event_file *
1907 find_event_file(struct trace_array *tr, const char *system,  const char *event)
1908 {
1909         struct ftrace_event_file *file;
1910         struct ftrace_event_call *call;
1911         const char *name;
1912 
1913         list_for_each_entry(file, &tr->events, list) {
1914 
1915                 call = file->event_call;
1916                 name = ftrace_event_name(call);
1917 
1918                 if (!name || !call->class || !call->class->reg)
1919                         continue;
1920 
1921                 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
1922                         continue;
1923 
1924                 if (strcmp(event, name) == 0 &&
1925                     strcmp(system, call->class->system) == 0)
1926                         return file;
1927         }
1928         return NULL;
1929 }
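
/*
 * Example lookup (a sketch; "sched"/"sched_switch" merely name one
 * common event). The caller in this file holds event_mutex across
 * both the lookup and the use of the result:
 *
 *	file = find_event_file(tr, "sched", "sched_switch");
 *	if (!file)
 *		return -EINVAL;
 */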
1930 
1931 #ifdef CONFIG_DYNAMIC_FTRACE
1932 
1933 /* Avoid typos */
1934 #define ENABLE_EVENT_STR        "enable_event"
1935 #define DISABLE_EVENT_STR       "disable_event"
1936 
1937 struct event_probe_data {
1938         struct ftrace_event_file        *file;
1939         unsigned long                   count;
1940         int                             ref;
1941         bool                            enable;
1942 };
1943 
1944 static void
1945 event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
1946 {
1947         struct event_probe_data **pdata = (struct event_probe_data **)_data;
1948         struct event_probe_data *data = *pdata;
1949 
1950         if (!data)
1951                 return;
1952 
1953         if (data->enable)
1954                 clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
1955         else
1956                 set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
1957 }
1958 
1959 static void
1960 event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data)
1961 {
1962         struct event_probe_data **pdata = (struct event_probe_data **)_data;
1963         struct event_probe_data *data = *pdata;
1964 
1965         if (!data)
1966                 return;
1967 
1968         if (!data->count)
1969                 return;
1970 
1971         /* Skip if the event is in a state we want to switch to */
1972         if (data->enable == !(data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
1973                 return;
1974 
1975         if (data->count != -1)
1976                 (data->count)--;
1977 
1978         event_enable_probe(ip, parent_ip, _data);
1979 }
1980 
1981 static int
1982 event_enable_print(struct seq_file *m, unsigned long ip,
1983                       struct ftrace_probe_ops *ops, void *_data)
1984 {
1985         struct event_probe_data *data = _data;
1986 
1987         seq_printf(m, "%ps:", (void *)ip);
1988 
1989         seq_printf(m, "%s:%s:%s",
1990                    data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
1991                    data->file->event_call->class->system,
1992                    ftrace_event_name(data->file->event_call));
1993 
1994         if (data->count == -1)
1995                 seq_printf(m, ":unlimited\n");
1996         else
1997                 seq_printf(m, ":count=%ld\n", data->count);
1998 
1999         return 0;
2000 }
2001 
2002 static int
2003 event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip,
2004                   void **_data)
2005 {
2006         struct event_probe_data **pdata = (struct event_probe_data **)_data;
2007         struct event_probe_data *data = *pdata;
2008 
2009         data->ref++;
2010         return 0;
2011 }
2012 
2013 static void
2014 event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip,
2015                   void **_data)
2016 {
2017         struct event_probe_data **pdata = (struct event_probe_data **)_data;
2018         struct event_probe_data *data = *pdata;
2019 
2020         if (WARN_ON_ONCE(data->ref <= 0))
2021                 return;
2022 
2023         data->ref--;
2024         if (!data->ref) {
2025                 /* Remove the SOFT_MODE flag */
2026                 __ftrace_event_enable_disable(data->file, 0, 1);
2027                 module_put(data->file->event_call->mod);
2028                 kfree(data);
2029         }
2030         *pdata = NULL;
2031 }
2032 
2033 static struct ftrace_probe_ops event_enable_probe_ops = {
2034         .func                   = event_enable_probe,
2035         .print                  = event_enable_print,
2036         .init                   = event_enable_init,
2037         .free                   = event_enable_free,
2038 };
2039 
2040 static struct ftrace_probe_ops event_enable_count_probe_ops = {
2041         .func                   = event_enable_count_probe,
2042         .print                  = event_enable_print,
2043         .init                   = event_enable_init,
2044         .free                   = event_enable_free,
2045 };
2046 
2047 static struct ftrace_probe_ops event_disable_probe_ops = {
2048         .func                   = event_enable_probe,
2049         .print                  = event_enable_print,
2050         .init                   = event_enable_init,
2051         .free                   = event_enable_free,
2052 };
2053 
2054 static struct ftrace_probe_ops event_disable_count_probe_ops = {
2055         .func                   = event_enable_count_probe,
2056         .print                  = event_enable_print,
2057         .init                   = event_enable_init,
2058         .free                   = event_enable_free,
2059 };
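
/*
 * Note that the enable and disable ops above share all of their
 * callbacks; which way a probe flips the event is carried in
 * event_probe_data->enable, set from the command name in
 * event_enable_func() below.
 */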
2060 
2061 static int
2062 event_enable_func(struct ftrace_hash *hash,
2063                   char *glob, char *cmd, char *param, int enabled)
2064 {
2065         struct trace_array *tr = top_trace_array();
2066         struct ftrace_event_file *file;
2067         struct ftrace_probe_ops *ops;
2068         struct event_probe_data *data;
2069         const char *system;
2070         const char *event;
2071         char *number;
2072         bool enable;
2073         int ret;
2074 
2075         if (!tr)
2076                 return -ENODEV;
2077 
2078         /* hash funcs only work with set_ftrace_filter */
2079         if (!enabled || !param)
2080                 return -EINVAL;
2081 
2082         system = strsep(&param, ":");
2083         if (!param)
2084                 return -EINVAL;
2085 
2086         event = strsep(&param, ":");
2087 
2088         mutex_lock(&event_mutex);
2089 
2090         ret = -EINVAL;
2091         file = find_event_file(tr, system, event);
2092         if (!file)
2093                 goto out;
2094 
2095         enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
2096 
2097         if (enable)
2098                 ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
2099         else
2100                 ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
2101 
2102         if (glob[0] == '!') {
2103                 unregister_ftrace_function_probe_func(glob+1, ops);
2104                 ret = 0;
2105                 goto out;
2106         }
2107 
2108         ret = -ENOMEM;
2109         data = kzalloc(sizeof(*data), GFP_KERNEL);
2110         if (!data)
2111                 goto out;
2112 
2113         data->enable = enable;
2114         data->count = -1;
2115         data->file = file;
2116 
2117         if (!param)
2118                 goto out_reg;
2119 
2120         number = strsep(&param, ":");
2121 
2122         ret = -EINVAL;
2123         if (!strlen(number))
2124                 goto out_free;
2125 
2126         /*
2127          * The remaining parameter is a count: it limits how many
2128          * times the probe will flip the event before going idle.
2129          */
2130         ret = kstrtoul(number, 0, &data->count);
2131         if (ret)
2132                 goto out_free;
2133 
2134  out_reg:
2135         /* Don't let event modules unload while a probe is registered */
2136         ret = try_module_get(file->event_call->mod);
2137         if (!ret) {
2138                 ret = -EBUSY;
2139                 goto out_free;
2140         }
2141 
2142         ret = __ftrace_event_enable_disable(file, 1, 1);
2143         if (ret < 0)
2144                 goto out_put;
2145         ret = register_ftrace_function_probe(glob, ops, data);
2146         /*
2147          * On success, the above returns the number of functions enabled,
2148          * but if it didn't find any functions it returns zero.
2149          * Consider finding no functions a failure too.
2150          */
2151         if (!ret) {
2152                 ret = -ENOENT;
2153                 goto out_disable;
2154         } else if (ret < 0)
2155                 goto out_disable;
2156         /* Just return zero, not the number of enabled functions */
2157         ret = 0;
2158  out:
2159         mutex_unlock(&event_mutex);
2160         return ret;
2161 
2162  out_disable:
2163         __ftrace_event_enable_disable(file, 0, 1);
2164  out_put:
2165         module_put(file->event_call->mod);
2166  out_free:
2167         kfree(data);
2168         goto out;
2169 }
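
/*
 * The set_ftrace_filter command format implemented above, as parsed
 * by event_enable_func() (a sketch; the function and event names are
 * only examples):
 *
 *	<function>:enable_event:<system>:<event>[:<count>]
 *	<function>:disable_event:<system>:<event>[:<count>]
 *
 * e.g., assuming debugfs is mounted at /sys/kernel/debug:
 *
 *	echo 'schedule:enable_event:sched:sched_switch:2' > \
 *		/sys/kernel/debug/tracing/set_ftrace_filter
 *
 * A leading '!' on the function glob unregisters the probe again.
 */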
2170 
2171 static struct ftrace_func_command event_enable_cmd = {
2172         .name                   = ENABLE_EVENT_STR,
2173         .func                   = event_enable_func,
2174 };
2175 
2176 static struct ftrace_func_command event_disable_cmd = {
2177         .name                   = DISABLE_EVENT_STR,
2178         .func                   = event_enable_func,
2179 };
2180 
2181 static __init int register_event_cmds(void)
2182 {
2183         int ret;
2184 
2185         ret = register_ftrace_command(&event_enable_cmd);
2186         if (WARN_ON(ret < 0))
2187                 return ret;
2188         ret = register_ftrace_command(&event_disable_cmd);
2189         if (WARN_ON(ret < 0))
2190                 unregister_ftrace_command(&event_enable_cmd);
2191         return ret;
2192 }
2193 #else
2194 static inline int register_event_cmds(void) { return 0; }
2195 #endif /* CONFIG_DYNAMIC_FTRACE */
2196 
2197 /*
2198  * The top level array has already had its ftrace_event_file
2199  * descriptors created in order to allow for early events to
2200  * be recorded. This function is called after the debugfs has been
2201  * initialized, and we now have to create the files associated
2202  * with the events.
2203  */
2204 static __init void
2205 __trace_early_add_event_dirs(struct trace_array *tr)
2206 {
2207         struct ftrace_event_file *file;
2208         int ret;
2209 
2210 
2211         list_for_each_entry(file, &tr->events, list) {
2212                 ret = event_create_dir(tr->event_dir, file);
2213                 if (ret < 0)
2214                         pr_warning("Could not create directory for event %s\n",
2215                                    ftrace_event_name(file->event_call));
2216         }
2217 }
2218 
2219 /*
2220  * For early boot up, the top trace array needs to have
2221  * a list of events that can be enabled. This must be done before
2222  * the filesystem is set up in order to allow events to be traced
2223  * early.
2224  */
2225 static __init void
2226 __trace_early_add_events(struct trace_array *tr)
2227 {
2228         struct ftrace_event_call *call;
2229         int ret;
2230 
2231         list_for_each_entry(call, &ftrace_events, list) {
2232                 /* Early boot up should not have any modules loaded */
2233                 if (WARN_ON_ONCE(call->mod))
2234                         continue;
2235 
2236                 ret = __trace_early_add_new_event(call, tr);
2237                 if (ret < 0)
2238                         pr_warning("Could not create early event %s\n",
2239                                    ftrace_event_name(call));
2240         }
2241 }
2242 
2243 /* Remove the event directory structure for a trace directory. */
2244 static void
2245 __trace_remove_event_dirs(struct trace_array *tr)
2246 {
2247         struct ftrace_event_file *file, *next;
2248 
2249         list_for_each_entry_safe(file, next, &tr->events, list)
2250                 remove_event_file_dir(file);
2251 }
2252 
2253 static void __add_event_to_tracers(struct ftrace_event_call *call)
2254 {
2255         struct trace_array *tr;
2256 
2257         list_for_each_entry(tr, &ftrace_trace_arrays, list)
2258                 __trace_add_new_event(call, tr);
2259 }
2260 
2261 extern struct ftrace_event_call *__start_ftrace_events[];
2262 extern struct ftrace_event_call *__stop_ftrace_events[];
2263 
2264 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
2265 
2266 static __init int setup_trace_event(char *str)
2267 {
2268         strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
2269         ring_buffer_expanded = true;
2270         tracing_selftest_disabled = true;
2271 
2272         return 1;
2273 }
2274 __setup("trace_event=", setup_trace_event);
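
/*
 * Boot-time usage (a sketch; the event names are only examples):
 * passing
 *
 *	trace_event=sched:sched_switch,irq:irq_handler_entry
 *
 * on the kernel command line lands in bootup_event_buf, which is
 * split on ',' and applied by event_trace_enable() below.
 */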
2275 
2276 /* Expects to have event_mutex held when called */
2277 static int
2278 create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
2279 {
2280         struct dentry *d_events;
2281         struct dentry *entry;
2282 
2283         entry = debugfs_create_file("set_event", 0644, parent,
2284                                     tr, &ftrace_set_event_fops);
2285         if (!entry) {
2286                 pr_warning("Could not create debugfs 'set_event' entry\n");
2287                 return -ENOMEM;
2288         }
2289 
2290         d_events = debugfs_create_dir("events", parent);
2291         if (!d_events) {
2292                 pr_warning("Could not create debugfs 'events' directory\n");
2293                 return -ENOMEM;
2294         }
2295 
2296         /* ring buffer internal formats */
2297         trace_create_file("header_page", 0444, d_events,
2298                           ring_buffer_print_page_header,
2299                           &ftrace_show_header_fops);
2300 
2301         trace_create_file("header_event", 0444, d_events,
2302                           ring_buffer_print_entry_header,
2303                           &ftrace_show_header_fops);
2304 
2305         trace_create_file("enable", 0644, d_events,
2306                           tr, &ftrace_tr_enable_fops);
2307 
2308         tr->event_dir = d_events;
2309 
2310         return 0;
2311 }
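
/*
 * A sketch of the top-level layout created above, under @parent:
 *
 *	set_event		(0644) bulk set/clear events by name
 *	events/			per-system/per-event directories go here
 *	events/header_page	(0444) ring buffer page header format
 *	events/header_event	(0444) ring buffer event header format
 *	events/enable		(0644) switch for every event at once
 */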
2312 
2313 /**
2314  * event_trace_add_tracer - add an instance of a trace_array to events
2315  * @parent: The parent dentry to place the files/directories for events in
2316  * @tr: The trace array associated with these events
2317  *
2318  * When a new instance is created, it needs to set up its events
2319  * directory, as well as other files associated with events. It also
2320  * creates the event hierarchy in the @parent/events directory.
2321  *
2322  * Returns 0 on success.
2323  */
2324 int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
2325 {
2326         int ret;
2327 
2328         mutex_lock(&event_mutex);
2329 
2330         ret = create_event_toplevel_files(parent, tr);
2331         if (ret)
2332                 goto out_unlock;
2333 
2334         down_write(&trace_event_sem);
2335         __trace_add_event_dirs(tr);
2336         up_write(&trace_event_sem);
2337 
2338  out_unlock:
2339         mutex_unlock(&event_mutex);
2340 
2341         return ret;
2342 }
2343 
2344 /*
2345  * The top trace array has already had its file descriptors created.
2346  * Now the files themselves need to be created.
2347  */
2348 static __init int
2349 early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
2350 {
2351         int ret;
2352 
2353         mutex_lock(&event_mutex);
2354 
2355         ret = create_event_toplevel_files(parent, tr);
2356         if (ret)
2357                 goto out_unlock;
2358 
2359         down_write(&trace_event_sem);
2360         __trace_early_add_event_dirs(tr);
2361         up_write(&trace_event_sem);
2362 
2363  out_unlock:
2364         mutex_unlock(&event_mutex);
2365 
2366         return ret;
2367 }
2368 
2369 int event_trace_del_tracer(struct trace_array *tr)
2370 {
2371         mutex_lock(&event_mutex);
2372 
2373         /* Disable any event triggers and associated soft-disabled events */
2374         clear_event_triggers(tr);
2375 
2376         /* Disable any running events */
2377         __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
2378 
2379         /* Accesses to events are within rcu_read_lock_sched() */
2380         synchronize_sched();
2381 
2382         down_write(&trace_event_sem);
2383         __trace_remove_event_dirs(tr);
2384         debugfs_remove_recursive(tr->event_dir);
2385         up_write(&trace_event_sem);
2386 
2387         tr->event_dir = NULL;
2388 
2389         mutex_unlock(&event_mutex);
2390 
2391         return 0;
2392 }
2393 
2394 static __init int event_trace_memsetup(void)
2395 {
2396         field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
2397         file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC);
2398         return 0;
2399 }
2400 
2401 static __init int event_trace_enable(void)
2402 {
2403         struct trace_array *tr = top_trace_array();
2404         struct ftrace_event_call **iter, *call;
2405         char *buf = bootup_event_buf;
2406         char *token;
2407         int ret;
2408 
2409         if (!tr)
2410                 return -ENODEV;
2411 
2412         for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
2413 
2414                 call = *iter;
2415                 ret = event_init(call);
2416                 if (!ret)
2417                         list_add(&call->list, &ftrace_events);
2418         }
2419 
2420         /*
2421          * We need the top trace array to have a working set of trace
2422          * points at early init, before the debug files and directories
2423          * are created. Create the file entries now, and attach them
2424          * to the actual file dentries later.
2425          */
2426         __trace_early_add_events(tr);
2427 
2428         while (true) {
2429                 token = strsep(&buf, ",");
2430 
2431                 if (!token)
2432                         break;
2433                 if (!*token)
2434                         continue;
2435 
2436                 ret = ftrace_set_clr_event(tr, token, 1);
2437                 if (ret)
2438                         pr_warn("Failed to enable trace event: %s\n", token);
2439         }
2440 
2441         trace_printk_start_comm();
2442 
2443         register_event_cmds();
2444 
2445         register_trigger_cmds();
2446 
2447         return 0;
2448 }
2449 
2450 static __init int event_trace_init(void)
2451 {
2452         struct trace_array *tr;
2453         struct dentry *d_tracer;
2454         struct dentry *entry;
2455         int ret;
2456 
2457         tr = top_trace_array();
2458         if (!tr)
2459                 return -ENODEV;
2460 
2461         d_tracer = tracing_init_dentry();
2462         if (!d_tracer)
2463                 return 0;
2464 
2465         entry = debugfs_create_file("available_events", 0444, d_tracer,
2466                                     tr, &ftrace_avail_fops);
2467         if (!entry)
2468                 pr_warning("Could not create debugfs "
2469                            "'available_events' entry\n");
2470 
2471         if (trace_define_common_fields())
2472                 pr_warning("tracing: Failed to allocate common fields");
2473 
2474         ret = early_event_add_tracer(d_tracer, tr);
2475         if (ret)
2476                 return ret;
2477 
2478 #ifdef CONFIG_MODULES
2479         ret = register_module_notifier(&trace_module_nb);
2480         if (ret)
2481                 pr_warning("Failed to register trace events module notifier\n");
2482 #endif
2483         return 0;
2484 }
2485 early_initcall(event_trace_memsetup);
2486 core_initcall(event_trace_enable);
2487 fs_initcall(event_trace_init);
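
/*
 * The three initcall levels above stage the bring-up: the slab caches
 * come first (early), then the built-in events and any trace_event=
 * boot options (core), and finally the debugfs files once the
 * filesystem code is up (fs). When CONFIG_FTRACE_STARTUP_TEST is set,
 * the self tests below run as a late_initcall().
 */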
2488 
2489 #ifdef CONFIG_FTRACE_STARTUP_TEST
2490 
2491 static DEFINE_SPINLOCK(test_spinlock);
2492 static DEFINE_SPINLOCK(test_spinlock_irq);
2493 static DEFINE_MUTEX(test_mutex);
2494 
2495 static __init void test_work(struct work_struct *dummy)
2496 {
2497         spin_lock(&test_spinlock);
2498         spin_lock_irq(&test_spinlock_irq);
2499         udelay(1);
2500         spin_unlock_irq(&test_spinlock_irq);
2501         spin_unlock(&test_spinlock);
2502 
2503         mutex_lock(&test_mutex);
2504         msleep(1);
2505         mutex_unlock(&test_mutex);
2506 }
2507 
2508 static __init int event_test_thread(void *unused)
2509 {
2510         void *test_malloc;
2511 
2512         test_malloc = kmalloc(1234, GFP_KERNEL);
2513         if (!test_malloc)
2514                 pr_info("failed to kmalloc\n");
2515 
2516         schedule_on_each_cpu(test_work);
2517 
2518         kfree(test_malloc);
2519 
2520         set_current_state(TASK_INTERRUPTIBLE);
2521         while (!kthread_should_stop())
2522                 schedule();
2523 
2524         return 0;
2525 }
2526 
2527 /*
2528  * Do various things that may trigger events.
2529  */
2530 static __init void event_test_stuff(void)
2531 {
2532         struct task_struct *test_thread;
2533 
2534         test_thread = kthread_run(event_test_thread, NULL, "test-events");
2535         msleep(1);
2536         kthread_stop(test_thread);
2537 }
2538 
2539 /*
2540  * For every trace event defined, we will test each trace point separately,
2541  * and then by groups, and finally all trace points.
2542  */
2543 static __init void event_trace_self_tests(void)
2544 {
2545         struct ftrace_subsystem_dir *dir;
2546         struct ftrace_event_file *file;
2547         struct ftrace_event_call *call;
2548         struct event_subsystem *system;
2549         struct trace_array *tr;
2550         int ret;
2551 
2552         tr = top_trace_array();
2553         if (!tr)
2554                 return;
2555 
2556         pr_info("Running tests on trace events:\n");
2557 
2558         list_for_each_entry(file, &tr->events, list) {
2559 
2560                 call = file->event_call;
2561 
2562                 /* Only test those that have a probe */
2563                 if (!call->class || !call->class->probe)
2564                         continue;
2565 
2566 /*
2567  * Testing syscall events here is pretty useless, but
2568  * we still do it if configured, even though it is time consuming.
2569  * What we really need is a user thread to perform the
2570  * syscalls as we test.
2571  */
2572 #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
2573                 if (call->class->system &&
2574                     strcmp(call->class->system, "syscalls") == 0)
2575                         continue;
2576 #endif
2577 
2578                 pr_info("Testing event %s: ", ftrace_event_name(call));
2579 
2580                 /*
2581                  * If an event is already enabled, someone is using
2582                  * it and the self test should not be on.
2583                  */
2584                 if (file->flags & FTRACE_EVENT_FL_ENABLED) {
2585                         pr_warning("Enabled event during self test!\n");
2586                         WARN_ON_ONCE(1);
2587                         continue;
2588                 }
2589 
2590                 ftrace_event_enable_disable(file, 1);
2591                 event_test_stuff();
2592                 ftrace_event_enable_disable(file, 0);
2593 
2594                 pr_cont("OK\n");
2595         }
2596 
2597         /* Now test at the sub system level */
2598 
2599         pr_info("Running tests on trace event systems:\n");
2600 
2601         list_for_each_entry(dir, &tr->systems, list) {
2602 
2603                 system = dir->subsystem;
2604 
2605                 /* the ftrace system is special, skip it */
2606                 if (strcmp(system->name, "ftrace") == 0)
2607                         continue;
2608 
2609                 pr_info("Testing event system %s: ", system->name);
2610 
2611                 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
2612                 if (WARN_ON_ONCE(ret)) {
2613                         pr_warning("error enabling system %s\n",
2614                                    system->name);
2615                         continue;
2616                 }
2617 
2618                 event_test_stuff();
2619 
2620                 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
2621                 if (WARN_ON_ONCE(ret)) {
2622                         pr_warning("error disabling system %s\n",
2623                                    system->name);
2624                         continue;
2625                 }
2626 
2627                 pr_cont("OK\n");
2628         }
2629 
2630         /* Test with all events enabled */
2631 
2632         pr_info("Running tests on all trace events:\n");
2633         pr_info("Testing all events: ");
2634 
2635         ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
2636         if (WARN_ON_ONCE(ret)) {
2637                 pr_warning("error enabling all events\n");
2638                 return;
2639         }
2640 
2641         event_test_stuff();
2642 
2643         /* Disable all events again */
2644         ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
2645         if (WARN_ON_ONCE(ret)) {
2646                 pr_warning("error disabling all events\n");
2647                 return;
2648         }
2649 
2650         pr_cont("OK\n");
2651 }
2652 
2653 #ifdef CONFIG_FUNCTION_TRACER
2654 
2655 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
2656 
2657 static void
2658 function_test_events_call(unsigned long ip, unsigned long parent_ip,
2659                           struct ftrace_ops *op, struct pt_regs *pt_regs)
2660 {
2661         struct ring_buffer_event *event;
2662         struct ring_buffer *buffer;
2663         struct ftrace_entry *entry;
2664         unsigned long flags;
2665         long disabled;
2666         int cpu;
2667         int pc;
2668 
2669         pc = preempt_count();
2670         preempt_disable_notrace();
2671         cpu = raw_smp_processor_id();
2672         disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
2673 
2674         if (disabled != 1)
2675                 goto out;
2676 
2677         local_save_flags(flags);
2678 
2679         event = trace_current_buffer_lock_reserve(&buffer,
2680                                                   TRACE_FN, sizeof(*entry),
2681                                                   flags, pc);
2682         if (!event)
2683                 goto out;
2684         entry   = ring_buffer_event_data(event);
2685         entry->ip                       = ip;
2686         entry->parent_ip                = parent_ip;
2687 
2688         trace_buffer_unlock_commit(buffer, event, flags, pc);
2689 
2690  out:
2691         atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
2692         preempt_enable_notrace();
2693 }
2694 
2695 static struct ftrace_ops trace_ops __initdata  =
2696 {
2697         .func = function_test_events_call,
2698         .flags = FTRACE_OPS_FL_RECURSION_SAFE,
2699 };
2700 
2701 static __init void event_trace_self_test_with_function(void)
2702 {
2703         int ret;
2704         ret = register_ftrace_function(&trace_ops);
2705         if (WARN_ON(ret < 0)) {
2706                 pr_info("Failed to enable function tracer for event tests\n");
2707                 return;
2708         }
2709         pr_info("Running tests again, along with the function tracer\n");
2710         event_trace_self_tests();
2711         unregister_ftrace_function(&trace_ops);
2712 }
2713 #else
2714 static __init void event_trace_self_test_with_function(void)
2715 {
2716 }
2717 #endif
2718 
2719 static __init int event_trace_self_tests_init(void)
2720 {
2721         if (!tracing_selftest_disabled) {
2722                 event_trace_self_tests();
2723                 event_trace_self_test_with_function();
2724         }
2725 
2726         return 0;
2727 }
2728 
2729 late_initcall(event_trace_self_tests_init);
2730 
2731 #endif
2732 
