TOMOYO Linux Cross Reference
Linux/kernel/trace/ftrace.c

  1 /*
  2  * Infrastructure for profiling code inserted by 'gcc -pg'.
  3  *
  4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
  5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
  6  *
  7  * Originally ported from the -rt patch by:
  8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
  9  *
 10  * Based on code in the latency_tracer, that is:
 11  *
 12  *  Copyright (C) 2004-2006 Ingo Molnar
 13  *  Copyright (C) 2004 Nadia Yvette Chambers
 14  */
 15 
 16 #include <linux/stop_machine.h>
 17 #include <linux/clocksource.h>
 18 #include <linux/kallsyms.h>
 19 #include <linux/seq_file.h>
 20 #include <linux/suspend.h>
 21 #include <linux/debugfs.h>
 22 #include <linux/hardirq.h>
 23 #include <linux/kthread.h>
 24 #include <linux/uaccess.h>
 25 #include <linux/bsearch.h>
 26 #include <linux/module.h>
 27 #include <linux/ftrace.h>
 28 #include <linux/sysctl.h>
 29 #include <linux/slab.h>
 30 #include <linux/ctype.h>
 31 #include <linux/sort.h>
 32 #include <linux/list.h>
 33 #include <linux/hash.h>
 34 #include <linux/rcupdate.h>
 35 
 36 #include <trace/events/sched.h>
 37 
 38 #include <asm/setup.h>
 39 
 40 #include "trace_output.h"
 41 #include "trace_stat.h"
 42 
 43 #define FTRACE_WARN_ON(cond)                    \
 44         ({                                      \
 45                 int ___r = cond;                \
 46                 if (WARN_ON(___r))              \
 47                         ftrace_kill();          \
 48                 ___r;                           \
 49         })
 50 
 51 #define FTRACE_WARN_ON_ONCE(cond)               \
 52         ({                                      \
 53                 int ___r = cond;                \
 54                 if (WARN_ON_ONCE(___r))         \
 55                         ftrace_kill();          \
 56                 ___r;                           \
 57         })
 58 
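Both macros are GNU statement expressions: they evaluate to the tested condition while also calling ftrace_kill() when it trips, so callers can branch on the result in one step. A minimal, hypothetical sketch of the intended call pattern (the helper name is made up for illustration):

	static int example_check(void *ptr)
	{
		/* warn, shut ftrace down, and bail out if ptr is missing */
		if (FTRACE_WARN_ON(!ptr))
			return -EINVAL;
		return 0;
	}
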
 59 /* hash bits for specific function selection */
 60 #define FTRACE_HASH_BITS 7
 61 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
 62 #define FTRACE_HASH_DEFAULT_BITS 10
 63 #define FTRACE_HASH_MAX_BITS 12
 64 
 65 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
 66 
 67 static struct ftrace_ops ftrace_list_end __read_mostly = {
 68         .func           = ftrace_stub,
 69         .flags          = FTRACE_OPS_FL_RECURSION_SAFE,
 70 };
 71 
 72 /* ftrace_enabled is a method to turn ftrace on or off */
 73 int ftrace_enabled __read_mostly;
 74 static int last_ftrace_enabled;
 75 
 76 /* Quick disabling of function tracer. */
 77 int function_trace_stop __read_mostly;
 78 
 79 /* Current function tracing op */
 80 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
 81 
 82 /* List for set_ftrace_pid's pids. */
 83 LIST_HEAD(ftrace_pids);
 84 struct ftrace_pid {
 85         struct list_head list;
 86         struct pid *pid;
 87 };
 88 
 89 /*
 90  * ftrace_disabled is set when an anomaly is discovered.
 91  * ftrace_disabled is much stronger than ftrace_enabled.
 92  */
 93 static int ftrace_disabled __read_mostly;
 94 
 95 static DEFINE_MUTEX(ftrace_lock);
 96 
 97 static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
 98 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
 99 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
100 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
101 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
102 static struct ftrace_ops global_ops;
103 static struct ftrace_ops control_ops;
104 
105 #if ARCH_SUPPORTS_FTRACE_OPS
106 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
107                                  struct ftrace_ops *op, struct pt_regs *regs);
108 #else
109 /* See comment below, where ftrace_ops_list_func is defined */
110 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
111 #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
112 #endif
113 
114 /**
115  * ftrace_nr_registered_ops - return number of ops registered
116  *
117  * Returns the number of ftrace_ops registered and tracing functions
118  */
119 int ftrace_nr_registered_ops(void)
120 {
121         struct ftrace_ops *ops;
122         int cnt = 0;
123 
124         mutex_lock(&ftrace_lock);
125 
126         for (ops = ftrace_ops_list;
127              ops != &ftrace_list_end; ops = ops->next)
128                 cnt++;
129 
130         mutex_unlock(&ftrace_lock);
131 
132         return cnt;
133 }
134 
135 /*
136  * Traverse the ftrace_global_list, invoking all entries.  The reason that we
137  * can use rcu_dereference_raw() is that elements removed from this list
138  * are simply leaked, so there is no need to interact with a grace-period
139  * mechanism.  The rcu_dereference_raw() calls are needed to handle
140  * concurrent insertions into the ftrace_global_list.
141  *
142  * Silly Alpha and silly pointer-speculation compiler optimizations!
143  */
144 static void
145 ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
146                         struct ftrace_ops *op, struct pt_regs *regs)
147 {
148         if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
149                 return;
150 
151         trace_recursion_set(TRACE_GLOBAL_BIT);
152         op = rcu_dereference_raw(ftrace_global_list); /*see above*/
153         while (op != &ftrace_list_end) {
154                 op->func(ip, parent_ip, op, regs);
155                 op = rcu_dereference_raw(op->next); /*see above*/
156         };
157         trace_recursion_clear(TRACE_GLOBAL_BIT);
158 }
159 
160 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
161                             struct ftrace_ops *op, struct pt_regs *regs)
162 {
163         if (!test_tsk_trace_trace(current))
164                 return;
165 
166         ftrace_pid_function(ip, parent_ip, op, regs);
167 }
168 
169 static void set_ftrace_pid_function(ftrace_func_t func)
170 {
171         /* do not set ftrace_pid_function to itself! */
172         if (func != ftrace_pid_func)
173                 ftrace_pid_function = func;
174 }
175 
176 /**
177  * clear_ftrace_function - reset the ftrace function
178  *
179  * This NULLs the ftrace function and in essence stops
180  * tracing.
181  */
182 void clear_ftrace_function(void)
183 {
184         ftrace_trace_function = ftrace_stub;
185         ftrace_pid_function = ftrace_stub;
186 }
187 
188 static void control_ops_disable_all(struct ftrace_ops *ops)
189 {
190         int cpu;
191 
192         for_each_possible_cpu(cpu)
193                 *per_cpu_ptr(ops->disabled, cpu) = 1;
194 }
195 
196 static int control_ops_alloc(struct ftrace_ops *ops)
197 {
198         int __percpu *disabled;
199 
200         disabled = alloc_percpu(int);
201         if (!disabled)
202                 return -ENOMEM;
203 
204         ops->disabled = disabled;
205         control_ops_disable_all(ops);
206         return 0;
207 }
208 
209 static void control_ops_free(struct ftrace_ops *ops)
210 {
211         free_percpu(ops->disabled);
212 }
213 
214 static void update_global_ops(void)
215 {
216         ftrace_func_t func;
217 
218         /*
219          * If there's only one function registered, then call that
220          * function directly. Otherwise, we need to iterate over the
221          * registered callers.
222          */
223         if (ftrace_global_list == &ftrace_list_end ||
224             ftrace_global_list->next == &ftrace_list_end)
225                 func = ftrace_global_list->func;
226         else
227                 func = ftrace_global_list_func;
228 
229         /* If we filter on pids, update to use the pid function */
230         if (!list_empty(&ftrace_pids)) {
231                 set_ftrace_pid_function(func);
232                 func = ftrace_pid_func;
233         }
234 
235         global_ops.func = func;
236 }
237 
238 static void update_ftrace_function(void)
239 {
240         ftrace_func_t func;
241 
242         update_global_ops();
243 
244         /*
245          * If we are at the end of the list and this ops is
246          * recursion safe and not dynamic and the arch supports passing ops,
247          * then have the mcount trampoline call the function directly.
248          */
249         if (ftrace_ops_list == &ftrace_list_end ||
250             (ftrace_ops_list->next == &ftrace_list_end &&
251              !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
252              (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
253              !FTRACE_FORCE_LIST_FUNC)) {
254                 /* Set the ftrace_ops that the arch callback uses */
255                 if (ftrace_ops_list == &global_ops)
256                         function_trace_op = ftrace_global_list;
257                 else
258                         function_trace_op = ftrace_ops_list;
259                 func = ftrace_ops_list->func;
260         } else {
261                 /* Just use the default ftrace_ops */
262                 function_trace_op = &ftrace_list_end;
263                 func = ftrace_ops_list_func;
264         }
265 
266         ftrace_trace_function = func;
267 }
268 
269 static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
270 {
271         ops->next = *list;
272         /*
273          * We are entering ops into the list but another
274          * CPU might be walking that list. We need to make sure
275          * the ops->next pointer is valid before another CPU sees
276          * the ops pointer included into the list.
277          */
278         rcu_assign_pointer(*list, ops);
279 }
280 
281 static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
282 {
283         struct ftrace_ops **p;
284 
285         /*
286          * If we are removing the last function, then simply point
287          * to the ftrace_stub.
288          */
289         if (*list == ops && ops->next == &ftrace_list_end) {
290                 *list = &ftrace_list_end;
291                 return 0;
292         }
293 
294         for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
295                 if (*p == ops)
296                         break;
297 
298         if (*p != ops)
299                 return -1;
300 
301         *p = (*p)->next;
302         return 0;
303 }
304 
305 static void add_ftrace_list_ops(struct ftrace_ops **list,
306                                 struct ftrace_ops *main_ops,
307                                 struct ftrace_ops *ops)
308 {
309         int first = *list == &ftrace_list_end;
310         add_ftrace_ops(list, ops);
311         if (first)
312                 add_ftrace_ops(&ftrace_ops_list, main_ops);
313 }
314 
315 static int remove_ftrace_list_ops(struct ftrace_ops **list,
316                                   struct ftrace_ops *main_ops,
317                                   struct ftrace_ops *ops)
318 {
319         int ret = remove_ftrace_ops(list, ops);
320         if (!ret && *list == &ftrace_list_end)
321                 ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
322         return ret;
323 }
324 
325 static int __register_ftrace_function(struct ftrace_ops *ops)
326 {
327         if (unlikely(ftrace_disabled))
328                 return -ENODEV;
329 
330         if (FTRACE_WARN_ON(ops == &global_ops))
331                 return -EINVAL;
332 
333         if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
334                 return -EBUSY;
335 
336         /* We don't support both control and global flags set. */
337         if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
338                 return -EINVAL;
339 
340 #ifndef ARCH_SUPPORTS_FTRACE_SAVE_REGS
341         /*
342          * If the ftrace_ops specifies SAVE_REGS, then it only can be used
343          * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
344          * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
345          */
346         if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
347             !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
348                 return -EINVAL;
349 
350         if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
351                 ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
352 #endif
353 
354         if (!core_kernel_data((unsigned long)ops))
355                 ops->flags |= FTRACE_OPS_FL_DYNAMIC;
356 
357         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
358                 add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
359                 ops->flags |= FTRACE_OPS_FL_ENABLED;
360         } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
361                 if (control_ops_alloc(ops))
362                         return -ENOMEM;
363                 add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
364         } else
365                 add_ftrace_ops(&ftrace_ops_list, ops);
366 
367         if (ftrace_enabled)
368                 update_ftrace_function();
369 
370         return 0;
371 }
372 
373 static int __unregister_ftrace_function(struct ftrace_ops *ops)
374 {
375         int ret;
376 
377         if (ftrace_disabled)
378                 return -ENODEV;
379 
380         if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
381                 return -EBUSY;
382 
383         if (FTRACE_WARN_ON(ops == &global_ops))
384                 return -EINVAL;
385 
386         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
387                 ret = remove_ftrace_list_ops(&ftrace_global_list,
388                                              &global_ops, ops);
389                 if (!ret)
390                         ops->flags &= ~FTRACE_OPS_FL_ENABLED;
391         } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
392                 ret = remove_ftrace_list_ops(&ftrace_control_list,
393                                              &control_ops, ops);
394                 if (!ret) {
395                         /*
396                          * The ftrace_ops is now removed from the list,
397                          * so there'll be no new users. We must ensure
398                          * all current users are done before we free
399                          * the control data.
400                          */
401                         synchronize_sched();
402                         control_ops_free(ops);
403                 }
404         } else
405                 ret = remove_ftrace_ops(&ftrace_ops_list, ops);
406 
407         if (ret < 0)
408                 return ret;
409 
410         if (ftrace_enabled)
411                 update_ftrace_function();
412 
413         /*
414          * Dynamic ops may be freed, we must make sure that all
415          * callers are done before leaving this function.
416          */
417         if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
418                 synchronize_sched();
419 
420         return 0;
421 }
422 
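These static helpers back the public register_ftrace_function()/unregister_ftrace_function() wrappers defined later in this file. As a hedged sketch of how a caller typically hooks a function-entry callback (the module name, callback, and boilerplate below are hypothetical, not part of this file):

	#include <linux/ftrace.h>
	#include <linux/module.h>

	/* runs at the entry of every traced function: keep it fast and re-entrant */
	static void my_trace_callback(unsigned long ip, unsigned long parent_ip,
				      struct ftrace_ops *op, struct pt_regs *regs)
	{
	}

	static struct ftrace_ops my_ops = {
		.func	= my_trace_callback,
		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
	};

	static int __init my_init(void)
	{
		/* ends up in __register_ftrace_function() above */
		return register_ftrace_function(&my_ops);
	}

	static void __exit my_exit(void)
	{
		unregister_ftrace_function(&my_ops);
	}

	module_init(my_init);
	module_exit(my_exit);
	MODULE_LICENSE("GPL");
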
423 static void ftrace_update_pid_func(void)
424 {
425         /* Only do something if we are tracing something */
426         if (ftrace_trace_function == ftrace_stub)
427                 return;
428 
429         update_ftrace_function();
430 }
431 
432 #ifdef CONFIG_FUNCTION_PROFILER
433 struct ftrace_profile {
434         struct hlist_node               node;
435         unsigned long                   ip;
436         unsigned long                   counter;
437 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
438         unsigned long long              time;
439         unsigned long long              time_squared;
440 #endif
441 };
442 
443 struct ftrace_profile_page {
444         struct ftrace_profile_page      *next;
445         unsigned long                   index;
446         struct ftrace_profile           records[];
447 };
448 
449 struct ftrace_profile_stat {
450         atomic_t                        disabled;
451         struct hlist_head               *hash;
452         struct ftrace_profile_page      *pages;
453         struct ftrace_profile_page      *start;
454         struct tracer_stat              stat;
455 };
456 
457 #define PROFILE_RECORDS_SIZE                                            \
458         (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
459 
460 #define PROFILES_PER_PAGE                                       \
461         (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
462 
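For a rough sense of scale (assuming 4 KiB pages and a 64-bit build with the graph tracer enabled): struct ftrace_profile is a 16-byte hlist_node plus four 8-byte counters, about 48 bytes, while the page header (next pointer plus index) takes 16 bytes, so PROFILES_PER_PAGE works out to roughly (4096 - 16) / 48 = 85 records per page.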
463 static int ftrace_profile_bits __read_mostly;
464 static int ftrace_profile_enabled __read_mostly;
465 
466 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
467 static DEFINE_MUTEX(ftrace_profile_lock);
468 
469 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
470 
471 #define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
472 
473 static void *
474 function_stat_next(void *v, int idx)
475 {
476         struct ftrace_profile *rec = v;
477         struct ftrace_profile_page *pg;
478 
479         pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
480 
481  again:
482         if (idx != 0)
483                 rec++;
484 
485         if ((void *)rec >= (void *)&pg->records[pg->index]) {
486                 pg = pg->next;
487                 if (!pg)
488                         return NULL;
489                 rec = &pg->records[0];
490                 if (!rec->counter)
491                         goto again;
492         }
493 
494         return rec;
495 }
496 
497 static void *function_stat_start(struct tracer_stat *trace)
498 {
499         struct ftrace_profile_stat *stat =
500                 container_of(trace, struct ftrace_profile_stat, stat);
501 
502         if (!stat || !stat->start)
503                 return NULL;
504 
505         return function_stat_next(&stat->start->records[0], 0);
506 }
507 
508 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
509 /* function graph compares on total time */
510 static int function_stat_cmp(void *p1, void *p2)
511 {
512         struct ftrace_profile *a = p1;
513         struct ftrace_profile *b = p2;
514 
515         if (a->time < b->time)
516                 return -1;
517         if (a->time > b->time)
518                 return 1;
519         else
520                 return 0;
521 }
522 #else
523 /* without function graph, compare on hit counts */
524 static int function_stat_cmp(void *p1, void *p2)
525 {
526         struct ftrace_profile *a = p1;
527         struct ftrace_profile *b = p2;
528 
529         if (a->counter < b->counter)
530                 return -1;
531         if (a->counter > b->counter)
532                 return 1;
533         else
534                 return 0;
535 }
536 #endif
537 
538 static int function_stat_headers(struct seq_file *m)
539 {
540 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
541         seq_printf(m, "  Function                               "
542                    "Hit    Time            Avg             s^2\n"
543                       "  --------                               "
544                    "---    ----            ---             ---\n");
545 #else
546         seq_printf(m, "  Function                               Hit\n"
547                       "  --------                               ---\n");
548 #endif
549         return 0;
550 }
551 
552 static int function_stat_show(struct seq_file *m, void *v)
553 {
554         struct ftrace_profile *rec = v;
555         char str[KSYM_SYMBOL_LEN];
556         int ret = 0;
557 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
558         static struct trace_seq s;
559         unsigned long long avg;
560         unsigned long long stddev;
561 #endif
562         mutex_lock(&ftrace_profile_lock);
563 
564         /* we raced with function_profile_reset() */
565         if (unlikely(rec->counter == 0)) {
566                 ret = -EBUSY;
567                 goto out;
568         }
569 
570         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
571         seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
572 
573 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
574         seq_printf(m, "    ");
575         avg = rec->time;
576         do_div(avg, rec->counter);
577 
578         /* Sample standard deviation (s^2) */
579         if (rec->counter <= 1)
580                 stddev = 0;
581         else {
582                 stddev = rec->time_squared - rec->counter * avg * avg;
583                 /*
584                  * Divide only 1000 for ns^2 -> us^2 conversion.
585                  * trace_print_graph_duration will divide 1000 again.
586                  */
587                 do_div(stddev, (rec->counter - 1) * 1000);
588         }
589 
590         trace_seq_init(&s);
591         trace_print_graph_duration(rec->time, &s);
592         trace_seq_puts(&s, "    ");
593         trace_print_graph_duration(avg, &s);
594         trace_seq_puts(&s, "    ");
595         trace_print_graph_duration(stddev, &s);
596         trace_print_seq(m, &s);
597 #endif
598         seq_putc(m, '\n');
599 out:
600         mutex_unlock(&ftrace_profile_lock);
601 
602         return ret;
603 }
604 
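For reference, the statistics printed above use the shortcut form of the sample variance: with n = rec->counter calls, avg = rec->time / n and s^2 = (rec->time_squared - n * avg * avg) / (n - 1), all in ns^2 at that point. The extra divide by 1000 leaves the value in ns*us so that trace_print_graph_duration(), which divides by 1000 once more when formatting, ends up displaying the value in us^2.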
605 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
606 {
607         struct ftrace_profile_page *pg;
608 
609         pg = stat->pages = stat->start;
610 
611         while (pg) {
612                 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
613                 pg->index = 0;
614                 pg = pg->next;
615         }
616 
617         memset(stat->hash, 0,
618                FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
619 }
620 
621 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
622 {
623         struct ftrace_profile_page *pg;
624         int functions;
625         int pages;
626         int i;
627 
628         /* If we already allocated, do nothing */
629         if (stat->pages)
630                 return 0;
631 
632         stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
633         if (!stat->pages)
634                 return -ENOMEM;
635 
636 #ifdef CONFIG_DYNAMIC_FTRACE
637         functions = ftrace_update_tot_cnt;
638 #else
639         /*
640          * We do not know the number of functions that exist because
641          * dynamic tracing is what counts them. With past experience
642          * we have around 20K functions. That should be more than enough.
643          * It is highly unlikely we will execute every function in
644          * the kernel.
645          */
646         functions = 20000;
647 #endif
648 
649         pg = stat->start = stat->pages;
650 
651         pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
652 
653         for (i = 1; i < pages; i++) {
654                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
655                 if (!pg->next)
656                         goto out_free;
657                 pg = pg->next;
658         }
659 
660         return 0;
661 
662  out_free:
663         pg = stat->start;
664         while (pg) {
665                 unsigned long tmp = (unsigned long)pg;
666 
667                 pg = pg->next;
668                 free_page(tmp);
669         }
670 
671         stat->pages = NULL;
672         stat->start = NULL;
673 
674         return -ENOMEM;
675 }
676 
677 static int ftrace_profile_init_cpu(int cpu)
678 {
679         struct ftrace_profile_stat *stat;
680         int size;
681 
682         stat = &per_cpu(ftrace_profile_stats, cpu);
683 
684         if (stat->hash) {
685                 /* If the profile is already created, simply reset it */
686                 ftrace_profile_reset(stat);
687                 return 0;
688         }
689 
690         /*
691          * We are profiling all functions, but usually only a few thousand
692          * functions are hit. We'll make a hash of 1024 items.
693          */
694         size = FTRACE_PROFILE_HASH_SIZE;
695 
696         stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
697 
698         if (!stat->hash)
699                 return -ENOMEM;
700 
701         if (!ftrace_profile_bits) {
702                 size--;
703 
704                 for (; size; size >>= 1)
705                         ftrace_profile_bits++;
706         }
707 
708         /* Preallocate the function profiling pages */
709         if (ftrace_profile_pages_init(stat) < 0) {
710                 kfree(stat->hash);
711                 stat->hash = NULL;
712                 return -ENOMEM;
713         }
714 
715         return 0;
716 }
717 
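The shift loop above just derives log2 of the hash size: with FTRACE_PROFILE_HASH_SIZE = 1024, the size-- turns it into 1023, and shifting that right until it reaches zero increments ftrace_profile_bits ten times, giving the 10 bits that hash_long() expects in ftrace_find_profiled_func() and ftrace_add_profile().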
718 static int ftrace_profile_init(void)
719 {
720         int cpu;
721         int ret = 0;
722 
723         for_each_online_cpu(cpu) {
724                 ret = ftrace_profile_init_cpu(cpu);
725                 if (ret)
726                         break;
727         }
728 
729         return ret;
730 }
731 
732 /* interrupts must be disabled */
733 static struct ftrace_profile *
734 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
735 {
736         struct ftrace_profile *rec;
737         struct hlist_head *hhd;
738         struct hlist_node *n;
739         unsigned long key;
740 
741         key = hash_long(ip, ftrace_profile_bits);
742         hhd = &stat->hash[key];
743 
744         if (hlist_empty(hhd))
745                 return NULL;
746 
747         hlist_for_each_entry_rcu(rec, n, hhd, node) {
748                 if (rec->ip == ip)
749                         return rec;
750         }
751 
752         return NULL;
753 }
754 
755 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
756                                struct ftrace_profile *rec)
757 {
758         unsigned long key;
759 
760         key = hash_long(rec->ip, ftrace_profile_bits);
761         hlist_add_head_rcu(&rec->node, &stat->hash[key]);
762 }
763 
764 /*
765  * The memory is already allocated; this simply finds a new record to use.
766  */
767 static struct ftrace_profile *
768 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
769 {
770         struct ftrace_profile *rec = NULL;
771 
772         /* prevent recursion (from NMIs) */
773         if (atomic_inc_return(&stat->disabled) != 1)
774                 goto out;
775 
776         /*
777          * Try to find the function again since an NMI
778          * could have added it
779          */
780         rec = ftrace_find_profiled_func(stat, ip);
781         if (rec)
782                 goto out;
783 
784         if (stat->pages->index == PROFILES_PER_PAGE) {
785                 if (!stat->pages->next)
786                         goto out;
787                 stat->pages = stat->pages->next;
788         }
789 
790         rec = &stat->pages->records[stat->pages->index++];
791         rec->ip = ip;
792         ftrace_add_profile(stat, rec);
793 
794  out:
795         atomic_dec(&stat->disabled);
796 
797         return rec;
798 }
799 
800 static void
801 function_profile_call(unsigned long ip, unsigned long parent_ip,
802                       struct ftrace_ops *ops, struct pt_regs *regs)
803 {
804         struct ftrace_profile_stat *stat;
805         struct ftrace_profile *rec;
806         unsigned long flags;
807 
808         if (!ftrace_profile_enabled)
809                 return;
810 
811         local_irq_save(flags);
812 
813         stat = &__get_cpu_var(ftrace_profile_stats);
814         if (!stat->hash || !ftrace_profile_enabled)
815                 goto out;
816 
817         rec = ftrace_find_profiled_func(stat, ip);
818         if (!rec) {
819                 rec = ftrace_profile_alloc(stat, ip);
820                 if (!rec)
821                         goto out;
822         }
823 
824         rec->counter++;
825  out:
826         local_irq_restore(flags);
827 }
828 
829 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
830 static int profile_graph_entry(struct ftrace_graph_ent *trace)
831 {
832         function_profile_call(trace->func, 0, NULL, NULL);
833         return 1;
834 }
835 
836 static void profile_graph_return(struct ftrace_graph_ret *trace)
837 {
838         struct ftrace_profile_stat *stat;
839         unsigned long long calltime;
840         struct ftrace_profile *rec;
841         unsigned long flags;
842 
843         local_irq_save(flags);
844         stat = &__get_cpu_var(ftrace_profile_stats);
845         if (!stat->hash || !ftrace_profile_enabled)
846                 goto out;
847 
848         /* If the calltime was zero'd ignore it */
849         if (!trace->calltime)
850                 goto out;
851 
852         calltime = trace->rettime - trace->calltime;
853 
854         if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
855                 int index;
856 
857                 index = trace->depth;
858 
859                 /* Append this call time to the parent time to subtract */
860                 if (index)
861                         current->ret_stack[index - 1].subtime += calltime;
862 
863                 if (current->ret_stack[index].subtime < calltime)
864                         calltime -= current->ret_stack[index].subtime;
865                 else
866                         calltime = 0;
867         }
868 
869         rec = ftrace_find_profiled_func(stat, trace->func);
870         if (rec) {
871                 rec->time += calltime;
872                 rec->time_squared += calltime * calltime;
873         }
874 
875  out:
876         local_irq_restore(flags);
877 }
878 
879 static int register_ftrace_profiler(void)
880 {
881         return register_ftrace_graph(&profile_graph_return,
882                                      &profile_graph_entry);
883 }
884 
885 static void unregister_ftrace_profiler(void)
886 {
887         unregister_ftrace_graph();
888 }
889 #else
890 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
891         .func           = function_profile_call,
892         .flags          = FTRACE_OPS_FL_RECURSION_SAFE,
893 };
894 
895 static int register_ftrace_profiler(void)
896 {
897         return register_ftrace_function(&ftrace_profile_ops);
898 }
899 
900 static void unregister_ftrace_profiler(void)
901 {
902         unregister_ftrace_function(&ftrace_profile_ops);
903 }
904 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
905 
906 static ssize_t
907 ftrace_profile_write(struct file *filp, const char __user *ubuf,
908                      size_t cnt, loff_t *ppos)
909 {
910         unsigned long val;
911         int ret;
912 
913         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
914         if (ret)
915                 return ret;
916 
917         val = !!val;
918 
919         mutex_lock(&ftrace_profile_lock);
920         if (ftrace_profile_enabled ^ val) {
921                 if (val) {
922                         ret = ftrace_profile_init();
923                         if (ret < 0) {
924                                 cnt = ret;
925                                 goto out;
926                         }
927 
928                         ret = register_ftrace_profiler();
929                         if (ret < 0) {
930                                 cnt = ret;
931                                 goto out;
932                         }
933                         ftrace_profile_enabled = 1;
934                 } else {
935                         ftrace_profile_enabled = 0;
936                         /*
937                          * unregister_ftrace_profiler calls stop_machine
938                          * so this acts like a synchronize_sched().
939                          */
940                         unregister_ftrace_profiler();
941                 }
942         }
943  out:
944         mutex_unlock(&ftrace_profile_lock);
945 
946         *ppos += cnt;
947 
948         return cnt;
949 }
950 
951 static ssize_t
952 ftrace_profile_read(struct file *filp, char __user *ubuf,
953                      size_t cnt, loff_t *ppos)
954 {
955         char buf[64];           /* big enough to hold a number */
956         int r;
957 
958         r = sprintf(buf, "%u\n", ftrace_profile_enabled);
959         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
960 }
961 
962 static const struct file_operations ftrace_profile_fops = {
963         .open           = tracing_open_generic,
964         .read           = ftrace_profile_read,
965         .write          = ftrace_profile_write,
966         .llseek         = default_llseek,
967 };
968 
969 /* used to initialize the real stat files */
970 static struct tracer_stat function_stats __initdata = {
971         .name           = "functions",
972         .stat_start     = function_stat_start,
973         .stat_next      = function_stat_next,
974         .stat_cmp       = function_stat_cmp,
975         .stat_headers   = function_stat_headers,
976         .stat_show      = function_stat_show
977 };
978 
979 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
980 {
981         struct ftrace_profile_stat *stat;
982         struct dentry *entry;
983         char *name;
984         int ret;
985         int cpu;
986 
987         for_each_possible_cpu(cpu) {
988                 stat = &per_cpu(ftrace_profile_stats, cpu);
989 
990                 /* allocate enough for function name + cpu number */
991                 name = kmalloc(32, GFP_KERNEL);
992                 if (!name) {
993                         /*
994                          * The files created are permanent; if something goes
995                          * wrong we still do not free this memory.
996                          */
997                         WARN(1,
998                              "Could not allocate stat file for cpu %d\n",
999                              cpu);
1000                         return;
1001                 }
1002                 stat->stat = function_stats;
1003                 snprintf(name, 32, "function%d", cpu);
1004                 stat->stat.name = name;
1005                 ret = register_stat_tracer(&stat->stat);
1006                 if (ret) {
1007                         WARN(1,
1008                              "Could not register function stat for cpu %d\n",
1009                              cpu);
1010                         kfree(name);
1011                         return;
1012                 }
1013         }
1014 
1015         entry = debugfs_create_file("function_profile_enabled", 0644,
1016                                     d_tracer, NULL, &ftrace_profile_fops);
1017         if (!entry)
1018                 pr_warning("Could not create debugfs "
1019                            "'function_profile_enabled' entry\n");
1020 }
1021 
1022 #else /* CONFIG_FUNCTION_PROFILER */
1023 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
1024 {
1025 }
1026 #endif /* CONFIG_FUNCTION_PROFILER */
1027 
1028 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
1029 
1030 loff_t
1031 ftrace_filter_lseek(struct file *file, loff_t offset, int whence)
1032 {
1033         loff_t ret;
1034 
1035         if (file->f_mode & FMODE_READ)
1036                 ret = seq_lseek(file, offset, whence);
1037         else
1038                 file->f_pos = ret = 1;
1039 
1040         return ret;
1041 }
1042 
1043 #ifdef CONFIG_DYNAMIC_FTRACE
1044 
1045 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
1046 # error Dynamic ftrace depends on MCOUNT_RECORD
1047 #endif
1048 
1049 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
1050 
1051 struct ftrace_func_probe {
1052         struct hlist_node       node;
1053         struct ftrace_probe_ops *ops;
1054         unsigned long           flags;
1055         unsigned long           ip;
1056         void                    *data;
1057         struct rcu_head         rcu;
1058 };
1059 
1060 struct ftrace_func_entry {
1061         struct hlist_node hlist;
1062         unsigned long ip;
1063 };
1064 
1065 struct ftrace_hash {
1066         unsigned long           size_bits;
1067         struct hlist_head       *buckets;
1068         unsigned long           count;
1069         struct rcu_head         rcu;
1070 };
1071 
1072 /*
1073  * We make these constant because no one should touch them,
1074  * but they are used as the default "empty hash", to avoid allocating
1075  * it all the time. These are in a read only section such that if
1076  * anyone does try to modify it, it will cause an exception.
1077  */
1078 static const struct hlist_head empty_buckets[1];
1079 static const struct ftrace_hash empty_hash = {
1080         .buckets = (struct hlist_head *)empty_buckets,
1081 };
1082 #define EMPTY_HASH      ((struct ftrace_hash *)&empty_hash)
1083 
1084 static struct ftrace_ops global_ops = {
1085         .func                   = ftrace_stub,
1086         .notrace_hash           = EMPTY_HASH,
1087         .filter_hash            = EMPTY_HASH,
1088         .flags                  = FTRACE_OPS_FL_RECURSION_SAFE,
1089 };
1090 
1091 static DEFINE_MUTEX(ftrace_regex_lock);
1092 
1093 struct ftrace_page {
1094         struct ftrace_page      *next;
1095         struct dyn_ftrace       *records;
1096         int                     index;
1097         int                     size;
1098 };
1099 
1100 static struct ftrace_page *ftrace_new_pgs;
1101 
1102 #define ENTRY_SIZE sizeof(struct dyn_ftrace)
1103 #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
1104 
1105 /* estimate from running different kernels */
1106 #define NR_TO_INIT              10000
1107 
1108 static struct ftrace_page       *ftrace_pages_start;
1109 static struct ftrace_page       *ftrace_pages;
1110 
1111 static bool ftrace_hash_empty(struct ftrace_hash *hash)
1112 {
1113         return !hash || !hash->count;
1114 }
1115 
1116 static struct ftrace_func_entry *
1117 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1118 {
1119         unsigned long key;
1120         struct ftrace_func_entry *entry;
1121         struct hlist_head *hhd;
1122         struct hlist_node *n;
1123 
1124         if (ftrace_hash_empty(hash))
1125                 return NULL;
1126 
1127         if (hash->size_bits > 0)
1128                 key = hash_long(ip, hash->size_bits);
1129         else
1130                 key = 0;
1131 
1132         hhd = &hash->buckets[key];
1133 
1134         hlist_for_each_entry_rcu(entry, n, hhd, hlist) {
1135                 if (entry->ip == ip)
1136                         return entry;
1137         }
1138         return NULL;
1139 }
1140 
1141 static void __add_hash_entry(struct ftrace_hash *hash,
1142                              struct ftrace_func_entry *entry)
1143 {
1144         struct hlist_head *hhd;
1145         unsigned long key;
1146 
1147         if (hash->size_bits)
1148                 key = hash_long(entry->ip, hash->size_bits);
1149         else
1150                 key = 0;
1151 
1152         hhd = &hash->buckets[key];
1153         hlist_add_head(&entry->hlist, hhd);
1154         hash->count++;
1155 }
1156 
1157 static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1158 {
1159         struct ftrace_func_entry *entry;
1160 
1161         entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1162         if (!entry)
1163                 return -ENOMEM;
1164 
1165         entry->ip = ip;
1166         __add_hash_entry(hash, entry);
1167 
1168         return 0;
1169 }
1170 
1171 static void
1172 free_hash_entry(struct ftrace_hash *hash,
1173                   struct ftrace_func_entry *entry)
1174 {
1175         hlist_del(&entry->hlist);
1176         kfree(entry);
1177         hash->count--;
1178 }
1179 
1180 static void
1181 remove_hash_entry(struct ftrace_hash *hash,
1182                   struct ftrace_func_entry *entry)
1183 {
1184         hlist_del(&entry->hlist);
1185         hash->count--;
1186 }
1187 
1188 static void ftrace_hash_clear(struct ftrace_hash *hash)
1189 {
1190         struct hlist_head *hhd;
1191         struct hlist_node *tp, *tn;
1192         struct ftrace_func_entry *entry;
1193         int size = 1 << hash->size_bits;
1194         int i;
1195 
1196         if (!hash->count)
1197                 return;
1198 
1199         for (i = 0; i < size; i++) {
1200                 hhd = &hash->buckets[i];
1201                 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
1202                         free_hash_entry(hash, entry);
1203         }
1204         FTRACE_WARN_ON(hash->count);
1205 }
1206 
1207 static void free_ftrace_hash(struct ftrace_hash *hash)
1208 {
1209         if (!hash || hash == EMPTY_HASH)
1210                 return;
1211         ftrace_hash_clear(hash);
1212         kfree(hash->buckets);
1213         kfree(hash);
1214 }
1215 
1216 static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1217 {
1218         struct ftrace_hash *hash;
1219 
1220         hash = container_of(rcu, struct ftrace_hash, rcu);
1221         free_ftrace_hash(hash);
1222 }
1223 
1224 static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1225 {
1226         if (!hash || hash == EMPTY_HASH)
1227                 return;
1228         call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
1229 }
1230 
1231 void ftrace_free_filter(struct ftrace_ops *ops)
1232 {
1233         free_ftrace_hash(ops->filter_hash);
1234         free_ftrace_hash(ops->notrace_hash);
1235 }
1236 
1237 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1238 {
1239         struct ftrace_hash *hash;
1240         int size;
1241 
1242         hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1243         if (!hash)
1244                 return NULL;
1245 
1246         size = 1 << size_bits;
1247         hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
1248 
1249         if (!hash->buckets) {
1250                 kfree(hash);
1251                 return NULL;
1252         }
1253 
1254         hash->size_bits = size_bits;
1255 
1256         return hash;
1257 }
1258 
1259 static struct ftrace_hash *
1260 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1261 {
1262         struct ftrace_func_entry *entry;
1263         struct ftrace_hash *new_hash;
1264         struct hlist_node *tp;
1265         int size;
1266         int ret;
1267         int i;
1268 
1269         new_hash = alloc_ftrace_hash(size_bits);
1270         if (!new_hash)
1271                 return NULL;
1272 
1273         /* Empty hash? */
1274         if (ftrace_hash_empty(hash))
1275                 return new_hash;
1276 
1277         size = 1 << hash->size_bits;
1278         for (i = 0; i < size; i++) {
1279                 hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) {
1280                         ret = add_hash_entry(new_hash, entry->ip);
1281                         if (ret < 0)
1282                                 goto free_hash;
1283                 }
1284         }
1285 
1286         FTRACE_WARN_ON(new_hash->count != hash->count);
1287 
1288         return new_hash;
1289 
1290  free_hash:
1291         free_ftrace_hash(new_hash);
1292         return NULL;
1293 }
1294 
1295 static void
1296 ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
1297 static void
1298 ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
1299 
1300 static int
1301 ftrace_hash_move(struct ftrace_ops *ops, int enable,
1302                  struct ftrace_hash **dst, struct ftrace_hash *src)
1303 {
1304         struct ftrace_func_entry *entry;
1305         struct hlist_node *tp, *tn;
1306         struct hlist_head *hhd;
1307         struct ftrace_hash *old_hash;
1308         struct ftrace_hash *new_hash;
1309         unsigned long key;
1310         int size = src->count;
1311         int bits = 0;
1312         int ret;
1313         int i;
1314 
1315         /*
1316          * Remove the current set, update the hash and add
1317          * them back.
1318          */
1319         ftrace_hash_rec_disable(ops, enable);
1320 
1321         /*
1322          * If the new source is empty, just free dst and assign it
1323          * the empty_hash.
1324          */
1325         if (!src->count) {
1326                 free_ftrace_hash_rcu(*dst);
1327                 rcu_assign_pointer(*dst, EMPTY_HASH);
1328                 /* still need to update the function records */
1329                 ret = 0;
1330                 goto out;
1331         }
1332 
1333         /*
1334          * Make the hash size about 1/2 the # found
1335          */
1336         for (size /= 2; size; size >>= 1)
1337                 bits++;
1338 
1339         /* Don't allocate too much */
1340         if (bits > FTRACE_HASH_MAX_BITS)
1341                 bits = FTRACE_HASH_MAX_BITS;
1342 
1343         ret = -ENOMEM;
1344         new_hash = alloc_ftrace_hash(bits);
1345         if (!new_hash)
1346                 goto out;
1347 
1348         size = 1 << src->size_bits;
1349         for (i = 0; i < size; i++) {
1350                 hhd = &src->buckets[i];
1351                 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
1352                         if (bits > 0)
1353                                 key = hash_long(entry->ip, bits);
1354                         else
1355                                 key = 0;
1356                         remove_hash_entry(src, entry);
1357                         __add_hash_entry(new_hash, entry);
1358                 }
1359         }
1360 
1361         old_hash = *dst;
1362         rcu_assign_pointer(*dst, new_hash);
1363         free_ftrace_hash_rcu(old_hash);
1364 
1365         ret = 0;
1366  out:
1367         /*
1368          * Enable regardless of ret:
1369          *  On success, we enable the new hash.
1370          *  On failure, we re-enable the original hash.
1371          */
1372         ftrace_hash_rec_enable(ops, enable);
1373 
1374         return ret;
1375 }
1376 
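A quick worked example of the sizing logic in ftrace_hash_move(): for a source hash holding 100 entries, size /= 2 gives 50, and halving that down to zero takes six shifts, so bits = 6 and the new hash gets 1 << 6 = 64 buckets, i.e. roughly half the entry count rounded up to a power of two, capped at FTRACE_HASH_MAX_BITS (4096 buckets).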
1377 /*
1378  * Test the hashes for this ops to see if we want to call
1379  * the ops->func or not.
1380  *
1381  * It's a match if the ip is in the ops->filter_hash or
1382  * the filter_hash does not exist or is empty,
1383  *  AND
1384  * the ip is not in the ops->notrace_hash.
1385  *
1386  * This needs to be called with preemption disabled as
1387  * the hashes are freed with call_rcu_sched().
1388  */
1389 static int
1390 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
1391 {
1392         struct ftrace_hash *filter_hash;
1393         struct ftrace_hash *notrace_hash;
1394         int ret;
1395 
1396         filter_hash = rcu_dereference_raw(ops->filter_hash);
1397         notrace_hash = rcu_dereference_raw(ops->notrace_hash);
1398 
1399         if ((ftrace_hash_empty(filter_hash) ||
1400              ftrace_lookup_ip(filter_hash, ip)) &&
1401             (ftrace_hash_empty(notrace_hash) ||
1402              !ftrace_lookup_ip(notrace_hash, ip)))
1403                 ret = 1;
1404         else
1405                 ret = 0;
1406 
1407         return ret;
1408 }
1409 
1410 /*
1411  * This is a double for loop. Do not use 'break' to break out of it;
1412  * you must use a goto.
1413  */
1414 #define do_for_each_ftrace_rec(pg, rec)                                 \
1415         for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
1416                 int _____i;                                             \
1417                 for (_____i = 0; _____i < pg->index; _____i++) {        \
1418                         rec = &pg->records[_____i];
1419 
1420 #define while_for_each_ftrace_rec()             \
1421                 }                               \
1422         }
1423 
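As a hedged illustration of how this iterator pair is meant to be used (the helper below is hypothetical; real users such as __ftrace_hash_rec_update() later in this file follow the same shape):

	static struct dyn_ftrace *example_find_rec(unsigned long ip)
	{
		struct ftrace_page *pg;
		struct dyn_ftrace *rec;
		struct dyn_ftrace *found = NULL;

		do_for_each_ftrace_rec(pg, rec) {
			if (rec->ip == ip) {
				found = rec;
				goto out;	/* a goto, never 'break' */
			}
		} while_for_each_ftrace_rec();
	 out:
		return found;
	}
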
1424 
1425 static int ftrace_cmp_recs(const void *a, const void *b)
1426 {
1427         const struct dyn_ftrace *key = a;
1428         const struct dyn_ftrace *rec = b;
1429 
1430         if (key->flags < rec->ip)
1431                 return -1;
1432         if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
1433                 return 1;
1434         return 0;
1435 }
1436 
1437 static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
1438 {
1439         struct ftrace_page *pg;
1440         struct dyn_ftrace *rec;
1441         struct dyn_ftrace key;
1442 
1443         key.ip = start;
1444         key.flags = end;        /* overload flags, as it is unsigned long */
1445 
1446         for (pg = ftrace_pages_start; pg; pg = pg->next) {
1447                 if (end < pg->records[0].ip ||
1448                     start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
1449                         continue;
1450                 rec = bsearch(&key, pg->records, pg->index,
1451                               sizeof(struct dyn_ftrace),
1452                               ftrace_cmp_recs);
1453                 if (rec)
1454                         return rec->ip;
1455         }
1456 
1457         return 0;
1458 }
1459 
1460 /**
1461  * ftrace_location - return true if the ip given is a traced location
1462  * @ip: the instruction pointer to check
1463  *
1464  * Returns rec->ip if @ip given is a pointer to a ftrace location.
1465  * That is, the instruction that is either a NOP or call to
1466  * the function tracer. It checks the ftrace internal tables to
1467  * determine if the address belongs or not.
1468  */
1469 unsigned long ftrace_location(unsigned long ip)
1470 {
1471         return ftrace_location_range(ip, ip);
1472 }
1473 
1474 /**
1475  * ftrace_text_reserved - return true if range contains an ftrace location
1476  * @start: start of range to search
1477  * @end: end of range to search (inclusive). @end points to the last byte to check.
1478  *
1479  * Returns 1 if @start and @end contains a ftrace location.
1480  * That is, the instruction that is either a NOP or call to
1481  * the function tracer. It checks the ftrace internal tables to
1482  * determine if the address belongs or not.
1483  */
1484 int ftrace_text_reserved(void *start, void *end)
1485 {
1486         unsigned long ret;
1487 
1488         ret = ftrace_location_range((unsigned long)start,
1489                                     (unsigned long)end);
1490 
1491         return (int)!!ret;
1492 }
1493 
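Both helpers are mainly consumed outside this file; for instance, kprobes-style code uses them to decide whether a text address is safe to patch. A hedged sketch of that kind of check (the helper below is hypothetical):

	/* refuse to patch a range that overlaps an ftrace-managed call site */
	static bool example_can_patch(void *addr, size_t len)
	{
		void *last = (char *)addr + len - 1;

		return !ftrace_text_reserved(addr, last);
	}

ftrace_location() serves the complementary lookup: given a single address, it reports whether that address is one of the recorded mcount/fentry sites.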
1494 static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1495                                      int filter_hash,
1496                                      bool inc)
1497 {
1498         struct ftrace_hash *hash;
1499         struct ftrace_hash *other_hash;
1500         struct ftrace_page *pg;
1501         struct dyn_ftrace *rec;
1502         int count = 0;
1503         int all = 0;
1504 
1505         /* Only update if the ops has been registered */
1506         if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1507                 return;
1508 
1509         /*
1510          * In the filter_hash case:
1511          *   If the count is zero, we update all records.
1512          *   Otherwise we just update the items in the hash.
1513          *
1514          * In the notrace_hash case:
1515          *   We enable the update in the hash.
1516          *   As disabling notrace means enabling the tracing,
1517          *   and enabling notrace means disabling, the inc variable
1518          *   gets inversed.
1519          *   gets inverted.
1520         if (filter_hash) {
1521                 hash = ops->filter_hash;
1522                 other_hash = ops->notrace_hash;
1523                 if (ftrace_hash_empty(hash))
1524                         all = 1;
1525         } else {
1526                 inc = !inc;
1527                 hash = ops->notrace_hash;
1528                 other_hash = ops->filter_hash;
1529                 /*
1530                  * If the notrace hash has no items,
1531                  * then there's nothing to do.
1532                  */
1533                 if (ftrace_hash_empty(hash))
1534                         return;
1535         }
1536 
1537         do_for_each_ftrace_rec(pg, rec) {
1538                 int in_other_hash = 0;
1539                 int in_hash = 0;
1540                 int match = 0;
1541 
1542                 if (all) {
1543                         /*
1544                          * Only the filter_hash affects all records.
1545                          * Update if the record is not in the notrace hash.
1546                          */
1547                         if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
1548                                 match = 1;
1549                 } else {
1550                         in_hash = !!ftrace_lookup_ip(hash, rec->ip);
1551                         in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
1552 
1553                         /* filter_hash: match functions in this hash but not in the other;
1554                          * notrace: match functions that the other (filter) hash also covers,
1555                          * or all of them if that hash is empty. */
1556                         if (filter_hash && in_hash && !in_other_hash)
1557                                 match = 1;
1558                         else if (!filter_hash && in_hash &&
1559                                  (in_other_hash || ftrace_hash_empty(other_hash)))
1560                                 match = 1;
1561                 }
1562                 if (!match)
1563                         continue;
1564 
1565                 if (inc) {
1566                         rec->flags++;
1567                         if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
1568                                 return;
1569                         /*
1570                          * If any ops wants regs saved for this function
1571                          * then all ops will get saved regs.
1572                          */
1573                         if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
1574                                 rec->flags |= FTRACE_FL_REGS;
1575                 } else {
1576                         if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
1577                                 return;
1578                         rec->flags--;
1579                 }
1580                 count++;
1581                 /* Shortcut, if we handled all records, we are done. */
1582                 if (!all && count == hash->count)
1583                         return;
1584         } while_for_each_ftrace_rec();
1585 }
1586 
1587 static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
1588                                     int filter_hash)
1589 {
1590         __ftrace_hash_rec_update(ops, filter_hash, 0);
1591 }
1592 
1593 static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
1594                                    int filter_hash)
1595 {
1596         __ftrace_hash_rec_update(ops, filter_hash, 1);
1597 }
1598 
1599 static void print_ip_ins(const char *fmt, unsigned char *p)
1600 {
1601         int i;
1602 
1603         printk(KERN_CONT "%s", fmt);
1604 
1605         for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1606                 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1607 }
1608 
1609 /**
1610  * ftrace_bug - report and shutdown function tracer
1611  * @failed: The failed type (EFAULT, EINVAL, EPERM)
1612  * @ip: The address that failed
1613  *
1614  * The arch code that enables or disables the function tracing
1615  * can call ftrace_bug() when it has detected a problem in
1616  * modifying the code. @failed should be one of either:
1617  * EFAULT - if the problem happens on reading the @ip address
1618  * EINVAL - if what is read at @ip is not what was expected
1619  * EPERM - if the problem happens on writing to the @ip address
1620  */
1621 void ftrace_bug(int failed, unsigned long ip)
1622 {
1623         switch (failed) {
1624         case -EFAULT:
1625                 FTRACE_WARN_ON_ONCE(1);
1626                 pr_info("ftrace faulted on modifying ");
1627                 print_ip_sym(ip);
1628                 break;
1629         case -EINVAL:
1630                 FTRACE_WARN_ON_ONCE(1);
1631                 pr_info("ftrace failed to modify ");
1632                 print_ip_sym(ip);
1633                 print_ip_ins(" actual: ", (unsigned char *)ip);
1634                 printk(KERN_CONT "\n");
1635                 break;
1636         case -EPERM:
1637                 FTRACE_WARN_ON_ONCE(1);
1638                 pr_info("ftrace faulted on writing ");
1639                 print_ip_sym(ip);
1640                 break;
1641         default:
1642                 FTRACE_WARN_ON_ONCE(1);
1643                 pr_info("ftrace faulted on unknown error ");
1644                 print_ip_sym(ip);
1645         }
1646 }
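
/*
 * A minimal, hypothetical sketch of how an architecture's patching code
 * might report failures through ftrace_bug().  The function and parameter
 * names here are illustrative and not part of this file; only ftrace_bug(),
 * MCOUNT_INSN_SIZE and the probe_kernel_read/write helpers are real.
 */
static int example_arch_patch(struct dyn_ftrace *rec,
                              const unsigned char *expect,
                              const unsigned char *new_insn)
{
        unsigned char cur[MCOUNT_INSN_SIZE];
        unsigned long ip = rec->ip;

        /* -EFAULT: the instruction bytes could not even be read */
        if (probe_kernel_read(cur, (void *)ip, MCOUNT_INSN_SIZE)) {
                ftrace_bug(-EFAULT, ip);
                return -EFAULT;
        }

        /* -EINVAL: what is there is not what we expected to replace */
        if (memcmp(cur, expect, MCOUNT_INSN_SIZE)) {
                ftrace_bug(-EINVAL, ip);
                return -EINVAL;
        }

        /* -EPERM: the write itself failed (e.g. text not writable) */
        if (probe_kernel_write((void *)ip, new_insn, MCOUNT_INSN_SIZE)) {
                ftrace_bug(-EPERM, ip);
                return -EPERM;
        }

        return 0;
}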
1647 
1648 static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
1649 {
1650         unsigned long flag = 0UL;
1651 
1652         /*
1653          * If we are updating calls:
1654          *
1655          *   If the record has a ref count, then we need to enable it
1656          *   because someone is using it.
1657          *
1658          *   Otherwise we make sure it's disabled.
1659          *
1660          * If we are disabling calls, then disable all records that
1661          * are enabled.
1662          */
1663         if (enable && (rec->flags & ~FTRACE_FL_MASK))
1664                 flag = FTRACE_FL_ENABLED;
1665 
1666         /*
1667          * If enabling and the REGS flag does not match the REGS_EN, then
1668          * do not ignore this record. Set flags to fail the compare against
1669          * ENABLED.
1670          */
1671         if (flag &&
1672             (!(rec->flags & FTRACE_FL_REGS) != !(rec->flags & FTRACE_FL_REGS_EN)))
1673                 flag |= FTRACE_FL_REGS;
1674 
1675         /* If the state of this record hasn't changed, then do nothing */
1676         if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1677                 return FTRACE_UPDATE_IGNORE;
1678 
1679         if (flag) {
1680                 /* Save off if rec is being enabled (for return value) */
1681                 flag ^= rec->flags & FTRACE_FL_ENABLED;
1682 
1683                 if (update) {
1684                         rec->flags |= FTRACE_FL_ENABLED;
1685                         if (flag & FTRACE_FL_REGS) {
1686                                 if (rec->flags & FTRACE_FL_REGS)
1687                                         rec->flags |= FTRACE_FL_REGS_EN;
1688                                 else
1689                                         rec->flags &= ~FTRACE_FL_REGS_EN;
1690                         }
1691                 }
1692 
1693                 /*
1694                  * If this record is being updated from a nop, then
1695                  *   return UPDATE_MAKE_CALL.
1696                  * Otherwise, if the EN flag is set, then return
1697                  *   UPDATE_MODIFY_CALL_REGS to tell the caller to convert
1698                  *   from the non-save regs, to a save regs function.
1699                  * Otherwise,
1700                  *   return UPDATE_MODIFY_CALL to tell the caller to convert
1701                  *   from the save regs, to a non-save regs function.
1702                  */
1703                 if (flag & FTRACE_FL_ENABLED)
1704                         return FTRACE_UPDATE_MAKE_CALL;
1705                 else if (rec->flags & FTRACE_FL_REGS_EN)
1706                         return FTRACE_UPDATE_MODIFY_CALL_REGS;
1707                 else
1708                         return FTRACE_UPDATE_MODIFY_CALL;
1709         }
1710 
1711         if (update) {
1712                 /* If there are no more users, clear all flags */
1713                 if (!(rec->flags & ~FTRACE_FL_MASK))
1714                         rec->flags = 0;
1715                 else
1716                         /* Just disable the record (keep REGS state) */
1717                         rec->flags &= ~FTRACE_FL_ENABLED;
1718         }
1719 
1720         return FTRACE_UPDATE_MAKE_NOP;
1721 }
1722 
1723 /**
1724  * ftrace_update_record, set a record that is now being traced or not
1725  * @rec: the record to update
1726  * @enable: set to 1 if the record is being traced, zero to force disable
1727  *
1728  * The records that represent all functions that can be traced need
1729  * to be updated when tracing has been enabled.
1730  */
1731 int ftrace_update_record(struct dyn_ftrace *rec, int enable)
1732 {
1733         return ftrace_check_record(rec, enable, 1);
1734 }
1735 
1736 /**
1737  * ftrace_test_record, check if the record has been enabled or not
1738  * @rec: the record to test
1739  * @enable: set to 1 to check if enabled, 0 if it is disabled
1740  *
1741  * The arch code may need to test if a record is already set to
1742  * tracing to determine how to modify the function code that it
1743  * represents.
1744  */
1745 int ftrace_test_record(struct dyn_ftrace *rec, int enable)
1746 {
1747         return ftrace_check_record(rec, enable, 0);
1748 }
1749 
1750 static int
1751 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1752 {
1753         unsigned long ftrace_old_addr;
1754         unsigned long ftrace_addr;
1755         int ret;
1756 
1757         ret = ftrace_update_record(rec, enable);
1758 
1759         if (rec->flags & FTRACE_FL_REGS)
1760                 ftrace_addr = (unsigned long)FTRACE_REGS_ADDR;
1761         else
1762                 ftrace_addr = (unsigned long)FTRACE_ADDR;
1763 
1764         switch (ret) {
1765         case FTRACE_UPDATE_IGNORE:
1766                 return 0;
1767 
1768         case FTRACE_UPDATE_MAKE_CALL:
1769                 return ftrace_make_call(rec, ftrace_addr);
1770 
1771         case FTRACE_UPDATE_MAKE_NOP:
1772                 return ftrace_make_nop(NULL, rec, ftrace_addr);
1773 
1774         case FTRACE_UPDATE_MODIFY_CALL_REGS:
1775         case FTRACE_UPDATE_MODIFY_CALL:
1776                 if (rec->flags & FTRACE_FL_REGS)
1777                         ftrace_old_addr = (unsigned long)FTRACE_ADDR;
1778                 else
1779                         ftrace_old_addr = (unsigned long)FTRACE_REGS_ADDR;
1780 
1781                 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
1782         }
1783 
1784         return -1; /* unknown ftrace bug */
1785 }
1786 
1787 void __weak ftrace_replace_code(int enable)
1788 {
1789         struct dyn_ftrace *rec;
1790         struct ftrace_page *pg;
1791         int failed;
1792 
1793         if (unlikely(ftrace_disabled))
1794                 return;
1795 
1796         do_for_each_ftrace_rec(pg, rec) {
1797                 failed = __ftrace_replace_code(rec, enable);
1798                 if (failed) {
1799                         ftrace_bug(failed, rec->ip);
1800                         /* Stop processing */
1801                         return;
1802                 }
1803         } while_for_each_ftrace_rec();
1804 }
1805 
1806 struct ftrace_rec_iter {
1807         struct ftrace_page      *pg;
1808         int                     index;
1809 };
1810 
1811 /**
1812  * ftrace_rec_iter_start, start up iterating over traced functions
1813  *
1814  * Returns an iterator handle that is used to iterate over all
1815  * the records that represent address locations where functions
1816  * are traced.
1817  *
1818  * May return NULL if no records are available.
1819  */
1820 struct ftrace_rec_iter *ftrace_rec_iter_start(void)
1821 {
1822         /*
1823          * We only use a single iterator.
1824          * Protected by the ftrace_lock mutex.
1825          */
1826         static struct ftrace_rec_iter ftrace_rec_iter;
1827         struct ftrace_rec_iter *iter = &ftrace_rec_iter;
1828 
1829         iter->pg = ftrace_pages_start;
1830         iter->index = 0;
1831 
1832         /* Could have empty pages */
1833         while (iter->pg && !iter->pg->index)
1834                 iter->pg = iter->pg->next;
1835 
1836         if (!iter->pg)
1837                 return NULL;
1838 
1839         return iter;
1840 }
1841 
1842 /**
1843  * ftrace_rec_iter_next, get the next record to process.
1844  * @iter: The handle to the iterator.
1845  *
1846  * Returns the next iterator after the given iterator @iter.
1847  */
1848 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
1849 {
1850         iter->index++;
1851 
1852         if (iter->index >= iter->pg->index) {
1853                 iter->pg = iter->pg->next;
1854                 iter->index = 0;
1855 
1856                 /* Could have empty pages */
1857                 while (iter->pg && !iter->pg->index)
1858                         iter->pg = iter->pg->next;
1859         }
1860 
1861         if (!iter->pg)
1862                 return NULL;
1863 
1864         return iter;
1865 }
1866 
1867 /**
1868  * ftrace_rec_iter_record, get the record at the iterator location
1869  * @iter: The current iterator location
1870  *
1871  * Returns the record that the current @iter is at.
1872  */
1873 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
1874 {
1875         return &iter->pg->records[iter->index];
1876 }
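
/*
 * A hypothetical sketch of the iteration pattern the three helpers above
 * provide, e.g. for arch code that needs to walk every record while holding
 * the ftrace_lock mutex.  The function name and loop body are illustrative.
 */
static void example_walk_records(void)
{
        struct ftrace_rec_iter *iter;
        struct dyn_ftrace *rec;

        for (iter = ftrace_rec_iter_start(); iter;
             iter = ftrace_rec_iter_next(iter)) {
                rec = ftrace_rec_iter_record(iter);
                /* inspect or patch the site at rec->ip here */
        }
}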
1877 
1878 static int
1879 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1880 {
1881         unsigned long ip;
1882         int ret;
1883 
1884         ip = rec->ip;
1885 
1886         if (unlikely(ftrace_disabled))
1887                 return 0;
1888 
1889         ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1890         if (ret) {
1891                 ftrace_bug(ret, ip);
1892                 return 0;
1893         }
1894         return 1;
1895 }
1896 
1897 /*
1898  * archs can override this function if they must do something
1899  * before the modifying code is performed.
1900  */
1901 int __weak ftrace_arch_code_modify_prepare(void)
1902 {
1903         return 0;
1904 }
1905 
1906 /*
1907  * archs can override this function if they must do something
1908  * after the modifying code is performed.
1909  */
1910 int __weak ftrace_arch_code_modify_post_process(void)
1911 {
1912         return 0;
1913 }
1914 
1915 void ftrace_modify_all_code(int command)
1916 {
1917         if (command & FTRACE_UPDATE_CALLS)
1918                 ftrace_replace_code(1);
1919         else if (command & FTRACE_DISABLE_CALLS)
1920                 ftrace_replace_code(0);
1921 
1922         if (command & FTRACE_UPDATE_TRACE_FUNC)
1923                 ftrace_update_ftrace_func(ftrace_trace_function);
1924 
1925         if (command & FTRACE_START_FUNC_RET)
1926                 ftrace_enable_ftrace_graph_caller();
1927         else if (command & FTRACE_STOP_FUNC_RET)
1928                 ftrace_disable_ftrace_graph_caller();
1929 }
1930 
1931 static int __ftrace_modify_code(void *data)
1932 {
1933         int *command = data;
1934 
1935         ftrace_modify_all_code(*command);
1936 
1937         return 0;
1938 }
1939 
1940 /**
1941  * ftrace_run_stop_machine, go back to the stop machine method
1942  * @command: The command to tell ftrace what to do
1943  *
1944  * If an arch needs to fall back to the stop machine method, then
1945  * it can call this function.
1946  */
1947 void ftrace_run_stop_machine(int command)
1948 {
1949         stop_machine(__ftrace_modify_code, &command, NULL);
1950 }
1951 
1952 /**
1953  * arch_ftrace_update_code, modify the code to trace or not trace
1954  * @command: The command that needs to be done
1955  *
1956  * Archs can override this function if they do not need to
1957  * run stop_machine() to modify code.
1958  */
1959 void __weak arch_ftrace_update_code(int command)
1960 {
1961         ftrace_run_stop_machine(command);
1962 }
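
/*
 * A hypothetical sketch of what an arch override could look like: an
 * architecture able to patch its text safely without stopping every CPU
 * would provide its own (non-weak) arch_ftrace_update_code() and drive the
 * same command dispatch directly.  Shown under a different name here, since
 * the real override lives in arch code, not in this file.
 */
static void example_arch_ftrace_update_code(int command)
{
        /* arch-specific preparation (make text writable, sync CPUs, ...) */
        ftrace_modify_all_code(command);
        /* arch-specific cleanup */
}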
1963 
1964 static void ftrace_run_update_code(int command)
1965 {
1966         int ret;
1967 
1968         ret = ftrace_arch_code_modify_prepare();
1969         FTRACE_WARN_ON(ret);
1970         if (ret)
1971                 return;
1972         /*
1973          * Do not call function tracer while we update the code.
1974          * We are in stop machine.
1975          */
1976         function_trace_stop++;
1977 
1978         /*
1979          * By default we use stop_machine() to modify the code.
1980          * But archs can do whatever they want as long as it
1981          * is safe. The stop_machine() is the safest, but also
1982          * produces the most overhead.
1983          */
1984         arch_ftrace_update_code(command);
1985 
1986         function_trace_stop--;
1987 
1988         ret = ftrace_arch_code_modify_post_process();
1989         FTRACE_WARN_ON(ret);
1990 }
1991 
1992 static ftrace_func_t saved_ftrace_func;
1993 static int ftrace_start_up;
1994 static int global_start_up;
1995 
1996 static void ftrace_startup_enable(int command)
1997 {
1998         if (saved_ftrace_func != ftrace_trace_function) {
1999                 saved_ftrace_func = ftrace_trace_function;
2000                 command |= FTRACE_UPDATE_TRACE_FUNC;
2001         }
2002 
2003         if (!command || !ftrace_enabled)
2004                 return;
2005 
2006         ftrace_run_update_code(command);
2007 }
2008 
2009 static int ftrace_startup(struct ftrace_ops *ops, int command)
2010 {
2011         bool hash_enable = true;
2012 
2013         if (unlikely(ftrace_disabled))
2014                 return -ENODEV;
2015 
2016         ftrace_start_up++;
2017         command |= FTRACE_UPDATE_CALLS;
2018 
2019         /* ops marked global share the filter hashes */
2020         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
2021                 ops = &global_ops;
2022                 /* Don't update hash if global is already set */
2023                 if (global_start_up)
2024                         hash_enable = false;
2025                 global_start_up++;
2026         }
2027 
2028         ops->flags |= FTRACE_OPS_FL_ENABLED;
2029         if (hash_enable)
2030                 ftrace_hash_rec_enable(ops, 1);
2031 
2032         ftrace_startup_enable(command);
2033 
2034         return 0;
2035 }
2036 
2037 static void ftrace_shutdown(struct ftrace_ops *ops, int command)
2038 {
2039         bool hash_disable = true;
2040 
2041         if (unlikely(ftrace_disabled))
2042                 return;
2043 
2044         ftrace_start_up--;
2045         /*
2046          * Just warn in case of an imbalance; no need to kill ftrace, it's not
2047          * critical, but the ftrace_call callers may never be nopped again after
2048          * further ftrace uses.
2049          */
2050         WARN_ON_ONCE(ftrace_start_up < 0);
2051 
2052         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
2053                 ops = &global_ops;
2054                 global_start_up--;
2055                 WARN_ON_ONCE(global_start_up < 0);
2056                 /* Don't update hash if global still has users */
2057                 if (global_start_up) {
2058                         WARN_ON_ONCE(!ftrace_start_up);
2059                         hash_disable = false;
2060                 }
2061         }
2062 
2063         if (hash_disable)
2064                 ftrace_hash_rec_disable(ops, 1);
2065 
2066         if (ops != &global_ops || !global_start_up)
2067                 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2068 
2069         command |= FTRACE_UPDATE_CALLS;
2070 
2071         if (saved_ftrace_func != ftrace_trace_function) {
2072                 saved_ftrace_func = ftrace_trace_function;
2073                 command |= FTRACE_UPDATE_TRACE_FUNC;
2074         }
2075 
2076         if (!command || !ftrace_enabled)
2077                 return;
2078 
2079         ftrace_run_update_code(command);
2080 }
2081 
2082 static void ftrace_startup_sysctl(void)
2083 {
2084         if (unlikely(ftrace_disabled))
2085                 return;
2086 
2087         /* Force update next time */
2088         saved_ftrace_func = NULL;
2089         /* ftrace_start_up is true if we want ftrace running */
2090         if (ftrace_start_up)
2091                 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
2092 }
2093 
2094 static void ftrace_shutdown_sysctl(void)
2095 {
2096         if (unlikely(ftrace_disabled))
2097                 return;
2098 
2099         /* ftrace_start_up is true if ftrace is running */
2100         if (ftrace_start_up)
2101                 ftrace_run_update_code(FTRACE_DISABLE_CALLS);
2102 }
2103 
2104 static cycle_t          ftrace_update_time;
2105 static unsigned long    ftrace_update_cnt;
2106 unsigned long           ftrace_update_tot_cnt;
2107 
2108 static int ops_traces_mod(struct ftrace_ops *ops)
2109 {
2110         struct ftrace_hash *hash;
2111 
2112         hash = ops->filter_hash;
2113         return ftrace_hash_empty(hash);
2114 }
2115 
2116 static int ftrace_update_code(struct module *mod)
2117 {
2118         struct ftrace_page *pg;
2119         struct dyn_ftrace *p;
2120         cycle_t start, stop;
2121         unsigned long ref = 0;
2122         int i;
2123 
2124         /*
2125          * When adding a module, we need to check if tracers are
2126          * currently enabled and if they are set to trace all functions.
2127          * If they are, we need to enable the module functions as well
2128          * as update the reference counts for those function records.
2129          */
2130         if (mod) {
2131                 struct ftrace_ops *ops;
2132 
2133                 for (ops = ftrace_ops_list;
2134                      ops != &ftrace_list_end; ops = ops->next) {
2135                         if (ops->flags & FTRACE_OPS_FL_ENABLED &&
2136                             ops_traces_mod(ops))
2137                                 ref++;
2138                 }
2139         }
2140 
2141         start = ftrace_now(raw_smp_processor_id());
2142         ftrace_update_cnt = 0;
2143 
2144         for (pg = ftrace_new_pgs; pg; pg = pg->next) {
2145 
2146                 for (i = 0; i < pg->index; i++) {
2147                         /* If something went wrong, bail without enabling anything */
2148                         if (unlikely(ftrace_disabled))
2149                                 return -1;
2150 
2151                         p = &pg->records[i];
2152                         p->flags = ref;
2153 
2154                         /*
2155                          * Do the initial record conversion from mcount jump
2156                          * to the NOP instructions.
2157                          */
2158                         if (!ftrace_code_disable(mod, p))
2159                                 break;
2160 
2161                         ftrace_update_cnt++;
2162 
2163                         /*
2164                          * If the tracing is enabled, go ahead and enable the record.
2165                          *
2166                          * The reason not to enable the record immediately is the
2167                          * inherent check of ftrace_make_nop/ftrace_make_call for
2168                          * correct previous instructions.  Doing the NOP conversion
2169                          * first puts the module into the correct state, thus
2170                          * passing the ftrace_make_call check.
2171                          */
2172                         if (ftrace_start_up && ref) {
2173                                 int failed = __ftrace_replace_code(p, 1);
2174                                 if (failed)
2175                                         ftrace_bug(failed, p->ip);
2176                         }
2177                 }
2178         }
2179 
2180         ftrace_new_pgs = NULL;
2181 
2182         stop = ftrace_now(raw_smp_processor_id());
2183         ftrace_update_time = stop - start;
2184         ftrace_update_tot_cnt += ftrace_update_cnt;
2185 
2186         return 0;
2187 }
2188 
2189 static int ftrace_allocate_records(struct ftrace_page *pg, int count)
2190 {
2191         int order;
2192         int cnt;
2193 
2194         if (WARN_ON(!count))
2195                 return -EINVAL;
2196 
2197         order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
2198 
2199         /*
2200          * We want to fill as much as possible. No more than a page
2201          * may be empty.
2202          */
2203         while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
2204                 order--;
2205 
2206  again:
2207         pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
2208 
2209         if (!pg->records) {
2210                 /* if we can't allocate this size, try something smaller */
2211                 if (!order)
2212                         return -ENOMEM;
2213                 order >>= 1;
2214                 goto again;
2215         }
2216 
2217         cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
2218         pg->size = cnt;
2219 
2220         if (cnt > count)
2221                 cnt = count;
2222 
2223         return cnt;
2224 }
2225 
2226 static struct ftrace_page *
2227 ftrace_allocate_pages(unsigned long num_to_init)
2228 {
2229         struct ftrace_page *start_pg;
2230         struct ftrace_page *pg;
2231         int order;
2232         int cnt;
2233 
2234         if (!num_to_init)
2235                 return 0;
2236 
2237         start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
2238         if (!pg)
2239                 return NULL;
2240 
2241         /*
2242          * Try to allocate as much as possible in one contiguous
2243          * location that fills in all of the space. We want to
2244          * waste as little space as possible.
2245          */
2246         for (;;) {
2247                 cnt = ftrace_allocate_records(pg, num_to_init);
2248                 if (cnt < 0)
2249                         goto free_pages;
2250 
2251                 num_to_init -= cnt;
2252                 if (!num_to_init)
2253                         break;
2254 
2255                 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
2256                 if (!pg->next)
2257                         goto free_pages;
2258 
2259                 pg = pg->next;
2260         }
2261 
2262         return start_pg;
2263 
2264  free_pages:
2265         while (start_pg) {
2266                 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
2267                 free_pages((unsigned long)pg->records, order);
2268                 start_pg = pg->next;
2269                 kfree(pg);
2270                 pg = start_pg;
2271         }
2272         pr_info("ftrace: FAILED to allocate memory for functions\n");
2273         return NULL;
2274 }
2275 
2276 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
2277 {
2278         int cnt;
2279 
2280         if (!num_to_init) {
2281                 pr_info("ftrace: No functions to be traced?\n");
2282                 return -1;
2283         }
2284 
2285         cnt = num_to_init / ENTRIES_PER_PAGE;
2286         pr_info("ftrace: allocating %ld entries in %d pages\n",
2287                 num_to_init, cnt + 1);
2288 
2289         return 0;
2290 }
2291 
2292 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
2293 
2294 struct ftrace_iterator {
2295         loff_t                          pos;
2296         loff_t                          func_pos;
2297         struct ftrace_page              *pg;
2298         struct dyn_ftrace               *func;
2299         struct ftrace_func_probe        *probe;
2300         struct trace_parser             parser;
2301         struct ftrace_hash              *hash;
2302         struct ftrace_ops               *ops;
2303         int                             hidx;
2304         int                             idx;
2305         unsigned                        flags;
2306 };
2307 
2308 static void *
2309 t_hash_next(struct seq_file *m, loff_t *pos)
2310 {
2311         struct ftrace_iterator *iter = m->private;
2312         struct hlist_node *hnd = NULL;
2313         struct hlist_head *hhd;
2314 
2315         (*pos)++;
2316         iter->pos = *pos;
2317 
2318         if (iter->probe)
2319                 hnd = &iter->probe->node;
2320  retry:
2321         if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
2322                 return NULL;
2323 
2324         hhd = &ftrace_func_hash[iter->hidx];
2325 
2326         if (hlist_empty(hhd)) {
2327                 iter->hidx++;
2328                 hnd = NULL;
2329                 goto retry;
2330         }
2331 
2332         if (!hnd)
2333                 hnd = hhd->first;
2334         else {
2335                 hnd = hnd->next;
2336                 if (!hnd) {
2337                         iter->hidx++;
2338                         goto retry;
2339                 }
2340         }
2341 
2342         if (WARN_ON_ONCE(!hnd))
2343                 return NULL;
2344 
2345         iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
2346 
2347         return iter;
2348 }
2349 
2350 static void *t_hash_start(struct seq_file *m, loff_t *pos)
2351 {
2352         struct ftrace_iterator *iter = m->private;
2353         void *p = NULL;
2354         loff_t l;
2355 
2356         if (!(iter->flags & FTRACE_ITER_DO_HASH))
2357                 return NULL;
2358 
2359         if (iter->func_pos > *pos)
2360                 return NULL;
2361 
2362         iter->hidx = 0;
2363         for (l = 0; l <= (*pos - iter->func_pos); ) {
2364                 p = t_hash_next(m, &l);
2365                 if (!p)
2366                         break;
2367         }
2368         if (!p)
2369                 return NULL;
2370 
2371         /* Only set this if we have an item */
2372         iter->flags |= FTRACE_ITER_HASH;
2373 
2374         return iter;
2375 }
2376 
2377 static int
2378 t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
2379 {
2380         struct ftrace_func_probe *rec;
2381 
2382         rec = iter->probe;
2383         if (WARN_ON_ONCE(!rec))
2384                 return -EIO;
2385 
2386         if (rec->ops->print)
2387                 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
2388 
2389         seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
2390 
2391         if (rec->data)
2392                 seq_printf(m, ":%p", rec->data);
2393         seq_putc(m, '\n');
2394 
2395         return 0;
2396 }
2397 
2398 static void *
2399 t_next(struct seq_file *m, void *v, loff_t *pos)
2400 {
2401         struct ftrace_iterator *iter = m->private;
2402         struct ftrace_ops *ops = iter->ops;
2403         struct dyn_ftrace *rec = NULL;
2404 
2405         if (unlikely(ftrace_disabled))
2406                 return NULL;
2407 
2408         if (iter->flags & FTRACE_ITER_HASH)
2409                 return t_hash_next(m, pos);
2410 
2411         (*pos)++;
2412         iter->pos = iter->func_pos = *pos;
2413 
2414         if (iter->flags & FTRACE_ITER_PRINTALL)
2415                 return t_hash_start(m, pos);
2416 
2417  retry:
2418         if (iter->idx >= iter->pg->index) {
2419                 if (iter->pg->next) {
2420                         iter->pg = iter->pg->next;
2421                         iter->idx = 0;
2422                         goto retry;
2423                 }
2424         } else {
2425                 rec = &iter->pg->records[iter->idx++];
2426                 if (((iter->flags & FTRACE_ITER_FILTER) &&
2427                      !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
2428 
2429                     ((iter->flags & FTRACE_ITER_NOTRACE) &&
2430                      !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
2431 
2432                     ((iter->flags & FTRACE_ITER_ENABLED) &&
2433                      !(rec->flags & ~FTRACE_FL_MASK))) {
2434 
2435                         rec = NULL;
2436                         goto retry;
2437                 }
2438         }
2439 
2440         if (!rec)
2441                 return t_hash_start(m, pos);
2442 
2443         iter->func = rec;
2444 
2445         return iter;
2446 }
2447 
2448 static void reset_iter_read(struct ftrace_iterator *iter)
2449 {
2450         iter->pos = 0;
2451         iter->func_pos = 0;
2452         iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
2453 }
2454 
2455 static void *t_start(struct seq_file *m, loff_t *pos)
2456 {
2457         struct ftrace_iterator *iter = m->private;
2458         struct ftrace_ops *ops = iter->ops;
2459         void *p = NULL;
2460         loff_t l;
2461 
2462         mutex_lock(&ftrace_lock);
2463 
2464         if (unlikely(ftrace_disabled))
2465                 return NULL;
2466 
2467         /*
2468          * If an lseek was done, then reset and start from beginning.
2469          */
2470         if (*pos < iter->pos)
2471                 reset_iter_read(iter);
2472 
2473         /*
2474          * For set_ftrace_filter reading, if we have the filter
2475          * off, we can short cut and just print out that all
2476          * functions are enabled.
2477          */
2478         if (iter->flags & FTRACE_ITER_FILTER &&
2479             ftrace_hash_empty(ops->filter_hash)) {
2480                 if (*pos > 0)
2481                         return t_hash_start(m, pos);
2482                 iter->flags |= FTRACE_ITER_PRINTALL;
2483                 /* reset in case of seek/pread */
2484                 iter->flags &= ~FTRACE_ITER_HASH;
2485                 return iter;
2486         }
2487 
2488         if (iter->flags & FTRACE_ITER_HASH)
2489                 return t_hash_start(m, pos);
2490 
2491         /*
2492          * Unfortunately, we need to restart at ftrace_pages_start
2493          * every time we let go of the ftrace_lock mutex. This is because
2494          * those pointers can change without the lock.
2495          */
2496         iter->pg = ftrace_pages_start;
2497         iter->idx = 0;
2498         for (l = 0; l <= *pos; ) {
2499                 p = t_next(m, p, &l);
2500                 if (!p)
2501                         break;
2502         }
2503 
2504         if (!p)
2505                 return t_hash_start(m, pos);
2506 
2507         return iter;
2508 }
2509 
2510 static void t_stop(struct seq_file *m, void *p)
2511 {
2512         mutex_unlock(&ftrace_lock);
2513 }
2514 
2515 static int t_show(struct seq_file *m, void *v)
2516 {
2517         struct ftrace_iterator *iter = m->private;
2518         struct dyn_ftrace *rec;
2519 
2520         if (iter->flags & FTRACE_ITER_HASH)
2521                 return t_hash_show(m, iter);
2522 
2523         if (iter->flags & FTRACE_ITER_PRINTALL) {
2524                 seq_printf(m, "#### all functions enabled ####\n");
2525                 return 0;
2526         }
2527 
2528         rec = iter->func;
2529 
2530         if (!rec)
2531                 return 0;
2532 
2533         seq_printf(m, "%ps", (void *)rec->ip);
2534         if (iter->flags & FTRACE_ITER_ENABLED)
2535                 seq_printf(m, " (%ld)%s",
2536                            rec->flags & ~FTRACE_FL_MASK,
2537                            rec->flags & FTRACE_FL_REGS ? " R" : "");
2538         seq_printf(m, "\n");
2539 
2540         return 0;
2541 }
2542 
2543 static const struct seq_operations show_ftrace_seq_ops = {
2544         .start = t_start,
2545         .next = t_next,
2546         .stop = t_stop,
2547         .show = t_show,
2548 };
2549 
2550 static int
2551 ftrace_avail_open(struct inode *inode, struct file *file)
2552 {
2553         struct ftrace_iterator *iter;
2554 
2555         if (unlikely(ftrace_disabled))
2556                 return -ENODEV;
2557 
2558         iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2559         if (iter) {
2560                 iter->pg = ftrace_pages_start;
2561                 iter->ops = &global_ops;
2562         }
2563 
2564         return iter ? 0 : -ENOMEM;
2565 }
2566 
2567 static int
2568 ftrace_enabled_open(struct inode *inode, struct file *file)
2569 {
2570         struct ftrace_iterator *iter;
2571 
2572         if (unlikely(ftrace_disabled))
2573                 return -ENODEV;
2574 
2575         iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2576         if (iter) {
2577                 iter->pg = ftrace_pages_start;
2578                 iter->flags = FTRACE_ITER_ENABLED;
2579                 iter->ops = &global_ops;
2580         }
2581 
2582         return iter ? 0 : -ENOMEM;
2583 }
2584 
2585 static void ftrace_filter_reset(struct ftrace_hash *hash)
2586 {
2587         mutex_lock(&ftrace_lock);
2588         ftrace_hash_clear(hash);
2589         mutex_unlock(&ftrace_lock);
2590 }
2591 
2592 /**
2593  * ftrace_regex_open - initialize function tracer filter files
2594  * @ops: The ftrace_ops that hold the hash filters
2595  * @flag: The type of filter to process
2596  * @inode: The inode, usually passed in to your open routine
2597  * @file: The file, usually passed in to your open routine
2598  *
2599  * ftrace_regex_open() initializes the filter files for the
2600  * @ops. Depending on @flag it may process the filter hash or
2601  * the notrace hash of @ops. With this called from the open
2602  * routine, you can use ftrace_filter_write() for the write
2603  * routine if @flag has FTRACE_ITER_FILTER set, or
2604  * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
2605  * ftrace_filter_lseek() should be used as the lseek routine, and
2606  * release must call ftrace_regex_release().
2607  */
2608 int
2609 ftrace_regex_open(struct ftrace_ops *ops, int flag,
2610                   struct inode *inode, struct file *file)
2611 {
2612         struct ftrace_iterator *iter;
2613         struct ftrace_hash *hash;
2614         int ret = 0;
2615 
2616         if (unlikely(ftrace_disabled))
2617                 return -ENODEV;
2618 
2619         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2620         if (!iter)
2621                 return -ENOMEM;
2622 
2623         if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
2624                 kfree(iter);
2625                 return -ENOMEM;
2626         }
2627 
2628         if (flag & FTRACE_ITER_NOTRACE)
2629                 hash = ops->notrace_hash;
2630         else
2631                 hash = ops->filter_hash;
2632 
2633         iter->ops = ops;
2634         iter->flags = flag;
2635 
2636         if (file->f_mode & FMODE_WRITE) {
2637                 mutex_lock(&ftrace_lock);
2638                 iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
2639                 mutex_unlock(&ftrace_lock);
2640 
2641                 if (!iter->hash) {
2642                         trace_parser_put(&iter->parser);
2643                         kfree(iter);
2644                         return -ENOMEM;
2645                 }
2646         }
2647 
2648         mutex_lock(&ftrace_regex_lock);
2649 
2650         if ((file->f_mode & FMODE_WRITE) &&
2651             (file->f_flags & O_TRUNC))
2652                 ftrace_filter_reset(iter->hash);
2653 
2654         if (file->f_mode & FMODE_READ) {
2655                 iter->pg = ftrace_pages_start;
2656 
2657                 ret = seq_open(file, &show_ftrace_seq_ops);
2658                 if (!ret) {
2659                         struct seq_file *m = file->private_data;
2660                         m->private = iter;
2661                 } else {
2662                         /* Failed */
2663                         free_ftrace_hash(iter->hash);
2664                         trace_parser_put(&iter->parser);
2665                         kfree(iter);
2666                 }
2667         } else
2668                 file->private_data = iter;
2669         mutex_unlock(&ftrace_regex_lock);
2670 
2671         return ret;
2672 }
2673 
2674 static int
2675 ftrace_filter_open(struct inode *inode, struct file *file)
2676 {
2677         return ftrace_regex_open(&global_ops,
2678                         FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
2679                         inode, file);
2680 }
2681 
2682 static int
2683 ftrace_notrace_open(struct inode *inode, struct file *file)
2684 {
2685         return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
2686                                  inode, file);
2687 }
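
/*
 * A hypothetical sketch of the file_operations wiring described in the
 * ftrace_regex_open() comment above.  ftrace_filter_lseek() and
 * ftrace_regex_release() are assumed to be provided elsewhere in the
 * ftrace core; the struct name is illustrative.
 */
static const struct file_operations example_filter_fops = {
        .open           = ftrace_filter_open,
        .read           = seq_read,
        .write          = ftrace_filter_write,
        .llseek         = ftrace_filter_lseek,
        .release        = ftrace_regex_release,
};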
2688 
2689 static int ftrace_match(char *str, char *regex, int len, int type)
2690 {
2691         int matched = 0;
2692         int slen;
2693 
2694         switch (type) {
2695         case MATCH_FULL:
2696                 if (strcmp(str, regex) == 0)
2697                         matched = 1;
2698                 break;
2699         case MATCH_FRONT_ONLY:
2700                 if (strncmp(str, regex, len) == 0)
2701                         matched = 1;
2702                 break;
2703         case MATCH_MIDDLE_ONLY:
2704                 if (strstr(str, regex))
2705                         matched = 1;
2706                 break;
2707         case MATCH_END_ONLY:
2708                 slen = strlen(str);
2709                 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
2710                         matched = 1;
2711                 break;
2712         }
2713 
2714         return matched;
2715 }
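
/*
 * Illustrative examples of how glob patterns map to the match types handled
 * above, assuming the usual filter_parse_regex() behaviour of stripping the
 * '*' wildcards and reporting where they appeared:
 *
 *   "schedule"      -> MATCH_FULL         exact strcmp()
 *   "sched_*"       -> MATCH_FRONT_ONLY   prefix strncmp()
 *   "*_read_unlock" -> MATCH_END_ONLY     compare the tail of the symbol
 *   "*spin*"        -> MATCH_MIDDLE_ONLY  strstr()
 */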
2716 
2717 static int
2718 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
2719 {
2720         struct ftrace_func_entry *entry;
2721         int ret = 0;
2722 
2723         entry = ftrace_lookup_ip(hash, rec->ip);
2724         if (not) {
2725                 /* Do nothing if it doesn't exist */
2726                 if (!entry)
2727                         return 0;
2728 
2729                 free_hash_entry(hash, entry);
2730         } else {
2731                 /* Do nothing if it exists */
2732                 if (entry)
2733                         return 0;
2734 
2735                 ret = add_hash_entry(hash, rec->ip);
2736         }
2737         return ret;
2738 }
2739 
2740 static int
2741 ftrace_match_record(struct dyn_ftrace *rec, char *mod,
2742                     char *regex, int len, int type)
2743 {
2744         char str[KSYM_SYMBOL_LEN];
2745         char *modname;
2746 
2747         kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
2748 
2749         if (mod) {
2750                 /* module lookup requires matching the module */
2751                 if (!modname || strcmp(modname, mod))
2752                         return 0;
2753 
2754                 /* blank search means to match all funcs in the mod */
2755                 if (!len)
2756                         return 1;
2757         }
2758 
2759         return ftrace_match(str, regex, len, type);
2760 }
2761 
2762 static int
2763 match_records(struct ftrace_hash *hash, char *buff,
2764               int len, char *mod, int not)
2765 {
2766         unsigned search_len = 0;
2767         struct ftrace_page *pg;
2768         struct dyn_ftrace *rec;
2769         int type = MATCH_FULL;
2770         char *search = buff;
2771         int found = 0;
2772         int ret;
2773 
2774         if (len) {
2775                 type = filter_parse_regex(buff, len, &search, &not);
2776                 search_len = strlen(search);
2777         }
2778 
2779         mutex_lock(&ftrace_lock);
2780 
2781         if (unlikely(ftrace_disabled))
2782                 goto out_unlock;
2783 
2784         do_for_each_ftrace_rec(pg, rec) {
2785                 if (ftrace_match_record(rec, mod, search, search_len, type)) {
2786                         ret = enter_record(hash, rec, not);
2787                         if (ret < 0) {
2788                                 found = ret;
2789                                 goto out_unlock;
2790                         }
2791                         found = 1;
2792                 }
2793         } while_for_each_ftrace_rec();
2794  out_unlock:
2795         mutex_unlock(&ftrace_lock);
2796 
2797         return found;
2798 }
2799 
2800 static int
2801 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
2802 {
2803         return match_records(hash, buff, len, NULL, 0);
2804 }
2805 
2806 static int
2807 ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
2808 {
2809         int not = 0;
2810 
2811         /* blank or '*' mean the same */
2812         if (strcmp(buff, "*") == 0)
2813                 buff[0] = 0;
2814 
2815         /* handle the case of 'don't filter this module' */
2816         if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
2817                 buff[0] = 0;
2818                 not = 1;
2819         }
2820 
2821         return match_records(hash, buff, strlen(buff), mod, not);
2822 }
2823 
2824 /*
2825  * We register the module command as a template to show others how
2826  * to register a command as well.
2827  */
2828 
2829 static int
2830 ftrace_mod_callback(struct ftrace_hash *hash,
2831                     char *func, char *cmd, char *param, int enable)
2832 {
2833         char *mod;
2834         int ret = -EINVAL;
2835 
2836         /*
2837          * cmd == 'mod' because we only registered this func
2838          * for the 'mod' ftrace_func_command.
2839          * But if you register one func with multiple commands,
2840          * you can tell which command was used by the cmd
2841          * parameter.
2842          */
2843 
2844         /* we must have a module name */
2845         if (!param)
2846                 return ret;
2847 
2848         mod = strsep(&param, ":");
2849         if (!strlen(mod))
2850                 return ret;
2851 
2852         ret = ftrace_match_module_records(hash, func, mod);
2853         if (!ret)
2854                 ret = -EINVAL;
2855         if (ret < 0)
2856                 return ret;
2857 
2858         return 0;
2859 }
2860 
2861 static struct ftrace_func_command ftrace_mod_cmd = {
2862         .name                   = "mod",
2863         .func                   = ftrace_mod_callback,
2864 };
2865 
2866 static int __init ftrace_mod_cmd_init(void)
2867 {
2868         return register_ftrace_command(&ftrace_mod_cmd);
2869 }
2870 core_initcall(ftrace_mod_cmd_init);
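
/*
 * A hypothetical sketch of following the "mod" template above to register
 * another command.  The "example" name and the handler body are purely
 * illustrative; such a handler would run when "func:example:param" is
 * written to the filter files.
 */
static int
example_cmd_callback(struct ftrace_hash *hash,
                     char *func, char *cmd, char *param, int enable)
{
        if (!param)
                return -EINVAL;

        /* act on @func and @param, updating @hash as needed */
        return 0;
}

static struct ftrace_func_command example_cmd = {
        .name                   = "example",
        .func                   = example_cmd_callback,
};

static int __init example_cmd_init(void)
{
        return register_ftrace_command(&example_cmd);
}
core_initcall(example_cmd_init);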
2871 
2872 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
2873                                       struct ftrace_ops *op, struct pt_regs *pt_regs)
2874 {
2875         struct ftrace_func_probe *entry;
2876         struct hlist_head *hhd;
2877         struct hlist_node *n;
2878         unsigned long key;
2879 
2880         key = hash_long(ip, FTRACE_HASH_BITS);
2881 
2882         hhd = &ftrace_func_hash[key];
2883 
2884         if (hlist_empty(hhd))
2885                 return;
2886 
2887         /*
2888          * Disable preemption for these calls to prevent an RCU grace
2889          * period. This syncs the hash iteration and freeing of items
2890          * on the hash. rcu_read_lock is too dangerous here.
2891          */
2892         preempt_disable_notrace();
2893         hlist_for_each_entry_rcu(entry, n, hhd, node) {
2894                 if (entry->ip == ip)
2895                         entry->ops->func(ip, parent_ip, &entry->data);
2896         }
2897         preempt_enable_notrace();
2898 }
2899 
2900 static struct ftrace_ops trace_probe_ops __read_mostly =
2901 {
2902         .func           = function_trace_probe_call,
2903 };
2904 
2905 static int ftrace_probe_registered;
2906 
2907 static void __enable_ftrace_function_probe(void)
2908 {
2909         int ret;
2910         int i;
2911 
2912         if (ftrace_probe_registered)
2913                 return;
2914 
2915         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2916                 struct hlist_head *hhd = &ftrace_func_hash[i];
2917                 if (hhd->first)
2918                         break;
2919         }
2920         /* Nothing registered? */
2921         if (i == FTRACE_FUNC_HASHSIZE)
2922                 return;
2923 
2924         ret = __register_ftrace_function(&trace_probe_ops);
2925         if (!ret)
2926                 ret = ftrace_startup(&trace_probe_ops, 0);
2927 
2928         ftrace_probe_registered = 1;
2929 }
2930 
2931 static void __disable_ftrace_function_probe(void)
2932 {
2933         int ret;
2934         int i;
2935 
2936         if (!ftrace_probe_registered)
2937                 return;
2938 
2939         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2940                 struct hlist_head *hhd = &ftrace_func_hash[i];
2941                 if (hhd->first)
2942                         return;
2943         }
2944 
2945         /* no more funcs left */
2946         ret = __unregister_ftrace_function(&trace_probe_ops);
2947         if (!ret)
2948                 ftrace_shutdown(&trace_probe_ops, 0);
2949 
2950         ftrace_probe_registered = 0;
2951 }
2952 
2953 
2954 static void ftrace_free_entry_rcu(struct rcu_head *rhp)
2955 {
2956         struct ftrace_func_probe *entry =
2957                 container_of(rhp, struct ftrace_func_probe, rcu);
2958 
2959         if (entry->ops->free)
2960                 entry->ops->free(&entry->data);
2961         kfree(entry);
2962 }
2963 
2964 
2965 int
2966 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2967                               void *data)
2968 {
2969         struct ftrace_func_probe *entry;
2970         struct ftrace_page *pg;
2971         struct dyn_ftrace *rec;
2972         int type, len, not;
2973         unsigned long key;
2974         int count = 0;
2975         char *search;
2976 
2977         type = filter_parse_regex(glob, strlen(glob), &search, &not);
2978         len = strlen(search);
2979 
2980         /* we do not support '!' for function probes */
2981         if (WARN_ON(not))
2982                 return -EINVAL;
2983 
2984         mutex_lock(&ftrace_lock);
2985 
2986         if (unlikely(ftrace_disabled))
2987                 goto out_unlock;
2988 
2989         do_for_each_ftrace_rec(pg, rec) {
2990 
2991                 if (!ftrace_match_record(rec, NULL, search, len, type))
2992                         continue;
2993 
2994                 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2995                 if (!entry) {
2996                         /* If we did not process any, then return error */
2997                         if (!count)
2998                                 count = -ENOMEM;
2999                         goto out_unlock;
3000                 }
3001 
3002                 count++;
3003 
3004                 entry->data = data;
3005 
3006                 /*
3007                  * The caller might want to do something special
3008                  * for each function we find. We call the callback
3009                  * to give the caller an opportunity to do so.
3010                  */
3011                 if (ops->callback) {
3012                         if (ops->callback(rec->ip, &entry->data) < 0) {
3013                                 /* caller does not like this func */
3014                                 kfree(entry);
3015                                 continue;
3016                         }
3017                 }
3018 
3019                 entry->ops = ops;
3020                 entry->ip = rec->ip;
3021 
3022                 key = hash_long(entry->ip, FTRACE_HASH_BITS);
3023                 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
3024 
3025         } while_for_each_ftrace_rec();
3026         __enable_ftrace_function_probe();
3027 
3028  out_unlock:
3029         mutex_unlock(&ftrace_lock);
3030 
3031         return count;
3032 }
3033 
3034 enum {
3035         PROBE_TEST_FUNC         = 1,
3036         PROBE_TEST_DATA         = 2
3037 };
3038 
3039 static void
3040 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3041                                   void *data, int flags)
3042 {
3043         struct ftrace_func_probe *entry;
3044         struct hlist_node *n, *tmp;
3045         char str[KSYM_SYMBOL_LEN];
3046         int type = MATCH_FULL;
3047         int i, len = 0;
3048         char *search;
3049 
3050         if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
3051                 glob = NULL;
3052         else if (glob) {
3053                 int not;
3054 
3055                 type = filter_parse_regex(glob, strlen(glob), &search, &not);
3056                 len = strlen(search);
3057 
3058                 /* we do not support '!' for function probes */
3059                 if (WARN_ON(not))
3060                         return;
3061         }
3062 
3063         mutex_lock(&ftrace_lock);
3064         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3065                 struct hlist_head *hhd = &ftrace_func_hash[i];
3066 
3067                 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
3068 
3069                         /* break up if statements for readability */
3070                         if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
3071                                 continue;
3072 
3073                         if ((flags & PROBE_TEST_DATA) && entry->data != data)
3074                                 continue;
3075 
3076                         /* do this last, since it is the most expensive */
3077                         if (glob) {
3078                                 kallsyms_lookup(entry->ip, NULL, NULL,
3079                                                 NULL, str);
3080                                 if (!ftrace_match(str, glob, len, type))
3081                                         continue;
3082                         }
3083 
3084                         hlist_del_rcu(&entry->node);
3085                         call_rcu_sched(&entry->rcu, ftrace_free_entry_rcu);
3086                 }
3087         }
3088         __disable_ftrace_function_probe();
3089         mutex_unlock(&ftrace_lock);
3090 }
3091 
3092 void
3093 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3094                                 void *data)
3095 {
3096         __unregister_ftrace_function_probe(glob, ops, data,
3097                                           PROBE_TEST_FUNC | PROBE_TEST_DATA);
3098 }
3099 
3100 void
3101 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
3102 {
3103         __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
3104 }
3105 
3106 void unregister_ftrace_function_probe_all(char *glob)
3107 {
3108         __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
3109 }
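
/*
 * A hypothetical usage sketch for the probe API above.  The handler counts
 * hits in the per-entry data slot; its signature follows the way
 * entry->ops->func() is invoked in function_trace_probe_call() above.  The
 * "vfs_*" glob and all names are illustrative.
 */
static void example_probe_func(unsigned long ip, unsigned long parent_ip,
                               void **data)
{
        long *count = (long *)data;

        (*count)++;
}

static struct ftrace_probe_ops example_probe_ops = {
        .func           = example_probe_func,
};

static int example_probe_attach(void)
{
        int ret;

        /* returns the number of functions the probe was attached to */
        ret = register_ftrace_function_probe("vfs_*", &example_probe_ops, NULL);
        return ret < 0 ? ret : 0;
}

static void example_probe_detach(void)
{
        unregister_ftrace_function_probe("vfs_*", &example_probe_ops, NULL);
}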
3110 
3111 static LIST_HEAD(ftrace_commands);
3112 static DEFINE_MUTEX(ftrace_cmd_mutex);
3113 
3114 int register_ftrace_command(struct ftrace_func_command *cmd)
3115 {
3116         struct ftrace_func_command *p;
3117         int ret = 0;
3118 
3119         mutex_lock(&ftrace_cmd_mutex);
3120         list_for_each_entry(p, &ftrace_commands, list) {
3121                 if (strcmp(cmd->name, p->name) == 0) {
3122                         ret = -EBUSY;
3123                         goto out_unlock;
3124                 }
3125         }
3126         list_add(&cmd->list, &ftrace_commands);
3127  out_unlock:
3128         mutex_unlock(&ftrace_cmd_mutex);
3129 
3130         return ret;
3131 }
3132 
3133 int unregister_ftrace_command(struct ftrace_func_command *cmd)
3134 {
3135         struct ftrace_func_command *p, *n;
3136         int ret = -ENODEV;
3137 
3138         mutex_lock(&ftrace_cmd_mutex);
3139         list_for_each_entry_safe(p, n, &ftrace_commands, list) {
3140                 if (strcmp(cmd->name, p->name) == 0) {
3141                         ret = 0;
3142                         list_del_init(&p->list);
3143                         goto out_unlock;
3144                 }
3145         }
3146  out_unlock:
3147         mutex_unlock(&ftrace_cmd_mutex);
3148 
3149         return ret;
3150 }
3151 
3152 static int ftrace_process_regex(struct ftrace_hash *hash,
3153                                 char *buff, int len, int enable)
3154 {
3155         char *func, *command, *next = buff;
3156         struct ftrace_func_command *p;
3157         int ret = -EINVAL;
3158 
3159         func = strsep(&next, ":");
3160 
3161         if (!next) {
3162                 ret = ftrace_match_records(hash, func, len);
3163                 if (!ret)
3164                         ret = -EINVAL;
3165                 if (ret < 0)
3166                         return ret;
3167                 return 0;
3168         }
3169 
3170         /* command found */
3171 
3172         command = strsep(&next, ":");
3173 
3174         mutex_lock(&ftrace_cmd_mutex);
3175         list_for_each_entry(p, &ftrace_commands, list) {
3176                 if (strcmp(p->name, command) == 0) {
3177                         ret = p->func(hash, func, command, next, enable);
3178                         goto out_unlock;
3179                 }
3180         }
3181  out_unlock:
3182         mutex_unlock(&ftrace_cmd_mutex);
3183 
3184         return ret;
3185 }
3186 
3187 static ssize_t
3188 ftrace_regex_write(struct file *file, const char __user *ubuf,
3189                    size_t cnt, loff_t *ppos, int enable)
3190 {
3191         struct ftrace_iterator *iter;
3192         struct trace_parser *parser;
3193         ssize_t ret, read;
3194 
3195         if (!cnt)
3196                 return 0;
3197 
3198         mutex_lock(&ftrace_regex_lock);
3199 
3200         ret = -ENODEV;
3201         if (unlikely(ftrace_disabled))
3202                 goto out_unlock;
3203 
3204         if (file->f_mode & FMODE_READ) {
3205                 struct seq_file *m = file->private_data;
3206                 iter = m->private;
3207         } else
3208                 iter = file->private_data;
3209 
3210         parser = &iter->parser;
3211         read = trace_get_user(parser, ubuf, cnt, ppos);
3212 
3213         if (read >= 0 && trace_parser_loaded(parser) &&
3214             !trace_parser_cont(parser)) {
3215                 ret = ftrace_process_regex(iter->hash, parser->buffer,
3216                                            parser->idx, enable);
3217                 trace_parser_clear(parser);
3218                 if (ret)
3219                         goto out_unlock;
3220         }
3221 
3222         ret = read;
3223 out_unlock:
3224         mutex_unlock(&ftrace_regex_lock);
3225 
3226         return ret;
3227 }
3228 
3229 ssize_t
3230 ftrace_filter_write(struct file *file, const char __user *ubuf,
3231                     size_t cnt, loff_t *ppos)
3232 {
3233         return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
3234 }
3235 
3236 ssize_t
3237 ftrace_notrace_write(struct file *file, const char __user *ubuf,
3238                      size_t cnt, loff_t *ppos)
3239 {
3240         return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
3241 }
3242 
3243 static int
3244 ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
3245 {
3246         struct ftrace_func_entry *entry;
3247 
3248         if (!ftrace_location(ip))
3249                 return -EINVAL;
3250 
3251         if (remove) {
3252                 entry = ftrace_lookup_ip(hash, ip);
3253                 if (!entry)
3254                         return -ENOENT;
3255                 free_hash_entry(hash, entry);
3256                 return 0;
3257         }
3258 
3259         return add_hash_entry(hash, ip);
3260 }
3261 
3262 static int
3263 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
3264                 unsigned long ip, int remove, int reset, int enable)
3265 {
3266         struct ftrace_hash **orig_hash;
3267         struct ftrace_hash *hash;
3268         int ret;
3269 
3270         /* All global ops use the global ops filters */
3271         if (ops->flags & FTRACE_OPS_FL_GLOBAL)
3272                 ops = &global_ops;
3273 
3274         if (unlikely(ftrace_disabled))
3275                 return -ENODEV;
3276 
3277         if (enable)
3278                 orig_hash = &ops->filter_hash;
3279         else
3280                 orig_hash = &ops->notrace_hash;
3281 
3282         hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3283         if (!hash)
3284                 return -ENOMEM;
3285 
3286         mutex_lock(&ftrace_regex_lock);
3287         if (reset)
3288                 ftrace_filter_reset(hash);
3289         if (buf && !ftrace_match_records(hash, buf, len)) {
3290                 ret = -EINVAL;
3291                 goto out_regex_unlock;
3292         }
3293         if (ip) {
3294                 ret = ftrace_match_addr(hash, ip, remove);
3295                 if (ret < 0)
3296                         goto out_regex_unlock;
3297         }
3298 
3299         mutex_lock(&ftrace_lock);
3300         ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3301         if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
3302             && ftrace_enabled)
3303                 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3304 
3305         mutex_unlock(&ftrace_lock);
3306 
3307  out_regex_unlock:
3308         mutex_unlock(&ftrace_regex_lock);
3309 
3310         free_ftrace_hash(hash);
3311         return ret;
3312 }
3313 
3314 static int
3315 ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
3316                 int reset, int enable)
3317 {
3318         return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
3319 }
3320 
3321 /**
3322  * ftrace_set_filter_ip - set a function to filter on in ftrace by address
3323  * @ops - the ops to set the filter with
3324  * @ip - the address to add to or remove from the filter.
3325  * @remove - non zero to remove the ip from the filter
3326  * @reset - non zero to reset all filters before applying this filter.
3327  *
3328  * Filters denote which functions should be enabled when tracing is enabled.
3329  * If @ip is NULL, it fails to update the filter.
3330  */
3331 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
3332                          int remove, int reset)
3333 {
3334         return ftrace_set_addr(ops, ip, remove, reset, 1);
3335 }
3336 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
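/*
 * Usage sketch (not from this file): a client with its own ftrace_ops can
 * restrict tracing to a single function by address before registering it.
 * "my_ops" and the looked-up symbol are illustrative assumptions, and
 * kallsyms_lookup_name() is assumed to be available to the caller.
 *
 *      unsigned long ip = kallsyms_lookup_name("schedule");
 *
 *      if (ip && !ftrace_set_filter_ip(&my_ops, ip, 0, 1))
 *              ret = register_ftrace_function(&my_ops);
 */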
3337 
3338 static int
3339 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
3340                  int reset, int enable)
3341 {
3342         return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
3343 }
3344 
3345 /**
3346  * ftrace_set_filter - set a function to filter on in ftrace
3347  * @ops - the ops to set the filter with
3348  * @buf - the string that holds the function filter text.
3349  * @len - the length of the string.
3350  * @reset - non zero to reset all filters before applying this filter.
3351  *
3352  * Filters denote which functions should be enabled when tracing is enabled.
3353  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3354  */
3355 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
3356                        int len, int reset)
3357 {
3358         return ftrace_set_regex(ops, buf, len, reset, 1);
3359 }
3360 EXPORT_SYMBOL_GPL(ftrace_set_filter);
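/*
 * Usage sketch (not from this file): the filter text may use the glob
 * patterns understood by filter_parse_regex(), e.g. selecting every function
 * whose name starts with "vfs_".  "my_ops" is an illustrative, caller-owned
 * ftrace_ops.
 *
 *      char buf[] = "vfs_*";
 *
 *      ftrace_set_filter(&my_ops, buf, strlen(buf), 1);
 */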
3361 
3362 /**
3363  * ftrace_set_notrace - set a function to not trace in ftrace
3364  * @ops - the ops to set the notrace filter with
3365  * @buf - the string that holds the function notrace text.
3366  * @len - the length of the string.
3367  * @reset - non zero to reset all filters before applying this filter.
3368  *
3369  * Notrace Filters denote which functions should not be enabled when tracing
3370  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3371  * for tracing.
3372  */
3373 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
3374                         int len, int reset)
3375 {
3376         return ftrace_set_regex(ops, buf, len, reset, 0);
3377 }
3378 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
3379 /**
3380  * ftrace_set_global_filter - set a function to filter on with global tracers
3382  * @buf - the string that holds the function filter text.
3383  * @len - the length of the string.
3384  * @reset - non zero to reset all filters before applying this filter.
3385  *
3386  * Filters denote which functions should be enabled when tracing is enabled.
3387  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3388  */
3389 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
3390 {
3391         ftrace_set_regex(&global_ops, buf, len, reset, 1);
3392 }
3393 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
3394 
3395 /**
3396  * ftrace_set_global_notrace - set a function to not trace with global tracers
3398  * @buf - the string that holds the function notrace text.
3399  * @len - the length of the string.
3400  * @reset - non zero to reset all filters before applying this filter.
3401  *
3402  * Notrace Filters denote which functions should not be enabled when tracing
3403  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3404  * for tracing.
3405  */
3406 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
3407 {
3408         ftrace_set_regex(&global_ops, buf, len, reset, 0);
3409 }
3410 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
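/*
 * Note on the two global variants above (sketch, not from this file): they
 * operate on global_ops, i.e. on the filters consulted by any ops that has
 * FTRACE_OPS_FL_GLOBAL set (see ftrace_set_hash() above).  For example, a
 * caller could exclude the raw spinlock helpers from the global trace:
 *
 *      char buf[] = "_raw_spin_*";
 *
 *      ftrace_set_global_notrace(buf, strlen(buf), 0);
 */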
3411 
3412 /*
3413  * command line interface to allow users to set filters on boot up.
3414  */
3415 #define FTRACE_FILTER_SIZE              COMMAND_LINE_SIZE
3416 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
3417 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
3418 
3419 static int __init set_ftrace_notrace(char *str)
3420 {
3421         strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
3422         return 1;
3423 }
3424 __setup("ftrace_notrace=", set_ftrace_notrace);
3425 
3426 static int __init set_ftrace_filter(char *str)
3427 {
3428         strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
3429         return 1;
3430 }
3431 __setup("ftrace_filter=", set_ftrace_filter);
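/*
 * Example boot parameters (illustrative function names): both options take a
 * comma separated list, which ftrace_set_early_filter() below splits with
 * strsep() and applies to the global filters from ftrace_init():
 *
 *      ftrace_filter=kmem_cache_alloc,kmem_cache_free ftrace_notrace=rcu_read_lock
 */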
3432 
3433 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3434 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
3435 static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
3436 
3437 static int __init set_graph_function(char *str)
3438 {
3439         strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
3440         return 1;
3441 }
3442 __setup("ftrace_graph_filter=", set_graph_function);
3443 
3444 static void __init set_ftrace_early_graph(char *buf)
3445 {
3446         int ret;
3447         char *func;
3448 
3449         while (buf) {
3450                 func = strsep(&buf, ",");
3451                 /* we allow only one expression at a time */
3452                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3453                                       func);
3454                 if (ret)
3455                         printk(KERN_DEBUG "ftrace: function %s not "
3456                                           "traceable\n", func);
3457         }
3458 }
3459 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3460 
3461 void __init
3462 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
3463 {
3464         char *func;
3465 
3466         while (buf) {
3467                 func = strsep(&buf, ",");
3468                 ftrace_set_regex(ops, func, strlen(func), 0, enable);
3469         }
3470 }
3471 
3472 static void __init set_ftrace_early_filters(void)
3473 {
3474         if (ftrace_filter_buf[0])
3475                 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
3476         if (ftrace_notrace_buf[0])
3477                 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
3478 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3479         if (ftrace_graph_buf[0])
3480                 set_ftrace_early_graph(ftrace_graph_buf);
3481 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3482 }
3483 
3484 int ftrace_regex_release(struct inode *inode, struct file *file)
3485 {
3486         struct seq_file *m = (struct seq_file *)file->private_data;
3487         struct ftrace_iterator *iter;
3488         struct ftrace_hash **orig_hash;
3489         struct trace_parser *parser;
3490         int filter_hash;
3491         int ret;
3492 
3493         mutex_lock(&ftrace_regex_lock);
3494         if (file->f_mode & FMODE_READ) {
3495                 iter = m->private;
3496 
3497                 seq_release(inode, file);
3498         } else
3499                 iter = file->private_data;
3500 
3501         parser = &iter->parser;
3502         if (trace_parser_loaded(parser)) {
3503                 parser->buffer[parser->idx] = 0;
3504                 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
3505         }
3506 
3507         trace_parser_put(parser);
3508 
3509         if (file->f_mode & FMODE_WRITE) {
3510                 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
3511 
3512                 if (filter_hash)
3513                         orig_hash = &iter->ops->filter_hash;
3514                 else
3515                         orig_hash = &iter->ops->notrace_hash;
3516 
3517                 mutex_lock(&ftrace_lock);
3518                 ret = ftrace_hash_move(iter->ops, filter_hash,
3519                                        orig_hash, iter->hash);
3520                 if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
3521                     && ftrace_enabled)
3522                         ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3523 
3524                 mutex_unlock(&ftrace_lock);
3525         }
3526         free_ftrace_hash(iter->hash);
3527         kfree(iter);
3528 
3529         mutex_unlock(&ftrace_regex_lock);
3530         return 0;
3531 }
3532 
3533 static const struct file_operations ftrace_avail_fops = {
3534         .open = ftrace_avail_open,
3535         .read = seq_read,
3536         .llseek = seq_lseek,
3537         .release = seq_release_private,
3538 };
3539 
3540 static const struct file_operations ftrace_enabled_fops = {
3541         .open = ftrace_enabled_open,
3542         .read = seq_read,
3543         .llseek = seq_lseek,
3544         .release = seq_release_private,
3545 };
3546 
3547 static const struct file_operations ftrace_filter_fops = {
3548         .open = ftrace_filter_open,
3549         .read = seq_read,
3550         .write = ftrace_filter_write,
3551         .llseek = ftrace_filter_lseek,
3552         .release = ftrace_regex_release,
3553 };
3554 
3555 static const struct file_operations ftrace_notrace_fops = {
3556         .open = ftrace_notrace_open,
3557         .read = seq_read,
3558         .write = ftrace_notrace_write,
3559         .llseek = ftrace_filter_lseek,
3560         .release = ftrace_regex_release,
3561 };
3562 
3563 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3564 
3565 static DEFINE_MUTEX(graph_lock);
3566 
3567 int ftrace_graph_count;
3568 int ftrace_graph_filter_enabled;
3569 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3570 
3571 static void *
3572 __g_next(struct seq_file *m, loff_t *pos)
3573 {
3574         if (*pos >= ftrace_graph_count)
3575                 return NULL;
3576         return &ftrace_graph_funcs[*pos];
3577 }
3578 
3579 static void *
3580 g_next(struct seq_file *m, void *v, loff_t *pos)
3581 {
3582         (*pos)++;
3583         return __g_next(m, pos);
3584 }
3585 
3586 static void *g_start(struct seq_file *m, loff_t *pos)
3587 {
3588         mutex_lock(&graph_lock);
3589 
3590         /* Nothing in the filter; tell g_show to print that all functions are enabled */
3591         if (!ftrace_graph_filter_enabled && !*pos)
3592                 return (void *)1;
3593 
3594         return __g_next(m, pos);
3595 }
3596 
3597 static void g_stop(struct seq_file *m, void *p)
3598 {
3599         mutex_unlock(&graph_lock);
3600 }
3601 
3602 static int g_show(struct seq_file *m, void *v)
3603 {
3604         unsigned long *ptr = v;
3605 
3606         if (!ptr)
3607                 return 0;
3608 
3609         if (ptr == (unsigned long *)1) {
3610                 seq_printf(m, "#### all functions enabled ####\n");
3611                 return 0;
3612         }
3613 
3614         seq_printf(m, "%ps\n", (void *)*ptr);
3615 
3616         return 0;
3617 }
3618 
3619 static const struct seq_operations ftrace_graph_seq_ops = {
3620         .start = g_start,
3621         .next = g_next,
3622         .stop = g_stop,
3623         .show = g_show,
3624 };
3625 
3626 static int
3627 ftrace_graph_open(struct inode *inode, struct file *file)
3628 {
3629         int ret = 0;
3630 
3631         if (unlikely(ftrace_disabled))
3632                 return -ENODEV;
3633 
3634         mutex_lock(&graph_lock);
3635         if ((file->f_mode & FMODE_WRITE) &&
3636             (file->f_flags & O_TRUNC)) {
3637                 ftrace_graph_filter_enabled = 0;
3638                 ftrace_graph_count = 0;
3639                 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
3640         }
3641         mutex_unlock(&graph_lock);
3642 
3643         if (file->f_mode & FMODE_READ)
3644                 ret = seq_open(file, &ftrace_graph_seq_ops);
3645 
3646         return ret;
3647 }
3648 
3649 static int
3650 ftrace_graph_release(struct inode *inode, struct file *file)
3651 {
3652         if (file->f_mode & FMODE_READ)
3653                 seq_release(inode, file);
3654         return 0;
3655 }
3656 
3657 static int
3658 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
3659 {
3660         struct dyn_ftrace *rec;
3661         struct ftrace_page *pg;
3662         int search_len;
3663         int fail = 1;
3664         int type, not;
3665         char *search;
3666         bool exists;
3667         int i;
3668 
3669         /* decode regex */
3670         type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
3671         if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
3672                 return -EBUSY;
3673 
3674         search_len = strlen(search);
3675 
3676         mutex_lock(&ftrace_lock);
3677 
3678         if (unlikely(ftrace_disabled)) {
3679                 mutex_unlock(&ftrace_lock);
3680                 return -ENODEV;
3681         }
3682 
3683         do_for_each_ftrace_rec(pg, rec) {
3684 
3685                 if (ftrace_match_record(rec, NULL, search, search_len, type)) {
3686                         /* if it is in the array */
3687                         exists = false;
3688                         for (i = 0; i < *idx; i++) {
3689                                 if (array[i] == rec->ip) {
3690                                         exists = true;
3691                                         break;
3692                                 }
3693                         }
3694 
3695                         if (!not) {
3696                                 fail = 0;
3697                                 if (!exists) {
3698                                         array[(*idx)++] = rec->ip;
3699                                         if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
3700                                                 goto out;
3701                                 }
3702                         } else {
3703                                 if (exists) {
3704                                         array[i] = array[--(*idx)];
3705                                         array[*idx] = 0;
3706                                         fail = 0;
3707                                 }
3708                         }
3709                 }
3710         } while_for_each_ftrace_rec();
3711 out:
3712         mutex_unlock(&ftrace_lock);
3713 
3714         if (fail)
3715                 return -EINVAL;
3716 
3717         ftrace_graph_filter_enabled = !!(*idx);
3718 
3719         return 0;
3720 }
3721 
3722 static ssize_t
3723 ftrace_graph_write(struct file *file, const char __user *ubuf,
3724                    size_t cnt, loff_t *ppos)
3725 {
3726         struct trace_parser parser;
3727         ssize_t read, ret;
3728 
3729         if (!cnt)
3730                 return 0;
3731 
3732         mutex_lock(&graph_lock);
3733 
3734         if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
3735                 ret = -ENOMEM;
3736                 goto out_unlock;
3737         }
3738 
3739         read = trace_get_user(&parser, ubuf, cnt, ppos);
3740 
3741         if (read >= 0 && trace_parser_loaded((&parser))) {
3742                 parser.buffer[parser.idx] = 0;
3743 
3744                 /* we allow only one expression at a time */
3745                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3746                                         parser.buffer);
3747                 if (ret)
3748                         goto out_free;
3749         }
3750 
3751         ret = read;
3752 
3753 out_free:
3754         trace_parser_put(&parser);
3755 out_unlock:
3756         mutex_unlock(&graph_lock);
3757 
3758         return ret;
3759 }
3760 
3761 static const struct file_operations ftrace_graph_fops = {
3762         .open           = ftrace_graph_open,
3763         .read           = seq_read,
3764         .write          = ftrace_graph_write,
3765         .llseek         = ftrace_filter_lseek,
3766         .release        = ftrace_graph_release,
3767 };
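/*
 * Usage sketch for the "set_graph_function" file created in
 * ftrace_init_dyn_debugfs() below: names written to it are collected into
 * ftrace_graph_funcs[] by ftrace_graph_write()/ftrace_set_func() above, and
 * the graph tracer then limits its output to those functions and the call
 * chains beneath them.  Assuming debugfs is mounted in the usual place, and
 * using an illustrative (x86) function name:
 *
 *      # echo do_IRQ > /sys/kernel/debug/tracing/set_graph_function
 */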
3768 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3769 
3770 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
3771 {
3772 
3773         trace_create_file("available_filter_functions", 0444,
3774                         d_tracer, NULL, &ftrace_avail_fops);
3775 
3776         trace_create_file("enabled_functions", 0444,
3777                         d_tracer, NULL, &ftrace_enabled_fops);
3778 
3779         trace_create_file("set_ftrace_filter", 0644, d_tracer,
3780                         NULL, &ftrace_filter_fops);
3781 
3782         trace_create_file("set_ftrace_notrace", 0644, d_tracer,
3783                                     NULL, &ftrace_notrace_fops);
3784 
3785 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3786         trace_create_file("set_graph_function", 0444, d_tracer,
3787                                     NULL,
3788                                     &ftrace_graph_fops);
3789 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3790 
3791         return 0;
3792 }
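/*
 * The files created above appear under the tracing directory (typically
 * /sys/kernel/debug/tracing).  A short usage sketch, with illustrative
 * function names:
 *
 *      # cat available_filter_functions | head
 *      # echo do_fork > set_ftrace_filter
 *      # echo '*lock*' > set_ftrace_notrace
 *      # cat enabled_functions
 */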
3793 
3794 static int ftrace_cmp_ips(const void *a, const void *b)
3795 {
3796         const unsigned long *ipa = a;
3797         const unsigned long *ipb = b;
3798 
3799         if (*ipa > *ipb)
3800                 return 1;
3801         if (*ipa < *ipb)
3802                 return -1;
3803         return 0;
3804 }
3805 
3806 static void ftrace_swap_ips(void *a, void *b, int size)
3807 {
3808         unsigned long *ipa = a;
3809         unsigned long *ipb = b;
3810         unsigned long t;
3811 
3812         t = *ipa;
3813         *ipa = *ipb;
3814         *ipb = t;
3815 }
3816 
3817 static int ftrace_process_locs(struct module *mod,
3818                                unsigned long *start,
3819                                unsigned long *end)
3820 {
3821         struct ftrace_page *start_pg;
3822         struct ftrace_page *pg;
3823         struct dyn_ftrace *rec;
3824         unsigned long count;
3825         unsigned long *p;
3826         unsigned long addr;
3827         unsigned long flags = 0; /* Shut up gcc */
3828         int ret = -ENOMEM;
3829 
3830         count = end - start;
3831 
3832         if (!count)
3833                 return 0;
3834 
3835         sort(start, count, sizeof(*start),
3836              ftrace_cmp_ips, ftrace_swap_ips);
3837 
3838         start_pg = ftrace_allocate_pages(count);
3839         if (!start_pg)
3840                 return -ENOMEM;
3841 
3842         mutex_lock(&ftrace_lock);
3843 
3844         /*
3845          * The core kernel and each module need their own pages, as
3846          * modules will free them when they are removed.
3847          * Force a new page to be allocated for modules.
3848          */
3849         if (!mod) {
3850                 WARN_ON(ftrace_pages || ftrace_pages_start);
3851                 /* First initialization */
3852                 ftrace_pages = ftrace_pages_start = start_pg;
3853         } else {
3854                 if (!ftrace_pages)
3855                         goto out;
3856 
3857                 if (WARN_ON(ftrace_pages->next)) {
3858                         /* Hmm, we have free pages? */
3859                         while (ftrace_pages->next)
3860                                 ftrace_pages = ftrace_pages->next;
3861                 }
3862 
3863                 ftrace_pages->next = start_pg;
3864         }
3865 
3866         p = start;
3867         pg = start_pg;
3868         while (p < end) {
3869                 addr = ftrace_call_adjust(*p++);
3870                 /*
3871                  * Some architecture linkers will pad between
3872                  * the different mcount_loc sections of different
3873                  * object files to satisfy alignments.
3874                  * Skip any NULL pointers.
3875                  */
3876                 if (!addr)
3877                         continue;
3878 
3879                 if (pg->index == pg->size) {
3880                         /* We should have allocated enough */
3881                         if (WARN_ON(!pg->next))
3882                                 break;
3883                         pg = pg->next;
3884                 }
3885 
3886                 rec = &pg->records[pg->index++];
3887                 rec->ip = addr;
3888         }
3889 
3890         /* We should have used all pages */
3891         WARN_ON(pg->next);
3892 
3893         /* Assign the last page to ftrace_pages */
3894         ftrace_pages = pg;
3895 
3896         /* These new locations need to be initialized */
3897         ftrace_new_pgs = start_pg;
3898 
3899         /*
3900          * We only need to disable interrupts on start up
3901          * because we are modifying code that an interrupt
3902          * may execute, and the modification is not atomic.
3903          * But for modules, nothing runs the code we modify
3904          * until we are finished with it, and there's no
3905          * reason to cause large interrupt latencies while we do it.
3906          */
3907         if (!mod)
3908                 local_irq_save(flags);
3909         ftrace_update_code(mod);
3910         if (!mod)
3911                 local_irq_restore(flags);
3912         ret = 0;
3913  out:
3914         mutex_unlock(&ftrace_lock);
3915 
3916         return ret;
3917 }
3918 
3919 #ifdef CONFIG_MODULES
3920 
3921 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
3922 
3923 void ftrace_release_mod(struct module *mod)
3924 {
3925         struct dyn_ftrace *rec;
3926         struct ftrace_page **last_pg;
3927         struct ftrace_page *pg;
3928         int order;
3929 
3930         mutex_lock(&ftrace_lock);
3931 
3932         if (ftrace_disabled)
3933                 goto out_unlock;
3934 
3935         /*
3936          * Each module has its own ftrace_pages, remove
3937          * them from the list.
3938          */
3939         last_pg = &ftrace_pages_start;
3940         for (pg = ftrace_pages_start; pg; pg = *last_pg) {
3941                 rec = &pg->records[0];
3942                 if (within_module_core(rec->ip, mod)) {
3943                         /*
3944                          * As core pages are first, the first
3945                          * page should never be a module page.
3946                          */
3947                         if (WARN_ON(pg == ftrace_pages_start))
3948                                 goto out_unlock;
3949 
3950                         /* Check if we are deleting the last page */
3951                         if (pg == ftrace_pages)
3952                                 ftrace_pages = next_to_ftrace_page(last_pg);
3953 
3954                         *last_pg = pg->next;
3955                         order = get_count_order(pg->size / ENTRIES_PER_PAGE);
3956                         free_pages((unsigned long)pg->records, order);
3957                         kfree(pg);
3958                 } else
3959                         last_pg = &pg->next;
3960         }
3961  out_unlock:
3962         mutex_unlock(&ftrace_lock);
3963 }
3964 
3965 static void ftrace_init_module(struct module *mod,
3966                                unsigned long *start, unsigned long *end)
3967 {
3968         if (ftrace_disabled || start == end)
3969                 return;
3970         ftrace_process_locs(mod, start, end);
3971 }
3972 
3973 static int ftrace_module_notify_enter(struct notifier_block *self,
3974                                       unsigned long val, void *data)
3975 {
3976         struct module *mod = data;
3977 
3978         if (val == MODULE_STATE_COMING)
3979                 ftrace_init_module(mod, mod->ftrace_callsites,
3980                                    mod->ftrace_callsites +
3981                                    mod->num_ftrace_callsites);
3982         return 0;
3983 }
3984 
3985 static int ftrace_module_notify_exit(struct notifier_block *self,
3986                                      unsigned long val, void *data)
3987 {
3988         struct module *mod = data;
3989 
3990         if (val == MODULE_STATE_GOING)
3991                 ftrace_release_mod(mod);
3992 
3993         return 0;
3994 }
3995 #else
3996 static int ftrace_module_notify_enter(struct notifier_block *self,
3997                                       unsigned long val, void *data)
3998 {
3999         return 0;
4000 }
4001 static int ftrace_module_notify_exit(struct notifier_block *self,
4002                                      unsigned long val, void *data)
4003 {
4004         return 0;
4005 }
4006 #endif /* CONFIG_MODULES */
4007 
4008 struct notifier_block ftrace_module_enter_nb = {
4009         .notifier_call = ftrace_module_notify_enter,
4010         .priority = INT_MAX,    /* Run before anything that can use kprobes */
4011 };
4012 
4013 struct notifier_block ftrace_module_exit_nb = {
4014         .notifier_call = ftrace_module_notify_exit,
4015         .priority = INT_MIN,    /* Run after anything that can remove kprobes */
4016 };
4017 
4018 extern unsigned long __start_mcount_loc[];
4019 extern unsigned long __stop_mcount_loc[];
4020 
4021 void __init ftrace_init(void)
4022 {
4023         unsigned long count, addr, flags;
4024         int ret;
4025 
4026         /* Keep the ftrace pointer to the stub */
4027         addr = (unsigned long)ftrace_stub;
4028 
4029         local_irq_save(flags);
4030         ftrace_dyn_arch_init(&addr);
4031         local_irq_restore(flags);
4032 
4033         /* ftrace_dyn_arch_init places the return code in addr */
4034         if (addr)
4035                 goto failed;
4036 
4037         count = __stop_mcount_loc - __start_mcount_loc;
4038 
4039         ret = ftrace_dyn_table_alloc(count);
4040         if (ret)
4041                 goto failed;
4042 
4043         last_ftrace_enabled = ftrace_enabled = 1;
4044 
4045         ret = ftrace_process_locs(NULL,
4046                                   __start_mcount_loc,
4047                                   __stop_mcount_loc);
4048 
4049         ret = register_module_notifier(&ftrace_module_enter_nb);
4050         if (ret)
4051                 pr_warning("Failed to register trace ftrace module enter notifier\n");
4052 
4053         ret = register_module_notifier(&ftrace_module_exit_nb);
4054         if (ret)
4055                 pr_warning("Failed to register trace ftrace module exit notifier\n");
4056 
4057         set_ftrace_early_filters();
4058 
4059         return;
4060  failed:
4061         ftrace_disabled = 1;
4062 }
4063 
4064 #else
4065 
4066 static struct ftrace_ops global_ops = {
4067         .func                   = ftrace_stub,
4068         .flags                  = FTRACE_OPS_FL_RECURSION_SAFE,
4069 };
4070 
4071 static int __init ftrace_nodyn_init(void)
4072 {
4073         ftrace_enabled = 1;
4074         return 0;
4075 }
4076 core_initcall(ftrace_nodyn_init);
4077 
4078 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
4079 static inline void ftrace_startup_enable(int command) { }
4080 /* Keep as macros so we do not need to define the commands */
4081 # define ftrace_startup(ops, command)                   \
4082         ({                                              \
4083                 (ops)->flags |= FTRACE_OPS_FL_ENABLED;  \
4084                 0;                                      \
4085         })
4086 # define ftrace_shutdown(ops, command)  do { } while (0)
4087 # define ftrace_startup_sysctl()        do { } while (0)
4088 # define ftrace_shutdown_sysctl()       do { } while (0)
4089 
4090 static inline int
4091 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
4092 {
4093         return 1;
4094 }
4095 
4096 #endif /* CONFIG_DYNAMIC_FTRACE */
4097 
4098 static void
4099 ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
4100                         struct ftrace_ops *op, struct pt_regs *regs)
4101 {
4102         if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
4103                 return;
4104 
4105         /*
4106          * Some of the ops may be dynamically allocated,
4107          * they must be freed after a synchronize_sched().
4108          */
4109         preempt_disable_notrace();
4110         trace_recursion_set(TRACE_CONTROL_BIT);
4111         op = rcu_dereference_raw(ftrace_control_list);
4112         while (op != &ftrace_list_end) {
4113                 if (!ftrace_function_local_disabled(op) &&
4114                     ftrace_ops_test(op, ip))
4115                         op->func(ip, parent_ip, op, regs);
4116 
4117                 op = rcu_dereference_raw(op->next);
4118         };
4119         trace_recursion_clear(TRACE_CONTROL_BIT);
4120         preempt_enable_notrace();
4121 }
4122 
4123 static struct ftrace_ops control_ops = {
4124         .func = ftrace_ops_control_func,
4125         .flags = FTRACE_OPS_FL_RECURSION_SAFE,
4126 };
4127 
4128 static inline void
4129 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4130                        struct ftrace_ops *ignored, struct pt_regs *regs)
4131 {
4132         struct ftrace_ops *op;
4133 
4134         if (function_trace_stop)
4135                 return;
4136 
4137         if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
4138                 return;
4139 
4140         trace_recursion_set(TRACE_INTERNAL_BIT);
4141         /*
4142          * Some of the ops may be dynamically allocated,
4143          * they must be freed after a synchronize_sched().
4144          */
4145         preempt_disable_notrace();
4146         op = rcu_dereference_raw(ftrace_ops_list);
4147         while (op != &ftrace_list_end) {
4148                 if (ftrace_ops_test(op, ip))
4149                         op->func(ip, parent_ip, op, regs);
4150                 op = rcu_dereference_raw(op->next);
4151         };
4152         preempt_enable_notrace();
4153         trace_recursion_clear(TRACE_INTERNAL_BIT);
4154 }
4155 
4156 /*
4157  * Some archs only support passing ip and parent_ip. Even though
4158  * the list function ignores the op parameter, we do not want any
4159  * C side effects, where a function is called without the caller
4160  * sending a third parameter.
4161  * Archs are to support both the regs and ftrace_ops at the same time.
4162  * If they support ftrace_ops, it is assumed they support regs.
4163  * If call backs want to use regs, they must either check for regs
4164  * being NULL, or ARCH_SUPPORTS_FTRACE_SAVE_REGS.
4165  * Note, ARCH_SUPPORTS_FTRACE_SAVE_REGS expects a full regs to be saved.
4166  * An architecture can pass partial regs with ftrace_ops and still
4167  * set the ARCH_SUPPORTS_FTRACE_OPS.
4168  */
4169 #if ARCH_SUPPORTS_FTRACE_OPS
4170 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4171                                  struct ftrace_ops *op, struct pt_regs *regs)
4172 {
4173         __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
4174 }
4175 #else
4176 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
4177 {
4178         __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
4179 }
4180 #endif
4181 
4182 static void clear_ftrace_swapper(void)
4183 {
4184         struct task_struct *p;
4185         int cpu;
4186 
4187         get_online_cpus();
4188         for_each_online_cpu(cpu) {
4189                 p = idle_task(cpu);
4190                 clear_tsk_trace_trace(p);
4191         }
4192         put_online_cpus();
4193 }
4194 
4195 static void set_ftrace_swapper(void)
4196 {
4197         struct task_struct *p;
4198         int cpu;
4199 
4200         get_online_cpus();
4201         for_each_online_cpu(cpu) {
4202                 p = idle_task(cpu);
4203                 set_tsk_trace_trace(p);
4204         }
4205         put_online_cpus();
4206 }
4207 
4208 static void clear_ftrace_pid(struct pid *pid)
4209 {
4210         struct task_struct *p;
4211 
4212         rcu_read_lock();
4213         do_each_pid_task(pid, PIDTYPE_PID, p) {
4214                 clear_tsk_trace_trace(p);
4215         } while_each_pid_task(pid, PIDTYPE_PID, p);
4216         rcu_read_unlock();
4217 
4218         put_pid(pid);
4219 }
4220 
4221 static void set_ftrace_pid(struct pid *pid)
4222 {
4223         struct task_struct *p;
4224 
4225         rcu_read_lock();
4226         do_each_pid_task(pid, PIDTYPE_PID, p) {
4227                 set_tsk_trace_trace(p);
4228         } while_each_pid_task(pid, PIDTYPE_PID, p);
4229         rcu_read_unlock();
4230 }
4231 
4232 static void clear_ftrace_pid_task(struct pid *pid)
4233 {
4234         if (pid == ftrace_swapper_pid)
4235                 clear_ftrace_swapper();
4236         else
4237                 clear_ftrace_pid(pid);
4238 }
4239 
4240 static void set_ftrace_pid_task(struct pid *pid)
4241 {
4242         if (pid == ftrace_swapper_pid)
4243                 set_ftrace_swapper();
4244         else
4245                 set_ftrace_pid(pid);
4246 }
4247 
4248 static int ftrace_pid_add(int p)
4249 {
4250         struct pid *pid;
4251         struct ftrace_pid *fpid;
4252         int ret = -EINVAL;
4253 
4254         mutex_lock(&ftrace_lock);
4255 
4256         if (!p)
4257                 pid = ftrace_swapper_pid;
4258         else
4259                 pid = find_get_pid(p);
4260 
4261         if (!pid)
4262                 goto out;
4263 
4264         ret = 0;
4265 
4266         list_for_each_entry(fpid, &ftrace_pids, list)
4267                 if (fpid->pid == pid)
4268                         goto out_put;
4269 
4270         ret = -ENOMEM;
4271 
4272         fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
4273         if (!fpid)
4274                 goto out_put;
4275 
4276         list_add(&fpid->list, &ftrace_pids);
4277         fpid->pid = pid;
4278 
4279         set_ftrace_pid_task(pid);
4280 
4281         ftrace_update_pid_func();
4282         ftrace_startup_enable(0);
4283 
4284         mutex_unlock(&ftrace_lock);
4285         return 0;
4286 
4287 out_put:
4288         if (pid != ftrace_swapper_pid)
4289                 put_pid(pid);
4290 
4291 out:
4292         mutex_unlock(&ftrace_lock);
4293         return ret;
4294 }
4295 
4296 static void ftrace_pid_reset(void)
4297 {
4298         struct ftrace_pid *fpid, *safe;
4299 
4300         mutex_lock(&ftrace_lock);
4301         list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
4302                 struct pid *pid = fpid->pid;
4303 
4304                 clear_ftrace_pid_task(pid);
4305 
4306                 list_del(&fpid->list);
4307                 kfree(fpid);
4308         }
4309 
4310         ftrace_update_pid_func();
4311         ftrace_startup_enable(0);
4312 
4313         mutex_unlock(&ftrace_lock);
4314 }
4315 
4316 static void *fpid_start(struct seq_file *m, loff_t *pos)
4317 {
4318         mutex_lock(&ftrace_lock);
4319 
4320         if (list_empty(&ftrace_pids) && (!*pos))
4321                 return (void *) 1;
4322 
4323         return seq_list_start(&ftrace_pids, *pos);
4324 }
4325 
4326 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
4327 {
4328         if (v == (void *)1)
4329                 return NULL;
4330 
4331         return seq_list_next(v, &ftrace_pids, pos);
4332 }
4333 
4334 static void fpid_stop(struct seq_file *m, void *p)
4335 {
4336         mutex_unlock(&ftrace_lock);
4337 }
4338 
4339 static int fpid_show(struct seq_file *m, void *v)
4340 {
4341         const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
4342 
4343         if (v == (void *)1) {
4344                 seq_printf(m, "no pid\n");
4345                 return 0;
4346         }
4347 
4348         if (fpid->pid == ftrace_swapper_pid)
4349                 seq_printf(m, "swapper tasks\n");
4350         else
4351                 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
4352 
4353         return 0;
4354 }
4355 
4356 static const struct seq_operations ftrace_pid_sops = {
4357         .start = fpid_start,
4358         .next = fpid_next,
4359         .stop = fpid_stop,
4360         .show = fpid_show,
4361 };
4362 
4363 static int
4364 ftrace_pid_open(struct inode *inode, struct file *file)
4365 {
4366         int ret = 0;
4367 
4368         if ((file->f_mode & FMODE_WRITE) &&
4369             (file->f_flags & O_TRUNC))
4370                 ftrace_pid_reset();
4371 
4372         if (file->f_mode & FMODE_READ)
4373                 ret = seq_open(file, &ftrace_pid_sops);
4374 
4375         return ret;
4376 }
4377 
4378 static ssize_t
4379 ftrace_pid_write(struct file *filp, const char __user *ubuf,
4380                    size_t cnt, loff_t *ppos)
4381 {
4382         char buf[64], *tmp;
4383         long val;
4384         int ret;
4385 
4386         if (cnt >= sizeof(buf))
4387                 return -EINVAL;
4388 
4389         if (copy_from_user(&buf, ubuf, cnt))
4390                 return -EFAULT;
4391 
4392         buf[cnt] = 0;
4393 
4394         /*
4395          * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
4396          * to clean the filter quietly.
4397          */
4398         tmp = strstrip(buf);
4399         if (strlen(tmp) == 0)
4400                 return 1;
4401 
4402         ret = kstrtol(tmp, 10, &val);
4403         if (ret < 0)
4404                 return ret;
4405 
4406         ret = ftrace_pid_add(val);
4407 
4408         return ret ? ret : cnt;
4409 }
4410 
4411 static int
4412 ftrace_pid_release(struct inode *inode, struct file *file)
4413 {
4414         if (file->f_mode & FMODE_READ)
4415                 seq_release(inode, file);
4416 
4417         return 0;
4418 }
4419 
4420 static const struct file_operations ftrace_pid_fops = {
4421         .open           = ftrace_pid_open,
4422         .write          = ftrace_pid_write,
4423         .read           = seq_read,
4424         .llseek         = ftrace_filter_lseek,
4425         .release        = ftrace_pid_release,
4426 };
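/*
 * Usage sketch for the "set_ftrace_pid" file registered in
 * ftrace_init_debugfs() below: writing a pid limits function tracing to that
 * task, writing 0 selects the idle (swapper) tasks, and truncating the file
 * clears the list (see ftrace_pid_open() and the comment in
 * ftrace_pid_write() above).
 *
 *      # echo 1234 > set_ftrace_pid
 *      # echo > set_ftrace_pid
 */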
4427 
4428 static __init int ftrace_init_debugfs(void)
4429 {
4430         struct dentry *d_tracer;
4431 
4432         d_tracer = tracing_init_dentry();
4433         if (!d_tracer)
4434                 return 0;
4435 
4436         ftrace_init_dyn_debugfs(d_tracer);
4437 
4438         trace_create_file("set_ftrace_pid", 0644, d_tracer,
4439                             NULL, &ftrace_pid_fops);
4440 
4441         ftrace_profile_debugfs(d_tracer);
4442 
4443         return 0;
4444 }
4445 fs_initcall(ftrace_init_debugfs);
4446 
4447 /**
4448  * ftrace_kill - kill ftrace
4449  *
4450  * This function should be used by panic code. It stops ftrace
4451  * but in a not so nice way: it simply disables ftrace and points the
4452  * trace callback back at the stub, without doing a clean shutdown.
4453  */
4454 void ftrace_kill(void)
4455 {
4456         ftrace_disabled = 1;
4457         ftrace_enabled = 0;
4458         clear_ftrace_function();
4459 }
4460 
4461 /**
4462  * ftrace_is_dead - Test if ftrace is dead or not.
4463  */
4464 int ftrace_is_dead(void)
4465 {
4466         return ftrace_disabled;
4467 }
4468 
4469 /**
4470  * register_ftrace_function - register a function for profiling
4471  * @ops - ops structure that holds the function for profiling.
4472  *
4473  * Register a function to be called by all functions in the
4474  * kernel.
4475  *
4476  * Note: @ops->func and all the functions it calls must be labeled
4477  *       with "notrace", otherwise it will go into a
4478  *       recursive loop.
4479  */
4480 int register_ftrace_function(struct ftrace_ops *ops)
4481 {
4482         int ret = -1;
4483 
4484         mutex_lock(&ftrace_lock);
4485 
4486         ret = __register_ftrace_function(ops);
4487         if (!ret)
4488                 ret = ftrace_startup(ops, 0);
4489 
4490         mutex_unlock(&ftrace_lock);
4491 
4492         return ret;
4493 }
4494 EXPORT_SYMBOL_GPL(register_ftrace_function);
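/*
 * Minimal caller sketch (all names are illustrative, not from this file):
 * the callback uses the same signature as the list/control functions above,
 * is marked notrace as the comment requires, and only bumps an atomic
 * counter.  Note that @regs may be NULL unless register saving is supported
 * and requested (see the ARCH_SUPPORTS_FTRACE_SAVE_REGS comment above).
 *
 *      static atomic_t my_hits = ATOMIC_INIT(0);
 *
 *      static void notrace my_trace_func(unsigned long ip,
 *                                        unsigned long parent_ip,
 *                                        struct ftrace_ops *op,
 *                                        struct pt_regs *regs)
 *      {
 *              atomic_inc(&my_hits);
 *      }
 *
 *      static struct ftrace_ops my_trace_ops = {
 *              .func  = my_trace_func,
 *              .flags = FTRACE_OPS_FL_RECURSION_SAFE,
 *      };
 *
 *      ret = register_ftrace_function(&my_trace_ops);
 *      ...
 *      unregister_ftrace_function(&my_trace_ops);
 */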
4495 
4496 /**
4497  * unregister_ftrace_function - unregister a function for profiling.
4498  * @ops - ops structure that holds the function to unregister
4499  *
4500  * Unregister a function that was added to be called by ftrace profiling.
4501  */
4502 int unregister_ftrace_function(struct ftrace_ops *ops)
4503 {
4504         int ret;
4505 
4506         mutex_lock(&ftrace_lock);
4507         ret = __unregister_ftrace_function(ops);
4508         if (!ret)
4509                 ftrace_shutdown(ops, 0);
4510         mutex_unlock(&ftrace_lock);
4511 
4512         return ret;
4513 }
4514 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
4515 
4516 int
4517 ftrace_enable_sysctl(struct ctl_table *table, int write,
4518                      void __user *buffer, size_t *lenp,
4519                      loff_t *ppos)
4520 {
4521         int ret = -ENODEV;
4522 
4523         mutex_lock(&ftrace_lock);
4524 
4525         if (unlikely(ftrace_disabled))
4526                 goto out;
4527 
4528         ret = proc_dointvec(table, write, buffer, lenp, ppos);
4529 
4530         if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
4531                 goto out;
4532 
4533         last_ftrace_enabled = !!ftrace_enabled;
4534 
4535         if (ftrace_enabled) {
4536 
4537                 ftrace_startup_sysctl();
4538 
4539                 /* we are starting ftrace again */
4540                 if (ftrace_ops_list != &ftrace_list_end)
4541                         update_ftrace_function();
4542 
4543         } else {
4544                 /* stopping ftrace calls (just send to ftrace_stub) */
4545                 ftrace_trace_function = ftrace_stub;
4546 
4547                 ftrace_shutdown_sysctl();
4548         }
4549 
4550  out:
4551         mutex_unlock(&ftrace_lock);
4552         return ret;
4553 }
4554 
4555 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4556 
4557 static int ftrace_graph_active;
4558 static struct notifier_block ftrace_suspend_notifier;
4559 
4560 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
4561 {
4562         return 0;
4563 }
4564 
4565 /* The callbacks that hook a function */
4566 trace_func_graph_ret_t ftrace_graph_return =
4567                         (trace_func_graph_ret_t)ftrace_stub;
4568 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
4569 
4570 /* Try to assign a return stack array to FTRACE_RETSTACK_ALLOC_SIZE tasks. */
4571 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
4572 {
4573         int i;
4574         int ret = 0;
4575         unsigned long flags;
4576         int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
4577         struct task_struct *g, *t;
4578 
4579         for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
4580                 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
4581                                         * sizeof(struct ftrace_ret_stack),
4582                                         GFP_KERNEL);
4583                 if (!ret_stack_list[i]) {
4584                         start = 0;
4585                         end = i;
4586                         ret = -ENOMEM;
4587                         goto free;
4588                 }
4589         }
4590 
4591         read_lock_irqsave(&tasklist_lock, flags);
4592         do_each_thread(g, t) {
4593                 if (start == end) {
4594                         ret = -EAGAIN;
4595                         goto unlock;
4596                 }
4597 
4598                 if (t->ret_stack == NULL) {
4599                         atomic_set(&t->tracing_graph_pause, 0);
4600                         atomic_set(&t->trace_overrun, 0);
4601                         t->curr_ret_stack = -1;
4602                         /* Make sure the tasks see the -1 first: */
4603                         smp_wmb();
4604                         t->ret_stack = ret_stack_list[start++];
4605                 }
4606         } while_each_thread(g, t);
4607 
4608 unlock:
4609         read_unlock_irqrestore(&tasklist_lock, flags);
4610 free:
4611         for (i = start; i < end; i++)
4612                 kfree(ret_stack_list[i]);
4613         return ret;
4614 }
4615 
4616 static void
4617 ftrace_graph_probe_sched_switch(void *ignore,
4618                         struct task_struct *prev, struct task_struct *next)
4619 {
4620         unsigned long long timestamp;
4621         int index;
4622 
4623         /*
4624          * Does the user want to count the time a function was asleep?
4625          * If so, do not update the time stamps.
4626          */
4627         if (trace_flags & TRACE_ITER_SLEEP_TIME)
4628                 return;
4629 
4630         timestamp = trace_clock_local();
4631 
4632         prev->ftrace_timestamp = timestamp;
4633 
4634         /* only process tasks that we timestamped */
4635         if (!next->ftrace_timestamp)
4636                 return;
4637 
4638         /*
4639          * Update all the counters in next to make up for the
4640          * time next was sleeping.
4641          */
4642         timestamp -= next->ftrace_timestamp;
4643 
4644         for (index = next->curr_ret_stack; index >= 0; index--)
4645                 next->ret_stack[index].calltime += timestamp;
4646 }
4647 
4648 /* Allocate a return stack for each task */
4649 static int start_graph_tracing(void)
4650 {
4651         struct ftrace_ret_stack **ret_stack_list;
4652         int ret, cpu;
4653 
4654         ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
4655                                 sizeof(struct ftrace_ret_stack *),
4656                                 GFP_KERNEL);
4657 
4658         if (!ret_stack_list)
4659                 return -ENOMEM;
4660 
4661         /* The cpu_boot init_task->ret_stack will never be freed */
4662         for_each_online_cpu(cpu) {
4663                 if (!idle_task(cpu)->ret_stack)
4664                         ftrace_graph_init_idle_task(idle_task(cpu), cpu);
4665         }
4666 
4667         do {
4668                 ret = alloc_retstack_tasklist(ret_stack_list);
4669         } while (ret == -EAGAIN);
4670 
4671         if (!ret) {
4672                 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4673                 if (ret)
4674                         pr_info("ftrace_graph: Couldn't activate tracepoint"
4675                                 " probe to kernel_sched_switch\n");
4676         }
4677 
4678         kfree(ret_stack_list);
4679         return ret;
4680 }
4681 
4682 /*
4683  * Hibernation protection.
4684  * The state of the current task is too unstable during
4685  * suspend/restore to disk. We want to protect against that.
4686  */
4687 static int
4688 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
4689                                                         void *unused)
4690 {
4691         switch (state) {
4692         case PM_HIBERNATION_PREPARE:
4693                 pause_graph_tracing();
4694                 break;
4695 
4696         case PM_POST_HIBERNATION:
4697                 unpause_graph_tracing();
4698                 break;
4699         }
4700         return NOTIFY_DONE;
4701 }
4702 
4703 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
4704                         trace_func_graph_ent_t entryfunc)
4705 {
4706         int ret = 0;
4707 
4708         mutex_lock(&ftrace_lock);
4709 
4710         /* we currently allow only one tracer registered at a time */
4711         if (ftrace_graph_active) {
4712                 ret = -EBUSY;
4713                 goto out;
4714         }
4715 
4716         ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
4717         register_pm_notifier(&ftrace_suspend_notifier);
4718 
4719         ftrace_graph_active++;
4720         ret = start_graph_tracing();
4721         if (ret) {
4722                 ftrace_graph_active--;
4723                 goto out;
4724         }
4725 
4726         ftrace_graph_return = retfunc;
4727         ftrace_graph_entry = entryfunc;
4728 
4729         ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
4730 
4731 out:
4732         mutex_unlock(&ftrace_lock);
4733         return ret;
4734 }
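/*
 * Sketch of a graph-tracer client (illustrative names, not from this file):
 * the entry callback decides whether the return of the current call should
 * be recorded (returning 0 skips it), and the return callback runs when a
 * recorded function exits.
 *
 *      static int my_graph_entry(struct ftrace_graph_ent *trace)
 *      {
 *              return 1;
 *      }
 *
 *      static void my_graph_return(struct ftrace_graph_ret *trace)
 *      {
 *      }
 *
 *      ret = register_ftrace_graph(my_graph_return, my_graph_entry);
 *      ...
 *      unregister_ftrace_graph();
 */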
4735 
4736 void unregister_ftrace_graph(void)
4737 {
4738         mutex_lock(&ftrace_lock);
4739 
4740         if (unlikely(!ftrace_graph_active))
4741                 goto out;
4742 
4743         ftrace_graph_active--;
4744         ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
4745         ftrace_graph_entry = ftrace_graph_entry_stub;
4746         ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
4747         unregister_pm_notifier(&ftrace_suspend_notifier);
4748         unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4749 
4750  out:
4751         mutex_unlock(&ftrace_lock);
4752 }
4753 
4754 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
4755 
4756 static void
4757 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
4758 {
4759         atomic_set(&t->tracing_graph_pause, 0);
4760         atomic_set(&t->trace_overrun, 0);
4761         t->ftrace_timestamp = 0;
4762         /* make curr_ret_stack visible before we add the ret_stack */
4763         smp_wmb();
4764         t->ret_stack = ret_stack;
4765 }
4766 
4767 /*
4768  * Allocate a return stack for the idle task. May be the first
4769  * time through, or it may be done by CPU hotplug online.
4770  */
4771 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
4772 {
4773         t->curr_ret_stack = -1;
4774         /*
4775          * The idle task has no parent, it either has its own
4776          * stack or no stack at all.
4777          */
4778         if (t->ret_stack)
4779                 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
4780 
4781         if (ftrace_graph_active) {
4782                 struct ftrace_ret_stack *ret_stack;
4783 
4784                 ret_stack = per_cpu(idle_ret_stack, cpu);
4785                 if (!ret_stack) {
4786                         ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4787                                             * sizeof(struct ftrace_ret_stack),
4788                                             GFP_KERNEL);
4789                         if (!ret_stack)
4790                                 return;
4791                         per_cpu(idle_ret_stack, cpu) = ret_stack;
4792                 }
4793                 graph_init_task(t, ret_stack);
4794         }
4795 }
4796 
4797 /* Allocate a return stack for newly created task */
4798 void ftrace_graph_init_task(struct task_struct *t)
4799 {
4800         /* Make sure we do not use the parent ret_stack */
4801         t->ret_stack = NULL;
4802         t->curr_ret_stack = -1;
4803 
4804         if (ftrace_graph_active) {
4805                 struct ftrace_ret_stack *ret_stack;
4806 
4807                 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4808                                 * sizeof(struct ftrace_ret_stack),
4809                                 GFP_KERNEL);
4810                 if (!ret_stack)
4811                         return;
4812                 graph_init_task(t, ret_stack);
4813         }
4814 }
4815 
4816 void ftrace_graph_exit_task(struct task_struct *t)
4817 {
4818         struct ftrace_ret_stack *ret_stack = t->ret_stack;
4819 
4820         t->ret_stack = NULL;
4821         /* NULL must become visible to IRQs before we free it: */
4822         barrier();
4823 
4824         kfree(ret_stack);
4825 }
4826 
4827 void ftrace_graph_stop(void)
4828 {
4829         ftrace_stop();
4830 }
4831 #endif
4832 
