/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Ftrace header.  For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.rst
 */

#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/trace_clock.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/ftrace.h>

/*
 * If the arch supports passing the variable contents of
 * function_trace_op as the third parameter back from the
 * mcount call, then the arch should define this as 1.
 */
#ifndef ARCH_SUPPORTS_FTRACE_OPS
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif

/*
 * If the arch's mcount caller does not support all of ftrace's
 * features, then it must call an indirect function that
 * does. Or at least does enough to prevent any unwelcome side effects.
 */
#if !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
#else
# define FTRACE_FORCE_LIST_FUNC 0
#endif

/* Main tracing buffer and events set up */
#ifdef CONFIG_TRACING
void trace_init(void);
void early_trace_init(void);
#else
static inline void trace_init(void) { }
static inline void early_trace_init(void) { }
#endif

struct module;
struct ftrace_hash;

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
        defined(CONFIG_DYNAMIC_FTRACE)
const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
                   unsigned long *off, char **modname, char *sym);
int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
                           char *type, char *name,
                           char *module_name, int *exported);
#else
static inline const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
                   unsigned long *off, char **modname, char *sym)
{
        return NULL;
}
static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
                                         char *type, char *name,
                                         char *module_name, int *exported)
{
        return -1;
}
#endif


#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
                     void __user *buffer, size_t *lenp,
                     loff_t *ppos);

struct ftrace_ops;

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
                              struct ftrace_ops *op, struct pt_regs *regs);

ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);

/*
 * FTRACE_OPS_FL_* bits denote the state of the ftrace_ops struct and are
 * set in the flags member.
 * SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and
 * IPMODIFY are attribute flags that can only be set before
 * registering the ftrace_ops, and cannot be modified while registered.
 * Changing those attribute flags after registering the ftrace_ops will
 * cause unexpected results.
 *
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * DYNAMIC - set when ftrace_ops is registered to denote a dynamically
 *           allocated ftrace_ops which needs special care
 * SAVE_REGS - The ftrace_ops wants regs saved at each traced function call
 *            and passed to the callback. If this flag is set, but the
 *            architecture does not support passing regs
 *            (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
 *            ftrace_ops will fail to register, unless the next flag
 *            is set.
 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
 *            handler can handle an arch that does not save regs
 *            (the handler tests if regs == NULL), then it can set
 *            this flag instead. It will not fail registering the ftrace_ops,
 *            but the regs field will be NULL if the arch does not support
 *            passing regs to the handler.
 *            Note, if this flag is set, the SAVE_REGS flag will automatically
 *            get set upon registering the ftrace_ops, if the arch supports it.
 * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
 *            that the callback has its own recursion protection. If it does
 *            not set this, then the ftrace infrastructure will add recursion
 *            protection for the caller.
 * STUB   - The ftrace_ops is just a placeholder.
 * INITIALIZED - The ftrace_ops has already been initialized (the first time
 *            register_ftrace_function() is called, it will initialize the ops)
 * DELETED - The ops are being deleted, do not let them be registered again.
 * ADDING  - The ops is in the process of being added.
 * REMOVING - The ops is in the process of being removed.
 * MODIFYING - The ops is in the process of changing its filter functions.
 * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
 *            The arch specific code sets this flag when it allocates a
 *            trampoline. This lets the arch know that it can update the
 *            trampoline in case the callback function changes.
 *            The ftrace_ops trampoline can be set by the ftrace users, and
 *            in such cases the arch must not modify it. Only the arch ftrace
 *            core code should set this flag.
 * IPMODIFY - The ops can modify the IP register. This can only be set with
 *            SAVE_REGS. If another ops with this flag set is already registered
 *            for any of the functions that this ops will be registered for, then
 *            this ops will fail to register or set_filter_ip.
 * PID     - Is affected by set_ftrace_pid (allows filtering on those pids)
 * RCU     - Set when the ops can only be called when RCU is watching.
 * TRACE_ARRAY - The ops->private points to a trace_array descriptor.
 */
enum {
        FTRACE_OPS_FL_ENABLED                   = 1 << 0,
        FTRACE_OPS_FL_DYNAMIC                   = 1 << 1,
        FTRACE_OPS_FL_SAVE_REGS                 = 1 << 2,
        FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED    = 1 << 3,
        FTRACE_OPS_FL_RECURSION_SAFE            = 1 << 4,
        FTRACE_OPS_FL_STUB                      = 1 << 5,
        FTRACE_OPS_FL_INITIALIZED               = 1 << 6,
        FTRACE_OPS_FL_DELETED                   = 1 << 7,
        FTRACE_OPS_FL_ADDING                    = 1 << 8,
        FTRACE_OPS_FL_REMOVING                  = 1 << 9,
        FTRACE_OPS_FL_MODIFYING                 = 1 << 10,
        FTRACE_OPS_FL_ALLOC_TRAMP               = 1 << 11,
        FTRACE_OPS_FL_IPMODIFY                  = 1 << 12,
        FTRACE_OPS_FL_PID                       = 1 << 13,
        FTRACE_OPS_FL_RCU                       = 1 << 14,
        FTRACE_OPS_FL_TRACE_ARRAY               = 1 << 15,
};
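
/*
 * Example (an illustrative sketch, not kernel code): a callback registered
 * with SAVE_REGS_IF_SUPPORTED must tolerate regs == NULL on arches that
 * cannot pass pt_regs, as described above.  The names my_callback and
 * my_ops are hypothetical:
 *
 *      static void my_callback(unsigned long ip, unsigned long parent_ip,
 *                              struct ftrace_ops *op, struct pt_regs *regs)
 *      {
 *              if (regs)
 *                      pr_debug("hit %ps, pc=%lx\n", (void *)ip,
 *                               instruction_pointer(regs));
 *              else
 *                      pr_debug("hit %ps (no regs)\n", (void *)ip);
 *      }
 *
 *      static struct ftrace_ops my_ops __read_mostly = {
 *              .func   = my_callback,
 *              .flags  = FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
 *      };
 */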

#ifdef CONFIG_DYNAMIC_FTRACE
/* The hashes used to determine which functions a callback traces */
struct ftrace_ops_hash {
        struct ftrace_hash __rcu        *notrace_hash;
        struct ftrace_hash __rcu        *filter_hash;
        struct mutex                    regex_lock;
};

void ftrace_free_init_mem(void);
void ftrace_free_mem(struct module *mod, void *start, void *end);
#else
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif

/*
 * Note, ftrace_ops can be referenced outside of RCU protection, unless
 * the RCU flag is set. If a ftrace_ops is allocated and not part of kernel
 * core data, unregistering it will schedule on every CPU to make sure
 * that there are no more users. Depending on the load of the system,
 * that may take a bit of time.
 *
 * Any attached private data must likewise not be freed while the ops may
 * still be in use; if private data is added to a ftrace_ops that is in
 * core code, the user of the ftrace_ops must perform a
 * schedule_on_each_cpu() before freeing it.
 */
struct ftrace_ops {
        ftrace_func_t                   func;
        struct ftrace_ops __rcu         *next;
        unsigned long                   flags;
        void                            *private;
        ftrace_func_t                   saved_func;
#ifdef CONFIG_DYNAMIC_FTRACE
        struct ftrace_ops_hash          local_hash;
        struct ftrace_ops_hash          *func_hash;
        struct ftrace_ops_hash          old_hash;
        unsigned long                   trampoline;
        unsigned long                   trampoline_size;
#endif
};

/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
        FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
        FTRACE_TYPE_RETURN,     /* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;

/*
 * The ftrace_ops must be static and should also
 * be read_mostly.  These functions do modify read_mostly variables
 * so use them sparingly. Never free a ftrace_ops or modify the
 * next pointer after it has been registered. Even after unregistering
 * it, the next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
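
/*
 * Example (illustrative sketch): registering and unregistering the
 * hypothetical my_ops from the earlier example out of module init/exit,
 * honoring the rules in the preceding comment (the ops is static and is
 * never freed):
 *
 *      static int __init my_trace_init(void)
 *      {
 *              return register_ftrace_function(&my_ops);
 *      }
 *
 *      static void __exit my_trace_exit(void)
 *      {
 *              unregister_ftrace_function(&my_ops);
 *      }
 */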

extern void ftrace_stub(unsigned long a0, unsigned long a1,
                        struct ftrace_ops *op, struct pt_regs *regs);

#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline int ftrace_nr_registered_ops(void)
{
        return 0;
}
static inline void ftrace_kill(void) { }
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_STACK_TRACER

#define STACK_TRACE_ENTRIES 500

struct stack_trace;

extern unsigned stack_trace_index[];
extern struct stack_trace stack_trace_max;
extern unsigned long stack_trace_max_size;
extern arch_spinlock_t stack_trace_max_lock;

extern int stack_tracer_enabled;
void stack_trace_print(void);
int
stack_trace_sysctl(struct ctl_table *table, int write,
                   void __user *buffer, size_t *lenp,
                   loff_t *ppos);

/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
DECLARE_PER_CPU(int, disable_stack_tracer);

/**
 * stack_tracer_disable - temporarily disable the stack tracer
 *
 * There are a few locations (namely in RCU) where stack tracing
 * cannot be executed. This function is used to disable stack
 * tracing during those critical sections.
 *
 * This function must be called with preemption or interrupts
 * disabled, and stack_tracer_enable() must be called shortly after
 * while preemption or interrupts are still disabled.
 */
static inline void stack_tracer_disable(void)
{
        /* Preemption or interrupts must be disabled */
        if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
                WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
        this_cpu_inc(disable_stack_tracer);
}

/**
 * stack_tracer_enable - re-enable the stack tracer
 *
 * After stack_tracer_disable() is called, stack_tracer_enable()
 * must be called shortly afterward.
 */
static inline void stack_tracer_enable(void)
{
        if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
                WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
        this_cpu_dec(disable_stack_tracer);
}
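
/*
 * Example (illustrative sketch): the disable/enable pair must be wrapped
 * in a region where preemption (or interrupts) stays disabled across
 * both calls, per the kerneldoc above:
 *
 *      preempt_disable_notrace();
 *      stack_tracer_disable();
 *      ... code that must not be stack traced ...
 *      stack_tracer_enable();
 *      preempt_enable_notrace();
 */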
#else
static inline void stack_tracer_disable(void) { }
static inline void stack_tracer_enable(void) { }
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);

struct dyn_ftrace;

enum ftrace_bug_type {
        FTRACE_BUG_UNKNOWN,
        FTRACE_BUG_INIT,
        FTRACE_BUG_NOP,
        FTRACE_BUG_CALL,
        FTRACE_BUG_UPDATE,
};
extern enum ftrace_bug_type ftrace_bug_type;

/*
 * Archs can set this to point to a variable that holds the value that was
 * expected at the call site before calling ftrace_bug().
 */
extern const void *ftrace_expected;

void ftrace_bug(int err, struct dyn_ftrace *rec);

struct seq_file;

extern int ftrace_text_reserved(const void *start, const void *end);

extern int ftrace_nr_registered_ops(void);

struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);

bool is_ftrace_trampoline(unsigned long addr);

/*
 * The dyn_ftrace record's flags field is split into two parts.
 * The first part, which holds the values 0..FTRACE_REF_MAX, is a counter
 * of the number of callbacks that have registered the function that
 * the dyn_ftrace descriptor represents.
 *
 * The second part is a mask:
 *  ENABLED - the function is being traced
 *  REGS    - the record wants the function to save regs
 *  REGS_EN - the function is set up to save regs.
 *  IPMODIFY - the record allows for the IP address to be changed.
 *  DISABLED - the record is not ready to be touched yet
 *
 * When a new ftrace_ops is registered and wants a function to save
 * pt_regs, the rec->flags REGS bit is set. When the function has been
 * set up to save regs, the REGS_EN flag is set. Once a function
 * starts saving regs it will do so until all ftrace_ops are removed
 * from tracing that function.
 */
enum {
        FTRACE_FL_ENABLED       = (1UL << 31),
        FTRACE_FL_REGS          = (1UL << 30),
        FTRACE_FL_REGS_EN       = (1UL << 29),
        FTRACE_FL_TRAMP         = (1UL << 28),
        FTRACE_FL_TRAMP_EN      = (1UL << 27),
        FTRACE_FL_IPMODIFY      = (1UL << 26),
        FTRACE_FL_DISABLED      = (1UL << 25),
};

#define FTRACE_REF_MAX_SHIFT    25
#define FTRACE_FL_BITS          7
#define FTRACE_FL_MASKED_BITS   ((1UL << FTRACE_FL_BITS) - 1)
#define FTRACE_FL_MASK          (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
#define FTRACE_REF_MAX          ((1UL << FTRACE_REF_MAX_SHIFT) - 1)

#define ftrace_rec_count(rec)   ((rec)->flags & ~FTRACE_FL_MASK)

struct dyn_ftrace {
        unsigned long           ip; /* address of mcount call-site */
        unsigned long           flags;
        struct dyn_arch_ftrace  arch;
};
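
/*
 * Example (illustrative sketch): decoding a record's flags field per the
 * refcount/mask split described above; show_rec() is a hypothetical
 * helper, not kernel API:
 *
 *      static void show_rec(struct dyn_ftrace *rec)
 *      {
 *              pr_debug("%ps: %lu callback(s)%s%s\n",
 *                       (void *)rec->ip, ftrace_rec_count(rec),
 *                       rec->flags & FTRACE_FL_ENABLED ? " [enabled]" : "",
 *                       rec->flags & FTRACE_FL_REGS ? " [regs]" : "");
 *      }
 */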

int ftrace_force_update(void);
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
                         int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
                       int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
                        int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);
void ftrace_ops_set_global_filter(struct ftrace_ops *ops);
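
/*
 * Example (illustrative sketch): limiting the hypothetical my_ops from
 * the earlier examples to a couple of functions before registering it.
 * The chosen function names are assumptions made for the example:
 *
 *      static unsigned char buf[] = "vfs_read";
 *
 *      ftrace_set_filter(&my_ops, buf, sizeof(buf) - 1, 1);
 *      ftrace_set_filter_ip(&my_ops, (unsigned long)vfs_write, 0, 0);
 *      register_ftrace_function(&my_ops);
 */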

enum {
        FTRACE_UPDATE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
        FTRACE_START_FUNC_RET           = (1 << 3),
        FTRACE_STOP_FUNC_RET            = (1 << 4),
};

/*
 * The FTRACE_UPDATE_* enum is used to pass information back
 * from the ftrace_update_record() and ftrace_test_record()
 * functions. These are called by the code update routines
 * to find out what is to be done for a given function.
 *
 *  IGNORE           - The function is already what we want it to be
 *  MAKE_CALL        - Start tracing the function
 *  MODIFY_CALL      - Change the call site to call a different trampoline
 *                     (e.g. to start or stop saving regs)
 *  MAKE_NOP         - Stop tracing the function
 */
enum {
        FTRACE_UPDATE_IGNORE,
        FTRACE_UPDATE_MAKE_CALL,
        FTRACE_UPDATE_MODIFY_CALL,
        FTRACE_UPDATE_MAKE_NOP,
};

enum {
        FTRACE_ITER_FILTER      = (1 << 0),
        FTRACE_ITER_NOTRACE     = (1 << 1),
        FTRACE_ITER_PRINTALL    = (1 << 2),
        FTRACE_ITER_DO_PROBES   = (1 << 3),
        FTRACE_ITER_PROBE       = (1 << 4),
        FTRACE_ITER_MOD         = (1 << 5),
        FTRACE_ITER_ENABLED     = (1 << 6),
};

void arch_ftrace_update_code(int command);

struct ftrace_rec_iter;

struct ftrace_rec_iter *ftrace_rec_iter_start(void);
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);

#define for_ftrace_rec_iter(iter)               \
        for (iter = ftrace_rec_iter_start();    \
             iter;                              \
             iter = ftrace_rec_iter_next(iter))

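/*
 * Example (illustrative sketch): how an arch's code update loop might walk
 * all records and act on the FTRACE_UPDATE_* result. The actual
 * implementations live in kernel/trace/ftrace.c and the arch code:
 *
 *      struct ftrace_rec_iter *iter;
 *      struct dyn_ftrace *rec;
 *
 *      for_ftrace_rec_iter(iter) {
 *              rec = ftrace_rec_iter_record(iter);
 *              switch (ftrace_update_record(rec, enable)) {
 *              case FTRACE_UPDATE_IGNORE:
 *                      break;
 *              case FTRACE_UPDATE_MAKE_CALL:
 *                      ftrace_make_call(rec, ftrace_get_addr_new(rec));
 *                      break;
 *              case FTRACE_UPDATE_MAKE_NOP:
 *                      ftrace_make_nop(NULL, rec, ftrace_get_addr_curr(rec));
 *                      break;
 *              case FTRACE_UPDATE_MODIFY_CALL:
 *                      ftrace_modify_call(rec, ftrace_get_addr_curr(rec),
 *                                         ftrace_get_addr_new(rec));
 *                      break;
 *              }
 *      }
 */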

int ftrace_update_record(struct dyn_ftrace *rec, int enable);
int ftrace_test_record(struct dyn_ftrace *rec, int enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
unsigned long ftrace_location_range(unsigned long start, unsigned long end);
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);

extern ftrace_func_t ftrace_trace_function;

int ftrace_regex_open(struct ftrace_ops *ops, int flag,
                  struct inode *inode, struct file *file);
ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
                            size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
                             size_t cnt, loff_t *ppos);
int ftrace_regex_release(struct inode *inode, struct file *file);

void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
extern void ftrace_regs_call(void);
extern void mcount_call(void);

void ftrace_modify_all_code(int command);

#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif

#ifndef FTRACE_GRAPH_ADDR
#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
#endif

#ifndef FTRACE_REGS_ADDR
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
#else
# define FTRACE_REGS_ADDR FTRACE_ADDR
#endif
#endif

/*
 * If an arch would like functions that are only traced
 * by the function graph tracer to jump directly to its own
 * trampoline, then it can define FTRACE_GRAPH_TRAMP_ADDR
 * as that address to jump to.
 */
#ifndef FTRACE_GRAPH_TRAMP_ADDR
#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif

/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the mcount call site record
 * @addr: the address that the call site is expected to be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
                           struct dyn_ftrace *rec, unsigned long addr);
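
/*
 * Example (illustrative sketch): the careful read/compare/write pattern
 * the comment above asks of the arch converters, shown for a hypothetical
 * arch whose call instruction is MCOUNT_INSN_SIZE bytes.  Here
 * arch_write_text() is an assumed helper, not real kernel API:
 *
 *      static int my_arch_modify(unsigned long ip, const void *old_insn,
 *                                const void *new_insn)
 *      {
 *              unsigned char cur[MCOUNT_INSN_SIZE];
 *
 *              if (probe_kernel_read(cur, (void *)ip, MCOUNT_INSN_SIZE))
 *                      return -EFAULT;
 *              if (memcmp(cur, old_insn, MCOUNT_INSN_SIZE))
 *                      return -EINVAL;
 *              if (arch_write_text((void *)ip, new_insn, MCOUNT_INSN_SIZE))
 *                      return -EPERM;
 *              return 0;
 *      }
 */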

/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the mcount call site record
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/**
 * ftrace_modify_call - convert from one addr to another (no nop)
 * @rec: the mcount call site record
 * @old_addr: the address that the call site is expected to currently call
 * @addr: the address to change to
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @old_addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                              unsigned long addr);
#else
/* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                                     unsigned long addr)
{
        return -EINVAL;
}
#endif

/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);
extern void ftrace_module_enable(struct module *mod);
extern void ftrace_release_mod(struct module *mod);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_module_init(struct module *mod) { }
static inline void ftrace_module_enable(struct module *mod) { }
static inline void ftrace_release_mod(struct module *mod) { }
static inline int ftrace_text_reserved(const void *start, const void *end)
{
        return 0;
}
static inline unsigned long ftrace_location(unsigned long ip)
{
        return 0;
}

/*
 * Again, users of functions that have ftrace_ops may not
 * have them defined when ftrace is not enabled, but these
 * functions may still be called. Use a macro instead of an inline.
 */
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)
#define ftrace_ops_set_global_filter(ops) do { } while (0)

static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
                            size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
                             size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }

static inline bool is_ftrace_trampoline(unsigned long addr)
{
        return false;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

/* totally disable ftrace - cannot be re-enabled after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
        ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable and restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
        int saved_ftrace_enabled = ftrace_enabled;
        ftrace_enabled = 0;
        return saved_ftrace_enabled;
#else
        return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
        ftrace_enabled = enabled;
#endif
}

/* All archs should have this, but we define it for consistency */
#ifndef ftrace_return_address0
# define ftrace_return_address0 __builtin_return_address(0)
#endif

/* Archs may use other ways for ADDR1 and beyond */
#ifndef ftrace_return_address
# ifdef CONFIG_FRAME_POINTER
#  define ftrace_return_address(n) __builtin_return_address(n)
# else
#  define ftrace_return_address(n) 0UL
# endif
#endif

#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))

static inline unsigned long get_lock_parent_ip(void)
{
        unsigned long addr = CALLER_ADDR0;

        if (!in_lock_functions(addr))
                return addr;
        addr = CALLER_ADDR1;
        if (!in_lock_functions(addr))
                return addr;
        return CALLER_ADDR2;
}

#ifdef CONFIG_IRQSOFF_TRACER
  extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
  extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
#else
  static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
  static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif

#if defined(CONFIG_PREEMPT_TRACER) || \
        (defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
  extern void trace_preempt_on(unsigned long a0, unsigned long a1);
  extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
/*
 * Use defines instead of static inlines because some arches will generate
 * code for the CALLER_ADDR macros, when we really want these to be a real nop.
 */
# define trace_preempt_on(a0, a1) do { } while (0)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#else
static inline void ftrace_init(void) { }
#endif

/*
 * Structure that defines an entry function trace.
 * It's already packed, but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ent {
        unsigned long func; /* Current function */
        int depth;
} __packed;

/*
 * Structure that defines a return function trace.
 * It's already packed, but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ret {
        unsigned long func; /* Current function */
        /* Number of functions that overran the depth limit for current task */
        unsigned long overrun;
        unsigned long long calltime;
        unsigned long long rettime;
        int depth;
} __packed;

/* Types of the callback handlers for tracing the function graph */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
        unsigned long ret;
        unsigned long func;
        unsigned long long calltime;
#ifdef CONFIG_FUNCTION_PROFILER
        unsigned long long subtime;
#endif
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
        unsigned long fp;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
        unsigned long *retp;
#endif
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
                         unsigned long frame_pointer, unsigned long *retp);

unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
                                    unsigned long ret, unsigned long *retp);

/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer, but we still want it traced by the usual function
 * tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph             notrace

#define FTRACE_NOTRACE_DEPTH 65536
#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32
extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
                                trace_func_graph_ent_t entryfunc);
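
/*
 * Example (illustrative sketch): a pair of hypothetical graph handlers.
 * The entry handler returns nonzero to trace the function; the return
 * handler sees calltime/rettime filled in:
 *
 *      static int my_graph_entry(struct ftrace_graph_ent *trace)
 *      {
 *              return trace->depth < 3;        /* only trace shallow calls */
 *      }
 *
 *      static void my_graph_return(struct ftrace_graph_ret *trace)
 *      {
 *              pr_debug("%ps took %llu ns\n", (void *)trace->func,
 *                       trace->rettime - trace->calltime);
 *      }
 *
 *      ...
 *      register_ftrace_graph(my_graph_return, my_graph_entry);
 *      ...
 *      unregister_ftrace_graph();
 */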

extern bool ftrace_graph_is_dead(void);
extern void ftrace_graph_stop(void);

/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void unregister_ftrace_graph(void);

extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);

static inline int task_curr_ret_stack(struct task_struct *t)
{
        return t->curr_ret_stack;
}

static inline void pause_graph_tracing(void)
{
        atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
        atomic_dec(&current->tracing_graph_pause);
}
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */

#define __notrace_funcgraph

static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }

static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
                          trace_func_graph_ent_t entryfunc)
{
        return -1;
}
static inline void unregister_ftrace_graph(void) { }

static inline int task_curr_ret_stack(struct task_struct *tsk)
{
        return -1;
}

static inline unsigned long
ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
                      unsigned long *retp)
{
        return ret;
}

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_TRACING

/* flags for current->trace */
enum {
        TSK_TRACE_FL_TRACE_BIT  = 0,
        TSK_TRACE_FL_GRAPH_BIT  = 1,
};
enum {
        TSK_TRACE_FL_TRACE      = 1 << TSK_TRACE_FL_TRACE_BIT,
        TSK_TRACE_FL_GRAPH      = 1 << TSK_TRACE_FL_GRAPH_BIT,
};

static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
        set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
        clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
        return tsk->trace & TSK_TRACE_FL_TRACE;
}

static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
        set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
        clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
        return tsk->trace & TSK_TRACE_FL_GRAPH;
}

enum ftrace_dump_mode;

extern enum ftrace_dump_mode ftrace_dump_on_oops;
extern int tracepoint_printk;

extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;

int tracepoint_printk_sysctl(struct ctl_table *table, int write,
                             void __user *buffer, size_t *lenp,
                             loff_t *ppos);

#else /* CONFIG_TRACING */
static inline void disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */

#ifdef CONFIG_FTRACE_SYSCALLS

unsigned long arch_syscall_addr(int nr);

#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _LINUX_FTRACE_H */