TOMOYO Linux Cross Reference
Linux/include/linux/perf_event.h

/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <uapi/linux/perf_event.h>

/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
        int                             (*is_in_guest)(void);
        int                             (*is_user_mode)(void);
        unsigned long                   (*get_guest_ip)(void);
};

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/static_key.h>
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
#include <asm/local.h>

struct perf_callchain_entry {
        __u64                           nr;
        __u64                           ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
        u32                             size;
        void                            *data;
};
/*
 * single taken branch record layout:
 *
 *      from: source instruction (may not always be a branch insn)
 *        to: branch target
 *   mispred: branch target was mispredicted
 * predicted: branch target was predicted
 *
 * Support for mispred and predicted is optional; when it is
 * not supported, mispred = predicted = 0.
 *
 *     in_tx: running in a hardware transaction
 *     abort: aborting a hardware transaction
 */
struct perf_branch_entry {
        __u64   from;
        __u64   to;
        __u64   mispred:1,  /* target mispredicted */
                predicted:1,/* target predicted */
                in_tx:1,    /* in transaction */
                abort:1,    /* transaction abort */
                reserved:60;
};

/*
 * branch stack layout:
 *  nr: number of taken branches stored in entries[]
 *
 * Note that nr can vary from sample to sample.
 * Branches (from, to) are stored from most recent
 * to least recent, i.e., entries[0] contains the most
 * recent branch.
 */
struct perf_branch_stack {
        __u64                           nr;
        struct perf_branch_entry        entries[0];
};

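/*
 * Example (illustrative sketch, not part of this header): walking a
 * branch stack delivered with a sample, most recent entry first, as
 * described in the layout comment above. "show_branches" is a
 * hypothetical helper name.
 *
 *      static void show_branches(struct perf_branch_stack *bs)
 *      {
 *              __u64 i;
 *
 *              for (i = 0; i < bs->nr; i++) {
 *                      struct perf_branch_entry *br = &bs->entries[i];
 *
 *                      pr_info("branch %llu: %llx -> %llx%s\n", i,
 *                              br->from, br->to,
 *                              br->mispred ? " (mispredicted)" : "");
 *              }
 *      }
 */
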
struct perf_regs_user {
        __u64           abi;
        struct pt_regs  *regs;
};

struct task_struct;

/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
        u64             config; /* register value */
        unsigned int    reg;    /* register address or index */
        int             alloc;  /* extra register already allocated */
        int             idx;    /* index in shared_regs->regs[] */
};

struct event_constraint;

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
        union {
                struct { /* hardware */
                        u64             config;
                        u64             last_tag;
                        unsigned long   config_base;
                        unsigned long   event_base;
                        int             event_base_rdpmc;
                        int             idx;
                        int             last_cpu;
                        int             flags;

                        struct hw_perf_event_extra extra_reg;
                        struct hw_perf_event_extra branch_reg;

                        struct event_constraint *constraint;
                };
                struct { /* software */
                        struct hrtimer  hrtimer;
                };
                struct { /* tracepoint */
                        struct task_struct      *tp_target;
                        /* for tp_event->class */
                        struct list_head        tp_list;
                };
#ifdef CONFIG_HAVE_HW_BREAKPOINT
                struct { /* breakpoint */
                        /*
                         * Crufty hack to avoid the chicken and egg
                         * problem hw_breakpoint has with context
                         * creation and event initialization.
                         */
                        struct task_struct              *bp_target;
                        struct arch_hw_breakpoint       info;
                        struct list_head                bp_list;
                };
#endif
        };
        int                             state;
        local64_t                       prev_count;
        u64                             sample_period;
        u64                             last_period;
        local64_t                       period_left;
        u64                             interrupts_seq;
        u64                             interrupts;

        u64                             freq_time_stamp;
        u64                             freq_count_stamp;
#endif
};

/*
 * hw_perf_event::state flags
 */
#define PERF_HES_STOPPED        0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE       0x02 /* event->count up-to-date */
#define PERF_HES_ARCH           0x04

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_EVENT_TXN 0x1
/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
        struct list_head                entry;

        struct device                   *dev;
        const struct attribute_group    **attr_groups;
        const char                      *name;
        int                             type;

        int * __percpu                  pmu_disable_count;
        struct perf_cpu_context * __percpu pmu_cpu_context;
        int                             task_ctx_nr;
        int                             hrtimer_interval_ms;

        /*
         * Fully disable/enable this PMU; can be used to protect from the PMI
         * as well as for lazy/batch writing of the MSRs.
         */
        void (*pmu_enable)              (struct pmu *pmu); /* optional */
        void (*pmu_disable)             (struct pmu *pmu); /* optional */

        /*
         * Try and initialize the event for this PMU.
         * Should return -ENOENT when the @event doesn't match this PMU.
         */
        int (*event_init)               (struct perf_event *event);

#define PERF_EF_START   0x01            /* start the counter when adding    */
#define PERF_EF_RELOAD  0x02            /* reload the counter when starting */
#define PERF_EF_UPDATE  0x04            /* update the counter when stopping */

        /*
         * Adds/removes a counter to/from the PMU; this can be done inside
         * a transaction, see the ->*_txn() methods.
         */
        int  (*add)                     (struct perf_event *event, int flags);
        void (*del)                     (struct perf_event *event, int flags);

        /*
         * Starts/stops a counter present on the PMU. The PMI handler
         * should stop the counter when perf_event_overflow() returns
         * !0. ->start() will be used to continue.
         */
        void (*start)                   (struct perf_event *event, int flags);
        void (*stop)                    (struct perf_event *event, int flags);

        /*
         * Updates the counter value of the event.
         */
        void (*read)                    (struct perf_event *event);

        /*
         * Group event scheduling is treated as a transaction: add the
         * group's events as a whole and perform one schedulability test.
         * If the test fails, roll back the whole group.
         *
         * Start the transaction; after this, ->add() doesn't need to
         * do schedulability tests.
         */
        void (*start_txn)               (struct pmu *pmu); /* optional */
        /*
         * If ->start_txn() disabled the ->add() schedulability test
         * then ->commit_txn() is required to perform one. On success
         * the transaction is closed. On error the transaction is kept
         * open until ->cancel_txn() is called.
         */
        int  (*commit_txn)              (struct pmu *pmu); /* optional */
        /*
         * Will cancel the transaction; assumes ->del() is called
         * for each successful ->add() during the transaction.
         */
        void (*cancel_txn)              (struct pmu *pmu); /* optional */

        /*
         * Will return the value for perf_event_mmap_page::index for this
         * event; if no implementation is provided it will default to
         * event->hw.idx + 1.
         */
        int (*event_idx)                (struct perf_event *event); /* optional */

        /*
         * flush branch stack on context-switches (needed in cpu-wide mode)
         */
        void (*flush_branch_stack)      (void);
};
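
/*
 * Example (illustrative sketch only): the minimal shape of a software
 * PMU driver built on the callbacks above. The "my_" names are
 * hypothetical, error handling and the actual counting logic are
 * elided. A real driver implements event_init/add/del/start/stop/read
 * and then calls perf_pmu_register(); passing type == -1 asks the core
 * to allocate a dynamic type id.
 *
 *      static int my_event_init(struct perf_event *event)
 *      {
 *              if (event->attr.type != my_pmu.type)
 *                      return -ENOENT; (not ours, core tries the next PMU)
 *              return 0;
 *      }
 *
 *      static struct pmu my_pmu = {
 *              .task_ctx_nr    = perf_sw_context,
 *              .event_init     = my_event_init,
 *              .add            = my_add,   (start if flags & PERF_EF_START)
 *              .del            = my_del,
 *              .start          = my_start,
 *              .stop           = my_stop,
 *              .read           = my_read,
 *      };
 *
 *      perf_pmu_register(&my_pmu, "my_pmu", -1);
 */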

/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
        PERF_EVENT_STATE_ERROR          = -2,
        PERF_EVENT_STATE_OFF            = -1,
        PERF_EVENT_STATE_INACTIVE       =  0,
        PERF_EVENT_STATE_ACTIVE         =  1,
};

struct file;
struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *,
                                        struct perf_sample_data *,
                                        struct pt_regs *regs);

enum perf_group_flag {
        PERF_GROUP_SOFTWARE             = 0x1,
};

#define SWEVENT_HLIST_BITS              8
#define SWEVENT_HLIST_SIZE              (1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
        struct hlist_head               heads[SWEVENT_HLIST_SIZE];
        struct rcu_head                 rcu_head;
};

#define PERF_ATTACH_CONTEXT     0x01
#define PERF_ATTACH_GROUP       0x02
#define PERF_ATTACH_TASK        0x04

struct perf_cgroup;
struct ring_buffer;

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
        struct list_head                group_entry;
        struct list_head                event_entry;
        struct list_head                sibling_list;
        struct hlist_node               hlist_entry;
        int                             nr_siblings;
        int                             group_flags;
        struct perf_event               *group_leader;
        struct pmu                      *pmu;

        enum perf_event_active_state    state;
        unsigned int                    attach_state;
        local64_t                       count;
        atomic64_t                      child_count;

        /*
         * These are the total time in nanoseconds that the event
         * has been enabled (i.e. eligible to run, and the task has
         * been scheduled in, if this is a per-task event)
         * and running (scheduled onto the CPU), respectively.
         *
         * They are computed from tstamp_enabled, tstamp_running and
         * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
         */
        u64                             total_time_enabled;
        u64                             total_time_running;

        /*
         * These are timestamps used for computing total_time_enabled
         * and total_time_running when the event is in INACTIVE or
         * ACTIVE state, measured in nanoseconds from an arbitrary point
         * in time.
         * tstamp_enabled: the notional time when the event was enabled
         * tstamp_running: the notional time when the event was scheduled on
         * tstamp_stopped: in INACTIVE state, the notional time when the
         *      event was scheduled off.
         */
        u64                             tstamp_enabled;
        u64                             tstamp_running;
        u64                             tstamp_stopped;

        /*
         * timestamp shadows the actual context timing but it can
         * be safely used in NMI interrupt context. It reflects the
         * context time as it was when the event was last scheduled in.
         *
         * ctx_time already accounts for ctx->timestamp. Therefore to
         * compute ctx_time for a sample, simply add perf_clock().
         */
        u64                             shadow_ctx_time;

        struct perf_event_attr          attr;
        u16                             header_size;
        u16                             id_header_size;
        u16                             read_size;
        struct hw_perf_event            hw;

        struct perf_event_context       *ctx;
        atomic_long_t                   refcount;

        /*
         * These accumulate total time (in nanoseconds) that children
         * events have been enabled and running, respectively.
         */
        atomic64_t                      child_total_time_enabled;
        atomic64_t                      child_total_time_running;

        /*
         * Protect attach/detach and child_list:
         */
        struct mutex                    child_mutex;
        struct list_head                child_list;
        struct perf_event               *parent;

        int                             oncpu;
        int                             cpu;

        struct list_head                owner_entry;
        struct task_struct              *owner;

        /* mmap bits */
        struct mutex                    mmap_mutex;
        atomic_t                        mmap_count;

        struct ring_buffer              *rb;
        struct list_head                rb_entry;

        /* poll related */
        wait_queue_head_t               waitq;
        struct fasync_struct            *fasync;

        /* delayed work for NMIs and such */
        int                             pending_wakeup;
        int                             pending_kill;
        int                             pending_disable;
        struct irq_work                 pending;

        atomic_t                        event_limit;

        void (*destroy)(struct perf_event *);
        struct rcu_head                 rcu_head;

        struct pid_namespace            *ns;
        u64                             id;

        perf_overflow_handler_t         overflow_handler;
        void                            *overflow_handler_context;

#ifdef CONFIG_EVENT_TRACING
        struct ftrace_event_call        *tp_event;
        struct event_filter             *filter;
#ifdef CONFIG_FUNCTION_TRACER
        struct ftrace_ops               ftrace_ops;
#endif
#endif

#ifdef CONFIG_CGROUP_PERF
        struct perf_cgroup              *cgrp; /* cgroup the event is attached to */
        int                             cgrp_defer_enabled;
#endif

#endif /* CONFIG_PERF_EVENTS */
};

enum perf_event_context_type {
        task_context,
        cpu_context,
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
        struct pmu                      *pmu;
        enum perf_event_context_type    type;
        /*
         * Protect the states of the events in the list,
         * nr_active, and the list:
         */
        raw_spinlock_t                  lock;
        /*
         * Protect the list of events.  Locking either mutex or lock
         * is sufficient to ensure the list doesn't change; to change
         * the list you need to lock both the mutex and the spinlock.
         */
        struct mutex                    mutex;

        struct list_head                pinned_groups;
        struct list_head                flexible_groups;
        struct list_head                event_list;
        int                             nr_events;
        int                             nr_active;
        int                             is_active;
        int                             nr_stat;
        int                             nr_freq;
        int                             rotate_disable;
        atomic_t                        refcount;
        struct task_struct              *task;

        /*
         * Context clock, runs when context enabled.
         */
        u64                             time;
        u64                             timestamp;

        /*
         * These fields let us detect when two contexts have both
         * been cloned (inherited) from a common ancestor.
         */
        struct perf_event_context       *parent_ctx;
        u64                             parent_gen;
        u64                             generation;
        int                             pin_count;
        int                             nr_cgroups;      /* cgroup evts */
        int                             nr_branch_stack; /* branch_stack evt */
        struct rcu_head                 rcu_head;
};

/*
 * Number of contexts where an event can trigger:
 *      task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS        4

/**
 * struct perf_cpu_context - per-CPU event context structure
 */
struct perf_cpu_context {
        struct perf_event_context       ctx;
        struct perf_event_context       *task_ctx;
        int                             active_oncpu;
        int                             exclusive;
        struct hrtimer                  hrtimer;
        ktime_t                         hrtimer_interval;
        struct list_head                rotation_list;
        struct pmu                      *unique_pmu;
        struct perf_cgroup              *cgrp;
};

struct perf_output_handle {
        struct perf_event               *event;
        struct ring_buffer              *rb;
        unsigned long                   wakeup;
        unsigned long                   size;
        void                            *addr;
        int                             page;
};

#ifdef CONFIG_PERF_EVENTS

extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *prev,
                                       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
                                        struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
                                int cpu,
                                struct task_struct *task,
                                perf_overflow_handler_t callback,
                                void *context);
extern void perf_pmu_migrate_context(struct pmu *pmu,
                                int src_cpu, int dst_cpu);
extern u64 perf_event_read_value(struct perf_event *event,
                                 u64 *enabled, u64 *running);
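
/*
 * Example (illustrative sketch only): creating and reading an in-kernel
 * counting event with the interfaces declared above. cpu == 0 and
 * task == NULL bind the event to CPU 0; a NULL overflow callback is
 * fine for a pure counting event. Error handling is minimal.
 *
 *      struct perf_event_attr attr = {
 *              .type   = PERF_TYPE_HARDWARE,
 *              .config = PERF_COUNT_HW_CPU_CYCLES,
 *              .size   = sizeof(attr),
 *              .pinned = 1,
 *      };
 *      struct perf_event *event;
 *      u64 enabled, running, count;
 *
 *      event = perf_event_create_kernel_counter(&attr, 0, NULL,
 *                                               NULL, NULL);
 *      if (IS_ERR(event))
 *              return PTR_ERR(event);
 *      (workload runs here)
 *      count = perf_event_read_value(event, &enabled, &running);
 *      perf_event_release_kernel(event);
 */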

struct perf_sample_data {
        u64                             type;

        u64                             ip;
        struct {
                u32     pid;
                u32     tid;
        }                               tid_entry;
        u64                             time;
        u64                             addr;
        u64                             id;
        u64                             stream_id;
        struct {
                u32     cpu;
                u32     reserved;
        }                               cpu_entry;
        u64                             period;
        union  perf_mem_data_src        data_src;
        struct perf_callchain_entry     *callchain;
        struct perf_raw_record          *raw;
        struct perf_branch_stack        *br_stack;
        struct perf_regs_user           regs_user;
        u64                             stack_user_size;
        u64                             weight;
};

static inline void perf_sample_data_init(struct perf_sample_data *data,
                                         u64 addr, u64 period)
{
        /* remaining struct members initialized in perf_prepare_sample() */
        data->addr = addr;
        data->raw  = NULL;
        data->br_stack = NULL;
        data->period = period;
        data->regs_user.abi = PERF_SAMPLE_REGS_ABI_NONE;
        data->regs_user.regs = NULL;
        data->stack_user_size = 0;
        data->weight = 0;
        data->data_src.val = 0;
}

extern void perf_output_sample(struct perf_output_handle *handle,
                               struct perf_event_header *header,
                               struct perf_sample_data *data,
                               struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
                                struct perf_sample_data *data,
                                struct perf_event *event,
                                struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event,
                                 struct perf_sample_data *data,
                                 struct pt_regs *regs);
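
/*
 * Example (illustrative sketch only): the typical PMI-side pattern in a
 * PMU driver, combining perf_sample_data_init() with
 * perf_event_overflow(). As noted at pmu::stop() above, a !0 return
 * means the event should be stopped; "my_pmu_stop" stands in for the
 * driver's own stop routine (hypothetical name), and "event"/"regs"
 * come from the driver's interrupt handler.
 *
 *      struct perf_sample_data data;
 *      struct hw_perf_event *hwc = &event->hw;
 *
 *      perf_sample_data_init(&data, 0, hwc->last_period);
 *
 *      if (perf_event_overflow(event, &data, regs))
 *              my_pmu_stop(event, 0);
 */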

static inline bool is_sampling_event(struct perf_event *event)
{
        return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
        return event->pmu->task_ctx_nr == perf_sw_context;
}

extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * Take a snapshot of the regs. Skip ip and frame pointer to
 * the nth caller. We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
        memset(regs, 0, sizeof(*regs));

        perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
        struct pt_regs hot_regs;

        if (static_key_false(&perf_swevent_enabled[event_id])) {
                if (!regs) {
                        perf_fetch_caller_regs(&hot_regs);
                        regs = &hot_regs;
                }
                __perf_sw_event(event_id, nr, regs, addr);
        }
}
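
/*
 * Example: emitting a software event from kernel code. This is exactly
 * how the scheduler accounts context switches a few lines below; regs
 * may be NULL, in which case perf_sw_event() snapshots the caller's
 * registers via perf_fetch_caller_regs(), and the static key keeps the
 * fast path to a single branch when no such event exists.
 *
 *      perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
 */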

extern struct static_key_deferred perf_sched_events;

static inline void perf_event_task_sched_in(struct task_struct *prev,
                                            struct task_struct *task)
{
        if (static_key_false(&perf_sched_events.key))
                __perf_event_task_sched_in(prev, task);
}

static inline void perf_event_task_sched_out(struct task_struct *prev,
                                             struct task_struct *next)
{
        perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);

        if (static_key_false(&perf_sched_events.key))
                __perf_event_task_sched_out(prev, next);
}

extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
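
/*
 * Example (illustrative sketch): a hypervisor registers
 * perf_guest_info_callbacks (defined near the top of this file) so
 * samples taken while a guest is running can be attributed correctly;
 * this is the hook KVM uses. The "my_guest_*" names are hypothetical.
 *
 *      static struct perf_guest_info_callbacks my_guest_cbs = {
 *              .is_in_guest    = my_guest_is_in_guest,
 *              .is_user_mode   = my_guest_is_user_mode,
 *              .get_guest_ip   = my_guest_get_ip,
 *      };
 *
 *      perf_register_guest_info_callbacks(&my_guest_cbs);
 */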

extern void perf_event_comm(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);

static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
        if (entry->nr < PERF_MAX_STACK_DEPTH)
                entry->ip[entry->nr++] = ip;
}
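
/*
 * Example (illustrative sketch): the per-arch callchain walkers feed
 * one frame at a time into the entry via perf_callchain_store(), which
 * silently drops frames once PERF_MAX_STACK_DEPTH is reached, so
 * callers need no bounds check of their own. "record_frames" and its
 * unwinder-supplied array are hypothetical.
 *
 *      static void record_frames(struct perf_callchain_entry *entry,
 *                                const unsigned long *ips, int n)
 *      {
 *              int i;
 *
 *              for (i = 0; i < n; i++)
 *                      perf_callchain_store(entry, ips[i]);
 *      }
 */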

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;
extern int sysctl_perf_cpu_time_max_percent;

extern void perf_sample_event_took(u64 sample_len_ns);

extern int perf_proc_update_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos);
extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos);

static inline bool perf_paranoid_tracepoint_raw(void)
{
        return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
        return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
        return sysctl_perf_event_paranoid > 1;
}
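
/*
 * Example (illustrative sketch): the paranoia helpers above gate
 * functionality on the sysctl_perf_event_paranoid level, with
 * privileged tasks allowed to bypass the check:
 *
 *      if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
 *              return -EACCES;
 */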

extern void perf_event_init(void);
extern void perf_tp_event(u64 addr, u64 count, void *record,
                          int entry_size, struct pt_regs *regs,
                          struct hlist_head *head, int rctx,
                          struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
                (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs) instruction_pointer(regs)
#endif

static inline bool has_branch_stack(struct perf_event *event)
{
        return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}

extern int perf_output_begin(struct perf_output_handle *handle,
                             struct perf_event *event, unsigned int size);
extern void perf_output_end(struct perf_output_handle *handle);
extern unsigned int perf_output_copy(struct perf_output_handle *handle,
                             const void *buf, unsigned int len);
extern unsigned int perf_output_skip(struct perf_output_handle *handle,
                                     unsigned int len);
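
/*
 * Example (illustrative sketch only): writing a record into an event's
 * ring buffer with the output helpers above. perf_output_put() (defined
 * near the end of this header) copies one fixed-size object;
 * perf_output_begin() returns non-zero when no space can be reserved.
 *
 *      struct perf_output_handle handle;
 *      struct perf_event_header header = {
 *              .type = PERF_RECORD_SAMPLE,
 *              .size = sizeof(header) + sizeof(u64),
 *      };
 *      u64 payload = 42;
 *
 *      if (perf_output_begin(&handle, event, header.size))
 *              return;
 *      perf_output_put(&handle, header);
 *      perf_output_put(&handle, payload);
 *      perf_output_end(&handle);
 */
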
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern u64 perf_swevent_set_period(struct perf_event *event);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern int __perf_event_disable(void *info);
extern void perf_event_task_tick(void);
#else
static inline void
perf_event_task_sched_in(struct task_struct *prev,
                         struct task_struct *task)                      { }
static inline void
perf_event_task_sched_out(struct task_struct *prev,
                          struct task_struct *next)                     { }
static inline int perf_event_init_task(struct task_struct *child)       { return 0; }
static inline void perf_event_exit_task(struct task_struct *child)      { }
static inline void perf_event_free_task(struct task_struct *task)       { }
static inline void perf_event_delayed_put(struct task_struct *task)     { }
static inline void perf_event_print_debug(void)                         { }
static inline int perf_event_task_disable(void)                         { return -EINVAL; }
static inline int perf_event_task_enable(void)                          { return -EINVAL; }
static inline int perf_event_refresh(struct perf_event *event, int refresh)
{
        return -EINVAL;
}

static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)     { }
static inline void
perf_bp_event(struct perf_event *event, void *data)                     { }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)                           { return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)                           { return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma)          { }
static inline void perf_event_comm(struct task_struct *tsk)             { }
static inline void perf_event_fork(struct task_struct *tsk)             { }
static inline void perf_event_init(void)                                { }
static inline int  perf_swevent_get_recursion_context(void)             { return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)         { }
static inline u64 perf_swevent_set_period(struct perf_event *event)     { return 0; }
static inline void perf_event_enable(struct perf_event *event)          { }
static inline void perf_event_disable(struct perf_event *event)         { }
static inline int __perf_event_disable(void *info)                      { return -1; }
static inline void perf_event_task_tick(void)                           { }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_NO_HZ_FULL)
extern bool perf_event_can_stop_tick(void);
#else
static inline bool perf_event_can_stop_tick(void)                       { return true; }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern void perf_restore_debug_store(void);
#else
static inline void perf_restore_debug_store(void)                       { }
#endif

#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))

/*
 * This has to have a higher priority than migration_notifier in sched/core.c.
 */
#define perf_cpu_notifier(fn)                                           \
do {                                                                    \
        static struct notifier_block fn##_nb =                          \
                { .notifier_call = fn, .priority = CPU_PRI_PERF };      \
        unsigned long cpu = smp_processor_id();                         \
        unsigned long flags;                                            \
        fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,                     \
                (void *)(unsigned long)cpu);                            \
        local_irq_save(flags);                                          \
        fn(&fn##_nb, (unsigned long)CPU_STARTING,                       \
                (void *)(unsigned long)cpu);                            \
        local_irq_restore(flags);                                       \
        fn(&fn##_nb, (unsigned long)CPU_ONLINE,                         \
                (void *)(unsigned long)cpu);                            \
        register_cpu_notifier(&fn##_nb);                                \
} while (0)
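
/*
 * Example (illustrative sketch): a driver invokes the macro above once
 * at init time. The named callback is first replayed with
 * CPU_UP_PREPARE/CPU_STARTING/CPU_ONLINE for the current CPU, then
 * registered for subsequent hotplug notifications. "my_pmu_notifier"
 * is a hypothetical callback.
 *
 *      static int my_pmu_notifier(struct notifier_block *nb,
 *                                 unsigned long action, void *hcpu)
 *      {
 *              switch (action & ~CPU_TASKS_FROZEN) {
 *              case CPU_ONLINE:
 *                      (set up per-cpu state for CPU (long)hcpu)
 *                      break;
 *              }
 *              return NOTIFY_OK;
 *      }
 *
 *      perf_cpu_notifier(my_pmu_notifier);
 */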

struct perf_pmu_events_attr {
        struct device_attribute attr;
        u64 id;
        const char *event_str;
};

#define PMU_EVENT_ATTR(_name, _var, _id, _show)                         \
static struct perf_pmu_events_attr _var = {                             \
        .attr = __ATTR(_name, 0444, _show, NULL),                       \
        .id   =  _id,                                                   \
};

#define PMU_FORMAT_ATTR(_name, _format)                                 \
static ssize_t                                                          \
_name##_show(struct device *dev,                                        \
                               struct device_attribute *attr,           \
                               char *page)                              \
{                                                                       \
        BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);                     \
        return sprintf(page, _format "\n");                             \
}                                                                       \
                                                                        \
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
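
/*
 * Example (illustrative sketch): exposing a format and an event through
 * these macros, as PMU drivers do under
 * /sys/bus/event_source/devices/<pmu>/. The resulting attributes
 * ("format_attr_event" and "evattr_cycles" here) are then listed in
 * attribute_group arrays wired up via pmu::attr_groups;
 * "my_events_show" is a hypothetical show routine.
 *
 *      PMU_FORMAT_ATTR(event, "config:0-7");
 *      PMU_EVENT_ATTR(cycles, evattr_cycles, 0x3c, my_events_show);
 */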

#endif /* _LINUX_PERF_EVENT_H */
