TOMOYO Linux Cross Reference
Linux/include/linux/perf_event.h

  1 /*
  2  * Performance events:
  3  *
  4  *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
  5  *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
  6  *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
  7  *
  8  * Data type definitions, declarations, prototypes.
  9  *
 10  *    Started by: Thomas Gleixner and Ingo Molnar
 11  *
  12  * For licensing details see kernel-base/COPYING
 13  */
 14 #ifndef _LINUX_PERF_EVENT_H
 15 #define _LINUX_PERF_EVENT_H
 16 
 17 #include <uapi/linux/perf_event.h>
 18 #include <uapi/linux/bpf_perf_event.h>
 19 
 20 /*
 21  * Kernel-internal data types and definitions:
 22  */
 23 
 24 #ifdef CONFIG_PERF_EVENTS
 25 # include <asm/perf_event.h>
 26 # include <asm/local64.h>
 27 #endif
 28 
 29 struct perf_guest_info_callbacks {
 30         int                             (*is_in_guest)(void);
 31         int                             (*is_user_mode)(void);
 32         unsigned long                   (*get_guest_ip)(void);
 33         void                            (*handle_intel_pt_intr)(void);
 34 };
 35 
 36 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 37 #include <asm/hw_breakpoint.h>
 38 #endif
 39 
 40 #include <linux/list.h>
 41 #include <linux/mutex.h>
 42 #include <linux/rculist.h>
 43 #include <linux/rcupdate.h>
 44 #include <linux/spinlock.h>
 45 #include <linux/hrtimer.h>
 46 #include <linux/fs.h>
 47 #include <linux/pid_namespace.h>
 48 #include <linux/workqueue.h>
 49 #include <linux/ftrace.h>
 50 #include <linux/cpu.h>
 51 #include <linux/irq_work.h>
 52 #include <linux/static_key.h>
 53 #include <linux/jump_label_ratelimit.h>
 54 #include <linux/atomic.h>
 55 #include <linux/sysfs.h>
 56 #include <linux/perf_regs.h>
 57 #include <linux/cgroup.h>
 58 #include <linux/refcount.h>
 59 #include <linux/security.h>
 60 #include <asm/local.h>
 61 
 62 struct perf_callchain_entry {
 63         __u64                           nr;
 64         __u64                           ip[0]; /* /proc/sys/kernel/perf_event_max_stack */
 65 };
 66 
 67 struct perf_callchain_entry_ctx {
 68         struct perf_callchain_entry *entry;
 69         u32                         max_stack;
 70         u32                         nr;
 71         short                       contexts;
 72         bool                        contexts_maxed;
 73 };
 74 
 75 typedef unsigned long (*perf_copy_f)(void *dst, const void *src,
 76                                      unsigned long off, unsigned long len);
 77 
 78 struct perf_raw_frag {
 79         union {
 80                 struct perf_raw_frag    *next;
 81                 unsigned long           pad;
 82         };
 83         perf_copy_f                     copy;
 84         void                            *data;
 85         u32                             size;
 86 } __packed;
 87 
 88 struct perf_raw_record {
 89         struct perf_raw_frag            frag;
 90         u32                             size;
 91 };
 92 
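/*
 * A minimal sketch of the common single-fragment case, loosely modelled
 * on the tracepoint path (sketch_emit_raw() is illustrative, not part of
 * this header; perf_prepare_sample() walks frag->next and computes the
 * aligned total itself):
 */
static void sketch_emit_raw(struct perf_event *event, void *record,
			    u32 entry_size, struct pt_regs *regs)
{
	struct perf_raw_record raw = {
		.frag = {
			.size	= entry_size,
			.data	= record,
		},
	};
	struct perf_sample_data data;

	perf_sample_data_init(&data, 0, 0);	/* declared further down */
	data.raw = &raw;
	perf_event_overflow(event, &data, regs);
}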
 93 /*
 94  * branch stack layout:
 95  *  nr: number of taken branches stored in entries[]
 96  *
  97  * Note that nr can vary from sample to sample.
  98  * Branches (to, from) are stored from most recent
 99  * to least recent, i.e., entries[0] contains the most
100  * recent branch.
101  */
102 struct perf_branch_stack {
103         __u64                           nr;
104         struct perf_branch_entry        entries[0];
105 };
106 
107 struct task_struct;
108 
109 /*
110  * extra PMU register associated with an event
111  */
112 struct hw_perf_event_extra {
113         u64             config; /* register value */
114         unsigned int    reg;    /* register address or index */
115         int             alloc;  /* extra register already allocated */
116         int             idx;    /* index in shared_regs->regs[] */
117 };
118 
119 /**
120  * struct hw_perf_event - performance event hardware details:
121  */
122 struct hw_perf_event {
123 #ifdef CONFIG_PERF_EVENTS
124         union {
125                 struct { /* hardware */
126                         u64             config;
127                         u64             last_tag;
128                         unsigned long   config_base;
129                         unsigned long   event_base;
130                         int             event_base_rdpmc;
131                         int             idx;
132                         int             last_cpu;
133                         int             flags;
134 
135                         struct hw_perf_event_extra extra_reg;
136                         struct hw_perf_event_extra branch_reg;
137                 };
138                 struct { /* software */
139                         struct hrtimer  hrtimer;
140                 };
141                 struct { /* tracepoint */
142                         /* for tp_event->class */
143                         struct list_head        tp_list;
144                 };
145                 struct { /* amd_power */
146                         u64     pwr_acc;
147                         u64     ptsc;
148                 };
149 #ifdef CONFIG_HAVE_HW_BREAKPOINT
150                 struct { /* breakpoint */
151                         /*
152                          * Crufty hack to avoid the chicken and egg
153                          * problem hw_breakpoint has with context
 154                          * creation and event initialization.
155                          */
156                         struct arch_hw_breakpoint       info;
157                         struct list_head                bp_list;
158                 };
159 #endif
160                 struct { /* amd_iommu */
161                         u8      iommu_bank;
162                         u8      iommu_cntr;
163                         u16     padding;
164                         u64     conf;
165                         u64     conf1;
166                 };
167         };
168         /*
169          * If the event is a per task event, this will point to the task in
170          * question. See the comment in perf_event_alloc().
171          */
172         struct task_struct              *target;
173 
174         /*
 175          * The PMU stores its hardware filter configuration
 176          * here.
177          */
178         void                            *addr_filters;
179 
180         /* Last sync'ed generation of filters */
181         unsigned long                   addr_filters_gen;
182 
183 /*
184  * hw_perf_event::state flags; used to track the PERF_EF_* state.
185  */
186 #define PERF_HES_STOPPED        0x01 /* the counter is stopped */
187 #define PERF_HES_UPTODATE       0x02 /* event->count up-to-date */
188 #define PERF_HES_ARCH           0x04
189 
190         int                             state;
191 
192         /*
193          * The last observed hardware counter value, updated with a
194          * local64_cmpxchg() such that pmu::read() can be called nested.
195          */
196         local64_t                       prev_count;
197 
198         /*
199          * The period to start the next sample with.
200          */
201         u64                             sample_period;
202 
203         /*
204          * The period we started this sample with.
205          */
206         u64                             last_period;
207 
208         /*
209          * However much is left of the current period; note that this is
210          * a full 64bit value and allows for generation of periods longer
211          * than hardware might allow.
212          */
213         local64_t                       period_left;
214 
215         /*
216          * State for throttling the event, see __perf_event_overflow() and
217          * perf_adjust_freq_unthr_context().
218          */
219         u64                             interrupts_seq;
220         u64                             interrupts;
221 
222         /*
223          * State for freq target events, see __perf_event_overflow() and
224          * perf_adjust_freq_unthr_context().
225          */
226         u64                             freq_time_stamp;
227         u64                             freq_count_stamp;
228 #endif
229 };
230 
231 struct perf_event;
232 
233 /*
234  * Common implementation detail of pmu::{start,commit,cancel}_txn
235  */
236 #define PERF_PMU_TXN_ADD  0x1           /* txn to add/schedule event on PMU */
237 #define PERF_PMU_TXN_READ 0x2           /* txn to read event group from PMU */
238 
239 /**
240  * pmu::capabilities flags
241  */
242 #define PERF_PMU_CAP_NO_INTERRUPT               0x01
243 #define PERF_PMU_CAP_NO_NMI                     0x02
244 #define PERF_PMU_CAP_AUX_NO_SG                  0x04
245 #define PERF_PMU_CAP_EXTENDED_REGS              0x08
246 #define PERF_PMU_CAP_EXCLUSIVE                  0x10
247 #define PERF_PMU_CAP_ITRACE                     0x20
248 #define PERF_PMU_CAP_HETEROGENEOUS_CPUS         0x40
249 #define PERF_PMU_CAP_NO_EXCLUDE                 0x80
250 #define PERF_PMU_CAP_AUX_OUTPUT                 0x100
251 
252 struct perf_output_handle;
253 
254 /**
255  * struct pmu - generic performance monitoring unit
256  */
257 struct pmu {
258         struct list_head                entry;
259 
260         struct module                   *module;
261         struct device                   *dev;
262         const struct attribute_group    **attr_groups;
263         const struct attribute_group    **attr_update;
264         const char                      *name;
265         int                             type;
266 
267         /*
268          * various common per-pmu feature flags
269          */
270         int                             capabilities;
271 
272         int __percpu                    *pmu_disable_count;
273         struct perf_cpu_context __percpu *pmu_cpu_context;
274         atomic_t                        exclusive_cnt; /* < 0: cpu; > 0: tsk */
275         int                             task_ctx_nr;
276         int                             hrtimer_interval_ms;
277 
278         /* number of address filters this PMU can do */
279         unsigned int                    nr_addr_filters;
280 
281         /*
282          * Fully disable/enable this PMU, can be used to protect from the PMI
283          * as well as for lazy/batch writing of the MSRs.
284          */
285         void (*pmu_enable)              (struct pmu *pmu); /* optional */
286         void (*pmu_disable)             (struct pmu *pmu); /* optional */
287 
288         /*
289          * Try and initialize the event for this PMU.
290          *
291          * Returns:
292          *  -ENOENT     -- @event is not for this PMU
293          *
294          *  -ENODEV     -- @event is for this PMU but PMU not present
295          *  -EBUSY      -- @event is for this PMU but PMU temporarily unavailable
296          *  -EINVAL     -- @event is for this PMU but @event is not valid
297          *  -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
298          *  -EACCES     -- @event is for this PMU, @event is valid, but no privileges
299          *
300          *  0           -- @event is for this PMU and valid
301          *
302          * Other error return values are allowed.
303          */
304         int (*event_init)               (struct perf_event *event);
305 
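/*
 * A minimal sketch of the contract above (my_pmu_event_init() is
 * hypothetical): -ENOENT lets the core keep probing other PMUs,
 * anything else aborts perf_event_open() for good.
 */
static int my_pmu_event_init(struct perf_event *event)
{
	if (event->attr.type != event->pmu->type)
		return -ENOENT;		/* not ours; try the next PMU */

	if (is_sampling_event(event))
		return -EOPNOTSUPP;	/* ours and valid, but say we only count */

	return 0;
}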
306         /*
307          * Notification that the event was mapped or unmapped.  Called
308          * in the context of the mapping task.
309          */
310         void (*event_mapped)            (struct perf_event *event, struct mm_struct *mm); /* optional */
311         void (*event_unmapped)          (struct perf_event *event, struct mm_struct *mm); /* optional */
312 
313         /*
 314          * Flags for ->add()/->del()/->start()/->stop(). There are
315          * matching hw_perf_event::state flags.
316          */
317 #define PERF_EF_START   0x01            /* start the counter when adding    */
318 #define PERF_EF_RELOAD  0x02            /* reload the counter when starting */
319 #define PERF_EF_UPDATE  0x04            /* update the counter when stopping */
320 
321         /*
322          * Adds/Removes a counter to/from the PMU, can be done inside a
323          * transaction, see the ->*_txn() methods.
324          *
325          * The add/del callbacks will reserve all hardware resources required
326          * to service the event, this includes any counter constraint
327          * scheduling etc.
328          *
329          * Called with IRQs disabled and the PMU disabled on the CPU the event
330          * is on.
331          *
332          * ->add() called without PERF_EF_START should result in the same state
333          *  as ->add() followed by ->stop().
334          *
335          * ->del() must always PERF_EF_UPDATE stop an event. If it calls
336          *  ->stop() that must deal with already being stopped without
337          *  PERF_EF_UPDATE.
338          */
339         int  (*add)                     (struct perf_event *event, int flags);
340         void (*del)                     (struct perf_event *event, int flags);
341 
342         /*
343          * Starts/Stops a counter present on the PMU.
344          *
345          * The PMI handler should stop the counter when perf_event_overflow()
346          * returns !0. ->start() will be used to continue.
347          *
348          * Also used to change the sample period.
349          *
350          * Called with IRQs disabled and the PMU disabled on the CPU the event
 351          * is on -- will be called from NMI context when the PMU generates
352          * NMIs.
353          *
354          * ->stop() with PERF_EF_UPDATE will read the counter and update
355          *  period/count values like ->read() would.
356          *
 357          * ->start() with PERF_EF_RELOAD will reprogram the counter
358          *  value, must be preceded by a ->stop() with PERF_EF_UPDATE.
359          */
360         void (*start)                   (struct perf_event *event, int flags);
361         void (*stop)                    (struct perf_event *event, int flags);
362 
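/*
 * A sketch of the ->add()/->del()/->start()/->stop() state machine for a
 * hypothetical single-counter PMU; the my_hw_*() and my_pmu_read()
 * helpers stand in for real register accessors:
 */
static void my_pmu_start(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_RELOAD)	/* only valid after an updating stop */
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

	event->hw.state = 0;
	my_hw_program(event->hw.config, local64_read(&event->hw.period_left));
}

static void my_pmu_stop(struct perf_event *event, int flags)
{
	if (!(event->hw.state & PERF_HES_STOPPED)) {
		my_hw_disable();
		event->hw.state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
		my_pmu_read(event);	/* fold the hardware delta into ->count */
		event->hw.state |= PERF_HES_UPTODATE;
	}
}

static int my_pmu_add(struct perf_event *event, int flags)
{
	event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		my_pmu_start(event, PERF_EF_RELOAD);
	return 0;
}

static void my_pmu_del(struct perf_event *event, int flags)
{
	my_pmu_stop(event, PERF_EF_UPDATE);	/* always an updating stop */
}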
363         /*
364          * Updates the counter value of the event.
365          *
366          * For sampling capable PMUs this will also update the software period
367          * hw_perf_event::period_left field.
368          */
369         void (*read)                    (struct perf_event *event);
370 
371         /*
372          * Group events scheduling is treated as a transaction, add
373          * group events as a whole and perform one schedulability test.
 374          * If the test fails, roll back the whole group.
375          *
376          * Start the transaction, after this ->add() doesn't need to
377          * do schedulability tests.
378          *
379          * Optional.
380          */
381         void (*start_txn)               (struct pmu *pmu, unsigned int txn_flags);
382         /*
383          * If ->start_txn() disabled the ->add() schedulability test
384          * then ->commit_txn() is required to perform one. On success
385          * the transaction is closed. On error the transaction is kept
386          * open until ->cancel_txn() is called.
387          *
388          * Optional.
389          */
390         int  (*commit_txn)              (struct pmu *pmu);
391         /*
392          * Will cancel the transaction, assumes ->del() is called
393          * for each successful ->add() during the transaction.
394          *
395          * Optional.
396          */
397         void (*cancel_txn)              (struct pmu *pmu);
398 
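/*
 * How the core drives the transaction trio (a condensed sketch of the
 * group scheduling path, assuming the PMU implements all three optional
 * callbacks; the real code also ->del()s whatever was added before
 * cancelling):
 */
static int sketch_group_sched_in(struct pmu *pmu, struct perf_event *group)
{
	struct perf_event *sibling;

	pmu->start_txn(pmu, PERF_PMU_TXN_ADD);

	if (pmu->add(group, PERF_EF_START))
		goto error;

	list_for_each_entry(sibling, &group->sibling_list, sibling_list) {
		if (pmu->add(sibling, PERF_EF_START))
			goto error;
	}

	if (!pmu->commit_txn(pmu))	/* the one schedulability test */
		return 0;
error:
	pmu->cancel_txn(pmu);
	return -EAGAIN;
}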
399         /*
 400          * Will return the value for perf_event_mmap_page::index for this event;
 401          * if no implementation is provided it will default to: event->hw.idx + 1.
402          */
 403         int (*event_idx)                (struct perf_event *event); /* optional */
404 
405         /*
406          * context-switches callback
407          */
408         void (*sched_task)              (struct perf_event_context *ctx,
409                                         bool sched_in);
410         /*
411          * PMU specific data size
412          */
413         size_t                          task_ctx_size;
414 
415         /*
416          * PMU specific parts of task perf event context (i.e. ctx->task_ctx_data)
417          * can be synchronized using this function. See Intel LBR callstack support
418          * implementation and Perf core context switch handling callbacks for usage
419          * examples.
420          */
421         void (*swap_task_ctx)           (struct perf_event_context *prev,
422                                          struct perf_event_context *next);
423                                         /* optional */
424 
425         /*
426          * Set up pmu-private data structures for an AUX area
427          */
428         void *(*setup_aux)              (struct perf_event *event, void **pages,
429                                          int nr_pages, bool overwrite);
430                                         /* optional */
431 
432         /*
433          * Free pmu-private AUX data structures
434          */
435         void (*free_aux)                (void *aux); /* optional */
436 
437         /*
438          * Take a snapshot of the AUX buffer without touching the event
439          * state, so that preempting ->start()/->stop() callbacks does
440          * not interfere with their logic. Called in PMI context.
441          *
442          * Returns the size of AUX data copied to the output handle.
443          *
444          * Optional.
445          */
446         long (*snapshot_aux)            (struct perf_event *event,
447                                          struct perf_output_handle *handle,
448                                          unsigned long size);
449 
450         /*
451          * Validate address range filters: make sure the HW supports the
452          * requested configuration and number of filters; return 0 if the
453          * supplied filters are valid, -errno otherwise.
454          *
455          * Runs in the context of the ioctl()ing process and is not serialized
456          * with the rest of the PMU callbacks.
457          */
458         int (*addr_filters_validate)    (struct list_head *filters);
459                                         /* optional */
460 
461         /*
462          * Synchronize address range filter configuration:
463          * translate hw-agnostic filters into hardware configuration in
464          * event::hw::addr_filters.
465          *
466          * Runs as a part of filter sync sequence that is done in ->start()
467          * callback by calling perf_event_addr_filters_sync().
468          *
 469          * May (and should) traverse event::addr_filters::list, for which the
 470          * caller provides the necessary serialization.
471          */
472         void (*addr_filters_sync)       (struct perf_event *event);
473                                         /* optional */
474 
475         /*
476          * Check if event can be used for aux_output purposes for
477          * events of this PMU.
478          *
479          * Runs from perf_event_open(). Should return 0 for "no match"
480          * or non-zero for "match".
481          */
482         int (*aux_output_match)         (struct perf_event *event);
483                                         /* optional */
484 
485         /*
486          * Filter events for PMU-specific reasons.
487          */
488         int (*filter_match)             (struct perf_event *event); /* optional */
489 
490         /*
491          * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
492          */
493         int (*check_period)             (struct perf_event *event, u64 value); /* optional */
494 };
495 
496 enum perf_addr_filter_action_t {
497         PERF_ADDR_FILTER_ACTION_STOP = 0,
498         PERF_ADDR_FILTER_ACTION_START,
499         PERF_ADDR_FILTER_ACTION_FILTER,
500 };
501 
502 /**
503  * struct perf_addr_filter - address range filter definition
504  * @entry:      event's filter list linkage
505  * @path:       object file's path for file-based filters
506  * @offset:     filter range offset
507  * @size:       filter range size (size==0 means single address trigger)
508  * @action:     filter/start/stop
509  *
510  * This is a hardware-agnostic filter configuration as specified by the user.
511  */
512 struct perf_addr_filter {
513         struct list_head        entry;
514         struct path             path;
515         unsigned long           offset;
516         unsigned long           size;
517         enum perf_addr_filter_action_t  action;
518 };
519 
520 /**
521  * struct perf_addr_filters_head - container for address range filters
522  * @list:       list of filters for this event
523  * @lock:       spinlock that serializes accesses to the @list and event's
524  *              (and its children's) filter generations.
525  * @nr_file_filters:    number of file-based filters
526  *
527  * A child event will use parent's @list (and therefore @lock), so they are
528  * bundled together; see perf_event_addr_filters().
529  */
530 struct perf_addr_filters_head {
531         struct list_head        list;
532         raw_spinlock_t          lock;
533         unsigned int            nr_file_filters;
534 };
535 
536 struct perf_addr_filter_range {
537         unsigned long           start;
538         unsigned long           size;
539 };
540 
541 /**
542  * enum perf_event_state - the states of an event:
543  */
544 enum perf_event_state {
545         PERF_EVENT_STATE_DEAD           = -4,
546         PERF_EVENT_STATE_EXIT           = -3,
547         PERF_EVENT_STATE_ERROR          = -2,
548         PERF_EVENT_STATE_OFF            = -1,
549         PERF_EVENT_STATE_INACTIVE       =  0,
550         PERF_EVENT_STATE_ACTIVE         =  1,
551 };
552 
553 struct file;
554 struct perf_sample_data;
555 
556 typedef void (*perf_overflow_handler_t)(struct perf_event *,
557                                         struct perf_sample_data *,
558                                         struct pt_regs *regs);
559 
560 /*
 561  * Event capabilities. For event_caps and group_caps.
562  *
563  * PERF_EV_CAP_SOFTWARE: Is a software event.
564  * PERF_EV_CAP_READ_ACTIVE_PKG: A CPU event (or cgroup event) that can be read
565  * from any CPU in the package where it is active.
566  */
567 #define PERF_EV_CAP_SOFTWARE            BIT(0)
568 #define PERF_EV_CAP_READ_ACTIVE_PKG     BIT(1)
569 
570 #define SWEVENT_HLIST_BITS              8
571 #define SWEVENT_HLIST_SIZE              (1 << SWEVENT_HLIST_BITS)
572 
573 struct swevent_hlist {
574         struct hlist_head               heads[SWEVENT_HLIST_SIZE];
575         struct rcu_head                 rcu_head;
576 };
577 
578 #define PERF_ATTACH_CONTEXT     0x01
579 #define PERF_ATTACH_GROUP       0x02
580 #define PERF_ATTACH_TASK        0x04
581 #define PERF_ATTACH_TASK_DATA   0x08
582 #define PERF_ATTACH_ITRACE      0x10
583 
584 struct perf_cgroup;
585 struct perf_buffer;
586 
587 struct pmu_event_list {
588         raw_spinlock_t          lock;
589         struct list_head        list;
590 };
591 
592 #define for_each_sibling_event(sibling, event)                  \
593         if ((event)->group_leader == (event))                   \
594                 list_for_each_entry((sibling), &(event)->sibling_list, sibling_list)
595 
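/*
 * Usage sketch: sum a whole group's counts. The macro deliberately
 * iterates nothing unless @leader is its own group leader, and the
 * caller must hold ctx->mutex or ctx->lock per the sibling_list rules
 * in struct perf_event below.
 */
static u64 sketch_group_count(struct perf_event *leader)
{
	struct perf_event *sibling;
	u64 sum = local64_read(&leader->count);

	for_each_sibling_event(sibling, leader)
		sum += local64_read(&sibling->count);

	return sum;
}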
596 /**
597  * struct perf_event - performance event kernel representation:
598  */
599 struct perf_event {
600 #ifdef CONFIG_PERF_EVENTS
601         /*
602          * entry onto perf_event_context::event_list;
603          *   modifications require ctx->lock
604          *   RCU safe iterations.
605          */
606         struct list_head                event_entry;
607 
608         /*
609          * Locked for modification by both ctx->mutex and ctx->lock; holding
 610          * either suffices for read.
611          */
612         struct list_head                sibling_list;
613         struct list_head                active_list;
614         /*
615          * Node on the pinned or flexible tree located at the event context;
616          */
617         struct rb_node                  group_node;
618         u64                             group_index;
619         /*
620          * We need storage to track the entries in perf_pmu_migrate_context; we
621          * cannot use the event_entry because of RCU and we want to keep the
 622          * group intact, which avoids using the other two entries.
623          */
624         struct list_head                migrate_entry;
625 
626         struct hlist_node               hlist_entry;
627         struct list_head                active_entry;
628         int                             nr_siblings;
629 
630         /* Not serialized. Only written during event initialization. */
631         int                             event_caps;
632         /* The cumulative AND of all event_caps for events in this group. */
633         int                             group_caps;
634 
635         struct perf_event               *group_leader;
636         struct pmu                      *pmu;
637         void                            *pmu_private;
638 
639         enum perf_event_state           state;
640         unsigned int                    attach_state;
641         local64_t                       count;
642         atomic64_t                      child_count;
643 
644         /*
645          * These are the total time in nanoseconds that the event
646          * has been enabled (i.e. eligible to run, and the task has
647          * been scheduled in, if this is a per-task event)
648          * and running (scheduled onto the CPU), respectively.
649          */
650         u64                             total_time_enabled;
651         u64                             total_time_running;
652         u64                             tstamp;
653 
654         /*
655          * timestamp shadows the actual context timing but it can
656          * be safely used in NMI interrupt context. It reflects the
657          * context time as it was when the event was last scheduled in.
658          *
659          * ctx_time already accounts for ctx->timestamp. Therefore to
660          * compute ctx_time for a sample, simply add perf_clock().
661          */
662         u64                             shadow_ctx_time;
663 
664         struct perf_event_attr          attr;
665         u16                             header_size;
666         u16                             id_header_size;
667         u16                             read_size;
668         struct hw_perf_event            hw;
669 
670         struct perf_event_context       *ctx;
671         atomic_long_t                   refcount;
672 
673         /*
674          * These accumulate total time (in nanoseconds) that children
675          * events have been enabled and running, respectively.
676          */
677         atomic64_t                      child_total_time_enabled;
678         atomic64_t                      child_total_time_running;
679 
680         /*
681          * Protect attach/detach and child_list:
682          */
683         struct mutex                    child_mutex;
684         struct list_head                child_list;
685         struct perf_event               *parent;
686 
687         int                             oncpu;
688         int                             cpu;
689 
690         struct list_head                owner_entry;
691         struct task_struct              *owner;
692 
693         /* mmap bits */
694         struct mutex                    mmap_mutex;
695         atomic_t                        mmap_count;
696 
697         struct perf_buffer              *rb;
698         struct list_head                rb_entry;
699         unsigned long                   rcu_batches;
700         int                             rcu_pending;
701 
702         /* poll related */
703         wait_queue_head_t               waitq;
704         struct fasync_struct            *fasync;
705 
706         /* delayed work for NMIs and such */
707         int                             pending_wakeup;
708         int                             pending_kill;
709         int                             pending_disable;
710         struct irq_work                 pending;
711 
712         atomic_t                        event_limit;
713 
714         /* address range filters */
715         struct perf_addr_filters_head   addr_filters;
 716         /* vma address array for file-based filters */
717         struct perf_addr_filter_range   *addr_filter_ranges;
718         unsigned long                   addr_filters_gen;
719 
720         /* for aux_output events */
721         struct perf_event               *aux_event;
722 
723         void (*destroy)(struct perf_event *);
724         struct rcu_head                 rcu_head;
725 
726         struct pid_namespace            *ns;
727         u64                             id;
728 
729         u64                             (*clock)(void);
730         perf_overflow_handler_t         overflow_handler;
731         void                            *overflow_handler_context;
732 #ifdef CONFIG_BPF_SYSCALL
733         perf_overflow_handler_t         orig_overflow_handler;
734         struct bpf_prog                 *prog;
735 #endif
736 
737 #ifdef CONFIG_EVENT_TRACING
738         struct trace_event_call         *tp_event;
739         struct event_filter             *filter;
740 #ifdef CONFIG_FUNCTION_TRACER
741         struct ftrace_ops               ftrace_ops;
742 #endif
743 #endif
744 
745 #ifdef CONFIG_CGROUP_PERF
 746         struct perf_cgroup              *cgrp; /* cgroup the event is attached to */
747 #endif
748 
749 #ifdef CONFIG_SECURITY
750         void *security;
751 #endif
752         struct list_head                sb_list;
753 #endif /* CONFIG_PERF_EVENTS */
754 };
755 
756 
757 struct perf_event_groups {
758         struct rb_root  tree;
759         u64             index;
760 };
761 
762 /**
763  * struct perf_event_context - event context structure
764  *
765  * Used as a container for task events and CPU events as well:
766  */
767 struct perf_event_context {
768         struct pmu                      *pmu;
769         /*
770          * Protect the states of the events in the list,
771          * nr_active, and the list:
772          */
773         raw_spinlock_t                  lock;
774         /*
775          * Protect the list of events.  Locking either mutex or lock
776          * is sufficient to ensure the list doesn't change; to change
777          * the list you need to lock both the mutex and the spinlock.
778          */
779         struct mutex                    mutex;
780 
781         struct list_head                active_ctx_list;
782         struct perf_event_groups        pinned_groups;
783         struct perf_event_groups        flexible_groups;
784         struct list_head                event_list;
785 
786         struct list_head                pinned_active;
787         struct list_head                flexible_active;
788 
789         int                             nr_events;
790         int                             nr_active;
791         int                             is_active;
792         int                             nr_stat;
793         int                             nr_freq;
794         int                             rotate_disable;
795         /*
 796          * Set when nr_events != nr_active, but tolerant of events that need
 797          * not be active due to scheduling constraints, such as cgroups.
798          */
799         int                             rotate_necessary;
800         refcount_t                      refcount;
801         struct task_struct              *task;
802 
803         /*
804          * Context clock, runs when context enabled.
805          */
806         u64                             time;
807         u64                             timestamp;
808 
809         /*
810          * These fields let us detect when two contexts have both
811          * been cloned (inherited) from a common ancestor.
812          */
813         struct perf_event_context       *parent_ctx;
814         u64                             parent_gen;
815         u64                             generation;
816         int                             pin_count;
817 #ifdef CONFIG_CGROUP_PERF
818         int                             nr_cgroups;      /* cgroup evts */
819 #endif
820         void                            *task_ctx_data; /* pmu specific data */
821         struct rcu_head                 rcu_head;
822 };
823 
824 /*
825  * Number of contexts where an event can trigger:
826  *      task, softirq, hardirq, nmi.
827  */
828 #define PERF_NR_CONTEXTS        4
829 
830 /**
 831  * struct perf_cpu_context - per-cpu event context structure
832  */
833 struct perf_cpu_context {
834         struct perf_event_context       ctx;
835         struct perf_event_context       *task_ctx;
836         int                             active_oncpu;
837         int                             exclusive;
838 
839         raw_spinlock_t                  hrtimer_lock;
840         struct hrtimer                  hrtimer;
841         ktime_t                         hrtimer_interval;
842         unsigned int                    hrtimer_active;
843 
844 #ifdef CONFIG_CGROUP_PERF
845         struct perf_cgroup              *cgrp;
846         struct list_head                cgrp_cpuctx_entry;
847 #endif
848 
849         struct list_head                sched_cb_entry;
850         int                             sched_cb_usage;
851 
852         int                             online;
853 };
854 
855 struct perf_output_handle {
856         struct perf_event               *event;
857         struct perf_buffer              *rb;
858         unsigned long                   wakeup;
859         unsigned long                   size;
860         u64                             aux_flags;
861         union {
862                 void                    *addr;
863                 unsigned long           head;
864         };
865         int                             page;
866 };
867 
868 struct bpf_perf_event_data_kern {
869         bpf_user_pt_regs_t *regs;
870         struct perf_sample_data *data;
871         struct perf_event *event;
872 };
873 
874 #ifdef CONFIG_CGROUP_PERF
875 
876 /*
877  * perf_cgroup_info keeps track of time_enabled for a cgroup.
878  * This is a per-cpu dynamically allocated data structure.
879  */
880 struct perf_cgroup_info {
881         u64                             time;
882         u64                             timestamp;
883 };
884 
885 struct perf_cgroup {
886         struct cgroup_subsys_state      css;
887         struct perf_cgroup_info __percpu *info;
888 };
889 
890 /*
891  * Must ensure cgroup is pinned (css_get) before calling
892  * this function. In other words, we cannot call this function
893  * if there is no cgroup event for the current CPU context.
894  */
895 static inline struct perf_cgroup *
896 perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
897 {
898         return container_of(task_css_check(task, perf_event_cgrp_id,
899                                            ctx ? lockdep_is_held(&ctx->lock)
900                                                : true),
901                             struct perf_cgroup, css);
902 }
903 #endif /* CONFIG_CGROUP_PERF */
904 
905 #ifdef CONFIG_PERF_EVENTS
906 
907 extern void *perf_aux_output_begin(struct perf_output_handle *handle,
908                                    struct perf_event *event);
909 extern void perf_aux_output_end(struct perf_output_handle *handle,
910                                 unsigned long size);
911 extern int perf_aux_output_skip(struct perf_output_handle *handle,
912                                 unsigned long size);
913 extern void *perf_get_aux(struct perf_output_handle *handle);
914 extern void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags);
915 extern void perf_event_itrace_started(struct perf_event *event);
916 
917 extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
918 extern void perf_pmu_unregister(struct pmu *pmu);
919 
920 extern int perf_num_counters(void);
921 extern const char *perf_pmu_name(void);
922 extern void __perf_event_task_sched_in(struct task_struct *prev,
923                                        struct task_struct *task);
924 extern void __perf_event_task_sched_out(struct task_struct *prev,
925                                         struct task_struct *next);
926 extern int perf_event_init_task(struct task_struct *child);
927 extern void perf_event_exit_task(struct task_struct *child);
928 extern void perf_event_free_task(struct task_struct *task);
929 extern void perf_event_delayed_put(struct task_struct *task);
930 extern struct file *perf_event_get(unsigned int fd);
931 extern const struct perf_event *perf_get_event(struct file *file);
932 extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
933 extern void perf_event_print_debug(void);
934 extern void perf_pmu_disable(struct pmu *pmu);
935 extern void perf_pmu_enable(struct pmu *pmu);
936 extern void perf_sched_cb_dec(struct pmu *pmu);
937 extern void perf_sched_cb_inc(struct pmu *pmu);
938 extern int perf_event_task_disable(void);
939 extern int perf_event_task_enable(void);
940 
941 extern void perf_pmu_resched(struct pmu *pmu);
942 
943 extern int perf_event_refresh(struct perf_event *event, int refresh);
944 extern void perf_event_update_userpage(struct perf_event *event);
945 extern int perf_event_release_kernel(struct perf_event *event);
946 extern struct perf_event *
947 perf_event_create_kernel_counter(struct perf_event_attr *attr,
948                                 int cpu,
949                                 struct task_struct *task,
950                                 perf_overflow_handler_t callback,
951                                 void *context);
952 extern void perf_pmu_migrate_context(struct pmu *pmu,
953                                 int src_cpu, int dst_cpu);
954 int perf_event_read_local(struct perf_event *event, u64 *value,
955                           u64 *enabled, u64 *running);
956 extern u64 perf_event_read_value(struct perf_event *event,
957                                  u64 *enabled, u64 *running);
958 
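/*
 * In-kernel counter usage sketch for the declarations above (a
 * cpu-bound cycle counter; error handling abbreviated):
 */
static u64 sketch_read_cycles(int cpu)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
		.size	= sizeof(attr),
	};
	struct perf_event *event;
	u64 enabled, running, count;

	/* task == NULL: count on @cpu; no overflow callback */
	event = perf_event_create_kernel_counter(&attr, cpu, NULL, NULL, NULL);
	if (IS_ERR(event))
		return 0;

	count = perf_event_read_value(event, &enabled, &running);
	perf_event_release_kernel(event);
	return count;
}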
959 
960 struct perf_sample_data {
961         /*
 962          * Fields set by perf_sample_data_init(), grouped so as to
963          * minimize the cachelines touched.
964          */
965         u64                             addr;
966         struct perf_raw_record          *raw;
967         struct perf_branch_stack        *br_stack;
968         u64                             period;
969         u64                             weight;
970         u64                             txn;
971         union  perf_mem_data_src        data_src;
972 
973         /*
974          * The other fields, optionally {set,used} by
975          * perf_{prepare,output}_sample().
976          */
977         u64                             type;
978         u64                             ip;
979         struct {
980                 u32     pid;
981                 u32     tid;
982         }                               tid_entry;
983         u64                             time;
984         u64                             id;
985         u64                             stream_id;
986         struct {
987                 u32     cpu;
988                 u32     reserved;
989         }                               cpu_entry;
990         struct perf_callchain_entry     *callchain;
991         u64                             aux_size;
992 
993         /*
994          * regs_user may point to task_pt_regs or to regs_user_copy, depending
995          * on arch details.
996          */
997         struct perf_regs                regs_user;
998         struct pt_regs                  regs_user_copy;
999 
1000         struct perf_regs                regs_intr;
1001         u64                             stack_user_size;
1002 
1003         u64                             phys_addr;
1004 } ____cacheline_aligned;
1005 
1006 /* default value for data source */
1007 #define PERF_MEM_NA (PERF_MEM_S(OP, NA)   |\
1008                     PERF_MEM_S(LVL, NA)   |\
1009                     PERF_MEM_S(SNOOP, NA) |\
1010                     PERF_MEM_S(LOCK, NA)  |\
1011                     PERF_MEM_S(TLB, NA))
1012 
1013 static inline void perf_sample_data_init(struct perf_sample_data *data,
1014                                          u64 addr, u64 period)
1015 {
1016         /* remaining struct members initialized in perf_prepare_sample() */
1017         data->addr = addr;
1018         data->raw  = NULL;
1019         data->br_stack = NULL;
1020         data->period = period;
1021         data->weight = 0;
1022         data->data_src.val = PERF_MEM_NA;
1023         data->txn = 0;
1024 }
1025 
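/*
 * Canonical PMI-side sequence (sketch): initialize the fast fields,
 * then let perf_event_overflow() decide whether the event must be
 * throttled.
 */
static void sketch_handle_overflow(struct perf_event *event, struct pt_regs *regs)
{
	struct perf_sample_data data;

	perf_sample_data_init(&data, 0, event->hw.last_period);

	if (perf_event_overflow(event, &data, regs))
		event->pmu->stop(event, 0);	/* throttled */
}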
1026 extern void perf_output_sample(struct perf_output_handle *handle,
1027                                struct perf_event_header *header,
1028                                struct perf_sample_data *data,
1029                                struct perf_event *event);
1030 extern void perf_prepare_sample(struct perf_event_header *header,
1031                                 struct perf_sample_data *data,
1032                                 struct perf_event *event,
1033                                 struct pt_regs *regs);
1034 
1035 extern int perf_event_overflow(struct perf_event *event,
1036                                  struct perf_sample_data *data,
1037                                  struct pt_regs *regs);
1038 
1039 extern void perf_event_output_forward(struct perf_event *event,
1040                                      struct perf_sample_data *data,
1041                                      struct pt_regs *regs);
1042 extern void perf_event_output_backward(struct perf_event *event,
1043                                        struct perf_sample_data *data,
1044                                        struct pt_regs *regs);
1045 extern int perf_event_output(struct perf_event *event,
1046                              struct perf_sample_data *data,
1047                              struct pt_regs *regs);
1048 
1049 static inline bool
1050 is_default_overflow_handler(struct perf_event *event)
1051 {
1052         if (likely(event->overflow_handler == perf_event_output_forward))
1053                 return true;
1054         if (unlikely(event->overflow_handler == perf_event_output_backward))
1055                 return true;
1056         return false;
1057 }
1058 
1059 extern void
1060 perf_event_header__init_id(struct perf_event_header *header,
1061                            struct perf_sample_data *data,
1062                            struct perf_event *event);
1063 extern void
1064 perf_event__output_id_sample(struct perf_event *event,
1065                              struct perf_output_handle *handle,
1066                              struct perf_sample_data *sample);
1067 
1068 extern void
1069 perf_log_lost_samples(struct perf_event *event, u64 lost);
1070 
1071 static inline bool event_has_any_exclude_flag(struct perf_event *event)
1072 {
1073         struct perf_event_attr *attr = &event->attr;
1074 
1075         return attr->exclude_idle || attr->exclude_user ||
1076                attr->exclude_kernel || attr->exclude_hv ||
1077                attr->exclude_guest || attr->exclude_host;
1078 }
1079 
1080 static inline bool is_sampling_event(struct perf_event *event)
1081 {
1082         return event->attr.sample_period != 0;
1083 }
1084 
1085 /*
1086  * Return 1 for a software event, 0 for a hardware event
1087  */
1088 static inline int is_software_event(struct perf_event *event)
1089 {
1090         return event->event_caps & PERF_EV_CAP_SOFTWARE;
1091 }
1092 
1093 /*
1094  * Return 1 for event in sw context, 0 for event in hw context
1095  */
1096 static inline int in_software_context(struct perf_event *event)
1097 {
1098         return event->ctx->pmu->task_ctx_nr == perf_sw_context;
1099 }
1100 
1101 static inline int is_exclusive_pmu(struct pmu *pmu)
1102 {
1103         return pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE;
1104 }
1105 
1106 extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
1107 
1108 extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
1109 extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
1110 
1111 #ifndef perf_arch_fetch_caller_regs
1112 static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
1113 #endif
1114 
1115 /*
1116  * When generating a perf sample in-line, instead of from an interrupt /
1117  * exception, we lack a pt_regs. This is typically used from software events
1118  * like: SW_CONTEXT_SWITCHES, SW_MIGRATIONS and the tie-in with tracepoints.
1119  *
1120  * We typically don't need a full set, but (for x86) do require:
1121  * - ip for PERF_SAMPLE_IP
1122  * - cs for user_mode() tests
1123  * - sp for PERF_SAMPLE_CALLCHAIN
1124  * - eflags for MISC bits and CALLCHAIN (see: perf_hw_regs())
1125  *
1126  * NOTE: assumes @regs is otherwise already 0 filled; this is important for
1127  * things like PERF_SAMPLE_REGS_INTR.
1128  */
1129 static inline void perf_fetch_caller_regs(struct pt_regs *regs)
1130 {
1131         perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
1132 }
1133 
1134 static __always_inline void
1135 perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
1136 {
1137         if (static_key_false(&perf_swevent_enabled[event_id]))
1138                 __perf_sw_event(event_id, nr, regs, addr);
1139 }
1140 
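/*
 * Typical call site (sketch; this mirrors the page-fault accounting in
 * mm/): the static key compiles the disabled case down to a NOP.
 */
static inline void sketch_account_fault(struct pt_regs *regs, unsigned long address)
{
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
}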
1141 DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);
1142 
1143 /*
1144  * 'Special' version for the scheduler; it hard-assumes no recursion,
1145  * which is guaranteed by us not actually scheduling inside other swevents
1146  * because those disable preemption.
1147  */
1148 static __always_inline void
1149 perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
1150 {
1151         if (static_key_false(&perf_swevent_enabled[event_id])) {
1152                 struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
1153 
1154                 perf_fetch_caller_regs(regs);
1155                 ___perf_sw_event(event_id, nr, regs, addr);
1156         }
1157 }
1158 
1159 extern struct static_key_false perf_sched_events;
1160 
1161 static __always_inline bool
1162 perf_sw_migrate_enabled(void)
1163 {
1164         if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS]))
1165                 return true;
1166         return false;
1167 }
1168 
1169 static inline void perf_event_task_migrate(struct task_struct *task)
1170 {
1171         if (perf_sw_migrate_enabled())
1172                 task->sched_migrated = 1;
1173 }
1174 
1175 static inline void perf_event_task_sched_in(struct task_struct *prev,
1176                                             struct task_struct *task)
1177 {
1178         if (static_branch_unlikely(&perf_sched_events))
1179                 __perf_event_task_sched_in(prev, task);
1180 
1181         if (perf_sw_migrate_enabled() && task->sched_migrated) {
1182                 struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
1183 
1184                 perf_fetch_caller_regs(regs);
1185                 ___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0);
1186                 task->sched_migrated = 0;
1187         }
1188 }
1189 
1190 static inline void perf_event_task_sched_out(struct task_struct *prev,
1191                                              struct task_struct *next)
1192 {
1193         perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
1194 
1195         if (static_branch_unlikely(&perf_sched_events))
1196                 __perf_event_task_sched_out(prev, next);
1197 }
1198 
1199 extern void perf_event_mmap(struct vm_area_struct *vma);
1200 
1201 extern void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
1202                                bool unregister, const char *sym);
1203 extern void perf_event_bpf_event(struct bpf_prog *prog,
1204                                  enum perf_bpf_event_type type,
1205                                  u16 flags);
1206 
1207 extern struct perf_guest_info_callbacks *perf_guest_cbs;
1208 extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
1209 extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
1210 
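/*
 * Registration sketch for struct perf_guest_info_callbacks (KVM is the
 * in-tree user; these trivial my_* implementations are illustrative):
 */
static int my_is_in_guest(void)			{ return 0; }
static int my_is_user_mode(void)		{ return 0; }
static unsigned long my_get_guest_ip(void)	{ return 0; }
static void my_handle_intel_pt_intr(void)	{ }

static struct perf_guest_info_callbacks my_guest_cbs = {
	.is_in_guest		= my_is_in_guest,
	.is_user_mode		= my_is_user_mode,
	.get_guest_ip		= my_get_guest_ip,
	.handle_intel_pt_intr	= my_handle_intel_pt_intr,
};

/*
 * Pair perf_register_guest_info_callbacks(&my_guest_cbs) at init time
 * with perf_unregister_guest_info_callbacks(&my_guest_cbs) on teardown.
 */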
1211 extern void perf_event_exec(void);
1212 extern void perf_event_comm(struct task_struct *tsk, bool exec);
1213 extern void perf_event_namespaces(struct task_struct *tsk);
1214 extern void perf_event_fork(struct task_struct *tsk);
1215 
1216 /* Callchains */
1217 DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
1218 
1219 extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
1220 extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
1221 extern struct perf_callchain_entry *
1222 get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
1223                    u32 max_stack, bool crosstask, bool add_mark);
1224 extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs);
1225 extern int get_callchain_buffers(int max_stack);
1226 extern void put_callchain_buffers(void);
1227 
1228 extern int sysctl_perf_event_max_stack;
1229 extern int sysctl_perf_event_max_contexts_per_stack;
1230 
1231 static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip)
1232 {
1233         if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) {
1234                 struct perf_callchain_entry *entry = ctx->entry;
1235                 entry->ip[entry->nr++] = ip;
1236                 ++ctx->contexts;
1237                 return 0;
1238         } else {
1239                 ctx->contexts_maxed = true;
1240                 return -1; /* no more room, stop walking the stack */
1241         }
1242 }
1243 
1244 static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip)
1245 {
1246         if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) {
1247                 struct perf_callchain_entry *entry = ctx->entry;
1248                 entry->ip[entry->nr++] = ip;
1249                 ++ctx->nr;
1250                 return 0;
1251         } else {
1252                 return -1; /* no more room, stop walking the stack */
1253         }
1254 }
1255 
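/*
 * Sketch of an architecture's kernel callchain walker built on the
 * helper above: bail out as soon as perf_callchain_store() reports that
 * the entry is full.
 */
static void sketch_callchain_kernel(struct perf_callchain_entry_ctx *entry,
				    struct pt_regs *regs)
{
	if (perf_callchain_store(entry, instruction_pointer(regs)))
		return;

	/* ... unwind the stack, storing each return address the same way */
}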
1256 extern int sysctl_perf_event_paranoid;
1257 extern int sysctl_perf_event_mlock;
1258 extern int sysctl_perf_event_sample_rate;
1259 extern int sysctl_perf_cpu_time_max_percent;
1260 
1261 extern void perf_sample_event_took(u64 sample_len_ns);
1262 
1263 extern int perf_proc_update_handler(struct ctl_table *table, int write,
1264                 void __user *buffer, size_t *lenp,
1265                 loff_t *ppos);
1266 extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
1267                 void __user *buffer, size_t *lenp,
1268                 loff_t *ppos);
1269 
1270 int perf_event_max_stack_handler(struct ctl_table *table, int write,
1271                                  void __user *buffer, size_t *lenp, loff_t *ppos);
1272 
1273 /* Access to perf_event_open(2) syscall. */
1274 #define PERF_SECURITY_OPEN              0
1275 
1276 /* Finer grained perf_event_open(2) access control. */
1277 #define PERF_SECURITY_CPU               1
1278 #define PERF_SECURITY_KERNEL            2
1279 #define PERF_SECURITY_TRACEPOINT        3
1280 
1281 static inline int perf_is_paranoid(void)
1282 {
1283         return sysctl_perf_event_paranoid > -1;
1284 }
1285 
1286 static inline int perf_allow_kernel(struct perf_event_attr *attr)
1287 {
1288         if (sysctl_perf_event_paranoid > 1 && !capable(CAP_SYS_ADMIN))
1289                 return -EACCES;
1290 
1291         return security_perf_event_open(attr, PERF_SECURITY_KERNEL);
1292 }
1293 
1294 static inline int perf_allow_cpu(struct perf_event_attr *attr)
1295 {
1296         if (sysctl_perf_event_paranoid > 0 && !capable(CAP_SYS_ADMIN))
1297                 return -EACCES;
1298 
1299         return security_perf_event_open(attr, PERF_SECURITY_CPU);
1300 }
1301 
1302 static inline int perf_allow_tracepoint(struct perf_event_attr *attr)
1303 {
1304         if (sysctl_perf_event_paranoid > -1 && !capable(CAP_SYS_ADMIN))
1305                 return -EPERM;
1306 
1307         return security_perf_event_open(attr, PERF_SECURITY_TRACEPOINT);
1308 }
1309 
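/*
 * Sketch of how a driver consults these from ->event_init(): tracing
 * kernel addresses needs perf_allow_kernel() unless the event excludes
 * the kernel (sketch_check_privilege() is illustrative).
 */
static int sketch_check_privilege(struct perf_event *event)
{
	if (!event->attr.exclude_kernel)
		return perf_allow_kernel(&event->attr);

	return 0;
}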
1310 extern void perf_event_init(void);
1311 extern void perf_tp_event(u16 event_type, u64 count, void *record,
1312                           int entry_size, struct pt_regs *regs,
1313                           struct hlist_head *head, int rctx,
1314                           struct task_struct *task);
1315 extern void perf_bp_event(struct perf_event *event, void *data);
1316 
1317 #ifndef perf_misc_flags
1318 # define perf_misc_flags(regs) \
1319                 (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
1320 # define perf_instruction_pointer(regs) instruction_pointer(regs)
1321 #endif
1322 #ifndef perf_arch_bpf_user_pt_regs
1323 # define perf_arch_bpf_user_pt_regs(regs) regs
1324 #endif
1325 
1326 static inline bool has_branch_stack(struct perf_event *event)
1327 {
1328         return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
1329 }
1330 
1331 static inline bool needs_branch_stack(struct perf_event *event)
1332 {
1333         return event->attr.branch_sample_type != 0;
1334 }
1335 
1336 static inline bool has_aux(struct perf_event *event)
1337 {
1338         return event->pmu->setup_aux;
1339 }
1340 
1341 static inline bool is_write_backward(struct perf_event *event)
1342 {
1343         return !!event->attr.write_backward;
1344 }
1345 
1346 static inline bool has_addr_filter(struct perf_event *event)
1347 {
1348         return event->pmu->nr_addr_filters;
1349 }
1350 
1351 /*
1352  * An inherited event uses parent's filters
1353  */
1354 static inline struct perf_addr_filters_head *
1355 perf_event_addr_filters(struct perf_event *event)
1356 {
1357         struct perf_addr_filters_head *ifh = &event->addr_filters;
1358 
1359         if (event->parent)
1360                 ifh = &event->parent->addr_filters;
1361 
1362         return ifh;
1363 }
1364 
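/*
 * Walking the (possibly inherited) list (sketch): take the head's lock
 * so a concurrent ioctl() on the parent cannot rewrite the filters
 * underneath us.
 */
static unsigned int sketch_count_filters(struct perf_event *event)
{
	struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
	struct perf_addr_filter *filter;
	unsigned int nr = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&ifh->lock, flags);
	list_for_each_entry(filter, &ifh->list, entry)
		nr++;
	raw_spin_unlock_irqrestore(&ifh->lock, flags);

	return nr;
}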
1365 extern void perf_event_addr_filters_sync(struct perf_event *event);
1366 
1367 extern int perf_output_begin(struct perf_output_handle *handle,
1368                              struct perf_event *event, unsigned int size);
1369 extern int perf_output_begin_forward(struct perf_output_handle *handle,
1370                                     struct perf_event *event,
1371                                     unsigned int size);
1372 extern int perf_output_begin_backward(struct perf_output_handle *handle,
1373                                       struct perf_event *event,
1374                                       unsigned int size);
1375 
1376 extern void perf_output_end(struct perf_output_handle *handle);
1377 extern unsigned int perf_output_copy(struct perf_output_handle *handle,
1378                              const void *buf, unsigned int len);
1379 extern unsigned int perf_output_skip(struct perf_output_handle *handle,
1380                                      unsigned int len);
1381 extern long perf_output_copy_aux(struct perf_output_handle *aux_handle,
1382                                  struct perf_output_handle *handle,
1383                                  unsigned long from, unsigned long to);
1384 extern int perf_swevent_get_recursion_context(void);
1385 extern void perf_swevent_put_recursion_context(int rctx);
1386 extern u64 perf_swevent_set_period(struct perf_event *event);
1387 extern void perf_event_enable(struct perf_event *event);
1388 extern void perf_event_disable(struct perf_event *event);
1389 extern void perf_event_disable_local(struct perf_event *event);
1390 extern void perf_event_disable_inatomic(struct perf_event *event);
1391 extern void perf_event_task_tick(void);
1392 extern int perf_event_account_interrupt(struct perf_event *event);
1393 extern int perf_event_period(struct perf_event *event, u64 value);
1394 extern u64 perf_event_pause(struct perf_event *event, bool reset);
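
/*
 * Sketch of the control interfaces above, as an in-kernel user (one
 * created with perf_event_create_kernel_counter()) might drive them:
 *
 *      perf_event_disable(event);              // stop counting
 *      ret = perf_event_period(event, 100000); // set a new sample period
 *      perf_event_enable(event);               // resume counting
 *      val = perf_event_pause(event, true);    // pause, read, reset count
 */
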
#else /* !CONFIG_PERF_EVENTS: */
static inline void *
perf_aux_output_begin(struct perf_output_handle *handle,
                      struct perf_event *event)                         { return NULL; }
static inline void
perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
                                                                        { }
static inline int
perf_aux_output_skip(struct perf_output_handle *handle,
                     unsigned long size)                                { return -EINVAL; }
static inline void *
perf_get_aux(struct perf_output_handle *handle)                         { return NULL; }
static inline void
perf_event_task_migrate(struct task_struct *task)                       { }
static inline void
perf_event_task_sched_in(struct task_struct *prev,
                         struct task_struct *task)                      { }
static inline void
perf_event_task_sched_out(struct task_struct *prev,
                          struct task_struct *next)                     { }
static inline int perf_event_init_task(struct task_struct *child)       { return 0; }
static inline void perf_event_exit_task(struct task_struct *child)      { }
static inline void perf_event_free_task(struct task_struct *task)       { }
static inline void perf_event_delayed_put(struct task_struct *task)     { }
static inline struct file *perf_event_get(unsigned int fd)      { return ERR_PTR(-EINVAL); }
static inline const struct perf_event *perf_get_event(struct file *file)
{
        return ERR_PTR(-EINVAL);
}
static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
{
        return ERR_PTR(-EINVAL);
}
static inline int perf_event_read_local(struct perf_event *event, u64 *value,
                                        u64 *enabled, u64 *running)
{
        return -EINVAL;
}
static inline void perf_event_print_debug(void)                         { }
static inline int perf_event_task_disable(void)                         { return -EINVAL; }
static inline int perf_event_task_enable(void)                          { return -EINVAL; }
static inline int perf_event_refresh(struct perf_event *event, int refresh)
{
        return -EINVAL;
}

static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)     { }
static inline void
perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)                     { }
static inline void
perf_bp_event(struct perf_event *event, void *data)                     { }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)                           { return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)                           { return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma)          { }

typedef int (perf_ksymbol_get_name_f)(char *name, int name_len, void *data);
static inline void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
                                      bool unregister, const char *sym) { }
static inline void perf_event_bpf_event(struct bpf_prog *prog,
                                        enum perf_bpf_event_type type,
                                        u16 flags)                      { }
static inline void perf_event_exec(void)                                { }
static inline void perf_event_comm(struct task_struct *tsk, bool exec)  { }
static inline void perf_event_namespaces(struct task_struct *tsk)       { }
static inline void perf_event_fork(struct task_struct *tsk)             { }
static inline void perf_event_init(void)                                { }
static inline int  perf_swevent_get_recursion_context(void)             { return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)         { }
static inline u64 perf_swevent_set_period(struct perf_event *event)     { return 0; }
static inline void perf_event_enable(struct perf_event *event)          { }
static inline void perf_event_disable(struct perf_event *event)         { }
static inline int __perf_event_disable(void *info)                      { return -1; }
static inline void perf_event_task_tick(void)                           { }
static inline int perf_event_release_kernel(struct perf_event *event)   { return 0; }
static inline int perf_event_period(struct perf_event *event, u64 value)
{
        return -EINVAL;
}
static inline u64 perf_event_pause(struct perf_event *event, bool reset)
{
        return 0;
}
#endif
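
/*
 * The stubs above let core code call into perf unconditionally; for
 * example the scheduler tick can contain a bare
 *
 *      perf_event_task_tick();
 *
 * which compiles to nothing when perf is configured out, with no
 * #ifdef at the call site.
 */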

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern void perf_restore_debug_store(void);
#else
static inline void perf_restore_debug_store(void)                       { }
#endif

static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag)
{
        return frag->pad < sizeof(u64);
}
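
/*
 * Why this works: ->pad overlays ->next in the perf_raw_frag union.  A
 * chained fragment stores a real kernel pointer there, whose value is
 * far larger than sizeof(u64); the final fragment leaves it zero (or
 * near zero).  Illustrative walk summing fragment sizes:
 *
 *      struct perf_raw_frag *frag = &raw->frag;
 *      u32 total = 0;
 *
 *      do {
 *              total += frag->size;
 *              if (perf_raw_frag_last(frag))
 *                      break;
 *              frag = frag->next;
 *      } while (1);
 */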

#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
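
/*
 * Canonical output sequence (sketch): reserve space in the ring buffer,
 * emit fixed-size values with perf_output_put(), then close the
 * transaction.
 *
 *      struct perf_output_handle handle;
 *      u64 val = 42;   // sample payload, illustrative
 *
 *      if (perf_output_begin(&handle, event, sizeof(val)))
 *              return; // no buffer, or not enough space
 *      perf_output_put(&handle, val);
 *      perf_output_end(&handle);
 */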

struct perf_pmu_events_attr {
        struct device_attribute attr;
        u64 id;
        const char *event_str;
};

struct perf_pmu_events_ht_attr {
        struct device_attribute                 attr;
        u64                                     id;
        const char                              *event_str_ht;
        const char                              *event_str_noht;
};

ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
                              char *page);

#define PMU_EVENT_ATTR(_name, _var, _id, _show)                         \
static struct perf_pmu_events_attr _var = {                             \
        .attr = __ATTR(_name, 0444, _show, NULL),                       \
        .id   =  _id,                                                   \
};

#define PMU_EVENT_ATTR_STRING(_name, _var, _str)                            \
static struct perf_pmu_events_attr _var = {                                 \
        .attr           = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
        .id             = 0,                                                \
        .event_str      = _str,                                             \
};
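
/*
 * Typical usage (illustrative; names and event encoding hypothetical):
 * define a named sysfs event string and hook it into the PMU's events
 * attribute group.
 *
 *      PMU_EVENT_ATTR_STRING(cycles, example_attr_cycles, "event=0x11");
 *
 *      static struct attribute *example_events_attrs[] = {
 *              &example_attr_cycles.attr.attr,
 *              NULL,
 *      };
 */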

#define PMU_FORMAT_ATTR(_name, _format)                                 \
static ssize_t                                                          \
_name##_show(struct device *dev,                                        \
                               struct device_attribute *attr,           \
                               char *page)                              \
{                                                                       \
        BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);                     \
        return sprintf(page, _format "\n");                             \
}                                                                       \
                                                                        \
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
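
/*
 * Typical usage (illustrative; the bit range is hypothetical): describe
 * how a field of attr::config maps onto hardware, and publish it under
 * the PMU's "format" sysfs group.
 *
 *      PMU_FORMAT_ATTR(event, "config:0-7");
 *
 *      static struct attribute *example_format_attrs[] = {
 *              &format_attr_event.attr,
 *              NULL,
 *      };
 *      static const struct attribute_group example_format_group = {
 *              .name   = "format",
 *              .attrs  = example_format_attrs,
 *      };
 */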

/* Performance counter hotplug functions */
#ifdef CONFIG_PERF_EVENTS
int perf_event_init_cpu(unsigned int cpu);
int perf_event_exit_cpu(unsigned int cpu);
#else
#define perf_event_init_cpu     NULL
#define perf_event_exit_cpu     NULL
#endif
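
/*
 * These hook into the CPU hotplug state machine; the NULL definitions
 * make the hotplug core skip the callbacks when perf is configured
 * out.  The corresponding state table entry in kernel/cpu.c looks
 * like:
 *
 *      [CPUHP_PERF_PREPARE] = {
 *              .name                   = "perf:prepare",
 *              .startup.single         = perf_event_init_cpu,
 *              .teardown.single        = perf_event_exit_cpu,
 *      },
 */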

extern void __weak arch_perf_update_userpage(struct perf_event *event,
                                             struct perf_event_mmap_page *userpg,
                                             u64 now);
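
/*
 * __weak lets the generic empty definition in kernel/events/core.c be
 * overridden at link time.  An architecture that exports, say, time
 * conversion data to userspace supplies its own (sketch; the field
 * updates are illustrative):
 *
 *      void arch_perf_update_userpage(struct perf_event *event,
 *                                     struct perf_event_mmap_page *userpg,
 *                                     u64 now)
 *      {
 *              userpg->cap_user_time = 1;      // plus arch-specific data
 *      }
 */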

#endif /* _LINUX_PERF_EVENT_H */