/*
 * Performance events x86 architecture header
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>

/* To enable MSR tracing please use the generic trace points. */

/*
 *          |   NHM/WSM    |      SNB     |
 * register -------------------------------
 *          |  HT  | no HT |  HT  | no HT |
 *-----------------------------------------
 * offcore  | core | core  | cpu  | core  |
 * lbr_sel  | core | core  | cpu  | core  |
 * ld_lat   | cpu  | core  | cpu  | core  |
 *-----------------------------------------
 *
 * Given that there is a small number of shared regs,
 * we can pre-allocate their slot in the per-cpu
 * per-core reg tables.
 */
enum extra_reg_type {
        EXTRA_REG_NONE  = -1,   /* not used */

        EXTRA_REG_RSP_0 = 0,    /* offcore_response_0 */
        EXTRA_REG_RSP_1 = 1,    /* offcore_response_1 */
        EXTRA_REG_LBR   = 2,    /* lbr_select */
        EXTRA_REG_LDLAT = 3,    /* ld_lat_threshold */
        EXTRA_REG_FE    = 4,    /* fe_* */

        EXTRA_REG_MAX           /* number of entries needed */
};

struct event_constraint {
        union {
                unsigned long   idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
                u64             idxmsk64;
        };
        u64     code;
        u64     cmask;
        int     weight;
        int     overlap;
        int     flags;
};
/*
 * struct hw_perf_event.flags flags
 */
#define PERF_X86_EVENT_PEBS_LDLAT       0x0001 /* ld+ldlat data address sampling */
#define PERF_X86_EVENT_PEBS_ST          0x0002 /* st data address sampling */
#define PERF_X86_EVENT_PEBS_ST_HSW      0x0004 /* haswell style datala, store */
#define PERF_X86_EVENT_COMMITTED        0x0008 /* event passed commit_txn */
#define PERF_X86_EVENT_PEBS_LD_HSW      0x0010 /* haswell style datala, load */
#define PERF_X86_EVENT_PEBS_NA_HSW      0x0020 /* haswell style datala, unknown */
#define PERF_X86_EVENT_EXCL             0x0040 /* HT exclusivity on counter */
#define PERF_X86_EVENT_DYNAMIC          0x0080 /* dynamic alloc'd constraint */
#define PERF_X86_EVENT_RDPMC_ALLOWED    0x0100 /* grant rdpmc permission */
#define PERF_X86_EVENT_EXCL_ACCT        0x0200 /* accounted EXCL event */
#define PERF_X86_EVENT_AUTO_RELOAD      0x0400 /* use PEBS auto-reload */
#define PERF_X86_EVENT_FREERUNNING      0x0800 /* use freerunning PEBS */


struct amd_nb {
        int nb_id;  /* NorthBridge id */
        int refcnt; /* reference count */
        struct perf_event *owners[X86_PMC_IDX_MAX];
        struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS         8

/*
 * Flags PEBS can handle without a PMI.
 *
 * TID can only be handled by flushing at context switch.
 *
 */
#define PEBS_FREERUNNING_FLAGS \
        (PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
        PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
        PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
        PERF_SAMPLE_TRANSACTION)

/*
 * A debug store configuration.
 *
 * We only support architectures that use 64bit fields.
 */
struct debug_store {
        u64     bts_buffer_base;
        u64     bts_index;
        u64     bts_absolute_maximum;
        u64     bts_interrupt_threshold;
        u64     pebs_buffer_base;
        u64     pebs_index;
        u64     pebs_absolute_maximum;
        u64     pebs_interrupt_threshold;
        u64     pebs_event_reset[MAX_PEBS_EVENTS];
};

/*
 * Per register state.
 */
struct er_account {
        raw_spinlock_t      lock;       /* per-core: protect structure */
        u64                 config;     /* extra MSR config */
        u64                 reg;        /* extra MSR number */
        atomic_t            ref;        /* reference count */
};

/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
struct intel_shared_regs {
        struct er_account       regs[EXTRA_REG_MAX];
        int                     refcnt;         /* per-core: #HT threads */
        unsigned                core_id;        /* per-core: core id */
};

enum intel_excl_state_type {
        INTEL_EXCL_UNUSED    = 0, /* counter is unused */
        INTEL_EXCL_SHARED    = 1, /* counter can be used by both threads */
        INTEL_EXCL_EXCLUSIVE = 2, /* counter can be used by one thread only */
};

struct intel_excl_states {
        enum intel_excl_state_type state[X86_PMC_IDX_MAX];
        bool sched_started; /* true if scheduling has started */
};

struct intel_excl_cntrs {
        raw_spinlock_t  lock;

        struct intel_excl_states states[2];

        union {
                u16     has_exclusive[2];
                u32     exclusive_present;
        };

        int             refcnt;         /* per-core: #HT threads */
        unsigned        core_id;        /* per-core: core id */
};

#define MAX_LBR_ENTRIES         32

enum {
        X86_PERF_KFREE_SHARED = 0,
        X86_PERF_KFREE_EXCL   = 1,
        X86_PERF_KFREE_MAX
};

struct cpu_hw_events {
        /*
         * Generic x86 PMC bits
         */
        struct perf_event       *events[X86_PMC_IDX_MAX]; /* in counter order */
        unsigned long           active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        unsigned long           running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        int                     enabled;

        int                     n_events; /* the # of events in the below arrays */
        int                     n_added;  /* the # last events in the below arrays;
                                             they've never been enabled yet */
        int                     n_txn;    /* the # last events in the below arrays;
                                             added in the current transaction */
        int                     assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
        u64                     tags[X86_PMC_IDX_MAX];

        struct perf_event       *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
        struct event_constraint *event_constraint[X86_PMC_IDX_MAX];

        int                     n_excl; /* the number of exclusive events */

        unsigned int            txn_flags;
        int                     is_fake;

        /*
         * Intel DebugStore bits
         */
        struct debug_store      *ds;
        u64                     pebs_enabled;
        int                     n_pebs;
        int                     n_large_pebs;

        /*
         * Intel LBR bits
         */
        int                             lbr_users;
        struct perf_branch_stack        lbr_stack;
        struct perf_branch_entry        lbr_entries[MAX_LBR_ENTRIES];
        struct er_account               *lbr_sel;
        u64                             br_sel;

        /*
         * Intel host/guest exclude bits
         */
        u64                             intel_ctrl_guest_mask;
        u64                             intel_ctrl_host_mask;
        struct perf_guest_switch_msr    guest_switch_msrs[X86_PMC_IDX_MAX];

        /*
         * Intel checkpoint mask
         */
        u64                             intel_cp_status;

        /*
         * manage shared (per-core, per-cpu) registers
         * used on Intel NHM/WSM/SNB
         */
        struct intel_shared_regs        *shared_regs;
        /*
         * manage exclusive counter access between hyperthreads
         */
        struct event_constraint *constraint_list; /* in enable order */
        struct intel_excl_cntrs         *excl_cntrs;
        int excl_thread_id; /* 0 or 1 */

        /*
         * AMD specific bits
         */
        struct amd_nb                   *amd_nb;
        /* Inverted mask of bits to clear in the perf_ctr ctrl registers */
        u64                             perf_ctr_virt_mask;

        void                            *kfree_on_online[X86_PERF_KFREE_MAX];
};

#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\
        { .idxmsk64 = (n) },            \
        .code = (c),                    \
        .cmask = (m),                   \
        .weight = (w),                  \
        .overlap = (o),                 \
        .flags = f,                     \
}

#define EVENT_CONSTRAINT(c, n, m)       \
        __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)

#define INTEL_EXCLEVT_CONSTRAINT(c, n)  \
        __EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
                           0, PERF_X86_EVENT_EXCL)

/*
 * The overlap flag marks event constraints with overlapping counter
 * masks. This is the case if the counter mask of such an event is not
 * a subset of any other counter mask of a constraint with an equal or
 * higher weight, e.g.:
 *
 *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
 *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
 *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
 *
 * The event scheduler may not select the correct counter in the first
 * cycle because it needs to know which subsequent events will be
 * scheduled. It may fail to schedule the events then. So we set the
 * overlap flag for such constraints to give the scheduler a hint which
 * events to select for counter rescheduling.
 *
 * Care must be taken as the rescheduling algorithm is O(n!) which
 * will increase scheduling cycles for an over-committed system
 * dramatically.  The number of such EVENT_CONSTRAINT_OVERLAP() macros
 * and their counter masks must be kept to a minimum.
 */
#define EVENT_CONSTRAINT_OVERLAP(c, n, m)       \
        __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)
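
/*
 * Illustration (assumed reading of the example above): the counter masks
 * decode as 0x09 -> counters {0,3}, 0x07 -> counters {0,1,2} and
 * 0x38 -> counters {3,4,5}. Since 0x09 is a subset of neither other mask,
 * a greedy first pick of counter 0 (or 3) for c_overlaps may have to be
 * revisited once c_another1 (or c_another2) is scheduled; the overlap flag
 * marks the constraint as a candidate for such rescheduling.
 */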

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)    \
        EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * Filter mask to validate fixed counter events.
 * The following filters disqualify an event from the fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  - in_tx
 *  - in_tx_checkpointed
 *  The other filters are supported by fixed counters.
 *  The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)
#define FIXED_EVENT_CONSTRAINT(c, n)    \
        EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)
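
/*
 * Example (values assumed here, based on the architectural events used by
 * the Intel drivers): FIXED_EVENT_CONSTRAINT(0x00c0, 0) pins INST_RETIRED.ANY
 * to fixed counter 0, i.e. sets bit 32 of the index mask, since the fixed
 * counters start at index 32 (X86_PMC_IDX_FIXED).
 */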

/*
 * Constraint on the Event code + UMask
 */
#define INTEL_UEVENT_CONSTRAINT(c, n)   \
        EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

/* Constraint on specific umask bit only + event */
#define INTEL_UBIT_EVENT_CONSTRAINT(c, n)       \
        EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|(c))

/* Like UEVENT_CONSTRAINT, but match flags too */
#define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n)     \
        EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

#define INTEL_EXCLUEVT_CONSTRAINT(c, n) \
        __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_EXCL)

#define INTEL_PLD_CONSTRAINT(c, n)      \
        __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)

#define INTEL_PST_CONSTRAINT(c, n)      \
        __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)

/* Event constraint, but match on all event flags too. */
#define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
        EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)

/* Check only flags, but allow all event/umask */
#define INTEL_ALL_EVENT_CONSTRAINT(code, n)     \
        EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)

/* Check flags and event code, and set the HSW store flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

/* Check flags and event code, and set the HSW load flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, \
                          PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW store flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, \
                          PERF_X86_EVENT_PEBS_ST_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW load flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, \
                          PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW N/A flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)


/*
 * We define the end marker as having a weight of -1
 * to enable blacklisting of events using a counter bitmask
 * of zero and thus a weight of zero.
 * The end marker has a weight that cannot possibly be
 * obtained from counting the bits in the bitmask.
 */
#define EVENT_CONSTRAINT_END { .weight = -1 }

/*
 * Check for end marker with weight == -1
 */
#define for_each_event_constraint(e, c) \
        for ((e) = (c); (e)->weight != -1; (e)++)
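
/*
 * Usage sketch (table name and values are hypothetical): constraint tables
 * are sentinel-terminated arrays walked with the iterator above:
 *
 *        static struct event_constraint my_constraints[] = {
 *                INTEL_EVENT_CONSTRAINT(0xc0, 0x3),  // event 0xc0, counters 0-1
 *                EVENT_CONSTRAINT_END
 *        };
 *
 *        struct event_constraint *c;
 *
 *        for_each_event_constraint(c, my_constraints)
 *                do_something(c);
 */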

/*
 * Extra registers for specific events.
 *
 * Some events need large masks and require external MSRs.
 * Those extra MSRs end up being shared for all events on
 * a PMU and sometimes between PMU of sibling HT threads.
 * In either case, the kernel needs to handle conflicting
 * accesses to those extra, shared, regs. The data structure
 * to manage those registers is stored in cpu_hw_event.
 */
struct extra_reg {
        unsigned int            event;
        unsigned int            msr;
        u64                     config_mask;
        u64                     valid_mask;
        int                     idx;  /* per_xxx->regs[] reg index */
        bool                    extra_msr_access;
};

#define EVENT_EXTRA_REG(e, ms, m, vm, i) {      \
        .event = (e),                   \
        .msr = (ms),                    \
        .config_mask = (m),             \
        .valid_mask = (vm),             \
        .idx = EXTRA_REG_##i,           \
        .extra_msr_access = true,       \
        }

#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)      \
        EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)

#define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \
        EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
                        ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)

#define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) \
        INTEL_UEVENT_EXTRA_REG(c, \
                               MSR_PEBS_LD_LAT_THRESHOLD, \
                               0xffff, \
                               LDLAT)

#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
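
/*
 * Example (mask value is a placeholder; the real per-model masks live in the
 * Intel event tables): an offcore response event keeps its filter in a
 * separate MSR, so its extra_regs entry is declared roughly as
 *
 *        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffffffffffull, RSP_0),
 *
 * with the table terminated by EVENT_EXTRA_END, mirroring the constraint
 * tables above.
 */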

union perf_capabilities {
        struct {
                u64     lbr_format:6;
                u64     pebs_trap:1;
                u64     pebs_arch_reg:1;
                u64     pebs_format:4;
                u64     smm_freeze:1;
                /*
                 * PMU supports separate counter range for writing
                 * values > 32bit.
                 */
                u64     full_width_write:1;
        };
        u64     capabilities;
};

struct x86_pmu_quirk {
        struct x86_pmu_quirk *next;
        void (*func)(void);
};

union x86_pmu_config {
        struct {
                u64 event:8,
                    umask:8,
                    usr:1,
                    os:1,
                    edge:1,
                    pc:1,
                    interrupt:1,
                    __reserved1:1,
                    en:1,
                    inv:1,
                    cmask:8,
                    event2:4,
                    __reserved2:4,
                    go:1,
                    ho:1;
        } bits;
        u64 value;
};

#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value
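
/*
 * Usage sketch (field values chosen for illustration): X86_CONFIG() builds a
 * raw event selector from named bit-fields, e.g.
 *
 *        u64 cfg = X86_CONFIG(.event = 0xc0, .umask = 0x01, .inv = 0, .cmask = 0);
 *
 * encodes event 0xc0 with umask 0x01 and no invert/count-mask bits set.
 */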

enum {
        x86_lbr_exclusive_lbr,
        x86_lbr_exclusive_bts,
        x86_lbr_exclusive_pt,
        x86_lbr_exclusive_max,
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
        /*
         * Generic x86 PMC bits
         */
        const char      *name;
        int             version;
        int             (*handle_irq)(struct pt_regs *);
        void            (*disable_all)(void);
        void            (*enable_all)(int added);
        void            (*enable)(struct perf_event *);
        void            (*disable)(struct perf_event *);
        void            (*add)(struct perf_event *);
        void            (*del)(struct perf_event *);
        int             (*hw_config)(struct perf_event *event);
        int             (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
        unsigned        eventsel;
        unsigned        perfctr;
        int             (*addr_offset)(int index, bool eventsel);
        int             (*rdpmc_index)(int index);
        u64             (*event_map)(int);
        int             max_events;
        int             num_counters;
        int             num_counters_fixed;
        int             cntval_bits;
        u64             cntval_mask;
        union {
                        unsigned long events_maskl;
                        unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
        };
        int             events_mask_len;
        int             apic;
        u64             max_period;
        struct event_constraint *
                        (*get_event_constraints)(struct cpu_hw_events *cpuc,
                                                 int idx,
                                                 struct perf_event *event);

        void            (*put_event_constraints)(struct cpu_hw_events *cpuc,
                                                 struct perf_event *event);

        void            (*start_scheduling)(struct cpu_hw_events *cpuc);

        void            (*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);

        void            (*stop_scheduling)(struct cpu_hw_events *cpuc);

        struct event_constraint *event_constraints;
        struct x86_pmu_quirk *quirks;
        int             perfctr_second_write;
        bool            late_ack;
        unsigned        (*limit_period)(struct perf_event *event, unsigned l);

        /*
         * sysfs attrs
         */
        int             attr_rdpmc_broken;
        int             attr_rdpmc;
        struct attribute **format_attrs;
        struct attribute **event_attrs;

        ssize_t         (*events_sysfs_show)(char *page, u64 config);
        struct attribute **cpu_events;

        /*
         * CPU Hotplug hooks
         */
        int             (*cpu_prepare)(int cpu);
        void            (*cpu_starting)(int cpu);
        void            (*cpu_dying)(int cpu);
        void            (*cpu_dead)(int cpu);

        void            (*check_microcode)(void);
        void            (*sched_task)(struct perf_event_context *ctx,
                                      bool sched_in);

        /*
         * Intel Arch Perfmon v2+
         */
        u64                     intel_ctrl;
        union perf_capabilities intel_cap;

        /*
         * Intel DebugStore bits
         */
        unsigned int    bts             :1,
                        bts_active      :1,
                        pebs            :1,
                        pebs_active     :1,
                        pebs_broken     :1,
                        pebs_prec_dist  :1;
        int             pebs_record_size;
        int             pebs_buffer_size;
        void            (*drain_pebs)(struct pt_regs *regs);
        struct event_constraint *pebs_constraints;
        void            (*pebs_aliases)(struct perf_event *event);
        int             max_pebs_events;
        unsigned long   free_running_flags;

        /*
         * Intel LBR
         */
        unsigned long   lbr_tos, lbr_from, lbr_to; /* MSR base regs       */
        int             lbr_nr;                    /* hardware stack size */
        u64             lbr_sel_mask;              /* LBR_SELECT valid bits */
        const int       *lbr_sel_map;              /* lbr_select mappings */
        bool            lbr_double_abort;          /* duplicated lbr aborts */
        bool            lbr_pt_coexist;            /* (LBR|BTS) may coexist with PT */

        /*
         * Intel PT/LBR/BTS are exclusive
         */
        atomic_t        lbr_exclusive[x86_lbr_exclusive_max];

        /*
         * AMD bits
         */
        unsigned int    amd_nb_constraints : 1;

        /*
         * Extra registers for events
         */
        struct extra_reg *extra_regs;
        unsigned int flags;

        /*
         * Intel host/guest support (KVM)
         */
        struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
};

struct x86_perf_task_context {
        u64 lbr_from[MAX_LBR_ENTRIES];
        u64 lbr_to[MAX_LBR_ENTRIES];
        u64 lbr_info[MAX_LBR_ENTRIES];
        int tos;
        int lbr_callstack_users;
        int lbr_stack_state;
};

#define x86_add_quirk(func_)                                            \
do {                                                                    \
        static struct x86_pmu_quirk __quirk __initdata = {              \
                .func = func_,                                          \
        };                                                              \
        __quirk.next = x86_pmu.quirks;                                  \
        x86_pmu.quirks = &__quirk;                                      \
} while (0)

/*
 * x86_pmu flags
 */
#define PMU_FL_NO_HT_SHARING    0x1 /* no hyper-threading resource sharing */
#define PMU_FL_HAS_RSP_1        0x2 /* has 2 equivalent offcore_rsp regs   */
#define PMU_FL_EXCL_CNTRS       0x4 /* has exclusive counter requirements  */
#define PMU_FL_EXCL_ENABLED     0x8 /* exclusive counter active */

#define EVENT_VAR(_id)  event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr

#define EVENT_ATTR(_name, _id)                                          \
static struct perf_pmu_events_attr EVENT_VAR(_id) = {                   \
        .attr           = __ATTR(_name, 0444, events_sysfs_show, NULL), \
        .id             = PERF_COUNT_HW_##_id,                          \
        .event_str      = NULL,                                         \
};

#define EVENT_ATTR_STR(_name, v, str)                                   \
static struct perf_pmu_events_attr event_attr_##v = {                   \
        .attr           = __ATTR(_name, 0444, events_sysfs_show, NULL), \
        .id             = 0,                                            \
        .event_str      = str,                                          \
};

#define EVENT_ATTR_STR_HT(_name, v, noht, ht)                           \
static struct perf_pmu_events_ht_attr event_attr_##v = {                \
        .attr           = __ATTR(_name, 0444, events_ht_sysfs_show, NULL),\
        .id             = 0,                                            \
        .event_str_noht = noht,                                         \
        .event_str_ht   = ht,                                           \
}

extern struct x86_pmu x86_pmu __read_mostly;

static inline bool x86_pmu_has_lbr_callstack(void)
{
        return  x86_pmu.lbr_sel_map &&
                x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
}

DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

extern u64 __read_mostly hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];
extern u64 __read_mostly hw_cache_extra_regs
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];

u64 x86_perf_event_update(struct perf_event *event);

static inline unsigned int x86_pmu_config_addr(int index)
{
        return x86_pmu.eventsel + (x86_pmu.addr_offset ?
                                   x86_pmu.addr_offset(index, true) : index);
}

static inline unsigned int x86_pmu_event_addr(int index)
{
        return x86_pmu.perfctr + (x86_pmu.addr_offset ?
                                  x86_pmu.addr_offset(index, false) : index);
}

static inline int x86_pmu_rdpmc_index(int index)
{
        return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
}
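
/*
 * Example (derived from the helpers above): without an ->addr_offset callback
 * the control and counter MSRs are assumed to be contiguous, so index 2 maps
 * to x86_pmu.eventsel + 2 and x86_pmu.perfctr + 2. PMUs whose MSRs are strided
 * or interleaved (e.g. some AMD families) supply ->addr_offset to translate
 * the index into the real offset instead.
 */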

int x86_add_exclusive(unsigned int what);

void x86_del_exclusive(unsigned int what);

int x86_reserve_hardware(void);

void x86_release_hardware(void);

void hw_perf_lbr_event_destroy(struct perf_event *event);

int x86_setup_perfctr(struct perf_event *event);

int x86_pmu_hw_config(struct perf_event *event);

void x86_pmu_disable_all(void);

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
                                          u64 enable_mask)
{
        u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);

        if (hwc->extra_reg.reg)
                wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
        wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
}

void x86_pmu_enable_all(int added);

int perf_assign_events(struct event_constraint **constraints, int n,
                        int wmin, int wmax, int gpmax, int *assign);
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);

void x86_pmu_stop(struct perf_event *event, int flags);

static inline void x86_pmu_disable_event(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        wrmsrl(hwc->config_base, hwc->config);
}

void x86_pmu_enable_event(struct perf_event *event);

int x86_pmu_handle_irq(struct pt_regs *regs);

extern struct event_constraint emptyconstraint;

extern struct event_constraint unconstrained;

static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
        return ip > PAGE_OFFSET;
#else
        return (long)ip < 0;
#endif
}
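
/*
 * On 64-bit, kernel addresses occupy the upper half of the canonical address
 * space, so the sign-bit test above is sufficient; on 32-bit, anything above
 * PAGE_OFFSET belongs to the kernel.
 */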

/*
 * Not all PMUs provide the right context information to place the reported IP
 * into full context. Specifically segment registers are typically not
 * supplied.
 *
 * Assuming the address is a linear address (it is for IBS), we fake the CS and
 * vm86 mode using the known zero-based code segment and 'fix up' the registers
 * to reflect this.
 *
 * Intel PEBS/LBR appear to typically provide the effective address, nothing
 * much we can do about that but pray and treat it like a linear address.
 */
static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
{
        regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
        if (regs->flags & X86_VM_MASK)
                regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
        regs->ip = ip;
}

ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
ssize_t intel_event_sysfs_show(char *page, u64 config);

struct attribute **merge_attr(struct attribute **a, struct attribute **b);

ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
                          char *page);
ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
                          char *page);

#ifdef CONFIG_CPU_SUP_AMD

int amd_pmu_init(void);

#else /* CONFIG_CPU_SUP_AMD */

static inline int amd_pmu_init(void)
{
        return 0;
}

#endif /* CONFIG_CPU_SUP_AMD */

#ifdef CONFIG_CPU_SUP_INTEL

static inline bool intel_pmu_has_bts(struct perf_event *event)
{
        if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
            !event->attr.freq && event->hw.sample_period == 1)
                return true;

        return false;
}

int intel_pmu_save_and_restart(struct perf_event *event);

struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
                          struct perf_event *event);

struct intel_shared_regs *allocate_shared_regs(int cpu);

int intel_pmu_init(void);

void init_debug_store_on_cpu(int cpu);

void fini_debug_store_on_cpu(int cpu);

void release_ds_buffers(void);

void reserve_ds_buffers(void);

extern struct event_constraint bts_constraint;

void intel_pmu_enable_bts(u64 config);

void intel_pmu_disable_bts(void);

int intel_pmu_drain_bts_buffer(void);

extern struct event_constraint intel_core2_pebs_event_constraints[];

extern struct event_constraint intel_atom_pebs_event_constraints[];

extern struct event_constraint intel_slm_pebs_event_constraints[];

extern struct event_constraint intel_glm_pebs_event_constraints[];

extern struct event_constraint intel_nehalem_pebs_event_constraints[];

extern struct event_constraint intel_westmere_pebs_event_constraints[];

extern struct event_constraint intel_snb_pebs_event_constraints[];

extern struct event_constraint intel_ivb_pebs_event_constraints[];

extern struct event_constraint intel_hsw_pebs_event_constraints[];

extern struct event_constraint intel_bdw_pebs_event_constraints[];

extern struct event_constraint intel_skl_pebs_event_constraints[];

struct event_constraint *intel_pebs_constraints(struct perf_event *event);

void intel_pmu_pebs_add(struct perf_event *event);

void intel_pmu_pebs_del(struct perf_event *event);

void intel_pmu_pebs_enable(struct perf_event *event);

void intel_pmu_pebs_disable(struct perf_event *event);

void intel_pmu_pebs_enable_all(void);

void intel_pmu_pebs_disable_all(void);

void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);

void intel_ds_init(void);

void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);

u64 lbr_from_signext_quirk_wr(u64 val);

void intel_pmu_lbr_reset(void);

void intel_pmu_lbr_add(struct perf_event *event);

void intel_pmu_lbr_del(struct perf_event *event);

void intel_pmu_lbr_enable_all(bool pmi);

void intel_pmu_lbr_disable_all(void);

void intel_pmu_lbr_read(void);

void intel_pmu_lbr_init_core(void);

void intel_pmu_lbr_init_nhm(void);

void intel_pmu_lbr_init_atom(void);

void intel_pmu_lbr_init_slm(void);

void intel_pmu_lbr_init_snb(void);

void intel_pmu_lbr_init_hsw(void);

void intel_pmu_lbr_init_skl(void);

void intel_pmu_lbr_init_knl(void);

void intel_pmu_pebs_data_source_nhm(void);

int intel_pmu_setup_lbr_filter(struct perf_event *event);

void intel_pt_interrupt(void);

int intel_bts_interrupt(void);

void intel_bts_enable_local(void);

void intel_bts_disable_local(void);

int p4_pmu_init(void);

int p6_pmu_init(void);

int knc_pmu_init(void);

static inline int is_ht_workaround_enabled(void)
{
        return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
}

#else /* CONFIG_CPU_SUP_INTEL */

static inline void reserve_ds_buffers(void)
{
}

static inline void release_ds_buffers(void)
{
}

static inline int intel_pmu_init(void)
{
        return 0;
}

static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
{
        return NULL;
}

static inline int is_ht_workaround_enabled(void)
{
        return 0;
}
#endif /* CONFIG_CPU_SUP_INTEL */
