Linux/arch/x86/events/intel/pt.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel(R) Processor Trace PMU driver for perf
 * Copyright (c) 2013-2014, Intel Corporation.
 *
 * Intel PT is specified in the Intel Architecture Instruction Set Extensions
 * Programming Reference:
 * http://software.intel.com/en-us/intel-isa-extensions
 */

#undef DEBUG

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <asm/perf_event.h>
#include <asm/insn.h>
#include <asm/io.h>
#include <asm/intel_pt.h>
#include <asm/intel-family.h>

#include "../perf_event.h"
#include "pt.h"

static DEFINE_PER_CPU(struct pt, pt_ctx);

static struct pt_pmu pt_pmu;

/*
 * Capabilities of Intel PT hardware, such as number of address bits or
 * supported output schemes, are cached and exported to userspace as the
 * "caps" attribute group of the pt pmu device
 * (/sys/bus/event_source/devices/intel_pt/caps/) so that userspace can store
 * relevant bits together with intel_pt traces.
 *
 * These are necessary both for trace decoding (payloads_lip, for example,
 * determines the address width encoded in IP-related packets) and for event
 * configuration (bitmasks with permitted values for certain bit fields).
 */
#define PT_CAP(_n, _l, _r, _m)                                          \
        [PT_CAP_ ## _n] = { .name = __stringify(_n), .leaf = _l,        \
                            .reg = _r, .mask = _m }

static struct pt_cap_desc {
        const char      *name;
        u32             leaf;
        u8              reg;
        u32             mask;
} pt_caps[] = {
        PT_CAP(max_subleaf,             0, CPUID_EAX, 0xffffffff),
        PT_CAP(cr3_filtering,           0, CPUID_EBX, BIT(0)),
        PT_CAP(psb_cyc,                 0, CPUID_EBX, BIT(1)),
        PT_CAP(ip_filtering,            0, CPUID_EBX, BIT(2)),
        PT_CAP(mtc,                     0, CPUID_EBX, BIT(3)),
        PT_CAP(ptwrite,                 0, CPUID_EBX, BIT(4)),
        PT_CAP(power_event_trace,       0, CPUID_EBX, BIT(5)),
        PT_CAP(topa_output,             0, CPUID_ECX, BIT(0)),
        PT_CAP(topa_multiple_entries,   0, CPUID_ECX, BIT(1)),
        PT_CAP(single_range_output,     0, CPUID_ECX, BIT(2)),
        PT_CAP(output_subsys,           0, CPUID_ECX, BIT(3)),
        PT_CAP(payloads_lip,            0, CPUID_ECX, BIT(31)),
        PT_CAP(num_address_ranges,      1, CPUID_EAX, 0x3),
        PT_CAP(mtc_periods,             1, CPUID_EAX, 0xffff0000),
        PT_CAP(cycle_thresholds,        1, CPUID_EBX, 0xffff),
        PT_CAP(psb_periods,             1, CPUID_EBX, 0xffff0000),
};

u32 intel_pt_validate_cap(u32 *caps, enum pt_capabilities capability)
{
        struct pt_cap_desc *cd = &pt_caps[capability];
        u32 c = caps[cd->leaf * PT_CPUID_REGS_NUM + cd->reg];
        unsigned int shift = __ffs(cd->mask);

        return (c & cd->mask) >> shift;
}
EXPORT_SYMBOL_GPL(intel_pt_validate_cap);
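
/*
 * A worked decode example (illustrative only): for PT_CAP_mtc_periods the
 * descriptor above is { .leaf = 1, .reg = CPUID_EAX, .mask = 0xffff0000 },
 * so intel_pt_validate_cap() reads caps[1 * PT_CPUID_REGS_NUM + CPUID_EAX],
 * masks off bits 31:16 and shifts right by __ffs(0xffff0000) == 16,
 * yielding the raw bitmap of supported MTC periods.
 */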

u32 intel_pt_validate_hw_cap(enum pt_capabilities cap)
{
        return intel_pt_validate_cap(pt_pmu.caps, cap);
}
EXPORT_SYMBOL_GPL(intel_pt_validate_hw_cap);

static ssize_t pt_cap_show(struct device *cdev,
                           struct device_attribute *attr,
                           char *buf)
{
        struct dev_ext_attribute *ea =
                container_of(attr, struct dev_ext_attribute, attr);
        enum pt_capabilities cap = (long)ea->var;

        return snprintf(buf, PAGE_SIZE, "%x\n", intel_pt_validate_hw_cap(cap));
}

static struct attribute_group pt_cap_group __ro_after_init = {
        .name   = "caps",
};

PMU_FORMAT_ATTR(pt,             "config:0"      );
PMU_FORMAT_ATTR(cyc,            "config:1"      );
PMU_FORMAT_ATTR(pwr_evt,        "config:4"      );
PMU_FORMAT_ATTR(fup_on_ptw,     "config:5"      );
PMU_FORMAT_ATTR(mtc,            "config:9"      );
PMU_FORMAT_ATTR(tsc,            "config:10"     );
PMU_FORMAT_ATTR(noretcomp,      "config:11"     );
PMU_FORMAT_ATTR(ptw,            "config:12"     );
PMU_FORMAT_ATTR(branch,         "config:13"     );
PMU_FORMAT_ATTR(mtc_period,     "config:14-17"  );
PMU_FORMAT_ATTR(cyc_thresh,     "config:19-22"  );
PMU_FORMAT_ATTR(psb_period,     "config:24-27"  );

static struct attribute *pt_formats_attr[] = {
        &format_attr_pt.attr,
        &format_attr_cyc.attr,
        &format_attr_pwr_evt.attr,
        &format_attr_fup_on_ptw.attr,
        &format_attr_mtc.attr,
        &format_attr_tsc.attr,
        &format_attr_noretcomp.attr,
        &format_attr_ptw.attr,
        &format_attr_branch.attr,
        &format_attr_mtc_period.attr,
        &format_attr_cyc_thresh.attr,
        &format_attr_psb_period.attr,
        NULL,
};

static struct attribute_group pt_format_group = {
        .name   = "format",
        .attrs  = pt_formats_attr,
};
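
/*
 * The format attributes above let the perf tool spell attr.config bits by
 * name. For instance (illustrative, assuming a perf tool that knows about
 * intel_pt):
 *
 *   perf record -e intel_pt/cyc=1,cyc_thresh=2/u -- workload
 *
 * sets the CYC packet enable (config:1) and a CYC threshold value of 2
 * (config:19-22), both of which are checked by pt_event_valid() below.
 */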

static ssize_t
pt_timing_attr_show(struct device *dev, struct device_attribute *attr,
                    char *page)
{
        struct perf_pmu_events_attr *pmu_attr =
                container_of(attr, struct perf_pmu_events_attr, attr);

        switch (pmu_attr->id) {
        case 0:
                return sprintf(page, "%lu\n", pt_pmu.max_nonturbo_ratio);
        case 1:
                return sprintf(page, "%u:%u\n",
                               pt_pmu.tsc_art_num,
                               pt_pmu.tsc_art_den);
        default:
                break;
        }

        return -EINVAL;
}

PMU_EVENT_ATTR(max_nonturbo_ratio, timing_attr_max_nonturbo_ratio, 0,
               pt_timing_attr_show);
PMU_EVENT_ATTR(tsc_art_ratio, timing_attr_tsc_art_ratio, 1,
               pt_timing_attr_show);

static struct attribute *pt_timing_attr[] = {
        &timing_attr_max_nonturbo_ratio.attr.attr,
        &timing_attr_tsc_art_ratio.attr.attr,
        NULL,
};

static struct attribute_group pt_timing_group = {
        .attrs  = pt_timing_attr,
};

static const struct attribute_group *pt_attr_groups[] = {
        &pt_cap_group,
        &pt_format_group,
        &pt_timing_group,
        NULL,
};

static int __init pt_pmu_hw_init(void)
{
        struct dev_ext_attribute *de_attrs;
        struct attribute **attrs;
        size_t size;
        u64 reg;
        int ret;
        long i;

        rdmsrl(MSR_PLATFORM_INFO, reg);
        pt_pmu.max_nonturbo_ratio = (reg & 0xff00) >> 8;

        /*
         * If available, read the TSC to core crystal clock ratio;
         * otherwise, a zero numerator stands for "not enumerated",
         * as per the SDM.
         */
        if (boot_cpu_data.cpuid_level >= CPUID_TSC_LEAF) {
                u32 eax, ebx, ecx, edx;

                cpuid(CPUID_TSC_LEAF, &eax, &ebx, &ecx, &edx);

                pt_pmu.tsc_art_num = ebx;
                pt_pmu.tsc_art_den = eax;
        }

        /* model-specific quirks */
        switch (boot_cpu_data.x86_model) {
        case INTEL_FAM6_BROADWELL_CORE:
        case INTEL_FAM6_BROADWELL_XEON_D:
        case INTEL_FAM6_BROADWELL_GT3E:
        case INTEL_FAM6_BROADWELL_X:
                /* not setting BRANCH_EN will #GP, erratum BDM106 */
                pt_pmu.branch_en_always_on = true;
                break;
        default:
                break;
        }

        if (boot_cpu_has(X86_FEATURE_VMX)) {
                /*
                 * Intel SDM, 36.5 "Tracing post-VMXON" says that
                 * "IA32_VMX_MISC[bit 14]" being 1 means PT can trace
                 * post-VMXON.
                 */
                rdmsrl(MSR_IA32_VMX_MISC, reg);
                if (reg & BIT(14))
                        pt_pmu.vmx = true;
        }

        attrs = NULL;

        for (i = 0; i < PT_CPUID_LEAVES; i++) {
                cpuid_count(20, i,
                            &pt_pmu.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM],
                            &pt_pmu.caps[CPUID_EBX + i*PT_CPUID_REGS_NUM],
                            &pt_pmu.caps[CPUID_ECX + i*PT_CPUID_REGS_NUM],
                            &pt_pmu.caps[CPUID_EDX + i*PT_CPUID_REGS_NUM]);
        }

        ret = -ENOMEM;
        size = sizeof(struct attribute *) * (ARRAY_SIZE(pt_caps)+1);
        attrs = kzalloc(size, GFP_KERNEL);
        if (!attrs)
                goto fail;

        size = sizeof(struct dev_ext_attribute) * (ARRAY_SIZE(pt_caps)+1);
        de_attrs = kzalloc(size, GFP_KERNEL);
        if (!de_attrs)
                goto fail;

        for (i = 0; i < ARRAY_SIZE(pt_caps); i++) {
                struct dev_ext_attribute *de_attr = de_attrs + i;

                de_attr->attr.attr.name = pt_caps[i].name;

                sysfs_attr_init(&de_attr->attr.attr);

                de_attr->attr.attr.mode         = S_IRUGO;
                de_attr->attr.show              = pt_cap_show;
                de_attr->var                    = (void *)i;

                attrs[i] = &de_attr->attr.attr;
        }

        pt_cap_group.attrs = attrs;

        return 0;

fail:
        kfree(attrs);

        return ret;
}

#define RTIT_CTL_CYC_PSB (RTIT_CTL_CYCLEACC     | \
                          RTIT_CTL_CYC_THRESH   | \
                          RTIT_CTL_PSB_FREQ)

#define RTIT_CTL_MTC    (RTIT_CTL_MTC_EN        | \
                         RTIT_CTL_MTC_RANGE)

#define RTIT_CTL_PTW    (RTIT_CTL_PTW_EN        | \
                         RTIT_CTL_FUP_ON_PTW)

/*
 * Bit 0 (TraceEn) in the attr.config is meaningless as the
 * corresponding bit in the RTIT_CTL can only be controlled
 * by the driver; therefore, repurpose it to mean: pass
 * through the bit that was previously assumed to be always
 * on for PT, thereby allowing the user to *not* set it if
 * they so wish. See also pt_event_valid() and pt_config().
 */
#define RTIT_CTL_PASSTHROUGH RTIT_CTL_TRACEEN
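
/*
 * Illustrative attr.config values under this scheme: 0x2001
 * (RTIT_CTL_TRACEEN | RTIT_CTL_BRANCH_EN) requests pass-through mode with
 * branch tracing explicitly enabled, 0x1 requests pass-through with branch
 * tracing off, and 0 keeps the legacy always-on BranchEn behavior.
 */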

#define PT_CONFIG_MASK (RTIT_CTL_TRACEEN        | \
                        RTIT_CTL_TSC_EN         | \
                        RTIT_CTL_DISRETC        | \
                        RTIT_CTL_BRANCH_EN      | \
                        RTIT_CTL_CYC_PSB        | \
                        RTIT_CTL_MTC            | \
                        RTIT_CTL_PWR_EVT_EN     | \
                        RTIT_CTL_FUP_ON_PTW     | \
                        RTIT_CTL_PTW_EN)

static bool pt_event_valid(struct perf_event *event)
{
        u64 config = event->attr.config;
        u64 allowed, requested;

        if ((config & PT_CONFIG_MASK) != config)
                return false;

        if (config & RTIT_CTL_CYC_PSB) {
                if (!intel_pt_validate_hw_cap(PT_CAP_psb_cyc))
                        return false;

                allowed = intel_pt_validate_hw_cap(PT_CAP_psb_periods);
                requested = (config & RTIT_CTL_PSB_FREQ) >>
                        RTIT_CTL_PSB_FREQ_OFFSET;
                if (requested && (!(allowed & BIT(requested))))
                        return false;

                allowed = intel_pt_validate_hw_cap(PT_CAP_cycle_thresholds);
                requested = (config & RTIT_CTL_CYC_THRESH) >>
                        RTIT_CTL_CYC_THRESH_OFFSET;
                if (requested && (!(allowed & BIT(requested))))
                        return false;
        }

        if (config & RTIT_CTL_MTC) {
                /*
                 * In the unlikely case that CPUID lists valid mtc periods,
                 * but not the mtc capability, drop out here.
                 *
                 * The spec says that setting mtc period bits while the mtc
                 * bit in CPUID is 0 will #GP, so better safe than sorry.
                 */
                if (!intel_pt_validate_hw_cap(PT_CAP_mtc))
                        return false;

                allowed = intel_pt_validate_hw_cap(PT_CAP_mtc_periods);
                if (!allowed)
                        return false;

                requested = (config & RTIT_CTL_MTC_RANGE) >>
                        RTIT_CTL_MTC_RANGE_OFFSET;

                if (!(allowed & BIT(requested)))
                        return false;
        }

        if (config & RTIT_CTL_PWR_EVT_EN &&
            !intel_pt_validate_hw_cap(PT_CAP_power_event_trace))
                return false;

        if (config & RTIT_CTL_PTW) {
                if (!intel_pt_validate_hw_cap(PT_CAP_ptwrite))
                        return false;

                /* FUPonPTW without PTW doesn't make sense */
                if ((config & RTIT_CTL_FUP_ON_PTW) &&
                    !(config & RTIT_CTL_PTW_EN))
                        return false;
        }

        /*
         * Setting bit 0 (TraceEn in the RTIT_CTL MSR) in the attr.config
         * clears the assumption that BranchEn must always be enabled,
         * as was the case with the first implementation of PT.
         * If this bit is not set, the legacy behavior is preserved
         * for compatibility with older userspace.
         *
         * Re-using bit 0 for this purpose is fine because it is never
         * directly set by the user; previously, attempts at setting it
         * in the attr.config resulted in -EINVAL.
         */
        if (config & RTIT_CTL_PASSTHROUGH) {
                /*
                 * Disallow not setting BRANCH_EN where BRANCH_EN is
                 * always required.
                 */
                if (pt_pmu.branch_en_always_on &&
                    !(config & RTIT_CTL_BRANCH_EN))
                        return false;
        } else {
                /*
                 * Disallow BRANCH_EN without the PASSTHROUGH.
                 */
                if (config & RTIT_CTL_BRANCH_EN)
                        return false;
        }

        return true;
}

/*
 * PT configuration helpers
 * These all are cpu affine and operate on a local PT
 */

/* Address ranges and their corresponding msr configuration registers */
static const struct pt_address_range {
        unsigned long   msr_a;
        unsigned long   msr_b;
        unsigned int    reg_off;
} pt_address_ranges[] = {
        {
                .msr_a   = MSR_IA32_RTIT_ADDR0_A,
                .msr_b   = MSR_IA32_RTIT_ADDR0_B,
                .reg_off = RTIT_CTL_ADDR0_OFFSET,
        },
        {
                .msr_a   = MSR_IA32_RTIT_ADDR1_A,
                .msr_b   = MSR_IA32_RTIT_ADDR1_B,
                .reg_off = RTIT_CTL_ADDR1_OFFSET,
        },
        {
                .msr_a   = MSR_IA32_RTIT_ADDR2_A,
                .msr_b   = MSR_IA32_RTIT_ADDR2_B,
                .reg_off = RTIT_CTL_ADDR2_OFFSET,
        },
        {
                .msr_a   = MSR_IA32_RTIT_ADDR3_A,
                .msr_b   = MSR_IA32_RTIT_ADDR3_B,
                .reg_off = RTIT_CTL_ADDR3_OFFSET,
        }
};

static u64 pt_config_filters(struct perf_event *event)
{
        struct pt_filters *filters = event->hw.addr_filters;
        struct pt *pt = this_cpu_ptr(&pt_ctx);
        unsigned int range = 0;
        u64 rtit_ctl = 0;

        if (!filters)
                return 0;

        perf_event_addr_filters_sync(event);

        for (range = 0; range < filters->nr_filters; range++) {
                struct pt_filter *filter = &filters->filter[range];

                /*
                 * Note, if the range has zero start/end addresses due
                 * to its dynamic object not being loaded yet, we just
                 * go ahead and program a zeroed range, which will simply
                 * produce no data. Note^2: if executable code at 0x0
                 * is a concern, we can set up an "invalid" configuration
                 * such as msr_b < msr_a.
                 */

                /* avoid redundant msr writes */
                if (pt->filters.filter[range].msr_a != filter->msr_a) {
                        wrmsrl(pt_address_ranges[range].msr_a, filter->msr_a);
                        pt->filters.filter[range].msr_a = filter->msr_a;
                }

                if (pt->filters.filter[range].msr_b != filter->msr_b) {
                        wrmsrl(pt_address_ranges[range].msr_b, filter->msr_b);
                        pt->filters.filter[range].msr_b = filter->msr_b;
                }

                rtit_ctl |= filter->config << pt_address_ranges[range].reg_off;
        }

        return rtit_ctl;
}

static void pt_config(struct perf_event *event)
{
        struct pt *pt = this_cpu_ptr(&pt_ctx);
        u64 reg;

        /* First round: clear STATUS, in particular the PSB byte counter. */
        if (!event->hw.config) {
                perf_event_itrace_started(event);
                wrmsrl(MSR_IA32_RTIT_STATUS, 0);
        }

        reg = pt_config_filters(event);
        reg |= RTIT_CTL_TOPA | RTIT_CTL_TRACEEN;

        /*
         * Previously, we had BRANCH_EN on by default, but now that PT has
         * grown features outside of branch tracing, it is useful to allow
         * the user to disable it. Setting bit 0 in the event's attr.config
         * allows BRANCH_EN to pass through instead of being always on. See
         * also the comment in pt_event_valid().
         */
        if (event->attr.config & BIT(0)) {
                reg |= event->attr.config & RTIT_CTL_BRANCH_EN;
        } else {
                reg |= RTIT_CTL_BRANCH_EN;
        }

        if (!event->attr.exclude_kernel)
                reg |= RTIT_CTL_OS;
        if (!event->attr.exclude_user)
                reg |= RTIT_CTL_USR;

        reg |= (event->attr.config & PT_CONFIG_MASK);

        event->hw.config = reg;
        if (READ_ONCE(pt->vmx_on))
                perf_aux_output_flag(&pt->handle, PERF_AUX_FLAG_PARTIAL);
        else
                wrmsrl(MSR_IA32_RTIT_CTL, reg);
}

static void pt_config_stop(struct perf_event *event)
{
        struct pt *pt = this_cpu_ptr(&pt_ctx);
        u64 ctl = READ_ONCE(event->hw.config);

        /* may already be stopped by a PMI */
        if (!(ctl & RTIT_CTL_TRACEEN))
                return;

        ctl &= ~RTIT_CTL_TRACEEN;
        if (!READ_ONCE(pt->vmx_on))
                wrmsrl(MSR_IA32_RTIT_CTL, ctl);

        WRITE_ONCE(event->hw.config, ctl);

        /*
         * A wrmsr that disables trace generation serializes other PT
         * registers and causes all data packets to be written to memory,
         * but a fence is required for the data to become globally visible.
         *
         * The below WMB, separating the data store and the aux_head store,
         * matches the consumer's RMB that separates the aux_head load and
         * the data load.
         */
        wmb();
}

static void pt_config_buffer(void *buf, unsigned int topa_idx,
                             unsigned int output_off)
{
        u64 reg;

        wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, virt_to_phys(buf));

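        /*
         * IA32_RTIT_OUTPUT_MASK_PTRS layout, as described in the SDM
         * (noted here for reference): bits 6:0 are the lower mask and
         * must all be set for ToPA output (hence the 0x7f), bits 31:7
         * hold the index of the current ToPA table entry and bits 63:32
         * the offset into the current output region.
         */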
        reg = 0x7f | ((u64)topa_idx << 7) | ((u64)output_off << 32);

        wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, reg);
}

/*
 * Keep ToPA table-related metadata on the same page as the actual table,
 * taking up a few words from the top
 */

#define TENTS_PER_PAGE (((PAGE_SIZE - 40) / sizeof(struct topa_entry)) - 1)

/**
 * struct topa - page-sized ToPA table with metadata at the top
 * @table:      actual ToPA table entries, as understood by PT hardware
 * @list:       linkage to struct pt_buffer's list of tables
 * @phys:       physical address of this page
 * @offset:     offset of the first entry in this table in the buffer
 * @size:       total size of all entries in this table
 * @last:       index of the last initialized entry in this table
 */
struct topa {
        struct topa_entry       table[TENTS_PER_PAGE];
        struct list_head        list;
        u64                     phys;
        u64                     offset;
        size_t                  size;
        int                     last;
};
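
/*
 * Layout sanity check (illustrative, assuming x86_64 with 4K pages and
 * 8-byte ToPA entries): TENTS_PER_PAGE works out to ((4096 - 40) / 8) - 1
 * == 506 entries, with one slot kept free for the END link; the
 * BUILD_BUG_ON() in pt_init() verifies that the table plus metadata still
 * fit in a single page.
 */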

/* make -1 stand for the last table entry */
#define TOPA_ENTRY(t, i) ((i) == -1 ? &(t)->table[(t)->last] : &(t)->table[(i)])

/**
 * topa_alloc() - allocate page-sized ToPA table
 * @cpu:        CPU on which to allocate.
 * @gfp:        Allocation flags.
 *
 * Return:      On success, return the pointer to ToPA table page.
 */
static struct topa *topa_alloc(int cpu, gfp_t gfp)
{
        int node = cpu_to_node(cpu);
        struct topa *topa;
        struct page *p;

        p = alloc_pages_node(node, gfp | __GFP_ZERO, 0);
        if (!p)
                return NULL;

        topa = page_address(p);
        topa->last = 0;
        topa->phys = page_to_phys(p);

        /*
         * In case of single-entry ToPA, always put the self-referencing END
         * link as the 2nd entry in the table
         */
        if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
                TOPA_ENTRY(topa, 1)->base = topa->phys >> TOPA_SHIFT;
                TOPA_ENTRY(topa, 1)->end = 1;
        }

        return topa;
}

/**
 * topa_free() - free a page-sized ToPA table
 * @topa:       Table to deallocate.
 */
static void topa_free(struct topa *topa)
{
        free_page((unsigned long)topa);
}

/**
 * topa_insert_table() - insert a ToPA table into a buffer
 * @buf:         PT buffer that's being extended.
 * @topa:        New topa table to be inserted.
 *
 * If it's the first table in this buffer, set up buffer's pointers
 * accordingly; otherwise, add an END=1 link entry pointing to @topa
 * to the current "last" table and adjust the last table pointer to @topa.
 */
static void topa_insert_table(struct pt_buffer *buf, struct topa *topa)
{
        struct topa *last = buf->last;

        list_add_tail(&topa->list, &buf->tables);

        if (!buf->first) {
                buf->first = buf->last = buf->cur = topa;
                return;
        }

        topa->offset = last->offset + last->size;
        buf->last = topa;

        if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
                return;

        BUG_ON(last->last != TENTS_PER_PAGE - 1);

        TOPA_ENTRY(last, -1)->base = topa->phys >> TOPA_SHIFT;
        TOPA_ENTRY(last, -1)->end = 1;
}

/**
 * topa_table_full() - check if a ToPA table is filled up
 * @topa:       ToPA table.
 */
static bool topa_table_full(struct topa *topa)
{
        /* single-entry ToPA is a special case */
        if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
                return !!topa->last;

        return topa->last == TENTS_PER_PAGE - 1;
}

/**
 * topa_insert_pages() - create a list of ToPA tables
 * @buf:        PT buffer being initialized.
 * @gfp:        Allocation flags.
 *
 * This initializes a list of ToPA tables with entries from
 * the data_pages provided by rb_alloc_aux().
 *
 * Return:      0 on success or error code.
 */
static int topa_insert_pages(struct pt_buffer *buf, gfp_t gfp)
{
        struct topa *topa = buf->last;
        int order = 0;
        struct page *p;

        p = virt_to_page(buf->data_pages[buf->nr_pages]);
        if (PagePrivate(p))
                order = page_private(p);

        if (topa_table_full(topa)) {
                topa = topa_alloc(buf->cpu, gfp);
                if (!topa)
                        return -ENOMEM;

                topa_insert_table(buf, topa);
        }

        TOPA_ENTRY(topa, -1)->base = page_to_phys(p) >> TOPA_SHIFT;
        TOPA_ENTRY(topa, -1)->size = order;
        if (!buf->snapshot &&
            !intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
                TOPA_ENTRY(topa, -1)->intr = 1;
                TOPA_ENTRY(topa, -1)->stop = 1;
        }

        topa->last++;
        topa->size += sizes(order);

        buf->nr_pages += 1ul << order;

        return 0;
}

/**
 * pt_topa_dump() - print ToPA tables and their entries
 * @buf:        PT buffer.
 */
static void pt_topa_dump(struct pt_buffer *buf)
{
        struct topa *topa;

        list_for_each_entry(topa, &buf->tables, list) {
                int i;

                pr_debug("# table @%p (%016Lx), off %llx size %zx\n", topa->table,
                         topa->phys, topa->offset, topa->size);
                for (i = 0; i < TENTS_PER_PAGE; i++) {
                        pr_debug("# entry @%p (%lx sz %u %c%c%c) raw=%16llx\n",
                                 &topa->table[i],
                                 (unsigned long)topa->table[i].base << TOPA_SHIFT,
                                 sizes(topa->table[i].size),
                                 topa->table[i].end ?  'E' : ' ',
                                 topa->table[i].intr ? 'I' : ' ',
                                 topa->table[i].stop ? 'S' : ' ',
                                 *(u64 *)&topa->table[i]);
                        if ((intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) &&
                             topa->table[i].stop) ||
                            topa->table[i].end)
                                break;
                }
        }
}

/**
 * pt_buffer_advance() - advance to the next output region
 * @buf:        PT buffer.
 *
 * Advance the current pointers in the buffer to the next ToPA entry.
 */
static void pt_buffer_advance(struct pt_buffer *buf)
{
        buf->output_off = 0;
        buf->cur_idx++;

        if (buf->cur_idx == buf->cur->last) {
                if (buf->cur == buf->last)
                        buf->cur = buf->first;
                else
                        buf->cur = list_entry(buf->cur->list.next, struct topa,
                                              list);
                buf->cur_idx = 0;
        }
}

/**
 * pt_update_head() - calculate current offsets and sizes
 * @pt:         Per-cpu pt context.
 *
 * Update buffer's current write pointer position and data size.
 */
static void pt_update_head(struct pt *pt)
{
        struct pt_buffer *buf = perf_get_aux(&pt->handle);
        u64 topa_idx, base, old;

        /* offset of the first region in this table from the beginning of buf */
        base = buf->cur->offset + buf->output_off;

        /* offset of the current output region within this table */
        for (topa_idx = 0; topa_idx < buf->cur_idx; topa_idx++)
                base += sizes(buf->cur->table[topa_idx].size);

        if (buf->snapshot) {
                local_set(&buf->data_size, base);
        } else {
                old = (local64_xchg(&buf->head, base) &
                       ((buf->nr_pages << PAGE_SHIFT) - 1));
                if (base < old)
                        base += buf->nr_pages << PAGE_SHIFT;

                local_add(base - old, &buf->data_size);
        }
}
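
/*
 * Wrap-around example for the non-snapshot path above (illustrative): with
 * a 64-page (256KB) buffer, old == 0x3f000 and a freshly computed base ==
 * 0x1000 mean the hardware wrapped, so base is bumped by the buffer size
 * to 0x41000 and data_size grows by 0x2000 instead of going negative.
 */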

/**
 * pt_buffer_region() - obtain current output region's address
 * @buf:        PT buffer.
 */
static void *pt_buffer_region(struct pt_buffer *buf)
{
        return phys_to_virt(buf->cur->table[buf->cur_idx].base << TOPA_SHIFT);
}

/**
 * pt_buffer_region_size() - obtain current output region's size
 * @buf:        PT buffer.
 */
static size_t pt_buffer_region_size(struct pt_buffer *buf)
{
        return sizes(buf->cur->table[buf->cur_idx].size);
}

/**
 * pt_handle_status() - take care of possible status conditions
 * @pt:         Per-cpu pt context.
 */
static void pt_handle_status(struct pt *pt)
{
        struct pt_buffer *buf = perf_get_aux(&pt->handle);
        int advance = 0;
        u64 status;

        rdmsrl(MSR_IA32_RTIT_STATUS, status);

        if (status & RTIT_STATUS_ERROR) {
                pr_err_ratelimited("ToPA ERROR encountered, trying to recover\n");
                pt_topa_dump(buf);
                status &= ~RTIT_STATUS_ERROR;
        }

        if (status & RTIT_STATUS_STOPPED) {
                status &= ~RTIT_STATUS_STOPPED;

                /*
                 * On systems that only do single-entry ToPA, hitting STOP
                 * means we are already losing data; we need to let the
                 * decoder know.
                 */
                if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) ||
                    buf->output_off == sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size)) {
                        perf_aux_output_flag(&pt->handle,
                                             PERF_AUX_FLAG_TRUNCATED);
                        advance++;
                }
        }

        /*
         * Also, on single-entry ToPA implementations, the interrupt will
         * arrive before the output reaches its output region's boundary.
         */
        if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) &&
            !buf->snapshot &&
            pt_buffer_region_size(buf) - buf->output_off <= TOPA_PMI_MARGIN) {
                void *head = pt_buffer_region(buf);

                /* everything within this margin needs to be zeroed out */
                memset(head + buf->output_off, 0,
                       pt_buffer_region_size(buf) -
                       buf->output_off);
                advance++;
        }

        if (advance)
                pt_buffer_advance(buf);

        wrmsrl(MSR_IA32_RTIT_STATUS, status);
}

/**
 * pt_read_offset() - translate registers into buffer pointers
 * @buf:        PT buffer.
 *
 * Set buffer's output pointers from MSR values.
 */
static void pt_read_offset(struct pt_buffer *buf)
{
        u64 offset, base_topa;

        rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, base_topa);
        buf->cur = phys_to_virt(base_topa);

        rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, offset);
        /* offset within current output region */
        buf->output_off = offset >> 32;
        /* index of current output region within this table */
        buf->cur_idx = (offset & 0xffffff80) >> 7;
}

/**
 * pt_topa_next_entry() - obtain index of the first page in the next ToPA entry
 * @buf:        PT buffer.
 * @pg:         Page offset in the buffer.
 *
 * When advancing to the next output region (ToPA entry), given a page offset
 * into the buffer, we need to find the offset of the first page in the next
 * region.
 */
static unsigned int pt_topa_next_entry(struct pt_buffer *buf, unsigned int pg)
{
        struct topa_entry *te = buf->topa_index[pg];

        /* one region */
        if (buf->first == buf->last && buf->first->last == 1)
                return pg;

        do {
                pg++;
                pg &= buf->nr_pages - 1;
        } while (buf->topa_index[pg] == te);

        return pg;
}

/**
 * pt_buffer_reset_markers() - place interrupt and stop bits in the buffer
 * @buf:        PT buffer.
 * @handle:     Current output handle.
 *
 * Place INT and STOP marks to prevent the overwriting of old data that the
 * consumer hasn't yet collected, and to wake up the consumer after a certain
 * fraction of the buffer has filled up. Only needed and sensible for
 * non-snapshot counters.
 *
 * This obviously relies on buf::head to figure out buffer markers, so it has
 * to be called after pt_buffer_reset_offsets() and before the hardware tracing
 * is enabled.
 */
static int pt_buffer_reset_markers(struct pt_buffer *buf,
                                   struct perf_output_handle *handle)
{
        unsigned long head = local64_read(&buf->head);
        unsigned long idx, npages, wakeup;

        /* can't stop in the middle of an output region */
        if (buf->output_off + handle->size + 1 <
            sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size)) {
                perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
                return -EINVAL;
        }

        /* single entry ToPA is handled by marking all regions STOP=1 INT=1 */
        if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
                return 0;

        /* clear STOP and INT from current entry */
        buf->topa_index[buf->stop_pos]->stop = 0;
        buf->topa_index[buf->stop_pos]->intr = 0;
        buf->topa_index[buf->intr_pos]->intr = 0;

        /* how many pages till the STOP marker */
        npages = handle->size >> PAGE_SHIFT;

        /* if it's on a page boundary, fill up one more page */
        if (!offset_in_page(head + handle->size + 1))
                npages++;

        idx = (head >> PAGE_SHIFT) + npages;
        idx &= buf->nr_pages - 1;
        buf->stop_pos = idx;

        wakeup = handle->wakeup >> PAGE_SHIFT;

        /* in the worst case, wake up the consumer one page before hard stop */
        idx = (head >> PAGE_SHIFT) + npages - 1;
        if (idx > wakeup)
                idx = wakeup;

        idx &= buf->nr_pages - 1;
        buf->intr_pos = idx;

        buf->topa_index[buf->stop_pos]->stop = 1;
        buf->topa_index[buf->stop_pos]->intr = 1;
        buf->topa_index[buf->intr_pos]->intr = 1;

        return 0;
}

/**
 * pt_buffer_setup_topa_index() - build topa_index[] table of regions
 * @buf:        PT buffer.
 *
 * topa_index[] references output regions indexed by offset into the
 * buffer for purposes of quick reverse lookup.
 */
static void pt_buffer_setup_topa_index(struct pt_buffer *buf)
{
        struct topa *cur = buf->first, *prev = buf->last;
        struct topa_entry *te_cur = TOPA_ENTRY(cur, 0),
                *te_prev = TOPA_ENTRY(prev, prev->last - 1);
        int pg = 0, idx = 0;

        while (pg < buf->nr_pages) {
                int tidx;

                /* pages within one topa entry */
                for (tidx = 0; tidx < 1 << te_cur->size; tidx++, pg++)
                        buf->topa_index[pg] = te_prev;

                te_prev = te_cur;

                if (idx == cur->last - 1) {
                        /* advance to next topa table */
                        idx = 0;
                        cur = list_entry(cur->list.next, struct topa, list);
                } else {
                        idx++;
                }
                te_cur = TOPA_ENTRY(cur, idx);
        }
}

/**
 * pt_buffer_reset_offsets() - adjust buffer's write pointers from aux_head
 * @buf:        PT buffer.
 * @head:       Write pointer (aux_head) from AUX buffer.
 *
 * Find the ToPA table and entry corresponding to given @head and set buffer's
 * "current" pointers accordingly. This is done after we have obtained the
 * current aux_head position from a successful call to perf_aux_output_begin()
 * to make sure the hardware is writing to the right place.
 *
 * This function modifies buf::{cur,cur_idx,output_off} that will be programmed
 * into PT msrs when the tracing is enabled and buf::head and buf::data_size,
 * which are used to determine INT and STOP markers' locations by a subsequent
 * call to pt_buffer_reset_markers().
 */
static void pt_buffer_reset_offsets(struct pt_buffer *buf, unsigned long head)
{
        int pg;

        if (buf->snapshot)
                head &= (buf->nr_pages << PAGE_SHIFT) - 1;

        pg = (head >> PAGE_SHIFT) & (buf->nr_pages - 1);
        pg = pt_topa_next_entry(buf, pg);

        buf->cur = (struct topa *)((unsigned long)buf->topa_index[pg] & PAGE_MASK);
        buf->cur_idx = ((unsigned long)buf->topa_index[pg] -
                        (unsigned long)buf->cur) / sizeof(struct topa_entry);
        buf->output_off = head & (sizes(buf->cur->table[buf->cur_idx].size) - 1);

        local64_set(&buf->head, head);
        local_set(&buf->data_size, 0);
}

/**
 * pt_buffer_fini_topa() - deallocate ToPA structure of a buffer
 * @buf:        PT buffer.
 */
static void pt_buffer_fini_topa(struct pt_buffer *buf)
{
        struct topa *topa, *iter;

        list_for_each_entry_safe(topa, iter, &buf->tables, list) {
                /*
                 * right now, this is in free_aux() path only, so
                 * no need to unlink this table from the list
                 */
                topa_free(topa);
        }
}

/**
 * pt_buffer_init_topa() - initialize ToPA table for pt buffer
 * @buf:        PT buffer.
 * @nr_pages:   Number of pages to cover with ToPA entries.
 * @gfp:        Allocation flags.
 */
static int pt_buffer_init_topa(struct pt_buffer *buf, unsigned long nr_pages,
                               gfp_t gfp)
{
        struct topa *topa;
        int err;

        topa = topa_alloc(buf->cpu, gfp);
        if (!topa)
                return -ENOMEM;

        topa_insert_table(buf, topa);

        while (buf->nr_pages < nr_pages) {
                err = topa_insert_pages(buf, gfp);
                if (err) {
                        pt_buffer_fini_topa(buf);
                        return -ENOMEM;
                }
        }

        pt_buffer_setup_topa_index(buf);

        /* link last table to the first one, unless we're double buffering */
        if (intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
                TOPA_ENTRY(buf->last, -1)->base = buf->first->phys >> TOPA_SHIFT;
                TOPA_ENTRY(buf->last, -1)->end = 1;
        }

        pt_topa_dump(buf);
        return 0;
}

/**
 * pt_buffer_setup_aux() - set up topa tables for a PT buffer
 * @event:      PT event; event->cpu is where to allocate (-1 means current).
 * @pages:      Array of pointers to buffer pages passed from perf core.
 * @nr_pages:   Number of pages in the buffer.
 * @snapshot:   If this is a snapshot/overwrite counter.
 *
 * This is a pmu::setup_aux callback that sets up ToPA tables and all the
 * bookkeeping for an AUX buffer.
 *
 * Return:      Our private PT buffer structure.
 */
static void *
pt_buffer_setup_aux(struct perf_event *event, void **pages,
                    int nr_pages, bool snapshot)
{
        struct pt_buffer *buf;
        int node, ret, cpu = event->cpu;

        if (!nr_pages)
                return NULL;

        if (cpu == -1)
                cpu = raw_smp_processor_id();
        node = cpu_to_node(cpu);

        buf = kzalloc_node(offsetof(struct pt_buffer, topa_index[nr_pages]),
                           GFP_KERNEL, node);
        if (!buf)
                return NULL;

        buf->cpu = cpu;
        buf->snapshot = snapshot;
        buf->data_pages = pages;

        INIT_LIST_HEAD(&buf->tables);

        ret = pt_buffer_init_topa(buf, nr_pages, GFP_KERNEL);
        if (ret) {
                kfree(buf);
                return NULL;
        }

        return buf;
}

/**
 * pt_buffer_free_aux() - perf AUX deallocation path callback
 * @data:       PT buffer.
 */
static void pt_buffer_free_aux(void *data)
{
        struct pt_buffer *buf = data;

        pt_buffer_fini_topa(buf);
        kfree(buf);
}

static int pt_addr_filters_init(struct perf_event *event)
{
        struct pt_filters *filters;
        int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);

        if (!intel_pt_validate_hw_cap(PT_CAP_num_address_ranges))
                return 0;

        filters = kzalloc_node(sizeof(struct pt_filters), GFP_KERNEL, node);
        if (!filters)
                return -ENOMEM;

        if (event->parent)
                memcpy(filters, event->parent->hw.addr_filters,
                       sizeof(*filters));

        event->hw.addr_filters = filters;

        return 0;
}

static void pt_addr_filters_fini(struct perf_event *event)
{
        kfree(event->hw.addr_filters);
        event->hw.addr_filters = NULL;
}

static inline bool valid_kernel_ip(unsigned long ip)
{
        return virt_addr_valid(ip) && kernel_ip(ip);
}

static int pt_event_addr_filters_validate(struct list_head *filters)
{
        struct perf_addr_filter *filter;
        int range = 0;

        list_for_each_entry(filter, filters, entry) {
                /*
                 * PT doesn't support single address triggers and
                 * 'start' filters.
                 */
                if (!filter->size ||
                    filter->action == PERF_ADDR_FILTER_ACTION_START)
                        return -EOPNOTSUPP;

                if (!filter->path.dentry) {
                        if (!valid_kernel_ip(filter->offset))
                                return -EINVAL;

                        if (!valid_kernel_ip(filter->offset + filter->size))
                                return -EINVAL;
                }

                if (++range > intel_pt_validate_hw_cap(PT_CAP_num_address_ranges))
                        return -EOPNOTSUPP;
        }

        return 0;
}

static void pt_event_addr_filters_sync(struct perf_event *event)
{
        struct perf_addr_filters_head *head = perf_event_addr_filters(event);
        unsigned long msr_a, msr_b;
        struct perf_addr_filter_range *fr = event->addr_filter_ranges;
        struct pt_filters *filters = event->hw.addr_filters;
        struct perf_addr_filter *filter;
        int range = 0;

        if (!filters)
                return;

        list_for_each_entry(filter, &head->list, entry) {
                if (filter->path.dentry && !fr[range].start) {
                        msr_a = msr_b = 0;
                } else {
                        /* apply the offset */
                        msr_a = fr[range].start;
                        msr_b = msr_a + fr[range].size - 1;
                }

                filters->filter[range].msr_a  = msr_a;
                filters->filter[range].msr_b  = msr_b;
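                /*
                 * ADDRn_CFG values in RTIT_CTL, per the SDM: 1 enables
                 * tracing inside the [msr_a; msr_b] range (FILTER), 2
                 * stops trace packet generation inside it (TraceStop).
                 */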
                if (filter->action == PERF_ADDR_FILTER_ACTION_FILTER)
                        filters->filter[range].config = 1;
                else
                        filters->filter[range].config = 2;
                range++;
        }

        filters->nr_filters = range;
}

/**
 * intel_pt_interrupt() - PT PMI handler
 */
void intel_pt_interrupt(void)
{
        struct pt *pt = this_cpu_ptr(&pt_ctx);
        struct pt_buffer *buf;
        struct perf_event *event = pt->handle.event;

        /*
         * There may be a dangling PT bit in the interrupt status register
         * after PT has been disabled by pt_event_stop(). Make sure we don't
         * do anything (particularly, re-enable) for this event here.
         */
        if (!READ_ONCE(pt->handle_nmi))
                return;

        if (!event)
                return;

        pt_config_stop(event);

        buf = perf_get_aux(&pt->handle);
        if (!buf)
                return;

        pt_read_offset(buf);

        pt_handle_status(pt);

        pt_update_head(pt);

        perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0));

        if (!event->hw.state) {
                int ret;

                buf = perf_aux_output_begin(&pt->handle, event);
                if (!buf) {
                        event->hw.state = PERF_HES_STOPPED;
                        return;
                }

                pt_buffer_reset_offsets(buf, pt->handle.head);
                /* snapshot counters don't use PMI, so it's safe */
                ret = pt_buffer_reset_markers(buf, &pt->handle);
                if (ret) {
                        perf_aux_output_end(&pt->handle, 0);
                        return;
                }

                pt_config_buffer(buf->cur->table, buf->cur_idx,
                                 buf->output_off);
                pt_config(event);
        }
}

void intel_pt_handle_vmx(int on)
{
        struct pt *pt = this_cpu_ptr(&pt_ctx);
        struct perf_event *event;
        unsigned long flags;

        /* PT plays nice with VMX, do nothing */
        if (pt_pmu.vmx)
                return;

        /*
         * VMXON will clear RTIT_CTL.TraceEn; we need to make
         * sure to not try to set it while VMX is on. Disable
         * interrupts to avoid racing with pmu callbacks;
         * concurrent PMI should be handled fine.
         */
        local_irq_save(flags);
        WRITE_ONCE(pt->vmx_on, on);

        /*
         * If an AUX transaction is in progress, it will contain
         * gap(s), so flag it PARTIAL to inform the user.
         */
        event = pt->handle.event;
        if (event)
                perf_aux_output_flag(&pt->handle,
                                     PERF_AUX_FLAG_PARTIAL);

        /* Turn PT back on */
        if (!on && event)
                wrmsrl(MSR_IA32_RTIT_CTL, event->hw.config);

        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(intel_pt_handle_vmx);
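
/*
 * Callers (the VMX code, for instance) are expected to bracket VMX
 * operation with intel_pt_handle_vmx(1) before VMXON and
 * intel_pt_handle_vmx(0) after VMXOFF, so that RTIT_CTL.TraceEn is only
 * touched while the hardware allows it.
 */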

/*
 * PMU callbacks
 */

static void pt_event_start(struct perf_event *event, int mode)
{
        struct hw_perf_event *hwc = &event->hw;
        struct pt *pt = this_cpu_ptr(&pt_ctx);
        struct pt_buffer *buf;

        buf = perf_aux_output_begin(&pt->handle, event);
        if (!buf)
                goto fail_stop;

        pt_buffer_reset_offsets(buf, pt->handle.head);
        if (!buf->snapshot) {
                if (pt_buffer_reset_markers(buf, &pt->handle))
                        goto fail_end_stop;
        }

        WRITE_ONCE(pt->handle_nmi, 1);
        hwc->state = 0;

        pt_config_buffer(buf->cur->table, buf->cur_idx,
                         buf->output_off);
        pt_config(event);

        return;

fail_end_stop:
        perf_aux_output_end(&pt->handle, 0);
fail_stop:
        hwc->state = PERF_HES_STOPPED;
}

static void pt_event_stop(struct perf_event *event, int mode)
{
        struct pt *pt = this_cpu_ptr(&pt_ctx);

        /*
         * Protect against the PMI racing with the disabling wrmsr;
         * see the comment in intel_pt_interrupt().
         */
        WRITE_ONCE(pt->handle_nmi, 0);

        pt_config_stop(event);

        if (event->hw.state == PERF_HES_STOPPED)
                return;

        event->hw.state = PERF_HES_STOPPED;

        if (mode & PERF_EF_UPDATE) {
                struct pt_buffer *buf = perf_get_aux(&pt->handle);

                if (!buf)
                        return;

                if (WARN_ON_ONCE(pt->handle.event != event))
                        return;

                pt_read_offset(buf);

                pt_handle_status(pt);

                pt_update_head(pt);

                if (buf->snapshot)
                        pt->handle.head =
                                local_xchg(&buf->data_size,
                                           buf->nr_pages << PAGE_SHIFT);
                perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0));
        }
}

static void pt_event_del(struct perf_event *event, int mode)
{
        pt_event_stop(event, PERF_EF_UPDATE);
}

static int pt_event_add(struct perf_event *event, int mode)
{
        struct pt *pt = this_cpu_ptr(&pt_ctx);
        struct hw_perf_event *hwc = &event->hw;
        int ret = -EBUSY;

        if (pt->handle.event)
                goto fail;

        if (mode & PERF_EF_START) {
                pt_event_start(event, 0);
                ret = -EINVAL;
                if (hwc->state == PERF_HES_STOPPED)
                        goto fail;
        } else {
                hwc->state = PERF_HES_STOPPED;
        }

        ret = 0;
fail:

        return ret;
}

static void pt_event_read(struct perf_event *event)
{
}

static void pt_event_destroy(struct perf_event *event)
{
        pt_addr_filters_fini(event);
        x86_del_exclusive(x86_lbr_exclusive_pt);
}

static int pt_event_init(struct perf_event *event)
{
        if (event->attr.type != pt_pmu.pmu.type)
                return -ENOENT;

        if (!pt_event_valid(event))
                return -EINVAL;

        if (x86_add_exclusive(x86_lbr_exclusive_pt))
                return -EBUSY;

        if (pt_addr_filters_init(event)) {
                x86_del_exclusive(x86_lbr_exclusive_pt);
                return -ENOMEM;
        }

        event->destroy = pt_event_destroy;

        return 0;
}

void cpu_emergency_stop_pt(void)
{
        struct pt *pt = this_cpu_ptr(&pt_ctx);

        if (pt->handle.event)
                pt_event_stop(pt->handle.event, PERF_EF_UPDATE);
}

static __init int pt_init(void)
{
        int ret, cpu, prior_warn = 0;

        BUILD_BUG_ON(sizeof(struct topa) > PAGE_SIZE);

        if (!boot_cpu_has(X86_FEATURE_INTEL_PT))
                return -ENODEV;

        get_online_cpus();
        for_each_online_cpu(cpu) {
                u64 ctl;

                ret = rdmsrl_safe_on_cpu(cpu, MSR_IA32_RTIT_CTL, &ctl);
                if (!ret && (ctl & RTIT_CTL_TRACEEN))
                        prior_warn++;
        }
        put_online_cpus();

        if (prior_warn) {
                x86_add_exclusive(x86_lbr_exclusive_pt);
                pr_warn("PT is enabled at boot time, doing nothing\n");

                return -EBUSY;
        }

        ret = pt_pmu_hw_init();
        if (ret)
                return ret;

        if (!intel_pt_validate_hw_cap(PT_CAP_topa_output)) {
                pr_warn("ToPA output is not supported on this CPU\n");
                return -ENODEV;
        }

        if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
                pt_pmu.pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG;

        pt_pmu.pmu.capabilities |= PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE;
        pt_pmu.pmu.attr_groups           = pt_attr_groups;
        pt_pmu.pmu.task_ctx_nr           = perf_sw_context;
        pt_pmu.pmu.event_init            = pt_event_init;
        pt_pmu.pmu.add                   = pt_event_add;
        pt_pmu.pmu.del                   = pt_event_del;
        pt_pmu.pmu.start                 = pt_event_start;
        pt_pmu.pmu.stop                  = pt_event_stop;
        pt_pmu.pmu.read                  = pt_event_read;
        pt_pmu.pmu.setup_aux             = pt_buffer_setup_aux;
        pt_pmu.pmu.free_aux              = pt_buffer_free_aux;
        pt_pmu.pmu.addr_filters_sync     = pt_event_addr_filters_sync;
        pt_pmu.pmu.addr_filters_validate = pt_event_addr_filters_validate;
        pt_pmu.pmu.nr_addr_filters       =
                intel_pt_validate_hw_cap(PT_CAP_num_address_ranges);

        ret = perf_pmu_register(&pt_pmu.pmu, "intel_pt", -1);

        return ret;
}
arch_initcall(pt_init);