TOMOYO Linux Cross Reference
Linux/tools/perf/arch/x86/util/intel-pt.c

  1 // SPDX-License-Identifier: GPL-2.0-only
  2 /*
  3  * intel_pt.c: Intel Processor Trace support
  4  * Copyright (c) 2013-2015, Intel Corporation.
  5  */
  6 
  7 #include <errno.h>
  8 #include <stdbool.h>
  9 #include <linux/kernel.h>
 10 #include <linux/types.h>
 11 #include <linux/bitops.h>
 12 #include <linux/log2.h>
 13 #include <linux/zalloc.h>
 14 #include <cpuid.h>
 15 
 16 #include "../../../util/session.h"
 17 #include "../../../util/event.h"
 18 #include "../../../util/evlist.h"
 19 #include "../../../util/evsel.h"
 20 #include "../../../util/evsel_config.h"
 21 #include "../../../util/cpumap.h"
 22 #include "../../../util/mmap.h"
 23 #include <subcmd/parse-options.h>
 24 #include "../../../util/parse-events.h"
 25 #include "../../../util/pmu.h"
 26 #include "../../../util/debug.h"
 27 #include "../../../util/auxtrace.h"
 28 #include "../../../util/record.h"
 29 #include "../../../util/target.h"
 30 #include "../../../util/tsc.h"
 31 #include <internal/lib.h> // page_size
 32 #include "../../../util/intel-pt.h"
 33 
 34 #define KiB(x) ((x) * 1024)
 35 #define MiB(x) ((x) * 1024 * 1024)
 36 #define KiB_MASK(x) (KiB(x) - 1)
 37 #define MiB_MASK(x) (MiB(x) - 1)
 38 
 39 #define INTEL_PT_PSB_PERIOD_NEAR        256
 40 
 41 struct intel_pt_snapshot_ref {
 42         void *ref_buf;
 43         size_t ref_offset;
 44         bool wrapped;
 45 };
 46 
 47 struct intel_pt_recording {
 48         struct auxtrace_record          itr;
 49         struct perf_pmu                 *intel_pt_pmu;
 50         int                             have_sched_switch;
 51         struct evlist           *evlist;
 52         bool                            snapshot_mode;
 53         bool                            snapshot_init_done;
 54         size_t                          snapshot_size;
 55         size_t                          snapshot_ref_buf_size;
 56         int                             snapshot_ref_cnt;
 57         struct intel_pt_snapshot_ref    *snapshot_refs;
 58         size_t                          priv_size;
 59 };
 60 
 61 static int intel_pt_parse_terms_with_default(struct list_head *formats,
 62                                              const char *str,
 63                                              u64 *config)
 64 {
 65         struct list_head *terms;
 66         struct perf_event_attr attr = { .size = 0, };
 67         int err;
 68 
 69         terms = malloc(sizeof(struct list_head));
 70         if (!terms)
 71                 return -ENOMEM;
 72 
 73         INIT_LIST_HEAD(terms);
 74 
 75         err = parse_events_terms(terms, str);
 76         if (err)
 77                 goto out_free;
 78 
 79         attr.config = *config;
 80         err = perf_pmu__config_terms(formats, &attr, terms, true, NULL);
 81         if (err)
 82                 goto out_free;
 83 
 84         *config = attr.config;
 85 out_free:
 86         parse_events_terms__delete(terms);
 87         return err;
 88 }
 89 
 90 static int intel_pt_parse_terms(struct list_head *formats, const char *str,
 91                                 u64 *config)
 92 {
 93         *config = 0;
 94         return intel_pt_parse_terms_with_default(formats, str, config);
 95 }
 96 
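    /*
     * Gather the bits of 'bits' selected by 'mask' into the low-order bits
     * of the result, preserving their relative order.
     */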
 97 static u64 intel_pt_masked_bits(u64 mask, u64 bits)
 98 {
 99         const u64 top_bit = 1ULL << 63;
100         u64 res = 0;
101         int i;
102 
103         for (i = 0; i < 64; i++) {
104                 if (mask & top_bit) {
105                         res <<= 1;
106                         if (bits & top_bit)
107                                 res |= 1;
108                 }
109                 mask <<= 1;
110                 bits <<= 1;
111         }
112 
113         return res;
114 }
115 
116 static int intel_pt_read_config(struct perf_pmu *intel_pt_pmu, const char *str,
117                                 struct evlist *evlist, u64 *res)
118 {
119         struct evsel *evsel;
120         u64 mask;
121 
122         *res = 0;
123 
124         mask = perf_pmu__format_bits(&intel_pt_pmu->format, str);
125         if (!mask)
126                 return -EINVAL;
127 
128         evlist__for_each_entry(evlist, evsel) {
129                 if (evsel->core.attr.type == intel_pt_pmu->type) {
130                         *res = intel_pt_masked_bits(mask, evsel->core.attr.config);
131                         return 0;
132                 }
133         }
134 
135         return -EINVAL;
136 }
137 
138 static size_t intel_pt_psb_period(struct perf_pmu *intel_pt_pmu,
139                                   struct evlist *evlist)
140 {
141         u64 val;
142         int err, topa_multiple_entries;
143         size_t psb_period;
144 
145         if (perf_pmu__scan_file(intel_pt_pmu, "caps/topa_multiple_entries",
146                                 "%d", &topa_multiple_entries) != 1)
147                 topa_multiple_entries = 0;
148 
149         /*
150          * Use caps/topa_multiple_entries to indicate early hardware that had
151          * extra frequent PSBs.
152          */
153         if (!topa_multiple_entries) {
154                 psb_period = 256;
155                 goto out;
156         }
157 
158         err = intel_pt_read_config(intel_pt_pmu, "psb_period", evlist, &val);
159         if (err)
160                 val = 0;
161 
162         psb_period = 1 << (val + 11);
163 out:
164         pr_debug2("%s psb_period %zu\n", intel_pt_pmu->name, psb_period);
165         return psb_period;
166 }
167 
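    /*
     * Pick a set bit from 'bits': the highest set bit at or below 'target'
     * if there is one, otherwise the lowest set bit above it.  Returns -1
     * if no bits are set.
     */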
168 static int intel_pt_pick_bit(int bits, int target)
169 {
170         int pos, pick = -1;
171 
172         for (pos = 0; bits; bits >>= 1, pos++) {
173                 if (bits & 1) {
174                         if (pos <= target || pick < 0)
175                                 pick = pos;
176                         if (pos >= target)
177                                 break;
178                 }
179         }
180 
181         return pick;
182 }
183 
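    /*
     * Build the default config terms: always select "tsc", and add mtc,
     * mtc_period, psb_period and pt,branch terms when the PMU's capability
     * and format files indicate they are supported.
     */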
184 static u64 intel_pt_default_config(struct perf_pmu *intel_pt_pmu)
185 {
186         char buf[256];
187         int mtc, mtc_periods = 0, mtc_period;
188         int psb_cyc, psb_periods, psb_period;
189         int pos = 0;
190         u64 config;
191         char c;
192 
193         pos += scnprintf(buf + pos, sizeof(buf) - pos, "tsc");
194 
195         if (perf_pmu__scan_file(intel_pt_pmu, "caps/mtc", "%d",
196                                 &mtc) != 1)
197                 mtc = 1;
198 
199         if (mtc) {
200                 if (perf_pmu__scan_file(intel_pt_pmu, "caps/mtc_periods", "%x",
201                                         &mtc_periods) != 1)
202                         mtc_periods = 0;
203                 if (mtc_periods) {
204                         mtc_period = intel_pt_pick_bit(mtc_periods, 3);
205                         pos += scnprintf(buf + pos, sizeof(buf) - pos,
206                                          ",mtc,mtc_period=%d", mtc_period);
207                 }
208         }
209 
210         if (perf_pmu__scan_file(intel_pt_pmu, "caps/psb_cyc", "%d",
211                                 &psb_cyc) != 1)
212                 psb_cyc = 1;
213 
214         if (psb_cyc && mtc_periods) {
215                 if (perf_pmu__scan_file(intel_pt_pmu, "caps/psb_periods", "%x",
216                                         &psb_periods) != 1)
217                         psb_periods = 0;
218                 if (psb_periods) {
219                         psb_period = intel_pt_pick_bit(psb_periods, 3);
220                         pos += scnprintf(buf + pos, sizeof(buf) - pos,
221                                          ",psb_period=%d", psb_period);
222                 }
223         }
224 
225         if (perf_pmu__scan_file(intel_pt_pmu, "format/pt", "%c", &c) == 1 &&
226             perf_pmu__scan_file(intel_pt_pmu, "format/branch", "%c", &c) == 1)
227                 pos += scnprintf(buf + pos, sizeof(buf) - pos, ",pt,branch");
228 
229         pr_debug2("%s default config: %s\n", intel_pt_pmu->name, buf);
230 
231         intel_pt_parse_terms(&intel_pt_pmu->format, buf, &config);
232 
233         return config;
234 }
235 
236 static int intel_pt_parse_snapshot_options(struct auxtrace_record *itr,
237                                            struct record_opts *opts,
238                                            const char *str)
239 {
240         struct intel_pt_recording *ptr =
241                         container_of(itr, struct intel_pt_recording, itr);
242         unsigned long long snapshot_size = 0;
243         char *endptr;
244 
245         if (str) {
246                 snapshot_size = strtoull(str, &endptr, 0);
247                 if (*endptr || snapshot_size > SIZE_MAX)
248                         return -1;
249         }
250 
251         opts->auxtrace_snapshot_mode = true;
252         opts->auxtrace_snapshot_size = snapshot_size;
253 
254         ptr->snapshot_size = snapshot_size;
255 
256         return 0;
257 }
258 
259 struct perf_event_attr *
260 intel_pt_pmu_default_config(struct perf_pmu *intel_pt_pmu)
261 {
262         struct perf_event_attr *attr;
263 
264         attr = zalloc(sizeof(struct perf_event_attr));
265         if (!attr)
266                 return NULL;
267 
268         attr->config = intel_pt_default_config(intel_pt_pmu);
269 
270         intel_pt_pmu->selectable = true;
271 
272         return attr;
273 }
274 
275 static const char *intel_pt_find_filter(struct evlist *evlist,
276                                         struct perf_pmu *intel_pt_pmu)
277 {
278         struct evsel *evsel;
279 
280         evlist__for_each_entry(evlist, evsel) {
281                 if (evsel->core.attr.type == intel_pt_pmu->type)
282                         return evsel->filter;
283         }
284 
285         return NULL;
286 }
287 
288 static size_t intel_pt_filter_bytes(const char *filter)
289 {
290         size_t len = filter ? strlen(filter) : 0;
291 
292         return len ? roundup(len + 1, 8) : 0;
293 }
294 
295 static size_t
296 intel_pt_info_priv_size(struct auxtrace_record *itr, struct evlist *evlist)
297 {
298         struct intel_pt_recording *ptr =
299                         container_of(itr, struct intel_pt_recording, itr);
300         const char *filter = intel_pt_find_filter(evlist, ptr->intel_pt_pmu);
301 
302         ptr->priv_size = (INTEL_PT_AUXTRACE_PRIV_MAX * sizeof(u64)) +
303                          intel_pt_filter_bytes(filter);
304 
305         return ptr->priv_size;
306 }
307 
308 static void intel_pt_tsc_ctc_ratio(u32 *n, u32 *d)
309 {
310         unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
311 
312         __get_cpuid(0x15, &eax, &ebx, &ecx, &edx);
313         *n = ebx;
314         *d = eax;
315 }
316 
317 static int intel_pt_info_fill(struct auxtrace_record *itr,
318                               struct perf_session *session,
319                               struct perf_record_auxtrace_info *auxtrace_info,
320                               size_t priv_size)
321 {
322         struct intel_pt_recording *ptr =
323                         container_of(itr, struct intel_pt_recording, itr);
324         struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
325         struct perf_event_mmap_page *pc;
326         struct perf_tsc_conversion tc = { .time_mult = 0, };
327         bool cap_user_time_zero = false, per_cpu_mmaps;
328         u64 tsc_bit, mtc_bit, mtc_freq_bits, cyc_bit, noretcomp_bit;
329         u32 tsc_ctc_ratio_n, tsc_ctc_ratio_d;
330         unsigned long max_non_turbo_ratio;
331         size_t filter_str_len;
332         const char *filter;
333         __u64 *info;
334         int err;
335 
336         if (priv_size != ptr->priv_size)
337                 return -EINVAL;
338 
339         intel_pt_parse_terms(&intel_pt_pmu->format, "tsc", &tsc_bit);
340         intel_pt_parse_terms(&intel_pt_pmu->format, "noretcomp",
341                              &noretcomp_bit);
342         intel_pt_parse_terms(&intel_pt_pmu->format, "mtc", &mtc_bit);
343         mtc_freq_bits = perf_pmu__format_bits(&intel_pt_pmu->format,
344                                               "mtc_period");
345         intel_pt_parse_terms(&intel_pt_pmu->format, "cyc", &cyc_bit);
346 
347         intel_pt_tsc_ctc_ratio(&tsc_ctc_ratio_n, &tsc_ctc_ratio_d);
348 
349         if (perf_pmu__scan_file(intel_pt_pmu, "max_nonturbo_ratio",
350                                 "%lu", &max_non_turbo_ratio) != 1)
351                 max_non_turbo_ratio = 0;
352 
353         filter = intel_pt_find_filter(session->evlist, ptr->intel_pt_pmu);
354         filter_str_len = filter ? strlen(filter) : 0;
355 
356         if (!session->evlist->core.nr_mmaps)
357                 return -EINVAL;
358 
359         pc = session->evlist->mmap[0].core.base;
360         if (pc) {
361                 err = perf_read_tsc_conversion(pc, &tc);
362                 if (err) {
363                         if (err != -EOPNOTSUPP)
364                                 return err;
365                 } else {
366                         cap_user_time_zero = tc.time_mult != 0;
367                 }
368                 if (!cap_user_time_zero)
369                         ui__warning("Intel Processor Trace: TSC not available\n");
370         }
371 
372         per_cpu_mmaps = !perf_cpu_map__empty(session->evlist->core.cpus);
373 
374         auxtrace_info->type = PERF_AUXTRACE_INTEL_PT;
375         auxtrace_info->priv[INTEL_PT_PMU_TYPE] = intel_pt_pmu->type;
376         auxtrace_info->priv[INTEL_PT_TIME_SHIFT] = tc.time_shift;
377         auxtrace_info->priv[INTEL_PT_TIME_MULT] = tc.time_mult;
378         auxtrace_info->priv[INTEL_PT_TIME_ZERO] = tc.time_zero;
379         auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO] = cap_user_time_zero;
380         auxtrace_info->priv[INTEL_PT_TSC_BIT] = tsc_bit;
381         auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT] = noretcomp_bit;
382         auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH] = ptr->have_sched_switch;
383         auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE] = ptr->snapshot_mode;
384         auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS] = per_cpu_mmaps;
385         auxtrace_info->priv[INTEL_PT_MTC_BIT] = mtc_bit;
386         auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS] = mtc_freq_bits;
387         auxtrace_info->priv[INTEL_PT_TSC_CTC_N] = tsc_ctc_ratio_n;
388         auxtrace_info->priv[INTEL_PT_TSC_CTC_D] = tsc_ctc_ratio_d;
389         auxtrace_info->priv[INTEL_PT_CYC_BIT] = cyc_bit;
390         auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO] = max_non_turbo_ratio;
391         auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] = filter_str_len;
392 
393         info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1;
394 
395         if (filter_str_len) {
396                 size_t len = intel_pt_filter_bytes(filter);
397 
398                 strncpy((char *)info, filter, len);
399                 info += len >> 3;
400         }
401 
402         return 0;
403 }
404 
405 static int intel_pt_track_switches(struct evlist *evlist)
406 {
407         const char *sched_switch = "sched:sched_switch";
408         struct evsel *evsel;
409         int err;
410 
411         if (!perf_evlist__can_select_event(evlist, sched_switch))
412                 return -EPERM;
413 
414         err = parse_events(evlist, sched_switch, NULL);
415         if (err) {
416                 pr_debug2("%s: failed to parse %s, error %d\n",
417                           __func__, sched_switch, err);
418                 return err;
419         }
420 
421         evsel = evlist__last(evlist);
422 
423         perf_evsel__set_sample_bit(evsel, CPU);
424         perf_evsel__set_sample_bit(evsel, TIME);
425 
426         evsel->core.system_wide = true;
427         evsel->no_aux_samples = true;
428         evsel->immediate = true;
429 
430         return 0;
431 }
432 
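    /*
     * Render the 'valid' bitmask as a human-readable list of accepted
     * values, e.g. "0,2-5,8", for use in error messages.
     */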
433 static void intel_pt_valid_str(char *str, size_t len, u64 valid)
434 {
435         unsigned int val, last = 0, state = 1;
436         int p = 0;
437 
438         str[0] = '\0';
439 
440         for (val = 0; val <= 64; val++, valid >>= 1) {
441                 if (valid & 1) {
442                         last = val;
443                         switch (state) {
444                         case 0:
445                                 p += scnprintf(str + p, len - p, ",");
446                                 /* Fall through */
447                         case 1:
448                                 p += scnprintf(str + p, len - p, "%u", val);
449                                 state = 2;
450                                 break;
451                         case 2:
452                                 state = 3;
453                                 break;
454                         case 3:
455                                 state = 4;
456                                 break;
457                         default:
458                                 break;
459                         }
460                 } else {
461                         switch (state) {
462                         case 3:
463                                 p += scnprintf(str + p, len - p, ",%u", last);
464                                 state = 0;
465                                 break;
466                         case 4:
467                                 p += scnprintf(str + p, len - p, "-%u", last);
468                                 state = 0;
469                                 break;
470                         default:
471                                 break;
472                         }
473                         if (state != 1)
474                                 state = 0;
475                 }
476         }
477 }
478 
479 static int intel_pt_val_config_term(struct perf_pmu *intel_pt_pmu,
480                                     const char *caps, const char *name,
481                                     const char *supported, u64 config)
482 {
483         char valid_str[256];
484         unsigned int shift;
485         unsigned long long valid;
486         u64 bits;
487         int ok;
488 
489         if (perf_pmu__scan_file(intel_pt_pmu, caps, "%llx", &valid) != 1)
490                 valid = 0;
491 
492         if (supported &&
493             perf_pmu__scan_file(intel_pt_pmu, supported, "%d", &ok) == 1 && !ok)
494                 valid = 0;
495 
496         valid |= 1;
497 
498         bits = perf_pmu__format_bits(&intel_pt_pmu->format, name);
499 
500         config &= bits;
501 
502         for (shift = 0; bits && !(bits & 1); shift++)
503                 bits >>= 1;
504 
505         config >>= shift;
506 
507         if (config > 63)
508                 goto out_err;
509 
510         if (valid & (1 << config))
511                 return 0;
512 out_err:
513         intel_pt_valid_str(valid_str, sizeof(valid_str), valid);
514         pr_err("Invalid %s for %s. Valid values are: %s\n",
515                name, INTEL_PT_PMU_NAME, valid_str);
516         return -EINVAL;
517 }
518 
519 static int intel_pt_validate_config(struct perf_pmu *intel_pt_pmu,
520                                     struct evsel *evsel)
521 {
522         int err;
523         char c;
524 
525         if (!evsel)
526                 return 0;
527 
528         /*
529          * If supported, force pass-through config term (pt=1) even if user
530          * sets pt=0, which avoids senseless kernel errors.
531          */
532         if (perf_pmu__scan_file(intel_pt_pmu, "format/pt", "%c", &c) == 1 &&
533             !(evsel->core.attr.config & 1)) {
534                 pr_warning("pt=0 doesn't make sense, forcing pt=1\n");
535                 evsel->core.attr.config |= 1;
536         }
537 
538         err = intel_pt_val_config_term(intel_pt_pmu, "caps/cycle_thresholds",
539                                        "cyc_thresh", "caps/psb_cyc",
540                                        evsel->core.attr.config);
541         if (err)
542                 return err;
543 
544         err = intel_pt_val_config_term(intel_pt_pmu, "caps/mtc_periods",
545                                        "mtc_period", "caps/mtc",
546                                        evsel->core.attr.config);
547         if (err)
548                 return err;
549 
550         return intel_pt_val_config_term(intel_pt_pmu, "caps/psb_periods",
551                                         "psb_period", "caps/psb_cyc",
552                                         evsel->core.attr.config);
553 }
554 
555 static void intel_pt_config_sample_mode(struct perf_pmu *intel_pt_pmu,
556                                         struct evsel *evsel)
557 {
558         struct perf_evsel_config_term *term;
559         u64 user_bits = 0, bits;
560 
561         term = perf_evsel__get_config_term(evsel, CFG_CHG);
562         if (term)
563                 user_bits = term->val.cfg_chg;
564 
565         bits = perf_pmu__format_bits(&intel_pt_pmu->format, "psb_period");
566 
567         /* Did user change psb_period */
568         if (bits & user_bits)
569                 return;
570 
571         /* Set psb_period to 0 */
572         evsel->core.attr.config &= ~bits;
573 }
574 
575 static void intel_pt_min_max_sample_sz(struct evlist *evlist,
576                                        size_t *min_sz, size_t *max_sz)
577 {
578         struct evsel *evsel;
579 
580         evlist__for_each_entry(evlist, evsel) {
581                 size_t sz = evsel->core.attr.aux_sample_size;
582 
583                 if (!sz)
584                         continue;
585                 if (min_sz && (sz < *min_sz || !*min_sz))
586                         *min_sz = sz;
587                 if (max_sz && sz > *max_sz)
588                         *max_sz = sz;
589         }
590 }
591 
592 /*
593  * Currently, there is not enough information to disambiguate different PEBS
594  * events, so only allow one.
595  */
596 static bool intel_pt_too_many_aux_output(struct evlist *evlist)
597 {
598         struct evsel *evsel;
599         int aux_output_cnt = 0;
600 
601         evlist__for_each_entry(evlist, evsel)
602                 aux_output_cnt += !!evsel->core.attr.aux_output;
603 
604         if (aux_output_cnt > 1) {
605                 pr_err(INTEL_PT_PMU_NAME " supports at most one event with aux-output\n");
606                 return true;
607         }
608 
609         return false;
610 }
611 
612 static int intel_pt_recording_options(struct auxtrace_record *itr,
613                                       struct evlist *evlist,
614                                       struct record_opts *opts)
615 {
616         struct intel_pt_recording *ptr =
617                         container_of(itr, struct intel_pt_recording, itr);
618         struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
619         bool have_timing_info, need_immediate = false;
620         struct evsel *evsel, *intel_pt_evsel = NULL;
621         const struct perf_cpu_map *cpus = evlist->core.cpus;
622         bool privileged = perf_event_paranoid_check(-1);
623         u64 tsc_bit;
624         int err;
625 
626         ptr->evlist = evlist;
627         ptr->snapshot_mode = opts->auxtrace_snapshot_mode;
628 
629         evlist__for_each_entry(evlist, evsel) {
630                 if (evsel->core.attr.type == intel_pt_pmu->type) {
631                         if (intel_pt_evsel) {
632                                 pr_err("There may be only one " INTEL_PT_PMU_NAME " event\n");
633                                 return -EINVAL;
634                         }
635                         evsel->core.attr.freq = 0;
636                         evsel->core.attr.sample_period = 1;
637                         evsel->no_aux_samples = true;
638                         intel_pt_evsel = evsel;
639                         opts->full_auxtrace = true;
640                 }
641         }
642 
643         if (opts->auxtrace_snapshot_mode && !opts->full_auxtrace) {
644                 pr_err("Snapshot mode (-S option) requires " INTEL_PT_PMU_NAME " PMU event (-e " INTEL_PT_PMU_NAME ")\n");
645                 return -EINVAL;
646         }
647 
648         if (opts->auxtrace_snapshot_mode && opts->auxtrace_sample_mode) {
649                 pr_err("Snapshot mode (" INTEL_PT_PMU_NAME " PMU) and sample trace cannot be used together\n");
650                 return -EINVAL;
651         }
652 
653         if (opts->use_clockid) {
654                 pr_err("Cannot use clockid (-k option) with " INTEL_PT_PMU_NAME "\n");
655                 return -EINVAL;
656         }
657 
658         if (intel_pt_too_many_aux_output(evlist))
659                 return -EINVAL;
660 
661         if (!opts->full_auxtrace)
662                 return 0;
663 
664         if (opts->auxtrace_sample_mode)
665                 intel_pt_config_sample_mode(intel_pt_pmu, intel_pt_evsel);
666 
667         err = intel_pt_validate_config(intel_pt_pmu, intel_pt_evsel);
668         if (err)
669                 return err;
670 
671         /* Set default sizes for snapshot mode */
672         if (opts->auxtrace_snapshot_mode) {
673                 size_t psb_period = intel_pt_psb_period(intel_pt_pmu, evlist);
674 
675                 if (!opts->auxtrace_snapshot_size && !opts->auxtrace_mmap_pages) {
676                         if (privileged) {
677                                 opts->auxtrace_mmap_pages = MiB(4) / page_size;
678                         } else {
679                                 opts->auxtrace_mmap_pages = KiB(128) / page_size;
680                                 if (opts->mmap_pages == UINT_MAX)
681                                         opts->mmap_pages = KiB(256) / page_size;
682                         }
683                 } else if (!opts->auxtrace_mmap_pages && !privileged &&
684                            opts->mmap_pages == UINT_MAX) {
685                         opts->mmap_pages = KiB(256) / page_size;
686                 }
687                 if (!opts->auxtrace_snapshot_size)
688                         opts->auxtrace_snapshot_size =
689                                 opts->auxtrace_mmap_pages * (size_t)page_size;
690                 if (!opts->auxtrace_mmap_pages) {
691                         size_t sz = opts->auxtrace_snapshot_size;
692 
693                         sz = round_up(sz, page_size) / page_size;
694                         opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
695                 }
696                 if (opts->auxtrace_snapshot_size >
697                                 opts->auxtrace_mmap_pages * (size_t)page_size) {
698                         pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
699                                opts->auxtrace_snapshot_size,
700                                opts->auxtrace_mmap_pages * (size_t)page_size);
701                         return -EINVAL;
702                 }
703                 if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages) {
704                         pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
705                         return -EINVAL;
706                 }
707                 pr_debug2("Intel PT snapshot size: %zu\n",
708                           opts->auxtrace_snapshot_size);
709                 if (psb_period &&
710                     opts->auxtrace_snapshot_size <= psb_period +
711                                                   INTEL_PT_PSB_PERIOD_NEAR)
712                         ui__warning("Intel PT snapshot size (%zu) may be too small for PSB period (%zu)\n",
713                                     opts->auxtrace_snapshot_size, psb_period);
714         }
715 
716         /* Set default sizes for sample mode */
717         if (opts->auxtrace_sample_mode) {
718                 size_t psb_period = intel_pt_psb_period(intel_pt_pmu, evlist);
719                 size_t min_sz = 0, max_sz = 0;
720 
721                 intel_pt_min_max_sample_sz(evlist, &min_sz, &max_sz);
722                 if (!opts->auxtrace_mmap_pages && !privileged &&
723                     opts->mmap_pages == UINT_MAX)
724                         opts->mmap_pages = KiB(256) / page_size;
725                 if (!opts->auxtrace_mmap_pages) {
726                         size_t sz = round_up(max_sz, page_size) / page_size;
727 
728                         opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
729                 }
730                 if (max_sz > opts->auxtrace_mmap_pages * (size_t)page_size) {
731                         pr_err("Sample size %zu must not be greater than AUX area tracing mmap size %zu\n",
732                                max_sz,
733                                opts->auxtrace_mmap_pages * (size_t)page_size);
734                         return -EINVAL;
735                 }
736                 pr_debug2("Intel PT min. sample size: %zu max. sample size: %zu\n",
737                           min_sz, max_sz);
738                 if (psb_period &&
739                     min_sz <= psb_period + INTEL_PT_PSB_PERIOD_NEAR)
740                         ui__warning("Intel PT sample size (%zu) may be too small for PSB period (%zu)\n",
741                                     min_sz, psb_period);
742         }
743 
744         /* Set default sizes for full trace mode */
745         if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
746                 if (privileged) {
747                         opts->auxtrace_mmap_pages = MiB(4) / page_size;
748                 } else {
749                         opts->auxtrace_mmap_pages = KiB(128) / page_size;
750                         if (opts->mmap_pages == UINT_MAX)
751                                 opts->mmap_pages = KiB(256) / page_size;
752                 }
753         }
754 
755         /* Validate auxtrace_mmap_pages */
756         if (opts->auxtrace_mmap_pages) {
757                 size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;
758                 size_t min_sz;
759 
760                 if (opts->auxtrace_snapshot_mode || opts->auxtrace_sample_mode)
761                         min_sz = KiB(4);
762                 else
763                         min_sz = KiB(8);
764 
765                 if (sz < min_sz || !is_power_of_2(sz)) {
766                         pr_err("Invalid mmap size for Intel Processor Trace: must be at least %zuKiB and a power of 2\n",
767                                min_sz / 1024);
768                         return -EINVAL;
769                 }
770         }
771 
772         intel_pt_parse_terms(&intel_pt_pmu->format, "tsc", &tsc_bit);
773 
774         if (opts->full_auxtrace && (intel_pt_evsel->core.attr.config & tsc_bit))
775                 have_timing_info = true;
776         else
777                 have_timing_info = false;
778 
779         /*
780          * Per-cpu recording needs sched_switch events to distinguish different
781          * threads.
782          */
783         if (have_timing_info && !perf_cpu_map__empty(cpus)) {
784                 if (perf_can_record_switch_events()) {
785                         bool cpu_wide = !target__none(&opts->target) &&
786                                         !target__has_task(&opts->target);
787 
788                         if (!cpu_wide && perf_can_record_cpu_wide()) {
789                                 struct evsel *switch_evsel;
790 
791                                 err = parse_events(evlist, "dummy:u", NULL);
792                                 if (err)
793                                         return err;
794 
795                                 switch_evsel = evlist__last(evlist);
796 
797                                 switch_evsel->core.attr.freq = 0;
798                                 switch_evsel->core.attr.sample_period = 1;
799                                 switch_evsel->core.attr.context_switch = 1;
800 
801                                 switch_evsel->core.system_wide = true;
802                                 switch_evsel->no_aux_samples = true;
803                                 switch_evsel->immediate = true;
804 
805                                 perf_evsel__set_sample_bit(switch_evsel, TID);
806                                 perf_evsel__set_sample_bit(switch_evsel, TIME);
807                                 perf_evsel__set_sample_bit(switch_evsel, CPU);
808                                 perf_evsel__reset_sample_bit(switch_evsel, BRANCH_STACK);
809 
810                                 opts->record_switch_events = false;
811                                 ptr->have_sched_switch = 3;
812                         } else {
813                                 opts->record_switch_events = true;
814                                 need_immediate = true;
815                                 if (cpu_wide)
816                                         ptr->have_sched_switch = 3;
817                                 else
818                                         ptr->have_sched_switch = 2;
819                         }
820                 } else {
821                         err = intel_pt_track_switches(evlist);
822                         if (err == -EPERM)
823                                 pr_debug2("Unable to select sched:sched_switch\n");
824                         else if (err)
825                                 return err;
826                         else
827                                 ptr->have_sched_switch = 1;
828                 }
829         }
830 
831         if (intel_pt_evsel) {
832                 /*
833                  * To obtain the auxtrace buffer file descriptor, the auxtrace
834                  * event must come first.
835                  */
836                 perf_evlist__to_front(evlist, intel_pt_evsel);
837                 /*
838                  * In the case of per-cpu mmaps, we need the CPU on the
839                  * AUX event.
840                  */
841                 if (!perf_cpu_map__empty(cpus))
842                         perf_evsel__set_sample_bit(intel_pt_evsel, CPU);
843         }
844 
845         /* Add dummy event to keep tracking */
846         if (opts->full_auxtrace) {
847                 struct evsel *tracking_evsel;
848 
849                 err = parse_events(evlist, "dummy:u", NULL);
850                 if (err)
851                         return err;
852 
853                 tracking_evsel = evlist__last(evlist);
854 
855                 perf_evlist__set_tracking_event(evlist, tracking_evsel);
856 
857                 tracking_evsel->core.attr.freq = 0;
858                 tracking_evsel->core.attr.sample_period = 1;
859 
860                 tracking_evsel->no_aux_samples = true;
861                 if (need_immediate)
862                         tracking_evsel->immediate = true;
863 
864                 /* In per-cpu case, always need the time of mmap events etc */
865                 if (!perf_cpu_map__empty(cpus)) {
866                         perf_evsel__set_sample_bit(tracking_evsel, TIME);
867                         /* And the CPU for switch events */
868                         perf_evsel__set_sample_bit(tracking_evsel, CPU);
869                 }
870                 perf_evsel__reset_sample_bit(tracking_evsel, BRANCH_STACK);
871         }
872 
873         /*
874          * Warn the user when we do not have enough information to decode i.e.
875          * per-cpu with no sched_switch (except workload-only).
876          */
877         if (!ptr->have_sched_switch && !perf_cpu_map__empty(cpus) &&
878             !target__none(&opts->target))
879                 ui__warning("Intel Processor Trace decoding will not be possible except for kernel tracing!\n");
880 
881         return 0;
882 }
883 
884 static int intel_pt_snapshot_start(struct auxtrace_record *itr)
885 {
886         struct intel_pt_recording *ptr =
887                         container_of(itr, struct intel_pt_recording, itr);
888         struct evsel *evsel;
889 
890         evlist__for_each_entry(ptr->evlist, evsel) {
891                 if (evsel->core.attr.type == ptr->intel_pt_pmu->type)
892                         return evsel__disable(evsel);
893         }
894         return -EINVAL;
895 }
896 
897 static int intel_pt_snapshot_finish(struct auxtrace_record *itr)
898 {
899         struct intel_pt_recording *ptr =
900                         container_of(itr, struct intel_pt_recording, itr);
901         struct evsel *evsel;
902 
903         evlist__for_each_entry(ptr->evlist, evsel) {
904                 if (evsel->core.attr.type == ptr->intel_pt_pmu->type)
905                         return evsel__enable(evsel);
906         }
907         return -EINVAL;
908 }
909 
910 static int intel_pt_alloc_snapshot_refs(struct intel_pt_recording *ptr, int idx)
911 {
912         const size_t sz = sizeof(struct intel_pt_snapshot_ref);
913         int cnt = ptr->snapshot_ref_cnt, new_cnt = cnt * 2;
914         struct intel_pt_snapshot_ref *refs;
915 
916         if (!new_cnt)
917                 new_cnt = 16;
918 
919         while (new_cnt <= idx)
920                 new_cnt *= 2;
921 
922         refs = calloc(new_cnt, sz);
923         if (!refs)
924                 return -ENOMEM;
925 
926         memcpy(refs, ptr->snapshot_refs, cnt * sz);
927 
928         ptr->snapshot_refs = refs;
929         ptr->snapshot_ref_cnt = new_cnt;
930 
931         return 0;
932 }
933 
934 static void intel_pt_free_snapshot_refs(struct intel_pt_recording *ptr)
935 {
936         int i;
937 
938         for (i = 0; i < ptr->snapshot_ref_cnt; i++)
939                 zfree(&ptr->snapshot_refs[i].ref_buf);
940         zfree(&ptr->snapshot_refs);
941 }
942 
943 static void intel_pt_recording_free(struct auxtrace_record *itr)
944 {
945         struct intel_pt_recording *ptr =
946                         container_of(itr, struct intel_pt_recording, itr);
947 
948         intel_pt_free_snapshot_refs(ptr);
949         free(ptr);
950 }
951 
952 static int intel_pt_alloc_snapshot_ref(struct intel_pt_recording *ptr, int idx,
953                                        size_t snapshot_buf_size)
954 {
955         size_t ref_buf_size = ptr->snapshot_ref_buf_size;
956         void *ref_buf;
957 
958         ref_buf = zalloc(ref_buf_size);
959         if (!ref_buf)
960                 return -ENOMEM;
961 
962         ptr->snapshot_refs[idx].ref_buf = ref_buf;
963         ptr->snapshot_refs[idx].ref_offset = snapshot_buf_size - ref_buf_size;
964 
965         return 0;
966 }
967 
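    /*
     * Size of the reference buffer used to detect wrap-around: roughly two
     * PSB periods, capped at 256KiB.  Returns 0 when the snapshot or mmap
     * buffer is too small to warrant one, in which case the zero-data
     * heuristic in intel_pt_first_wrap() is used instead.
     */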
968 static size_t intel_pt_snapshot_ref_buf_size(struct intel_pt_recording *ptr,
969                                              size_t snapshot_buf_size)
970 {
971         const size_t max_size = 256 * 1024;
972         size_t buf_size = 0, psb_period;
973 
974         if (ptr->snapshot_size <= 64 * 1024)
975                 return 0;
976 
977         psb_period = intel_pt_psb_period(ptr->intel_pt_pmu, ptr->evlist);
978         if (psb_period)
979                 buf_size = psb_period * 2;
980 
981         if (!buf_size || buf_size > max_size)
982                 buf_size = max_size;
983 
984         if (buf_size >= snapshot_buf_size)
985                 return 0;
986 
987         if (buf_size >= ptr->snapshot_size / 2)
988                 return 0;
989 
990         return buf_size;
991 }
992 
993 static int intel_pt_snapshot_init(struct intel_pt_recording *ptr,
994                                   size_t snapshot_buf_size)
995 {
996         if (ptr->snapshot_init_done)
997                 return 0;
998 
999         ptr->snapshot_init_done = true;
1000 
1001         ptr->snapshot_ref_buf_size = intel_pt_snapshot_ref_buf_size(ptr,
1002                                                         snapshot_buf_size);
1003 
1004         return 0;
1005 }
1006 
1007 /**
1008  * intel_pt_compare_buffers - compare bytes in a buffer to a circular buffer.
1009  * @buf1: first buffer
1010  * @compare_size: number of bytes to compare
1011  * @buf2: second buffer (a circular buffer)
1012  * @offs2: offset in second buffer
1013  * @buf2_size: size of second buffer
1014  *
1015  * The comparison allows for the possibility that the bytes to compare in the
1016  * circular buffer are not contiguous.  It is assumed that @compare_size <=
1017  * @buf2_size.  This function returns %false if the bytes are identical, %true
1018  * otherwise.
1019  */
1020 static bool intel_pt_compare_buffers(void *buf1, size_t compare_size,
1021                                      void *buf2, size_t offs2, size_t buf2_size)
1022 {
1023         size_t end2 = offs2 + compare_size, part_size;
1024 
1025         if (end2 <= buf2_size)
1026                 return memcmp(buf1, buf2 + offs2, compare_size);
1027 
1028         part_size = end2 - buf2_size;
1029         if (memcmp(buf1, buf2 + offs2, part_size))
1030                 return true;
1031 
1032         compare_size -= part_size;
1033 
1034         return memcmp(buf1 + part_size, buf2, compare_size);
1035 }
1036 
1037 static bool intel_pt_compare_ref(void *ref_buf, size_t ref_offset,
1038                                  size_t ref_size, size_t buf_size,
1039                                  void *data, size_t head)
1040 {
1041         size_t ref_end = ref_offset + ref_size;
1042 
1043         if (ref_end > buf_size) {
1044                 if (head > ref_offset || head < ref_end - buf_size)
1045                         return true;
1046         } else if (head > ref_offset && head < ref_end) {
1047                 return true;
1048         }
1049 
1050         return intel_pt_compare_buffers(ref_buf, ref_size, data, ref_offset,
1051                                         buf_size);
1052 }
1053 
1054 static void intel_pt_copy_ref(void *ref_buf, size_t ref_size, size_t buf_size,
1055                               void *data, size_t head)
1056 {
1057         if (head >= ref_size) {
1058                 memcpy(ref_buf, data + head - ref_size, ref_size);
1059         } else {
1060                 memcpy(ref_buf, data, head);
1061                 ref_size -= head;
1062                 memcpy(ref_buf + head, data + buf_size - ref_size, ref_size);
1063         }
1064 }
1065 
1066 static bool intel_pt_wrapped(struct intel_pt_recording *ptr, int idx,
1067                              struct auxtrace_mmap *mm, unsigned char *data,
1068                              u64 head)
1069 {
1070         struct intel_pt_snapshot_ref *ref = &ptr->snapshot_refs[idx];
1071         bool wrapped;
1072 
1073         wrapped = intel_pt_compare_ref(ref->ref_buf, ref->ref_offset,
1074                                        ptr->snapshot_ref_buf_size, mm->len,
1075                                        data, head);
1076 
1077         intel_pt_copy_ref(ref->ref_buf, ptr->snapshot_ref_buf_size, mm->len,
1078                           data, head);
1079 
1080         return wrapped;
1081 }
1082 
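     /*
      * The AUX buffer starts out zero-filled, so non-zero data in the last
      * 512 u64s (4KiB) is taken to mean the writer has wrapped around at
      * least once.
      */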
1083 static bool intel_pt_first_wrap(u64 *data, size_t buf_size)
1084 {
1085         int i, a, b;
1086 
1087         b = buf_size >> 3;
1088         a = b - 512;
1089         if (a < 0)
1090                 a = 0;
1091 
1092         for (i = a; i < b; i++) {
1093                 if (data[i])
1094                         return true;
1095         }
1096 
1097         return false;
1098 }
1099 
1100 static int intel_pt_find_snapshot(struct auxtrace_record *itr, int idx,
1101                                   struct auxtrace_mmap *mm, unsigned char *data,
1102                                   u64 *head, u64 *old)
1103 {
1104         struct intel_pt_recording *ptr =
1105                         container_of(itr, struct intel_pt_recording, itr);
1106         bool wrapped;
1107         int err;
1108 
1109         pr_debug3("%s: mmap index %d old head %zu new head %zu\n",
1110                   __func__, idx, (size_t)*old, (size_t)*head);
1111 
1112         err = intel_pt_snapshot_init(ptr, mm->len);
1113         if (err)
1114                 goto out_err;
1115 
1116         if (idx >= ptr->snapshot_ref_cnt) {
1117                 err = intel_pt_alloc_snapshot_refs(ptr, idx);
1118                 if (err)
1119                         goto out_err;
1120         }
1121 
1122         if (ptr->snapshot_ref_buf_size) {
1123                 if (!ptr->snapshot_refs[idx].ref_buf) {
1124                         err = intel_pt_alloc_snapshot_ref(ptr, idx, mm->len);
1125                         if (err)
1126                                 goto out_err;
1127                 }
1128                 wrapped = intel_pt_wrapped(ptr, idx, mm, data, *head);
1129         } else {
1130                 wrapped = ptr->snapshot_refs[idx].wrapped;
1131                 if (!wrapped && intel_pt_first_wrap((u64 *)data, mm->len)) {
1132                         ptr->snapshot_refs[idx].wrapped = true;
1133                         wrapped = true;
1134                 }
1135         }
1136 
1137         /*
1138          * In full trace mode 'head' continually increases.  However in snapshot
1139          * mode 'head' is an offset within the buffer.  Here 'old' and 'head'
1140          * are adjusted to match the full trace case which expects that 'old' is
1141          * always less than 'head'.
1142          */
1143         if (wrapped) {
1144                 *old = *head;
1145                 *head += mm->len;
1146         } else {
1147                 if (mm->mask)
1148                         *old &= mm->mask;
1149                 else
1150                         *old %= mm->len;
1151                 if (*old > *head)
1152                         *head += mm->len;
1153         }
1154 
1155         pr_debug3("%s: wrap-around %sdetected, adjusted old head %zu adjusted new head %zu\n",
1156                   __func__, wrapped ? "" : "not ", (size_t)*old, (size_t)*head);
1157 
1158         return 0;
1159 
1160 out_err:
1161         pr_err("%s: failed, error %d\n", __func__, err);
1162         return err;
1163 }
1164 
1165 static u64 intel_pt_reference(struct auxtrace_record *itr __maybe_unused)
1166 {
1167         return rdtsc();
1168 }
1169 
1170 struct auxtrace_record *intel_pt_recording_init(int *err)
1171 {
1172         struct perf_pmu *intel_pt_pmu = perf_pmu__find(INTEL_PT_PMU_NAME);
1173         struct intel_pt_recording *ptr;
1174 
1175         if (!intel_pt_pmu)
1176                 return NULL;
1177 
1178         if (setenv("JITDUMP_USE_ARCH_TIMESTAMP", "1", 1)) {
1179                 *err = -errno;
1180                 return NULL;
1181         }
1182 
1183         ptr = zalloc(sizeof(struct intel_pt_recording));
1184         if (!ptr) {
1185                 *err = -ENOMEM;
1186                 return NULL;
1187         }
1188 
1189         ptr->intel_pt_pmu = intel_pt_pmu;
1190         ptr->itr.pmu = intel_pt_pmu;
1191         ptr->itr.recording_options = intel_pt_recording_options;
1192         ptr->itr.info_priv_size = intel_pt_info_priv_size;
1193         ptr->itr.info_fill = intel_pt_info_fill;
1194         ptr->itr.free = intel_pt_recording_free;
1195         ptr->itr.snapshot_start = intel_pt_snapshot_start;
1196         ptr->itr.snapshot_finish = intel_pt_snapshot_finish;
1197         ptr->itr.find_snapshot = intel_pt_find_snapshot;
1198         ptr->itr.parse_snapshot_options = intel_pt_parse_snapshot_options;
1199         ptr->itr.reference = intel_pt_reference;
1200         ptr->itr.read_finish = auxtrace_record__read_finish;
1201         /*
1202          * Decoding starts at a PSB packet. Minimum PSB period is 2K so 4K
1203          * should give at least 1 PSB per sample.
1204          */
1205         ptr->itr.default_aux_sample_size = 4096;
1206         return &ptr->itr;
1207 }
1208 
