TOMOYO Linux Cross Reference
Linux/tools/perf/util/session.c

  1 #include <linux/kernel.h>
  2 #include <traceevent/event-parse.h>
  3 
  4 #include <byteswap.h>
  5 #include <unistd.h>
  6 #include <sys/types.h>
  7 #include <sys/mman.h>
  8 
  9 #include "evlist.h"
 10 #include "evsel.h"
 11 #include "session.h"
 12 #include "tool.h"
 13 #include "sort.h"
 14 #include "util.h"
 15 #include "cpumap.h"
 16 #include "perf_regs.h"
 17 #include "asm/bug.h"
 18 #include "auxtrace.h"
 19 #include "thread-stack.h"
 20 #include "stat.h"
 21 
 22 static int perf_session__deliver_event(struct perf_session *session,
 23                                        union perf_event *event,
 24                                        struct perf_sample *sample,
 25                                        struct perf_tool *tool,
 26                                        u64 file_offset);
 27 
 28 static int perf_session__open(struct perf_session *session)
 29 {
 30         struct perf_data_file *file = session->file;
 31 
 32         if (perf_session__read_header(session) < 0) {
 33                 pr_err("incompatible file format (rerun with -v to learn more)\n");
 34                 return -1;
 35         }
 36 
 37         if (perf_data_file__is_pipe(file))
 38                 return 0;
 39 
 40         if (perf_header__has_feat(&session->header, HEADER_STAT))
 41                 return 0;
 42 
 43         if (!perf_evlist__valid_sample_type(session->evlist)) {
 44                 pr_err("non matching sample_type\n");
 45                 return -1;
 46         }
 47 
 48         if (!perf_evlist__valid_sample_id_all(session->evlist)) {
 49                 pr_err("non matching sample_id_all\n");
 50                 return -1;
 51         }
 52 
 53         if (!perf_evlist__valid_read_format(session->evlist)) {
 54                 pr_err("non matching read_format\n");
 55                 return -1;
 56         }
 57 
 58         return 0;
 59 }
 60 
 61 void perf_session__set_id_hdr_size(struct perf_session *session)
 62 {
 63         u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);
 64 
 65         machines__set_id_hdr_size(&session->machines, id_hdr_size);
 66 }
 67 
 68 int perf_session__create_kernel_maps(struct perf_session *session)
 69 {
 70         int ret = machine__create_kernel_maps(&session->machines.host);
 71 
 72         if (ret >= 0)
 73                 ret = machines__create_guest_kernel_maps(&session->machines);
 74         return ret;
 75 }
 76 
 77 static void perf_session__destroy_kernel_maps(struct perf_session *session)
 78 {
 79         machines__destroy_kernel_maps(&session->machines);
 80 }
 81 
 82 static bool perf_session__has_comm_exec(struct perf_session *session)
 83 {
 84         struct perf_evsel *evsel;
 85 
 86         evlist__for_each_entry(session->evlist, evsel) {
 87                 if (evsel->attr.comm_exec)
 88                         return true;
 89         }
 90 
 91         return false;
 92 }
 93 
 94 static void perf_session__set_comm_exec(struct perf_session *session)
 95 {
 96         bool comm_exec = perf_session__has_comm_exec(session);
 97 
 98         machines__set_comm_exec(&session->machines, comm_exec);
 99 }
100 
101 static int ordered_events__deliver_event(struct ordered_events *oe,
102                                          struct ordered_event *event)
103 {
104         struct perf_sample sample;
105         struct perf_session *session = container_of(oe, struct perf_session,
106                                                     ordered_events);
107         int ret = perf_evlist__parse_sample(session->evlist, event->event, &sample);
108 
109         if (ret) {
110                 pr_err("Can't parse sample, err = %d\n", ret);
111                 return ret;
112         }
113 
114         return perf_session__deliver_event(session, event->event, &sample,
115                                            session->tool, event->file_offset);
116 }
117 
118 struct perf_session *perf_session__new(struct perf_data_file *file,
119                                        bool repipe, struct perf_tool *tool)
120 {
121         struct perf_session *session = zalloc(sizeof(*session));
122 
123         if (!session)
124                 goto out;
125 
126         session->repipe = repipe;
127         session->tool   = tool;
128         INIT_LIST_HEAD(&session->auxtrace_index);
129         machines__init(&session->machines);
130         ordered_events__init(&session->ordered_events, ordered_events__deliver_event);
131 
132         if (file) {
133                 if (perf_data_file__open(file))
134                         goto out_delete;
135 
136                 session->file = file;
137 
138                 if (perf_data_file__is_read(file)) {
139                         if (perf_session__open(session) < 0)
140                                 goto out_close;
141 
142                         perf_session__set_id_hdr_size(session);
143                         perf_session__set_comm_exec(session);
144                 }
145         } else  {
146                 session->machines.host.env = &perf_env;
147         }
148 
149         if (!file || perf_data_file__is_write(file)) {
150                 /*
151                  * In O_RDONLY mode this will be performed when reading the
152                  * kernel MMAP event, in perf_event__process_mmap().
153                  */
154                 if (perf_session__create_kernel_maps(session) < 0)
155                         pr_warning("Cannot read kernel map\n");
156         }
157 
158         if (tool && tool->ordering_requires_timestamps &&
159             tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
160                 dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
161                 tool->ordered_events = false;
162         }
163 
164         return session;
165 
166  out_close:
167         perf_data_file__close(file);
168  out_delete:
169         perf_session__delete(session);
170  out:
171         return NULL;
172 }
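
For context, a typical perf builtin drives this constructor roughly as follows. A minimal sketch, assuming the perf tree's util/session.h and util/data.h headers and a read-mode perf.data file; toy_read_session is illustrative, and error handling plus the actual processing loop are elided:

static int toy_read_session(struct perf_tool *tool)
{
        struct perf_data_file file = {
                .path = "perf.data",
                .mode = PERF_DATA_MODE_READ,
        };
        struct perf_session *session;

        session = perf_session__new(&file, false, tool);
        if (session == NULL)
                return -1;

        /* ... process events here, e.g. via perf_session__process_events() ... */

        perf_session__delete(session);
        return 0;
}
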
173 
174 static void perf_session__delete_threads(struct perf_session *session)
175 {
176         machine__delete_threads(&session->machines.host);
177 }
178 
179 void perf_session__delete(struct perf_session *session)
180 {
181         if (session == NULL)
182                 return;
183         auxtrace__free(session);
184         auxtrace_index__free(&session->auxtrace_index);
185         perf_session__destroy_kernel_maps(session);
186         perf_session__delete_threads(session);
187         perf_env__exit(&session->header.env);
188         machines__exit(&session->machines);
189         if (session->file)
190                 perf_data_file__close(session->file);
191         free(session);
192 }
193 
194 static int process_event_synth_tracing_data_stub(struct perf_tool *tool
195                                                  __maybe_unused,
196                                                  union perf_event *event
197                                                  __maybe_unused,
198                                                  struct perf_session *session
199                                                 __maybe_unused)
200 {
201         dump_printf(": unhandled!\n");
202         return 0;
203 }
204 
205 static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
206                                          union perf_event *event __maybe_unused,
207                                          struct perf_evlist **pevlist
208                                          __maybe_unused)
209 {
210         dump_printf(": unhandled!\n");
211         return 0;
212 }
213 
214 static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
215                                                  union perf_event *event __maybe_unused,
216                                                  struct perf_evlist **pevlist
217                                                  __maybe_unused)
218 {
219         if (dump_trace)
220                 perf_event__fprintf_event_update(event, stdout);
221 
222         dump_printf(": unhandled!\n");
223         return 0;
224 }
225 
226 static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
227                                      union perf_event *event __maybe_unused,
228                                      struct perf_sample *sample __maybe_unused,
229                                      struct perf_evsel *evsel __maybe_unused,
230                                      struct machine *machine __maybe_unused)
231 {
232         dump_printf(": unhandled!\n");
233         return 0;
234 }
235 
236 static int process_event_stub(struct perf_tool *tool __maybe_unused,
237                               union perf_event *event __maybe_unused,
238                               struct perf_sample *sample __maybe_unused,
239                               struct machine *machine __maybe_unused)
240 {
241         dump_printf(": unhandled!\n");
242         return 0;
243 }
244 
245 static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
246                                        union perf_event *event __maybe_unused,
247                                        struct ordered_events *oe __maybe_unused)
248 {
249         dump_printf(": unhandled!\n");
250         return 0;
251 }
252 
253 static int process_finished_round(struct perf_tool *tool,
254                                   union perf_event *event,
255                                   struct ordered_events *oe);
256 
257 static int skipn(int fd, off_t n)
258 {
259         char buf[4096];
260         ssize_t ret;
261 
262         while (n > 0) {
263                 ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
264                 if (ret <= 0)
265                         return ret;
266                 n -= ret;
267         }
268 
269         return 0;
270 }
271 
272 static s64 process_event_auxtrace_stub(struct perf_tool *tool __maybe_unused,
273                                        union perf_event *event,
274                                        struct perf_session *session
275                                        __maybe_unused)
276 {
277         dump_printf(": unhandled!\n");
278         if (perf_data_file__is_pipe(session->file))
279                 skipn(perf_data_file__fd(session->file), event->auxtrace.size);
280         return event->auxtrace.size;
281 }
282 
283 static int process_event_op2_stub(struct perf_tool *tool __maybe_unused,
284                                   union perf_event *event __maybe_unused,
285                                   struct perf_session *session __maybe_unused)
286 {
287         dump_printf(": unhandled!\n");
288         return 0;
289 }
290 
291 
292 static
293 int process_event_thread_map_stub(struct perf_tool *tool __maybe_unused,
294                                   union perf_event *event __maybe_unused,
295                                   struct perf_session *session __maybe_unused)
296 {
297         if (dump_trace)
298                 perf_event__fprintf_thread_map(event, stdout);
299 
300         dump_printf(": unhandled!\n");
301         return 0;
302 }
303 
304 static
305 int process_event_cpu_map_stub(struct perf_tool *tool __maybe_unused,
306                                union perf_event *event __maybe_unused,
307                                struct perf_session *session __maybe_unused)
308 {
309         if (dump_trace)
310                 perf_event__fprintf_cpu_map(event, stdout);
311 
312         dump_printf(": unhandled!\n");
313         return 0;
314 }
315 
316 static
317 int process_event_stat_config_stub(struct perf_tool *tool __maybe_unused,
318                                    union perf_event *event __maybe_unused,
319                                    struct perf_session *session __maybe_unused)
320 {
321         if (dump_trace)
322                 perf_event__fprintf_stat_config(event, stdout);
323 
324         dump_printf(": unhandled!\n");
325         return 0;
326 }
327 
328 static int process_stat_stub(struct perf_tool *tool __maybe_unused,
329                              union perf_event *event __maybe_unused,
330                              struct perf_session *perf_session
331                              __maybe_unused)
332 {
333         if (dump_trace)
334                 perf_event__fprintf_stat(event, stdout);
335 
336         dump_printf(": unhandled!\n");
337         return 0;
338 }
339 
340 static int process_stat_round_stub(struct perf_tool *tool __maybe_unused,
341                                    union perf_event *event __maybe_unused,
342                                    struct perf_session *perf_session
343                                    __maybe_unused)
344 {
345         if (dump_trace)
346                 perf_event__fprintf_stat_round(event, stdout);
347 
348         dump_printf(": unhandled!\n");
349         return 0;
350 }
351 
352 void perf_tool__fill_defaults(struct perf_tool *tool)
353 {
354         if (tool->sample == NULL)
355                 tool->sample = process_event_sample_stub;
356         if (tool->mmap == NULL)
357                 tool->mmap = process_event_stub;
358         if (tool->mmap2 == NULL)
359                 tool->mmap2 = process_event_stub;
360         if (tool->comm == NULL)
361                 tool->comm = process_event_stub;
362         if (tool->fork == NULL)
363                 tool->fork = process_event_stub;
364         if (tool->exit == NULL)
365                 tool->exit = process_event_stub;
366         if (tool->lost == NULL)
367                 tool->lost = perf_event__process_lost;
368         if (tool->lost_samples == NULL)
369                 tool->lost_samples = perf_event__process_lost_samples;
370         if (tool->aux == NULL)
371                 tool->aux = perf_event__process_aux;
372         if (tool->itrace_start == NULL)
373                 tool->itrace_start = perf_event__process_itrace_start;
374         if (tool->context_switch == NULL)
375                 tool->context_switch = perf_event__process_switch;
376         if (tool->read == NULL)
377                 tool->read = process_event_sample_stub;
378         if (tool->throttle == NULL)
379                 tool->throttle = process_event_stub;
380         if (tool->unthrottle == NULL)
381                 tool->unthrottle = process_event_stub;
382         if (tool->attr == NULL)
383                 tool->attr = process_event_synth_attr_stub;
384         if (tool->event_update == NULL)
385                 tool->event_update = process_event_synth_event_update_stub;
386         if (tool->tracing_data == NULL)
387                 tool->tracing_data = process_event_synth_tracing_data_stub;
388         if (tool->build_id == NULL)
389                 tool->build_id = process_event_op2_stub;
390         if (tool->finished_round == NULL) {
391                 if (tool->ordered_events)
392                         tool->finished_round = process_finished_round;
393                 else
394                         tool->finished_round = process_finished_round_stub;
395         }
396         if (tool->id_index == NULL)
397                 tool->id_index = process_event_op2_stub;
398         if (tool->auxtrace_info == NULL)
399                 tool->auxtrace_info = process_event_op2_stub;
400         if (tool->auxtrace == NULL)
401                 tool->auxtrace = process_event_auxtrace_stub;
402         if (tool->auxtrace_error == NULL)
403                 tool->auxtrace_error = process_event_op2_stub;
404         if (tool->thread_map == NULL)
405                 tool->thread_map = process_event_thread_map_stub;
406         if (tool->cpu_map == NULL)
407                 tool->cpu_map = process_event_cpu_map_stub;
408         if (tool->stat_config == NULL)
409                 tool->stat_config = process_event_stat_config_stub;
410         if (tool->stat == NULL)
411                 tool->stat = process_stat_stub;
412         if (tool->stat_round == NULL)
413                 tool->stat_round = process_stat_round_stub;
414         if (tool->time_conv == NULL)
415                 tool->time_conv = process_event_op2_stub;
416 }
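
The net effect of the above is that a consumer implements only the callbacks it cares about and lets everything else fall through to a stub. A standalone toy re-creation of the pattern (names and struct layout here are illustrative, not perf's real perf_tool):

#include <stdio.h>

struct toy_tool {
        int (*sample)(const char *what);
        int (*mmap)(const char *what);
};

static int stub(const char *what)
{
        printf("%s: unhandled!\n", what);
        return 0;
}

static void toy_tool__fill_defaults(struct toy_tool *tool)
{
        if (tool->sample == NULL)
                tool->sample = stub;
        if (tool->mmap == NULL)
                tool->mmap = stub;
}

static int my_sample(const char *what)
{
        printf("%s: handled by my_sample\n", what);
        return 0;
}

int main(void)
{
        struct toy_tool tool = { .sample = my_sample };

        toy_tool__fill_defaults(&tool); /* mmap falls back to the stub */
        tool.sample("PERF_RECORD_SAMPLE");
        tool.mmap("PERF_RECORD_MMAP");
        return 0;
}
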
417 
418 static void swap_sample_id_all(union perf_event *event, void *data)
419 {
420         void *end = (void *) event + event->header.size;
421         int size = end - data;
422 
423         BUG_ON(size % sizeof(u64));
424         mem_bswap_64(data, size);
425 }
426 
427 static void perf_event__all64_swap(union perf_event *event,
428                                    bool sample_id_all __maybe_unused)
429 {
430         struct perf_event_header *hdr = &event->header;
431         mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
432 }
433 
434 static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
435 {
436         event->comm.pid = bswap_32(event->comm.pid);
437         event->comm.tid = bswap_32(event->comm.tid);
438 
439         if (sample_id_all) {
440                 void *data = &event->comm.comm;
441 
442                 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
443                 swap_sample_id_all(event, data);
444         }
445 }
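
The PERF_ALIGN() step above is how the code skips past the nul-terminated comm string, which is padded to a u64 boundary before the trailing sample_id_all block begins. A standalone sketch of the same rounding, using a hypothetical ALIGN_UP macro with the usual power-of-two arithmetic:

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

/* Round x up to a power-of-two alignment a, the same arithmetic
 * PERF_ALIGN() uses (ALIGN_UP is a stand-in name). */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
        const char *comm = "perf";

        /* strlen("perf") + 1 == 5, padded up to 8: the sample_id_all
         * trailer starts 8 bytes after the start of the string. */
        printf("trailer offset: %" PRIu64 "\n",
               ALIGN_UP(strlen(comm) + 1, sizeof(uint64_t)));
        return 0;
}
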
446 
447 static void perf_event__mmap_swap(union perf_event *event,
448                                   bool sample_id_all)
449 {
450         event->mmap.pid   = bswap_32(event->mmap.pid);
451         event->mmap.tid   = bswap_32(event->mmap.tid);
452         event->mmap.start = bswap_64(event->mmap.start);
453         event->mmap.len   = bswap_64(event->mmap.len);
454         event->mmap.pgoff = bswap_64(event->mmap.pgoff);
455 
456         if (sample_id_all) {
457                 void *data = &event->mmap.filename;
458 
459                 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
460                 swap_sample_id_all(event, data);
461         }
462 }
463 
464 static void perf_event__mmap2_swap(union perf_event *event,
465                                   bool sample_id_all)
466 {
467         event->mmap2.pid   = bswap_32(event->mmap2.pid);
468         event->mmap2.tid   = bswap_32(event->mmap2.tid);
469         event->mmap2.start = bswap_64(event->mmap2.start);
470         event->mmap2.len   = bswap_64(event->mmap2.len);
471         event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
472         event->mmap2.maj   = bswap_32(event->mmap2.maj);
473         event->mmap2.min   = bswap_32(event->mmap2.min);
474         event->mmap2.ino   = bswap_64(event->mmap2.ino);
475 
476         if (sample_id_all) {
477                 void *data = &event->mmap2.filename;
478 
479                 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
480                 swap_sample_id_all(event, data);
481         }
482 }
483 static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
484 {
485         event->fork.pid  = bswap_32(event->fork.pid);
486         event->fork.tid  = bswap_32(event->fork.tid);
487         event->fork.ppid = bswap_32(event->fork.ppid);
488         event->fork.ptid = bswap_32(event->fork.ptid);
489         event->fork.time = bswap_64(event->fork.time);
490 
491         if (sample_id_all)
492                 swap_sample_id_all(event, &event->fork + 1);
493 }
494 
495 static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
496 {
497         event->read.pid          = bswap_32(event->read.pid);
498         event->read.tid          = bswap_32(event->read.tid);
499         event->read.value        = bswap_64(event->read.value);
500         event->read.time_enabled = bswap_64(event->read.time_enabled);
501         event->read.time_running = bswap_64(event->read.time_running);
502         event->read.id           = bswap_64(event->read.id);
503 
504         if (sample_id_all)
505                 swap_sample_id_all(event, &event->read + 1);
506 }
507 
508 static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
509 {
510         event->aux.aux_offset = bswap_64(event->aux.aux_offset);
511         event->aux.aux_size   = bswap_64(event->aux.aux_size);
512         event->aux.flags      = bswap_64(event->aux.flags);
513 
514         if (sample_id_all)
515                 swap_sample_id_all(event, &event->aux + 1);
516 }
517 
518 static void perf_event__itrace_start_swap(union perf_event *event,
519                                           bool sample_id_all)
520 {
521         event->itrace_start.pid  = bswap_32(event->itrace_start.pid);
522         event->itrace_start.tid  = bswap_32(event->itrace_start.tid);
523 
524         if (sample_id_all)
525                 swap_sample_id_all(event, &event->itrace_start + 1);
526 }
527 
528 static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
529 {
530         if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
531                 event->context_switch.next_prev_pid =
532                                 bswap_32(event->context_switch.next_prev_pid);
533                 event->context_switch.next_prev_tid =
534                                 bswap_32(event->context_switch.next_prev_tid);
535         }
536 
537         if (sample_id_all)
538                 swap_sample_id_all(event, &event->context_switch + 1);
539 }
540 
541 static void perf_event__throttle_swap(union perf_event *event,
542                                       bool sample_id_all)
543 {
544         event->throttle.time      = bswap_64(event->throttle.time);
545         event->throttle.id        = bswap_64(event->throttle.id);
546         event->throttle.stream_id = bswap_64(event->throttle.stream_id);
547 
548         if (sample_id_all)
549                 swap_sample_id_all(event, &event->throttle + 1);
550 }
551 
552 static u8 revbyte(u8 b)
553 {
554         int rev = (b >> 4) | ((b & 0xf) << 4);
555         rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
556         rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
557         return (u8) rev;
558 }
559 
560 /*
561  * XXX this is a hack in an attempt to carry the flags bitfield
562  * through the endian village. The ABI says:
563  *
564  * Bit-fields are allocated from right to left (least to most significant)
565  * on little-endian implementations and from left to right (most to least
566  * significant) on big-endian implementations.
567  *
568  * The above seems to be byte specific, so we need to reverse each
569  * byte of the bitfield. The 'Internet' also says this might be
570  * implementation specific and a proper fix would carry the
571  * perf_event_attr bitfield flags in a separate data file FEAT_ section.
572  * Though this seems to work for now.
573  */
574 static void swap_bitfield(u8 *p, unsigned len)
575 {
576         unsigned i;
577 
578         for (i = 0; i < len; i++) {
579                 *p = revbyte(*p);
580                 p++;
581         }
582 }
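
A quick standalone check of the byte-wise reversal: rev8 below copies the same shift-and-mask ladder as revbyte() above, and the test verifies the operation is its own inverse:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t rev8(uint8_t b)
{
        b = (uint8_t)((b >> 4) | (b << 4));                   /* swap nibbles */
        b = (uint8_t)(((b & 0xcc) >> 2) | ((b & 0x33) << 2)); /* swap pairs   */
        b = (uint8_t)(((b & 0xaa) >> 1) | ((b & 0x55) << 1)); /* swap bits    */
        return b;
}

int main(void)
{
        int i;

        assert(rev8(0x01) == 0x80);     /* 00000001 -> 10000000 */
        assert(rev8(0xb4) == 0x2d);     /* 10110100 -> 00101101 */
        for (i = 0; i < 256; i++)       /* reversal is its own inverse */
                assert(rev8(rev8((uint8_t)i)) == (uint8_t)i);
        printf("revbyte round-trips for all byte values\n");
        return 0;
}
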
583 
584 /* exported for swapping attributes in file header */
585 void perf_event__attr_swap(struct perf_event_attr *attr)
586 {
587         attr->type              = bswap_32(attr->type);
588         attr->size              = bswap_32(attr->size);
589 
590 #define bswap_safe(f, n)                                        \
591         (attr->size > (offsetof(struct perf_event_attr, f) +    \
592                        sizeof(attr->f) * (n)))
593 #define bswap_field(f, sz)                      \
594 do {                                            \
595         if (bswap_safe(f, 0))                   \
596                 attr->f = bswap_##sz(attr->f);  \
597 } while(0)
598 #define bswap_field_16(f) bswap_field(f, 16)
599 #define bswap_field_32(f) bswap_field(f, 32)
600 #define bswap_field_64(f) bswap_field(f, 64)
601 
602         bswap_field_64(config);
603         bswap_field_64(sample_period);
604         bswap_field_64(sample_type);
605         bswap_field_64(read_format);
606         bswap_field_32(wakeup_events);
607         bswap_field_32(bp_type);
608         bswap_field_64(bp_addr);
609         bswap_field_64(bp_len);
610         bswap_field_64(branch_sample_type);
611         bswap_field_64(sample_regs_user);
612         bswap_field_32(sample_stack_user);
613         bswap_field_32(aux_watermark);
614         bswap_field_16(sample_max_stack);
615 
616         /*
617          * After read_format are bitfields. Check read_format because
618          * we are unable to use offsetof on bitfield.
619          */
620         if (bswap_safe(read_format, 1))
621                 swap_bitfield((u8 *) (&attr->read_format + 1),
622                               sizeof(u64));
623 #undef bswap_field_64
624 #undef bswap_field_32
625 #undef bswap_field
626 #undef bswap_safe
627 }
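
The bswap_safe() guard above exists because perf_event_attr has grown over time: a file written by an older perf declares a smaller attr->size, and fields past that size were never written. A toy illustration of the same offsetof-based gating (the struct and names are made up):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for a struct that grew new fields over time. */
struct toy_attr {
        uint32_t size;          /* size the producer declared */
        uint64_t config;
        uint64_t sample_period; /* newer field, may be absent on disk */
};

/* Mirrors bswap_safe(f, 0): only touch a field that starts inside
 * the producer's declared size. */
#define FIELD_PRESENT(attr, f) \
        ((attr)->size > offsetof(struct toy_attr, f))

int main(void)
{
        struct toy_attr attr = {
                .size = offsetof(struct toy_attr, sample_period),
        };

        printf("config present: %d\n", FIELD_PRESENT(&attr, config));               /* 1 */
        printf("sample_period present: %d\n", FIELD_PRESENT(&attr, sample_period)); /* 0 */
        return 0;
}
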
628 
629 static void perf_event__hdr_attr_swap(union perf_event *event,
630                                       bool sample_id_all __maybe_unused)
631 {
632         size_t size;
633 
634         perf_event__attr_swap(&event->attr.attr);
635 
636         size = event->header.size;
637         size -= (void *)&event->attr.id - (void *)event;
638         mem_bswap_64(event->attr.id, size);
639 }
640 
641 static void perf_event__event_update_swap(union perf_event *event,
642                                           bool sample_id_all __maybe_unused)
643 {
644         event->event_update.type = bswap_64(event->event_update.type);
645         event->event_update.id   = bswap_64(event->event_update.id);
646 }
647 
648 static void perf_event__event_type_swap(union perf_event *event,
649                                         bool sample_id_all __maybe_unused)
650 {
651         event->event_type.event_type.event_id =
652                 bswap_64(event->event_type.event_type.event_id);
653 }
654 
655 static void perf_event__tracing_data_swap(union perf_event *event,
656                                           bool sample_id_all __maybe_unused)
657 {
658         event->tracing_data.size = bswap_32(event->tracing_data.size);
659 }
660 
661 static void perf_event__auxtrace_info_swap(union perf_event *event,
662                                            bool sample_id_all __maybe_unused)
663 {
664         size_t size;
665 
666         event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);
667 
668         size = event->header.size;
669         size -= (void *)&event->auxtrace_info.priv - (void *)event;
670         mem_bswap_64(event->auxtrace_info.priv, size);
671 }
672 
673 static void perf_event__auxtrace_swap(union perf_event *event,
674                                       bool sample_id_all __maybe_unused)
675 {
676         event->auxtrace.size      = bswap_64(event->auxtrace.size);
677         event->auxtrace.offset    = bswap_64(event->auxtrace.offset);
678         event->auxtrace.reference = bswap_64(event->auxtrace.reference);
679         event->auxtrace.idx       = bswap_32(event->auxtrace.idx);
680         event->auxtrace.tid       = bswap_32(event->auxtrace.tid);
681         event->auxtrace.cpu       = bswap_32(event->auxtrace.cpu);
682 }
683 
684 static void perf_event__auxtrace_error_swap(union perf_event *event,
685                                             bool sample_id_all __maybe_unused)
686 {
687         event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
688         event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
689         event->auxtrace_error.cpu  = bswap_32(event->auxtrace_error.cpu);
690         event->auxtrace_error.pid  = bswap_32(event->auxtrace_error.pid);
691         event->auxtrace_error.tid  = bswap_32(event->auxtrace_error.tid);
692         event->auxtrace_error.ip   = bswap_64(event->auxtrace_error.ip);
693 }
694 
695 static void perf_event__thread_map_swap(union perf_event *event,
696                                         bool sample_id_all __maybe_unused)
697 {
698         unsigned i;
699 
700         event->thread_map.nr = bswap_64(event->thread_map.nr);
701 
702         for (i = 0; i < event->thread_map.nr; i++)
703                 event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
704 }
705 
706 static void perf_event__cpu_map_swap(union perf_event *event,
707                                      bool sample_id_all __maybe_unused)
708 {
709         struct cpu_map_data *data = &event->cpu_map.data;
710         struct cpu_map_entries *cpus;
711         struct cpu_map_mask *mask;
712         unsigned i;
713 
714         data->type = bswap_64(data->type);
715 
716         switch (data->type) {
717         case PERF_CPU_MAP__CPUS:
718                 cpus = (struct cpu_map_entries *)data->data;
719 
720                 cpus->nr = bswap_16(cpus->nr);
721 
722                 for (i = 0; i < cpus->nr; i++)
723                         cpus->cpu[i] = bswap_16(cpus->cpu[i]);
724                 break;
725         case PERF_CPU_MAP__MASK:
726                 mask = (struct cpu_map_mask *) data->data;
727 
728                 mask->nr = bswap_16(mask->nr);
729                 mask->long_size = bswap_16(mask->long_size);
730 
731                 switch (mask->long_size) {
732                 case 4: mem_bswap_32(&mask->mask, mask->nr); break;
733                 case 8: mem_bswap_64(&mask->mask, mask->nr); break;
734                 default:
735                         pr_err("cpu_map swap: unsupported long size\n");
736                 }
737         default:
738                 break;
739         }
740 }
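
For reference, the two encodings the switch above distinguishes look roughly like this, sketched from how the code indexes them; the authoritative definitions live in the perf event headers:

#include <stdint.h>

/* PERF_CPU_MAP__CPUS: an explicit list of cpu numbers. */
struct cpu_map_entries_sketch {
        uint16_t nr;
        uint16_t cpu[];         /* nr entries */
};

/* PERF_CPU_MAP__MASK: a bitmask, long_size bytes per word. */
struct cpu_map_mask_sketch {
        uint16_t nr;            /* number of mask words */
        uint16_t long_size;     /* 4 or 8 */
        unsigned long mask[];
};
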
741 
742 static void perf_event__stat_config_swap(union perf_event *event,
743                                          bool sample_id_all __maybe_unused)
744 {
745         u64 size;
746 
747         size  = event->stat_config.nr * sizeof(event->stat_config.data[0]);
748         size += 1; /* nr item itself */
749         mem_bswap_64(&event->stat_config.nr, size);
750 }
751 
752 static void perf_event__stat_swap(union perf_event *event,
753                                   bool sample_id_all __maybe_unused)
754 {
755         event->stat.id     = bswap_64(event->stat.id);
756         event->stat.thread = bswap_32(event->stat.thread);
757         event->stat.cpu    = bswap_32(event->stat.cpu);
758         event->stat.val    = bswap_64(event->stat.val);
759         event->stat.ena    = bswap_64(event->stat.ena);
760         event->stat.run    = bswap_64(event->stat.run);
761 }
762 
763 static void perf_event__stat_round_swap(union perf_event *event,
764                                         bool sample_id_all __maybe_unused)
765 {
766         event->stat_round.type = bswap_64(event->stat_round.type);
767         event->stat_round.time = bswap_64(event->stat_round.time);
768 }
769 
770 typedef void (*perf_event__swap_op)(union perf_event *event,
771                                     bool sample_id_all);
772 
773 static perf_event__swap_op perf_event__swap_ops[] = {
774         [PERF_RECORD_MMAP]                = perf_event__mmap_swap,
775         [PERF_RECORD_MMAP2]               = perf_event__mmap2_swap,
776         [PERF_RECORD_COMM]                = perf_event__comm_swap,
777         [PERF_RECORD_FORK]                = perf_event__task_swap,
778         [PERF_RECORD_EXIT]                = perf_event__task_swap,
779         [PERF_RECORD_LOST]                = perf_event__all64_swap,
780         [PERF_RECORD_READ]                = perf_event__read_swap,
781         [PERF_RECORD_THROTTLE]            = perf_event__throttle_swap,
782         [PERF_RECORD_UNTHROTTLE]          = perf_event__throttle_swap,
783         [PERF_RECORD_SAMPLE]              = perf_event__all64_swap,
784         [PERF_RECORD_AUX]                 = perf_event__aux_swap,
785         [PERF_RECORD_ITRACE_START]        = perf_event__itrace_start_swap,
786         [PERF_RECORD_LOST_SAMPLES]        = perf_event__all64_swap,
787         [PERF_RECORD_SWITCH]              = perf_event__switch_swap,
788         [PERF_RECORD_SWITCH_CPU_WIDE]     = perf_event__switch_swap,
789         [PERF_RECORD_HEADER_ATTR]         = perf_event__hdr_attr_swap,
790         [PERF_RECORD_HEADER_EVENT_TYPE]   = perf_event__event_type_swap,
791         [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
792         [PERF_RECORD_HEADER_BUILD_ID]     = NULL,
793         [PERF_RECORD_ID_INDEX]            = perf_event__all64_swap,
794         [PERF_RECORD_AUXTRACE_INFO]       = perf_event__auxtrace_info_swap,
795         [PERF_RECORD_AUXTRACE]            = perf_event__auxtrace_swap,
796         [PERF_RECORD_AUXTRACE_ERROR]      = perf_event__auxtrace_error_swap,
797         [PERF_RECORD_THREAD_MAP]          = perf_event__thread_map_swap,
798         [PERF_RECORD_CPU_MAP]             = perf_event__cpu_map_swap,
799         [PERF_RECORD_STAT_CONFIG]         = perf_event__stat_config_swap,
800         [PERF_RECORD_STAT]                = perf_event__stat_swap,
801         [PERF_RECORD_STAT_ROUND]          = perf_event__stat_round_swap,
802         [PERF_RECORD_EVENT_UPDATE]        = perf_event__event_update_swap,
803         [PERF_RECORD_TIME_CONV]           = perf_event__all64_swap,
804         [PERF_RECORD_HEADER_MAX]          = NULL,
805 };
806 
807 /*
808  * When perf record finishes a pass over all buffers, it records this
809  * pseudo event.
810  * We record the max timestamp t found in pass n.
811  * Assuming these timestamps are monotonic across cpus, we know that if
812  * a buffer still has events with timestamps below t, they will all be
813  * available and read in pass n + 1.
814  * Hence when we start to read pass n + 2, we can safely flush all
815  * events with timestamps below t.
816  *
817  *    ============ PASS n =================
818  *       CPU 0         |   CPU 1
819  *                     |
820  *    cnt1 timestamps  |   cnt2 timestamps
821  *          1          |         2
822  *          2          |         3
823  *          -          |         4  <--- max recorded
824  *
825  *    ============ PASS n + 1 ==============
826  *       CPU 0         |   CPU 1
827  *                     |
828  *    cnt1 timestamps  |   cnt2 timestamps
829  *          3          |         5
830  *          4          |         6
831  *          5          |         7 <---- max recorded
832  *
833  *      Flush all events below timestamp 4
834  *
835  *    ============ PASS n + 2 ==============
836  *       CPU 0         |   CPU 1
837  *                     |
838  *    cnt1 timestamps  |   cnt2 timestamps
839  *          6          |         8
840  *          7          |         9
841  *          -          |         10
842  *
843  *      Flush all events below timestamp 7
844  *      etc...
845  */
846 static int process_finished_round(struct perf_tool *tool __maybe_unused,
847                                   union perf_event *event __maybe_unused,
848                                   struct ordered_events *oe)
849 {
850         if (dump_trace)
851                 fprintf(stdout, "\n");
852         return ordered_events__flush(oe, OE_FLUSH__ROUND);
853 }
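
A tiny standalone model of the flushing rule from the diagram above: the max timestamp of pass n becomes the safe flush limit once pass n + 1 has completed. Names and structure are illustrative only:

#include <stdio.h>

int main(void)
{
        /* max timestamp recorded in each pass, as in the diagram */
        unsigned long long round_max[] = { 4, 7, 10 };
        unsigned long long flush_limit = 0;
        int n;

        for (n = 0; n < 3; n++) {
                if (n >= 1)
                        printf("pass %d done: safe to flush events with ts <= %llu\n",
                               n, flush_limit);
                flush_limit = round_max[n];     /* becomes the next limit */
        }
        return 0;
}
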
854 
855 int perf_session__queue_event(struct perf_session *s, union perf_event *event,
856                               struct perf_sample *sample, u64 file_offset)
857 {
858         return ordered_events__queue(&s->ordered_events, event, sample, file_offset);
859 }
860 
861 static void callchain__lbr_callstack_printf(struct perf_sample *sample)
862 {
863         struct ip_callchain *callchain = sample->callchain;
864         struct branch_stack *lbr_stack = sample->branch_stack;
865         u64 kernel_callchain_nr = callchain->nr;
866         unsigned int i;
867 
868         for (i = 0; i < kernel_callchain_nr; i++) {
869                 if (callchain->ips[i] == PERF_CONTEXT_USER)
870                         break;
871         }
872 
873         if ((i != kernel_callchain_nr) && lbr_stack->nr) {
874                 u64 total_nr;
875                 /*
876                  * The LBR callstack captures only the user call chain;
877                  * i is the number of kernel call chain entries, plus
878                  * one entry for the PERF_CONTEXT_USER marker.
879                  *
880                  * The user call chain is stored in LBR registers.
881                  * LBRs are register pairs: the caller is stored
882                  * in the "from" register, while the callee is stored
883                  * in the "to" register.
884                  * For example, for the call stack
885                  * "A"->"B"->"C"->"D",
886                  * the LBR registers will record
887                  * "C"->"D", "B"->"C", "A"->"B".
888                  * So only the first "to" register and all "from"
889                  * registers are needed to reconstruct the whole stack.
890                  */
891                 total_nr = i + 1 + lbr_stack->nr + 1;
892                 kernel_callchain_nr = i + 1;
893 
894                 printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);
895 
896                 for (i = 0; i < kernel_callchain_nr; i++)
897                         printf("..... %2d: %016" PRIx64 "\n",
898                                i, callchain->ips[i]);
899 
900                 printf("..... %2d: %016" PRIx64 "\n",
901                        (int)(kernel_callchain_nr), lbr_stack->entries[0].to);
902                 for (i = 0; i < lbr_stack->nr; i++)
903                         printf("..... %2d: %016" PRIx64 "\n",
904                                (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from);
905         }
906 }
907 
908 static void callchain__printf(struct perf_evsel *evsel,
909                               struct perf_sample *sample)
910 {
911         unsigned int i;
912         struct ip_callchain *callchain = sample->callchain;
913 
914         if (perf_evsel__has_branch_callstack(evsel))
915                 callchain__lbr_callstack_printf(sample);
916 
917         printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);
918 
919         for (i = 0; i < callchain->nr; i++)
920                 printf("..... %2d: %016" PRIx64 "\n",
921                        i, callchain->ips[i]);
922 }
923 
924 static void branch_stack__printf(struct perf_sample *sample)
925 {
926         uint64_t i;
927 
928         printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);
929 
930         for (i = 0; i < sample->branch_stack->nr; i++) {
931                 struct branch_entry *e = &sample->branch_stack->entries[i];
932 
933                 printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
934                         i, e->from, e->to,
935                         e->flags.cycles,
936                         e->flags.mispred ? "M" : " ",
937                         e->flags.predicted ? "P" : " ",
938                         e->flags.abort ? "A" : " ",
939                         e->flags.in_tx ? "T" : " ",
940                         (unsigned)e->flags.reserved);
941         }
942 }
943 
944 static void regs_dump__printf(u64 mask, u64 *regs)
945 {
946         unsigned rid, i = 0;
947 
948         for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
949                 u64 val = regs[i++];
950 
951                 printf(".... %-5s 0x%" PRIx64 "\n",
952                        perf_reg_name(rid), val);
953         }
954 }
955 
956 static const char *regs_abi[] = {
957         [PERF_SAMPLE_REGS_ABI_NONE] = "none",
958         [PERF_SAMPLE_REGS_ABI_32] = "32-bit",
959         [PERF_SAMPLE_REGS_ABI_64] = "64-bit",
960 };
961 
962 static inline const char *regs_dump_abi(struct regs_dump *d)
963 {
964         if (d->abi > PERF_SAMPLE_REGS_ABI_64)
965                 return "unknown";
966 
967         return regs_abi[d->abi];
968 }
969 
970 static void regs__printf(const char *type, struct regs_dump *regs)
971 {
972         u64 mask = regs->mask;
973 
974         printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
975                type,
976                mask,
977                regs_dump_abi(regs));
978 
979         regs_dump__printf(mask, regs->regs);
980 }
981 
982 static void regs_user__printf(struct perf_sample *sample)
983 {
984         struct regs_dump *user_regs = &sample->user_regs;
985 
986         if (user_regs->regs)
987                 regs__printf("user", user_regs);
988 }
989 
990 static void regs_intr__printf(struct perf_sample *sample)
991 {
992         struct regs_dump *intr_regs = &sample->intr_regs;
993 
994         if (intr_regs->regs)
995                 regs__printf("intr", intr_regs);
996 }
997 
998 static void stack_user__printf(struct stack_dump *dump)
999 {
1000         printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
1001                dump->size, dump->offset);
1002 }
1003 
1004 static void perf_evlist__print_tstamp(struct perf_evlist *evlist,
1005                                        union perf_event *event,
1006                                        struct perf_sample *sample)
1007 {
1008         u64 sample_type = __perf_evlist__combined_sample_type(evlist);
1009 
1010         if (event->header.type != PERF_RECORD_SAMPLE &&
1011             !perf_evlist__sample_id_all(evlist)) {
1012                 fputs("-1 -1 ", stdout);
1013                 return;
1014         }
1015 
1016         if ((sample_type & PERF_SAMPLE_CPU))
1017                 printf("%u ", sample->cpu);
1018 
1019         if (sample_type & PERF_SAMPLE_TIME)
1020                 printf("%" PRIu64 " ", sample->time);
1021 }
1022 
1023 static void sample_read__printf(struct perf_sample *sample, u64 read_format)
1024 {
1025         printf("... sample_read:\n");
1026 
1027         if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1028                 printf("...... time enabled %016" PRIx64 "\n",
1029                        sample->read.time_enabled);
1030 
1031         if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1032                 printf("...... time running %016" PRIx64 "\n",
1033                        sample->read.time_running);
1034 
1035         if (read_format & PERF_FORMAT_GROUP) {
1036                 u64 i;
1037 
1038                 printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);
1039 
1040                 for (i = 0; i < sample->read.group.nr; i++) {
1041                         struct sample_read_value *value;
1042 
1043                         value = &sample->read.group.values[i];
1044                         printf("..... id %016" PRIx64
1045                                ", value %016" PRIx64 "\n",
1046                                value->id, value->value);
1047                 }
1048         } else
1049                 printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
1050                         sample->read.one.id, sample->read.one.value);
1051 }
1052 
1053 static void dump_event(struct perf_evlist *evlist, union perf_event *event,
1054                        u64 file_offset, struct perf_sample *sample)
1055 {
1056         if (!dump_trace)
1057                 return;
1058 
1059         printf("\n%#" PRIx64 " [%#x]: event: %d\n",
1060                file_offset, event->header.size, event->header.type);
1061 
1062         trace_event(event);
1063 
1064         if (sample)
1065                 perf_evlist__print_tstamp(evlist, event, sample);
1066 
1067         printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
1068                event->header.size, perf_event__name(event->header.type));
1069 }
1070 
1071 static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
1072                         struct perf_sample *sample)
1073 {
1074         u64 sample_type;
1075 
1076         if (!dump_trace)
1077                 return;
1078 
1079         printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
1080                event->header.misc, sample->pid, sample->tid, sample->ip,
1081                sample->period, sample->addr);
1082 
1083         sample_type = evsel->attr.sample_type;
1084 
1085         if (sample_type & PERF_SAMPLE_CALLCHAIN)
1086                 callchain__printf(evsel, sample);
1087 
1088         if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !perf_evsel__has_branch_callstack(evsel))
1089                 branch_stack__printf(sample);
1090 
1091         if (sample_type & PERF_SAMPLE_REGS_USER)
1092                 regs_user__printf(sample);
1093 
1094         if (sample_type & PERF_SAMPLE_REGS_INTR)
1095                 regs_intr__printf(sample);
1096 
1097         if (sample_type & PERF_SAMPLE_STACK_USER)
1098                 stack_user__printf(&sample->user_stack);
1099 
1100         if (sample_type & PERF_SAMPLE_WEIGHT)
1101                 printf("... weight: %" PRIu64 "\n", sample->weight);
1102 
1103         if (sample_type & PERF_SAMPLE_DATA_SRC)
1104                 printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);
1105 
1106         if (sample_type & PERF_SAMPLE_TRANSACTION)
1107                 printf("... transaction: %" PRIx64 "\n", sample->transaction);
1108 
1109         if (sample_type & PERF_SAMPLE_READ)
1110                 sample_read__printf(sample, evsel->attr.read_format);
1111 }
1112 
1113 static struct machine *machines__find_for_cpumode(struct machines *machines,
1114                                                union perf_event *event,
1115                                                struct perf_sample *sample)
1116 {
1117         struct machine *machine;
1118 
1119         if (perf_guest &&
1120             ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
1121              (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
1122                 u32 pid;
1123 
1124                 if (event->header.type == PERF_RECORD_MMAP
1125                     || event->header.type == PERF_RECORD_MMAP2)
1126                         pid = event->mmap.pid;
1127                 else
1128                         pid = sample->pid;
1129 
1130                 machine = machines__find(machines, pid);
1131                 if (!machine)
1132                         machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
1133                 return machine;
1134         }
1135 
1136         return &machines->host;
1137 }
1138 
1139 static int deliver_sample_value(struct perf_evlist *evlist,
1140                                 struct perf_tool *tool,
1141                                 union perf_event *event,
1142                                 struct perf_sample *sample,
1143                                 struct sample_read_value *v,
1144                                 struct machine *machine)
1145 {
1146         struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);
1147 
1148         if (sid) {
1149                 sample->id     = v->id;
1150                 sample->period = v->value - sid->period;
1151                 sid->period    = v->value;
1152         }
1153 
1154         if (!sid || sid->evsel == NULL) {
1155                 ++evlist->stats.nr_unknown_id;
1156                 return 0;
1157         }
1158 
1159         return tool->sample(tool, event, sample, sid->evsel, machine);
1160 }
1161 
1162 static int deliver_sample_group(struct perf_evlist *evlist,
1163                                 struct perf_tool *tool,
1164                                 union perf_event *event,
1165                                 struct perf_sample *sample,
1166                                 struct machine *machine)
1167 {
1168         int ret = -EINVAL;
1169         u64 i;
1170 
1171         for (i = 0; i < sample->read.group.nr; i++) {
1172                 ret = deliver_sample_value(evlist, tool, event, sample,
1173                                            &sample->read.group.values[i],
1174                                            machine);
1175                 if (ret)
1176                         break;
1177         }
1178 
1179         return ret;
1180 }
1181 
1182 static int
1183 perf_evlist__deliver_sample(struct perf_evlist *evlist,
1184                             struct perf_tool *tool,
1185                             union perf_event *event,
1186                             struct perf_sample *sample,
1187                             struct perf_evsel *evsel,
1188                             struct machine *machine)
1189 {
1190         /* We know evsel != NULL. */
1191         u64 sample_type = evsel->attr.sample_type;
1192         u64 read_format = evsel->attr.read_format;
1193 
1194         /* Standard sample delivery. */
1195         if (!(sample_type & PERF_SAMPLE_READ))
1196                 return tool->sample(tool, event, sample, evsel, machine);
1197 
1198         /* For PERF_SAMPLE_READ we have either single or group mode. */
1199         if (read_format & PERF_FORMAT_GROUP)
1200                 return deliver_sample_group(evlist, tool, event, sample,
1201                                             machine);
1202         else
1203                 return deliver_sample_value(evlist, tool, event, sample,
1204                                             &sample->read.one, machine);
1205 }
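
The group/single split above follows the PERF_SAMPLE_READ payload layout documented in perf_event_open(2). A sketch of the group shape the code walks; presence of the optional fields is gated by the PERF_FORMAT_* bits noted in the comments:

#include <stdint.h>

struct sample_read_value_sketch {
        uint64_t value;
        uint64_t id;            /* PERF_FORMAT_ID */
};

struct read_format_group_sketch {
        uint64_t nr;            /* number of group members */
        uint64_t time_enabled;  /* PERF_FORMAT_TOTAL_TIME_ENABLED */
        uint64_t time_running;  /* PERF_FORMAT_TOTAL_TIME_RUNNING */
        struct sample_read_value_sketch values[];       /* nr entries */
};
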
1206 
1207 static int machines__deliver_event(struct machines *machines,
1208                                    struct perf_evlist *evlist,
1209                                    union perf_event *event,
1210                                    struct perf_sample *sample,
1211                                    struct perf_tool *tool, u64 file_offset)
1212 {
1213         struct perf_evsel *evsel;
1214         struct machine *machine;
1215 
1216         dump_event(evlist, event, file_offset, sample);
1217 
1218         evsel = perf_evlist__id2evsel(evlist, sample->id);
1219 
1220         machine = machines__find_for_cpumode(machines, event, sample);
1221 
1222         switch (event->header.type) {
1223         case PERF_RECORD_SAMPLE:
1224                 if (evsel == NULL) {
1225                         ++evlist->stats.nr_unknown_id;
1226                         return 0;
1227                 }
1228                 dump_sample(evsel, event, sample);
1229                 if (machine == NULL) {
1230                         ++evlist->stats.nr_unprocessable_samples;
1231                         return 0;
1232                 }
1233                 return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
1234         case PERF_RECORD_MMAP:
1235                 return tool->mmap(tool, event, sample, machine);
1236         case PERF_RECORD_MMAP2:
1237                 if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
1238                         ++evlist->stats.nr_proc_map_timeout;
1239                 return tool->mmap2(tool, event, sample, machine);
1240         case PERF_RECORD_COMM:
1241                 return tool->comm(tool, event, sample, machine);
1242         case PERF_RECORD_FORK:
1243                 return tool->fork(tool, event, sample, machine);
1244         case PERF_RECORD_EXIT:
1245                 return tool->exit(tool, event, sample, machine);
1246         case PERF_RECORD_LOST:
1247                 if (tool->lost == perf_event__process_lost)
1248                         evlist->stats.total_lost += event->lost.lost;
1249                 return tool->lost(tool, event, sample, machine);
1250         case PERF_RECORD_LOST_SAMPLES:
1251                 if (tool->lost_samples == perf_event__process_lost_samples)
1252                         evlist->stats.total_lost_samples += event->lost_samples.lost;
1253                 return tool->lost_samples(tool, event, sample, machine);
1254         case PERF_RECORD_READ:
1255                 return tool->read(tool, event, sample, evsel, machine);
1256         case PERF_RECORD_THROTTLE:
1257                 return tool->throttle(tool, event, sample, machine);
1258         case PERF_RECORD_UNTHROTTLE:
1259                 return tool->unthrottle(tool, event, sample, machine);
1260         case PERF_RECORD_AUX:
1261                 if (tool->aux == perf_event__process_aux &&
1262                     (event->aux.flags & PERF_AUX_FLAG_TRUNCATED))
1263                         evlist->stats.total_aux_lost += 1;
1264                 return tool->aux(tool, event, sample, machine);
1265         case PERF_RECORD_ITRACE_START:
1266                 return tool->itrace_start(tool, event, sample, machine);
1267         case PERF_RECORD_SWITCH:
1268         case PERF_RECORD_SWITCH_CPU_WIDE:
1269                 return tool->context_switch(tool, event, sample, machine);
1270         default:
1271                 ++evlist->stats.nr_unknown_events;
1272                 return -1;
1273         }
1274 }
1275 
1276 static int perf_session__deliver_event(struct perf_session *session,
1277                                        union perf_event *event,
1278                                        struct perf_sample *sample,
1279                                        struct perf_tool *tool,
1280                                        u64 file_offset)
1281 {
1282         int ret;
1283 
1284         ret = auxtrace__process_event(session, event, sample, tool);
1285         if (ret < 0)
1286                 return ret;
1287         if (ret > 0)
1288                 return 0;
1289 
1290         return machines__deliver_event(&session->machines, session->evlist,
1291                                        event, sample, tool, file_offset);
1292 }
1293 
1294 static s64 perf_session__process_user_event(struct perf_session *session,
1295                                             union perf_event *event,
1296                                             u64 file_offset)
1297 {
1298         struct ordered_events *oe = &session->ordered_events;
1299         struct perf_tool *tool = session->tool;
1300         int fd = perf_data_file__fd(session->file);
1301         int err;
1302 
1303         dump_event(session->evlist, event, file_offset, NULL);
1304 
1305         /* These events are processed right away */
1306         switch (event->header.type) {
1307         case PERF_RECORD_HEADER_ATTR:
1308                 err = tool->attr(tool, event, &session->evlist);
1309                 if (err == 0) {
1310                         perf_session__set_id_hdr_size(session);
1311                         perf_session__set_comm_exec(session);
1312                 }
1313                 return err;
1314         case PERF_RECORD_EVENT_UPDATE:
1315                 return tool->event_update(tool, event, &session->evlist);
1316         case PERF_RECORD_HEADER_EVENT_TYPE:
1317                 /*
1318                  * Deprecated, but we need to handle it for the sake
1319                  * of old data files created in pipe mode.
1320                  */
1321                 return 0;
1322         case PERF_RECORD_HEADER_TRACING_DATA:
1323                 /* setup for reading amidst mmap */
1324                 lseek(fd, file_offset, SEEK_SET);
1325                 return tool->tracing_data(tool, event, session);
1326         case PERF_RECORD_HEADER_BUILD_ID:
1327                 return tool->build_id(tool, event, session);
1328         case PERF_RECORD_FINISHED_ROUND:
1329                 return tool->finished_round(tool, event, oe);
1330         case PERF_RECORD_ID_INDEX:
1331                 return tool->id_index(tool, event, session);
1332         case PERF_RECORD_AUXTRACE_INFO:
1333                 return tool->auxtrace_info(tool, event, session);
1334         case PERF_RECORD_AUXTRACE:
1335                 /* setup for reading amidst mmap */
1336                 lseek(fd, file_offset + event->header.size, SEEK_SET);
1337                 return tool->auxtrace(tool, event, session);
1338         case PERF_RECORD_AUXTRACE_ERROR:
1339                 perf_session__auxtrace_error_inc(session, event);
1340                 return tool->auxtrace_error(tool, event, session);
1341         case PERF_RECORD_THREAD_MAP:
1342                 return tool->thread_map(tool, event, session);
1343         case PERF_RECORD_CPU_MAP:
1344                 return tool->cpu_map(tool, event, session);
1345         case PERF_RECORD_STAT_CONFIG:
1346                 return tool->stat_config(tool, event, session);
1347         case PERF_RECORD_STAT:
1348                 return tool->stat(tool, event, session);
1349         case PERF_RECORD_STAT_ROUND:
1350                 return tool->stat_round(tool, event, session);
1351         case PERF_RECORD_TIME_CONV:
1352                 session->time_conv = event->time_conv;
1353                 return tool->time_conv(tool, event, session);
1354         default:
1355                 return -EINVAL;
1356         }
1357 }
1358 
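     /*
      * Deliver an event synthesized in user space rather than read from
      * the file. A caller sketch (make_synth_event() is hypothetical; a
      * NULL sample is common for the user-space record types):
      *
      *        union perf_event *ev = make_synth_event();
      *
      *        if (perf_session__deliver_synth_event(session, ev, NULL) < 0)
      *                pr_err("failed to deliver synthesized event\n");
      */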
1359 int perf_session__deliver_synth_event(struct perf_session *session,
1360                                       union perf_event *event,
1361                                       struct perf_sample *sample)
1362 {
1363         struct perf_evlist *evlist = session->evlist;
1364         struct perf_tool *tool = session->tool;
1365 
1366         events_stats__inc(&evlist->stats, event->header.type);
1367 
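             /*
              * Synthesized events have no position in the data file, so
              * a file offset of 0 is passed and ordering is bypassed.
              */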
1368         if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1369                 return perf_session__process_user_event(session, event, 0);
1370 
1371         return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
1372 }
1373 
1374 static void event_swap(union perf_event *event, bool sample_id_all)
1375 {
1376         perf_event__swap_op swap;
1377 
1378         swap = perf_event__swap_ops[event->header.type];
1379         if (swap)
1380                 swap(event, sample_id_all);
1381 }
1382 
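     /*
      * Read the event at an arbitrary file offset without disturbing the
      * normal processing position. The buffer must be able to hold the
      * whole event. A caller sketch (handle_event() is hypothetical):
      *
      *        union perf_event *ev;
      *        char buf[PERF_SAMPLE_MAX_SIZE];
      *
      *        if (perf_session__peek_event(session, offset, buf,
      *                                     sizeof(buf), &ev, NULL) == 0)
      *                handle_event(ev);
      */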
1383 int perf_session__peek_event(struct perf_session *session, off_t file_offset,
1384                              void *buf, size_t buf_sz,
1385                              union perf_event **event_ptr,
1386                              struct perf_sample *sample)
1387 {
1388         union perf_event *event;
1389         size_t hdr_sz, rest;
1390         int fd;
1391 
1392         if (session->one_mmap && !session->header.needs_swap) {
1393                 event = file_offset - session->one_mmap_offset +
1394                         session->one_mmap_addr;
1395                 goto out_parse_sample;
1396         }
1397 
1398         if (perf_data_file__is_pipe(session->file))
1399                 return -1;
1400 
1401         fd = perf_data_file__fd(session->file);
1402         hdr_sz = sizeof(struct perf_event_header);
1403 
1404         if (buf_sz < hdr_sz)
1405                 return -1;
1406 
1407         if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
1408             readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
1409                 return -1;
1410 
1411         event = (union perf_event *)buf;
1412 
1413         if (session->header.needs_swap)
1414                 perf_event_header__bswap(&event->header);
1415 
1416         if (event->header.size < hdr_sz || event->header.size > buf_sz)
1417                 return -1;
1418 
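             /* Read the payload right after the header, which must stay intact. */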
1419         rest = event->header.size - hdr_sz;
1420 
1421         if (readn(fd, buf + hdr_sz, rest) != (ssize_t)rest)
1422                 return -1;
1423 
1424         if (session->header.needs_swap)
1425                 event_swap(event, perf_evlist__sample_id_all(session->evlist));
1426 
1427 out_parse_sample:
1428 
1429         if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
1430             perf_evlist__parse_sample(session->evlist, event, sample))
1431                 return -1;
1432 
1433         *event_ptr = event;
1434 
1435         return 0;
1436 }
1437 
1438 static s64 perf_session__process_event(struct perf_session *session,
1439                                        union perf_event *event, u64 file_offset)
1440 {
1441         struct perf_evlist *evlist = session->evlist;
1442         struct perf_tool *tool = session->tool;
1443         struct perf_sample sample;
1444         int ret;
1445 
1446         if (session->header.needs_swap)
1447                 event_swap(event, perf_evlist__sample_id_all(evlist));
1448 
1449         if (event->header.type >= PERF_RECORD_HEADER_MAX)
1450                 return -EINVAL;
1451 
1452         events_stats__inc(&evlist->stats, event->header.type);
1453 
1454         if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1455                 return perf_session__process_user_event(session, event, file_offset);
1456 
1457         /*
1458          * For all kernel events we get the sample data
1459          */
1460         ret = perf_evlist__parse_sample(evlist, event, &sample);
1461         if (ret)
1462                 return ret;
1463 
1464         if (tool->ordered_events) {
1465                 ret = perf_session__queue_event(session, event, &sample, file_offset);
1466                 if (ret != -ETIME)
1467                         return ret;
1468         }
1469 
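             /* -ETIME: the sample has no usable timestamp, deliver it now. */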
1470         return perf_session__deliver_event(session, event, &sample, tool,
1471                                            file_offset);
1472 }
1473 
1474 void perf_event_header__bswap(struct perf_event_header *hdr)
1475 {
1476         hdr->type = bswap_32(hdr->type);
1477         hdr->misc = bswap_16(hdr->misc);
1478         hdr->size = bswap_16(hdr->size);
1479 }
1480 
1481 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
1482 {
1483         return machine__findnew_thread(&session->machines.host, -1, pid);
1484 }
1485 
1486 int perf_session__register_idle_thread(struct perf_session *session)
1487 {
1488         struct thread *thread;
1489         int err = 0;
1490 
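             /* pid 0 is the idle task, traditionally reported as "swapper". */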
1491         thread = machine__findnew_thread(&session->machines.host, 0, 0);
1492         if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
1493                 pr_err("problem inserting idle task.\n");
1494                 err = -1;
1495         }
1496 
1497         /* machine__findnew_thread() got the thread, so put it */
1498         thread__put(thread);
1499         return err;
1500 }
1501 
1502 static void
1503 perf_session__warn_order(const struct perf_session *session)
1504 {
1505         const struct ordered_events *oe = &session->ordered_events;
1506         struct perf_evsel *evsel;
1507         bool should_warn = true;
1508 
1509         evlist__for_each_entry(session->evlist, evsel) {
1510                 if (evsel->attr.write_backward)
1511                         should_warn = false;
1512         }
1513 
1514         if (!should_warn)
1515                 return;
1516         if (oe->nr_unordered_events != 0)
1517                 ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
1518 }
1519 
1520 static void perf_session__warn_about_errors(const struct perf_session *session)
1521 {
1522         const struct events_stats *stats = &session->evlist->stats;
1523 
1524         if (session->tool->lost == perf_event__process_lost &&
1525             stats->nr_events[PERF_RECORD_LOST] != 0) {
1526                 ui__warning("Processed %d events and lost %d chunks!\n\n"
1527                             "Check IO/CPU overload!\n\n",
1528                             stats->nr_events[0],
1529                             stats->nr_events[PERF_RECORD_LOST]);
1530         }
1531 
1532         if (session->tool->lost_samples == perf_event__process_lost_samples) {
1533                 double drop_rate;
1534 
1535                 drop_rate = (double)stats->total_lost_samples /
1536                             (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
1537                 if (drop_rate > 0.05) {
1538                         ui__warning("Processed %" PRIu64 " samples and lost %3.2f%% of them!\n\n",
1539                                     stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
1540                                     drop_rate * 100.0);
1541                 }
1542         }
1543 
1544         if (session->tool->aux == perf_event__process_aux &&
1545             stats->total_aux_lost != 0) {
1546                 ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
1547                             stats->total_aux_lost,
1548                             stats->nr_events[PERF_RECORD_AUX]);
1549         }
1550 
1551         if (stats->nr_unknown_events != 0) {
1552                 ui__warning("Found %u unknown events!\n\n"
1553                             "Is this an older tool processing a perf.data "
1554                             "file generated by a more recent tool?\n\n"
1555                             "If that is not the case, consider "
1556                             "reporting to linux-kernel@vger.kernel.org.\n\n",
1557                             stats->nr_unknown_events);
1558         }
1559 
1560         if (stats->nr_unknown_id != 0) {
1561                 ui__warning("%u samples with id not present in the header\n",
1562                             stats->nr_unknown_id);
1563         }
1564 
1565         if (stats->nr_invalid_chains != 0) {
1566                 ui__warning("Found invalid callchains!\n\n"
1567                             "%u out of %u events were discarded for this reason.\n\n"
1568                             "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
1569                             stats->nr_invalid_chains,
1570                             stats->nr_events[PERF_RECORD_SAMPLE]);
1571         }
1572 
1573         if (stats->nr_unprocessable_samples != 0) {
1574                 ui__warning("%u unprocessable samples recorded.\n"
1575                             "Do you have a KVM guest running and not using 'perf kvm'?\n",
1576                             stats->nr_unprocessable_samples);
1577         }
1578 
1579         perf_session__warn_order(session);
1580 
1581         events_stats__auxtrace_error_warn(stats);
1582 
1583         if (stats->nr_proc_map_timeout != 0) {
1584                 ui__warning("%d map information files for pre-existing threads were\n"
1585                             "not processed. If there are samples for their addresses,\n"
1586                             "they will not be resolved. You can find out which threads\n"
1587                             "these are by running with -v and redirecting the output\n"
1588                             "to a file.\n"
1589                             "The time limit for processing proc maps may be too short;\n"
1590                             "increase it with --proc-map-timeout.\n",
1591                             stats->nr_proc_map_timeout);
1592         }
1593 }
1594 
1595 static int perf_session__flush_thread_stack(struct thread *thread,
1596                                             void *p __maybe_unused)
1597 {
1598         return thread_stack__flush(thread);
1599 }
1600 
1601 static int perf_session__flush_thread_stacks(struct perf_session *session)
1602 {
1603         return machines__for_each_thread(&session->machines,
1604                                          perf_session__flush_thread_stack,
1605                                          NULL);
1606 }
1607 
1608 volatile int session_done;
1609 
1610 static int __perf_session__process_pipe_events(struct perf_session *session)
1611 {
1612         struct ordered_events *oe = &session->ordered_events;
1613         struct perf_tool *tool = session->tool;
1614         int fd = perf_data_file__fd(session->file);
1615         union perf_event *event;
1616         uint32_t size, cur_size = 0;
1617         void *buf = NULL;
1618         s64 skip = 0;
1619         u64 head;
1620         ssize_t err;
1621         void *p;
1622 
1623         perf_tool__fill_defaults(tool);
1624 
1625         head = 0;
1626         cur_size = sizeof(union perf_event);
1627 
1628         buf = malloc(cur_size);
1629         if (!buf)
1630                 return -errno;
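             /*
              * Read loop: fetch one event header, grow the buffer if this
              * event is bigger than any seen so far, read the payload and
              * hand the complete event to the session.
              */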
1631 more:
1632         event = buf;
1633         err = readn(fd, event, sizeof(struct perf_event_header));
1634         if (err <= 0) {
1635                 if (err == 0)
1636                         goto done;
1637 
1638                 pr_err("failed to read event header\n");
1639                 goto out_err;
1640         }
1641 
1642         if (session->header.needs_swap)
1643                 perf_event_header__bswap(&event->header);
1644 
1645         size = event->header.size;
1646         if (size < sizeof(struct perf_event_header)) {
1647                 pr_err("bad event header size\n");
1648                 goto out_err;
1649         }
1650 
1651         if (size > cur_size) {
1652                 void *new = realloc(buf, size);
1653                 if (!new) {
1654                         pr_err("failed to allocate memory to read event\n");
1655                         goto out_err;
1656                 }
1657                 buf = new;
1658                 cur_size = size;
1659                 event = buf;
1660         }
1661         p = event;
1662         p += sizeof(struct perf_event_header);
1663 
1664         if (size > sizeof(struct perf_event_header)) {
1665                 err = readn(fd, p, size - sizeof(struct perf_event_header));
1666                 if (err <= 0) {
1667                         if (err == 0) {
1668                                 pr_err("unexpected end of event stream\n");
1669                                 goto done;
1670                         }
1671 
1672                         pr_err("failed to read event data\n");
1673                         goto out_err;
1674                 }
1675         }
1676 
1677         if ((skip = perf_session__process_event(session, event, head)) < 0) {
1678                 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1679                        head, event->header.size, event->header.type);
1680                 err = -EINVAL;
1681                 goto out_err;
1682         }
1683 
1684         head += size;
1685 
1686         if (skip > 0)
1687                 head += skip;
1688 
1689         if (!session_done())
1690                 goto more;
1691 done:
1692         /* do the final flush for ordered samples */
1693         err = ordered_events__flush(oe, OE_FLUSH__FINAL);
1694         if (err)
1695                 goto out_err;
1696         err = auxtrace__flush_events(session, tool);
1697         if (err)
1698                 goto out_err;
1699         err = perf_session__flush_thread_stacks(session);
1700 out_err:
1701         free(buf);
1702         perf_session__warn_about_errors(session);
1703         ordered_events__free(&session->ordered_events);
1704         auxtrace__free_events(session);
1705         return err;
1706 }
1707 
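     /*
      * Return the event at 'head' within the current mapping, or NULL
      * when the event is not fully contained and the caller must remap
      * the window further into the file.
      */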
1708 static union perf_event *
1709 fetch_mmaped_event(struct perf_session *session,
1710                    u64 head, size_t mmap_size, char *buf)
1711 {
1712         union perf_event *event;
1713 
1714         /*
1715          * Ensure we have enough space remaining to read
1716          * the size of the event in the headers.
1717          */
1718         if (head + sizeof(event->header) > mmap_size)
1719                 return NULL;
1720 
1721         event = (union perf_event *)(buf + head);
1722 
1723         if (session->header.needs_swap)
1724                 perf_event_header__bswap(&event->header);
1725 
1726         if (head + event->header.size > mmap_size) {
1727                 /* We're not fetching the event so swap back again */
1728                 if (session->header.needs_swap)
1729                         perf_event_header__bswap(&event->header);
1730                 return NULL;
1731         }
1732 
1733         return event;
1734 }
1735 
1736 /*
1737  * On 64bit we can mmap the data file in one go. No need for tiny mmap
1738  * slices. On 32bit we use 32MB.
1739  */
1740 #if BITS_PER_LONG == 64
1741 #define MMAP_SIZE ULLONG_MAX
1742 #define NUM_MMAPS 1
1743 #else
1744 #define MMAP_SIZE (32 * 1024 * 1024ULL)
1745 #define NUM_MMAPS 128
1746 #endif
1747 
1748 static int __perf_session__process_events(struct perf_session *session,
1749                                           u64 data_offset, u64 data_size,
1750                                           u64 file_size)
1751 {
1752         struct ordered_events *oe = &session->ordered_events;
1753         struct perf_tool *tool = session->tool;
1754         int fd = perf_data_file__fd(session->file);
1755         u64 head, page_offset, file_offset, file_pos, size;
1756         int err, mmap_prot, mmap_flags, map_idx = 0;
1757         size_t  mmap_size;
1758         char *buf, *mmaps[NUM_MMAPS];
1759         union perf_event *event;
1760         struct ui_progress prog;
1761         s64 skip;
1762 
1763         perf_tool__fill_defaults(tool);
1764 
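             /*
              * mmap() requires a page-aligned file offset: round the data
              * offset down to a page boundary and keep the remainder in
              * 'head'.
              */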
1765         page_offset = page_size * (data_offset / page_size);
1766         file_offset = page_offset;
1767         head = data_offset - page_offset;
1768 
1769         if (data_size == 0)
1770                 goto out;
1771 
1772         if (data_offset + data_size < file_size)
1773                 file_size = data_offset + data_size;
1774 
1775         ui_progress__init(&prog, file_size, "Processing events...");
1776 
1777         mmap_size = MMAP_SIZE;
1778         if (mmap_size > file_size) {
1779                 mmap_size = file_size;
1780                 session->one_mmap = true;
1781         }
1782 
1783         memset(mmaps, 0, sizeof(mmaps));
1784 
1785         mmap_prot  = PROT_READ;
1786         mmap_flags = MAP_SHARED;
1787 
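             /*
              * Byte-swapping rewrites the mapped data in place, so the
              * mapping must be writable and private when the file was
              * recorded with the opposite endianness.
              */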
1788         if (session->header.needs_swap) {
1789                 mmap_prot  |= PROT_WRITE;
1790                 mmap_flags = MAP_PRIVATE;
1791         }
1792 remap:
1793         buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, fd,
1794                    file_offset);
1795         if (buf == MAP_FAILED) {
1796                 pr_err("failed to mmap file\n");
1797                 err = -errno;
1798                 goto out_err;
1799         }
1800         mmaps[map_idx] = buf;
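             /* NUM_MMAPS is a power of two, so the index wraps with a mask. */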
1801         map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
1802         file_pos = file_offset + head;
1803         if (session->one_mmap) {
1804                 session->one_mmap_addr = buf;
1805                 session->one_mmap_offset = file_offset;
1806         }
1807 
1808 more:
1809         event = fetch_mmaped_event(session, head, mmap_size, buf);
1810         if (!event) {
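                     /*
                      * The next event straddles the end of this mapping:
                      * drop the current window and remap starting at the
                      * page containing 'head'.
                      */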
1811                 if (mmaps[map_idx]) {
1812                         munmap(mmaps[map_idx], mmap_size);
1813                         mmaps[map_idx] = NULL;
1814                 }
1815 
1816                 page_offset = page_size * (head / page_size);
1817                 file_offset += page_offset;
1818                 head -= page_offset;
1819                 goto remap;
1820         }
1821 
1822         size = event->header.size;
1823 
1824         if (size < sizeof(struct perf_event_header) ||
1825             (skip = perf_session__process_event(session, event, file_pos)) < 0) {
1826                 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1827                        file_offset + head, event->header.size,
1828                        event->header.type);
1829                 err = -EINVAL;
1830                 goto out_err;
1831         }
1832 
1833         if (skip)
1834                 size += skip;
1835 
1836         head += size;
1837         file_pos += size;
1838 
1839         ui_progress__update(&prog, size);
1840 
1841         if (session_done())
1842                 goto out;
1843 
1844         if (file_pos < file_size)
1845                 goto more;
1846 
1847 out:
1848         /* do the final flush for ordered samples */
1849         err = ordered_events__flush(oe, OE_FLUSH__FINAL);
1850         if (err)
1851                 goto out_err;
1852         err = auxtrace__flush_events(session, tool);
1853         if (err)
1854                 goto out_err;
1855         err = perf_session__flush_thread_stacks(session);
1856 out_err:
1857         ui_progress__finish();
1858         perf_session__warn_about_errors(session);
1859         /*
1860          * We may be switching perf.data output, so make ordered_events
1861          * reusable.
1862          */
1863         ordered_events__reinit(&session->ordered_events);
1864         auxtrace__free_events(session);
1865         session->one_mmap = false;
1866         return err;
1867 }
1868 
1869 int perf_session__process_events(struct perf_session *session)
1870 {
1871         u64 size = perf_data_file__size(session->file);
1872         int err;
1873 
1874         if (perf_session__register_idle_thread(session) < 0)
1875                 return -ENOMEM;
1876 
1877         if (!perf_data_file__is_pipe(session->file))
1878                 err = __perf_session__process_events(session,
1879                                                      session->header.data_offset,
1880                                                      session->header.data_size, size);
1881         else
1882                 err = __perf_session__process_pipe_events(session);
1883 
1884         return err;
1885 }
1886 
1887 bool perf_session__has_traces(struct perf_session *session, const char *msg)
1888 {
1889         struct perf_evsel *evsel;
1890 
1891         evlist__for_each_entry(session->evlist, evsel) {
1892                 if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
1893                         return true;
1894         }
1895 
1896         pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
1897         return false;
1898 }
1899 
1900 int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
1901                                      const char *symbol_name, u64 addr)
1902 {
1903         char *bracket;
1904         enum map_type i;
1905         struct ref_reloc_sym *ref;
1906 
1907         ref = zalloc(sizeof(struct ref_reloc_sym));
1908         if (ref == NULL)
1909                 return -ENOMEM;
1910 
1911         ref->name = strdup(symbol_name);
1912         if (ref->name == NULL) {
1913                 free(ref);
1914                 return -ENOMEM;
1915         }
1916 
1917         bracket = strchr(ref->name, ']');
1918         if (bracket)
1919                 *bracket = '\0';
1920 
1921         ref->addr = addr;
1922 
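             /* One ref_reloc_sym is shared by all map types. */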
1923         for (i = 0; i < MAP__NR_TYPES; ++i) {
1924                 struct kmap *kmap = map__kmap(maps[i]);
1925 
1926                 if (!kmap)
1927                         continue;
1928                 kmap->ref_reloc_sym = ref;
1929         }
1930 
1931         return 0;
1932 }
1933 
1934 size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
1935 {
1936         return machines__fprintf_dsos(&session->machines, fp);
1937 }
1938 
1939 size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
1940                                           bool (skip)(struct dso *dso, int parm), int parm)
1941 {
1942         return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
1943 }
1944 
1945 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
1946 {
1947         size_t ret;
1948         const char *msg = "";
1949 
1950         if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
1951                 msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";
1952 
1953         ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
1954 
1955         ret += events_stats__fprintf(&session->evlist->stats, fp);
1956         return ret;
1957 }
1958 
1959 size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
1960 {
1961         /*
1962          * FIXME: Here we have to actually print all the machines in this
1963          * session, not just the host...
1964          */
1965         return machine__fprintf(&session->machines.host, fp);
1966 }
1967 
1968 struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
1969                                               unsigned int type)
1970 {
1971         struct perf_evsel *pos;
1972 
1973         evlist__for_each_entry(session->evlist, pos) {
1974                 if (pos->attr.type == type)
1975                         return pos;
1976         }
1977         return NULL;
1978 }
1979 
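     /*
      * Set a bit for each CPU named in cpu_list (e.g. "0-3,6"). A caller
      * sketch, assuming the bitmap starts out zeroed:
      *
      *        DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
      *
      *        if (perf_session__cpu_bitmap(session, "0-3", cpu_bitmap) < 0)
      *                return -1;
      */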
1980 int perf_session__cpu_bitmap(struct perf_session *session,
1981                              const char *cpu_list, unsigned long *cpu_bitmap)
1982 {
1983         int i, err = -1;
1984         struct cpu_map *map;
1985 
1986         for (i = 0; i < PERF_TYPE_MAX; ++i) {
1987                 struct perf_evsel *evsel;
1988 
1989                 evsel = perf_session__find_first_evtype(session, i);
1990                 if (!evsel)
1991                         continue;
1992 
1993                 if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
1994                         pr_err("File does not contain CPU events. "
1995                                "Remove the --cpu option to proceed.\n");
1996                         return -1;
1997                 }
1998         }
1999 
2000         map = cpu_map__new(cpu_list);
2001         if (map == NULL) {
2002                 pr_err("Invalid cpu_list\n");
2003                 return -1;
2004         }
2005 
2006         for (i = 0; i < map->nr; i++) {
2007                 int cpu = map->map[i];
2008 
2009                 if (cpu >= MAX_NR_CPUS) {
2010                         pr_err("Requested CPU %d too large. "
2011                                "Consider raising MAX_NR_CPUS\n", cpu);
2012                         goto out_delete_map;
2013                 }
2014 
2015                 set_bit(cpu, cpu_bitmap);
2016         }
2017 
2018         err = 0;
2019 
2020 out_delete_map:
2021         cpu_map__put(map);
2022         return err;
2023 }
2024 
2025 void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
2026                                 bool full)
2027 {
2028         if (session == NULL || fp == NULL)
2029                 return;
2030 
2031         fprintf(fp, "# ========\n");
2032         perf_header__fprintf_info(session, fp, full);
2033         fprintf(fp, "# ========\n#\n");
2034 }
2035 
2036 
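     /*
      * Sketch of the usual call pattern (the handler functions here are
      * hypothetical):
      *
      *        static const struct perf_evsel_str_handler handlers[] = {
      *                { "sched:sched_switch", process_sched_switch },
      *                { "sched:sched_wakeup", process_sched_wakeup },
      *        };
      *
      *        if (__perf_session__set_tracepoints_handlers(session, handlers,
      *                                                     ARRAY_SIZE(handlers)))
      *                return -1;
      */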
2037 int __perf_session__set_tracepoints_handlers(struct perf_session *session,
2038                                              const struct perf_evsel_str_handler *assocs,
2039                                              size_t nr_assocs)
2040 {
2041         struct perf_evsel *evsel;
2042         size_t i;
2043         int err;
2044 
2045         for (i = 0; i < nr_assocs; i++) {
2046                 /*
2047                  * If the handler is for an event that is not in this
2048                  * session, just ignore it.
2049                  */
2050                 evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
2051                 if (evsel == NULL)
2052                         continue;
2053 
2054                 err = -EEXIST;
2055                 if (evsel->handler != NULL)
2056                         goto out;
2057                 evsel->handler = assocs[i].handler;
2058         }
2059 
2060         err = 0;
2061 out:
2062         return err;
2063 }
2064 
2065 int perf_event__process_id_index(struct perf_tool *tool __maybe_unused,
2066                                  union perf_event *event,
2067                                  struct perf_session *session)
2068 {
2069         struct perf_evlist *evlist = session->evlist;
2070         struct id_index_event *ie = &event->id_index;
2071         size_t i, nr, max_nr;
2072 
2073         max_nr = (ie->header.size - sizeof(struct id_index_event)) /
2074                  sizeof(struct id_index_entry);
2075         nr = ie->nr;
2076         if (nr > max_nr)
2077                 return -EINVAL;
2078 
2079         if (dump_trace)
2080                 fprintf(stdout, " nr: %zu\n", nr);
2081 
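             /*
              * Each entry maps a sample id back to its index, cpu and tid,
              * used e.g. by AUX area trace decoding to attribute samples.
              */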
2082         for (i = 0; i < nr; i++) {
2083                 struct id_index_entry *e = &ie->entries[i];
2084                 struct perf_sample_id *sid;
2085 
2086                 if (dump_trace) {
2087                         fprintf(stdout, " ... id: %"PRIu64, e->id);
2088                         fprintf(stdout, "  idx: %"PRIu64, e->idx);
2089                         fprintf(stdout, "  cpu: %"PRId64, e->cpu);
2090                         fprintf(stdout, "  tid: %"PRId64"\n", e->tid);
2091                 }
2092 
2093                 sid = perf_evlist__id2sid(evlist, e->id);
2094                 if (!sid)
2095                         return -ENOENT;
2096                 sid->idx = e->idx;
2097                 sid->cpu = e->cpu;
2098                 sid->tid = e->tid;
2099         }
2100         return 0;
2101 }
2102 
2103 int perf_event__synthesize_id_index(struct perf_tool *tool,
2104                                     perf_event__handler_t process,
2105                                     struct perf_evlist *evlist,
2106                                     struct machine *machine)
2107 {
2108         union perf_event *ev;
2109         struct perf_evsel *evsel;
2110         size_t nr = 0, i = 0, sz, max_nr, n;
2111         int err;
2112 
2113         pr_debug2("Synthesizing id index\n");
2114 
2115         max_nr = (UINT16_MAX - sizeof(struct id_index_event)) /
2116                  sizeof(struct id_index_entry);
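             /*
              * header.size is a u16, so at most max_nr entries fit in one
              * event; the index is emitted in chunks when there are more.
              */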
2117 
2118         evlist__for_each_entry(evlist, evsel)
2119                 nr += evsel->ids;
2120 
2121         n = nr > max_nr ? max_nr : nr;
2122         sz = sizeof(struct id_index_event) + n * sizeof(struct id_index_entry);
2123         ev = zalloc(sz);
2124         if (!ev)
2125                 return -ENOMEM;
2126 
2127         ev->id_index.header.type = PERF_RECORD_ID_INDEX;
2128         ev->id_index.header.size = sz;
2129         ev->id_index.nr = n;
2130 
2131         evlist__for_each_entry(evlist, evsel) {
2132                 u32 j;
2133 
2134                 for (j = 0; j < evsel->ids; j++) {
2135                         struct id_index_entry *e;
2136                         struct perf_sample_id *sid;
2137 
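                             /* Chunk full: emit it and start filling a new one. */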
2138                         if (i >= n) {
2139                                 err = process(tool, ev, NULL, machine);
2140                                 if (err)
2141                                         goto out_err;
2142                                 nr -= n;
2143                                 i = 0;
2144                         }
2145 
2146                         e = &ev->id_index.entries[i++];
2147 
2148                         e->id = evsel->id[j];
2149 
2150                         sid = perf_evlist__id2sid(evlist, e->id);
2151                         if (!sid) {
2152                                 free(ev);
2153                                 return -ENOENT;
2154                         }
2155 
2156                         e->idx = sid->idx;
2157                         e->cpu = sid->cpu;
2158                         e->tid = sid->tid;
2159                 }
2160         }
2161 
2162         sz = sizeof(struct id_index_event) + nr * sizeof(struct id_index_entry);
2163         ev->id_index.header.size = sz;
2164         ev->id_index.nr = nr;
2165 
2166         err = process(tool, ev, NULL, machine);
2167 out_err:
2168         free(ev);
2169 
2170         return err;
2171 }
2172 
