~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/tools/perf/util/header.c

Version: ~ [ linux-5.10-rc5 ] ~ [ linux-5.9.10 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.79 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.159 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.208 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.245 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.245 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.19.8 ] ~ [ linux-3.18.140 ] ~ [ linux-3.17.8 ] ~ [ linux-3.16.85 ] ~ [ linux-3.15.10 ] ~ [ linux-3.14.79 ] ~ [ linux-3.13.11 ] ~ [ linux-3.12.74 ] ~ [ linux-3.11.10 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 // SPDX-License-Identifier: GPL-2.0
  2 #include <errno.h>
  3 #include <inttypes.h>
  4 #include "string2.h"
  5 #include <sys/param.h>
  6 #include <sys/types.h>
  7 #include <byteswap.h>
  8 #include <unistd.h>
  9 #include <stdio.h>
 10 #include <stdlib.h>
 11 #include <linux/compiler.h>
 12 #include <linux/list.h>
 13 #include <linux/kernel.h>
 14 #include <linux/bitops.h>
 15 #include <linux/string.h>
 16 #include <linux/stringify.h>
 17 #include <linux/zalloc.h>
 18 #include <sys/stat.h>
 19 #include <sys/utsname.h>
 20 #include <linux/time64.h>
 21 #include <dirent.h>
 22 #include <bpf/libbpf.h>
 23 
 24 #include "evlist.h"
 25 #include "evsel.h"
 26 #include "header.h"
 27 #include "memswap.h"
 28 #include "../perf.h"
 29 #include "trace-event.h"
 30 #include "session.h"
 31 #include "symbol.h"
 32 #include "debug.h"
 33 #include "cpumap.h"
 34 #include "pmu.h"
 35 #include "vdso.h"
 36 #include "strbuf.h"
 37 #include "build-id.h"
 38 #include "data.h"
 39 #include <api/fs/fs.h>
 40 #include "asm/bug.h"
 41 #include "tool.h"
 42 #include "time-utils.h"
 43 #include "units.h"
 44 #include "cputopo.h"
 45 #include "bpf-event.h"
 46 
 47 #include <linux/ctype.h>
 48 
 49 /*
 50  * magic2 = "PERFILE2"
 51  * must be a numerical value to let the endianness
 52  * determine the memory layout. That way we are able
 53  * to detect endianness when reading the perf.data file
 54  * back.
 55  *
 56  * we check for legacy (PERFFILE) format.
 57  */
/* legacy (pre-v2) magic, matched as a plain string prefix */
static const char *__perf_magic1 = "PERFFILE";
/* "PERFILE2" read as a little-endian u64 */
static const u64 __perf_magic2    = 0x32454c4946524550ULL;
/* the same value byte-swapped, i.e. written by an opposite-endian host */
static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;

#define PERF_MAGIC      __perf_magic2

/* build-time perf version, recorded into the file by write_version() */
const char perf_version_string[] = PERF_VERSION;
 65 
/* On-file record: an event's attr plus where its sample ids live. */
struct perf_file_attr {
        struct perf_event_attr  attr;
        struct perf_file_section        ids;    /* offset/size of the id array */
};
 70 
/*
 * I/O context for one feature section: data goes straight to an fd,
 * or into a growable memory buffer (pipe mode).
 */
struct feat_fd {
        struct perf_header      *ph;    /* header being read/written (endianness) */
        int                     fd;     /* backing descriptor when not buffered */
        void                    *buf;   /* Either buf != NULL or fd >= 0 */
        ssize_t                 offset; /* current read/write position in buf */
        size_t                  size;   /* allocated size of buf */
        struct perf_evsel       *events;
};
 79 
/* Mark feature bit @feat as present in @header's feature bitmap. */
void perf_header__set_feat(struct perf_header *header, int feat)
{
        set_bit(feat, header->adds_features);
}
 84 
/* Clear feature bit @feat from @header's feature bitmap. */
void perf_header__clear_feat(struct perf_header *header, int feat)
{
        clear_bit(feat, header->adds_features);
}
 89 
/* Return true if feature bit @feat is set in @header's feature bitmap. */
bool perf_header__has_feat(const struct perf_header *header, int feat)
{
        return test_bit(feat, header->adds_features);
}
 94 
 95 static int __do_write_fd(struct feat_fd *ff, const void *buf, size_t size)
 96 {
 97         ssize_t ret = writen(ff->fd, buf, size);
 98 
 99         if (ret != (ssize_t)size)
100                 return ret < 0 ? (int)ret : -1;
101         return 0;
102 }
103 
104 static int __do_write_buf(struct feat_fd *ff,  const void *buf, size_t size)
105 {
106         /* struct perf_event_header::size is u16 */
107         const size_t max_size = 0xffff - sizeof(struct perf_event_header);
108         size_t new_size = ff->size;
109         void *addr;
110 
111         if (size + ff->offset > max_size)
112                 return -E2BIG;
113 
114         while (size > (new_size - ff->offset))
115                 new_size <<= 1;
116         new_size = min(max_size, new_size);
117 
118         if (ff->size < new_size) {
119                 addr = realloc(ff->buf, new_size);
120                 if (!addr)
121                         return -ENOMEM;
122                 ff->buf = addr;
123                 ff->size = new_size;
124         }
125 
126         memcpy(ff->buf + ff->offset, buf, size);
127         ff->offset += size;
128 
129         return 0;
130 }
131 
132 /* Return: 0 if succeded, -ERR if failed. */
133 int do_write(struct feat_fd *ff, const void *buf, size_t size)
134 {
135         if (!ff->buf)
136                 return __do_write_fd(ff, buf, size);
137         return __do_write_buf(ff, buf, size);
138 }
139 
140 /* Return: 0 if succeded, -ERR if failed. */
141 static int do_write_bitmap(struct feat_fd *ff, unsigned long *set, u64 size)
142 {
143         u64 *p = (u64 *) set;
144         int i, ret;
145 
146         ret = do_write(ff, &size, sizeof(size));
147         if (ret < 0)
148                 return ret;
149 
150         for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
151                 ret = do_write(ff, p + i, sizeof(*p));
152                 if (ret < 0)
153                         return ret;
154         }
155 
156         return 0;
157 }
158 
159 /* Return: 0 if succeded, -ERR if failed. */
160 int write_padded(struct feat_fd *ff, const void *bf,
161                  size_t count, size_t count_aligned)
162 {
163         static const char zero_buf[NAME_ALIGN];
164         int err = do_write(ff, bf, count);
165 
166         if (!err)
167                 err = do_write(ff, zero_buf, count_aligned - count);
168 
169         return err;
170 }
171 
/* On-file footprint of a string: u32 length field + NUL-padded payload */
#define string_size(str)                                                \
        (PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))
174 
175 /* Return: 0 if succeded, -ERR if failed. */
176 static int do_write_string(struct feat_fd *ff, const char *str)
177 {
178         u32 len, olen;
179         int ret;
180 
181         olen = strlen(str) + 1;
182         len = PERF_ALIGN(olen, NAME_ALIGN);
183 
184         /* write len, incl. \0 */
185         ret = do_write(ff, &len, sizeof(len));
186         if (ret < 0)
187                 return ret;
188 
189         return write_padded(ff, str, olen, len);
190 }
191 
192 static int __do_read_fd(struct feat_fd *ff, void *addr, ssize_t size)
193 {
194         ssize_t ret = readn(ff->fd, addr, size);
195 
196         if (ret != size)
197                 return ret < 0 ? (int)ret : -1;
198         return 0;
199 }
200 
201 static int __do_read_buf(struct feat_fd *ff, void *addr, ssize_t size)
202 {
203         if (size > (ssize_t)ff->size - ff->offset)
204                 return -1;
205 
206         memcpy(addr, ff->buf + ff->offset, size);
207         ff->offset += size;
208 
209         return 0;
210 
211 }
212 
213 static int __do_read(struct feat_fd *ff, void *addr, ssize_t size)
214 {
215         if (!ff->buf)
216                 return __do_read_fd(ff, addr, size);
217         return __do_read_buf(ff, addr, size);
218 }
219 
220 static int do_read_u32(struct feat_fd *ff, u32 *addr)
221 {
222         int ret;
223 
224         ret = __do_read(ff, addr, sizeof(*addr));
225         if (ret)
226                 return ret;
227 
228         if (ff->ph->needs_swap)
229                 *addr = bswap_32(*addr);
230         return 0;
231 }
232 
233 static int do_read_u64(struct feat_fd *ff, u64 *addr)
234 {
235         int ret;
236 
237         ret = __do_read(ff, addr, sizeof(*addr));
238         if (ret)
239                 return ret;
240 
241         if (ff->ph->needs_swap)
242                 *addr = bswap_64(*addr);
243         return 0;
244 }
245 
246 static char *do_read_string(struct feat_fd *ff)
247 {
248         u32 len;
249         char *buf;
250 
251         if (do_read_u32(ff, &len))
252                 return NULL;
253 
254         buf = malloc(len);
255         if (!buf)
256                 return NULL;
257 
258         if (!__do_read(ff, buf, len)) {
259                 /*
260                  * strings are padded by zeroes
261                  * thus the actual strlen of buf
262                  * may be less than len
263                  */
264                 return buf;
265         }
266 
267         free(buf);
268         return NULL;
269 }
270 
271 /* Return: 0 if succeded, -ERR if failed. */
272 static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize)
273 {
274         unsigned long *set;
275         u64 size, *p;
276         int i, ret;
277 
278         ret = do_read_u64(ff, &size);
279         if (ret)
280                 return ret;
281 
282         set = bitmap_alloc(size);
283         if (!set)
284                 return -ENOMEM;
285 
286         p = (u64 *) set;
287 
288         for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
289                 ret = do_read_u64(ff, p + i);
290                 if (ret < 0) {
291                         free(set);
292                         return ret;
293                 }
294         }
295 
296         *pset  = set;
297         *psize = size;
298         return 0;
299 }
300 
/*
 * Delegate to read_tracing_data(), which emits straight to ff->fd -
 * hence the guard rejecting buffered (pipe) mode.
 */
static int write_tracing_data(struct feat_fd *ff,
                              struct perf_evlist *evlist)
{
        if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
                return -1;

        return read_tracing_data(ff->fd, &evlist->entries);
}
309 
310 static int write_build_id(struct feat_fd *ff,
311                           struct perf_evlist *evlist __maybe_unused)
312 {
313         struct perf_session *session;
314         int err;
315 
316         session = container_of(ff->ph, struct perf_session, header);
317 
318         if (!perf_session__read_build_ids(session, true))
319                 return -1;
320 
321         if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
322                 return -1;
323 
324         err = perf_session__write_buildid_table(session, ff);
325         if (err < 0) {
326                 pr_debug("failed to write buildid table\n");
327                 return err;
328         }
329         perf_session__cache_build_ids(session);
330 
331         return 0;
332 }
333 
334 static int write_hostname(struct feat_fd *ff,
335                           struct perf_evlist *evlist __maybe_unused)
336 {
337         struct utsname uts;
338         int ret;
339 
340         ret = uname(&uts);
341         if (ret < 0)
342                 return -1;
343 
344         return do_write_string(ff, uts.nodename);
345 }
346 
347 static int write_osrelease(struct feat_fd *ff,
348                            struct perf_evlist *evlist __maybe_unused)
349 {
350         struct utsname uts;
351         int ret;
352 
353         ret = uname(&uts);
354         if (ret < 0)
355                 return -1;
356 
357         return do_write_string(ff, uts.release);
358 }
359 
360 static int write_arch(struct feat_fd *ff,
361                       struct perf_evlist *evlist __maybe_unused)
362 {
363         struct utsname uts;
364         int ret;
365 
366         ret = uname(&uts);
367         if (ret < 0)
368                 return -1;
369 
370         return do_write_string(ff, uts.machine);
371 }
372 
/* Record the perf version string this file was written with. */
static int write_version(struct feat_fd *ff,
                         struct perf_evlist *evlist __maybe_unused)
{
        return do_write_string(ff, perf_version_string);
}
378 
/*
 * Find the first /proc/cpuinfo line starting with @cpuinfo_proc (e.g.
 * "model name"), strip the "key:" prefix and trailing newline, squash
 * runs of whitespace to single spaces, and write the result.
 * Return: 0 on success, -1 on failure (no match, I/O error, ...).
 */
static int __write_cpudesc(struct feat_fd *ff, const char *cpuinfo_proc)
{
        FILE *file;
        char *buf = NULL;
        char *s, *p;
        const char *search = cpuinfo_proc;
        size_t len = 0;
        int ret = -1;

        if (!search)
                return -1;

        file = fopen("/proc/cpuinfo", "r");
        if (!file)
                return -1;

        /* scan for the first line whose key matches @search */
        while (getline(&buf, &len, file) > 0) {
                ret = strncmp(buf, search, strlen(search));
                if (!ret)
                        break;
        }

        if (ret) {
                ret = -1;
                goto done;
        }

        s = buf;

        /* skip past "key: " to the value, when one is present */
        p = strchr(buf, ':');
        if (p && *(p+1) == ' ' && *(p+2))
                s = p + 2;
        p = strchr(s, '\n');
        if (p)
                *p = '\0';

        /* squash extra space characters (branding string) */
        p = s;
        while (*p) {
                if (isspace(*p)) {
                        char *r = p + 1;
                        char *q = skip_spaces(r);
                        *p = ' ';
                        /* shift the rest of the string left, in place */
                        if (q != (p+1))
                                while ((*r++ = *q++));
                }
                p++;
        }
        ret = do_write_string(ff, s);
done:
        free(buf);
        fclose(file);
        return ret;
}
433 
434 static int write_cpudesc(struct feat_fd *ff,
435                        struct perf_evlist *evlist __maybe_unused)
436 {
437         const char *cpuinfo_procs[] = CPUINFO_PROC;
438         unsigned int i;
439 
440         for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
441                 int ret;
442                 ret = __write_cpudesc(ff, cpuinfo_procs[i]);
443                 if (ret >= 0)
444                         return ret;
445         }
446         return -1;
447 }
448 
449 
450 static int write_nrcpus(struct feat_fd *ff,
451                         struct perf_evlist *evlist __maybe_unused)
452 {
453         long nr;
454         u32 nrc, nra;
455         int ret;
456 
457         nrc = cpu__max_present_cpu();
458 
459         nr = sysconf(_SC_NPROCESSORS_ONLN);
460         if (nr < 0)
461                 return -1;
462 
463         nra = (u32)(nr & UINT_MAX);
464 
465         ret = do_write(ff, &nrc, sizeof(nrc));
466         if (ret < 0)
467                 return ret;
468 
469         return do_write(ff, &nra, sizeof(nra));
470 }
471 
/*
 * EVENT_DESC on-file layout: u32 event count, u32 sizeof(attr), then
 * per event: the perf_event_attr, u32 id count, the event name string,
 * and the array of unique u64 sample ids.
 */
static int write_event_desc(struct feat_fd *ff,
                            struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;
        u32 nre, nri, sz;
        int ret;

        nre = evlist->nr_entries;

        /*
         * write number of events
         */
        ret = do_write(ff, &nre, sizeof(nre));
        if (ret < 0)
                return ret;

        /*
         * size of perf_event_attr struct
         */
        sz = (u32)sizeof(evsel->attr);
        ret = do_write(ff, &sz, sizeof(sz));
        if (ret < 0)
                return ret;

        evlist__for_each_entry(evlist, evsel) {
                ret = do_write(ff, &evsel->attr, sz);
                if (ret < 0)
                        return ret;
                /*
                 * write number of unique id per event
                 * there is one id per instance of an event
                 *
                 * copy into an nri to be independent of the
                 * type of ids,
                 */
                nri = evsel->ids;
                ret = do_write(ff, &nri, sizeof(nri));
                if (ret < 0)
                        return ret;

                /*
                 * write event string as passed on cmdline
                 */
                ret = do_write_string(ff, perf_evsel__name(evsel));
                if (ret < 0)
                        return ret;
                /*
                 * write unique ids for this event
                 */
                ret = do_write(ff, evsel->id, evsel->ids * sizeof(u64));
                if (ret < 0)
                        return ret;
        }
        return 0;
}
527 
528 static int write_cmdline(struct feat_fd *ff,
529                          struct perf_evlist *evlist __maybe_unused)
530 {
531         char pbuf[MAXPATHLEN], *buf;
532         int i, ret, n;
533 
534         /* actual path to perf binary */
535         buf = perf_exe(pbuf, MAXPATHLEN);
536 
537         /* account for binary path */
538         n = perf_env.nr_cmdline + 1;
539 
540         ret = do_write(ff, &n, sizeof(n));
541         if (ret < 0)
542                 return ret;
543 
544         ret = do_write_string(ff, buf);
545         if (ret < 0)
546                 return ret;
547 
548         for (i = 0 ; i < perf_env.nr_cmdline; i++) {
549                 ret = do_write_string(ff, perf_env.cmdline_argv[i]);
550                 if (ret < 0)
551                         return ret;
552         }
553         return 0;
554 }
555 
556 
557 static int write_cpu_topology(struct feat_fd *ff,
558                               struct perf_evlist *evlist __maybe_unused)
559 {
560         struct cpu_topology *tp;
561         u32 i;
562         int ret, j;
563 
564         tp = cpu_topology__new();
565         if (!tp)
566                 return -1;
567 
568         ret = do_write(ff, &tp->core_sib, sizeof(tp->core_sib));
569         if (ret < 0)
570                 goto done;
571 
572         for (i = 0; i < tp->core_sib; i++) {
573                 ret = do_write_string(ff, tp->core_siblings[i]);
574                 if (ret < 0)
575                         goto done;
576         }
577         ret = do_write(ff, &tp->thread_sib, sizeof(tp->thread_sib));
578         if (ret < 0)
579                 goto done;
580 
581         for (i = 0; i < tp->thread_sib; i++) {
582                 ret = do_write_string(ff, tp->thread_siblings[i]);
583                 if (ret < 0)
584                         break;
585         }
586 
587         ret = perf_env__read_cpu_topology_map(&perf_env);
588         if (ret < 0)
589                 goto done;
590 
591         for (j = 0; j < perf_env.nr_cpus_avail; j++) {
592                 ret = do_write(ff, &perf_env.cpu[j].core_id,
593                                sizeof(perf_env.cpu[j].core_id));
594                 if (ret < 0)
595                         return ret;
596                 ret = do_write(ff, &perf_env.cpu[j].socket_id,
597                                sizeof(perf_env.cpu[j].socket_id));
598                 if (ret < 0)
599                         return ret;
600         }
601 
602         if (!tp->die_sib)
603                 goto done;
604 
605         ret = do_write(ff, &tp->die_sib, sizeof(tp->die_sib));
606         if (ret < 0)
607                 goto done;
608 
609         for (i = 0; i < tp->die_sib; i++) {
610                 ret = do_write_string(ff, tp->die_siblings[i]);
611                 if (ret < 0)
612                         goto done;
613         }
614 
615         for (j = 0; j < perf_env.nr_cpus_avail; j++) {
616                 ret = do_write(ff, &perf_env.cpu[j].die_id,
617                                sizeof(perf_env.cpu[j].die_id));
618                 if (ret < 0)
619                         return ret;
620         }
621 
622 done:
623         cpu_topology__delete(tp);
624         return ret;
625 }
626 
627 
628 
629 static int write_total_mem(struct feat_fd *ff,
630                            struct perf_evlist *evlist __maybe_unused)
631 {
632         char *buf = NULL;
633         FILE *fp;
634         size_t len = 0;
635         int ret = -1, n;
636         uint64_t mem;
637 
638         fp = fopen("/proc/meminfo", "r");
639         if (!fp)
640                 return -1;
641 
642         while (getline(&buf, &len, fp) > 0) {
643                 ret = strncmp(buf, "MemTotal:", 9);
644                 if (!ret)
645                         break;
646         }
647         if (!ret) {
648                 n = sscanf(buf, "%*s %"PRIu64, &mem);
649                 if (n == 1)
650                         ret = do_write(ff, &mem, sizeof(mem));
651         } else
652                 ret = -1;
653         free(buf);
654         fclose(fp);
655         return ret;
656 }
657 
658 static int write_numa_topology(struct feat_fd *ff,
659                                struct perf_evlist *evlist __maybe_unused)
660 {
661         struct numa_topology *tp;
662         int ret = -1;
663         u32 i;
664 
665         tp = numa_topology__new();
666         if (!tp)
667                 return -ENOMEM;
668 
669         ret = do_write(ff, &tp->nr, sizeof(u32));
670         if (ret < 0)
671                 goto err;
672 
673         for (i = 0; i < tp->nr; i++) {
674                 struct numa_topology_node *n = &tp->nodes[i];
675 
676                 ret = do_write(ff, &n->node, sizeof(u32));
677                 if (ret < 0)
678                         goto err;
679 
680                 ret = do_write(ff, &n->mem_total, sizeof(u64));
681                 if (ret)
682                         goto err;
683 
684                 ret = do_write(ff, &n->mem_free, sizeof(u64));
685                 if (ret)
686                         goto err;
687 
688                 ret = do_write_string(ff, n->cpus);
689                 if (ret < 0)
690                         goto err;
691         }
692 
693         ret = 0;
694 
695 err:
696         numa_topology__delete(tp);
697         return ret;
698 }
699 
700 /*
701  * File format:
702  *
703  * struct pmu_mappings {
704  *      u32     pmu_num;
705  *      struct pmu_map {
706  *              u32     type;
707  *              char    name[];
708  *      }[pmu_num];
709  * };
710  */
711 
712 static int write_pmu_mappings(struct feat_fd *ff,
713                               struct perf_evlist *evlist __maybe_unused)
714 {
715         struct perf_pmu *pmu = NULL;
716         u32 pmu_num = 0;
717         int ret;
718 
719         /*
720          * Do a first pass to count number of pmu to avoid lseek so this
721          * works in pipe mode as well.
722          */
723         while ((pmu = perf_pmu__scan(pmu))) {
724                 if (!pmu->name)
725                         continue;
726                 pmu_num++;
727         }
728 
729         ret = do_write(ff, &pmu_num, sizeof(pmu_num));
730         if (ret < 0)
731                 return ret;
732 
733         while ((pmu = perf_pmu__scan(pmu))) {
734                 if (!pmu->name)
735                         continue;
736 
737                 ret = do_write(ff, &pmu->type, sizeof(pmu->type));
738                 if (ret < 0)
739                         return ret;
740 
741                 ret = do_write_string(ff, pmu->name);
742                 if (ret < 0)
743                         return ret;
744         }
745 
746         return 0;
747 }
748 
749 /*
750  * File format:
751  *
752  * struct group_descs {
753  *      u32     nr_groups;
754  *      struct group_desc {
755  *              char    name[];
756  *              u32     leader_idx;
757  *              u32     nr_members;
758  *      }[nr_groups];
759  * };
760  */
761 static int write_group_desc(struct feat_fd *ff,
762                             struct perf_evlist *evlist)
763 {
764         u32 nr_groups = evlist->nr_groups;
765         struct perf_evsel *evsel;
766         int ret;
767 
768         ret = do_write(ff, &nr_groups, sizeof(nr_groups));
769         if (ret < 0)
770                 return ret;
771 
772         evlist__for_each_entry(evlist, evsel) {
773                 if (perf_evsel__is_group_leader(evsel) &&
774                     evsel->nr_members > 1) {
775                         const char *name = evsel->group_name ?: "{anon_group}";
776                         u32 leader_idx = evsel->idx;
777                         u32 nr_members = evsel->nr_members;
778 
779                         ret = do_write_string(ff, name);
780                         if (ret < 0)
781                                 return ret;
782 
783                         ret = do_write(ff, &leader_idx, sizeof(leader_idx));
784                         if (ret < 0)
785                                 return ret;
786 
787                         ret = do_write(ff, &nr_members, sizeof(nr_members));
788                         if (ret < 0)
789                                 return ret;
790                 }
791         }
792         return 0;
793 }
794 
795 /*
796  * Return the CPU id as a raw string.
797  *
798  * Each architecture should provide a more precise id string that
799  * can be use to match the architecture's "mapfile".
800  */
char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
{
        return NULL;    /* weak default: arches override to supply a cpuid */
}
805 
806 /* Return zero when the cpuid from the mapfile.csv matches the
807  * cpuid string generated on this platform.
808  * Otherwise return non-zero.
809  */
810 int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
811 {
812         regex_t re;
813         regmatch_t pmatch[1];
814         int match;
815 
816         if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) {
817                 /* Warn unable to generate match particular string. */
818                 pr_info("Invalid regular expression %s\n", mapcpuid);
819                 return 1;
820         }
821 
822         match = !regexec(&re, cpuid, 1, pmatch, 0);
823         regfree(&re);
824         if (match) {
825                 size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so);
826 
827                 /* Verify the entire string matched. */
828                 if (match_len == strlen(cpuid))
829                         return 0;
830         }
831         return 1;
832 }
833 
834 /*
835  * default get_cpuid(): nothing gets recorded
836  * actual implementation must be in arch/$(SRCARCH)/util/header.c
837  */
int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
{
        return -1;      /* weak default: no cpuid available on this arch */
}
842 
843 static int write_cpuid(struct feat_fd *ff,
844                        struct perf_evlist *evlist __maybe_unused)
845 {
846         char buffer[64];
847         int ret;
848 
849         ret = get_cpuid(buffer, sizeof(buffer));
850         if (ret)
851                 return -1;
852 
853         return do_write_string(ff, buffer);
854 }
855 
/* Nothing to write: this feature carries no payload in the file. */
static int write_branch_stack(struct feat_fd *ff __maybe_unused,
                              struct perf_evlist *evlist __maybe_unused)
{
        return 0;
}
861 
/*
 * Write the session's auxtrace index via auxtrace_index__write(),
 * which emits to ff->fd directly - hence the pipe-mode guard.
 */
static int write_auxtrace(struct feat_fd *ff,
                          struct perf_evlist *evlist __maybe_unused)
{
        struct perf_session *session;
        int err;

        if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
                return -1;

        session = container_of(ff->ph, struct perf_session, header);

        err = auxtrace_index__write(ff->fd, &session->auxtrace_index);
        if (err < 0)
                pr_err("Failed to write auxtrace index\n");
        return err;
}
878 
/* Record perf_env::clockid_res_ns from the session environment. */
static int write_clockid(struct feat_fd *ff,
                         struct perf_evlist *evlist __maybe_unused)
{
        return do_write(ff, &ff->ph->env.clockid_res_ns,
                        sizeof(ff->ph->env.clockid_res_ns));
}
885 
886 static int write_dir_format(struct feat_fd *ff,
887                             struct perf_evlist *evlist __maybe_unused)
888 {
889         struct perf_session *session;
890         struct perf_data *data;
891 
892         session = container_of(ff->ph, struct perf_session, header);
893         data = session->data;
894 
895         if (WARN_ON(!perf_data__is_dir(data)))
896                 return -1;
897 
898         return do_write(ff, &data->dir.version, sizeof(data->dir.version));
899 }
900 
901 #ifdef HAVE_LIBBPF_SUPPORT
/*
 * Record collected BPF program infos: a count, then each node's
 * bpf_prog_info_linear blob.  The infos rb-tree is walked under the
 * reader side of env->bpf_progs.lock.
 */
static int write_bpf_prog_info(struct feat_fd *ff,
                               struct perf_evlist *evlist __maybe_unused)
{
        struct perf_env *env = &ff->ph->env;
        struct rb_root *root;
        struct rb_node *next;
        int ret;

        down_read(&env->bpf_progs.lock);

        ret = do_write(ff, &env->bpf_progs.infos_cnt,
                       sizeof(env->bpf_progs.infos_cnt));
        if (ret < 0)
                goto out;

        /* in-order walk over the collected program info nodes */
        root = &env->bpf_progs.infos;
        next = rb_first(root);
        while (next) {
                struct bpf_prog_info_node *node;
                size_t len;

                node = rb_entry(next, struct bpf_prog_info_node, rb_node);
                next = rb_next(&node->rb_node);
                len = sizeof(struct bpf_prog_info_linear) +
                        node->info_linear->data_len;

                /* before writing to file, translate address to offset */
                bpf_program__bpil_addr_to_offs(node->info_linear);
                ret = do_write(ff, node->info_linear, len);
                /*
                 * translate back to address even when do_write() fails,
                 * so that this function never changes the data.
                 */
                bpf_program__bpil_offs_to_addr(node->info_linear);
                if (ret < 0)
                        goto out;
        }
out:
        up_read(&env->bpf_progs.lock);
        return ret;
}
943 #else // HAVE_LIBBPF_SUPPORT
/* No libbpf support compiled in: nothing to record. */
static int write_bpf_prog_info(struct feat_fd *ff __maybe_unused,
                               struct perf_evlist *evlist __maybe_unused)
{
        return 0;
}
949 #endif // HAVE_LIBBPF_SUPPORT
950 
/*
 * Record collected BTF blobs: a count, then each node's id, data_size
 * and raw data.  The btfs rb-tree is walked under the reader side of
 * env->bpf_progs.lock.
 */
static int write_bpf_btf(struct feat_fd *ff,
                         struct perf_evlist *evlist __maybe_unused)
{
        struct perf_env *env = &ff->ph->env;
        struct rb_root *root;
        struct rb_node *next;
        int ret;

        down_read(&env->bpf_progs.lock);

        ret = do_write(ff, &env->bpf_progs.btfs_cnt,
                       sizeof(env->bpf_progs.btfs_cnt));

        if (ret < 0)
                goto out;

        root = &env->bpf_progs.btfs;
        next = rb_first(root);
        while (next) {
                struct btf_node *node;

                node = rb_entry(next, struct btf_node, rb_node);
                next = rb_next(&node->rb_node);
                /*
                 * one write covers id, data_size and data[]; relies on
                 * these being contiguous in struct btf_node - NOTE(review):
                 * verify against the struct definition if it changes.
                 */
                ret = do_write(ff, &node->id,
                               sizeof(u32) * 2 + node->data_size);
                if (ret < 0)
                        goto out;
        }
out:
        up_read(&env->bpf_progs.lock);
        return ret;
}
983 
984 static int cpu_cache_level__sort(const void *a, const void *b)
985 {
986         struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
987         struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;
988 
989         return cache_a->level - cache_b->level;
990 }
991 
992 static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
993 {
994         if (a->level != b->level)
995                 return false;
996 
997         if (a->line_size != b->line_size)
998                 return false;
999 
1000         if (a->sets != b->sets)
1001                 return false;
1002 
1003         if (a->ways != b->ways)
1004                 return false;
1005 
1006         if (strcmp(a->type, b->type))
1007                 return false;
1008 
1009         if (strcmp(a->size, b->size))
1010                 return false;
1011 
1012         if (strcmp(a->map, b->map))
1013                 return false;
1014 
1015         return true;
1016 }
1017 
/*
 * Fill @cache with the properties of sysfs cache index @level for @cpu
 * (devices/system/cpu/cpuN/cache/indexM/).
 *
 * Returns 0 on success — cache->type/size/map are then heap strings the
 * caller owns; 1 when the index directory does not exist (no such cache
 * level on this cpu); -1 on any read failure (partially-read strings are
 * freed before returning).
 */
static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
{
	char path[PATH_MAX], file[PATH_MAX];
	struct stat st;
	size_t len;

	/* path stays sysfs-relative; the read helpers below resolve it */
	scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
	scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);

	/* a missing index directory simply means this level doesn't exist */
	if (stat(file, &st))
		return 1;

	scnprintf(file, PATH_MAX, "%s/level", path);
	if (sysfs__read_int(file, (int *) &cache->level))
		return -1;

	scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
	if (sysfs__read_int(file, (int *) &cache->line_size))
		return -1;

	scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
	if (sysfs__read_int(file, (int *) &cache->sets))
		return -1;

	scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
	if (sysfs__read_int(file, (int *) &cache->ways))
		return -1;

	scnprintf(file, PATH_MAX, "%s/type", path);
	if (sysfs__read_str(file, &cache->type, &len))
		return -1;

	/* assumes the buffer returned by sysfs__read_str has room at [len] */
	cache->type[len] = 0;
	/*
	 * NOTE(review): strim() may return a pointer past leading whitespace;
	 * the later free() of cache->type assumes sysfs values never start
	 * with whitespace — confirm strim()/free() pairing.
	 */
	cache->type = strim(cache->type);

	scnprintf(file, PATH_MAX, "%s/size", path);
	if (sysfs__read_str(file, &cache->size, &len)) {
		zfree(&cache->type);
		return -1;
	}

	cache->size[len] = 0;
	cache->size = strim(cache->size);

	scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
	if (sysfs__read_str(file, &cache->map, &len)) {
		zfree(&cache->size);
		zfree(&cache->type);
		return -1;
	}

	cache->map[len] = 0;
	cache->map = strim(cache->map);
	return 0;
}
1073 
/* Print one cache level as "L<level> <type> <size> [<cpu map>]". */
static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
{
	fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
}
1078 
1079 static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
1080 {
1081         u32 i, cnt = 0;
1082         long ncpus;
1083         u32 nr, cpu;
1084         u16 level;
1085 
1086         ncpus = sysconf(_SC_NPROCESSORS_CONF);
1087         if (ncpus < 0)
1088                 return -1;
1089 
1090         nr = (u32)(ncpus & UINT_MAX);
1091 
1092         for (cpu = 0; cpu < nr; cpu++) {
1093                 for (level = 0; level < 10; level++) {
1094                         struct cpu_cache_level c;
1095                         int err;
1096 
1097                         err = cpu_cache_level__read(&c, cpu, level);
1098                         if (err < 0)
1099                                 return err;
1100 
1101                         if (err == 1)
1102                                 break;
1103 
1104                         for (i = 0; i < cnt; i++) {
1105                                 if (cpu_cache_level__cmp(&c, &caches[i]))
1106                                         break;
1107                         }
1108 
1109                         if (i == cnt)
1110                                 caches[cnt++] = c;
1111                         else
1112                                 cpu_cache_level__free(&c);
1113 
1114                         if (WARN_ONCE(cnt == size, "way too many cpu caches.."))
1115                                 goto out;
1116                 }
1117         }
1118  out:
1119         *cntp = cnt;
1120         return 0;
1121 }
1122 
1123 #define MAX_CACHES (MAX_NR_CPUS * 4)
1124 
/*
 * Write the CACHE feature section: a u32 version, a u32 entry count,
 * then for each deduplicated cache level four u32 fields followed by
 * three strings.  The strings collected by build_caches() are freed on
 * every exit path.
 */
static int write_cache(struct feat_fd *ff,
		       struct perf_evlist *evlist __maybe_unused)
{
	struct cpu_cache_level caches[MAX_CACHES];
	u32 cnt = 0, i, version = 1;
	int ret;

	ret = build_caches(caches, MAX_CACHES, &cnt);
	if (ret)
		goto out;

	/* stable on-file order, independent of cpu enumeration order */
	qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);

	ret = do_write(ff, &version, sizeof(u32));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &cnt, sizeof(u32));
	if (ret < 0)
		goto out;

	for (i = 0; i < cnt; i++) {
		struct cpu_cache_level *c = &caches[i];

		/* write one u32 member of the entry, bail out on error */
		#define _W(v)					\
			ret = do_write(ff, &c->v, sizeof(u32)); \
			if (ret < 0)				\
				goto out;

		_W(level)
		_W(line_size)
		_W(sets)
		_W(ways)
		#undef _W

		/* write one string member of the entry, bail out on error */
		#define _W(v)						\
			ret = do_write_string(ff, (const char *) c->v); \
			if (ret < 0)					\
				goto out;

		_W(type)
		_W(size)
		_W(map)
		#undef _W
	}

out:
	for (i = 0; i < cnt; i++)
		cpu_cache_level__free(&caches[i]);
	return ret;
}
1176 
/*
 * The STAT feature carries no payload; setting the feature bit in the
 * header is the whole signal.
 */
static int write_stat(struct feat_fd *ff __maybe_unused,
		      struct perf_evlist *evlist __maybe_unused)
{
	return 0;
}
1182 
1183 static int write_sample_time(struct feat_fd *ff,
1184                              struct perf_evlist *evlist)
1185 {
1186         int ret;
1187 
1188         ret = do_write(ff, &evlist->first_sample_time,
1189                        sizeof(evlist->first_sample_time));
1190         if (ret < 0)
1191                 return ret;
1192 
1193         return do_write(ff, &evlist->last_sample_time,
1194                         sizeof(evlist->last_sample_time));
1195 }
1196 
1197 
1198 static int memory_node__read(struct memory_node *n, unsigned long idx)
1199 {
1200         unsigned int phys, size = 0;
1201         char path[PATH_MAX];
1202         struct dirent *ent;
1203         DIR *dir;
1204 
1205 #define for_each_memory(mem, dir)                                       \
1206         while ((ent = readdir(dir)))                                    \
1207                 if (strcmp(ent->d_name, ".") &&                         \
1208                     strcmp(ent->d_name, "..") &&                        \
1209                     sscanf(ent->d_name, "memory%u", &mem) == 1)
1210 
1211         scnprintf(path, PATH_MAX,
1212                   "%s/devices/system/node/node%lu",
1213                   sysfs__mountpoint(), idx);
1214 
1215         dir = opendir(path);
1216         if (!dir) {
1217                 pr_warning("failed: cant' open memory sysfs data\n");
1218                 return -1;
1219         }
1220 
1221         for_each_memory(phys, dir) {
1222                 size = max(phys, size);
1223         }
1224 
1225         size++;
1226 
1227         n->set = bitmap_alloc(size);
1228         if (!n->set) {
1229                 closedir(dir);
1230                 return -ENOMEM;
1231         }
1232 
1233         n->node = idx;
1234         n->size = size;
1235 
1236         rewinddir(dir);
1237 
1238         for_each_memory(phys, dir) {
1239                 set_bit(phys, n->set);
1240         }
1241 
1242         closedir(dir);
1243         return 0;
1244 }
1245 
1246 static int memory_node__sort(const void *a, const void *b)
1247 {
1248         const struct memory_node *na = a;
1249         const struct memory_node *nb = b;
1250 
1251         return na->node - nb->node;
1252 }
1253 
1254 static int build_mem_topology(struct memory_node *nodes, u64 size, u64 *cntp)
1255 {
1256         char path[PATH_MAX];
1257         struct dirent *ent;
1258         DIR *dir;
1259         u64 cnt = 0;
1260         int ret = 0;
1261 
1262         scnprintf(path, PATH_MAX, "%s/devices/system/node/",
1263                   sysfs__mountpoint());
1264 
1265         dir = opendir(path);
1266         if (!dir) {
1267                 pr_debug2("%s: could't read %s, does this arch have topology information?\n",
1268                           __func__, path);
1269                 return -1;
1270         }
1271 
1272         while (!ret && (ent = readdir(dir))) {
1273                 unsigned int idx;
1274                 int r;
1275 
1276                 if (!strcmp(ent->d_name, ".") ||
1277                     !strcmp(ent->d_name, ".."))
1278                         continue;
1279 
1280                 r = sscanf(ent->d_name, "node%u", &idx);
1281                 if (r != 1)
1282                         continue;
1283 
1284                 if (WARN_ONCE(cnt >= size,
1285                         "failed to write MEM_TOPOLOGY, way too many nodes\n")) {
1286                         closedir(dir);
1287                         return -1;
1288                 }
1289 
1290                 ret = memory_node__read(&nodes[cnt++], idx);
1291         }
1292 
1293         *cntp = cnt;
1294         closedir(dir);
1295 
1296         if (!ret)
1297                 qsort(nodes, cnt, sizeof(nodes[0]), memory_node__sort);
1298 
1299         return ret;
1300 }
1301 
1302 #define MAX_MEMORY_NODES 2000
1303 
1304 /*
1305  * The MEM_TOPOLOGY holds physical memory map for every
1306  * node in system. The format of data is as follows:
1307  *
1308  *  0 - version          | for future changes
1309  *  8 - block_size_bytes | /sys/devices/system/memory/block_size_bytes
1310  * 16 - count            | number of nodes
1311  *
1312  * For each node we store map of physical indexes for
1313  * each node:
1314  *
1315  * 32 - node id          | node index
1316  * 40 - size             | size of bitmap
1317  * 48 - bitmap           | bitmap of memory indexes that belongs to node
1318  */
static int write_mem_topology(struct feat_fd *ff __maybe_unused,
			      struct perf_evlist *evlist __maybe_unused)
{
	/* static: MAX_MEMORY_NODES entries would be too large for the stack */
	static struct memory_node nodes[MAX_MEMORY_NODES];
	u64 bsize, version = 1, i, nr;
	int ret;

	ret = sysfs__read_xll("devices/system/memory/block_size_bytes",
			      (unsigned long long *) &bsize);
	if (ret)
		return ret;

	ret = build_mem_topology(&nodes[0], MAX_MEMORY_NODES, &nr);
	if (ret)
		return ret;

	ret = do_write(ff, &version, sizeof(version));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &bsize, sizeof(bsize));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &nr, sizeof(nr));
	if (ret < 0)
		goto out;

	for (i = 0; i < nr; i++) {
		struct memory_node *n = &nodes[i];

		/* write one fixed-size field of the node record */
		#define _W(v)						\
			ret = do_write(ff, &n->v, sizeof(n->v));	\
			if (ret < 0)					\
				goto out;

		_W(node)
		_W(size)

		#undef _W

		ret = do_write_bitmap(ff, n->set, n->size);
		if (ret < 0)
			goto out;
	}

out:
	/*
	 * NOTE(review): the n->set bitmaps allocated by memory_node__read()
	 * are never freed here — confirm this one-shot leak is intended.
	 */
	return ret;
}
1368 
1369 static int write_compressed(struct feat_fd *ff __maybe_unused,
1370                             struct perf_evlist *evlist __maybe_unused)
1371 {
1372         int ret;
1373 
1374         ret = do_write(ff, &(ff->ph->env.comp_ver), sizeof(ff->ph->env.comp_ver));
1375         if (ret)
1376                 return ret;
1377 
1378         ret = do_write(ff, &(ff->ph->env.comp_type), sizeof(ff->ph->env.comp_type));
1379         if (ret)
1380                 return ret;
1381 
1382         ret = do_write(ff, &(ff->ph->env.comp_level), sizeof(ff->ph->env.comp_level));
1383         if (ret)
1384                 return ret;
1385 
1386         ret = do_write(ff, &(ff->ph->env.comp_ratio), sizeof(ff->ph->env.comp_ratio));
1387         if (ret)
1388                 return ret;
1389 
1390         return do_write(ff, &(ff->ph->env.comp_mmap_len), sizeof(ff->ph->env.comp_mmap_len));
1391 }
1392 
/* Print the hostname captured at record time. */
static void print_hostname(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
}
1397 
/* Print the OS release string captured at record time. */
static void print_osrelease(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# os release : %s\n", ff->ph->env.os_release);
}
1402 
/* Print the machine architecture captured at record time. */
static void print_arch(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# arch : %s\n", ff->ph->env.arch);
}
1407 
/* Print the CPU description string captured at record time. */
static void print_cpudesc(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# cpudesc : %s\n", ff->ph->env.cpu_desc);
}
1412 
/* Print the online and available CPU counts captured at record time. */
static void print_nrcpus(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# nrcpus online : %u\n", ff->ph->env.nr_cpus_online);
	fprintf(fp, "# nrcpus avail : %u\n", ff->ph->env.nr_cpus_avail);
}
1418 
/* Print the perf version that produced the data file. */
static void print_version(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# perf version : %s\n", ff->ph->env.version);
}
1423 
1424 static void print_cmdline(struct feat_fd *ff, FILE *fp)
1425 {
1426         int nr, i;
1427 
1428         nr = ff->ph->env.nr_cmdline;
1429 
1430         fprintf(fp, "# cmdline : ");
1431 
1432         for (i = 0; i < nr; i++) {
1433                 char *argv_i = strdup(ff->ph->env.cmdline_argv[i]);
1434                 if (!argv_i) {
1435                         fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]);
1436                 } else {
1437                         char *mem = argv_i;
1438                         do {
1439                                 char *quote = strchr(argv_i, '\'');
1440                                 if (!quote)
1441                                         break;
1442                                 *quote++ = '\0';
1443                                 fprintf(fp, "%s\\\'", argv_i);
1444                                 argv_i = quote;
1445                         } while (1);
1446                         fprintf(fp, "%s ", argv_i);
1447                         free(mem);
1448                 }
1449         }
1450         fputc('\n', fp);
1451 }
1452 
/*
 * Print the CPU_TOPOLOGY feature: sibling socket/die/thread lists and,
 * when the per-CPU table is present, each CPU's core/die/socket ids.
 */
static void print_cpu_topology(struct feat_fd *ff, FILE *fp)
{
	struct perf_header *ph = ff->ph;
	int cpu_nr = ph->env.nr_cpus_avail;
	int nr, i;
	char *str;

	nr = ph->env.nr_sibling_cores;
	str = ph->env.sibling_cores;

	/* sibling_cores holds nr consecutive NUL-terminated strings */
	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling sockets : %s\n", str);
		str += strlen(str) + 1;
	}

	/* die information exists only when nr_sibling_dies is non-zero */
	if (ph->env.nr_sibling_dies) {
		nr = ph->env.nr_sibling_dies;
		str = ph->env.sibling_dies;

		for (i = 0; i < nr; i++) {
			fprintf(fp, "# sibling dies    : %s\n", str);
			str += strlen(str) + 1;
		}
	}

	nr = ph->env.nr_sibling_threads;
	str = ph->env.sibling_threads;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling threads : %s\n", str);
		str += strlen(str) + 1;
	}

	/* env.cpu (per-CPU id table) may be NULL; report its absence */
	if (ph->env.nr_sibling_dies) {
		if (ph->env.cpu != NULL) {
			for (i = 0; i < cpu_nr; i++)
				fprintf(fp, "# CPU %d: Core ID %d, "
					    "Die ID %d, Socket ID %d\n",
					    i, ph->env.cpu[i].core_id,
					    ph->env.cpu[i].die_id,
					    ph->env.cpu[i].socket_id);
		} else
			fprintf(fp, "# Core ID, Die ID and Socket ID "
				    "information is not available\n");
	} else {
		if (ph->env.cpu != NULL) {
			for (i = 0; i < cpu_nr; i++)
				fprintf(fp, "# CPU %d: Core ID %d, "
					    "Socket ID %d\n",
					    i, ph->env.cpu[i].core_id,
					    ph->env.cpu[i].socket_id);
		} else
			fprintf(fp, "# Core ID and Socket ID "
				    "information is not available\n");
	}
}
1509 
static void print_clockid(struct feat_fd *ff, FILE *fp)
{
	/*
	 * NOTE(review): clockid_res_ns is a clock *resolution* in ns, yet it
	 * is multiplied by 1000 and labelled a frequency in MHz — the unit
	 * conversion looks questionable; confirm the intended semantics.
	 */
	fprintf(fp, "# clockid frequency: %"PRIu64" MHz\n",
		ff->ph->env.clockid_res_ns * 1000);
}
1515 
1516 static void print_dir_format(struct feat_fd *ff, FILE *fp)
1517 {
1518         struct perf_session *session;
1519         struct perf_data *data;
1520 
1521         session = container_of(ff->ph, struct perf_session, header);
1522         data = session->data;
1523 
1524         fprintf(fp, "# directory data version : %"PRIu64"\n", data->dir.version);
1525 }
1526 
1527 static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
1528 {
1529         struct perf_env *env = &ff->ph->env;
1530         struct rb_root *root;
1531         struct rb_node *next;
1532 
1533         down_read(&env->bpf_progs.lock);
1534 
1535         root = &env->bpf_progs.infos;
1536         next = rb_first(root);
1537 
1538         while (next) {
1539                 struct bpf_prog_info_node *node;
1540 
1541                 node = rb_entry(next, struct bpf_prog_info_node, rb_node);
1542                 next = rb_next(&node->rb_node);
1543 
1544                 bpf_event__print_bpf_prog_info(&node->info_linear->info,
1545                                                env, fp);
1546         }
1547 
1548         up_read(&env->bpf_progs.lock);
1549 }
1550 
1551 static void print_bpf_btf(struct feat_fd *ff, FILE *fp)
1552 {
1553         struct perf_env *env = &ff->ph->env;
1554         struct rb_root *root;
1555         struct rb_node *next;
1556 
1557         down_read(&env->bpf_progs.lock);
1558 
1559         root = &env->bpf_progs.btfs;
1560         next = rb_first(root);
1561 
1562         while (next) {
1563                 struct btf_node *node;
1564 
1565                 node = rb_entry(next, struct btf_node, rb_node);
1566                 next = rb_next(&node->rb_node);
1567                 fprintf(fp, "# btf info of id %u\n", node->id);
1568         }
1569 
1570         up_read(&env->bpf_progs.lock);
1571 }
1572 
1573 static void free_event_desc(struct perf_evsel *events)
1574 {
1575         struct perf_evsel *evsel;
1576 
1577         if (!events)
1578                 return;
1579 
1580         for (evsel = events; evsel->attr.size; evsel++) {
1581                 zfree(&evsel->name);
1582                 zfree(&evsel->id);
1583         }
1584 
1585         free(events);
1586 }
1587 
/*
 * Parse the EVENT_DESC feature section: a u32 event count, a u32 on-file
 * attr size, then per event the raw attr bytes, a u32 id count, a string
 * name and the u64 ids.  Returns a calloc'd perf_evsel array terminated
 * by an entry with attr.size == 0 (free with free_event_desc()), or NULL
 * on any read/allocation failure.
 */
static struct perf_evsel *read_event_desc(struct feat_fd *ff)
{
	struct perf_evsel *evsel, *events = NULL;
	u64 *id;
	void *buf = NULL;
	u32 nre, sz, nr, i, j;
	size_t msz;

	/* number of events */
	if (do_read_u32(ff, &nre))
		goto error;

	/* on-file size of each attr struct */
	if (do_read_u32(ff, &sz))
		goto error;

	/* buffer to hold on file attr struct */
	/* NOTE(review): sz comes from the file and is not sanity-checked */
	buf = malloc(sz);
	if (!buf)
		goto error;

	/* the last event terminates with evsel->attr.size == 0: */
	events = calloc(nre + 1, sizeof(*events));
	if (!events)
		goto error;

	/* copy no more than the in-memory attr size */
	msz = sizeof(evsel->attr);
	if (sz < msz)
		msz = sz;

	for (i = 0, evsel = events; i < nre; evsel++, i++) {
		evsel->idx = i;

		/*
		 * must read entire on-file attr struct to
		 * sync up with layout.
		 */
		if (__do_read(ff, buf, sz))
			goto error;

		if (ff->ph->needs_swap)
			perf_event__attr_swap(buf);

		memcpy(&evsel->attr, buf, msz);

		/* number of sample ids attached to this event */
		if (do_read_u32(ff, &nr))
			goto error;

		if (ff->ph->needs_swap)
			evsel->needs_swap = true;

		evsel->name = do_read_string(ff);
		if (!evsel->name)
			goto error;

		if (!nr)
			continue;

		id = calloc(nr, sizeof(*id));
		if (!id)
			goto error;
		evsel->ids = nr;
		evsel->id = id;

		for (j = 0 ; j < nr; j++) {
			if (do_read_u64(ff, id))
				goto error;
			id++;
		}
	}
out:
	free(buf);
	return events;
error:
	free_event_desc(events);
	events = NULL;
	goto out;
}
1665 
/* perf_event_attr__fprintf() callback: emit one attr field as ", name = val". */
static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
				void *priv __maybe_unused)
{
	return fprintf(fp, ", %s = %s", name, val);
}
1671 
1672 static void print_event_desc(struct feat_fd *ff, FILE *fp)
1673 {
1674         struct perf_evsel *evsel, *events;
1675         u32 j;
1676         u64 *id;
1677 
1678         if (ff->events)
1679                 events = ff->events;
1680         else
1681                 events = read_event_desc(ff);
1682 
1683         if (!events) {
1684                 fprintf(fp, "# event desc: not available or unable to read\n");
1685                 return;
1686         }
1687 
1688         for (evsel = events; evsel->attr.size; evsel++) {
1689                 fprintf(fp, "# event : name = %s, ", evsel->name);
1690 
1691                 if (evsel->ids) {
1692                         fprintf(fp, ", id = {");
1693                         for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
1694                                 if (j)
1695                                         fputc(',', fp);
1696                                 fprintf(fp, " %"PRIu64, *id);
1697                         }
1698                         fprintf(fp, " }");
1699                 }
1700 
1701                 perf_event_attr__fprintf(fp, &evsel->attr, __desc_attr__fprintf, NULL);
1702 
1703                 fputc('\n', fp);
1704         }
1705 
1706         free_event_desc(events);
1707         ff->events = NULL;
1708 }
1709 
/* Print total system memory (kB) captured at record time. */
static void print_total_mem(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# total memory : %llu kB\n", ff->ph->env.total_mem);
}
1714 
1715 static void print_numa_topology(struct feat_fd *ff, FILE *fp)
1716 {
1717         int i;
1718         struct numa_node *n;
1719 
1720         for (i = 0; i < ff->ph->env.nr_numa_nodes; i++) {
1721                 n = &ff->ph->env.numa_nodes[i];
1722 
1723                 fprintf(fp, "# node%u meminfo  : total = %"PRIu64" kB,"
1724                             " free = %"PRIu64" kB\n",
1725                         n->node, n->mem_total, n->mem_free);
1726 
1727                 fprintf(fp, "# node%u cpu list : ", n->node);
1728                 cpu_map__fprintf(n->map, fp);
1729         }
1730 }
1731 
/* Print the cpuid string captured at record time. */
static void print_cpuid(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# cpuid : %s\n", ff->ph->env.cpuid);
}
1736 
/* Presence-only feature: announce that samples carry branch stacks. */
static void print_branch_stack(struct feat_fd *ff __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains samples with branch stack\n");
}
1741 
/* Presence-only feature: announce that the file carries AUX area data. */
static void print_auxtrace(struct feat_fd *ff __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
}
1746 
/* Presence-only feature: announce that the file carries stat data. */
static void print_stat(struct feat_fd *ff __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains stat data\n");
}
1751 
1752 static void print_cache(struct feat_fd *ff, FILE *fp __maybe_unused)
1753 {
1754         int i;
1755 
1756         fprintf(fp, "# CPU cache info:\n");
1757         for (i = 0; i < ff->ph->env.caches_cnt; i++) {
1758                 fprintf(fp, "#  ");
1759                 cpu_cache_level__fprintf(fp, &ff->ph->env.caches[i]);
1760         }
1761 }
1762 
/* Print the COMPRESSED feature: codec name, level and achieved ratio. */
static void print_compressed(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# compressed : %s, level = %d, ratio = %d\n",
		ff->ph->env.comp_type == PERF_COMP_ZSTD ? "Zstd" : "Unknown",
		ff->ph->env.comp_level, ff->ph->env.comp_ratio);
}
1769 
/*
 * Print the PMU type->name mappings.  env.pmu_mappings holds pmu_num
 * consecutive NUL-terminated records, each formatted as "<type>:<name>".
 */
static void print_pmu_mappings(struct feat_fd *ff, FILE *fp)
{
	const char *delimiter = "# pmu mappings: ";
	char *str, *tmp;
	u32 pmu_num;
	u32 type;

	pmu_num = ff->ph->env.nr_pmu_mappings;
	if (!pmu_num) {
		fprintf(fp, "# pmu mappings: not available\n");
		return;
	}

	str = ff->ph->env.pmu_mappings;

	while (pmu_num) {
		/* numeric pmu type, terminated by ':' */
		type = strtoul(str, &tmp, 0);
		if (*tmp != ':')
			goto error;

		str = tmp + 1;
		fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);

		/* only the first record gets the "# pmu mappings: " prefix */
		delimiter = ", ";
		str += strlen(str) + 1;
		pmu_num--;
	}

	fprintf(fp, "\n");

	/* pmu_num is 0 when every record parsed cleanly; fall through only on error */
	if (!pmu_num)
		return;
error:
	fprintf(fp, "# pmu mappings: unable to read\n");
}
1805 
/*
 * Print each event group as "# group: <name>{leader,member,...}".
 * Relies on evlist order: member events directly follow their group
 * leader, so a countdown (nr) of remaining members is enough state.
 */
static void print_group_desc(struct feat_fd *ff, FILE *fp)
{
	struct perf_session *session;
	struct perf_evsel *evsel;
	u32 nr = 0;

	session = container_of(ff->ph, struct perf_session, header);

	evlist__for_each_entry(session->evlist, evsel) {
		if (perf_evsel__is_group_leader(evsel) &&
		    evsel->nr_members > 1) {
			fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
				perf_evsel__name(evsel));

			/* members still to print after the leader */
			nr = evsel->nr_members - 1;
		} else if (nr) {
			fprintf(fp, ",%s", perf_evsel__name(evsel));

			if (--nr == 0)
				fprintf(fp, "}\n");
		}
	}
}
1829 
1830 static void print_sample_time(struct feat_fd *ff, FILE *fp)
1831 {
1832         struct perf_session *session;
1833         char time_buf[32];
1834         double d;
1835 
1836         session = container_of(ff->ph, struct perf_session, header);
1837 
1838         timestamp__scnprintf_usec(session->evlist->first_sample_time,
1839                                   time_buf, sizeof(time_buf));
1840         fprintf(fp, "# time of first sample : %s\n", time_buf);
1841 
1842         timestamp__scnprintf_usec(session->evlist->last_sample_time,
1843                                   time_buf, sizeof(time_buf));
1844         fprintf(fp, "# time of last sample : %s\n", time_buf);
1845 
1846         d = (double)(session->evlist->last_sample_time -
1847                 session->evlist->first_sample_time) / NSEC_PER_MSEC;
1848 
1849         fprintf(fp, "# sample duration : %10.3f ms\n", d);
1850 }
1851 
1852 static void memory_node__fprintf(struct memory_node *n,
1853                                  unsigned long long bsize, FILE *fp)
1854 {
1855         char buf_map[100], buf_size[50];
1856         unsigned long long size;
1857 
1858         size = bsize * bitmap_weight(n->set, n->size);
1859         unit_number__scnprintf(buf_size, 50, size);
1860 
1861         bitmap_scnprintf(n->set, n->size, buf_map, 100);
1862         fprintf(fp, "#  %3" PRIu64 " [%s]: %s\n", n->node, buf_size, buf_map);
1863 }
1864 
1865 static void print_mem_topology(struct feat_fd *ff, FILE *fp)
1866 {
1867         struct memory_node *nodes;
1868         int i, nr;
1869 
1870         nodes = ff->ph->env.memory_nodes;
1871         nr    = ff->ph->env.nr_memory_nodes;
1872 
1873         fprintf(fp, "# memory nodes (nr %d, block size 0x%llx):\n",
1874                 nr, ff->ph->env.memory_bsize);
1875 
1876         for (i = 0; i < nr; i++) {
1877                 memory_node__fprintf(&nodes[i], ff->ph->env.memory_bsize, fp);
1878         }
1879 }
1880 
/*
 * Process one build-id event from the header: find (or create) the
 * machine for bev->pid, classify the dso from the event's cpumode and
 * attach the build id to it.  Returns 0 on success, -1 when the machine
 * cannot be created or the cpumode is unrecognized.
 */
static int __event_process_build_id(struct build_id_event *bev,
				    char *filename,
				    struct perf_session *session)
{
	int err = -1;
	struct machine *machine;
	u16 cpumode;
	struct dso *dso;
	enum dso_kernel_type dso_type;

	machine = perf_session__findnew_machine(session, bev->pid);
	if (!machine)
		goto out;

	cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		dso_type = DSO_TYPE_KERNEL;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		dso_type = DSO_TYPE_GUEST_KERNEL;
		break;
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_GUEST_USER:
		dso_type = DSO_TYPE_USER;
		break;
	default:
		goto out;
	}

	dso = machine__findnew_dso(machine, filename);
	if (dso != NULL) {
		char sbuild_id[SBUILD_ID_SIZE];

		dso__set_build_id(dso, &bev->build_id);

		/* a kernel-mode dso may actually be a kernel module */
		if (dso_type != DSO_TYPE_USER) {
			struct kmod_path m = { .name = NULL, };

			if (!kmod_path__parse_name(&m, filename) && m.kmod)
				dso__set_module_info(dso, &m, machine);
			else
				dso->kernel = dso_type;

			free(m.name);
		}

		build_id__sprintf(dso->build_id, sizeof(dso->build_id),
				  sbuild_id);
		pr_debug("build id event received for %s: %s\n",
			 dso->long_name, sbuild_id);
		/* drop the reference taken by machine__findnew_dso() */
		dso__put(dso);
	}

	err = 0;
out:
	return err;
}
1940 
1941 static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
1942                                                  int input, u64 offset, u64 size)
1943 {
1944         struct perf_session *session = container_of(header, struct perf_session, header);
1945         struct {
1946                 struct perf_event_header   header;
1947                 u8                         build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
1948                 char                       filename[0];
1949         } old_bev;
1950         struct build_id_event bev;
1951         char filename[PATH_MAX];
1952         u64 limit = offset + size;
1953 
1954         while (offset < limit) {
1955                 ssize_t len;
1956 
1957                 if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
1958                         return -1;
1959 
1960                 if (header->needs_swap)
1961                         perf_event_header__bswap(&old_bev.header);
1962 
1963                 len = old_bev.header.size - sizeof(old_bev);
1964                 if (readn(input, filename, len) != len)
1965                         return -1;
1966 
1967                 bev.header = old_bev.header;
1968 
1969                 /*
1970                  * As the pid is the missing value, we need to fill
1971                  * it properly. The header.misc value give us nice hint.
1972                  */
1973                 bev.pid = HOST_KERNEL_ID;
1974                 if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
1975                     bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
1976                         bev.pid = DEFAULT_GUEST_KERNEL_ID;
1977 
1978                 memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
1979                 __event_process_build_id(&bev, filename, session);
1980 
1981                 offset += bev.header.size;
1982         }
1983 
1984         return 0;
1985 }
1986 
1987 static int perf_header__read_build_ids(struct perf_header *header,
1988                                        int input, u64 offset, u64 size)
1989 {
1990         struct perf_session *session = container_of(header, struct perf_session, header);
1991         struct build_id_event bev;
1992         char filename[PATH_MAX];
1993         u64 limit = offset + size, orig_offset = offset;
1994         int err = -1;
1995 
1996         while (offset < limit) {
1997                 ssize_t len;
1998 
1999                 if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
2000                         goto out;
2001 
2002                 if (header->needs_swap)
2003                         perf_event_header__bswap(&bev.header);
2004 
2005                 len = bev.header.size - sizeof(bev);
2006                 if (readn(input, filename, len) != len)
2007                         goto out;
2008                 /*
2009                  * The a1645ce1 changeset:
2010                  *
2011                  * "perf: 'perf kvm' tool for monitoring guest performance from host"
2012                  *
2013                  * Added a field to struct build_id_event that broke the file
2014                  * format.
2015                  *
2016                  * Since the kernel build-id is the first entry, process the
2017                  * table using the old format if the well known
2018                  * '[kernel.kallsyms]' string for the kernel build-id has the
2019                  * first 4 characters chopped off (where the pid_t sits).
2020                  */
2021                 if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
2022                         if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
2023                                 return -1;
2024                         return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
2025                 }
2026 
2027                 __event_process_build_id(&bev, filename, session);
2028 
2029                 offset += bev.header.size;
2030         }
2031         err = 0;
2032 out:
2033         return err;
2034 }
2035 
/*
 * Macro for features that simply need to read and store a string.
 * Expands to process_<feat>(), which reads one string from the feature
 * section via do_read_string() into ff->ph->env.<feat_env>.
 * Returns 0 on success, -ENOMEM when do_read_string() returns NULL.
 */
#define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \
static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \
{\
	ff->ph->env.__feat_env = do_read_string(ff); \
	return ff->ph->env.__feat_env ? 0 : -ENOMEM; \
}

/* Plain-string header features, stored verbatim in perf_env. */
FEAT_PROCESS_STR_FUN(hostname, hostname);
FEAT_PROCESS_STR_FUN(osrelease, os_release);
FEAT_PROCESS_STR_FUN(version, version);
FEAT_PROCESS_STR_FUN(arch, arch);
FEAT_PROCESS_STR_FUN(cpudesc, cpu_desc);
FEAT_PROCESS_STR_FUN(cpuid, cpuid);
2050 
2051 static int process_tracing_data(struct feat_fd *ff, void *data)
2052 {
2053         ssize_t ret = trace_report(ff->fd, data, false);
2054 
2055         return ret < 0 ? -1 : 0;
2056 }
2057 
2058 static int process_build_id(struct feat_fd *ff, void *data __maybe_unused)
2059 {
2060         if (perf_header__read_build_ids(ff->ph, ff->fd, ff->offset, ff->size))
2061                 pr_debug("Failed to read buildids, continuing...\n");
2062         return 0;
2063 }
2064 
2065 static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused)
2066 {
2067         int ret;
2068         u32 nr_cpus_avail, nr_cpus_online;
2069 
2070         ret = do_read_u32(ff, &nr_cpus_avail);
2071         if (ret)
2072                 return ret;
2073 
2074         ret = do_read_u32(ff, &nr_cpus_online);
2075         if (ret)
2076                 return ret;
2077         ff->ph->env.nr_cpus_avail = (int)nr_cpus_avail;
2078         ff->ph->env.nr_cpus_online = (int)nr_cpus_online;
2079         return 0;
2080 }
2081 
2082 static int process_total_mem(struct feat_fd *ff, void *data __maybe_unused)
2083 {
2084         u64 total_mem;
2085         int ret;
2086 
2087         ret = do_read_u64(ff, &total_mem);
2088         if (ret)
2089                 return -1;
2090         ff->ph->env.total_mem = (unsigned long long)total_mem;
2091         return 0;
2092 }
2093 
2094 static struct perf_evsel *
2095 perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
2096 {
2097         struct perf_evsel *evsel;
2098 
2099         evlist__for_each_entry(evlist, evsel) {
2100                 if (evsel->idx == idx)
2101                         return evsel;
2102         }
2103 
2104         return NULL;
2105 }
2106 
2107 static void
2108 perf_evlist__set_event_name(struct perf_evlist *evlist,
2109                             struct perf_evsel *event)
2110 {
2111         struct perf_evsel *evsel;
2112 
2113         if (!event->name)
2114                 return;
2115 
2116         evsel = perf_evlist__find_by_index(evlist, event->idx);
2117         if (!evsel)
2118                 return;
2119 
2120         if (evsel->name)
2121                 return;
2122 
2123         evsel->name = strdup(event->name);
2124 }
2125 
2126 static int
2127 process_event_desc(struct feat_fd *ff, void *data __maybe_unused)
2128 {
2129         struct perf_session *session;
2130         struct perf_evsel *evsel, *events = read_event_desc(ff);
2131 
2132         if (!events)
2133                 return 0;
2134 
2135         session = container_of(ff->ph, struct perf_session, header);
2136 
2137         if (session->data->is_pipe) {
2138                 /* Save events for reading later by print_event_desc,
2139                  * since they can't be read again in pipe mode. */
2140                 ff->events = events;
2141         }
2142 
2143         for (evsel = events; evsel->attr.size; evsel++)
2144                 perf_evlist__set_event_name(session->evlist, evsel);
2145 
2146         if (!session->data->is_pipe)
2147                 free_event_desc(events);
2148 
2149         return 0;
2150 }
2151 
2152 static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused)
2153 {
2154         char *str, *cmdline = NULL, **argv = NULL;
2155         u32 nr, i, len = 0;
2156 
2157         if (do_read_u32(ff, &nr))
2158                 return -1;
2159 
2160         ff->ph->env.nr_cmdline = nr;
2161 
2162         cmdline = zalloc(ff->size + nr + 1);
2163         if (!cmdline)
2164                 return -1;
2165 
2166         argv = zalloc(sizeof(char *) * (nr + 1));
2167         if (!argv)
2168                 goto error;
2169 
2170         for (i = 0; i < nr; i++) {
2171                 str = do_read_string(ff);
2172                 if (!str)
2173                         goto error;
2174 
2175                 argv[i] = cmdline + len;
2176                 memcpy(argv[i], str, strlen(str) + 1);
2177                 len += strlen(str) + 1;
2178                 free(str);
2179         }
2180         ff->ph->env.cmdline = cmdline;
2181         ff->ph->env.cmdline_argv = (const char **) argv;
2182         return 0;
2183 
2184 error:
2185         free(argv);
2186         free(cmdline);
2187         return -1;
2188 }
2189 
2190 static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
2191 {
2192         u32 nr, i;
2193         char *str;
2194         struct strbuf sb;
2195         int cpu_nr = ff->ph->env.nr_cpus_avail;
2196         u64 size = 0;
2197         struct perf_header *ph = ff->ph;
2198         bool do_core_id_test = true;
2199 
2200         ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
2201         if (!ph->env.cpu)
2202                 return -1;
2203 
2204         if (do_read_u32(ff, &nr))
2205                 goto free_cpu;
2206 
2207         ph->env.nr_sibling_cores = nr;
2208         size += sizeof(u32);
2209         if (strbuf_init(&sb, 128) < 0)
2210                 goto free_cpu;
2211 
2212         for (i = 0; i < nr; i++) {
2213                 str = do_read_string(ff);
2214                 if (!str)
2215                         goto error;
2216 
2217                 /* include a NULL character at the end */
2218                 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2219                         goto error;
2220                 size += string_size(str);
2221                 free(str);
2222         }
2223         ph->env.sibling_cores = strbuf_detach(&sb, NULL);
2224 
2225         if (do_read_u32(ff, &nr))
2226                 return -1;
2227 
2228         ph->env.nr_sibling_threads = nr;
2229         size += sizeof(u32);
2230 
2231         for (i = 0; i < nr; i++) {
2232                 str = do_read_string(ff);
2233                 if (!str)
2234                         goto error;
2235 
2236                 /* include a NULL character at the end */
2237                 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2238                         goto error;
2239                 size += string_size(str);
2240                 free(str);
2241         }
2242         ph->env.sibling_threads = strbuf_detach(&sb, NULL);
2243 
2244         /*
2245          * The header may be from old perf,
2246          * which doesn't include core id and socket id information.
2247          */
2248         if (ff->size <= size) {
2249                 zfree(&ph->env.cpu);
2250                 return 0;
2251         }
2252 
2253         /* On s390 the socket_id number is not related to the numbers of cpus.
2254          * The socket_id number might be higher than the numbers of cpus.
2255          * This depends on the configuration.
2256          * AArch64 is the same.
2257          */
2258         if (ph->env.arch && (!strncmp(ph->env.arch, "s390", 4)
2259                           || !strncmp(ph->env.arch, "aarch64", 7)))
2260                 do_core_id_test = false;
2261 
2262         for (i = 0; i < (u32)cpu_nr; i++) {
2263                 if (do_read_u32(ff, &nr))
2264                         goto free_cpu;
2265 
2266                 ph->env.cpu[i].core_id = nr;
2267                 size += sizeof(u32);
2268 
2269                 if (do_read_u32(ff, &nr))
2270                         goto free_cpu;
2271 
2272                 if (do_core_id_test && nr != (u32)-1 && nr > (u32)cpu_nr) {
2273                         pr_debug("socket_id number is too big."
2274                                  "You may need to upgrade the perf tool.\n");
2275                         goto free_cpu;
2276                 }
2277 
2278                 ph->env.cpu[i].socket_id = nr;
2279                 size += sizeof(u32);
2280         }
2281 
2282         /*
2283          * The header may be from old perf,
2284          * which doesn't include die information.
2285          */
2286         if (ff->size <= size)
2287                 return 0;
2288 
2289         if (do_read_u32(ff, &nr))
2290                 return -1;
2291 
2292         ph->env.nr_sibling_dies = nr;
2293         size += sizeof(u32);
2294 
2295         for (i = 0; i < nr; i++) {
2296                 str = do_read_string(ff);
2297                 if (!str)
2298                         goto error;
2299 
2300                 /* include a NULL character at the end */
2301                 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2302                         goto error;
2303                 size += string_size(str);
2304                 free(str);
2305         }
2306         ph->env.sibling_dies = strbuf_detach(&sb, NULL);
2307 
2308         for (i = 0; i < (u32)cpu_nr; i++) {
2309                 if (do_read_u32(ff, &nr))
2310                         goto free_cpu;
2311 
2312                 ph->env.cpu[i].die_id = nr;
2313         }
2314 
2315         return 0;
2316 
2317 error:
2318         strbuf_release(&sb);
2319 free_cpu:
2320         zfree(&ph->env.cpu);
2321         return -1;
2322 }
2323 
2324 static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused)
2325 {
2326         struct numa_node *nodes, *n;
2327         u32 nr, i;
2328         char *str;
2329 
2330         /* nr nodes */
2331         if (do_read_u32(ff, &nr))
2332                 return -1;
2333 
2334         nodes = zalloc(sizeof(*nodes) * nr);
2335         if (!nodes)
2336                 return -ENOMEM;
2337 
2338         for (i = 0; i < nr; i++) {
2339                 n = &nodes[i];
2340 
2341                 /* node number */
2342                 if (do_read_u32(ff, &n->node))
2343                         goto error;
2344 
2345                 if (do_read_u64(ff, &n->mem_total))
2346                         goto error;
2347 
2348                 if (do_read_u64(ff, &n->mem_free))
2349                         goto error;
2350 
2351                 str = do_read_string(ff);
2352                 if (!str)
2353                         goto error;
2354 
2355                 n->map = cpu_map__new(str);
2356                 if (!n->map)
2357                         goto error;
2358 
2359                 free(str);
2360         }
2361         ff->ph->env.nr_numa_nodes = nr;
2362         ff->ph->env.numa_nodes = nodes;
2363         return 0;
2364 
2365 error:
2366         free(nodes);
2367         return -1;
2368 }
2369 
2370 static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused)
2371 {
2372         char *name;
2373         u32 pmu_num;
2374         u32 type;
2375         struct strbuf sb;
2376 
2377         if (do_read_u32(ff, &pmu_num))
2378                 return -1;
2379 
2380         if (!pmu_num) {
2381                 pr_debug("pmu mappings not available\n");
2382                 return 0;
2383         }
2384 
2385         ff->ph->env.nr_pmu_mappings = pmu_num;
2386         if (strbuf_init(&sb, 128) < 0)
2387                 return -1;
2388 
2389         while (pmu_num) {
2390                 if (do_read_u32(ff, &type))
2391                         goto error;
2392 
2393                 name = do_read_string(ff);
2394                 if (!name)
2395                         goto error;
2396 
2397                 if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
2398                         goto error;
2399                 /* include a NULL character at the end */
2400                 if (strbuf_add(&sb, "", 1) < 0)
2401                         goto error;
2402 
2403                 if (!strcmp(name, "msr"))
2404                         ff->ph->env.msr_pmu_type = type;
2405 
2406                 free(name);
2407                 pmu_num--;
2408         }
2409         ff->ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
2410         return 0;
2411 
2412 error:
2413         strbuf_release(&sb);
2414         return -1;
2415 }
2416 
2417 static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused)
2418 {
2419         size_t ret = -1;
2420         u32 i, nr, nr_groups;
2421         struct perf_session *session;
2422         struct perf_evsel *evsel, *leader = NULL;
2423         struct group_desc {
2424                 char *name;
2425                 u32 leader_idx;
2426                 u32 nr_members;
2427         } *desc;
2428 
2429         if (do_read_u32(ff, &nr_groups))
2430                 return -1;
2431 
2432         ff->ph->env.nr_groups = nr_groups;
2433         if (!nr_groups) {
2434                 pr_debug("group desc not available\n");
2435                 return 0;
2436         }
2437 
2438         desc = calloc(nr_groups, sizeof(*desc));
2439         if (!desc)
2440                 return -1;
2441 
2442         for (i = 0; i < nr_groups; i++) {
2443                 desc[i].name = do_read_string(ff);
2444                 if (!desc[i].name)
2445                         goto out_free;
2446 
2447                 if (do_read_u32(ff, &desc[i].leader_idx))
2448                         goto out_free;
2449 
2450                 if (do_read_u32(ff, &desc[i].nr_members))
2451                         goto out_free;
2452         }
2453 
2454         /*
2455          * Rebuild group relationship based on the group_desc
2456          */
2457         session = container_of(ff->ph, struct perf_session, header);
2458         session->evlist->nr_groups = nr_groups;
2459 
2460         i = nr = 0;
2461         evlist__for_each_entry(session->evlist, evsel) {
2462                 if (evsel->idx == (int) desc[i].leader_idx) {
2463                         evsel->leader = evsel;
2464                         /* {anon_group} is a dummy name */
2465                         if (strcmp(desc[i].name, "{anon_group}")) {
2466                                 evsel->group_name = desc[i].name;
2467                                 desc[i].name = NULL;
2468                         }
2469                         evsel->nr_members = desc[i].nr_members;
2470 
2471                         if (i >= nr_groups || nr > 0) {
2472                                 pr_debug("invalid group desc\n");
2473                                 goto out_free;
2474                         }
2475 
2476                         leader = evsel;
2477                         nr = evsel->nr_members - 1;
2478                         i++;
2479                 } else if (nr) {
2480                         /* This is a group member */
2481                         evsel->leader = leader;
2482 
2483                         nr--;
2484                 }
2485         }
2486 
2487         if (i != nr_groups || nr != 0) {
2488                 pr_debug("invalid group desc\n");
2489                 goto out_free;
2490         }
2491 
2492         ret = 0;
2493 out_free:
2494         for (i = 0; i < nr_groups; i++)
2495                 zfree(&desc[i].name);
2496         free(desc);
2497 
2498         return ret;
2499 }
2500 
2501 static int process_auxtrace(struct feat_fd *ff, void *data __maybe_unused)
2502 {
2503         struct perf_session *session;
2504         int err;
2505 
2506         session = container_of(ff->ph, struct perf_session, header);
2507 
2508         err = auxtrace_index__process(ff->fd, ff->size, session,
2509                                       ff->ph->needs_swap);
2510         if (err < 0)
2511                 pr_err("Failed to process auxtrace index\n");
2512         return err;
2513 }
2514 
2515 static int process_cache(struct feat_fd *ff, void *data __maybe_unused)
2516 {
2517         struct cpu_cache_level *caches;
2518         u32 cnt, i, version;
2519 
2520         if (do_read_u32(ff, &version))
2521                 return -1;
2522 
2523         if (version != 1)
2524                 return -1;
2525 
2526         if (do_read_u32(ff, &cnt))
2527                 return -1;
2528 
2529         caches = zalloc(sizeof(*caches) * cnt);
2530         if (!caches)
2531                 return -1;
2532 
2533         for (i = 0; i < cnt; i++) {
2534                 struct cpu_cache_level c;
2535 
2536                 #define _R(v)                                           \
2537                         if (do_read_u32(ff, &c.v))\
2538                                 goto out_free_caches;                   \
2539 
2540                 _R(level)
2541                 _R(line_size)
2542                 _R(sets)
2543                 _R(ways)
2544                 #undef _R
2545 
2546                 #define _R(v)                                   \
2547                         c.v = do_read_string(ff);               \
2548                         if (!c.v)                               \
2549                                 goto out_free_caches;
2550 
2551                 _R(type)
2552                 _R(size)
2553                 _R(map)
2554                 #undef _R
2555 
2556                 caches[i] = c;
2557         }
2558 
2559         ff->ph->env.caches = caches;
2560         ff->ph->env.caches_cnt = cnt;
2561         return 0;
2562 out_free_caches:
2563         free(caches);
2564         return -1;
2565 }
2566 
2567 static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused)
2568 {
2569         struct perf_session *session;
2570         u64 first_sample_time, last_sample_time;
2571         int ret;
2572 
2573         session = container_of(ff->ph, struct perf_session, header);
2574 
2575         ret = do_read_u64(ff, &first_sample_time);
2576         if (ret)
2577                 return -1;
2578 
2579         ret = do_read_u64(ff, &last_sample_time);
2580         if (ret)
2581                 return -1;
2582 
2583         session->evlist->first_sample_time = first_sample_time;
2584         session->evlist->last_sample_time = last_sample_time;
2585         return 0;
2586 }
2587 
2588 static int process_mem_topology(struct feat_fd *ff,
2589                                 void *data __maybe_unused)
2590 {
2591         struct memory_node *nodes;
2592         u64 version, i, nr, bsize;
2593         int ret = -1;
2594 
2595         if (do_read_u64(ff, &version))
2596                 return -1;
2597 
2598         if (version != 1)
2599                 return -1;
2600 
2601         if (do_read_u64(ff, &bsize))
2602                 return -1;
2603 
2604         if (do_read_u64(ff, &nr))
2605                 return -1;
2606 
2607         nodes = zalloc(sizeof(*nodes) * nr);
2608         if (!nodes)
2609                 return -1;
2610 
2611         for (i = 0; i < nr; i++) {
2612                 struct memory_node n;
2613 
2614                 #define _R(v)                           \
2615                         if (do_read_u64(ff, &n.v))      \
2616                                 goto out;               \
2617 
2618                 _R(node)
2619                 _R(size)
2620 
2621                 #undef _R
2622 
2623                 if (do_read_bitmap(ff, &n.set, &n.size))
2624                         goto out;
2625 
2626                 nodes[i] = n;
2627         }
2628 
2629         ff->ph->env.memory_bsize    = bsize;
2630         ff->ph->env.memory_nodes    = nodes;
2631         ff->ph->env.nr_memory_nodes = nr;
2632         ret = 0;
2633 
2634 out:
2635         if (ret)
2636                 free(nodes);
2637         return ret;
2638 }
2639 
2640 static int process_clockid(struct feat_fd *ff,
2641                            void *data __maybe_unused)
2642 {
2643         if (do_read_u64(ff, &ff->ph->env.clockid_res_ns))
2644                 return -1;
2645 
2646         return 0;
2647 }
2648 
2649 static int process_dir_format(struct feat_fd *ff,
2650                               void *_data __maybe_unused)
2651 {
2652         struct perf_session *session;
2653         struct perf_data *data;
2654 
2655         session = container_of(ff->ph, struct perf_session, header);
2656         data = session->data;
2657 
2658         if (WARN_ON(!perf_data__is_dir(data)))
2659                 return -1;
2660 
2661         return do_read_u64(ff, &data->dir.version);
2662 }
2663 
2664 #ifdef HAVE_LIBBPF_SUPPORT
/*
 * HEADER_BPF_PROG_INFO: read the bpf_prog_info records dumped at record
 * time and insert them into the session's perf_env under
 * env->bpf_progs.lock.  Returns 0 on success (or when skipping an
 * opposite-endian file), -1 on malformed input or allocation failure.
 */
static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
{
	struct bpf_prog_info_linear *info_linear;
	struct bpf_prog_info_node *info_node;
	struct perf_env *env = &ff->ph->env;
	u32 count, i;
	int err = -1;

	if (ff->ph->needs_swap) {
		/* Byte-swapping the nested arrays is not implemented yet. */
		pr_warning("interpreting bpf_prog_info from systems with endianity is not yet supported\n");
		return 0;
	}

	if (do_read_u32(ff, &count))
		return -1;

	down_write(&env->bpf_progs.lock);

	for (i = 0; i < count; ++i) {
		u32 info_len, data_len;

		/* reset so the error path frees only the current record */
		info_linear = NULL;
		info_node = NULL;
		if (do_read_u32(ff, &info_len))
			goto out;
		if (do_read_u32(ff, &data_len))
			goto out;

		if (info_len > sizeof(struct bpf_prog_info)) {
			/* larger than any bpf_prog_info this perf knows */
			pr_warning("detected invalid bpf_prog_info\n");
			goto out;
		}

		info_linear = malloc(sizeof(struct bpf_prog_info_linear) +
				     data_len);
		if (!info_linear)
			goto out;
		info_linear->info_len = sizeof(struct bpf_prog_info);
		info_linear->data_len = data_len;
		if (do_read_u64(ff, (u64 *)(&info_linear->arrays)))
			goto out;
		if (__do_read(ff, &info_linear->info, info_len))
			goto out;
		/* zero-fill fields this perf knows but the file lacks */
		if (info_len < sizeof(struct bpf_prog_info))
			memset(((void *)(&info_linear->info)) + info_len, 0,
			       sizeof(struct bpf_prog_info) - info_len);

		if (__do_read(ff, info_linear->data, data_len))
			goto out;

		info_node = malloc(sizeof(struct bpf_prog_info_node));
		if (!info_node)
			goto out;

		/* after reading from file, translate offset to address */
		bpf_program__bpil_offs_to_addr(info_linear);
		/* ownership of info_linear moves into the env tree here */
		info_node->info_linear = info_linear;
		perf_env__insert_bpf_prog_info(env, info_node);
	}

	up_write(&env->bpf_progs.lock);
	return 0;
out:
	free(info_linear);
	free(info_node);
	up_write(&env->bpf_progs.lock);
	return err;
}
2733 #else // HAVE_LIBBPF_SUPPORT
/* Without libbpf there is nothing to decode; skip the section silently. */
static int process_bpf_prog_info(struct feat_fd *ff __maybe_unused, void *data __maybe_unused)
{
	return 0;
}
2738 #endif // HAVE_LIBBPF_SUPPORT
2739 
2740 static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
2741 {
2742         struct perf_env *env = &ff->ph->env;
2743         struct btf_node *node = NULL;
2744         u32 count, i;
2745         int err = -1;
2746 
2747         if (ff->ph->needs_swap) {
2748                 pr_warning("interpreting btf from systems with endianity is not yet supported\n");
2749                 return 0;
2750         }
2751 
2752         if (do_read_u32(ff, &count))
2753                 return -1;
2754 
2755         down_write(&env->bpf_progs.lock);
2756 
2757         for (i = 0; i < count; ++i) {
2758                 u32 id, data_size;
2759 
2760                 if (do_read_u32(ff, &id))
2761                         goto out;
2762                 if (do_read_u32(ff, &data_size))
2763                         goto out;
2764 
2765                 node = malloc(sizeof(struct btf_node) + data_size);
2766                 if (!node)
2767                         goto out;
2768 
2769                 node->id = id;
2770                 node->data_size = data_size;
2771 
2772                 if (__do_read(ff, node->data, data_size))
2773                         goto out;
2774 
2775                 perf_env__insert_btf(env, node);
2776                 node = NULL;
2777         }
2778 
2779         err = 0;
2780 out:
2781         up_write(&env->bpf_progs.lock);
2782         free(node);
2783         return err;
2784 }
2785 
2786 static int process_compressed(struct feat_fd *ff,
2787                               void *data __maybe_unused)
2788 {
2789         if (do_read_u32(ff, &(ff->ph->env.comp_ver)))
2790                 return -1;
2791 
2792         if (do_read_u32(ff, &(ff->ph->env.comp_type)))
2793                 return -1;
2794 
2795         if (do_read_u32(ff, &(ff->ph->env.comp_level)))
2796                 return -1;
2797 
2798         if (do_read_u32(ff, &(ff->ph->env.comp_ratio)))
2799                 return -1;
2800 
2801         if (do_read_u32(ff, &(ff->ph->env.comp_mmap_len)))
2802                 return -1;
2803 
2804         return 0;
2805 }
2806 
/*
 * Per-feature callbacks for the HEADER_* sections:
 *   write      - serialize the feature into a perf.data header
 *   print      - display the feature (NULL: nothing to print)
 *   process    - parse the feature when reading a perf.data file
 *   name       - stringified HEADER_* name for diagnostics
 *   full_only  - feature only handled when the full header is requested
 *   synthesize - set via FEAT_OPR below; presumably marks features that
 *                can be synthesized as events — confirm against callers
 */
struct feature_ops {
	int (*write)(struct feat_fd *ff, struct perf_evlist *evlist);
	void (*print)(struct feat_fd *ff, FILE *fp);
	int (*process)(struct feat_fd *ff, void *data);
	const char *name;
	bool full_only;
	bool synthesize;
};
2815 
/*
 * FEAT_OPR(n, func, full_only): designated initializer for feat_ops[]
 * wiring HEADER_<n> to write_<func>/print_<func>/process_<func> and
 * marking it .synthesize = true.  FEAT_OPN() is identical but leaves
 * .synthesize false.  Missing callbacks are #defined to NULL below.
 */
#define FEAT_OPR(n, func, __full_only) \
	[HEADER_##n] = {					\
		.name       = __stringify(n),			\
		.write      = write_##func,			\
		.print      = print_##func,			\
		.full_only  = __full_only,			\
		.process    = process_##func,			\
		.synthesize = true				\
	}

#define FEAT_OPN(n, func, __full_only) \
	[HEADER_##n] = {					\
		.name       = __stringify(n),			\
		.write      = write_##func,			\
		.print      = print_##func,			\
		.full_only  = __full_only,			\
		.process    = process_##func			\
	}

/* feature_ops not implemented: */
#define print_tracing_data	NULL
#define print_build_id		NULL

#define process_branch_stack	NULL
#define process_stat		NULL
2841 
2842 
/*
 * Master table mapping each HEADER_* feature bit to its callbacks,
 * indexed by the HEADER_* enum value.  NULL callbacks (from the
 * #defines above) make the corresponding operation a no-op.
 */
static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
	FEAT_OPN(TRACING_DATA,	tracing_data,	false),
	FEAT_OPN(BUILD_ID,	build_id,	false),
	FEAT_OPR(HOSTNAME,	hostname,	false),
	FEAT_OPR(OSRELEASE,	osrelease,	false),
	FEAT_OPR(VERSION,	version,	false),
	FEAT_OPR(ARCH,		arch,		false),
	FEAT_OPR(NRCPUS,	nrcpus,		false),
	FEAT_OPR(CPUDESC,	cpudesc,	false),
	FEAT_OPR(CPUID,		cpuid,		false),
	FEAT_OPR(TOTAL_MEM,	total_mem,	false),
	FEAT_OPR(EVENT_DESC,	event_desc,	false),
	FEAT_OPR(CMDLINE,	cmdline,	false),
	FEAT_OPR(CPU_TOPOLOGY,	cpu_topology,	true),
	FEAT_OPR(NUMA_TOPOLOGY,	numa_topology,	true),
	FEAT_OPN(BRANCH_STACK,	branch_stack,	false),
	FEAT_OPR(PMU_MAPPINGS,	pmu_mappings,	false),
	FEAT_OPR(GROUP_DESC,	group_desc,	false),
	FEAT_OPN(AUXTRACE,	auxtrace,	false),
	FEAT_OPN(STAT,		stat,		false),
	FEAT_OPN(CACHE,		cache,		true),
	FEAT_OPR(SAMPLE_TIME,	sample_time,	false),
	FEAT_OPR(MEM_TOPOLOGY,	mem_topology,	true),
	FEAT_OPR(CLOCKID,	clockid,	false),
	FEAT_OPN(DIR_FORMAT,	dir_format,	false),
	FEAT_OPR(BPF_PROG_INFO, bpf_prog_info,	false),
	FEAT_OPR(BPF_BTF,	bpf_btf,	false),
	FEAT_OPR(COMPRESSED,	compressed,	false),
};
2872 
/*
 * Cookie threaded through perf_header__process_sections() to
 * perf_file_section__fprintf_info().
 */
struct header_print_data {
	FILE *fp;
	bool full; /* extended list of headers */
};
2877 
/*
 * Print one feature section to hd->fp.  Callback for
 * perf_header__process_sections(); @data is a struct header_print_data.
 * Always returns 0 so that one unprintable feature does not stop the
 * walk over the remaining sections.
 */
static int perf_file_section__fprintf_info(struct perf_file_section *section,
					   struct perf_header *ph,
					   int feat, int fd, void *data)
{
	struct header_print_data *hd = data;
	struct feat_fd ff;

	/* position the fd at the feature payload; skip the feature on failure */
	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
				"%d, continuing...\n", section->offset, feat);
		return 0;
	}
	if (feat >= HEADER_LAST_FEATURE) {
		pr_warning("unknown feature %d\n", feat);
		return 0;
	}
	if (!feat_ops[feat].print)
		return 0;

	ff = (struct  feat_fd) {
		.fd = fd,
		.ph = ph,
	};

	/* full_only features are only expanded when the caller asked for -I */
	if (!feat_ops[feat].full_only || hd->full)
		feat_ops[feat].print(&ff, hd->fp);
	else
		fprintf(hd->fp, "# %s info available, use -I to display\n",
			feat_ops[feat].name);

	return 0;
}
2910 
/*
 * Print a human-readable summary of the perf.data header to @fp:
 * capture time, layout offsets/sizes, each present feature section
 * (full_only ones only when @full is set) and, for on-disk files,
 * the list of features missing from the file.
 */
int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
{
	struct header_print_data hd;
	struct perf_header *header = &session->header;
	int fd = perf_data__fd(session->data);
	struct stat st;
	time_t stctime;
	int ret, bit;

	hd.fp = fp;
	hd.full = full;

	ret = fstat(fd, &st);
	if (ret == -1)
		return -1;

	/* copy into a plain time_t for ctime() regardless of st_ctime's type */
	stctime = st.st_ctime;
	fprintf(fp, "# captured on    : %s", ctime(&stctime));

	fprintf(fp, "# header version : %u\n", header->version);
	fprintf(fp, "# data offset    : %" PRIu64 "\n", header->data_offset);
	fprintf(fp, "# data size      : %" PRIu64 "\n", header->data_size);
	fprintf(fp, "# feat offset    : %" PRIu64 "\n", header->feat_offset);

	perf_header__process_sections(header, fd, &hd,
				      perf_file_section__fprintf_info);

	/* a pipe stream has no feature-section index to report against */
	if (session->data->is_pipe)
		return 0;

	fprintf(fp, "# missing features: ");
	for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
		/* skip bit 0: feat_ops[] has no entry there (presumably reserved) */
		if (bit)
			fprintf(fp, "%s ", feat_ops[bit].name);
	}

	fprintf(fp, "\n");
	return 0;
}
2950 
2951 static int do_write_feat(struct feat_fd *ff, int type,
2952                          struct perf_file_section **p,
2953                          struct perf_evlist *evlist)
2954 {
2955         int err;
2956         int ret = 0;
2957 
2958         if (perf_header__has_feat(ff->ph, type)) {
2959                 if (!feat_ops[type].write)
2960                         return -1;
2961 
2962                 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
2963                         return -1;
2964 
2965                 (*p)->offset = lseek(ff->fd, 0, SEEK_CUR);
2966 
2967                 err = feat_ops[type].write(ff, evlist);
2968                 if (err < 0) {
2969                         pr_debug("failed to write feature %s\n", feat_ops[type].name);
2970 
2971                         /* undo anything written */
2972                         lseek(ff->fd, (*p)->offset, SEEK_SET);
2973 
2974                         return -1;
2975                 }
2976                 (*p)->size = lseek(ff->fd, 0, SEEK_CUR) - (*p)->offset;
2977                 (*p)++;
2978         }
2979         return ret;
2980 }
2981 
/*
 * Append all enabled feature sections to the file: payloads are written
 * first, starting just past the room reserved for the section table,
 * then the table of perf_file_section entries is written back at
 * header->feat_offset.  Features whose writer fails are cleared from
 * the bitmap so readers never see a dangling entry.  Returns 0 (also
 * when no feature bit is set) or a negative error.
 */
static int perf_header__adds_write(struct perf_header *header,
				   struct perf_evlist *evlist, int fd)
{
	int nr_sections;
	struct feat_fd ff;
	struct perf_file_section *feat_sec, *p;
	int sec_size;
	u64 sec_start;
	int feat;
	int err;

	ff = (struct feat_fd){
		.fd  = fd,
		.ph = header,
	};

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
	if (feat_sec == NULL)
		return -ENOMEM;

	sec_size = sizeof(*feat_sec) * nr_sections;

	/* reserve room for the section table, payloads go after it */
	sec_start = header->feat_offset;
	lseek(fd, sec_start + sec_size, SEEK_SET);

	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
		if (do_write_feat(&ff, feat, &p, evlist))
			perf_header__clear_feat(header, feat);
	}

	lseek(fd, sec_start, SEEK_SET);
	/*
	 * may write more than needed due to dropped feature, but
	 * this is okay, reader will skip the missing entries
	 */
	err = do_write(&ff, feat_sec, sec_size);
	if (err < 0)
		pr_debug("failed to write feature section\n");
	free(feat_sec);
	return err;
}
3027 
3028 int perf_header__write_pipe(int fd)
3029 {
3030         struct perf_pipe_file_header f_header;
3031         struct feat_fd ff;
3032         int err;
3033 
3034         ff = (struct feat_fd){ .fd = fd };
3035 
3036         f_header = (struct perf_pipe_file_header){
3037                 .magic     = PERF_MAGIC,
3038                 .size      = sizeof(f_header),
3039         };
3040 
3041         err = do_write(&ff, &f_header, sizeof(f_header));
3042         if (err < 0) {
3043                 pr_debug("failed to write perf pipe header\n");
3044                 return err;
3045         }
3046 
3047         return 0;
3048 }
3049 
/*
 * Write (or rewrite) the on-disk perf.data header for @session: the
 * per-evsel sample ids, the attr table, optionally (at_exit) the
 * feature sections, and finally the fixed file header at offset 0.
 * Leaves the file offset at the end of the data section so recording
 * can continue.  Returns 0 or a negative error.
 */
int perf_session__write_header(struct perf_session *session,
			       struct perf_evlist *evlist,
			       int fd, bool at_exit)
{
	struct perf_file_header f_header;
	struct perf_file_attr	f_attr;
	struct perf_header *header = &session->header;
	struct perf_evsel *evsel;
	struct feat_fd ff;
	u64 attr_offset;
	int err;

	ff = (struct feat_fd){ .fd = fd};
	/* leave room for the fixed header, which is written last */
	lseek(fd, sizeof(f_header), SEEK_SET);

	evlist__for_each_entry(session->evlist, evsel) {
		evsel->id_offset = lseek(fd, 0, SEEK_CUR);
		err = do_write(&ff, evsel->id, evsel->ids * sizeof(u64));
		if (err < 0) {
			pr_debug("failed to write perf header\n");
			return err;
		}
	}

	attr_offset = lseek(ff.fd, 0, SEEK_CUR);

	evlist__for_each_entry(evlist, evsel) {
		f_attr = (struct perf_file_attr){
			.attr = evsel->attr,
			.ids  = {
				.offset = evsel->id_offset,
				.size	= evsel->ids * sizeof(u64),
			}
		};
		err = do_write(&ff, &f_attr, sizeof(f_attr));
		if (err < 0) {
			pr_debug("failed to write perf header attribute\n");
			return err;
		}
	}

	if (!header->data_offset)
		header->data_offset = lseek(fd, 0, SEEK_CUR);
	header->feat_offset = header->data_offset + header->data_size;

	/* feature sections are only appended on the final (at_exit) pass */
	if (at_exit) {
		err = perf_header__adds_write(header, evlist, fd);
		if (err < 0)
			return err;
	}

	f_header = (struct perf_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
		.attr_size = sizeof(f_attr),
		.attrs = {
			.offset = attr_offset,
			.size	= evlist->nr_entries * sizeof(f_attr),
		},
		.data = {
			.offset = header->data_offset,
			.size	= header->data_size,
		},
		/* event_types is ignored, store zeros */
	};

	memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));

	lseek(fd, 0, SEEK_SET);
	err = do_write(&ff, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf header\n");
		return err;
	}
	lseek(fd, header->data_offset + header->data_size, SEEK_SET);

	return 0;
}
3128 
3129 static int perf_header__getbuffer64(struct perf_header *header,
3130                                     int fd, void *buf, size_t size)
3131 {
3132         if (readn(fd, buf, size) <= 0)
3133                 return -1;
3134 
3135         if (header->needs_swap)
3136                 mem_bswap_64(buf, size);
3137 
3138         return 0;
3139 }
3140 
/*
 * Walk the feature-section table stored at header->feat_offset and call
 * @process on each section whose feature bit is set, in increasing
 * feature order.  Stops on the first callback error.  Returns 0 on
 * success or a negative error.
 */
int perf_header__process_sections(struct perf_header *header, int fd,
				  void *data,
				  int (*process)(struct perf_file_section *section,
						 struct perf_header *ph,
						 int feat, int fd, void *data))
{
	struct perf_file_section *feat_sec, *sec;
	int nr_sections;
	int sec_size;
	int feat;
	int err;

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
	if (!feat_sec)
		return -1;

	sec_size = sizeof(*feat_sec) * nr_sections;

	lseek(fd, header->feat_offset, SEEK_SET);

	/* read (and endian-fix) the whole section table in one go */
	err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
	if (err < 0)
		goto out_free;

	for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
		err = process(sec++, header, feat, fd, data);
		if (err < 0)
			goto out_free;
	}
	err = 0;
out_free:
	free(feat_sec);
	return err;
}
3179 
/*
 * Known on-file perf_event_attr sizes, one per ABI revision.  The
 * trailing 0 is the loop terminator for try_all_file_abis().
 */
static const int attr_file_abi_sizes[] = {
	[0] = PERF_ATTR_SIZE_VER0,
	[1] = PERF_ATTR_SIZE_VER1,
	[2] = PERF_ATTR_SIZE_VER2,
	[3] = PERF_ATTR_SIZE_VER3,
	[4] = PERF_ATTR_SIZE_VER4,
	0,
};
3188 
3189 /*
3190  * In the legacy file format, the magic number is not used to encode endianness.
3191  * hdr_sz was used to encode endianness. But given that hdr_sz can vary based
3192  * on ABI revisions, we need to try all combinations for all endianness to
3193  * detect the endianness.
3194  */
static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
{
	uint64_t ref_size, attr_size;
	int i;

	for (i = 0 ; attr_file_abi_sizes[i]; i++) {
		/* expected header size = this ABI's attr size + the ids section */
		ref_size = attr_file_abi_sizes[i]
			 + sizeof(struct perf_file_section);
		if (hdr_sz != ref_size) {
			/* a match only after swapping means a cross-endian file */
			attr_size = bswap_64(hdr_sz);
			if (attr_size != ref_size)
				continue;

			ph->needs_swap = true;
		}
		pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
			 i,
			 ph->needs_swap);
		return 0;
	}
	/* could not determine endianness */
	return -1;
}
3218 
/* size of the pipe-mode file header for ABI revision 0 */
#define PERF_PIPE_HDR_VER0	16

/* known pipe-mode header sizes; 0-terminated like attr_file_abi_sizes[] */
static const size_t attr_pipe_abi_sizes[] = {
	[0] = PERF_PIPE_HDR_VER0,
	0,
};
3225 
/*
 * In the legacy pipe format, there is an implicit assumption that endianness
 * between the host recording the samples and the host parsing the samples is
 * the same. This is not always the case given that the pipe output may be
 * redirected into a file and analyzed on a different machine with possibly a
 * different endianness and perf_event ABI revisions in the perf tool itself.
 */
3233 static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
3234 {
3235         u64 attr_size;
3236         int i;
3237 
3238         for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
3239                 if (hdr_sz != attr_pipe_abi_sizes[i]) {
3240                         attr_size = bswap_64(hdr_sz);
3241                         if (attr_size != hdr_sz)
3242                                 continue;
3243 
3244                         ph->needs_swap = true;
3245                 }
3246                 pr_debug("Pipe ABI%d perf.data file detected\n", i);
3247                 return 0;
3248         }
3249         return -1;
3250 }
3251 
3252 bool is_perf_magic(u64 magic)
3253 {
3254         if (!memcmp(&magic, __perf_magic1, sizeof(magic))
3255                 || magic == __perf_magic2
3256                 || magic == __perf_magic2_sw)
3257                 return true;
3258 
3259         return false;
3260 }
3261 
/*
 * Identify the file format version and endianness from @magic (and, for
 * the legacy v1 format, from the header size @hdr_sz).  Fills in
 * ph->version and ph->needs_swap.  Returns 0 on success, -1 when the
 * file is not recognizable as perf.data.
 */
static int check_magic_endian(u64 magic, uint64_t hdr_sz,
			      bool is_pipe, struct perf_header *ph)
{
	int ret;

	/* check for legacy format */
	ret = memcmp(&magic, __perf_magic1, sizeof(magic));
	if (ret == 0) {
		ph->version = PERF_HEADER_VERSION_1;
		pr_debug("legacy perf.data format\n");
		if (is_pipe)
			return try_all_pipe_abis(hdr_sz, ph);

		return try_all_file_abis(hdr_sz, ph);
	}
	/*
	 * the new magic number serves two purposes:
	 * - unique number to identify actual perf.data files
	 * - encode endianness of file
	 */
	ph->version = PERF_HEADER_VERSION_2;

	/* check magic number with one endianness */
	if (magic == __perf_magic2)
		return 0;

	/* check magic number with opposite endianness */
	if (magic != __perf_magic2_sw)
		return -1;

	ph->needs_swap = true;

	return 0;
}
3296 
/*
 * Read the on-disk file header from @fd and populate @ph with the
 * feature bitmap and data-section geometry.  Handles the legacy header
 * without a feature bitmap, cross-endian files, and the word-size
 * ambiguity of the bitmap (see the long comment below).  Returns 0 on
 * success, -1 on any read/format failure.
 */
int perf_file_header__read(struct perf_file_header *header,
			   struct perf_header *ph, int fd)
{
	ssize_t ret;

	lseek(fd, 0, SEEK_SET);

	ret = readn(fd, header, sizeof(*header));
	if (ret <= 0)
		return -1;

	if (check_magic_endian(header->magic,
			       header->attr_size, false, ph) < 0) {
		pr_debug("magic/endian check failed\n");
		return -1;
	}

	/* swap the fixed-width fields only; the bitmap is handled below */
	if (ph->needs_swap) {
		mem_bswap_64(header, offsetof(struct perf_file_header,
			     adds_features));
	}

	if (header->size != sizeof(*header)) {
		/* Support the previous format */
		if (header->size == offsetof(typeof(*header), adds_features))
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
		else
			return -1;
	} else if (ph->needs_swap) {
		/*
		 * feature bitmap is declared as an array of unsigned longs --
		 * not good since its size can differ between the host that
		 * generated the data file and the host analyzing the file.
		 *
		 * We need to handle endianness, but we don't know the size of
		 * the unsigned long where the file was generated. Take a best
		 * guess at determining it: try 64-bit swap first (ie., file
		 * created on a 64-bit host), and check if the hostname feature
		 * bit is set (this feature bit is forced on as of fbe96f2).
		 * If the bit is not, undo the 64-bit swap and try a 32-bit
		 * swap. If the hostname bit is still not set (e.g., older data
		 * file), punt and fallback to the original behavior --
		 * clearing all feature bits and setting buildid.
		 */
		mem_bswap_64(&header->adds_features,
			    BITS_TO_U64(HEADER_FEAT_BITS));

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			/* unswap as u64 */
			mem_bswap_64(&header->adds_features,
				    BITS_TO_U64(HEADER_FEAT_BITS));

			/* unswap as u32 */
			mem_bswap_32(&header->adds_features,
				    BITS_TO_U32(HEADER_FEAT_BITS));
		}

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
			set_bit(HEADER_BUILD_ID, header->adds_features);
		}
	}

	memcpy(&ph->adds_features, &header->adds_features,
	       sizeof(ph->adds_features));

	ph->data_offset  = header->data.offset;
	ph->data_size	 = header->data.size;
	ph->feat_offset  = header->data.offset + header->data.size;
	return 0;
}
3368 
/*
 * Process one feature section while reading a perf.data file: seek to
 * its payload and dispatch to the feature's ->process() callback.
 * Callback for perf_header__process_sections().  Seek failures and
 * unknown features are skipped (return 0) so the remaining sections
 * still get processed.
 */
static int perf_file_section__process(struct perf_file_section *section,
				      struct perf_header *ph,
				      int feat, int fd, void *data)
{
	struct feat_fd fdd = {
		.fd	= fd,
		.ph	= ph,
		.size	= section->size,
		.offset	= section->offset,
	};

	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
			  "%d, continuing...\n", section->offset, feat);
		return 0;
	}

	if (feat >= HEADER_LAST_FEATURE) {
		pr_debug("unknown feature %d, continuing...\n", feat);
		return 0;
	}

	if (!feat_ops[feat].process)
		return 0;

	return feat_ops[feat].process(&fdd, data);
}
3396 
/*
 * Read and endian-check the pipe-mode file header from @fd.  When
 * @repipe is set, the header is copied through to stdout so the stream
 * can be forwarded to another consumer.  Returns 0 on success, -1 on
 * read/format/write failure.
 */
static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
				       struct perf_header *ph, int fd,
				       bool repipe)
{
	struct feat_fd ff = {
		.fd = STDOUT_FILENO,
		.ph = ph,
	};
	ssize_t ret;

	ret = readn(fd, header, sizeof(*header));
	if (ret <= 0)
		return -1;

	if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
		pr_debug("endian/magic failed\n");
		return -1;
	}

	if (ph->needs_swap)
		header->size = bswap_64(header->size);

	if (repipe && do_write(&ff, header, sizeof(*header)) < 0)
		return -1;

	return 0;
}
3424 
3425 static int perf_header__read_pipe(struct perf_session *session)
3426 {
3427         struct perf_header *header = &session->header;
3428         struct perf_pipe_file_header f_header;
3429 
3430         if (perf_file_header__read_pipe(&f_header, header,
3431                                         perf_data__fd(session->data),
3432                                         session->repipe) < 0) {
3433                 pr_debug("incompatible file format\n");
3434                 return -EINVAL;
3435         }
3436 
3437         return 0;
3438 }
3439 
3440 static int read_attr(int fd, struct perf_header *ph,
3441                      struct perf_file_attr *f_attr)
3442 {
3443         struct perf_event_attr *attr = &f_attr->attr;
3444         size_t sz, left;
3445         size_t our_sz = sizeof(f_attr->attr);
3446         ssize_t ret;
3447 
3448         memset(f_attr, 0, sizeof(*f_attr));
3449 
3450         /* read minimal guaranteed structure */
3451         ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
3452         if (ret <= 0) {
3453                 pr_debug("cannot read %d bytes of header attr\n",
3454                          PERF_ATTR_SIZE_VER0);
3455                 return -1;
3456         }
3457 
3458         /* on file perf_event_attr size */
3459         sz = attr->size;
3460 
3461         if (ph->needs_swap)
3462                 sz = bswap_32(sz);
3463 
3464         if (sz == 0) {
3465                 /* assume ABI0 */
3466                 sz =  PERF_ATTR_SIZE_VER0;
3467         } else if (sz > our_sz) {
3468                 pr_debug("file uses a more recent and unsupported ABI"
3469                          " (%zu bytes extra)\n", sz - our_sz);
3470                 return -1;
3471         }
3472         /* what we have not yet read and that we know about */
3473         left = sz - PERF_ATTR_SIZE_VER0;
3474         if (left) {
3475                 void *ptr = attr;
3476                 ptr += PERF_ATTR_SIZE_VER0;
3477 
3478                 ret = readn(fd, ptr, left);
3479         }
3480         /* read perf_file_section, ids are read in caller */
3481         ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
3482 
3483         return ret <= 0 ? -1 : 0;
3484 }
3485 
/*
 * Resolve @evsel's tracepoint format from @pevent (looked up by
 * attr.config) and cache it in evsel->tp_format.  Also derives a
 * "system:name" evsel name when none was set.  Returns 0 on success,
 * -1 when trace data is missing/broken or allocation fails.
 */
static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
						struct tep_handle *pevent)
{
	struct tep_event *event;
	char bf[128];

	/* already prepared */
	if (evsel->tp_format)
		return 0;

	if (pevent == NULL) {
		pr_debug("broken or missing trace data\n");
		return -1;
	}

	event = tep_find_event(pevent, evsel->attr.config);
	if (event == NULL) {
		pr_debug("cannot find event format for %d\n", (int)evsel->attr.config);
		return -1;
	}

	if (!evsel->name) {
		snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
		evsel->name = strdup(bf);
		if (evsel->name == NULL)
			return -1;
	}

	evsel->tp_format = event;
	return 0;
}
3517 
3518 static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
3519                                                   struct tep_handle *pevent)
3520 {
3521         struct perf_evsel *pos;
3522 
3523         evlist__for_each_entry(evlist, pos) {
3524                 if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
3525                     perf_evsel__prepare_tracepoint_event(pos, pevent))
3526                         return -1;
3527         }
3528 
3529         return 0;
3530 }
3531 
/*
 * Top-level reader for a perf.data header: parses the file (or pipe)
 * header, builds session->evlist from the on-file attrs and their
 * sample ids, then processes all feature sections and attaches
 * tracepoint format data.  Returns 0 on success, or -EINVAL, -ENOMEM
 * or -errno on failure.
 */
int perf_session__read_header(struct perf_session *session)
{
	struct perf_data *data = session->data;
	struct perf_header *header = &session->header;
	struct perf_file_header	f_header;
	struct perf_file_attr	f_attr;
	u64			f_id;
	int nr_attrs, nr_ids, i, j;
	int fd = perf_data__fd(data);

	session->evlist = perf_evlist__new();
	if (session->evlist == NULL)
		return -ENOMEM;

	session->evlist->env = &header->env;
	session->machines.host.env = &header->env;
	if (perf_data__is_pipe(data))
		return perf_header__read_pipe(session);

	if (perf_file_header__read(&f_header, header, fd) < 0)
		return -EINVAL;

	/*
	 * Sanity check that perf.data was written cleanly; data size is
	 * initialized to 0 and updated only if the on_exit function is run.
	 * If data size is still 0 then the file contains only partial
	 * information.  Just warn user and process it as much as it can.
	 */
	if (f_header.data.size == 0) {
		pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
			   "Was the 'perf record' command properly terminated?\n",
			   data->file.path);
	}

	/* also guards the division computing nr_attrs below */
	if (f_header.attr_size == 0) {
		pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n"
		       "Was the 'perf record' command properly terminated?\n",
		       data->file.path);
		return -EINVAL;
	}

	nr_attrs = f_header.attrs.size / f_header.attr_size;
	lseek(fd, f_header.attrs.offset, SEEK_SET);

	for (i = 0; i < nr_attrs; i++) {
		struct perf_evsel *evsel;
		off_t tmp;

		if (read_attr(fd, header, &f_attr) < 0)
			goto out_errno;

		if (header->needs_swap) {
			f_attr.ids.size   = bswap_64(f_attr.ids.size);
			f_attr.ids.offset = bswap_64(f_attr.ids.offset);
			perf_event__attr_swap(&f_attr.attr);
		}

		/* remember the next attr's position: reading the ids seeks away */
		tmp = lseek(fd, 0, SEEK_CUR);
		evsel = perf_evsel__new(&f_attr.attr);

		if (evsel == NULL)
			goto out_delete_evlist;

		evsel->needs_swap = header->needs_swap;
		/*
		 * Do it before so that if perf_evsel__alloc_id fails, this
		 * entry gets purged too at perf_evlist__delete().
		 */
		perf_evlist__add(session->evlist, evsel);

		nr_ids = f_attr.ids.size / sizeof(u64);
		/*
		 * We don't have the cpu and thread maps on the header, so
		 * for allocating the perf_sample_id table we fake 1 cpu and
		 * hattr->ids threads.
		 */
		if (perf_evsel__alloc_id(evsel, 1, nr_ids))
			goto out_delete_evlist;

		lseek(fd, f_attr.ids.offset, SEEK_SET);

		for (j = 0; j < nr_ids; j++) {
			if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
				goto out_errno;

			perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
		}

		lseek(fd, tmp, SEEK_SET);
	}

	perf_header__process_sections(header, fd, &session->tevent,
				      perf_file_section__process);

	if (perf_evlist__prepare_tracepoint_events(session->evlist,
						   session->tevent.pevent))
		goto out_delete_evlist;

	return 0;
out_errno:
	return -errno;

out_delete_evlist:
	perf_evlist__delete(session->evlist);
	session->evlist = NULL;
	return -ENOMEM;
}
3639 
3640 int perf_event__synthesize_attr(struct perf_tool *tool,
3641                                 struct perf_event_attr *attr, u32 ids, u64 *id,
3642                                 perf_event__handler_t process)
3643 {
3644         union perf_event *ev;
3645         size_t size;
3646         int err;
3647 
3648         size = sizeof(struct perf_event_attr);
3649         size = PERF_ALIGN(size, sizeof(u64));
3650         size += sizeof(struct perf_event_header);
3651         size += ids * sizeof(u64);
3652 
3653         ev = zalloc(size);
3654 
3655         if (ev == NULL)
3656                 return -ENOMEM;
3657 
3658         ev->attr.attr = *attr;
3659         memcpy(ev->attr.id, id, ids * sizeof(u64));
3660 
3661         ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
3662         ev->attr.header.size = (u16)size;
3663 
3664         if (ev->attr.header.size == size)
3665                 err = process(tool, ev, NULL, NULL);
3666         else
3667                 err = -E2BIG;
3668 
3669         free(ev);
3670 
3671         return err;
3672 }
3673 
/*
 * Synthesize every enabled header feature as a
 * PERF_RECORD_HEADER_FEATURE event (features without .synthesize are
 * skipped), followed by a HEADER_LAST_FEATURE terminator record.  The
 * payload is produced by each feature's ->write() into an in-memory
 * feat_fd buffer.  Returns 0 or the first failing process() result.
 */
int perf_event__synthesize_features(struct perf_tool *tool,
				    struct perf_session *session,
				    struct perf_evlist *evlist,
				    perf_event__handler_t process)
{
	struct perf_header *header = &session->header;
	struct feat_fd ff;
	struct feature_event *fe;
	size_t sz, sz_hdr;
	int feat, ret;

	sz_hdr = sizeof(fe->header);
	sz = sizeof(union perf_event);
	/* get a nice alignment */
	sz = PERF_ALIGN(sz, page_size);

	memset(&ff, 0, sizeof(ff));

	ff.buf = malloc(sz);
	if (!ff.buf)
		return -ENOMEM;

	ff.size = sz - sz_hdr;
	ff.ph = &session->header;

	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
		if (!feat_ops[feat].synthesize) {
			pr_debug("No record header feature for header :%d\n", feat);
			continue;
		}

		/* leave room for the feature_event preamble filled in below */
		ff.offset = sizeof(*fe);

		ret = feat_ops[feat].write(&ff, evlist);
		if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
			pr_debug("Error writing feature\n");
			continue;
		}
		/* ff.buf may have changed due to realloc in do_write() */
		fe = ff.buf;
		memset(fe, 0, sizeof(*fe));

		fe->feat_id = feat;
		fe->header.type = PERF_RECORD_HEADER_FEATURE;
		fe->header.size = ff.offset;

		ret = process(tool, ff.buf, NULL, NULL);
		if (ret) {
			free(ff.buf);
			return ret;
		}
	}

	/* Send HEADER_LAST_FEATURE mark. */
	fe = ff.buf;
	fe->feat_id	= HEADER_LAST_FEATURE;
	fe->header.type = PERF_RECORD_HEADER_FEATURE;
	fe->header.size = sizeof(*fe);

	ret = process(tool, ff.buf, NULL, NULL);

	free(ff.buf);
	return ret;
}
3738 
/*
 * Handle a PERF_RECORD_HEADER_FEATURE event in pipe mode: validate the
 * record, run the matching feat_ops[].process() hook on the inline
 * payload and optionally print the feature (controlled by
 * tool->show_feat_hdr).  Returns 0 on success or when the record is
 * benignly ignorable, -1 on an invalid feature id or a processing error.
 */
int perf_event__process_feature(struct perf_session *session,
                                union perf_event *event)
{
        struct perf_tool *tool = session->tool;
        struct feat_fd ff = { .fd = 0 };
        struct feature_event *fe = (struct feature_event *)event;
        int type = fe->header.type;
        u64 feat = fe->feat_id;

        if (type < 0 || type >= PERF_RECORD_HEADER_MAX) {
                pr_warning("invalid record type %d in pipe-mode\n", type);
                return 0;
        }
        /* HEADER_LAST_FEATURE is the end-of-stream marker, not a real feature. */
        if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) {
                /* NOTE(review): message says "record type" but the problem is the feature id. */
                pr_warning("invalid record type %d in pipe-mode\n", type);
                return -1;
        }

        if (!feat_ops[feat].process)
                return 0;

        /* Point the in-memory feat_fd at the payload that follows the header. */
        ff.buf  = (void *)fe->data;
        ff.size = event->header.size - sizeof(*fe);
        ff.ph = &session->header;

        if (feat_ops[feat].process(&ff, NULL))
                return -1;

        if (!feat_ops[feat].print || !tool->show_feat_hdr)
                return 0;

        /* full_only features are printed only when full info was requested. */
        if (!feat_ops[feat].full_only ||
            tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) {
                feat_ops[feat].print(&ff, stdout);
        } else {
                fprintf(stdout, "# %s info available, use -I to display\n",
                        feat_ops[feat].name);
        }

        return 0;
}
3780 
3781 static struct event_update_event *
3782 event_update_event__new(size_t size, u64 type, u64 id)
3783 {
3784         struct event_update_event *ev;
3785 
3786         size += sizeof(*ev);
3787         size  = PERF_ALIGN(size, sizeof(u64));
3788 
3789         ev = zalloc(size);
3790         if (ev) {
3791                 ev->header.type = PERF_RECORD_EVENT_UPDATE;
3792                 ev->header.size = (u16)size;
3793                 ev->type = type;
3794                 ev->id = id;
3795         }
3796         return ev;
3797 }
3798 
3799 int
3800 perf_event__synthesize_event_update_unit(struct perf_tool *tool,
3801                                          struct perf_evsel *evsel,
3802                                          perf_event__handler_t process)
3803 {
3804         struct event_update_event *ev;
3805         size_t size = strlen(evsel->unit);
3806         int err;
3807 
3808         ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]);
3809         if (ev == NULL)
3810                 return -ENOMEM;
3811 
3812         strlcpy(ev->data, evsel->unit, size + 1);
3813         err = process(tool, (union perf_event *)ev, NULL, NULL);
3814         free(ev);
3815         return err;
3816 }
3817 
3818 int
3819 perf_event__synthesize_event_update_scale(struct perf_tool *tool,
3820                                           struct perf_evsel *evsel,
3821                                           perf_event__handler_t process)
3822 {
3823         struct event_update_event *ev;
3824         struct event_update_event_scale *ev_data;
3825         int err;
3826 
3827         ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]);
3828         if (ev == NULL)
3829                 return -ENOMEM;
3830 
3831         ev_data = (struct event_update_event_scale *) ev->data;
3832         ev_data->scale = evsel->scale;
3833         err = process(tool, (union perf_event*) ev, NULL, NULL);
3834         free(ev);
3835         return err;
3836 }
3837 
3838 int
3839 perf_event__synthesize_event_update_name(struct perf_tool *tool,
3840                                          struct perf_evsel *evsel,
3841                                          perf_event__handler_t process)
3842 {
3843         struct event_update_event *ev;
3844         size_t len = strlen(evsel->name);
3845         int err;
3846 
3847         ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]);
3848         if (ev == NULL)
3849                 return -ENOMEM;
3850 
3851         strlcpy(ev->data, evsel->name, len + 1);
3852         err = process(tool, (union perf_event*) ev, NULL, NULL);
3853         free(ev);
3854         return err;
3855 }
3856 
3857 int
3858 perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
3859                                         struct perf_evsel *evsel,
3860                                         perf_event__handler_t process)
3861 {
3862         size_t size = sizeof(struct event_update_event);
3863         struct event_update_event *ev;
3864         int max, err;
3865         u16 type;
3866 
3867         if (!evsel->own_cpus)
3868                 return 0;
3869 
3870         ev = cpu_map_data__alloc(evsel->own_cpus, &size, &type, &max);
3871         if (!ev)
3872                 return -ENOMEM;
3873 
3874         ev->header.type = PERF_RECORD_EVENT_UPDATE;
3875         ev->header.size = (u16)size;
3876         ev->type = PERF_EVENT_UPDATE__CPUS;
3877         ev->id   = evsel->id[0];
3878 
3879         cpu_map_data__synthesize((struct cpu_map_data *) ev->data,
3880                                  evsel->own_cpus,
3881                                  type, max);
3882 
3883         err = process(tool, (union perf_event*) ev, NULL, NULL);
3884         free(ev);
3885         return err;
3886 }
3887 
3888 size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
3889 {
3890         struct event_update_event *ev = &event->event_update;
3891         struct event_update_event_scale *ev_scale;
3892         struct event_update_event_cpus *ev_cpus;
3893         struct cpu_map *map;
3894         size_t ret;
3895 
3896         ret = fprintf(fp, "\n... id:    %" PRIu64 "\n", ev->id);
3897 
3898         switch (ev->type) {
3899         case PERF_EVENT_UPDATE__SCALE:
3900                 ev_scale = (struct event_update_event_scale *) ev->data;
3901                 ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
3902                 break;
3903         case PERF_EVENT_UPDATE__UNIT:
3904                 ret += fprintf(fp, "... unit:  %s\n", ev->data);
3905                 break;
3906         case PERF_EVENT_UPDATE__NAME:
3907                 ret += fprintf(fp, "... name:  %s\n", ev->data);
3908                 break;
3909         case PERF_EVENT_UPDATE__CPUS:
3910                 ev_cpus = (struct event_update_event_cpus *) ev->data;
3911                 ret += fprintf(fp, "... ");
3912 
3913                 map = cpu_map__new_data(&ev_cpus->cpus);
3914                 if (map)
3915                         ret += cpu_map__fprintf(map, fp);
3916                 else
3917                         ret += fprintf(fp, "failed to get cpus\n");
3918                 break;
3919         default:
3920                 ret += fprintf(fp, "... unknown type\n");
3921                 break;
3922         }
3923 
3924         return ret;
3925 }
3926 
3927 int perf_event__synthesize_attrs(struct perf_tool *tool,
3928                                  struct perf_evlist *evlist,
3929                                  perf_event__handler_t process)
3930 {
3931         struct perf_evsel *evsel;
3932         int err = 0;
3933 
3934         evlist__for_each_entry(evlist, evsel) {
3935                 err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids,
3936                                                   evsel->id, process);
3937                 if (err) {
3938                         pr_debug("failed to create perf header attribute\n");
3939                         return err;
3940                 }
3941         }
3942 
3943         return err;
3944 }
3945 
3946 static bool has_unit(struct perf_evsel *counter)
3947 {
3948         return counter->unit && *counter->unit;
3949 }
3950 
3951 static bool has_scale(struct perf_evsel *counter)
3952 {
3953         return counter->scale != 1;
3954 }
3955 
3956 int perf_event__synthesize_extra_attr(struct perf_tool *tool,
3957                                       struct perf_evlist *evsel_list,
3958                                       perf_event__handler_t process,
3959                                       bool is_pipe)
3960 {
3961         struct perf_evsel *counter;
3962         int err;
3963 
3964         /*
3965          * Synthesize other events stuff not carried within
3966          * attr event - unit, scale, name
3967          */
3968         evlist__for_each_entry(evsel_list, counter) {
3969                 if (!counter->supported)
3970                         continue;
3971 
3972                 /*
3973                  * Synthesize unit and scale only if it's defined.
3974                  */
3975                 if (has_unit(counter)) {
3976                         err = perf_event__synthesize_event_update_unit(tool, counter, process);
3977                         if (err < 0) {
3978                                 pr_err("Couldn't synthesize evsel unit.\n");
3979                                 return err;
3980                         }
3981                 }
3982 
3983                 if (has_scale(counter)) {
3984                         err = perf_event__synthesize_event_update_scale(tool, counter, process);
3985                         if (err < 0) {
3986                                 pr_err("Couldn't synthesize evsel counter.\n");
3987                                 return err;
3988                         }
3989                 }
3990 
3991                 if (counter->own_cpus) {
3992                         err = perf_event__synthesize_event_update_cpus(tool, counter, process);
3993                         if (err < 0) {
3994                                 pr_err("Couldn't synthesize evsel cpus.\n");
3995                                 return err;
3996                         }
3997                 }
3998 
3999                 /*
4000                  * Name is needed only for pipe output,
4001                  * perf.data carries event names.
4002                  */
4003                 if (is_pipe) {
4004                         err = perf_event__synthesize_event_update_name(tool, counter, process);
4005                         if (err < 0) {
4006                                 pr_err("Couldn't synthesize evsel name.\n");
4007                                 return err;
4008                         }
4009                 }
4010         }
4011         return 0;
4012 }
4013 
/*
 * Handle a PERF_RECORD_HEADER_ATTR pipe event: create an evsel from the
 * embedded perf_event_attr, add it to *pevlist (allocating the evlist on
 * first use) and register the sample ids that trail the attr in the
 * record.  Returns 0 on success, -ENOMEM on allocation failure.
 */
int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
                             union perf_event *event,
                             struct perf_evlist **pevlist)
{
        u32 i, ids, n_ids;
        struct perf_evsel *evsel;
        struct perf_evlist *evlist = *pevlist;

        if (evlist == NULL) {
                *pevlist = evlist = perf_evlist__new();
                if (evlist == NULL)
                        return -ENOMEM;
        }

        evsel = perf_evsel__new(&event->attr.attr);
        if (evsel == NULL)
                return -ENOMEM;

        /* The evlist takes ownership of evsel from here on. */
        perf_evlist__add(evlist, evsel);

        /* The id array is whatever part of the record follows attr.id's offset. */
        ids = event->header.size;
        ids -= (void *)&event->attr.id - (void *)event;
        n_ids = ids / sizeof(u64);
        /*
         * We don't have the cpu and thread maps on the header, so
         * for allocating the perf_sample_id table we fake 1 cpu and
         * hattr->ids threads.
         */
        if (perf_evsel__alloc_id(evsel, 1, n_ids))
                return -ENOMEM;

        for (i = 0; i < n_ids; i++) {
                perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
        }

        return 0;
}
4051 
4052 int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
4053                                      union perf_event *event,
4054                                      struct perf_evlist **pevlist)
4055 {
4056         struct event_update_event *ev = &event->event_update;
4057         struct event_update_event_scale *ev_scale;
4058         struct event_update_event_cpus *ev_cpus;
4059         struct perf_evlist *evlist;
4060         struct perf_evsel *evsel;
4061         struct cpu_map *map;
4062 
4063         if (!pevlist || *pevlist == NULL)
4064                 return -EINVAL;
4065 
4066         evlist = *pevlist;
4067 
4068         evsel = perf_evlist__id2evsel(evlist, ev->id);
4069         if (evsel == NULL)
4070                 return -EINVAL;
4071 
4072         switch (ev->type) {
4073         case PERF_EVENT_UPDATE__UNIT:
4074                 evsel->unit = strdup(ev->data);
4075                 break;
4076         case PERF_EVENT_UPDATE__NAME:
4077                 evsel->name = strdup(ev->data);
4078                 break;
4079         case PERF_EVENT_UPDATE__SCALE:
4080                 ev_scale = (struct event_update_event_scale *) ev->data;
4081                 evsel->scale = ev_scale->scale;
4082                 break;
4083         case PERF_EVENT_UPDATE__CPUS:
4084                 ev_cpus = (struct event_update_event_cpus *) ev->data;
4085 
4086                 map = cpu_map__new_data(&ev_cpus->cpus);
4087                 if (map)
4088                         evsel->own_cpus = map;
4089                 else
4090                         pr_err("failed to get event_update cpus\n");
4091         default:
4092                 break;
4093         }
4094 
4095         return 0;
4096 }
4097 
/*
 * Synthesize a PERF_RECORD_HEADER_TRACING_DATA event for pipe output:
 * a size-announcing header event, followed by the tracing data itself
 * (copied from a temp file by tracing_data_put()), followed by padding
 * to a u64 boundary.  The three steps must happen in exactly this order
 * because the pipe cannot be seeked.
 * Returns the u64-aligned size of the tracing data on success, -1 on
 * failure to gather the data or write the padding.
 */
int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
                                        struct perf_evlist *evlist,
                                        perf_event__handler_t process)
{
        union perf_event ev;
        struct tracing_data *tdata;
        ssize_t size = 0, aligned_size = 0, padding;
        struct feat_fd ff;
        int err __maybe_unused = 0;

        /*
         * We are going to store the size of the data followed
         * by the data contents. Since the fd descriptor is a pipe,
         * we cannot seek back to store the size of the data once
         * we know it. Instead we:
         *
         * - write the tracing data to the temp file
         * - get/write the data size to pipe
         * - write the tracing data from the temp file
         *   to the pipe
         */
        tdata = tracing_data_get(&evlist->entries, fd, true);
        if (!tdata)
                return -1;

        memset(&ev, 0, sizeof(ev));

        ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
        size = tdata->size;
        aligned_size = PERF_ALIGN(size, sizeof(u64));
        padding = aligned_size - size;
        ev.tracing_data.header.size = sizeof(ev.tracing_data);
        /* The announced size includes the padding written below. */
        ev.tracing_data.size = aligned_size;

        /* NOTE(review): the return value of process() is ignored here — confirm intent. */
        process(tool, &ev, NULL, NULL);

        /*
         * The put function will copy all the tracing data
         * stored in temp file to the pipe.
         */
        tracing_data_put(tdata);

        /* Pad the stream out to the aligned size announced above. */
        ff = (struct feat_fd){ .fd = fd };
        if (write_padded(&ff, NULL, 0, padding))
                return -1;

        return aligned_size;
}
4146 
/*
 * Handle a PERF_RECORD_HEADER_TRACING_DATA event: parse the tracing data
 * that follows the event in the perf.data stream via trace_report(),
 * consume (and in repipe mode forward) the alignment padding, and prime
 * the evlist's tracepoint events from the parsed data.
 * Returns the number of bytes consumed (data + padding) on success,
 * -1 on read/repipe errors or a size mismatch.
 */
int perf_event__process_tracing_data(struct perf_session *session,
                                     union perf_event *event)
{
        ssize_t size_read, padding, size = event->tracing_data.size;
        int fd = perf_data__fd(session->data);
        off_t offset = lseek(fd, 0, SEEK_CUR);
        char buf[BUFSIZ];

        /* setup for reading amidst mmap */
        lseek(fd, offset + sizeof(struct tracing_data_event),
              SEEK_SET);

        size_read = trace_report(fd, &session->tevent,
                                 session->repipe);
        /* The writer padded the data to a u64 boundary; skip that padding. */
        padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;

        if (readn(fd, buf, padding) < 0) {
                pr_err("%s: reading input file", __func__);
                return -1;
        }
        if (session->repipe) {
                /* Forward the padding so the output stream stays aligned too. */
                int retw = write(STDOUT_FILENO, buf, padding);
                if (retw <= 0 || retw != padding) {
                        pr_err("%s: repiping tracing data padding", __func__);
                        return -1;
                }
        }

        /* The event announced an aligned size; what we read must match it. */
        if (size_read + padding != size) {
                pr_err("%s: tracing data size mismatch", __func__);
                return -1;
        }

        perf_evlist__prepare_tracepoint_events(session->evlist,
                                               session->tevent.pevent);

        return size_read + padding;
}
4185 
4186 int perf_event__synthesize_build_id(struct perf_tool *tool,
4187                                     struct dso *pos, u16 misc,
4188                                     perf_event__handler_t process,
4189                                     struct machine *machine)
4190 {
4191         union perf_event ev;
4192         size_t len;
4193         int err = 0;
4194 
4195         if (!pos->hit)
4196                 return err;
4197 
4198         memset(&ev, 0, sizeof(ev));
4199 
4200         len = pos->long_name_len + 1;
4201         len = PERF_ALIGN(len, NAME_ALIGN);
4202         memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
4203         ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
4204         ev.build_id.header.misc = misc;
4205         ev.build_id.pid = machine->pid;
4206         ev.build_id.header.size = sizeof(ev.build_id) + len;
4207         memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
4208 
4209         err = process(tool, &ev, NULL, machine);
4210 
4211         return err;
4212 }
4213 
4214 int perf_event__process_build_id(struct perf_session *session,
4215                                  union perf_event *event)
4216 {
4217         __event_process_build_id(&event->build_id,
4218                                  event->build_id.filename,
4219                                  session);
4220         return 0;
4221 }
4222 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp