// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/kernel.h>
#include <traceevent/event-parse.h>
#include <api/fs/fs.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "memswap.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
#include "stat.h"

static int perf_session__deliver_event(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_sample *sample,
                                       struct perf_tool *tool,
                                       u64 file_offset);

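/*
 * Read and validate the file header. Pipe-mode and HEADER_STAT data skip
 * the evlist consistency checks below; in pipe mode the attributes only
 * arrive later, as PERF_RECORD_HEADER_ATTR events.
 */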
static int perf_session__open(struct perf_session *session)
{
        struct perf_data_file *file = session->file;

        if (perf_session__read_header(session) < 0) {
                pr_err("incompatible file format (rerun with -v to learn more)\n");
                return -1;
        }

        if (perf_data_file__is_pipe(file))
                return 0;

        if (perf_header__has_feat(&session->header, HEADER_STAT))
                return 0;

        if (!perf_evlist__valid_sample_type(session->evlist)) {
                pr_err("non matching sample_type\n");
                return -1;
        }

        if (!perf_evlist__valid_sample_id_all(session->evlist)) {
                pr_err("non matching sample_id_all\n");
                return -1;
        }

        if (!perf_evlist__valid_read_format(session->evlist)) {
                pr_err("non matching read_format\n");
                return -1;
        }

        return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
        u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

        machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
        int ret = machine__create_kernel_maps(&session->machines.host);

        if (ret >= 0)
                ret = machines__create_guest_kernel_maps(&session->machines);
        return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
        machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
        struct perf_evsel *evsel;

        evlist__for_each_entry(session->evlist, evsel) {
                if (evsel->attr.comm_exec)
                        return true;
        }

        return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
        bool comm_exec = perf_session__has_comm_exec(session);

        machines__set_comm_exec(&session->machines, comm_exec);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
                                         struct ordered_event *event)
{
        struct perf_sample sample;
        struct perf_session *session = container_of(oe, struct perf_session,
                                                    ordered_events);
        int ret = perf_evlist__parse_sample(session->evlist, event->event, &sample);

        if (ret) {
                pr_err("Can't parse sample, err = %d\n", ret);
                return ret;
        }

        return perf_session__deliver_event(session, event->event, &sample,
                                           session->tool, event->file_offset);
}

struct perf_session *perf_session__new(struct perf_data_file *file,
                                       bool repipe, struct perf_tool *tool)
{
        struct perf_session *session = zalloc(sizeof(*session));

        if (!session)
                goto out;

        session->repipe = repipe;
        session->tool   = tool;
        INIT_LIST_HEAD(&session->auxtrace_index);
        machines__init(&session->machines);
        ordered_events__init(&session->ordered_events, ordered_events__deliver_event);

        if (file) {
                if (perf_data_file__open(file))
                        goto out_delete;

                session->file = file;

                if (perf_data_file__is_read(file)) {
                        if (perf_session__open(session) < 0)
                                goto out_close;

                        /*
                         * Set session attributes that are present in
                         * perf.data but not in pipe mode.
                         */
                        if (!file->is_pipe) {
                                perf_session__set_id_hdr_size(session);
                                perf_session__set_comm_exec(session);
                        }
                }
        } else {
                session->machines.host.env = &perf_env;
        }

        if (!file || perf_data_file__is_write(file)) {
                /*
                 * In O_RDONLY mode this will be performed when reading the
                 * kernel MMAP event, in perf_event__process_mmap().
                 */
                if (perf_session__create_kernel_maps(session) < 0)
                        pr_warning("Cannot read kernel map\n");
        }

        /*
         * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
         * processed, so perf_evlist__sample_id_all is not meaningful here.
         */
        if ((!file || !file->is_pipe) && tool && tool->ordering_requires_timestamps &&
            tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
                dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
                tool->ordered_events = false;
        }

        return session;

 out_close:
        perf_data_file__close(file);
 out_delete:
        perf_session__delete(session);
 out:
        return NULL;
}
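
/*
 * A minimal read-mode consumer might look like the sketch below
 * (my_sample_cb is a hypothetical callback, not part of this file):
 *
 *      struct perf_tool tool = {
 *              .sample         = my_sample_cb,
 *              .ordered_events = true,
 *      };
 *      struct perf_data_file file = {
 *              .path = "perf.data",
 *              .mode = PERF_DATA_MODE_READ,
 *      };
 *      struct perf_session *session = perf_session__new(&file, false, &tool);
 *
 *      if (session) {
 *              perf_session__process_events(session);
 *              perf_session__delete(session);
 *      }
 *
 * Unset struct perf_tool callbacks are filled in with the stubs below by
 * perf_tool__fill_defaults().
 */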

static void perf_session__delete_threads(struct perf_session *session)
{
        machine__delete_threads(&session->machines.host);
}

void perf_session__delete(struct perf_session *session)
{
        if (session == NULL)
                return;
        auxtrace__free(session);
        auxtrace_index__free(&session->auxtrace_index);
        perf_session__destroy_kernel_maps(session);
        perf_session__delete_threads(session);
        perf_env__exit(&session->header.env);
        machines__exit(&session->machines);
        if (session->file)
                perf_data_file__close(session->file);
        free(session);
}

static int process_event_synth_tracing_data_stub(struct perf_tool *tool
                                                 __maybe_unused,
                                                 union perf_event *event
                                                 __maybe_unused,
                                                 struct perf_session *session
                                                 __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
                                         union perf_event *event __maybe_unused,
                                         struct perf_evlist **pevlist
                                         __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
                                                 union perf_event *event __maybe_unused,
                                                 struct perf_evlist **pevlist
                                                 __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_event_update(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
                                     union perf_event *event __maybe_unused,
                                     struct perf_sample *sample __maybe_unused,
                                     struct perf_evsel *evsel __maybe_unused,
                                     struct machine *machine __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
                              union perf_event *event __maybe_unused,
                              struct perf_sample *sample __maybe_unused,
                              struct machine *machine __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
                                       union perf_event *event __maybe_unused,
                                       struct ordered_events *oe __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round(struct perf_tool *tool,
                                  union perf_event *event,
                                  struct ordered_events *oe);

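/*
 * Consume and discard n bytes from fd. Used for AUXTRACE data in pipe
 * mode, where the input cannot simply be lseek'ed past.
 */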
static int skipn(int fd, off_t n)
{
        char buf[4096];
        ssize_t ret;

        while (n > 0) {
                ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
                if (ret <= 0)
                        return ret;
                n -= ret;
        }

        return 0;
}

static s64 process_event_auxtrace_stub(struct perf_tool *tool __maybe_unused,
                                       union perf_event *event,
                                       struct perf_session *session)
{
        dump_printf(": unhandled!\n");
        if (perf_data_file__is_pipe(session->file))
                skipn(perf_data_file__fd(session->file), event->auxtrace.size);
        return event->auxtrace.size;
}

static int process_event_op2_stub(struct perf_tool *tool __maybe_unused,
                                  union perf_event *event __maybe_unused,
                                  struct perf_session *session __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_thread_map_stub(struct perf_tool *tool __maybe_unused,
                                  union perf_event *event __maybe_unused,
                                  struct perf_session *session __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_thread_map(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_cpu_map_stub(struct perf_tool *tool __maybe_unused,
                               union perf_event *event __maybe_unused,
                               struct perf_session *session __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_cpu_map(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_stat_config_stub(struct perf_tool *tool __maybe_unused,
                                   union perf_event *event __maybe_unused,
                                   struct perf_session *session __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_stat_config(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_stat_stub(struct perf_tool *tool __maybe_unused,
                             union perf_event *event __maybe_unused,
                             struct perf_session *perf_session
                             __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_stat(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_stat_round_stub(struct perf_tool *tool __maybe_unused,
                                   union perf_event *event __maybe_unused,
                                   struct perf_session *perf_session
                                   __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_stat_round(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

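/*
 * Fill every unset callback with a default or stub handler, so that the
 * dispatch code in machines__deliver_event() and
 * perf_session__process_user_event() can call them unconditionally.
 */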
void perf_tool__fill_defaults(struct perf_tool *tool)
{
        if (tool->sample == NULL)
                tool->sample = process_event_sample_stub;
        if (tool->mmap == NULL)
                tool->mmap = process_event_stub;
        if (tool->mmap2 == NULL)
                tool->mmap2 = process_event_stub;
        if (tool->comm == NULL)
                tool->comm = process_event_stub;
        if (tool->namespaces == NULL)
                tool->namespaces = process_event_stub;
        if (tool->fork == NULL)
                tool->fork = process_event_stub;
        if (tool->exit == NULL)
                tool->exit = process_event_stub;
        if (tool->lost == NULL)
                tool->lost = perf_event__process_lost;
        if (tool->lost_samples == NULL)
                tool->lost_samples = perf_event__process_lost_samples;
        if (tool->aux == NULL)
                tool->aux = perf_event__process_aux;
        if (tool->itrace_start == NULL)
                tool->itrace_start = perf_event__process_itrace_start;
        if (tool->context_switch == NULL)
                tool->context_switch = perf_event__process_switch;
        if (tool->read == NULL)
                tool->read = process_event_sample_stub;
        if (tool->throttle == NULL)
                tool->throttle = process_event_stub;
        if (tool->unthrottle == NULL)
                tool->unthrottle = process_event_stub;
        if (tool->attr == NULL)
                tool->attr = process_event_synth_attr_stub;
        if (tool->event_update == NULL)
                tool->event_update = process_event_synth_event_update_stub;
        if (tool->tracing_data == NULL)
                tool->tracing_data = process_event_synth_tracing_data_stub;
        if (tool->build_id == NULL)
                tool->build_id = process_event_op2_stub;
        if (tool->finished_round == NULL) {
                if (tool->ordered_events)
                        tool->finished_round = process_finished_round;
                else
                        tool->finished_round = process_finished_round_stub;
        }
        if (tool->id_index == NULL)
                tool->id_index = process_event_op2_stub;
        if (tool->auxtrace_info == NULL)
                tool->auxtrace_info = process_event_op2_stub;
        if (tool->auxtrace == NULL)
                tool->auxtrace = process_event_auxtrace_stub;
        if (tool->auxtrace_error == NULL)
                tool->auxtrace_error = process_event_op2_stub;
        if (tool->thread_map == NULL)
                tool->thread_map = process_event_thread_map_stub;
        if (tool->cpu_map == NULL)
                tool->cpu_map = process_event_cpu_map_stub;
        if (tool->stat_config == NULL)
                tool->stat_config = process_event_stat_config_stub;
        if (tool->stat == NULL)
                tool->stat = process_stat_stub;
        if (tool->stat_round == NULL)
                tool->stat_round = process_stat_round_stub;
        if (tool->time_conv == NULL)
                tool->time_conv = process_event_op2_stub;
        if (tool->feature == NULL)
                tool->feature = process_event_op2_stub;
}

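/*
 * The trailing sample_id_all block is swapped as one flat run of u64s,
 * from 'data' to the end of the event; 32-bit halves packed inside it are
 * fixed up again when the sample is parsed.
 */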
static void swap_sample_id_all(union perf_event *event, void *data)
{
        void *end = (void *) event + event->header.size;
        int size = end - data;

        BUG_ON(size % sizeof(u64));
        mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
                                   bool sample_id_all __maybe_unused)
{
        struct perf_event_header *hdr = &event->header;
        mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
        event->comm.pid = bswap_32(event->comm.pid);
        event->comm.tid = bswap_32(event->comm.tid);

        if (sample_id_all) {
                void *data = &event->comm.comm;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__mmap_swap(union perf_event *event,
                                  bool sample_id_all)
{
        event->mmap.pid   = bswap_32(event->mmap.pid);
        event->mmap.tid   = bswap_32(event->mmap.tid);
        event->mmap.start = bswap_64(event->mmap.start);
        event->mmap.len   = bswap_64(event->mmap.len);
        event->mmap.pgoff = bswap_64(event->mmap.pgoff);

        if (sample_id_all) {
                void *data = &event->mmap.filename;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__mmap2_swap(union perf_event *event,
                                   bool sample_id_all)
{
        event->mmap2.pid   = bswap_32(event->mmap2.pid);
        event->mmap2.tid   = bswap_32(event->mmap2.tid);
        event->mmap2.start = bswap_64(event->mmap2.start);
        event->mmap2.len   = bswap_64(event->mmap2.len);
        event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
        event->mmap2.maj   = bswap_32(event->mmap2.maj);
        event->mmap2.min   = bswap_32(event->mmap2.min);
        event->mmap2.ino   = bswap_64(event->mmap2.ino);

        if (sample_id_all) {
                void *data = &event->mmap2.filename;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
        event->fork.pid  = bswap_32(event->fork.pid);
        event->fork.tid  = bswap_32(event->fork.tid);
        event->fork.ppid = bswap_32(event->fork.ppid);
        event->fork.ptid = bswap_32(event->fork.ptid);
        event->fork.time = bswap_64(event->fork.time);

        if (sample_id_all)
                swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
        event->read.pid          = bswap_32(event->read.pid);
        event->read.tid          = bswap_32(event->read.tid);
        event->read.value        = bswap_64(event->read.value);
        event->read.time_enabled = bswap_64(event->read.time_enabled);
        event->read.time_running = bswap_64(event->read.time_running);
        event->read.id           = bswap_64(event->read.id);

        if (sample_id_all)
                swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
        event->aux.aux_offset = bswap_64(event->aux.aux_offset);
        event->aux.aux_size   = bswap_64(event->aux.aux_size);
        event->aux.flags      = bswap_64(event->aux.flags);

        if (sample_id_all)
                swap_sample_id_all(event, &event->aux + 1);
}

static void perf_event__itrace_start_swap(union perf_event *event,
                                          bool sample_id_all)
{
        event->itrace_start.pid  = bswap_32(event->itrace_start.pid);
        event->itrace_start.tid  = bswap_32(event->itrace_start.tid);

        if (sample_id_all)
                swap_sample_id_all(event, &event->itrace_start + 1);
}

static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
        if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
                event->context_switch.next_prev_pid =
                                bswap_32(event->context_switch.next_prev_pid);
                event->context_switch.next_prev_tid =
                                bswap_32(event->context_switch.next_prev_tid);
        }

        if (sample_id_all)
                swap_sample_id_all(event, &event->context_switch + 1);
}

static void perf_event__throttle_swap(union perf_event *event,
                                      bool sample_id_all)
{
        event->throttle.time      = bswap_64(event->throttle.time);
        event->throttle.id        = bswap_64(event->throttle.id);
        event->throttle.stream_id = bswap_64(event->throttle.stream_id);

        if (sample_id_all)
                swap_sample_id_all(event, &event->throttle + 1);
}

static u8 revbyte(u8 b)
{
        int rev = (b >> 4) | ((b & 0xf) << 4);
        rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
        rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
        return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix: carry the perf_event_attr
 * bitfield flags in a separate data file FEAT_ section. Though this seems
 * to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
        unsigned i;

        for (i = 0; i < len; i++) {
                *p = revbyte(*p);
                p++;
        }
}
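
/*
 * revbyte() mirrors the bit order within a single byte: for example
 * revbyte(0x01) == 0x80 and revbyte(0xb2) == 0x4d (10110010 -> 01001101).
 * swap_bitfield() applies that reversal to each byte in turn.
 */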

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
        attr->type              = bswap_32(attr->type);
        attr->size              = bswap_32(attr->size);

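/*
 * attr->size records how large the producer's perf_event_attr was;
 * bswap_safe() makes sure we only swap fields that the (possibly older)
 * producer actually wrote.
 */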
#define bswap_safe(f, n)                                        \
        (attr->size > (offsetof(struct perf_event_attr, f) +    \
                       sizeof(attr->f) * (n)))
#define bswap_field(f, sz)                      \
do {                                            \
        if (bswap_safe(f, 0))                   \
                attr->f = bswap_##sz(attr->f);  \
} while(0)
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

        bswap_field_64(config);
        bswap_field_64(sample_period);
        bswap_field_64(sample_type);
        bswap_field_64(read_format);
        bswap_field_32(wakeup_events);
        bswap_field_32(bp_type);
        bswap_field_64(bp_addr);
        bswap_field_64(bp_len);
        bswap_field_64(branch_sample_type);
        bswap_field_64(sample_regs_user);
        bswap_field_32(sample_stack_user);
        bswap_field_32(aux_watermark);
        bswap_field_16(sample_max_stack);

        /*
         * The fields after read_format are bitfields. Check read_format
         * itself because offsetof() cannot be applied to a bitfield.
         */
        if (bswap_safe(read_format, 1))
                swap_bitfield((u8 *) (&attr->read_format + 1),
                              sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field
#undef bswap_safe
}

static void perf_event__hdr_attr_swap(union perf_event *event,
                                      bool sample_id_all __maybe_unused)
{
        size_t size;

        perf_event__attr_swap(&event->attr.attr);

        size = event->header.size;
        size -= (void *)&event->attr.id - (void *)event;
        mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_update_swap(union perf_event *event,
                                          bool sample_id_all __maybe_unused)
{
        event->event_update.type = bswap_64(event->event_update.type);
        event->event_update.id   = bswap_64(event->event_update.id);
}

static void perf_event__event_type_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        event->event_type.event_type.event_id =
                bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
                                          bool sample_id_all __maybe_unused)
{
        event->tracing_data.size = bswap_32(event->tracing_data.size);
}

static void perf_event__auxtrace_info_swap(union perf_event *event,
                                           bool sample_id_all __maybe_unused)
{
        size_t size;

        event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

        size = event->header.size;
        size -= (void *)&event->auxtrace_info.priv - (void *)event;
        mem_bswap_64(event->auxtrace_info.priv, size);
}

static void perf_event__auxtrace_swap(union perf_event *event,
                                      bool sample_id_all __maybe_unused)
{
        event->auxtrace.size      = bswap_64(event->auxtrace.size);
        event->auxtrace.offset    = bswap_64(event->auxtrace.offset);
        event->auxtrace.reference = bswap_64(event->auxtrace.reference);
        event->auxtrace.idx       = bswap_32(event->auxtrace.idx);
        event->auxtrace.tid       = bswap_32(event->auxtrace.tid);
        event->auxtrace.cpu       = bswap_32(event->auxtrace.cpu);
}

static void perf_event__auxtrace_error_swap(union perf_event *event,
                                            bool sample_id_all __maybe_unused)
{
        event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
        event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
        event->auxtrace_error.cpu  = bswap_32(event->auxtrace_error.cpu);
        event->auxtrace_error.pid  = bswap_32(event->auxtrace_error.pid);
        event->auxtrace_error.tid  = bswap_32(event->auxtrace_error.tid);
        event->auxtrace_error.ip   = bswap_64(event->auxtrace_error.ip);
}

static void perf_event__thread_map_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        unsigned i;

        event->thread_map.nr = bswap_64(event->thread_map.nr);

        for (i = 0; i < event->thread_map.nr; i++)
                event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}

static void perf_event__cpu_map_swap(union perf_event *event,
                                     bool sample_id_all __maybe_unused)
{
        struct cpu_map_data *data = &event->cpu_map.data;
        struct cpu_map_entries *cpus;
        struct cpu_map_mask *mask;
        unsigned i;

        data->type = bswap_64(data->type);

        switch (data->type) {
        case PERF_CPU_MAP__CPUS:
                cpus = (struct cpu_map_entries *)data->data;

                cpus->nr = bswap_16(cpus->nr);

                for (i = 0; i < cpus->nr; i++)
                        cpus->cpu[i] = bswap_16(cpus->cpu[i]);
                break;
        case PERF_CPU_MAP__MASK:
                mask = (struct cpu_map_mask *) data->data;

                mask->nr = bswap_16(mask->nr);
                mask->long_size = bswap_16(mask->long_size);

                switch (mask->long_size) {
                case 4: mem_bswap_32(&mask->mask, mask->nr); break;
                case 8: mem_bswap_64(&mask->mask, mask->nr); break;
                default:
                        pr_err("cpu_map swap: unsupported long size\n");
                }
        default:
                break;
        }
}

static void perf_event__stat_config_swap(union perf_event *event,
                                         bool sample_id_all __maybe_unused)
{
        u64 size;

        /* The event has not been swapped yet, so nr is in file byte order. */
        size  = bswap_64(event->stat_config.nr) * sizeof(event->stat_config.data[0]);
        size += 1; /* nr item itself */
        mem_bswap_64(&event->stat_config.nr, size);
}

static void perf_event__stat_swap(union perf_event *event,
                                  bool sample_id_all __maybe_unused)
{
        event->stat.id     = bswap_64(event->stat.id);
        event->stat.thread = bswap_32(event->stat.thread);
        event->stat.cpu    = bswap_32(event->stat.cpu);
        event->stat.val    = bswap_64(event->stat.val);
        event->stat.ena    = bswap_64(event->stat.ena);
        event->stat.run    = bswap_64(event->stat.run);
}

static void perf_event__stat_round_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        event->stat_round.type = bswap_64(event->stat_round.type);
        event->stat_round.time = bswap_64(event->stat_round.time);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
                                    bool sample_id_all);

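/*
 * Byte-swap handlers, indexed by event type. A NULL entry means the event
 * gets no in-place swap from event_swap() below.
 */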
static perf_event__swap_op perf_event__swap_ops[] = {
        [PERF_RECORD_MMAP]                = perf_event__mmap_swap,
        [PERF_RECORD_MMAP2]               = perf_event__mmap2_swap,
        [PERF_RECORD_COMM]                = perf_event__comm_swap,
        [PERF_RECORD_FORK]                = perf_event__task_swap,
        [PERF_RECORD_EXIT]                = perf_event__task_swap,
        [PERF_RECORD_LOST]                = perf_event__all64_swap,
        [PERF_RECORD_READ]                = perf_event__read_swap,
        [PERF_RECORD_THROTTLE]            = perf_event__throttle_swap,
        [PERF_RECORD_UNTHROTTLE]          = perf_event__throttle_swap,
        [PERF_RECORD_SAMPLE]              = perf_event__all64_swap,
        [PERF_RECORD_AUX]                 = perf_event__aux_swap,
        [PERF_RECORD_ITRACE_START]        = perf_event__itrace_start_swap,
        [PERF_RECORD_LOST_SAMPLES]        = perf_event__all64_swap,
        [PERF_RECORD_SWITCH]              = perf_event__switch_swap,
        [PERF_RECORD_SWITCH_CPU_WIDE]     = perf_event__switch_swap,
        [PERF_RECORD_HEADER_ATTR]         = perf_event__hdr_attr_swap,
        [PERF_RECORD_HEADER_EVENT_TYPE]   = perf_event__event_type_swap,
        [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
        [PERF_RECORD_HEADER_BUILD_ID]     = NULL,
        [PERF_RECORD_ID_INDEX]            = perf_event__all64_swap,
        [PERF_RECORD_AUXTRACE_INFO]       = perf_event__auxtrace_info_swap,
        [PERF_RECORD_AUXTRACE]            = perf_event__auxtrace_swap,
        [PERF_RECORD_AUXTRACE_ERROR]      = perf_event__auxtrace_error_swap,
        [PERF_RECORD_THREAD_MAP]          = perf_event__thread_map_swap,
        [PERF_RECORD_CPU_MAP]             = perf_event__cpu_map_swap,
        [PERF_RECORD_STAT_CONFIG]         = perf_event__stat_config_swap,
        [PERF_RECORD_STAT]                = perf_event__stat_swap,
        [PERF_RECORD_STAT_ROUND]          = perf_event__stat_round_swap,
        [PERF_RECORD_EVENT_UPDATE]        = perf_event__event_update_swap,
        [PERF_RECORD_TIME_CONV]           = perf_event__all64_swap,
        [PERF_RECORD_HEADER_MAX]          = NULL,
};

/*
 * When perf record finishes a pass over all buffers, it records this pseudo
 * event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and read in pass n + 1.
 * Hence when we start reading pass n + 2, we can safely flush all
 * events with timestamps below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush all events below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush all events below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool __maybe_unused,
                                  union perf_event *event __maybe_unused,
                                  struct ordered_events *oe)
{
        if (dump_trace)
                fprintf(stdout, "\n");
        return ordered_events__flush(oe, OE_FLUSH__ROUND);
}

int perf_session__queue_event(struct perf_session *s, union perf_event *event,
                              struct perf_sample *sample, u64 file_offset)
{
        return ordered_events__queue(&s->ordered_events, event, sample, file_offset);
}
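
/*
 * A tool opts into this reordering by setting tool->ordered_events (and
 * usually tool->ordering_requires_timestamps); perf_tool__fill_defaults()
 * then installs process_finished_round(), so events queued above get
 * flushed in timestamp order each time a PERF_RECORD_FINISHED_ROUND
 * pseudo event arrives.
 */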

static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
        struct ip_callchain *callchain = sample->callchain;
        struct branch_stack *lbr_stack = sample->branch_stack;
        u64 kernel_callchain_nr = callchain->nr;
        unsigned int i;

        for (i = 0; i < kernel_callchain_nr; i++) {
                if (callchain->ips[i] == PERF_CONTEXT_USER)
                        break;
        }

        if ((i != kernel_callchain_nr) && lbr_stack->nr) {
                u64 total_nr;
                /*
                 * The LBR callstack only captures the user call chain;
                 * i is the number of kernel call chain entries, and the
                 * extra 1 accounts for the PERF_CONTEXT_USER marker.
                 *
                 * The user call chain is stored in LBR registers.
                 * LBRs are register pairs: the caller is stored in the
                 * "from" register, while the callee is stored in the
                 * "to" register.
                 * For example, for the call stack
                 * "A"->"B"->"C"->"D",
                 * the LBR registers will record
                 * "C"->"D", "B"->"C", "A"->"B".
                 * So only the first "to" register and all "from"
                 * registers are needed to reconstruct the whole stack.
                 */
                total_nr = i + 1 + lbr_stack->nr + 1;
                kernel_callchain_nr = i + 1;

                printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

                for (i = 0; i < kernel_callchain_nr; i++)
                        printf("..... %2d: %016" PRIx64 "\n",
                               i, callchain->ips[i]);

                printf("..... %2d: %016" PRIx64 "\n",
                       (int)(kernel_callchain_nr), lbr_stack->entries[0].to);
                for (i = 0; i < lbr_stack->nr; i++)
                        printf("..... %2d: %016" PRIx64 "\n",
                               (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from);
        }
}

static void callchain__printf(struct perf_evsel *evsel,
                              struct perf_sample *sample)
{
        unsigned int i;
        struct ip_callchain *callchain = sample->callchain;

        if (perf_evsel__has_branch_callstack(evsel))
                callchain__lbr_callstack_printf(sample);

        printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

        for (i = 0; i < callchain->nr; i++)
                printf("..... %2d: %016" PRIx64 "\n",
                       i, callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample)
{
        uint64_t i;

        printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

        for (i = 0; i < sample->branch_stack->nr; i++) {
                struct branch_entry *e = &sample->branch_stack->entries[i];

                printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
                        i, e->from, e->to,
                        (unsigned short)e->flags.cycles,
                        e->flags.mispred ? "M" : " ",
                        e->flags.predicted ? "P" : " ",
                        e->flags.abort ? "A" : " ",
                        e->flags.in_tx ? "T" : " ",
                        (unsigned)e->flags.reserved);
        }
}

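/*
 * The regs array is packed: it holds one u64 for each bit set in mask, in
 * ascending bit order, which is why 'i' advances independently of the
 * register id 'rid'.
 */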
static void regs_dump__printf(u64 mask, u64 *regs)
{
        unsigned rid, i = 0;

        for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
                u64 val = regs[i++];

                printf(".... %-5s 0x%" PRIx64 "\n",
                       perf_reg_name(rid), val);
        }
}

static const char *regs_abi[] = {
        [PERF_SAMPLE_REGS_ABI_NONE] = "none",
        [PERF_SAMPLE_REGS_ABI_32] = "32-bit",
        [PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
        if (d->abi > PERF_SAMPLE_REGS_ABI_64)
                return "unknown";

        return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs)
{
        u64 mask = regs->mask;

        printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
               type,
               mask,
               regs_dump_abi(regs));

        regs_dump__printf(mask, regs->regs);
}

static void regs_user__printf(struct perf_sample *sample)
{
        struct regs_dump *user_regs = &sample->user_regs;

        if (user_regs->regs)
                regs__printf("user", user_regs);
}

static void regs_intr__printf(struct perf_sample *sample)
{
        struct regs_dump *intr_regs = &sample->intr_regs;

        if (intr_regs->regs)
                regs__printf("intr", intr_regs);
}

static void stack_user__printf(struct stack_dump *dump)
{
        printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
               dump->size, dump->offset);
}

static void perf_evlist__print_tstamp(struct perf_evlist *evlist,
                                      union perf_event *event,
                                      struct perf_sample *sample)
{
        u64 sample_type = __perf_evlist__combined_sample_type(evlist);

        if (event->header.type != PERF_RECORD_SAMPLE &&
            !perf_evlist__sample_id_all(evlist)) {
                fputs("-1 -1 ", stdout);
                return;
        }

        if ((sample_type & PERF_SAMPLE_CPU))
                printf("%u ", sample->cpu);

        if (sample_type & PERF_SAMPLE_TIME)
                printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
        printf("... sample_read:\n");

        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                printf("...... time enabled %016" PRIx64 "\n",
                       sample->read.time_enabled);

        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                printf("...... time running %016" PRIx64 "\n",
                       sample->read.time_running);

        if (read_format & PERF_FORMAT_GROUP) {
                u64 i;

                printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

                for (i = 0; i < sample->read.group.nr; i++) {
                        struct sample_read_value *value;

                        value = &sample->read.group.values[i];
                        printf("..... id %016" PRIx64
                               ", value %016" PRIx64 "\n",
                               value->id, value->value);
                }
        } else
                printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
                        sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct perf_evlist *evlist, union perf_event *event,
                       u64 file_offset, struct perf_sample *sample)
{
        if (!dump_trace)
                return;

        printf("\n%#" PRIx64 " [%#x]: event: %d\n",
               file_offset, event->header.size, event->header.type);

        trace_event(event);

        if (sample)
                perf_evlist__print_tstamp(evlist, event, sample);

        printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
               event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
                        struct perf_sample *sample)
{
        u64 sample_type;

        if (!dump_trace)
                return;

        printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
               event->header.misc, sample->pid, sample->tid, sample->ip,
               sample->period, sample->addr);

        sample_type = evsel->attr.sample_type;

        if (sample_type & PERF_SAMPLE_CALLCHAIN)
                callchain__printf(evsel, sample);

        if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !perf_evsel__has_branch_callstack(evsel))
                branch_stack__printf(sample);

        if (sample_type & PERF_SAMPLE_REGS_USER)
                regs_user__printf(sample);

        if (sample_type & PERF_SAMPLE_REGS_INTR)
                regs_intr__printf(sample);

        if (sample_type & PERF_SAMPLE_STACK_USER)
                stack_user__printf(&sample->user_stack);

        if (sample_type & PERF_SAMPLE_WEIGHT)
                printf("... weight: %" PRIu64 "\n", sample->weight);

        if (sample_type & PERF_SAMPLE_DATA_SRC)
                printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

        if (sample_type & PERF_SAMPLE_PHYS_ADDR)
                printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);

        if (sample_type & PERF_SAMPLE_TRANSACTION)
                printf("... transaction: %" PRIx64 "\n", sample->transaction);

        if (sample_type & PERF_SAMPLE_READ)
                sample_read__printf(sample, evsel->attr.read_format);
}

static void dump_read(struct perf_evsel *evsel, union perf_event *event)
{
        struct read_event *read_event = &event->read;
        u64 read_format;

        if (!dump_trace)
                return;

        printf(": %d %d %s %" PRIu64 "\n", event->read.pid, event->read.tid,
               evsel ? perf_evsel__name(evsel) : "FAIL",
               event->read.value);

        /* evsel may be NULL (the "FAIL" case above); don't dereference it. */
        if (!evsel)
                return;

        read_format = evsel->attr.read_format;

        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                printf("... time enabled : %" PRIu64 "\n", read_event->time_enabled);

        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                printf("... time running : %" PRIu64 "\n", read_event->time_running);

        if (read_format & PERF_FORMAT_ID)
                printf("... id           : %" PRIu64 "\n", read_event->id);
}

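/*
 * Guest samples are routed to the machine registered for the guest's pid;
 * if none exists, they fall back to the catch-all guest machine
 * (DEFAULT_GUEST_KERNEL_ID). Everything else belongs to the host machine.
 */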
static struct machine *machines__find_for_cpumode(struct machines *machines,
                                                  union perf_event *event,
                                                  struct perf_sample *sample)
{
        struct machine *machine;

        if (perf_guest &&
            ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
             (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
                u32 pid;

                if (event->header.type == PERF_RECORD_MMAP
                    || event->header.type == PERF_RECORD_MMAP2)
                        pid = event->mmap.pid;
                else
                        pid = sample->pid;

                machine = machines__find(machines, pid);
                if (!machine)
                        machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
                return machine;
        }

        return &machines->host;
}

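/*
 * PERF_SAMPLE_READ samples carry raw counter totals rather than periods,
 * so derive the period as the delta against the previous total cached in
 * the sample id.
 */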
static int deliver_sample_value(struct perf_evlist *evlist,
                                struct perf_tool *tool,
                                union perf_event *event,
                                struct perf_sample *sample,
                                struct sample_read_value *v,
                                struct machine *machine)
{
        struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);

        if (sid) {
                sample->id     = v->id;
                sample->period = v->value - sid->period;
                sid->period    = v->value;
        }

        if (!sid || sid->evsel == NULL) {
                ++evlist->stats.nr_unknown_id;
                return 0;
        }

        return tool->sample(tool, event, sample, sid->evsel, machine);
}

static int deliver_sample_group(struct perf_evlist *evlist,
                                struct perf_tool *tool,
                                union  perf_event *event,
                                struct perf_sample *sample,
                                struct machine *machine)
{
        int ret = -EINVAL;
        u64 i;

        for (i = 0; i < sample->read.group.nr; i++) {
                ret = deliver_sample_value(evlist, tool, event, sample,
                                           &sample->read.group.values[i],
                                           machine);
                if (ret)
                        break;
        }

        return ret;
}

static int
perf_evlist__deliver_sample(struct perf_evlist *evlist,
                            struct perf_tool *tool,
                            union  perf_event *event,
                            struct perf_sample *sample,
                            struct perf_evsel *evsel,
                            struct machine *machine)
{
        /* We know evsel != NULL. */
        u64 sample_type = evsel->attr.sample_type;
        u64 read_format = evsel->attr.read_format;

        /* Standard sample delivery. */
        if (!(sample_type & PERF_SAMPLE_READ))
                return tool->sample(tool, event, sample, evsel, machine);

        /* For PERF_SAMPLE_READ we have either single or group mode. */
        if (read_format & PERF_FORMAT_GROUP)
                return deliver_sample_group(evlist, tool, event, sample,
                                            machine);
        else
                return deliver_sample_value(evlist, tool, event, sample,
                                            &sample->read.one, machine);
}

static int machines__deliver_event(struct machines *machines,
                                   struct perf_evlist *evlist,
                                   union perf_event *event,
                                   struct perf_sample *sample,
                                   struct perf_tool *tool, u64 file_offset)
{
        struct perf_evsel *evsel;
        struct machine *machine;

        dump_event(evlist, event, file_offset, sample);

        evsel = perf_evlist__id2evsel(evlist, sample->id);

        machine = machines__find_for_cpumode(machines, event, sample);

        switch (event->header.type) {
        case PERF_RECORD_SAMPLE:
                if (evsel == NULL) {
                        ++evlist->stats.nr_unknown_id;
                        return 0;
                }
                dump_sample(evsel, event, sample);
                if (machine == NULL) {
                        ++evlist->stats.nr_unprocessable_samples;
                        return 0;
                }
                return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
        case PERF_RECORD_MMAP:
                return tool->mmap(tool, event, sample, machine);
        case PERF_RECORD_MMAP2:
                if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
                        ++evlist->stats.nr_proc_map_timeout;
                return tool->mmap2(tool, event, sample, machine);
        case PERF_RECORD_COMM:
                return tool->comm(tool, event, sample, machine);
        case PERF_RECORD_NAMESPACES:
                return tool->namespaces(tool, event, sample, machine);
        case PERF_RECORD_FORK:
                return tool->fork(tool, event, sample, machine);
        case PERF_RECORD_EXIT:
                return tool->exit(tool, event, sample, machine);
        case PERF_RECORD_LOST:
                if (tool->lost == perf_event__process_lost)
                        evlist->stats.total_lost += event->lost.lost;
                return tool->lost(tool, event, sample, machine);
        case PERF_RECORD_LOST_SAMPLES:
                if (tool->lost_samples == perf_event__process_lost_samples)
                        evlist->stats.total_lost_samples += event->lost_samples.lost;
                return tool->lost_samples(tool, event, sample, machine);
        case PERF_RECORD_READ:
                dump_read(evsel, event);
                return tool->read(tool, event, sample, evsel, machine);
        case PERF_RECORD_THROTTLE:
                return tool->throttle(tool, event, sample, machine);
        case PERF_RECORD_UNTHROTTLE:
                return tool->unthrottle(tool, event, sample, machine);
        case PERF_RECORD_AUX:
                if (tool->aux == perf_event__process_aux) {
                        if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
                                evlist->stats.total_aux_lost += 1;
                        if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
                                evlist->stats.total_aux_partial += 1;
                }
                return tool->aux(tool, event, sample, machine);
        case PERF_RECORD_ITRACE_START:
                return tool->itrace_start(tool, event, sample, machine);
        case PERF_RECORD_SWITCH:
        case PERF_RECORD_SWITCH_CPU_WIDE:
                return tool->context_switch(tool, event, sample, machine);
        default:
                ++evlist->stats.nr_unknown_events;
                return -1;
        }
}

static int perf_session__deliver_event(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_sample *sample,
                                       struct perf_tool *tool,
                                       u64 file_offset)
{
        int ret;

        ret = auxtrace__process_event(session, event, sample, tool);
        if (ret < 0)
                return ret;
        if (ret > 0)
                return 0;

        return machines__deliver_event(&session->machines, session->evlist,
                                       event, sample, tool, file_offset);
}

static s64 perf_session__process_user_event(struct perf_session *session,
                                            union perf_event *event,
                                            u64 file_offset)
{
        struct ordered_events *oe = &session->ordered_events;
        struct perf_tool *tool = session->tool;
        int fd = perf_data_file__fd(session->file);
        int err;

        dump_event(session->evlist, event, file_offset, NULL);

        /* These events are processed right away */
        switch (event->header.type) {
        case PERF_RECORD_HEADER_ATTR:
                err = tool->attr(tool, event, &session->evlist);
                if (err == 0) {
                        perf_session__set_id_hdr_size(session);
                        perf_session__set_comm_exec(session);
                }
                return err;
        case PERF_RECORD_EVENT_UPDATE:
                return tool->event_update(tool, event, &session->evlist);
        case PERF_RECORD_HEADER_EVENT_TYPE:
                /*
                 * Deprecated, but we need to handle it for the sake of old
                 * data files created in pipe mode.
                 */
1374                 return 0;
1375         case PERF_RECORD_HEADER_TRACING_DATA:
1376                 /* setup for reading amidst mmap */
1377                 lseek(fd, file_offset, SEEK_SET);
1378                 return tool->tracing_data(tool, event, session);
1379         case PERF_RECORD_HEADER_BUILD_ID:
1380                 return tool->build_id(tool, event, session);
1381         case PERF_RECORD_FINISHED_ROUND:
1382                 return tool->finished_round(tool, event, oe);
1383         case PERF_RECORD_ID_INDEX:
1384                 return tool->id_index(tool, event, session);
1385         case PERF_RECORD_AUXTRACE_INFO:
1386                 return tool->auxtrace_info(tool, event, session);
1387         case PERF_RECORD_AUXTRACE:
1388                 /* set up for reading amidst the mmap */
1389                 lseek(fd, file_offset + event->header.size, SEEK_SET);
1390                 return tool->auxtrace(tool, event, session);
1391         case PERF_RECORD_AUXTRACE_ERROR:
1392                 perf_session__auxtrace_error_inc(session, event);
1393                 return tool->auxtrace_error(tool, event, session);
1394         case PERF_RECORD_THREAD_MAP:
1395                 return tool->thread_map(tool, event, session);
1396         case PERF_RECORD_CPU_MAP:
1397                 return tool->cpu_map(tool, event, session);
1398         case PERF_RECORD_STAT_CONFIG:
1399                 return tool->stat_config(tool, event, session);
1400         case PERF_RECORD_STAT:
1401                 return tool->stat(tool, event, session);
1402         case PERF_RECORD_STAT_ROUND:
1403                 return tool->stat_round(tool, event, session);
1404         case PERF_RECORD_TIME_CONV:
1405                 session->time_conv = event->time_conv;
1406                 return tool->time_conv(tool, event, session);
1407         case PERF_RECORD_HEADER_FEATURE:
1408                 return tool->feature(tool, event, session);
1409         default:
1410                 return -EINVAL;
1411         }
1412 }
1413
1414 int perf_session__deliver_synth_event(struct perf_session *session,
1415                                       union perf_event *event,
1416                                       struct perf_sample *sample)
1417 {
1418         struct perf_evlist *evlist = session->evlist;
1419         struct perf_tool *tool = session->tool;
1420
1421         events_stats__inc(&evlist->stats, event->header.type);
1422
1423         if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1424                 return perf_session__process_user_event(session, event, 0);
1425
1426         return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
1427 }
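
/*
 * Usage sketch for perf_session__deliver_synth_event() above (editor's
 * illustration, not from the original sources): synthesizing a COMM
 * event in-process and handing it straight to the session, bypassing
 * the file reader. The pid value is hypothetical.
 *
 *	union perf_event ev = {
 *		.comm = {
 *			.header = {
 *				.type = PERF_RECORD_COMM,
 *				.size = sizeof(ev.comm),
 *			},
 *			.pid = pid,
 *			.tid = pid,
 *			.comm = "example",
 *		},
 *	};
 *	struct perf_sample sample = { .time = 0, };
 *
 *	perf_session__deliver_synth_event(session, &ev, &sample);
 */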
1428
1429 static void event_swap(union perf_event *event, bool sample_id_all)
1430 {
1431         perf_event__swap_op swap;
1432
1433         swap = perf_event__swap_ops[event->header.type];
1434         if (swap)
1435                 swap(event, sample_id_all);
1436 }
1437
1438 int perf_session__peek_event(struct perf_session *session, off_t file_offset,
1439                              void *buf, size_t buf_sz,
1440                              union perf_event **event_ptr,
1441                              struct perf_sample *sample)
1442 {
1443         union perf_event *event;
1444         size_t hdr_sz, rest;
1445         int fd;
1446
1447         if (session->one_mmap && !session->header.needs_swap) {
1448                 event = file_offset - session->one_mmap_offset +
1449                         session->one_mmap_addr;
1450                 goto out_parse_sample;
1451         }
1452
1453         if (perf_data_file__is_pipe(session->file))
1454                 return -1;
1455
1456         fd = perf_data_file__fd(session->file);
1457         hdr_sz = sizeof(struct perf_event_header);
1458
1459         if (buf_sz < hdr_sz)
1460                 return -1;
1461
1462         if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
1463             readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
1464                 return -1;
1465
1466         event = (union perf_event *)buf;
1467
1468         if (session->header.needs_swap)
1469                 perf_event_header__bswap(&event->header);
1470
1471         if (event->header.size < hdr_sz || event->header.size > buf_sz)
1472                 return -1;
1473
1474         rest = event->header.size - hdr_sz;
1475         buf += hdr_sz;
1476         if (readn(fd, buf, rest) != (ssize_t)rest)
1477                 return -1;
1478
1479         if (session->header.needs_swap)
1480                 event_swap(event, perf_evlist__sample_id_all(session->evlist));
1481
1482 out_parse_sample:
1483
1484         if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
1485             perf_evlist__parse_sample(session->evlist, event, sample))
1486                 return -1;
1487
1488         *event_ptr = event;
1489
1490         return 0;
1491 }
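
/*
 * Usage sketch for perf_session__peek_event() above (editor's
 * illustration): inspecting one event at a known file offset without
 * disturbing normal processing. PERF_SAMPLE_MAX_SIZE is only a
 * convenient upper bound here (assumed available from event.h); any
 * buffer that can hold event->header.size bytes will do, and "offset"
 * is a hypothetical file offset.
 *
 *	char buf[PERF_SAMPLE_MAX_SIZE];
 *	union perf_event *event;
 *	struct perf_sample sample;
 *
 *	if (perf_session__peek_event(session, offset, buf, sizeof(buf),
 *				     &event, &sample) == 0)
 *		pr_debug("peeked event type %u\n", event->header.type);
 */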
1492
1493 static s64 perf_session__process_event(struct perf_session *session,
1494                                        union perf_event *event, u64 file_offset)
1495 {
1496         struct perf_evlist *evlist = session->evlist;
1497         struct perf_tool *tool = session->tool;
1498         struct perf_sample sample;
1499         int ret;
1500
1501         if (session->header.needs_swap)
1502                 event_swap(event, perf_evlist__sample_id_all(evlist));
1503
1504         if (event->header.type >= PERF_RECORD_HEADER_MAX)
1505                 return -EINVAL;
1506
1507         events_stats__inc(&evlist->stats, event->header.type);
1508
1509         if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1510                 return perf_session__process_user_event(session, event, file_offset);
1511
1512         /*
1513          * For all kernel events we get the sample data
1514          */
1515         ret = perf_evlist__parse_sample(evlist, event, &sample);
1516         if (ret)
1517                 return ret;
1518
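        /*
         * Editor's note: an event whose timestamp is older than the last
         * flush cannot be queued (-ETIME); fall through and deliver it
         * directly instead.
         */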
1519         if (tool->ordered_events) {
1520                 ret = perf_session__queue_event(session, event, &sample, file_offset);
1521                 if (ret != -ETIME)
1522                         return ret;
1523         }
1524
1525         return perf_session__deliver_event(session, event, &sample, tool,
1526                                            file_offset);
1527 }
1528
1529 void perf_event_header__bswap(struct perf_event_header *hdr)
1530 {
1531         hdr->type = bswap_32(hdr->type);
1532         hdr->misc = bswap_16(hdr->misc);
1533         hdr->size = bswap_16(hdr->size);
1534 }
1535
1536 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
1537 {
1538         return machine__findnew_thread(&session->machines.host, -1, pid);
1539 }
1540
1541 int perf_session__register_idle_thread(struct perf_session *session)
1542 {
1543         struct thread *thread;
1544         int err = 0;
1545
1546         thread = machine__findnew_thread(&session->machines.host, 0, 0);
1547         if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
1548                 pr_err("problem inserting idle task.\n");
1549                 err = -1;
1550         }
1551
1552         if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) {
1553                 pr_err("problem setting namespaces for the idle task.\n");
1554                 err = -1;
1555         }
1556
1557         /* machine__findnew_thread() got the thread, so put it */
1558         thread__put(thread);
1559         return err;
1560 }
1561
1562 static void
1563 perf_session__warn_order(const struct perf_session *session)
1564 {
1565         const struct ordered_events *oe = &session->ordered_events;
1566         struct perf_evsel *evsel;
1567         bool should_warn = true;
1568
1569         evlist__for_each_entry(session->evlist, evsel) {
1570                 if (evsel->attr.write_backward)
1571                         should_warn = false;
1572         }
1573
1574         if (!should_warn)
1575                 return;
1576         if (oe->nr_unordered_events != 0)
1577                 ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
1578 }
1579
1580 static void perf_session__warn_about_errors(const struct perf_session *session)
1581 {
1582         const struct events_stats *stats = &session->evlist->stats;
1583
1584         if (session->tool->lost == perf_event__process_lost &&
1585             stats->nr_events[PERF_RECORD_LOST] != 0) {
1586                 ui__warning("Processed %u events and lost %u chunks!\n\n"
1587                             "Check IO/CPU overload!\n\n",
1588                             stats->nr_events[0],
1589                             stats->nr_events[PERF_RECORD_LOST]);
1590         }
1591
1592         if (session->tool->lost_samples == perf_event__process_lost_samples) {
1593                 double drop_rate;
1594
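                /* Warn only if more than 5% of all samples were lost. */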
1595                 drop_rate = (double)stats->total_lost_samples /
1596                             (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
1597                 if (drop_rate > 0.05) {
1598                         ui__warning("Processed %" PRIu64 " samples and lost %3.2f%% of them!\n\n",
1599                                     stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
1600                                     drop_rate * 100.0);
1601                 }
1602         }
1603
1604         if (session->tool->aux == perf_event__process_aux &&
1605             stats->total_aux_lost != 0) {
1606                 ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
1607                             stats->total_aux_lost,
1608                             stats->nr_events[PERF_RECORD_AUX]);
1609         }
1610
1611         if (session->tool->aux == perf_event__process_aux &&
1612             stats->total_aux_partial != 0) {
1613                 bool vmm_exclusive = false;
1614
1615                 (void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
1616                                        &vmm_exclusive);
1617
1618                 ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
1619                             "Are you running a KVM guest in the background?%s\n\n",
1620                             stats->total_aux_partial,
1621                             stats->nr_events[PERF_RECORD_AUX],
1622                             vmm_exclusive ?
1623                             "\nReloading kvm_intel module with vmm_exclusive=0\n"
1624                             "will reduce the gaps to only the guest's timeslices." :
1625                             "");
1626         }
1627
1628         if (stats->nr_unknown_events != 0) {
1629                 ui__warning("Found %u unknown events!\n\n"
1630                             "Is this an older tool processing a perf.data "
1631                             "file generated by a more recent tool?\n\n"
1632                             "If that is not the case, consider "
1633                             "reporting to linux-kernel@vger.kernel.org.\n\n",
1634                             stats->nr_unknown_events);
1635         }
1636
1637         if (stats->nr_unknown_id != 0) {
1638                 ui__warning("%u samples with id not present in the header\n",
1639                             stats->nr_unknown_id);
1640         }
1641
1642         if (stats->nr_invalid_chains != 0) {
1643                 ui__warning("Found invalid callchains!\n\n"
1644                             "%u out of %u events were discarded for this reason.\n\n"
1645                             "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
1646                             stats->nr_invalid_chains,
1647                             stats->nr_events[PERF_RECORD_SAMPLE]);
1648         }
1649
1650         if (stats->nr_unprocessable_samples != 0) {
1651                 ui__warning("%u unprocessable samples recorded.\n"
1652                             "Do you have a KVM guest running and not using 'perf kvm'?\n",
1653                             stats->nr_unprocessable_samples);
1654         }
1655
1656         perf_session__warn_order(session);
1657
1658         events_stats__auxtrace_error_warn(stats);
1659
1660         if (stats->nr_proc_map_timeout != 0) {
1661                 ui__warning("%u map information files for pre-existing threads were\n"
1662                             "not processed. If there are samples for those threads'\n"
1663                             "addresses, they will not be resolved. You can find out\n"
1664                             "which threads these are by running with -v and redirecting\n"
1665                             "the output to a file.\n"
1666                             "Is the time limit to process a proc map too short?\n"
1667                             "Increase it with --proc-map-timeout.\n",
1668                             stats->nr_proc_map_timeout);
1669         }
1670 }
1671
1672 static int perf_session__flush_thread_stack(struct thread *thread,
1673                                             void *p __maybe_unused)
1674 {
1675         return thread_stack__flush(thread);
1676 }
1677
1678 static int perf_session__flush_thread_stacks(struct perf_session *session)
1679 {
1680         return machines__for_each_thread(&session->machines,
1681                                          perf_session__flush_thread_stack,
1682                                          NULL);
1683 }
1684
1685 volatile int session_done;
1686
1687 static int __perf_session__process_pipe_events(struct perf_session *session)
1688 {
1689         struct ordered_events *oe = &session->ordered_events;
1690         struct perf_tool *tool = session->tool;
1691         int fd = perf_data_file__fd(session->file);
1692         union perf_event *event;
1693         uint32_t size, cur_size = 0;
1694         void *buf = NULL;
1695         s64 skip = 0;
1696         u64 head;
1697         ssize_t err;
1698         void *p;
1699
1700         perf_tool__fill_defaults(tool);
1701
1702         head = 0;
1703         cur_size = sizeof(union perf_event);
1704
1705         buf = malloc(cur_size);
1706         if (!buf)
1707                 return -errno;
1708         ordered_events__set_copy_on_queue(oe, true);
1709 more:
1710         event = buf;
1711         err = readn(fd, event, sizeof(struct perf_event_header));
1712         if (err <= 0) {
1713                 if (err == 0)
1714                         goto done;
1715
1716                 pr_err("failed to read event header\n");
1717                 goto out_err;
1718         }
1719
1720         if (session->header.needs_swap)
1721                 perf_event_header__bswap(&event->header);
1722
1723         size = event->header.size;
1724         if (size < sizeof(struct perf_event_header)) {
1725                 pr_err("bad event header size\n");
1726                 goto out_err;
1727         }
1728
1729         if (size > cur_size) {
1730                 void *new = realloc(buf, size);
1731                 if (!new) {
1732                         pr_err("failed to allocate memory to read event\n");
1733                         goto out_err;
1734                 }
1735                 buf = new;
1736                 cur_size = size;
1737                 event = buf;
1738         }
1739         p = event;
1740         p += sizeof(struct perf_event_header);
1741
1742         if (size > sizeof(struct perf_event_header)) {
1743                 err = readn(fd, p, size - sizeof(struct perf_event_header));
1744                 if (err <= 0) {
1745                         if (err == 0) {
1746                                 pr_err("unexpected end of event stream\n");
1747                                 goto done;
1748                         }
1749
1750                         pr_err("failed to read event data\n");
1751                         goto out_err;
1752                 }
1753         }
1754
1755         if ((skip = perf_session__process_event(session, event, head)) < 0) {
1756                 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1757                        head, event->header.size, event->header.type);
1758                 err = -EINVAL;
1759                 goto out_err;
1760         }
1761
1762         head += size;
1763
1764         if (skip > 0)
1765                 head += skip;
1766
1767         if (!session_done())
1768                 goto more;
1769 done:
1770         /* do the final flush for ordered samples */
1771         err = ordered_events__flush(oe, OE_FLUSH__FINAL);
1772         if (err)
1773                 goto out_err;
1774         err = auxtrace__flush_events(session, tool);
1775         if (err)
1776                 goto out_err;
1777         err = perf_session__flush_thread_stacks(session);
1778 out_err:
1779         free(buf);
1780         perf_session__warn_about_errors(session);
1781         ordered_events__free(&session->ordered_events);
1782         auxtrace__free_events(session);
1783         return err;
1784 }
1785
1786 static union perf_event *
1787 fetch_mmaped_event(struct perf_session *session,
1788                    u64 head, size_t mmap_size, char *buf)
1789 {
1790         union perf_event *event;
1791
1792         /*
1793          * Ensure we have enough space remaining to read the
1794          * event's header, which contains its size.
1795          */
1796         if (head + sizeof(event->header) > mmap_size)
1797                 return NULL;
1798
1799         event = (union perf_event *)(buf + head);
1800
1801         if (session->header.needs_swap)
1802                 perf_event_header__bswap(&event->header);
1803
1804         if (head + event->header.size > mmap_size) {
1805                 /* We're not fetching the event so swap back again */
1806                 if (session->header.needs_swap)
1807                         perf_event_header__bswap(&event->header);
1808                 return NULL;
1809         }
1810
1811         return event;
1812 }
1813
1814 /*
1815  * On 64-bit we can mmap the data file in one go. No need for tiny mmap
1816  * slices. On 32-bit we use 32 MiB slices.
1817  */
1818 #if BITS_PER_LONG == 64
1819 #define MMAP_SIZE ULLONG_MAX
1820 #define NUM_MMAPS 1
1821 #else
1822 #define MMAP_SIZE (32 * 1024 * 1024ULL)
1823 #define NUM_MMAPS 128
1824 #endif
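
/*
 * Editor's note: fetch_mmaped_event() above returns NULL when the next
 * event would straddle the end of the current window; the processing
 * loop below then unmaps that window, page-aligns the current head,
 * advances file_offset accordingly and remaps, so no event is ever
 * parsed across two mappings.
 */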
1825
1826 static int __perf_session__process_events(struct perf_session *session,
1827                                           u64 data_offset, u64 data_size,
1828                                           u64 file_size)
1829 {
1830         struct ordered_events *oe = &session->ordered_events;
1831         struct perf_tool *tool = session->tool;
1832         int fd = perf_data_file__fd(session->file);
1833         u64 head, page_offset, file_offset, file_pos, size;
1834         int err, mmap_prot, mmap_flags, map_idx = 0;
1835         size_t  mmap_size;
1836         char *buf, *mmaps[NUM_MMAPS];
1837         union perf_event *event;
1838         struct ui_progress prog;
1839         s64 skip;
1840
1841         perf_tool__fill_defaults(tool);
1842
1843         page_offset = page_size * (data_offset / page_size);
1844         file_offset = page_offset;
1845         head = data_offset - page_offset;
1846
1847         if (data_size == 0)
1848                 goto out;
1849
1850         if (data_offset + data_size < file_size)
1851                 file_size = data_offset + data_size;
1852
1853         ui_progress__init(&prog, file_size, "Processing events...");
1854
1855         mmap_size = MMAP_SIZE;
1856         if (mmap_size > file_size) {
1857                 mmap_size = file_size;
1858                 session->one_mmap = true;
1859         }
1860
1861         memset(mmaps, 0, sizeof(mmaps));
1862
1863         mmap_prot  = PROT_READ;
1864         mmap_flags = MAP_SHARED;
1865
1866         if (session->header.needs_swap) {
1867                 mmap_prot  |= PROT_WRITE;
1868                 mmap_flags = MAP_PRIVATE;
1869         }
1870 remap:
1871         buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, fd,
1872                    file_offset);
1873         if (buf == MAP_FAILED) {
1874                 pr_err("failed to mmap file\n");
1875                 err = -errno;
1876                 goto out_err;
1877         }
1878         mmaps[map_idx] = buf;
1879         map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
1880         file_pos = file_offset + head;
1881         if (session->one_mmap) {
1882                 session->one_mmap_addr = buf;
1883                 session->one_mmap_offset = file_offset;
1884         }
1885
1886 more:
1887         event = fetch_mmaped_event(session, head, mmap_size, buf);
1888         if (!event) {
1889                 if (mmaps[map_idx]) {
1890                         munmap(mmaps[map_idx], mmap_size);
1891                         mmaps[map_idx] = NULL;
1892                 }
1893
1894                 page_offset = page_size * (head / page_size);
1895                 file_offset += page_offset;
1896                 head -= page_offset;
1897                 goto remap;
1898         }
1899
1900         size = event->header.size;
1901
1902         if (size < sizeof(struct perf_event_header) ||
1903             (skip = perf_session__process_event(session, event, file_pos)) < 0) {
1904                 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1905                        file_offset + head, event->header.size,
1906                        event->header.type);
1907                 err = -EINVAL;
1908                 goto out_err;
1909         }
1910
1911         if (skip)
1912                 size += skip;
1913
1914         head += size;
1915         file_pos += size;
1916
1917         ui_progress__update(&prog, size);
1918
1919         if (session_done())
1920                 goto out;
1921
1922         if (file_pos < file_size)
1923                 goto more;
1924
1925 out:
1926         /* do the final flush for ordered samples */
1927         err = ordered_events__flush(oe, OE_FLUSH__FINAL);
1928         if (err)
1929                 goto out_err;
1930         err = auxtrace__flush_events(session, tool);
1931         if (err)
1932                 goto out_err;
1933         err = perf_session__flush_thread_stacks(session);
1934 out_err:
1935         ui_progress__finish();
1936         perf_session__warn_about_errors(session);
1937         /*
1938          * We may be switching the perf.data output, so make
1939          * ordered_events reusable.
1940          */
1941         ordered_events__reinit(&session->ordered_events);
1942         auxtrace__free_events(session);
1943         session->one_mmap = false;
1944         return err;
1945 }
1946
1947 int perf_session__process_events(struct perf_session *session)
1948 {
1949         u64 size = perf_data_file__size(session->file);
1950         int err;
1951
1952         if (perf_session__register_idle_thread(session) < 0)
1953                 return -ENOMEM;
1954
1955         if (!perf_data_file__is_pipe(session->file))
1956                 err = __perf_session__process_events(session,
1957                                                      session->header.data_offset,
1958                                                      session->header.data_size, size);
1959         else
1960                 err = __perf_session__process_pipe_events(session);
1961
1962         return err;
1963 }
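
/*
 * Typical call sequence (editor's sketch, error handling elided; "tool"
 * stands for a caller-defined struct perf_tool with its callbacks set):
 *
 *	struct perf_data_file file = {
 *		.path = "perf.data",
 *		.mode = PERF_DATA_MODE_READ,
 *	};
 *	struct perf_session *session = perf_session__new(&file, false, &tool);
 *
 *	if (session) {
 *		perf_session__process_events(session);
 *		perf_session__delete(session);
 *	}
 */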
1964
1965 bool perf_session__has_traces(struct perf_session *session, const char *msg)
1966 {
1967         struct perf_evsel *evsel;
1968
1969         evlist__for_each_entry(session->evlist, evsel) {
1970                 if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
1971                         return true;
1972         }
1973
1974         pr_err("No trace samples to read. Did you call 'perf %s'?\n", msg);
1975         return false;
1976 }
1977
1978 int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
1979                                      const char *symbol_name, u64 addr)
1980 {
1981         char *bracket;
1982         int i;
1983         struct ref_reloc_sym *ref;
1984
1985         ref = zalloc(sizeof(struct ref_reloc_sym));
1986         if (ref == NULL)
1987                 return -ENOMEM;
1988
1989         ref->name = strdup(symbol_name);
1990         if (ref->name == NULL) {
1991                 free(ref);
1992                 return -ENOMEM;
1993         }
1994
1995         bracket = strchr(ref->name, ']');
1996         if (bracket)
1997                 *bracket = '\0';
1998
1999         ref->addr = addr;
2000
2001         for (i = 0; i < MAP__NR_TYPES; ++i) {
2002                 struct kmap *kmap = map__kmap(maps[i]);
2003
2004                 if (!kmap)
2005                         continue;
2006                 kmap->ref_reloc_sym = ref;
2007         }
2008
2009         return 0;
2010 }
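
/*
 * Usage sketch for maps__set_kallsyms_ref_reloc_sym() above (editor's
 * illustration): callers pass the host machine's vmlinux maps together
 * with a kallsyms anchor symbol; "ref_addr" is a hypothetical address
 * obtained by parsing kallsyms.
 *
 *	struct machine *machine = &session->machines.host;
 *
 *	if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
 *					     "_text", ref_addr) < 0)
 *		return -ENOMEM;
 */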
2011
2012 size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
2013 {
2014         return machines__fprintf_dsos(&session->machines, fp);
2015 }
2016
2017 size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
2018                                           bool (skip)(struct dso *dso, int parm), int parm)
2019 {
2020         return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
2021 }
2022
2023 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
2024 {
2025         size_t ret;
2026         const char *msg = "";
2027
2028         if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
2029                 msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";
2030
2031         ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
2032
2033         ret += events_stats__fprintf(&session->evlist->stats, fp);
2034         return ret;
2035 }
2036
2037 size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
2038 {
2039         /*
2040          * FIXME: Here we have to actually print all the machines in this
2041          * session, not just the host...
2042          */
2043         return machine__fprintf(&session->machines.host, fp);
2044 }
2045
2046 struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
2047                                               unsigned int type)
2048 {
2049         struct perf_evsel *pos;
2050
2051         evlist__for_each_entry(session->evlist, pos) {
2052                 if (pos->attr.type == type)
2053                         return pos;
2054         }
2055         return NULL;
2056 }
2057
2058 int perf_session__cpu_bitmap(struct perf_session *session,
2059                              const char *cpu_list, unsigned long *cpu_bitmap)
2060 {
2061         int i, err = -1;
2062         struct cpu_map *map;
2063
2064         for (i = 0; i < PERF_TYPE_MAX; ++i) {
2065                 struct perf_evsel *evsel;
2066
2067                 evsel = perf_session__find_first_evtype(session, i);
2068                 if (!evsel)
2069                         continue;
2070
2071                 if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
2072                         pr_err("File does not contain CPU events. "
2073                                "Remove the -C option to proceed.\n");
2074                         return -1;
2075                 }
2076         }
2077
2078         map = cpu_map__new(cpu_list);
2079         if (map == NULL) {
2080                 pr_err("Invalid cpu_list\n");
2081                 return -1;
2082         }
2083
2084         for (i = 0; i < map->nr; i++) {
2085                 int cpu = map->map[i];
2086
2087                 if (cpu >= MAX_NR_CPUS) {
2088                         pr_err("Requested CPU %d is too large. "
2089                                "Consider raising MAX_NR_CPUS\n", cpu);
2090                         goto out_delete_map;
2091                 }
2092
2093                 set_bit(cpu, cpu_bitmap);
2094         }
2095
2096         err = 0;
2097
2098 out_delete_map:
2099         cpu_map__put(map);
2100         return err;
2101 }
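
/*
 * Usage sketch for perf_session__cpu_bitmap() above (editor's
 * illustration), mirroring how builtins with a -C/--cpu option use it;
 * "cpu_list" is the user-supplied string, e.g. "0-3,6":
 *
 *	DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
 *
 *	if (cpu_list && perf_session__cpu_bitmap(session, cpu_list,
 *						 cpu_bitmap) < 0)
 *		return -1;
 *
 * A sample can then be filtered with test_bit(sample->cpu, cpu_bitmap).
 */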
2102
2103 void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
2104                                 bool full)
2105 {
2106         if (session == NULL || fp == NULL)
2107                 return;
2108
2109         fprintf(fp, "# ========\n");
2110         perf_header__fprintf_info(session, fp, full);
2111         fprintf(fp, "# ========\n#\n");
2112 }
2113
2114
2115 int __perf_session__set_tracepoints_handlers(struct perf_session *session,
2116                                              const struct perf_evsel_str_handler *assocs,
2117                                              size_t nr_assocs)
2118 {
2119         struct perf_evsel *evsel;
2120         size_t i;
2121         int err;
2122
2123         for (i = 0; i < nr_assocs; i++) {
2124                 /*
2125                  * If asked to add a handler for an event that is not
2126                  * in this session, just ignore it.
2127                  */
2128                 evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
2129                 if (evsel == NULL)
2130                         continue;
2131
2132                 err = -EEXIST;
2133                 if (evsel->handler != NULL)
2134                         goto out;
2135                 evsel->handler = assocs[i].handler;
2136         }
2137
2138         err = 0;
2139 out:
2140         return err;
2141 }
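
/*
 * Usage sketch for __perf_session__set_tracepoints_handlers() above
 * (editor's illustration): callers normally use the
 * perf_session__set_tracepoints_handlers() wrapper from session.h with
 * a static table; process_sched_switch is a hypothetical handler.
 *
 *	static const struct perf_evsel_str_handler handlers[] = {
 *		{ "sched:sched_switch", process_sched_switch, },
 *	};
 *
 *	if (perf_session__set_tracepoints_handlers(session, handlers))
 *		return -1;
 */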
2142
2143 int perf_event__process_id_index(struct perf_tool *tool __maybe_unused,
2144                                  union perf_event *event,
2145                                  struct perf_session *session)
2146 {
2147         struct perf_evlist *evlist = session->evlist;
2148         struct id_index_event *ie = &event->id_index;
2149         size_t i, nr, max_nr;
2150
2151         max_nr = (ie->header.size - sizeof(struct id_index_event)) /
2152                  sizeof(struct id_index_entry);
2153         nr = ie->nr;
2154         if (nr > max_nr)
2155                 return -EINVAL;
2156
2157         if (dump_trace)
2158                 fprintf(stdout, " nr: %zu\n", nr);
2159
2160         for (i = 0; i < nr; i++) {
2161                 struct id_index_entry *e = &ie->entries[i];
2162                 struct perf_sample_id *sid;
2163
2164                 if (dump_trace) {
2165                         fprintf(stdout, " ... id: %"PRIu64, e->id);
2166                         fprintf(stdout, "  idx: %"PRIu64, e->idx);
2167                         fprintf(stdout, "  cpu: %"PRId64, e->cpu);
2168                         fprintf(stdout, "  tid: %"PRId64"\n", e->tid);
2169                 }
2170
2171                 sid = perf_evlist__id2sid(evlist, e->id);
2172                 if (!sid)
2173                         return -ENOENT;
2174                 sid->idx = e->idx;
2175                 sid->cpu = e->cpu;
2176                 sid->tid = e->tid;
2177         }
2178         return 0;
2179 }
2180
2181 int perf_event__synthesize_id_index(struct perf_tool *tool,
2182                                     perf_event__handler_t process,
2183                                     struct perf_evlist *evlist,
2184                                     struct machine *machine)
2185 {
2186         union perf_event *ev;
2187         struct perf_evsel *evsel;
2188         size_t nr = 0, i = 0, sz, max_nr, n;
2189         int err;
2190
2191         pr_debug2("Synthesizing id index\n");
2192
2193         max_nr = (UINT16_MAX - sizeof(struct id_index_event)) /
2194                  sizeof(struct id_index_entry);
2195
2196         evlist__for_each_entry(evlist, evsel)
2197                 nr += evsel->ids;
2198
2199         n = nr > max_nr ? max_nr : nr;
2200         sz = sizeof(struct id_index_event) + n * sizeof(struct id_index_entry);
2201         ev = zalloc(sz);
2202         if (!ev)
2203                 return -ENOMEM;
2204
2205         ev->id_index.header.type = PERF_RECORD_ID_INDEX;
2206         ev->id_index.header.size = sz;
2207         ev->id_index.nr = n;
2208
2209         evlist__for_each_entry(evlist, evsel) {
2210                 u32 j;
2211
2212                 for (j = 0; j < evsel->ids; j++) {
2213                         struct id_index_entry *e;
2214                         struct perf_sample_id *sid;
2215
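                        /*
                         * The event buffer holds at most n entries; once
                         * it fills up, emit it as a complete chunk and
                         * start refilling from index 0.
                         */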
2216                         if (i >= n) {
2217                                 err = process(tool, ev, NULL, machine);
2218                                 if (err)
2219                                         goto out_err;
2220                                 nr -= n;
2221                                 i = 0;
2222                         }
2223
2224                         e = &ev->id_index.entries[i++];
2225
2226                         e->id = evsel->id[j];
2227
2228                         sid = perf_evlist__id2sid(evlist, e->id);
2229                         if (!sid) {
2230                                 free(ev);
2231                                 return -ENOENT;
2232                         }
2233
2234                         e->idx = sid->idx;
2235                         e->cpu = sid->cpu;
2236                         e->tid = sid->tid;
2237                 }
2238         }
2239
2240         sz = sizeof(struct id_index_event) + nr * sizeof(struct id_index_entry);
2241         ev->id_index.header.size = sz;
2242         ev->id_index.nr = nr;
2243
2244         err = process(tool, ev, NULL, machine);
2245 out_err:
2246         free(ev);
2247
2248         return err;
2249 }