/* tools/perf/util/hist.c - Linux-libre 3.14.42-gnu (librecmc/linux-libre.git) */
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evsel.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he);

enum hist_filter {
        HIST_FILTER__DSO,
        HIST_FILTER__THREAD,
        HIST_FILTER__PARENT,
        HIST_FILTER__SYMBOL,
};

struct callchain_param  callchain_param = {
        .mode   = CHAIN_GRAPH_REL,
        .min_percent = 0.5,
        .order  = ORDER_CALLEE,
        .key    = CCKEY_FUNCTION
};

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
        return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        if (len > hists__col_len(hists, col)) {
                hists__set_col_len(hists, col, len);
                return true;
        }
        return false;
}

void hists__reset_col_len(struct hists *hists)
{
        enum hist_column col;

        for (col = 0; col < HISTC_NR_COLS; ++col)
                hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

        if (hists__col_len(hists, dso) < unresolved_col_width &&
            !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
            !symbol_conf.dso_list)
                hists__set_col_len(hists, dso, unresolved_col_width);
}

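/*
 * Grow the per-column display widths so that every field of @h fits.
 * Widths only ever grow here (via hists__new_col_len()); they are reset
 * with hists__reset_col_len() before a full recalculation.
 */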
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
        int symlen;
        u16 len;

        /*
         * +4 accounts for '[x] ' priv level info
         * +2 accounts for 0x prefix on raw addresses
         * +3 accounts for ' y ' symtab origin info
         */
        if (h->ms.sym) {
                symlen = h->ms.sym->namelen + 4;
                if (verbose)
                        symlen += BITS_PER_LONG / 4 + 2 + 3;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_DSO);
        }

        len = thread__comm_len(h->thread);
        if (hists__new_col_len(hists, HISTC_COMM, len))
                hists__set_col_len(hists, HISTC_THREAD, len + 6);

        if (h->ms.map) {
                len = dso__name_len(h->ms.map->dso);
                hists__new_col_len(hists, HISTC_DSO, len);
        }

        if (h->parent)
                hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

        if (h->branch_info) {
                if (h->branch_info->from.sym) {
                        symlen = (int)h->branch_info->from.sym->namelen + 4;
                        if (verbose)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

                        symlen = dso__name_len(h->branch_info->from.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
                }

                if (h->branch_info->to.sym) {
                        symlen = (int)h->branch_info->to.sym->namelen + 4;
                        if (verbose)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

                        symlen = dso__name_len(h->branch_info->to.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_TO, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
                }
        }

        if (h->mem_info) {
                if (h->mem_info->daddr.sym) {
                        symlen = (int)h->mem_info->daddr.sym->namelen + 4
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                }
                if (h->mem_info->daddr.map) {
                        symlen = dso__name_len(h->mem_info->daddr.map->dso);
                        hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
                }
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
        }

        hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
        hists__new_col_len(hists, HISTC_MEM_TLB, 22);
        hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
        hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
        hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
        hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

        if (h->transaction)
                hists__new_col_len(hists, HISTC_TRANSACTION,
                                   hist_entry__transaction_len());
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;
        int row = 0;

        hists__reset_col_len(hists);

        while (next && row++ < max_rows) {
                n = rb_entry(next, struct hist_entry, rb_node);
                if (!n->filtered)
                        hists__calc_col_len(hists, n);
                next = rb_next(&n->rb_node);
        }
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
                                        unsigned int cpumode, u64 period)
{
        switch (cpumode) {
        case PERF_RECORD_MISC_KERNEL:
                he_stat->period_sys += period;
                break;
        case PERF_RECORD_MISC_USER:
                he_stat->period_us += period;
                break;
        case PERF_RECORD_MISC_GUEST_KERNEL:
                he_stat->period_guest_sys += period;
                break;
        case PERF_RECORD_MISC_GUEST_USER:
                he_stat->period_guest_us += period;
                break;
        default:
                break;
        }
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
                                u64 weight)
{
        he_stat->period         += period;
        he_stat->weight         += weight;
        he_stat->nr_events      += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
        dest->period            += src->period;
        dest->period_sys        += src->period_sys;
        dest->period_us         += src->period_us;
        dest->period_guest_sys  += src->period_guest_sys;
        dest->period_guest_us   += src->period_guest_us;
        dest->nr_events         += src->nr_events;
        dest->weight            += src->weight;
}

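/*
 * Exponentially decay the accumulated period and event count by a factor
 * of 7/8, so that older samples gradually lose weight in a live
 * (top-like) session.
 */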
static void he_stat__decay(struct he_stat *he_stat)
{
        he_stat->period = (he_stat->period * 7) / 8;
        he_stat->nr_events = (he_stat->nr_events * 7) / 8;
        /* XXX need decay for weight too? */
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
        u64 prev_period = he->stat.period;

        if (prev_period == 0)
                return true;

        he_stat__decay(&he->stat);

        if (!he->filtered)
                hists->stats.total_period -= prev_period - he->stat.period;

        return he->stat.period == 0;
}

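/*
 * Walk all entries, decay their periods and remove the ones that decayed
 * to zero (or that match the zap_user/zap_kernel filters), unless they
 * are still in use by a browser (n->used).
 */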
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);
                /*
                 * We may be annotating this, for instance, so keep it here in
                 * case it gets new samples; we'll eventually free it when
                 * the user stops browsing and it again gets fully decayed.
                 */
                if (((zap_user && n->level == '.') ||
                     (zap_kernel && n->level != '.') ||
                     hists__decay_entry(hists, n)) &&
                    !n->used) {
                        rb_erase(&n->rb_node, &hists->entries);

                        if (sort__need_collapse)
                                rb_erase(&n->rb_node_in, &hists->entries_collapsed);

                        hist_entry__free(n);
                        --hists->nr_entries;
                }
        }
}

/*
 * histogram, sorted on item, collects periods
 */

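/*
 * Allocate a new hist_entry initialized from @template, appending space
 * for the callchain root when callchains are in use and deep-copying the
 * branch_info, since the template's copy is freed once the entry has been
 * added.
 */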
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
        size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
        struct hist_entry *he = zalloc(sizeof(*he) + callchain_size);

        if (he != NULL) {
                *he = *template;

                if (he->ms.map)
                        he->ms.map->referenced = true;

                if (he->branch_info) {
                        /*
                         * This branch info is (a part of) allocated from
                         * machine__resolve_bstack() and will be freed after
                         * adding new entries.  So we need to save a copy.
                         */
                        he->branch_info = malloc(sizeof(*he->branch_info));
                        if (he->branch_info == NULL) {
                                free(he);
                                return NULL;
                        }

                        memcpy(he->branch_info, template->branch_info,
                               sizeof(*he->branch_info));

                        if (he->branch_info->from.map)
                                he->branch_info->from.map->referenced = true;
                        if (he->branch_info->to.map)
                                he->branch_info->to.map->referenced = true;
                }

                if (he->mem_info) {
                        if (he->mem_info->iaddr.map)
                                he->mem_info->iaddr.map->referenced = true;
                        if (he->mem_info->daddr.map)
                                he->mem_info->daddr.map->referenced = true;
                }

                if (symbol_conf.use_callchain)
                        callchain_init(he->callchain);

                INIT_LIST_HEAD(&he->pairs.node);
        }

        return he;
}

void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
        if (!h->filtered) {
                hists__calc_col_len(hists, h);
                ++hists->nr_entries;
                hists->stats.total_period += h->stat.period;
        }
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
        if (symbol_conf.exclude_other && parent == NULL)
                return 1 << HIST_FILTER__PARENT;
        return 0;
}

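/*
 * Insert @entry into the current input tree (hists->entries_in), keyed by
 * the configured sort keys.  If an equal entry already exists, just add
 * the period/weight to it instead of allocating a new node.
 */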
static struct hist_entry *add_hist_entry(struct hists *hists,
                                         struct hist_entry *entry,
                                         struct addr_location *al)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int64_t cmp;
        u64 period = entry->stat.period;
        u64 weight = entry->stat.weight;

        p = &hists->entries_in->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                /*
                 * Make sure that it receives arguments in the same order as
                 * hist_entry__collapse() so that we can use an appropriate
                 * function when searching an entry regardless of which sort
                 * keys were used.
                 */
                cmp = hist_entry__cmp(he, entry);

                if (!cmp) {
                        he_stat__add_period(&he->stat, period, weight);

                        /*
                         * This mem info was allocated from machine__resolve_mem
                         * and will not be used anymore.
                         */
                        zfree(&entry->mem_info);

                        /* If the map of an existing hist_entry has
                         * become out-of-date due to an exec() or
                         * similar, update it.  Otherwise we will
                         * mis-adjust symbol addresses when computing
                         * the history counter to increment.
                         */
                        if (he->ms.map != entry->ms.map) {
                                he->ms.map = entry->ms.map;
                                if (he->ms.map)
                                        he->ms.map->referenced = true;
                        }
                        goto out;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(entry);
        if (!he)
                return NULL;

        hists->nr_entries++;
        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
        he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
        return he;
}

struct hist_entry *__hists__add_entry(struct hists *hists,
                                      struct addr_location *al,
                                      struct symbol *sym_parent,
                                      struct branch_info *bi,
                                      struct mem_info *mi,
                                      u64 period, u64 weight, u64 transaction)
{
        struct hist_entry entry = {
                .thread = al->thread,
                .comm = thread__comm(al->thread),
                .ms = {
                        .map    = al->map,
                        .sym    = al->sym,
                },
                .cpu    = al->cpu,
                .ip     = al->addr,
                .level  = al->level,
                .stat = {
                        .nr_events = 1,
                        .period = period,
                        .weight = weight,
                },
                .parent = sym_parent,
                .filtered = symbol__parent_filter(sym_parent),
                .hists  = hists,
                .branch_info = bi,
                .mem_info = mi,
                .transaction = transaction,
        };

        return add_hist_entry(hists, &entry, al);
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
        struct sort_entry *se;
        int64_t cmp = 0;

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                cmp = se->se_cmp(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
        struct sort_entry *se;
        int64_t cmp = 0;

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                int64_t (*f)(struct hist_entry *, struct hist_entry *);

                f = se->se_collapse ?: se->se_cmp;

                cmp = f(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
        zfree(&he->branch_info);
        zfree(&he->mem_info);
        free_srcline(he->srcline);
        free(he);
}

/*
 * collapse the histogram
 */

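/*
 * Insert @he into @root, merging its stats and callchain into an existing
 * entry if one compares equal under the collapse keys.  Returns true if
 * @he was linked in, false if it was merged and freed.
 */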
static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
                                         struct rb_root *root,
                                         struct hist_entry *he)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        int64_t cmp;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(iter, he);

                if (!cmp) {
                        he_stat__add_stat(&iter->stat, &he->stat);

                        if (symbol_conf.use_callchain) {
                                callchain_cursor_reset(&callchain_cursor);
                                callchain_merge(&callchain_cursor,
                                                iter->callchain,
                                                he->callchain);
                        }
                        hist_entry__free(he);
                        return false;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, root);
        return true;
}

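/*
 * entries_in_array[] holds two input trees: rotate to the other one under
 * the lock so that new samples can keep being added while the tree
 * returned here is collapsed.
 */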
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
        struct rb_root *root;

        pthread_mutex_lock(&hists->lock);

        root = hists->entries_in;
        if (++hists->entries_in > &hists->entries_in_array[1])
                hists->entries_in = &hists->entries_in_array[0];

        pthread_mutex_unlock(&hists->lock);

        return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
        hists__filter_entry_by_dso(hists, he);
        hists__filter_entry_by_thread(hists, he);
        hists__filter_entry_by_symbol(hists, he);
}

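/*
 * Move every entry from the rotated input tree into entries_collapsed,
 * merging duplicates under the collapse keys and re-applying any active
 * dso/thread/symbol filters to the entries that survive as new nodes.
 */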
void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;

        if (!sort__need_collapse)
                return;

        root = hists__get_rotate_entries_in(hists);
        next = rb_first(root);

        while (next) {
                if (session_done())
                        break;
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                rb_erase(&n->rb_node_in, root);
                if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
                        /*
                         * If it wasn't combined with one of the entries already
                         * collapsed, we need to apply the filters that may have
                         * been set by, say, the hist_browser.
                         */
                        hists__apply_filters(hists, n);
                }
                if (prog)
                        ui_progress__update(prog, 1);
        }
}

/*
 * reverse the map, sort on period.
 */

static int period_cmp(u64 period_a, u64 period_b)
{
        if (period_a > period_b)
                return 1;
        if (period_a < period_b)
                return -1;
        return 0;
}

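/*
 * Compare two entries by period for output ordering.  With event groups,
 * when the leader periods are equal, fall back to comparing the periods
 * of the other group members found via the pair lists.
 */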
static int hist_entry__sort_on_period(struct hist_entry *a,
                                      struct hist_entry *b)
{
        int ret;
        int i, nr_members;
        struct perf_evsel *evsel;
        struct hist_entry *pair;
        u64 *periods_a, *periods_b;

        ret = period_cmp(a->stat.period, b->stat.period);
        if (ret || !symbol_conf.event_group)
                return ret;

        evsel = hists_to_evsel(a->hists);
        nr_members = evsel->nr_members;
        if (nr_members <= 1)
                return ret;

        periods_a = zalloc(sizeof(*periods_a) * nr_members);
        periods_b = zalloc(sizeof(*periods_b) * nr_members);

        if (!periods_a || !periods_b)
                goto out;

        list_for_each_entry(pair, &a->pairs.head, pairs.node) {
                evsel = hists_to_evsel(pair->hists);
                periods_a[perf_evsel__group_idx(evsel)] = pair->stat.period;
        }

        list_for_each_entry(pair, &b->pairs.head, pairs.node) {
                evsel = hists_to_evsel(pair->hists);
                periods_b[perf_evsel__group_idx(evsel)] = pair->stat.period;
        }

        for (i = 1; i < nr_members; i++) {
                ret = period_cmp(periods_a[i], periods_b[i]);
                if (ret)
                        break;
        }

out:
        free(periods_a);
        free(periods_b);

        return ret;
}

static void __hists__insert_output_entry(struct rb_root *entries,
                                         struct hist_entry *he,
                                         u64 min_callchain_hits)
{
        struct rb_node **p = &entries->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;

        if (symbol_conf.use_callchain)
                callchain_param.sort(&he->sorted_chain, he->callchain,
                                      min_callchain_hits, &callchain_param);

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                if (hist_entry__sort_on_period(he, iter) > 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, entries);
}

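/*
 * Rebuild the output tree (hists->entries) sorted on period, resetting
 * the entry/period counters and column widths, which are re-accumulated
 * entry by entry through hists__inc_nr_entries().
 */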
void hists__output_resort(struct hists *hists)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;
        u64 min_callchain_hits;

        min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

        if (sort__need_collapse)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        next = rb_first(root);
        hists->entries = RB_ROOT;

        hists->nr_entries = 0;
        hists->stats.total_period = 0;
        hists__reset_col_len(hists);

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                __hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
                hists__inc_nr_entries(hists, n);
        }
}

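/*
 * Clear @filter from @h and, once no filter bits remain, account the
 * entry back into the hists' totals and column width calculation.
 */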
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
                                       enum hist_filter filter)
{
        h->filtered &= ~(1 << filter);
        if (h->filtered)
                return;

        ++hists->nr_entries;
        if (h->ms.unfolded)
                hists->nr_entries += h->nr_rows;
        h->row_offset = 0;
        hists->stats.total_period += h->stat.period;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events;

        hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he)
{
        if (hists->dso_filter != NULL &&
            (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
                he->filtered |= (1 << HIST_FILTER__DSO);
                return true;
        }

        return false;
}

void hists__filter_by_dso(struct hists *hists)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (symbol_conf.exclude_other && !h->parent)
                        continue;

                if (hists__filter_entry_by_dso(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
        }
}

static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->thread_filter != NULL &&
            he->thread != hists->thread_filter) {
                he->filtered |= (1 << HIST_FILTER__THREAD);
                return true;
        }

        return false;
}

void hists__filter_by_thread(struct hists *hists)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (hists__filter_entry_by_thread(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
        }
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->symbol_filter_str != NULL &&
            (!he->ms.sym || strstr(he->ms.sym->name,
                                   hists->symbol_filter_str) == NULL)) {
                he->filtered |= (1 << HIST_FILTER__SYMBOL);
                return true;
        }

        return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (hists__filter_entry_by_symbol(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
        }
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
        ++stats->nr_events[0];
        ++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
        events_stats__inc(&hists->stats, type);
}

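/*
 * Add a zero-period copy of @pair to @hists so that entries present only
 * in the other hists still get a bucket in the leader (e.g. when
 * comparing hists, as in perf diff).
 */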
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
                                                 struct hist_entry *pair)
{
        struct rb_root *root;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int64_t cmp;

        if (sort__need_collapse)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        p = &root->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(he, pair);

                if (!cmp)
                        goto out;

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(pair);
        if (he) {
                memset(&he->stat, 0, sizeof(he->stat));
                he->hists = hists;
                rb_link_node(&he->rb_node_in, parent, p);
                rb_insert_color(&he->rb_node_in, root);
                hists__inc_nr_entries(hists, he);
                he->dummy = true;
        }
out:
        return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
                                            struct hist_entry *he)
{
        struct rb_node *n;

        if (sort__need_collapse)
                n = hists->entries_collapsed.rb_node;
        else
                n = hists->entries_in->rb_node;

        while (n) {
                struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
                int64_t cmp = hist_entry__collapse(iter, he);

                if (cmp < 0)
                        n = n->rb_left;
                else if (cmp > 0)
                        n = n->rb_right;
                else
                        return iter;
        }

        return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
        struct rb_root *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        if (sort__need_collapse)
                root = &leader->entries_collapsed;
        else
                root = leader->entries_in;

        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                pos  = rb_entry(nd, struct hist_entry, rb_node_in);
                pair = hists__find_entry(other, pos);

                if (pair)
                        hist_entry__add_pair(pair, pos);
        }
}

/*
 * Look for entries in the other hists that are not present in the leader;
 * if we find them, just add a dummy entry to the leader hists, with
 * period=0, nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
        struct rb_root *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        if (sort__need_collapse)
                root = &other->entries_collapsed;
        else
                root = other->entries_in;

        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                pos = rb_entry(nd, struct hist_entry, rb_node_in);

                if (!hist_entry__has_pairs(pos)) {
                        pair = hists__add_dummy_entry(leader, pos);
                        if (pair == NULL)
                                return -1;
                        hist_entry__add_pair(pos, pair);
                }
        }

        return 0;
}