1 // SPDX-License-Identifier: GPL-2.0
3 #include <linux/string.h>
5 #include "../../util/callchain.h"
6 #include "../../util/hist.h"
7 #include "../../util/map.h"
8 #include "../../util/map_groups.h"
9 #include "../../util/symbol.h"
10 #include "../../util/sort.h"
11 #include "../../util/evsel.h"
12 #include "../../util/srcline.h"
13 #include "../../util/string2.h"
14 #include "../../util/thread.h"
15 #include <linux/ctype.h>
16 #include <linux/zalloc.h>
18 static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
/*
 * Emit one space followed by 'left_margin' further spaces to @fp and
 * return the number of bytes written.
 * NOTE(review): the embedded numbering skips lines 19-20 here (the
 * opening brace and the declaration of 'i' are not visible in this view).
 */
21 int ret = fprintf(fp, " ");
23 for (i = 0; i < left_margin; i++)
24 ret += fprintf(fp, " ");
29 static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
/*
 * Print the vertical-link separator line ("|  |  ...") between callchain
 * graph entries: for each level i below 'depth' whose bit is set in
 * 'depth_mask' a pipe is drawn, otherwise blank padding keeps the columns
 * aligned; a trailing newline ends the line.  Returns bytes written.
 * NOTE(review): this view is missing some original lines (numbering gaps),
 * e.g. the 'else' that pairs the padding at line 39 with the pipe branch.
 */
33 size_t ret = callchain__fprintf_left_margin(fp, left_margin);
35 for (i = 0; i < depth; i++)
36 if (depth_mask & (1 << i))
37 ret += fprintf(fp, "| ");
39 ret += fprintf(fp, " ")
41 ret += fprintf(fp, "\n");
46 static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
47 struct callchain_list *chain,
48 int depth, int depth_mask, int period,
49 u64 total_samples, int left_margin)
/*
 * Print one callchain entry line in graph mode: the left margin, the
 * depth pipes, and — on the first line of a new branch (!period at the
 * deepest level) — a "--<value>--" connector produced by
 * callchain_node__fprintf_value(), followed by the symbol name.
 * Returns bytes written.
 * NOTE(review): the embedded numbering has gaps; declarations of 'ret',
 * 'i', 'str' and 'buf' (used at line 77) are not visible in this view,
 * nor is the free of 'alloc_str'.
 */
53 char bf[1024], *alloc_str = NULL;
57 ret += callchain__fprintf_left_margin(fp, left_margin);
58 for (i = 0; i < depth; i++) {
59 if (depth_mask & (1 << i))
60 ret += fprintf(fp, "|");
62 ret += fprintf(fp, " ");
63 if (!period && i == depth - 1) {
64 ret += fprintf(fp, "--");
65 ret += callchain_node__fprintf_value(node, fp, total_samples);
66 ret += fprintf(fp, "--");
68 ret += fprintf(fp, "%s", " ");
71 str = callchain_list__sym_name(chain, bf, sizeof(bf), false);
/* Append branch-flag counters to the symbol name when requested. */
73 if (symbol_conf.show_branchflag_count) {
74 callchain_list_counts__printf_value(chain, NULL,
/* On asprintf() failure fall back to a static message instead of NULL. */
77 if (asprintf(&alloc_str, "%s%s", str, buf) < 0)
78 str = "Not enough memory!";
/*
 * Synthetic "[...]" symbol used to represent the remaining (filtered)
 * hits in relative graph mode; allocated and wired into rem_hits by
 * init_rem_hits() and freed via zfree() in hists__fprintf().
 */
90 static struct symbol *rem_sq_bracket;
91 static struct callchain_list rem_hits;
93 static void init_rem_hits(void)
/*
 * Allocate the synthetic "[...]" symbol and attach it to rem_hits.
 * The extra 6 bytes hold the "[...]" string plus its NUL terminator —
 * presumably struct symbol ends in a flexible name array; TODO confirm
 * against util/symbol.h.  On allocation failure a diagnostic is printed
 * (an early return is expected here but is not visible in this view).
 */
95 rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
96 if (!rem_sq_bracket) {
97 fprintf(stderr, "Not enough memory to display remaining hits\n");
101 strcpy(rem_sq_bracket->name, "[...]");
102 rem_hits.ms.sym = rem_sq_bracket;
105 static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
106 u64 total_samples, int depth,
107 int depth_mask, int left_margin)
/*
 * Recursively print a callchain rbtree in graph mode, one child per
 * iteration over rb_first()/rb_next().  Tracks 'remaining' samples so
 * that, in CHAIN_GRAPH_REL mode, hits filtered out below the percent
 * limit are summarized with a synthetic "[...]" entry (rem_hits).
 * Returns bytes written.
 * NOTE(review): numbering gaps — the declarations of ret/remaining/
 * cumul/cumul_count/new_total, the loop construct around the children
 * walk, and subtraction of cumul from remaining are not visible here.
 */
109 struct rb_node *node, *next;
110 struct callchain_node *child = NULL;
111 struct callchain_list *chain;
112 int new_depth_mask = depth_mask;
116 uint entries_printed = 0;
119 remaining = total_samples;
121 node = rb_first(root);
126 child = rb_entry(node, struct callchain_node, rb_node);
127 cumul = callchain_cumul_hits(child);
129 cumul_count += callchain_cumul_counts(child);
132 * The depth mask manages the output of pipes that show
133 * the depth. We don't want to keep the pipes of the current
134 * level for the last child of this depth.
135 * Except if we have remaining filtered hits. They will
136 * supersede the last child
138 next = rb_next(node);
139 if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
140 new_depth_mask &= ~(1 << (depth - 1));
143 * But we keep the older depth mask for the line separator
144 * to keep the level link until we reach the last child
146 ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
149 list_for_each_entry(chain, &child->val, list) {
150 ret += ipchain__fprintf_graph(fp, child, chain, depth,
/* Relative mode scales children against this child's hits. */
156 if (callchain_param.mode == CHAIN_GRAPH_REL)
157 new_total = child->children_hit;
159 new_total = total_samples;
161 ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
163 new_depth_mask | (1 << depth),
166 if (++entries_printed == callchain_param.print_limit)
/* Summarize filtered-out hits as a "[...]" entry in relative mode. */
170 if (callchain_param.mode == CHAIN_GRAPH_REL &&
171 remaining && remaining != total_samples) {
172 struct callchain_node rem_node = {
179 if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
180 rem_node.count = child->parent->children_count - cumul_count;
181 if (rem_node.count <= 0)
185 new_depth_mask &= ~(1 << (depth - 1));
186 ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
187 new_depth_mask, 0, total_samples,
195 * If there is a single callchain root, don't bother printing
196 * its percentage (100% in fractal mode and the same percentage
197 * as the hist in graph mode). This also avoids one level of column.
199 * However, when a percent limit is applied, a single callchain
200 * node may have a different (non-100% in fractal mode) percentage.
202 static bool need_percent_display(struct rb_node *node, u64 parent_samples)
/*
 * True when the root callchain node's percentage is worth printing:
 * a single root whose cumulative hits equal the parent entry's samples
 * would only show a redundant 100%/identical value.
 * NOTE(review): the check that @node is the sole sibling (e.g. via
 * rb_next()) is not visible in this view — numbering skips lines 205-208.
 */
204 struct callchain_node *cnode;
209 cnode = rb_entry(node, struct callchain_node, rb_node);
210 return callchain_cumul_hits(cnode) != parent_samples;
213 static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
214 u64 total_samples, u64 parent_samples,
/*
 * Top-level graph-mode callchain printer.  When the root level has a
 * single node whose percentage adds no information (see
 * need_percent_display()), its entries are printed inline with a
 * "|\n---" lead-in and printing descends directly into its subtree;
 * then the rest of the tree is handled by __callchain__fprintf_graph().
 * Returns bytes written.
 * NOTE(review): numbering gaps — declarations of ret/i/bf and parts of
 * the fprintf argument lists are not visible in this view.
 */
217 struct callchain_node *cnode;
218 struct callchain_list *chain;
219 u32 entries_printed = 0;
220 bool printed = false;
221 struct rb_node *node;
226 node = rb_first(root);
227 if (node && !need_percent_display(node, parent_samples)) {
228 cnode = rb_entry(node, struct callchain_node, rb_node);
229 list_for_each_entry(chain, &cnode->val, list) {
231 * If we sort by symbol, the first entry is the same as
232 * the symbol. No need to print it otherwise it appears as
235 if (!i++ && field_order == NULL &&
236 sort_order && strstarts(sort_order, "sym"))
240 ret += callchain__fprintf_left_margin(fp, left_margin);
241 ret += fprintf(fp, "|\n");
242 ret += callchain__fprintf_left_margin(fp, left_margin);
243 ret += fprintf(fp, "---");
247 ret += callchain__fprintf_left_margin(fp, left_margin);
249 ret += fprintf(fp, "%s",
250 callchain_list__sym_name(chain, bf,
254 if (symbol_conf.show_branchflag_count)
255 ret += callchain_list_counts__printf_value(
257 ret += fprintf(fp, "\n");
259 if (++entries_printed == callchain_param.print_limit)
/* Descend: continue from the single node's children. */
262 root = &cnode->rb_root;
265 if (callchain_param.mode == CHAIN_GRAPH_REL)
266 total_samples = parent_samples;
268 ret += __callchain__fprintf_graph(fp, root, total_samples,
271 /* do not add a blank line if it printed nothing */
272 ret += fprintf(fp, "\n");
278 static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
/*
 * Recursively print one flat-mode callchain: parents first (via the
 * recursive call on node->parent), then this node's own entries, one
 * indented symbol name per line.  Context markers (ip >=
 * PERF_CONTEXT_MAX) are skipped.  Returns bytes written.
 * NOTE(review): declarations of ret/bf and the base-case guard on a
 * NULL node are not visible in this view (numbering gaps).
 */
281 struct callchain_list *chain;
288 ret += __callchain__fprintf_flat(fp, node->parent, total_samples);
291 list_for_each_entry(chain, &node->val, list) {
292 if (chain->ip >= PERF_CONTEXT_MAX)
294 ret += fprintf(fp, "                %s\n", callchain_list__sym_name(chain,
295 bf, sizeof(bf), false));
301 static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
/*
 * Print every callchain in @tree in flat mode: for each root node, its
 * value (via callchain_node__fprintf_value()) followed by the full chain
 * from __callchain__fprintf_flat(), separated by blank lines.  Stops at
 * callchain_param.print_limit entries.  Returns bytes written.
 */
305 u32 entries_printed = 0;
306 struct callchain_node *chain;
307 struct rb_node *rb_node = rb_first(tree);
310 chain = rb_entry(rb_node, struct callchain_node, rb_node);
312 ret += fprintf(fp, "           ");
313 ret += callchain_node__fprintf_value(chain, fp, total_samples);
314 ret += fprintf(fp, "\n");
315 ret += __callchain__fprintf_flat(fp, chain, total_samples);
316 ret += fprintf(fp, "\n");
317 if (++entries_printed == callchain_param.print_limit)
320 rb_node = rb_next(rb_node);
326 static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
/*
 * Print one callchain in folded (single-line) form: parents first via
 * recursion, then this node's symbols joined by the field separator
 * (';' by default).  Context markers (ip >= PERF_CONTEXT_MAX) are
 * skipped.  Returns bytes written.
 * NOTE(review): declarations of ret/bf/first and the update of 'first'
 * after each entry are not visible in this view (numbering gaps).
 */
328 const char *sep = symbol_conf.field_sep ?: ";";
329 struct callchain_list *chain;
337 ret += __callchain__fprintf_folded(fp, node->parent);
340 list_for_each_entry(chain, &node->val, list) {
341 if (chain->ip >= PERF_CONTEXT_MAX)
343 ret += fprintf(fp, "%s%s", first ? "" : sep,
344 callchain_list__sym_name(chain,
345 bf, sizeof(bf), false));
352 static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
/*
 * Print every callchain in @tree in folded mode: per root node, its
 * value followed by the semicolon-joined chain on one line.  Stops at
 * callchain_param.print_limit entries.  Returns bytes written.
 */
356 u32 entries_printed = 0;
357 struct callchain_node *chain;
358 struct rb_node *rb_node = rb_first(tree);
362 chain = rb_entry(rb_node, struct callchain_node, rb_node);
364 ret += callchain_node__fprintf_value(chain, fp, total_samples);
365 ret += fprintf(fp, " ");
366 ret += __callchain__fprintf_folded(fp, chain);
367 ret += fprintf(fp, "\n");
368 if (++entries_printed == callchain_param.print_limit)
371 rb_node = rb_next(rb_node);
377 static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
378 u64 total_samples, int left_margin,
/*
 * Dispatch callchain printing for one hist entry according to
 * callchain_param.mode (graph-relative, graph-absolute, flat, folded).
 * When cumulate_callchain is enabled, percentages are scaled against
 * the accumulated period (stat_acc) instead of the entry's own period.
 * Returns bytes written; unknown modes log an error.
 * NOTE(review): the CHAIN_FLAT/CHAIN_FOLDED case labels and the final
 * return are not visible in this view (numbering gaps).
 */
381 u64 parent_samples = he->stat.period;
383 if (symbol_conf.cumulate_callchain)
384 parent_samples = he->stat_acc->period;
386 switch (callchain_param.mode) {
387 case CHAIN_GRAPH_REL:
388 return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
389 parent_samples, left_margin);
391 case CHAIN_GRAPH_ABS:
392 return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
393 parent_samples, left_margin);
396 return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
399 return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
404 pr_err("Bad callchain mode\n");
410 int __hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp,
411 struct perf_hpp_list *hpp_list)
/*
 * Format one hist entry into hpp->buf by walking every column format in
 * @hpp_list: each format writes either a colored or plain rendering,
 * separated by field_sep (or a space).  Returns the number of bytes
 * written (buffer cursor minus start).
 * NOTE(review): the declarations of ret/first and the update of 'first'
 * inside the loop are not visible in this view (numbering gaps).
 */
413 const char *sep = symbol_conf.field_sep;
414 struct perf_hpp_fmt *fmt;
415 char *start = hpp->buf;
/* --exclude-other: skip entries without a parent. */
419 if (symbol_conf.exclude_other && !he->parent)
422 perf_hpp_list__for_each_format(hpp_list, fmt) {
423 if (perf_hpp__should_skip(fmt, he->hists))
427 * If there's no field_sep, we still need
428 * to display initial '  '.
430 if (!sep || !first) {
431 ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: "  ");
432 advance_hpp(hpp, ret);
436 if (perf_hpp__use_color() && fmt->color)
437 ret = fmt->color(fmt, hpp, he);
439 ret = fmt->entry(fmt, hpp, he);
/* Pad/align the column to its configured width. */
441 ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
442 advance_hpp(hpp, ret);
445 return hpp->buf - start;
/* Convenience wrapper: format @he using its hists' default format list. */
448 static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
450 return __hist_entry__snprintf(he, hpp, he->hists->hpp_list);
453 static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
454 struct perf_hpp *hpp,
/*
 * Print one hist entry in hierarchy (--hierarchy) mode: indentation
 * proportional to the entry's depth, then the overhead columns (first
 * hpp_list_node), then the entry's own sort-key columns left-aligned,
 * and finally — for leaf entries with callchains — the callchain dump.
 * Returns the number of characters printed to @fp.
 * NOTE(review): numbering gaps — the 'first' flag handling, resets of
 * hpp->buf/hpp->size between phases, and the final return are not
 * visible in this view.
 */
458 const char *sep = symbol_conf.field_sep;
459 struct perf_hpp_fmt *fmt;
460 struct perf_hpp_list_node *fmt_node;
461 char *buf = hpp->buf;
462 size_t size = hpp->size;
463 int ret, printed = 0;
466 if (symbol_conf.exclude_other && !he->parent)
/* Indent by the entry's depth in the hierarchy. */
469 ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
470 advance_hpp(hpp, ret);
472 /* the first hpp_list_node is for overhead columns */
473 fmt_node = list_first_entry(&hists->hpp_formats,
474 struct perf_hpp_list_node, list);
475 perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
477 * If there's no field_sep, we still need
478 * to display initial '  '.
480 if (!sep || !first) {
481 ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: "  ");
482 advance_hpp(hpp, ret);
486 if (perf_hpp__use_color() && fmt->color)
487 ret = fmt->color(fmt, hpp, he);
489 ret = fmt->entry(fmt, hpp, he);
491 ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
492 advance_hpp(hpp, ret);
/* Pad out the unused hierarchy levels so columns line up. */
496 ret = scnprintf(hpp->buf, hpp->size, "%*s",
497 (hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
498 advance_hpp(hpp, ret);
500 printed += fprintf(fp, "%s", buf);
502 perf_hpp_list__for_each_format(he->hpp_list, fmt) {
507 * No need to call hist_entry__snprintf_alignment() since this
508 * fmt is always the last column in the hierarchy mode.
510 if (perf_hpp__use_color() && fmt->color)
511 fmt->color(fmt, hpp, he);
513 fmt->entry(fmt, hpp, he);
516 * dynamic entries are right-aligned but we want left-aligned
517 * in the hierarchy mode
519 printed += fprintf(fp, "%s%s", sep ?: "  ", skip_spaces(buf));
521 printed += putc('\n', fp);
523 if (he->leaf && hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
524 u64 total = hists__total_period(hists);
526 printed += hist_entry_callchain__fprintf(he, total, 0, fp);
534 static int hist_entry__block_fprintf(struct hist_entry *he,
535 char *bf, size_t size,
/*
 * Print a block-report (--report-block) entry: @he is embedded in a
 * struct block_hist, and each of its nested block_hists entries is
 * formatted into @bf with hist_entry__snprintf() and printed on its own
 * line.  Returns characters printed.
 * NOTE(review): numbering gaps — the per-iteration lookup of the nested
 * entry (the 'he' formatted at line 549 is presumably re-bound inside
 * the loop; confirm against the full source) and the declaration of
 * 'ret' are not visible here.
 */
538 struct block_hist *bh = container_of(he, struct block_hist, he);
541 for (unsigned int i = 0; i < bh->block_hists.nr_entries; i++) {
542 struct perf_hpp hpp = {
549 hist_entry__snprintf(he, &hpp);
552 ret += fprintf(fp, "%s\n", bf);
558 static int hist_entry__fprintf(struct hist_entry *he, size_t size,
559 char *bf, size_t bfsz, FILE *fp,
560 bool ignore_callchains)
/*
 * Print one hist entry line to @fp using the caller-provided scratch
 * buffer @bf (clamped to @bfsz).  Delegates to the hierarchy or block
 * printers when those report modes are active; otherwise formats the
 * line, prints it, and appends the entry's callchain unless
 * @ignore_callchains is set.  Returns characters printed.
 * NOTE(review): the initialization of 'ret' and the hpp .buf/.size
 * designators at line 564 are not visible in this view (numbering gaps).
 */
563 int callchain_ret = 0;
564 struct perf_hpp hpp = {
568 struct hists *hists = he->hists;
569 u64 total_period = hists->stats.total_period;
/* size == 0 means "no column limit": use the whole buffer. */
571 if (size == 0 || size > bfsz)
572 size = hpp.size = bfsz;
574 if (symbol_conf.report_hierarchy)
575 return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);
577 if (symbol_conf.report_block)
578 return hist_entry__block_fprintf(he, bf, size, fp);
580 hist_entry__snprintf(he, &hpp);
582 ret = fprintf(fp, "%s\n", bf);
584 if (hist_entry__has_callchains(he) && !ignore_callchains)
585 callchain_ret = hist_entry_callchain__fprintf(he, total_period,
588 ret += callchain_ret;
593 static int print_hierarchy_indent(const char *sep, int indent,
594 const char *line, FILE *fp)
/*
 * Print hierarchy-mode indentation by repeating @line's leading
 * characters for (indent - 2) * HIERARCHY_INDENT columns.  Does nothing
 * when a field separator is in use or the depth is too shallow.
 * Returns characters printed (0 when skipped — the early return's value
 * is not visible in this view; presumably 0).
 */
598 if (sep != NULL || indent < 2)
601 width = (indent - 2) * HIERARCHY_INDENT;
603 return fprintf(fp, "%-*.*s", width, width, line);
606 static int hists__fprintf_hierarchy_headers(struct hists *hists,
607 struct perf_hpp *hpp, FILE *fp)
/*
 * Print the two header lines for hierarchy mode: first the column
 * titles (overhead columns, then the sort headers joined with " / "
 * and '+'), then a line of dots underlining them, sized to the widest
 * header.  Returns a line count (the exact return is not visible in
 * this view).
 * NOTE(review): numbering gaps — the 'dots'/'depth'/'width'/'indent'
 * declarations, the first_node/first_col handling, and several loop
 * closers are not visible here.
 */
609 bool first_node, first_col;
613 unsigned header_width = 0;
614 struct perf_hpp_fmt *fmt;
615 struct perf_hpp_list_node *fmt_node;
616 const char *sep = symbol_conf.field_sep;
618 indent = hists->nr_hpp_node;
620 /* preserve max indent depth for column headers */
621 print_hierarchy_indent(sep, indent, " ", fp);
623 /* the first hpp_list_node is for overhead columns */
624 fmt_node = list_first_entry(&hists->hpp_formats,
625 struct perf_hpp_list_node, list);
627 perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
628 fmt->header(fmt, hpp, hists, 0, NULL);
629 fprintf(fp, "%s%s", hpp->buf, sep ?: "  ");
632 /* combine sort headers with ' / ' */
634 list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
636 header_width += fprintf(fp, " / ");
640 perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
641 if (perf_hpp__should_skip(fmt, hists))
645 header_width += fprintf(fp, "+");
648 fmt->header(fmt, hpp, hists, 0, NULL);
650 header_width += fprintf(fp, "%s", strim(hpp->buf));
656 /* preserve max indent depth for initial dots */
657 print_hierarchy_indent(sep, indent, dots, fp);
659 /* the first hpp_list_node is for overhead columns */
660 fmt_node = list_first_entry(&hists->hpp_formats,
661 struct perf_hpp_list_node, list);
664 perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
666 fprintf(fp, "%s", sep ?: "..");
669 width = fmt->width(fmt, hpp, hists);
670 fprintf(fp, "%.*s", width, dots);
/* Compute the widest combined sort-header width for the dotted rule. */
674 list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
676 width = depth * HIERARCHY_INDENT;
678 perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
679 if (perf_hpp__should_skip(fmt, hists))
683 width++; /* for '+' sign between column header */
686 width += fmt->width(fmt, hpp, hists);
689 if (width > header_width)
690 header_width = width;
695 fprintf(fp, "%s%-.*s", sep ?: "  ", header_width, dots);
697 fprintf(fp, "\n#\n");
702 static void fprintf_line(struct hists *hists, struct perf_hpp *hpp,
/*
 * Print one header line (@line-th of possibly multiple header rows):
 * every non-skipped format renders its header text into hpp->buf, which
 * is then written to @fp, separated by field_sep or a space.
 * NOTE(review): the 'line'/'fp' parameters, the 'first'/'span' handling,
 * and the loop closer are not visible in this view (numbering gaps).
 */
705 struct perf_hpp_fmt *fmt;
706 const char *sep = symbol_conf.field_sep;
710 hists__for_each_format(hists, fmt) {
711 if (perf_hpp__should_skip(fmt, hists))
715 fprintf(fp, "%s", sep ?: "  ");
719 fmt->header(fmt, hpp, hists, line, &span);
722 fprintf(fp, "%s", hpp->buf);
/*
 * Print the standard (non-hierarchy) column headers: one fprintf_line()
 * per configured header row, then — on the visible later path — a rule
 * of dashes under each column sized by fmt->width().  Returns the
 * number of header lines emitted (the early return at line 747 vs. the
 * final "+ 2" suggests the dashed rule is skipped when a field
 * separator is in use — confirm against the full source).
 * NOTE(review): the return type and the 'fp'/'line'/'width'/'i'
 * declarations are not visible in this view (numbering gaps).
 */
727 hists__fprintf_standard_headers(struct hists *hists,
728 struct perf_hpp *hpp,
731 struct perf_hpp_list *hpp_list = hists->hpp_list;
732 struct perf_hpp_fmt *fmt;
734 const char *sep = symbol_conf.field_sep;
738 for (line = 0; line < hpp_list->nr_header_lines; line++) {
739 /* first # is displayed one level up */
742 fprintf_line(hists, hpp, line, fp);
747 return hpp_list->nr_header_lines;
753 hists__for_each_format(hists, fmt) {
756 if (perf_hpp__should_skip(fmt, hists))
760 fprintf(fp, "%s", sep ?: "  ");
764 width = fmt->width(fmt, hpp, hists);
765 for (i = 0; i < width; i++)
771 return hpp_list->nr_header_lines + 2;
774 int hists__fprintf_headers(struct hists *hists, FILE *fp)
/*
 * Print the report headers, dispatching to the hierarchy or standard
 * variant depending on symbol_conf.report_hierarchy.  Returns the
 * number of header lines printed.
 * NOTE(review): the initialization of dummy_hpp's buffer fields is not
 * visible in this view (numbering gaps).
 */
777 struct perf_hpp dummy_hpp = {
784 if (symbol_conf.report_hierarchy)
785 return hists__fprintf_hierarchy_headers(hists, &dummy_hpp, fp);
787 return hists__fprintf_standard_headers(hists, &dummy_hpp, fp);
791 size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
792 int max_cols, float min_pcnt, FILE *fp,
793 bool ignore_callchains)
/*
 * Print the whole hists tree to @fp: optional headers, then one line
 * per entry (hierarchy-aware walk via __rb_hierarchy_next()), applying
 * the @min_pcnt percent limit and the @max_rows/@max_cols caps.  A
 * shared line buffer of hists__sort_list_width() + slack is allocated
 * once and reused for every entry.  Frees the "[...]" remainder symbol
 * on exit.  Returns characters printed.
 * NOTE(review): numbering gaps — declarations of ret/nr_rows/linesz/
 * line/indent/nd, the init_rem_hits() call, filtered-entry skips, the
 * malloc-failure path, free(line) and the final return are not visible
 * in this view.
 */
797 const char *sep = symbol_conf.field_sep;
805 hists__reset_column_width(hists);
807 if (symbol_conf.col_width_list_str)
808 perf_hpp__set_user_width(symbol_conf.col_width_list_str);
811 nr_rows += hists__fprintf_headers(hists, fp);
813 if (max_rows && nr_rows >= max_rows)
/* +3 for the callchain "---" lead-in, +1 for the NUL terminator. */
816 linesz = hists__sort_list_width(hists) + 3 + 1;
817 linesz += perf_hpp__color_overhead();
818 line = malloc(linesz);
824 indent = hists__overhead_width(hists) + 4;
826 for (nd = rb_first_cached(&hists->entries); nd;
827 nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
828 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
834 percent = hist_entry__get_percent_limit(h);
835 if (percent < min_pcnt)
838 ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, ignore_callchains);
840 if (max_rows && ++nr_rows >= max_rows)
844 * If all children are filtered out or percent-limited,
845 * display "no entry >= x.xx%" message.
847 if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
848 int depth = hists->nr_hpp_node + h->depth + 1;
850 print_hierarchy_indent(sep, depth, " ", fp);
851 fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);
853 if (max_rows && ++nr_rows >= max_rows)
/* Verbose debug aid: dump the thread's maps when a symbol had no map. */
857 if (h->ms.map == NULL && verbose > 1) {
858 map_groups__fprintf(h->thread->mg, fp);
859 fprintf(fp, "%.10s end\n", graph_dotted_line);
865 zfree(&rem_sq_bracket);
870 size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
875 for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
878 name = perf_event__name(i);
879 if (!strcmp(name, "UNKNOWN"))
882 ret += fprintf(fp, "%16s events: %10d\n", name, stats->nr_events[i]);