#include "annotate.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evsel.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);

enum hist_filter {
	HIST_FILTER__DSO,
	HIST_FILTER__THREAD,
	HIST_FILTER__PARENT,
	HIST_FILTER__SYMBOL,
};

struct callchain_param	callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5,
	.order  = ORDER_CALLEE
};

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

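/*
 * Widen the per-column display widths so that this entry's symbol, comm,
 * dso, parent, branch and mem strings all fit.
 */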
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}
		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void hist_entry__add_cpumode_period(struct hist_entry *he,
					   unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he->stat.period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he->stat.period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he->stat.period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he->stat.period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period		+= period;
	he_stat->weight		+= weight;
	he_stat->nr_events	+= 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period		+= src->period;
	dest->period_sys	+= src->period_sys;
	dest->period_us		+= src->period_us;
	dest->period_guest_sys	+= src->period_guest_sys;
	dest->period_guest_us	+= src->period_guest_us;
	dest->nr_events		+= src->nr_events;
	dest->weight		+= src->weight;
}

static void hist_entry__decay(struct hist_entry *he)
{
	he->stat.period = (he->stat.period * 7) / 8;
	he->stat.nr_events = (he->stat.nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;

	if (prev_period == 0)
		return true;

	hist_entry__decay(he);

	if (!he->filtered)
		hists->stats.total_period -= prev_period - he->stat.period;

	return he->stat.period == 0;
}

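/*
 * Age all entries, for live-updating tools such as 'perf top': periods are
 * decayed and entries whose period drops to zero are removed, unless the
 * browser still has them marked as in use.
 */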
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here in
		 * case it gets new samples; we'll eventually free it when the
		 * user stops browsing and it again gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			hist_entry__free(n);
			--hists->nr_entries;
		}
	}
}

/*
 * histogram, sorted on item, collects periods
 */

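/*
 * Allocate a hist_entry from a template, duplicating the parts the caller
 * may free afterwards (branch_info) and pinning the maps it references.
 */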
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
	struct hist_entry *he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (he->ms.map)
			he->ms.map->referenced = true;

		if (he->branch_info) {
			/*
			 * This branch info is (a part of) allocated from
			 * machine__resolve_bstack() and will be freed after
			 * adding new entries.  So we need to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			if (he->branch_info->from.map)
				he->branch_info->from.map->referenced = true;
			if (he->branch_info->to.map)
				he->branch_info->to.map->referenced = true;
		}

		if (he->mem_info) {
			if (he->mem_info->iaddr.map)
				he->mem_info->iaddr.map->referenced = true;
			if (he->mem_info->daddr.map)
				he->mem_info->daddr.map->referenced = true;
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
	}

	return he;
}

void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered) {
		hists__calc_col_len(hists, h);
		++hists->nr_entries;
		hists->stats.total_period += h->stat.period;
	}
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

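/*
 * Insert the entry into hists->entries_in, merging its period and weight
 * into an existing entry when the sort keys compare equal.
 */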
static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al,
					 u64 period,
					 u64 weight)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int cmp;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in a same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			he_stat__add_period(&he->stat, period, weight);

			/*
			 * This mem info was allocated from machine__resolve_mem
			 * and will not be used anymore.
			 */
			free(entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry);
	if (!he)
		return NULL;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	hist_entry__add_cpumode_period(he, al->cpumode, period);
	return he;
}

struct hist_entry *__hists__add_mem_entry(struct hists *self,
					  struct addr_location *al,
					  struct symbol *sym_parent,
					  struct mem_info *mi,
					  u64 period,
					  u64 weight)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.stat = {
			.period	= period,
			.weight = weight,
			.nr_events = 1,
		},
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.hists = self,
		.mem_info = mi,
		.branch_info = NULL,
	};
	return add_hist_entry(self, &entry, al, period, weight);
}

struct hist_entry *__hists__add_branch_entry(struct hists *self,
					     struct addr_location *al,
					     struct symbol *sym_parent,
					     struct branch_info *bi,
					     u64 period,
					     u64 weight)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= bi->to.map,
			.sym	= bi->to.sym,
		},
		.cpu	= al->cpu,
		.ip	= bi->to.addr,
		.level	= al->level,
		.stat = {
			.period	= period,
			.nr_events = 1,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.branch_info = bi,
		.hists	= self,
		.mem_info = NULL,
	};

	return add_hist_entry(self, &entry, al, period, weight);
}

struct hist_entry *__hists__add_entry(struct hists *self,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 period,
				      u64 weight)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.stat = {
			.period	= period,
			.nr_events = 1,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.hists	= self,
		.branch_info = NULL,
		.mem_info = NULL,
	};

	return add_hist_entry(self, &entry, al, period, weight);
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
	free(he->branch_info);
	free(he->mem_info);
	free(he);
}

/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}

579{
580 struct rb_root *root;
581
582 pthread_mutex_lock(&hists->lock);
583
584 root = hists->entries_in;
585 if (++hists->entries_in > &hists->entries_in_array[1])
586 hists->entries_in = &hists->entries_in_array[0];
587
588 pthread_mutex_unlock(&hists->lock);
589
590 return root;
591}
592
Arnaldo Carvalho de Melo90cf1fb2011-10-19 13:09:10 -0200593static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
594{
595 hists__filter_entry_by_dso(hists, he);
596 hists__filter_entry_by_thread(hists, he);
Namhyung Kime94d53e2012-03-16 17:50:51 +0900597 hists__filter_entry_by_symbol(hists, he);
Arnaldo Carvalho de Melo90cf1fb2011-10-19 13:09:10 -0200598}
599
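/*
 * Merge entries that compare equal under the collapse sort keys into
 * hists->entries_collapsed, re-applying the active filters to entries that
 * were not combined with an already collapsed one.
 */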
void hists__collapse_resort(struct hists *hists)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
	}
}

/*
 * reverse the map, sort on period.
 */

static int period_cmp(u64 period_a, u64 period_b)
{
	if (period_a > period_b)
		return 1;
	if (period_a < period_b)
		return -1;
	return 0;
}

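/*
 * Compare two entries by period; for event groups, fall back to comparing
 * the group members' periods one by one.
 */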
static int hist_entry__sort_on_period(struct hist_entry *a,
				      struct hist_entry *b)
{
	int ret;
	int i, nr_members;
	struct perf_evsel *evsel;
	struct hist_entry *pair;
	u64 *periods_a, *periods_b;

	ret = period_cmp(a->stat.period, b->stat.period);
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	nr_members = evsel->nr_members;
	if (nr_members <= 1)
		return ret;

	periods_a = zalloc(sizeof(*periods_a) * nr_members);
	periods_b = zalloc(sizeof(*periods_b) * nr_members);

	if (!periods_a || !periods_b)
		goto out;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		periods_a[perf_evsel__group_idx(evsel)] = pair->stat.period;
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		periods_b[perf_evsel__group_idx(evsel)] = pair->stat.period;
	}

	for (i = 1; i < nr_members; i++) {
		ret = period_cmp(periods_a[i], periods_b[i]);
		if (ret)
			break;
	}

out:
	free(periods_a);
	free(periods_b);

	return ret;
}

static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				      min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort_on_period(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

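/*
 * Rebuild hists->entries sorted by period for output, recomputing column
 * widths and the total period as entries are inserted.
 */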
void hists__output_resort(struct hists *hists)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists->nr_entries = 0;
	hists->stats.total_period = 0;
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_nr_entries(hists, n);
	}
}

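/*
 * Clear one filter bit on the entry and, if no filter bit remains set,
 * account it back into the hists totals and column widths.
 */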
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	++hists->nr_entries;
	if (h->ms.unfolded)
		hists->nr_entries += h->nr_rows;
	h->row_offset = 0;
	hists->stats.total_period += h->stat.period;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events;

	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
	return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

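/*
 * Add a zeroed entry to 'hists' mirroring 'pair'; used by hists__link() to
 * represent entries that exist only in the other hists.
 */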
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_nr_entries(hists, he);
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the leader, if
 * we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}