Arnaldo Carvalho de Melo | 5c58104 | 2011-01-11 22:30:02 -0200 | [diff] [blame] | 1 | #include <poll.h> |
Arnaldo Carvalho de Melo | 361c99a | 2011-01-11 20:56:53 -0200 | [diff] [blame] | 2 | #include "evlist.h" |
| 3 | #include "evsel.h" |
| 4 | #include "util.h" |
| 5 | |
Arnaldo Carvalho de Melo | 70db753 | 2011-01-12 22:39:13 -0200 | [diff] [blame] | 6 | #include <linux/bitops.h> |
| 7 | #include <linux/hash.h> |
| 8 | |
Arnaldo Carvalho de Melo | 361c99a | 2011-01-11 20:56:53 -0200 | [diff] [blame] | 9 | struct perf_evlist *perf_evlist__new(void) |
| 10 | { |
| 11 | struct perf_evlist *evlist = zalloc(sizeof(*evlist)); |
| 12 | |
| 13 | if (evlist != NULL) { |
Arnaldo Carvalho de Melo | 70db753 | 2011-01-12 22:39:13 -0200 | [diff] [blame] | 14 | int i; |
| 15 | |
| 16 | for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i) |
| 17 | INIT_HLIST_HEAD(&evlist->heads[i]); |
Arnaldo Carvalho de Melo | 361c99a | 2011-01-11 20:56:53 -0200 | [diff] [blame] | 18 | INIT_LIST_HEAD(&evlist->entries); |
| 19 | } |
| 20 | |
| 21 | return evlist; |
| 22 | } |
| 23 | |
| 24 | static void perf_evlist__purge(struct perf_evlist *evlist) |
| 25 | { |
| 26 | struct perf_evsel *pos, *n; |
| 27 | |
| 28 | list_for_each_entry_safe(pos, n, &evlist->entries, node) { |
| 29 | list_del_init(&pos->node); |
| 30 | perf_evsel__delete(pos); |
| 31 | } |
| 32 | |
| 33 | evlist->nr_entries = 0; |
| 34 | } |
| 35 | |
| 36 | void perf_evlist__delete(struct perf_evlist *evlist) |
| 37 | { |
| 38 | perf_evlist__purge(evlist); |
Arnaldo Carvalho de Melo | 70db753 | 2011-01-12 22:39:13 -0200 | [diff] [blame] | 39 | free(evlist->mmap); |
Arnaldo Carvalho de Melo | 5c58104 | 2011-01-11 22:30:02 -0200 | [diff] [blame] | 40 | free(evlist->pollfd); |
Arnaldo Carvalho de Melo | 361c99a | 2011-01-11 20:56:53 -0200 | [diff] [blame] | 41 | free(evlist); |
| 42 | } |
| 43 | |
| 44 | void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry) |
| 45 | { |
| 46 | list_add_tail(&entry->node, &evlist->entries); |
| 47 | ++evlist->nr_entries; |
| 48 | } |
| 49 | |
| 50 | int perf_evlist__add_default(struct perf_evlist *evlist) |
| 51 | { |
| 52 | struct perf_event_attr attr = { |
| 53 | .type = PERF_TYPE_HARDWARE, |
| 54 | .config = PERF_COUNT_HW_CPU_CYCLES, |
| 55 | }; |
| 56 | struct perf_evsel *evsel = perf_evsel__new(&attr, 0); |
| 57 | |
| 58 | if (evsel == NULL) |
| 59 | return -ENOMEM; |
| 60 | |
| 61 | perf_evlist__add(evlist, evsel); |
| 62 | return 0; |
| 63 | } |
Arnaldo Carvalho de Melo | 5c58104 | 2011-01-11 22:30:02 -0200 | [diff] [blame] | 64 | |
| 65 | int perf_evlist__alloc_pollfd(struct perf_evlist *evlist, int ncpus, int nthreads) |
| 66 | { |
| 67 | int nfds = ncpus * nthreads * evlist->nr_entries; |
| 68 | evlist->pollfd = malloc(sizeof(struct pollfd) * nfds); |
| 69 | return evlist->pollfd != NULL ? 0 : -ENOMEM; |
| 70 | } |
Arnaldo Carvalho de Melo | 70082dd | 2011-01-12 17:03:24 -0200 | [diff] [blame] | 71 | |
| 72 | void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd) |
| 73 | { |
| 74 | fcntl(fd, F_SETFL, O_NONBLOCK); |
| 75 | evlist->pollfd[evlist->nr_fds].fd = fd; |
| 76 | evlist->pollfd[evlist->nr_fds].events = POLLIN; |
| 77 | evlist->nr_fds++; |
| 78 | } |
Arnaldo Carvalho de Melo | 70db753 | 2011-01-12 22:39:13 -0200 | [diff] [blame] | 79 | |
| 80 | struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id) |
| 81 | { |
| 82 | struct hlist_head *head; |
| 83 | struct hlist_node *pos; |
| 84 | struct perf_sample_id *sid; |
| 85 | int hash; |
| 86 | |
| 87 | if (evlist->nr_entries == 1) |
| 88 | return list_entry(evlist->entries.next, struct perf_evsel, node); |
| 89 | |
| 90 | hash = hash_64(id, PERF_EVLIST__HLIST_BITS); |
| 91 | head = &evlist->heads[hash]; |
| 92 | |
| 93 | hlist_for_each_entry(sid, pos, head, node) |
| 94 | if (sid->id == id) |
| 95 | return sid->evsel; |
| 96 | return NULL; |
| 97 | } |
Arnaldo Carvalho de Melo | 04391de | 2011-01-15 10:40:59 -0200 | [diff] [blame] | 98 | |
/*
 * Pull the next event out of the mmap'ed ring buffer for @cpu.
 *
 * The kernel writes events into the buffer and advances the head; this
 * function consumes from md->prev up to that head. If an event wraps
 * around the end of the ring it is reassembled into evlist->event_copy
 * so the caller always sees a contiguous event.
 *
 * Return: pointer to the next event (valid until the next call for this
 * cpu, or until the writer laps us), or NULL when the buffer is empty.
 */
event_t *perf_evlist__read_on_cpu(struct perf_evlist *evlist, int cpu)
{
	/* XXX Move this to perf.c, making it generally available */
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	struct perf_mmap *md = &evlist->mmap[cpu];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	/* The first page is the control/header page; data starts after it. */
	unsigned char *data = md->base + page_size;
	event_t *event = NULL;
	int diff;

	/*
	 * If we're further behind than half the buffer, there's a chance
	 * the writer will bite our tail and mess up the samples under us.
	 *
	 * If we somehow ended up ahead of the head, we got messed up.
	 *
	 * In either case, truncate and restart at head.
	 */
	diff = head - old;
	if (diff > md->mask / 2 || diff < 0) {
		fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

		/*
		 * head points to a known good entry, start there.
		 */
		old = head;
	}

	if (old != head) {
		size_t size;

		/* md->mask folds the free-running offset into the ring. */
		event = (event_t *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			/*
			 * Copy at most sizeof(*event) bytes -- event_copy
			 * can hold no more; presumably larger payloads are
			 * not needed past the fixed-size part here.
			 */
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &evlist->event_copy;

			/* Copy in up to two chunks: tail of ring, then head. */
			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &evlist->event_copy;
		}

		/* Consume the event; even a wrapped one advances by its size. */
		old += size;
	}

	/* Remember how far we've read for the next call. */
	md->prev = old;
	return event;
}