aboutsummaryrefslogtreecommitdiff
path: root/kernel
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2009-03-13 12:21:36 +0100
committerIngo Molnar <mingo@elte.hu>2009-04-06 09:29:43 +0200
commit592903cdcbf606a838056bae6d03fc557806c914 (patch)
tree6851004446a405654ff3f1c39a70b313456544a5 /kernel
parentd6d020e9957745c61285ef3da9f294c5e6801f0f (diff)
perf_counter: add an event_list
I noticed that the counter_list only includes top-level counters, thus perf_swcounter_event() will miss sw-counters in groups.

Since perf_swcounter_event() also wants an RCU safe list, create a new event_list that includes all counters and uses RCU list ops, and use call_rcu to free the counter structure.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/perf_counter.c30
1 file changed, 19 insertions, 11 deletions
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index f9330d5827c..8d6ecfa64c0 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -22,6 +22,7 @@
#include <linux/perf_counter.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
+#include <linux/rculist.h>
/*
* Each CPU has a list of per CPU counters:
@@ -72,6 +73,8 @@ list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
list_add_tail(&counter->list_entry, &ctx->counter_list);
else
list_add_tail(&counter->list_entry, &group_leader->sibling_list);
+
+ list_add_rcu(&counter->event_entry, &ctx->event_list);
}
static void
@@ -80,6 +83,7 @@ list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
struct perf_counter *sibling, *tmp;
list_del_init(&counter->list_entry);
+ list_del_rcu(&counter->event_entry);
/*
* If this was a group counter with sibling counters then
@@ -1133,6 +1137,14 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
return ctx;
}
+static void free_counter_rcu(struct rcu_head *head)
+{
+ struct perf_counter *counter;
+
+ counter = container_of(head, struct perf_counter, rcu_head);
+ kfree(counter);
+}
+
/*
* Called when the last reference to the file is gone.
*/
@@ -1151,7 +1163,7 @@ static int perf_release(struct inode *inode, struct file *file)
mutex_unlock(&counter->mutex);
mutex_unlock(&ctx->mutex);
- kfree(counter);
+ call_rcu(&counter->rcu_head, free_counter_rcu);
put_context(ctx);
return 0;
@@ -1491,22 +1503,16 @@ static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
int nmi, struct pt_regs *regs)
{
struct perf_counter *counter;
- unsigned long flags;
- if (list_empty(&ctx->counter_list))
+ if (list_empty(&ctx->event_list))
return;
- spin_lock_irqsave(&ctx->lock, flags);
-
- /*
- * XXX: make counter_list RCU safe
- */
- list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
if (perf_swcounter_match(counter, event, regs))
perf_swcounter_add(counter, nr, nmi, regs);
}
-
- spin_unlock_irqrestore(&ctx->lock, flags);
+ rcu_read_unlock();
}
void perf_swcounter_event(enum hw_event_types event, u64 nr,
@@ -1846,6 +1852,7 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,
mutex_init(&counter->mutex);
INIT_LIST_HEAD(&counter->list_entry);
+ INIT_LIST_HEAD(&counter->event_entry);
INIT_LIST_HEAD(&counter->sibling_list);
init_waitqueue_head(&counter->waitq);
@@ -1992,6 +1999,7 @@ __perf_counter_init_context(struct perf_counter_context *ctx,
spin_lock_init(&ctx->lock);
mutex_init(&ctx->mutex);
INIT_LIST_HEAD(&ctx->counter_list);
+ INIT_LIST_HEAD(&ctx->event_list);
ctx->task = task;
}