author	Frederic Weisbecker <fweisbec@gmail.com>	2010-01-09 21:05:28 +0100
committer	Frederic Weisbecker <fweisbec@gmail.com>	2010-01-16 12:30:28 +0100
commit	e286417378b4f9ce6e473b556193465ab22e12ab (patch)
tree	03da147ea654d6475ae3c739906c4e8f6804d0db /kernel/perf_event.c
parent	5908cdc85eb30f8d07f2cb11d4a62334d7229048 (diff)
perf: Round robin flexible groups of events using list_rotate_left()
This is more proper than doing it through a list_for_each_entry() that breaks after the first entry.

v2: Don't rotate pinned groups, as it's not needed to time-share them.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--	kernel/perf_event.c	17
1 file changed, 4 insertions(+), 13 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index c9f8a757649..bbebe283263 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1454,25 +1454,16 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
  */
 static void rotate_ctx(struct perf_event_context *ctx)
 {
-	struct perf_event *event;
-
 	if (!ctx->nr_events)
 		return;
 	raw_spin_lock(&ctx->lock);
-	/*
-	 * Rotate the first entry last (works just fine for group events too):
-	 */
+
+	/* Rotate the first entry last of non-pinned groups */
 	perf_disable();
-	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
-		list_move_tail(&event->group_entry, &ctx->pinned_groups);
-		break;
-	}
-	list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
-		list_move_tail(&event->group_entry, &ctx->flexible_groups);
-		break;
-	}
+	list_rotate_left(&ctx->flexible_groups);
+
 	perf_enable();
 	raw_spin_unlock(&ctx->lock);
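
For reference, list_rotate_left() moves the first entry of a non-empty list to its tail, which is exactly what the removed open-coded loop did for ctx->flexible_groups. Below is a minimal userspace sketch of that rotation semantics; the simplified list helpers, the struct group type and the sample names are illustrative stand-ins for the kernel's <linux/list.h> and for perf event groups, not code from this patch.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's circular doubly-linked list_head. */
struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *head)
{
	head->next = head;
	head->prev = head;
}

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

/* Unlink @entry and append it at the tail of @head. */
static void list_move_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	list_add_tail(entry, head);
}

/* Rotate left: the first entry becomes the last one. */
static void list_rotate_left(struct list_head *head)
{
	if (!list_empty(head))
		list_move_tail(head->next, head);
}

/* Toy "flexible group": just a name plus its list linkage. */
struct group {
	const char *name;
	struct list_head entry;
};

int main(void)
{
	struct list_head flexible_groups;
	struct group g[3] = { { "A" }, { "B" }, { "C" } };
	struct list_head *pos;
	int tick, i;

	INIT_LIST_HEAD(&flexible_groups);
	for (i = 0; i < 3; i++)
		list_add_tail(&g[i].entry, &flexible_groups);

	/* One rotation per "tick": a different group leads each time. */
	for (tick = 0; tick < 3; tick++) {
		printf("tick %d:", tick);
		for (pos = flexible_groups.next; pos != &flexible_groups; pos = pos->next) {
			struct group *grp = (struct group *)
				((char *)pos - offsetof(struct group, entry));
			printf(" %s", grp->name);
		}
		printf("\n");
		list_rotate_left(&flexible_groups);
	}
	return 0;
}

Each iteration prints the groups with a different leader (A B C, then B C A, then C A B), which is how rotating ctx->flexible_groups time-shares the PMU among flexible groups at each tick, while pinned groups keep their position and are no longer rotated.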