From e4e80e0bec9e03b6e323857e9bb2820f5bbe6287 Mon Sep 17 00:00:00 2001
From: NeilBrown
Date: Wed, 22 Jan 2014 11:45:03 +1100
Subject: md/raid5: close recently introduced race in stripe_head management.

commit 7da9d450ab2843bf1db378c156acc6304dbc1c2b upstream.

As release_stripe and __release_stripe decrement ->count and then
manipulate ->lru both under ->device_lock, it is important that
get_active_stripe() increments ->count and clears ->lru also under
->device_lock.

However we currently list_del_init ->lru under the lock, but increment
the ->count outside the lock.  This can lead to races and list
corruption.

So move the atomic_inc(&sh->count) up inside the ->device_lock
protected region.

Note that we still increment ->count without device lock in the case
where get_free_stripe() was called, and in fact don't take
->device_lock at all in that path.  This is safe because if the
stripe_head can be found by get_free_stripe, then the hash lock assures
us that no-one else could possibly be calling release_stripe() at the
same time.

Fixes: 566c09c53455d7c4f1130928ef8071da1a24ea65
Reported-and-tested-by: Ian Kumlien
Signed-off-by: NeilBrown
Signed-off-by: Greg Kroah-Hartman
---
 drivers/md/raid5.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 3088d3af5a89..03f82ab87d9e 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -675,8 +675,10 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
 				    || !conf->inactive_blocked),
 				    *(conf->hash_locks + hash));
 			conf->inactive_blocked = 0;
-		} else
+		} else {
 			init_stripe(sh, sector, previous);
+			atomic_inc(&sh->count);
+		}
 	} else {
 		spin_lock(&conf->device_lock);
 		if (atomic_read(&sh->count)) {
@@ -695,13 +697,11 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
 				sh->group = NULL;
 			}
 		}
+		atomic_inc(&sh->count);
 		spin_unlock(&conf->device_lock);
 	}
 	} while (sh == NULL);
 
-	if (sh)
-		atomic_inc(&sh->count);
-
 	spin_unlock_irq(conf->hash_locks + hash);
 	return sh;
 }
-- 
cgit v1.2.3
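
[Editor's sketch, not part of the patch.]  For readers unfamiliar with the
stripe cache, the following minimal user-space model illustrates the locking
rule the patch restores: the refcount increment and the removal from the
free/inactive list must happen in the same critical section as the
decrement-and-requeue on the release side.  The struct, lock, and helper
names below are invented for illustration and are not raid5.c code.

/*
 * Simplified model of the race -- not kernel code.  If get_obj()
 * incremented the count outside the lock, a concurrent put_obj()
 * could observe count == 0 after the list removal but before the
 * increment, and recycle an object that is still in use.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct obj {
	atomic_int count;	/* models sh->count */
	bool on_free_list;	/* models sh->lru membership */
};

static pthread_mutex_t device_lock = PTHREAD_MUTEX_INITIALIZER;

/* Pattern enforced by the patch: remove from the list and take the
 * reference inside one ->device_lock-protected region. */
static void get_obj(struct obj *o)
{
	pthread_mutex_lock(&device_lock);
	o->on_free_list = false;		/* list_del_init(&sh->lru) */
	atomic_fetch_add(&o->count, 1);		/* atomic_inc(&sh->count) */
	pthread_mutex_unlock(&device_lock);
}

/* Models the release side: drop the reference and, if it reaches
 * zero, put the object back on the free list, all under the lock. */
static void put_obj(struct obj *o)
{
	pthread_mutex_lock(&device_lock);
	if (atomic_fetch_sub(&o->count, 1) == 1)
		o->on_free_list = true;
	pthread_mutex_unlock(&device_lock);
}

The get_free_stripe() path mentioned in the changelog corresponds to taking an
object that release could not be touching concurrently, which is why that path
may skip the lock around its increment.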