author     Lars Ellenberg <lars@linbit.com>   2007-03-26 23:41:58 +0200
committer  Adrian Bunk <bunk@stusta.de>       2007-03-26 23:41:58 +0200
commit     6b9720b5aca01aba1fb9403f1b75bb26689223a1
tree       0a63dc62beb82fb26d71144edb6ab57ef6331c0c
parent     c920d11e0b9a2c894b99cf9a95cbd83f53c533eb
md: pass down BIO_RW_SYNC in raid{1,10}
md raidX make_request functions strip off the BIO_RW_SYNC flag, thus
introducing additional latency.

Fixing this in raid1 and raid10 seems to be straightforward enough.

For our particular usage case in DRBD, passing this flag improved some
initialization time from ~5 minutes to ~5 seconds.

Signed-off-by: Lars Ellenberg <lars@linbit.com>
Acked-by: NeilBrown <neilb@suse.de>
Signed-off-by: Adrian Bunk <bunk@stusta.de>
-rw-r--r--  drivers/md/raid1.c    13
-rw-r--r--  drivers/md/raid10.c   11
2 files changed, 17 insertions(+), 7 deletions(-)
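Aside (not part of the commit): a note on why OR-ing do_sync into bi_rw works
at all. In 2.6-era kernels, bio_sync() is defined as
((bio)->bi_rw & (1 << BIO_RW_SYNC)), so it evaluates to the BIO_RW_SYNC bit
itself rather than a 0/1 boolean; the child bio's bi_rw can therefore carry
the hint with a plain bitwise OR, and assigning a bare READ or WRITE (as the
old code did) silently drops it. The following standalone userspace model
illustrates this; the struct and helpers mirror the kernel's names but are
illustrative stand-ins, not kernel code.

/* Standalone model (not kernel code) of sync-hint propagation.
 * Bit numbers follow 2.6-era include/linux/bio.h. */
#include <assert.h>
#include <stdio.h>

#define BIO_RW       0          /* direction bit: 0 = read, 1 = write */
#define BIO_RW_SYNC  4          /* "this request is synchronous" hint  */

#define READ   0
#define WRITE  (1UL << BIO_RW)

struct bio { unsigned long bi_rw; };

/* mirrors: #define bio_sync(bio) ((bio)->bi_rw & (1 << BIO_RW_SYNC))
 * Note the return value is the mask bit, not 0/1. */
static unsigned long bio_sync(const struct bio *bio)
{
	return bio->bi_rw & (1UL << BIO_RW_SYNC);
}

int main(void)
{
	struct bio master = { .bi_rw = READ | (1UL << BIO_RW_SYNC) };
	struct bio child_old, child_new;
	unsigned long do_sync = bio_sync(&master);

	child_old.bi_rw = READ;            /* before the patch: hint dropped  */
	child_new.bi_rw = READ | do_sync;  /* after the patch: hint preserved */

	assert(!bio_sync(&child_old));     /* lost -> extra latency downstream */
	assert(bio_sync(&child_new));      /* carried through to the mirrors   */
	printf("old child sync bit=%lu, new child sync bit=%lu\n",
	       bio_sync(&child_old), bio_sync(&child_new));
	return 0;
}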
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 59cd546cdecb..31e872eb0b92 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -751,6 +751,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
struct bio_list bl;
struct page **behind_pages = NULL;
const int rw = bio_data_dir(bio);
+ const int do_sync = bio_sync(bio);
int do_barriers;
if (unlikely(!mddev->barriers_work && bio_barrier(bio))) {
@@ -805,7 +806,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
read_bio->bi_bdev = mirror->rdev->bdev;
read_bio->bi_end_io = raid1_end_read_request;
- read_bio->bi_rw = READ;
+ read_bio->bi_rw = READ | do_sync;
read_bio->bi_private = r1_bio;
generic_make_request(read_bio);
@@ -876,7 +877,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset;
mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
mbio->bi_end_io = raid1_end_write_request;
- mbio->bi_rw = WRITE | do_barriers;
+ mbio->bi_rw = WRITE | do_barriers | do_sync;
mbio->bi_private = r1_bio;
if (behind_pages) {
@@ -911,6 +912,8 @@ static int make_request(request_queue_t *q, struct bio * bio)
blk_plug_device(mddev->queue);
spin_unlock_irqrestore(&conf->device_lock, flags);
+ if (do_sync)
+ md_wakeup_thread(mddev->thread);
#if 0
while ((bio = bio_list_pop(&bl)) != NULL)
generic_make_request(bio);
@@ -1404,6 +1407,7 @@ static void raid1d(mddev_t *mddev)
* all others have had their bios[] entry cleared.
*/
int i;
+ const int do_sync = bio_sync(r1_bio->master_bio);
clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
clear_bit(R1BIO_Barrier, &r1_bio->state);
for (i=0; i < conf->raid_disks; i++)
@@ -1421,7 +1425,7 @@ static void raid1d(mddev_t *mddev)
conf->mirrors[i].rdev->data_offset;
bio->bi_bdev = conf->mirrors[i].rdev->bdev;
bio->bi_end_io = raid1_end_write_request;
- bio->bi_rw = WRITE;
+ bio->bi_rw = WRITE | do_sync;
bio->bi_private = r1_bio;
r1_bio->bios[i] = bio;
generic_make_request(bio);
@@ -1518,6 +1522,7 @@ static void raid1d(mddev_t *mddev)
(unsigned long long)r1_bio->sector);
raid_end_bio_io(r1_bio);
} else {
+ const int do_sync = bio_sync(r1_bio->master_bio);
r1_bio->bios[r1_bio->read_disk] =
mddev->ro ? IO_BLOCKED : NULL;
r1_bio->read_disk = disk;
@@ -1533,7 +1538,7 @@ static void raid1d(mddev_t *mddev)
bio->bi_sector = r1_bio->sector + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
bio->bi_end_io = raid1_end_read_request;
- bio->bi_rw = READ;
+ bio->bi_rw = READ | do_sync;
bio->bi_private = r1_bio;
unplug = 1;
generic_make_request(bio);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 039ed49b3c03..0a11d5c20b5b 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -748,6 +748,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
int i;
int chunk_sects = conf->chunk_mask + 1;
const int rw = bio_data_dir(bio);
+ const int do_sync = bio_sync(bio);
struct bio_list bl;
unsigned long flags;
@@ -829,7 +830,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
mirror->rdev->data_offset;
read_bio->bi_bdev = mirror->rdev->bdev;
read_bio->bi_end_io = raid10_end_read_request;
- read_bio->bi_rw = READ;
+ read_bio->bi_rw = READ | do_sync;
read_bio->bi_private = r10_bio;
generic_make_request(read_bio);
@@ -875,7 +876,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
conf->mirrors[d].rdev->data_offset;
mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
mbio->bi_end_io = raid10_end_write_request;
- mbio->bi_rw = WRITE;
+ mbio->bi_rw = WRITE | do_sync;
mbio->bi_private = r10_bio;
atomic_inc(&r10_bio->remaining);
@@ -888,6 +889,9 @@ static int make_request(request_queue_t *q, struct bio * bio)
blk_plug_device(mddev->queue);
spin_unlock_irqrestore(&conf->device_lock, flags);
+ if (do_sync)
+ md_wakeup_thread(mddev->thread);
+
return 0;
}
@@ -1488,6 +1492,7 @@ static void raid10d(mddev_t *mddev)
(unsigned long long)r10_bio->sector);
raid_end_bio_io(r10_bio);
} else {
+ const int do_sync = bio_sync(r10_bio->master_bio);
rdev = conf->mirrors[mirror].rdev;
if (printk_ratelimit())
printk(KERN_ERR "raid10: %s: redirecting sector %llu to"
@@ -1499,7 +1504,7 @@ static void raid10d(mddev_t *mddev)
bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr
+ rdev->data_offset;
bio->bi_bdev = rdev->bdev;
- bio->bi_rw = READ;
+ bio->bi_rw = READ | do_sync;
bio->bi_private = r10_bio;
bio->bi_end_io = raid10_end_read_request;
unplug = 1;
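Aside (not part of the commit): the two "if (do_sync) md_wakeup_thread(...)"
hunks address a second latency source. Write bios in raid1/raid10 are not
submitted directly from make_request; they are queued on a pending bio list
and submitted later by the raid1d/raid10d daemon thread, normally once the
block-layer unplug timer fires. Waking the thread immediately for a sync bio
skips that wait. Below is a rough userspace model of the pattern using
pthreads; raid_daemon, make_request, the pending counter, and the 3 ms
timeout are illustrative assumptions standing in for the kernel machinery,
not its actual code.

/* Userspace model (pthreads, hypothetical names) of waking a daemon
 * thread early for sync requests instead of waiting for a periodic
 * "unplug" flush. Build with: cc model.c -lpthread */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;
static int pending;   /* stand-in for the queued pending bio list */
static bool stop;

static void *raid_daemon(void *arg)   /* stand-in for raid1d/raid10d */
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!stop) {
		/* wait for an explicit wakeup, or the ~3 ms unplug timer */
		struct timespec ts;
		clock_gettime(CLOCK_REALTIME, &ts);
		ts.tv_nsec += 3 * 1000 * 1000;
		if (ts.tv_nsec >= 1000000000L) {
			ts.tv_sec++;
			ts.tv_nsec -= 1000000000L;
		}
		pthread_cond_timedwait(&wake, &lock, &ts);
		if (pending) {
			printf("daemon: submitting %d queued bio(s)\n", pending);
			pending = 0;
		}
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void make_request(bool do_sync)
{
	pthread_mutex_lock(&lock);
	pending++;
	if (do_sync)
		pthread_cond_signal(&wake);  /* models md_wakeup_thread() */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, raid_daemon, NULL);
	make_request(false);     /* flushed on the next timer tick */
	usleep(10 * 1000);
	make_request(true);      /* daemon woken immediately */
	usleep(10 * 1000);
	pthread_mutex_lock(&lock);
	stop = true;
	pthread_cond_signal(&wake);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}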