author    NeilBrown <neilb@suse.de>  2012-03-19 12:46:39 +1100
committer NeilBrown <neilb@suse.de>  2012-03-19 12:46:39 +1100
commit    ba13da47ffa202784355561f72160a41350e95cc (patch)
tree      9b60f27ab89e4036df65d0dc3bf52b7420f83a50 /drivers/md/linear.c
parent    dafb20fa34320a472deb7442f25a0c086e0feb33 (diff)
md: add proper merge_bvec handling to RAID0 and Linear.
These personalities currently set a max request size of one page when any member device has a merge_bvec_fn, because they don't bother to call that function. This causes extra work in splitting and combining requests. So make the extra effort to call the merge_bvec_fn when it exists, so that we end up with larger requests out the bottom.

Signed-off-by: NeilBrown <neilb@suse.de>
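For illustration only (not part of the commit): a minimal userspace sketch of the new return-path arithmetic in linear_mergeable_bvec(). The function name mergeable_bytes, the hard-coded PAGE_SIZE and the sample values are stand-ins for this sketch; the real code operates on struct bvec_merge_data inside the kernel, as shown in the hunks below.

/*
 * Illustrative userspace model (not kernel code) of the return-path
 * arithmetic introduced by this patch.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL

/*
 * maxsectors:  sectors left on the chosen member device before its end
 * bio_sectors: sectors already in the bio being built
 * maxbytes:    bytes this bio_vec may add, possibly already reduced by
 *              the member device's own merge_bvec_fn
 */
static int mergeable_bytes(unsigned long maxsectors,
                           unsigned long bio_sectors,
                           int maxbytes)
{
        if (maxsectors < bio_sectors)
                maxsectors = 0;
        else
                maxsectors -= bio_sectors;

        /* An empty bio near the device boundary may still take maxbytes. */
        if (maxsectors <= (PAGE_SIZE >> 9) && bio_sectors == 0)
                return maxbytes;

        /* Never return more than the member's merge_bvec_fn allowed. */
        if (maxsectors > (unsigned long)(maxbytes >> 9))
                return maxbytes;
        else
                return maxsectors << 9;
}

int main(void)
{
        /* Plenty of room on the member device: its cap (4096) wins. */
        printf("%d\n", mergeable_bytes(1024, 8, 4096));
        /* Only 4 sectors remain past the current bio: 4 << 9 = 2048 bytes. */
        printf("%d\n", mergeable_bytes(12, 8, 4096));
        return 0;
}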
Diffstat (limited to 'drivers/md/linear.c')
-rw-r--r--  drivers/md/linear.c  30
1 file changed, 15 insertions, 15 deletions
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 67940741b19d..b0fcc7d02adb 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -68,10 +68,19 @@ static int linear_mergeable_bvec(struct request_queue *q,
 	struct dev_info *dev0;
 	unsigned long maxsectors, bio_sectors = bvm->bi_size >> 9;
 	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
+	int maxbytes = biovec->bv_len;
+	struct request_queue *subq;
 
 	rcu_read_lock();
 	dev0 = which_dev(mddev, sector);
 	maxsectors = dev0->end_sector - sector;
+	subq = bdev_get_queue(dev0->rdev->bdev);
+	if (subq->merge_bvec_fn) {
+		bvm->bi_bdev = dev0->rdev->bdev;
+		bvm->bi_sector -= dev0->end_sector - dev0->rdev->sectors;
+		maxbytes = min(maxbytes, subq->merge_bvec_fn(subq, bvm,
+							     biovec));
+	}
 	rcu_read_unlock();
 
 	if (maxsectors < bio_sectors)
@@ -80,12 +89,12 @@ static int linear_mergeable_bvec(struct request_queue *q,
 		maxsectors -= bio_sectors;
 
 	if (maxsectors <= (PAGE_SIZE >> 9 ) && bio_sectors == 0)
-		return biovec->bv_len;
-	/* The bytes available at this offset could be really big,
-	 * so we cap at 2^31 to avoid overflow */
-	if (maxsectors > (1 << (31-9)))
-		return 1<<31;
-	return maxsectors << 9;
+		return maxbytes;
+
+	if (maxsectors > (maxbytes >> 9))
+		return maxbytes;
+	else
+		return maxsectors << 9;
 }
 
 static int linear_congested(void *data, int bits)
@@ -158,15 +167,6 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
 		disk_stack_limits(mddev->gendisk, rdev->bdev,
 				  rdev->data_offset << 9);
-		/* as we don't honour merge_bvec_fn, we must never risk
-		 * violating it, so limit max_segments to 1 lying within
-		 * a single page.
-		 */
-		if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
-			blk_queue_max_segments(mddev->queue, 1);
-			blk_queue_segment_boundary(mddev->queue,
-						   PAGE_CACHE_SIZE - 1);
-		}
 
 		conf->array_sectors += rdev->sectors;
 
 		cnt++;