aboutsummaryrefslogtreecommitdiff
path: root/drivers/md/dm.c
diff options
context:
space:
mode:
authorMike Snitzer <snitzer@redhat.com>2020-09-14 13:50:49 -0400
committerMike Snitzer <snitzer@redhat.com>2020-09-29 16:33:03 -0400
commit094ee64d7de8ab72b495ff9c03d86a60272da56d (patch)
tree49673ac303a09c5991b3a170dbc95493c3e4a254 /drivers/md/dm.c
parent5091cdec56faeaefa79de4b6cb3c3c55e50d1ac3 (diff)
dm: push md->immutable_target optimization down to __process_bio()
Also, update associated stale comment in __bind().

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r--drivers/md/dm.c22
1 file changed, 9 insertions(+), 13 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 82886b4edab8..e1cb3b9fd207 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1680,7 +1680,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
* fact that targets that use it do _not_ have a need to split bios.
*/
static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map,
- struct bio *bio, struct dm_target *ti)
+ struct bio *bio)
{
struct clone_info ci;
blk_qc_t ret = BLK_QC_T_NONE;
@@ -1705,6 +1705,12 @@ static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map,
/* dec_pending submits any data associated with flush */
} else {
struct dm_target_io *tio;
+ struct dm_target *ti = md->immutable_target;
+
+ if (WARN_ON_ONCE(!ti)) {
+ error = -EIO;
+ goto out;
+ }
ci.bio = bio;
ci.sector_count = bio_sectors(bio);
@@ -1724,21 +1730,12 @@ static blk_qc_t dm_process_bio(struct mapped_device *md,
struct dm_table *map, struct bio *bio)
{
blk_qc_t ret = BLK_QC_T_NONE;
- struct dm_target *ti = md->immutable_target;
if (unlikely(!map)) {
bio_io_error(bio);
return ret;
}
- if (!ti) {
- ti = dm_table_find_target(map, bio->bi_iter.bi_sector);
- if (unlikely(!ti)) {
- bio_io_error(bio);
- return ret;
- }
- }
-
/*
* If in ->submit_bio we need to use blk_queue_split(), otherwise
* queue_limits for abnormal requests (e.g. discard, writesame, etc)
@@ -1753,7 +1750,7 @@ static blk_qc_t dm_process_bio(struct mapped_device *md,
}
if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
- return __process_bio(md, map, bio, ti);
+ return __process_bio(md, map, bio);
return __split_and_process_bio(md, map, bio);
}
@@ -2120,8 +2117,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
/*
* Leverage the fact that request-based DM targets and
* NVMe bio based targets are immutable singletons
- * - used to optimize both dm_request_fn and dm_mq_queue_rq;
- * and __process_bio.
+ * - used to optimize both __process_bio and dm_mq_queue_rq
*/
md->immutable_target = dm_table_get_immutable_target(t);
}