dm crypt: use per-bio data
Change dm-crypt so that it uses auxiliary data allocated with the bio.
Dm-crypt requires two allocations per request: a struct dm_crypt_io and
a struct ablkcipher_request (with other data appended to it).
Previously, both were always allocated from mempools.
Some requests may require more dm_crypt_ios and ablkcipher_requests;
however, most requests need just one of each of these two structures to
complete.
This patch changes it so that the first dm_crypt_io and
ablkcipher_request are allocated with the bio (using the
per_bio_data_size target option). If a request needs additional
dm_crypt_ios or ablkcipher_requests, they are allocated from the
mempools.
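As a sketch (not part of the patch itself), the reserved per-bio area is
laid out as

	[ struct dm_crypt_io ][ ablkcipher_request, cc->dmreq_start bytes ][ struct dm_crypt_request ][ IV ]

so the common case reduces to the two steps below: reserve the space in
the constructor and pick it up in the map function.

	/* constructor: tell device-mapper how much per-bio space to reserve */
	cc->per_bio_data_size = ti->per_bio_data_size =
		sizeof(struct dm_crypt_io) + cc->dmreq_start +
		sizeof(struct dm_crypt_request) + cc->iv_size;

	/* map: use the reserved area instead of the mempools */
	struct dm_crypt_io *io = dm_per_bio_data(bio, cc->per_bio_data_size);
	io->ctx.req = (struct ablkcipher_request *)(io + 1);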
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
(cherry picked from commit 298a9fa08a1577211d42a75e8fc073baef61e0d9)
Signed-off-by: Alex Shi <alex.shi@linaro.org>
Conflicts:
drivers/md/dm-crypt.c
Solutions:
reuse the old bio->bi_sector field instead of the bvec iterator
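For reference, the upstream commit is written against the immutable
biovec iterator (v3.14 and later), where the starting sector lives in
bio->bi_iter.bi_sector; this tree predates that API. A rough sketch of
the difference in crypt_map() (upstream line reconstructed, not taken
from this tree):

	/* upstream, v3.14+ biovec iterator: */
	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));

	/* this backport, pre-iterator field: */
	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_sector));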
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index dc89a1e..686cb08 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -61,7 +61,7 @@
int error;
sector_t sector;
struct dm_crypt_io *base_io;
-};
+} CRYPTO_MINALIGN_ATTR;
struct dm_crypt_request {
struct convert_context *ctx;
@@ -164,6 +164,8 @@
*/
unsigned int dmreq_start;
+ unsigned int per_bio_data_size;
+
unsigned long flags;
unsigned int key_size;
unsigned int key_parts; /* independent parts in key buffer */
@@ -906,6 +908,19 @@
kcryptd_async_done, dmreq_of_req(cc, ctx->req));
}
+static void crypt_free_req(struct crypt_config *cc,
+			   struct ablkcipher_request *req, struct bio *base_bio)
+{
+	struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
+
+	/*
+	 * The inline request sits immediately after struct dm_crypt_io in
+	 * the per-bio data; only requests allocated from the mempool need
+	 * to be freed.
+	 */
+	if ((struct ablkcipher_request *)(io + 1) != req)
+		mempool_free(req, cc->req_pool);
+}
+
/*
* Encrypt / decrypt data from one bio to another one (can be the same one)
*/
@@ -1020,12 +1031,9 @@
}
}
-static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc,
- struct bio *bio, sector_t sector)
+static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
+ struct bio *bio, sector_t sector)
{
- struct dm_crypt_io *io;
-
- io = mempool_alloc(cc->io_pool, GFP_NOIO);
io->cc = cc;
io->base_bio = bio;
io->sector = sector;
@@ -1033,8 +1041,6 @@
io->base_io = NULL;
io->ctx.req = NULL;
atomic_set(&io->io_pending, 0);
-
- return io;
}
static void crypt_inc_pending(struct dm_crypt_io *io)
@@ -1058,8 +1064,9 @@
return;
if (io->ctx.req)
- mempool_free(io->ctx.req, cc->req_pool);
- mempool_free(io, cc->io_pool);
+ crypt_free_req(cc, io->ctx.req, base_bio);
+ if (io != dm_per_bio_data(base_bio, cc->per_bio_data_size))
+ mempool_free(io, cc->io_pool);
if (likely(!base_io))
bio_endio(base_bio, error);
@@ -1267,8 +1274,8 @@
* between fragments, so switch to a new dm_crypt_io structure.
*/
if (unlikely(!crypt_finished && remaining)) {
- new_io = crypt_io_alloc(io->cc, io->base_bio,
- sector);
+ new_io = mempool_alloc(cc->io_pool, GFP_NOIO);
+ crypt_io_init(new_io, io->cc, io->base_bio, sector);
crypt_inc_pending(new_io);
crypt_convert_init(cc, &new_io->ctx, NULL,
io->base_bio, sector);
@@ -1338,7 +1345,7 @@
if (error < 0)
io->error = -EIO;
- mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);
+ crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
if (!atomic_dec_and_test(&ctx->cc_pending))
return;
@@ -1753,6 +1760,10 @@
goto bad;
}
+ cc->per_bio_data_size = ti->per_bio_data_size =
+ sizeof(struct dm_crypt_io) + cc->dmreq_start +
+ sizeof(struct dm_crypt_request) + cc->iv_size;
+
cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
if (!cc->page_pool) {
ti->error = "Cannot allocate page mempool";
@@ -1859,7 +1870,10 @@
return DM_MAPIO_REMAPPED;
}
-	io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_sector));
+	io = dm_per_bio_data(bio, cc->per_bio_data_size);
+	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_sector));
+	/* The first ablkcipher_request sits right after the dm_crypt_io. */
+	io->ctx.req = (struct ablkcipher_request *)(io + 1);
if (bio_data_dir(io->base_bio) == READ) {
if (kcryptd_io_read(io, GFP_NOWAIT))