/*
 * Copyright (C) 2003 Jana Saout <jana@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2013 Milan Broz <gmazyland@gmail.com>
 *
 * This file is released under the GPL.
 */

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "crypt"

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	struct bvec_iter iter_in;
	struct bvec_iter iter_out;
	sector_t cc_sector;
	atomic_t cc_pending;
	struct ablkcipher_request *req;
};

/*
 * per bio private data
 */
struct dm_crypt_io {
	struct crypt_config *cc;
	struct bio *base_bio;
	struct work_struct work;

	struct convert_context ctx;

	atomic_t io_pending;
	int error;
	sector_t sector;
} CRYPTO_MINALIGN_ATTR;

struct dm_crypt_request {
	struct convert_context *ctx;
	struct scatterlist sg_in;
	struct scatterlist sg_out;
	sector_t iv_sector;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	int (*init)(struct crypt_config *cc);
	int (*wipe)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv,
			 struct dm_crypt_request *dmreq);
	int (*post)(struct crypt_config *cc, u8 *iv,
		    struct dm_crypt_request *dmreq);
};

struct iv_essiv_private {
	struct crypto_hash *hash_tfm;
	u8 *salt;
};

struct iv_benbi_private {
	int shift;
};

#define LMK_SEED_SIZE 64 /* hash + 0 */
struct iv_lmk_private {
	struct crypto_shash *hash_tfm;
	u8 *seed;
};

#define TCW_WHITENING_SIZE 16
struct iv_tcw_private {
	struct crypto_shash *crc32_tfm;
	u8 *iv_seed;
	u8 *whitening;
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID, DM_CRYPT_SAME_CPU };

/*
 * The fields in here must be read only after initialization.
 */
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data, crypto requests and
	 * encryption requests/buffer pages
	 */
	mempool_t *req_pool;
	mempool_t *page_pool;
	struct bio_set *bs;
	struct mutex bio_alloc_lock;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	char *cipher;
	char *cipher_string;

	struct crypt_iv_operations *iv_gen_ops;
	union {
		struct iv_essiv_private essiv;
		struct iv_benbi_private benbi;
		struct iv_lmk_private lmk;
		struct iv_tcw_private tcw;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	/* ESSIV: struct crypto_cipher *essiv_tfm */
	void *iv_private;
	struct crypto_ablkcipher **tfms;
	unsigned tfms_count;

	/*
	 * Layout of each crypto request:
	 *
	 *   struct ablkcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
	unsigned int dmreq_start;

	unsigned int per_bio_data_size;

	unsigned long flags;
	unsigned int key_size;
	unsigned int key_parts;      /* independent parts in key buffer */
	unsigned int key_extra_size; /* additional keys length */
	u8 key[0];
};

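/*
 * Illustrative sketch only (not part of the driver): how the per-request
 * layout documented in struct crypt_config above turns into offsets.
 * crypt_ctr (outside this excerpt) is assumed to compute dmreq_start
 * roughly like this, and iv_of_dmreq() below recovers the IV address:
 *
 *	cc->dmreq_start = sizeof(struct ablkcipher_request) +
 *			  crypto_ablkcipher_reqsize(any_tfm(cc));
 *	cc->dmreq_start = ALIGN(cc->dmreq_start,
 *				__alignof__(struct dm_crypt_request));
 *	iv = (u8 *)ALIGN((unsigned long)(dmreq + 1),
 *			 crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
 */
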
#define MIN_IOS 16

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);

/*
 * Use this to access cipher attributes that are the same for each CPU.
 */
static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
{
	return cc->tfms[0];
}

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possible other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * lmk:  Compatible implementation of the block chaining mode used
 *       by the Loop-AES block device encryption system
 *       designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from the sector number, the data and
 *       optionally extra IV seed.
 *       This means that after decryption the first block
 *       of sector must be tweaked according to decrypted data.
 *       Loop-AES can use three encryption schemes:
 *         version 1: is plain aes-cbc mode
 *         version 2: uses 64 multikey scheme with lmk IV generator
 *         version 3: the same as version 2 with additional IV seed
 *                    (it uses 65 keys, last key is used as IV seed)
 *
 * tcw:  Compatible implementation of the block chaining mode used
 *       by the TrueCrypt device encryption system (prior to version 4.1).
 *       For more info see: http://www.truecrypt.org
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from initial key and the sector number.
 *       In addition, whitening value is applied on every sector, whitening
 *       is calculated from initial key, sector number and mixed using CRC32.
 *       Note that this encryption scheme is vulnerable to watermarking attacks
 *       and should be used for old compatible containers access only.
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */

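/*
 * Example (illustrative only, not part of the driver): a dm table line
 * selecting one of the IV modes above. The crypt target format is
 *   <start> <length> crypt <cipher>-<mode>-<iv> <hex key> <iv_offset> <dev> <offset>
 * so a 200 MiB aes-cbc-essiv:sha256 mapping might be created with
 * ($KEY, device path and sizes are placeholders):
 *
 *	echo "0 409600 crypt aes-cbc-essiv:sha256 $KEY 0 /dev/sdb1 0" | \
 *		dmsetup create cryptdev
 */
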
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);

	return 0;
}

static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
				struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);

	return 0;
}

/* Initialise ESSIV - compute salt but no local memory allocations */
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	struct hash_desc desc;
	struct scatterlist sg;
	struct crypto_cipher *essiv_tfm;
	int err;

	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = essiv->hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
	if (err)
		return err;

	essiv_tfm = cc->iv_private;

	err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
				   crypto_hash_digestsize(essiv->hash_tfm));
	if (err)
		return err;

	return 0;
}

/* Wipe salt and reset key derived from volume key */
static int crypt_iv_essiv_wipe(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
	struct crypto_cipher *essiv_tfm;
	int r, err = 0;

	memset(essiv->salt, 0, salt_size);

	essiv_tfm = cc->iv_private;
	r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
	if (r)
		err = r;

	return err;
}

/* Set up per cpu cipher state */
static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
					     struct dm_target *ti,
					     u8 *salt, unsigned saltsize)
{
	struct crypto_cipher *essiv_tfm;
	int err;

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		return essiv_tfm;
	}

	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_ablkcipher_ivsize(any_tfm(cc))) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(-EINVAL);
	}

	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(err);
	}

	return essiv_tfm;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	struct crypto_cipher *essiv_tfm;
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;

	crypto_free_hash(essiv->hash_tfm);
	essiv->hash_tfm = NULL;

	kzfree(essiv->salt);
	essiv->salt = NULL;

	essiv_tfm = cc->iv_private;

	if (essiv_tfm)
		crypto_free_cipher(essiv_tfm);

	cc->iv_private = NULL;
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm = NULL;
	struct crypto_hash *hash_tfm = NULL;
	u8 *salt = NULL;
	int err;

	if (!opts) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Allocate hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		err = PTR_ERR(hash_tfm);
		goto bad;
	}

	salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
	if (!salt) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		err = -ENOMEM;
		goto bad;
	}

	cc->iv_gen_private.essiv.salt = salt;
	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

	essiv_tfm = setup_essiv_cpu(cc, ti, salt,
				    crypto_hash_digestsize(hash_tfm));
	if (IS_ERR(essiv_tfm)) {
		crypt_iv_essiv_dtr(cc);
		return PTR_ERR(essiv_tfm);
	}
	cc->iv_private = essiv_tfm;

	return 0;

bad:
	if (hash_tfm && !IS_ERR(hash_tfm))
		crypto_free_hash(hash_tfm);
	kfree(salt);
	return err;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	struct crypto_cipher *essiv_tfm = cc->iv_private;

	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
	crypto_cipher_encrypt_one(essiv_tfm, iv, iv);

	return 0;
}

static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
	int log = ilog2(bs);

	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count; we use this shift in _gen */

	if (1 << log != bs) {
		ti->error = "cypher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cypher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi.shift = 9 - log;

	return 0;
}

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

	val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}

static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

static void crypt_iv_lmk_dtr(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
		crypto_free_shash(lmk->hash_tfm);
	lmk->hash_tfm = NULL;

	kzfree(lmk->seed);
	lmk->seed = NULL;
}

static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(lmk->hash_tfm)) {
		ti->error = "Error initializing LMK hash";
		return PTR_ERR(lmk->hash_tfm);
	}

	/* No seed in LMK version 2 */
	if (cc->key_parts == cc->tfms_count) {
		lmk->seed = NULL;
		return 0;
	}

	lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
	if (!lmk->seed) {
		crypt_iv_lmk_dtr(cc);
		ti->error = "Error kmallocing seed storage in LMK";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_lmk_init(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	int subkey_size = cc->key_size / cc->key_parts;

	/* LMK seed is on the position of LMK_KEYS + 1 key */
	if (lmk->seed)
		memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
		       crypto_shash_digestsize(lmk->hash_tfm));

	return 0;
}

static int crypt_iv_lmk_wipe(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->seed)
		memset(lmk->seed, 0, LMK_SEED_SIZE);

	return 0;
}

static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq,
			    u8 *data)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	SHASH_DESC_ON_STACK(desc, lmk->hash_tfm);
	struct md5_state md5state;
	__le32 buf[4];
	int i, r;

	desc->tfm = lmk->hash_tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	r = crypto_shash_init(desc);
	if (r)
		return r;

	if (lmk->seed) {
		r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE);
		if (r)
			return r;
	}

	/* Sector is always 512B, block size 16, add data of blocks 1-31 */
	r = crypto_shash_update(desc, data + 16, 16 * 31);
	if (r)
		return r;

	/* Sector is cropped to 56 bits here */
	buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
	buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
	buf[2] = cpu_to_le32(4024);
	buf[3] = 0;
	r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf));
	if (r)
		return r;

	/* No MD5 padding here */
	r = crypto_shash_export(desc, &md5state);
	if (r)
		return r;

	for (i = 0; i < MD5_HASH_WORDS; i++)
		__cpu_to_le32s(&md5state.hash[i]);
	memcpy(iv, &md5state.hash, cc->iv_size);

	return 0;
}

static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	u8 *src;
	int r = 0;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		src = kmap_atomic(sg_page(&dmreq->sg_in));
		r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
		kunmap_atomic(src);
	} else
		memset(iv, 0, cc->iv_size);

	return r;
}

static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
		return 0;

	dst = kmap_atomic(sg_page(&dmreq->sg_out));
	r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);

	/* Tweak the first block of plaintext sector */
	if (!r)
		crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);

	kunmap_atomic(dst);
	return r;
}

static void crypt_iv_tcw_dtr(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	kzfree(tcw->iv_seed);
	tcw->iv_seed = NULL;
	kzfree(tcw->whitening);
	tcw->whitening = NULL;

	if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
		crypto_free_shash(tcw->crc32_tfm);
	tcw->crc32_tfm = NULL;
}

static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
		ti->error = "Wrong key size for TCW";
		return -EINVAL;
	}

	tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tcw->crc32_tfm)) {
		ti->error = "Error initializing CRC32 in TCW";
		return PTR_ERR(tcw->crc32_tfm);
	}

	tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
	tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
	if (!tcw->iv_seed || !tcw->whitening) {
		crypt_iv_tcw_dtr(cc);
		ti->error = "Error allocating seed storage in TCW";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_tcw_init(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;

	memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
	memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
	       TCW_WHITENING_SIZE);

	return 0;
}

static int crypt_iv_tcw_wipe(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	memset(tcw->iv_seed, 0, cc->iv_size);
	memset(tcw->whitening, 0, TCW_WHITENING_SIZE);

	return 0;
}

static int crypt_iv_tcw_whitening(struct crypt_config *cc,
				  struct dm_crypt_request *dmreq,
				  u8 *data)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
	u8 buf[TCW_WHITENING_SIZE];
	SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
	int i, r;

	/* xor whitening with sector number */
	memcpy(buf, tcw->whitening, TCW_WHITENING_SIZE);
	crypto_xor(buf, (u8 *)&sector, 8);
	crypto_xor(&buf[8], (u8 *)&sector, 8);

	/* calculate crc32 for every 32bit part and xor it */
	desc->tfm = tcw->crc32_tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	for (i = 0; i < 4; i++) {
		r = crypto_shash_init(desc);
		if (r)
			goto out;
		r = crypto_shash_update(desc, &buf[i * 4], 4);
		if (r)
			goto out;
		r = crypto_shash_final(desc, &buf[i * 4]);
		if (r)
			goto out;
	}
	crypto_xor(&buf[0], &buf[12], 4);
	crypto_xor(&buf[4], &buf[8], 4);

	/* apply whitening (8 bytes) to whole sector */
	for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
		crypto_xor(data + i * 8, buf, 8);
out:
	memzero_explicit(buf, sizeof(buf));
	return r;
}

static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
	u8 *src;
	int r = 0;

	/* Remove whitening from ciphertext */
	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
		src = kmap_atomic(sg_page(&dmreq->sg_in));
		r = crypt_iv_tcw_whitening(cc, dmreq, src + dmreq->sg_in.offset);
		kunmap_atomic(src);
	}

	/* Calculate IV */
	memcpy(iv, tcw->iv_seed, cc->iv_size);
	crypto_xor(iv, (u8 *)&sector, 8);
	if (cc->iv_size > 8)
		crypto_xor(&iv[8], (u8 *)&sector, cc->iv_size - 8);

	return r;
}

static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
		return 0;

	/* Apply whitening on ciphertext */
	dst = kmap_atomic(sg_page(&dmreq->sg_out));
	r = crypt_iv_tcw_whitening(cc, dmreq, dst + dmreq->sg_out.offset);
	kunmap_atomic(dst);

	return r;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.init      = crypt_iv_essiv_init,
	.wipe      = crypt_iv_essiv_wipe,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr       = crypt_iv_benbi_ctr,
	.dtr       = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static struct crypt_iv_operations crypt_iv_lmk_ops = {
	.ctr       = crypt_iv_lmk_ctr,
	.dtr       = crypt_iv_lmk_dtr,
	.init      = crypt_iv_lmk_init,
	.wipe      = crypt_iv_lmk_wipe,
	.generator = crypt_iv_lmk_gen,
	.post      = crypt_iv_lmk_post
};

static struct crypt_iv_operations crypt_iv_tcw_ops = {
	.ctr       = crypt_iv_tcw_ctr,
	.dtr       = crypt_iv_tcw_dtr,
	.init      = crypt_iv_tcw_init,
	.wipe      = crypt_iv_tcw_wipe,
	.generator = crypt_iv_tcw_gen,
	.post      = crypt_iv_tcw_post
};

static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	if (bio_in)
		ctx->iter_in = bio_in->bi_iter;
	if (bio_out)
		ctx->iter_out = bio_out->bi_iter;
	ctx->cc_sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}

static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     struct ablkcipher_request *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
					       struct dm_crypt_request *dmreq)
{
	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
}

static u8 *iv_of_dmreq(struct crypt_config *cc,
		       struct dm_crypt_request *dmreq)
{
	return (u8 *)ALIGN((unsigned long)(dmreq + 1),
		crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
}

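/*
 * Illustrative round trip (not part of the driver): with the accessors
 * above, a crypto request and its private data locate each other through
 * fixed offsets instead of extra pointers, e.g.:
 *
 *	struct dm_crypt_request *dmreq = dmreq_of_req(cc, req);
 *	struct ablkcipher_request *req2 = req_of_dmreq(cc, dmreq);
 *	u8 *iv = iv_of_dmreq(cc, dmreq);	/- req2 == req -/
 */
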
static int crypt_convert_block(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct ablkcipher_request *req)
{
	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
	struct dm_crypt_request *dmreq;
	u8 *iv;
	int r;

	dmreq = dmreq_of_req(cc, req);
	iv = iv_of_dmreq(cc, dmreq);

	dmreq->iv_sector = ctx->cc_sector;
	dmreq->ctx = ctx;
	sg_init_table(&dmreq->sg_in, 1);
	sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,
		    bv_in.bv_offset);

	sg_init_table(&dmreq->sg_out, 1);
	sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,
		    bv_out.bv_offset);

	bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT);
	bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT);

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, dmreq);
		if (r < 0)
			return r;
	}

	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
				     1 << SECTOR_SHIFT, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_ablkcipher_encrypt(req);
	else
		r = crypto_ablkcipher_decrypt(req);

	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, iv, dmreq);

	return r;
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error);

static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);

	if (!ctx->req)
		ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);

	ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
	ablkcipher_request_set_callback(ctx->req,
	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
	    kcryptd_async_done, dmreq_of_req(cc, ctx->req));
}

static void crypt_free_req(struct crypt_config *cc,
			   struct ablkcipher_request *req, struct bio *base_bio)
{
	struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);

	if ((struct ablkcipher_request *)(io + 1) != req)
		mempool_free(req, cc->req_pool);
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	int r;

	atomic_set(&ctx->cc_pending, 1);

	while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {

		crypt_alloc_req(cc, ctx);

		atomic_inc(&ctx->cc_pending);

		r = crypt_convert_block(cc, ctx, ctx->req);

		switch (r) {
		/* async */
		case -EBUSY:
			wait_for_completion(&ctx->restart);
			reinit_completion(&ctx->restart);
			/* fall through */
		case -EINPROGRESS:
			ctx->req = NULL;
			ctx->cc_sector++;
			continue;

		/* sync */
		case 0:
			atomic_dec(&ctx->cc_pending);
			ctx->cc_sector++;
			cond_resched();
			continue;

		/* error */
		default:
			atomic_dec(&ctx->cc_pending);
			return r;
		}
	}

	return 0;
}

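/*
 * Reference-count protocol of crypt_convert() in short: cc_pending starts
 * at 1 so the conversion loop itself holds a reference; every block in
 * flight adds one, a synchronous completion drops it immediately, and an
 * async completion drops it later in kcryptd_async_done(). Whoever brings
 * the count to zero finishes the io, as in this sketch of the write path
 * (illustration only, see kcryptd_crypt_write_convert() below):
 *
 *	r = crypt_convert(cc, &io->ctx);
 *	if (atomic_dec_and_test(&io->ctx.cc_pending))
 *		kcryptd_crypt_write_io_submit(io, 0);
 */
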
static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);

/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 *
 * This function may be called concurrently. If we allocate from the mempool
 * concurrently, there is a possibility of deadlock. For example, if we have
 * a mempool of 256 pages and two processes, each wanting 256 pages, allocate
 * from the mempool concurrently, it may deadlock in a situation where both
 * processes have allocated 128 pages and the mempool is exhausted.
 *
 * In order to avoid this scenario we allocate the pages under a mutex.
 *
 * In order to not degrade performance with excessive locking, we try
 * non-blocking allocations without a mutex first but on failure we fallback
 * to blocking allocations with a mutex.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
	unsigned i, len, remaining_size;
	struct page *page;
	struct bio_vec *bvec;

retry:
	if (unlikely(gfp_mask & __GFP_WAIT))
		mutex_lock(&cc->bio_alloc_lock);

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		goto return_clone;

	clone_init(io, clone);

	remaining_size = size;

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page) {
			crypt_free_buffer_pages(cc, clone);
			bio_put(clone);
			gfp_mask |= __GFP_WAIT;
			goto retry;
		}

		len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;

		bvec = &clone->bi_io_vec[clone->bi_vcnt++];
		bvec->bv_page = page;
		bvec->bv_len = len;
		bvec->bv_offset = 0;

		clone->bi_iter.bi_size += len;

		remaining_size -= len;
	}

return_clone:
	if (unlikely(gfp_mask & __GFP_WAIT))
		mutex_unlock(&cc->bio_alloc_lock);

	return clone;
}

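/*
 * Allocation-mode escalation in crypt_alloc_buffer(), spelled out: the
 * first pass uses GFP_NOWAIT and takes no lock; if any page allocation
 * fails, all pages gathered so far go back to the pool, __GFP_WAIT is
 * added and the whole allocation retries serialized under
 * bio_alloc_lock, so at most one blocking allocator can drain the
 * mempool at a time.
 */
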
static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, clone, i) {
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}

static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
			  struct bio *bio, sector_t sector)
{
	io->cc = cc;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	io->ctx.req = NULL;
	atomic_set(&io->io_pending, 0);
}

static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->io_pending);
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *base_bio = io->base_bio;
	int error = io->error;

	if (!atomic_dec_and_test(&io->io_pending))
		return;

	if (io->ctx.req)
		crypt_free_req(cc, io->ctx.req, base_bio);

	bio_endio(base_bio, error);
}

/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 *
 * The work is done per CPU global for all dm-crypt instances.
 * They should not depend on each other and do not block.
 */
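/*
 * Read path through these queues, for orientation (the write path is the
 * mirror image; crypt_map itself is outside this excerpt):
 *
 *	kcryptd_queue_io -> kcryptd_io -> kcryptd_io_read
 *	  -> generic_make_request -> crypt_endio
 *	  -> kcryptd_queue_crypt -> kcryptd_crypt_read_convert
 *	  -> crypt_convert -> kcryptd_crypt_read_done
 */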
NeilBrown6712ecf2007-09-27 12:47:43 +02001077static void crypt_endio(struct bio *clone, int error)
Milan Broz8b004452006-10-03 01:15:37 -07001078{
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001079 struct dm_crypt_io *io = clone->bi_private;
Alasdair G Kergon49a8a922012-07-27 15:08:05 +01001080 struct crypt_config *cc = io->cc;
Milan Brozee7a4912008-02-08 02:10:46 +00001081 unsigned rw = bio_data_dir(clone);
Milan Broz8b004452006-10-03 01:15:37 -07001082
Milan Brozadfe4772007-12-13 14:15:51 +00001083 if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
1084 error = -EIO;
1085
Milan Broz8b004452006-10-03 01:15:37 -07001086 /*
NeilBrown6712ecf2007-09-27 12:47:43 +02001087 * free the processed pages
Milan Broz8b004452006-10-03 01:15:37 -07001088 */
Milan Brozee7a4912008-02-08 02:10:46 +00001089 if (rw == WRITE)
Neil Brown644bd2f2007-10-16 13:48:46 +02001090 crypt_free_buffer_pages(cc, clone);
Milan Brozee7a4912008-02-08 02:10:46 +00001091
1092 bio_put(clone);
1093
1094 if (rw == READ && !error) {
1095 kcryptd_queue_crypt(io);
1096 return;
NeilBrown6712ecf2007-09-27 12:47:43 +02001097 }
Milan Broz8b004452006-10-03 01:15:37 -07001098
Milan Brozadfe4772007-12-13 14:15:51 +00001099 if (unlikely(error))
Milan Broz5742fd72008-02-08 02:10:43 +00001100 io->error = error;
1101
1102 crypt_dec_pending(io);
Milan Broz8b004452006-10-03 01:15:37 -07001103}
1104
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001105static void clone_init(struct dm_crypt_io *io, struct bio *clone)
Milan Broz8b004452006-10-03 01:15:37 -07001106{
Alasdair G Kergon49a8a922012-07-27 15:08:05 +01001107 struct crypt_config *cc = io->cc;
Milan Broz8b004452006-10-03 01:15:37 -07001108
1109 clone->bi_private = io;
1110 clone->bi_end_io = crypt_endio;
1111 clone->bi_bdev = cc->dev->bdev;
1112 clone->bi_rw = io->base_bio->bi_rw;
1113}
1114
Milan Broz20c82532011-01-13 19:59:53 +00001115static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
Milan Broz8b004452006-10-03 01:15:37 -07001116{
Alasdair G Kergon49a8a922012-07-27 15:08:05 +01001117 struct crypt_config *cc = io->cc;
Milan Broz8b004452006-10-03 01:15:37 -07001118 struct bio *base_bio = io->base_bio;
1119 struct bio *clone;
Milan Broz93e605c2006-10-03 01:15:38 -07001120
Milan Broz8b004452006-10-03 01:15:37 -07001121 /*
1122 * The block layer might modify the bvec array, so always
1123 * copy the required bvecs because we need the original
1124 * one in order to decrypt the whole bio data *afterwards*.
1125 */
Kent Overstreetbf800ef2012-09-06 15:35:02 -07001126 clone = bio_clone_bioset(base_bio, gfp, cc->bs);
Jens Axboe7eaceac2011-03-10 08:52:07 +01001127 if (!clone)
Milan Broz20c82532011-01-13 19:59:53 +00001128 return 1;
Milan Broz8b004452006-10-03 01:15:37 -07001129
Milan Broz20c82532011-01-13 19:59:53 +00001130 crypt_inc_pending(io);
1131
Milan Broz8b004452006-10-03 01:15:37 -07001132 clone_init(io, clone);
Kent Overstreet4f024f32013-10-11 15:44:27 -07001133 clone->bi_iter.bi_sector = cc->start + io->sector;
Milan Broz8b004452006-10-03 01:15:37 -07001134
Milan Broz93e605c2006-10-03 01:15:38 -07001135 generic_make_request(clone);
Milan Broz20c82532011-01-13 19:59:53 +00001136 return 0;
Milan Broz8b004452006-10-03 01:15:37 -07001137}
1138
Milan Broz4e4eef62008-02-08 02:10:49 +00001139static void kcryptd_io_write(struct dm_crypt_io *io)
1140{
Milan Broz95497a92008-02-08 02:11:12 +00001141 struct bio *clone = io->ctx.bio_out;
Milan Broz95497a92008-02-08 02:11:12 +00001142 generic_make_request(clone);
Milan Broz4e4eef62008-02-08 02:10:49 +00001143}
1144
Alasdair G Kergon395b1672008-02-08 02:10:52 +00001145static void kcryptd_io(struct work_struct *work)
1146{
1147 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
1148
Milan Broz20c82532011-01-13 19:59:53 +00001149 if (bio_data_dir(io->base_bio) == READ) {
1150 crypt_inc_pending(io);
1151 if (kcryptd_io_read(io, GFP_NOIO))
1152 io->error = -ENOMEM;
1153 crypt_dec_pending(io);
1154 } else
Alasdair G Kergon395b1672008-02-08 02:10:52 +00001155 kcryptd_io_write(io);
1156}
1157
1158static void kcryptd_queue_io(struct dm_crypt_io *io)
1159{
Alasdair G Kergon49a8a922012-07-27 15:08:05 +01001160 struct crypt_config *cc = io->cc;
Alasdair G Kergon395b1672008-02-08 02:10:52 +00001161
1162 INIT_WORK(&io->work, kcryptd_io);
1163 queue_work(cc->io_queue, &io->work);
1164}
1165
Mikulas Patocka72c6e7a2012-03-28 18:41:22 +01001166static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
Milan Broz4e4eef62008-02-08 02:10:49 +00001167{
Milan Brozdec1ced2008-02-08 02:10:57 +00001168 struct bio *clone = io->ctx.bio_out;
Alasdair G Kergon49a8a922012-07-27 15:08:05 +01001169 struct crypt_config *cc = io->cc;
Milan Brozdec1ced2008-02-08 02:10:57 +00001170
Mikulas Patocka72c6e7a2012-03-28 18:41:22 +01001171 if (unlikely(io->error < 0)) {
Milan Brozdec1ced2008-02-08 02:10:57 +00001172 crypt_free_buffer_pages(cc, clone);
1173 bio_put(clone);
Milan Broz6c031f42008-10-10 13:37:06 +01001174 crypt_dec_pending(io);
Milan Brozdec1ced2008-02-08 02:10:57 +00001175 return;
1176 }
1177
1178 /* crypt_convert should have filled the clone bio */
Kent Overstreet003b5c52013-10-11 15:45:43 -07001179 BUG_ON(io->ctx.iter_out.bi_size);
Milan Brozdec1ced2008-02-08 02:10:57 +00001180
Kent Overstreet4f024f32013-10-11 15:44:27 -07001181 clone->bi_iter.bi_sector = cc->start + io->sector;
Milan Broz899c95d2008-02-08 02:11:02 +00001182
Milan Broz95497a92008-02-08 02:11:12 +00001183 if (async)
1184 kcryptd_queue_io(io);
Alasdair G Kergon1e37bb82008-10-10 13:37:05 +01001185 else
Milan Broz95497a92008-02-08 02:11:12 +00001186 generic_make_request(clone);
Milan Broz4e4eef62008-02-08 02:10:49 +00001187}
1188
Milan Brozfc5a5e92008-10-10 13:37:04 +01001189static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
Milan Broz8b004452006-10-03 01:15:37 -07001190{
Alasdair G Kergon49a8a922012-07-27 15:08:05 +01001191 struct crypt_config *cc = io->cc;
Milan Broz8b004452006-10-03 01:15:37 -07001192 struct bio *clone;
Milan Brozc8081612008-10-10 13:37:08 +01001193 int crypt_finished;
Milan Brozb635b002008-10-21 17:45:00 +01001194 sector_t sector = io->sector;
Milan Brozdec1ced2008-02-08 02:10:57 +00001195 int r;
Milan Broz8b004452006-10-03 01:15:37 -07001196
Milan Broz93e605c2006-10-03 01:15:38 -07001197 /*
Milan Brozfc5a5e92008-10-10 13:37:04 +01001198 * Prevent io from disappearing until this function completes.
1199 */
1200 crypt_inc_pending(io);
Milan Brozb635b002008-10-21 17:45:00 +01001201 crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
Milan Brozfc5a5e92008-10-10 13:37:04 +01001202
Mikulas Patockacf2f1ab2015-02-13 08:23:52 -05001203 clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
1204 if (unlikely(!clone)) {
1205 io->error = -EIO;
1206 goto dec;
Milan Broz8b004452006-10-03 01:15:37 -07001207 }
Milan Broz899c95d2008-02-08 02:11:02 +00001208
Mikulas Patockacf2f1ab2015-02-13 08:23:52 -05001209 io->ctx.bio_out = clone;
1210 io->ctx.iter_out = clone->bi_iter;
1211
1212 sector += bio_sectors(clone);
1213
1214 crypt_inc_pending(io);
1215 r = crypt_convert(cc, &io->ctx);
1216 if (r)
1217 io->error = -EIO;
1218 crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
1219
1220 /* Encryption was already finished, submit io now */
1221 if (crypt_finished) {
1222 kcryptd_crypt_write_io_submit(io, 0);
1223 io->sector = sector;
1224 }
1225
1226dec:
Milan Broz899c95d2008-02-08 02:11:02 +00001227 crypt_dec_pending(io);
Milan Broz84131db2008-02-08 02:10:59 +00001228}
1229
Mikulas Patocka72c6e7a2012-03-28 18:41:22 +01001230static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
Milan Broz5742fd72008-02-08 02:10:43 +00001231{
Milan Broz5742fd72008-02-08 02:10:43 +00001232 crypt_dec_pending(io);
1233}
1234
Milan Broz4e4eef62008-02-08 02:10:49 +00001235static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
Milan Broz8b004452006-10-03 01:15:37 -07001236{
Alasdair G Kergon49a8a922012-07-27 15:08:05 +01001237 struct crypt_config *cc = io->cc;
Milan Broz5742fd72008-02-08 02:10:43 +00001238 int r = 0;
Milan Broz8b004452006-10-03 01:15:37 -07001239
Milan Broz3e1a8bd2008-10-10 13:37:02 +01001240 crypt_inc_pending(io);
Milan Broz3a7f6c92008-02-08 02:11:14 +00001241
Milan Broz53017032008-02-08 02:10:38 +00001242 crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
Milan Broz0c395b02008-02-08 02:10:54 +00001243 io->sector);
Milan Broz8b004452006-10-03 01:15:37 -07001244
Milan Broz5742fd72008-02-08 02:10:43 +00001245 r = crypt_convert(cc, &io->ctx);
Mikulas Patocka72c6e7a2012-03-28 18:41:22 +01001246 if (r < 0)
1247 io->error = -EIO;
Milan Broz5742fd72008-02-08 02:10:43 +00001248
Mikulas Patocka40b62292012-07-27 15:08:04 +01001249 if (atomic_dec_and_test(&io->ctx.cc_pending))
Mikulas Patocka72c6e7a2012-03-28 18:41:22 +01001250 kcryptd_crypt_read_done(io);
Milan Broz3a7f6c92008-02-08 02:11:14 +00001251
1252 crypt_dec_pending(io);
Milan Broz8b004452006-10-03 01:15:37 -07001253}
1254
static void kcryptd_async_done(struct crypto_async_request *async_req,
                               int error)
{
        struct dm_crypt_request *dmreq = async_req->data;
        struct convert_context *ctx = dmreq->ctx;
        struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
        struct crypt_config *cc = io->cc;

        if (error == -EINPROGRESS) {
                complete(&ctx->restart);
                return;
        }

        if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
                error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);

        if (error < 0)
                io->error = -EIO;

        crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);

        if (!atomic_dec_and_test(&ctx->cc_pending))
                return;

        if (bio_data_dir(io->base_bio) == READ)
                kcryptd_crypt_read_done(io);
        else
                kcryptd_crypt_write_io_submit(io, 1);
}
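
/*
 * Work item entry point: encryption and decryption run from the
 * cc->crypt_queue workqueue rather than in the submitter's context.
 */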
static void kcryptd_crypt(struct work_struct *work)
{
        struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

        if (bio_data_dir(io->base_bio) == READ)
                kcryptd_crypt_read_convert(io);
        else
                kcryptd_crypt_write_convert(io);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->cc;

        INIT_WORK(&io->work, kcryptd_crypt);
        queue_work(cc->crypt_queue, &io->work);
}

/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
        char buffer[3];
        unsigned int i;

        buffer[2] = '\0';

        for (i = 0; i < size; i++) {
                buffer[0] = *hex++;
                buffer[1] = *hex++;

                if (kstrtou8(buffer, 16, &key[i]))
                        return -EINVAL;
        }

        if (*hex != '\0')
                return -EINVAL;

        return 0;
}
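
/*
 * cc->tfms is an array of cc->tfms_count cipher handles, one per key of
 * a multi-key (cipher:keycount) mapping; freeing must tolerate a
 * partially populated array left behind by a failed allocation.
 */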
static void crypt_free_tfms(struct crypt_config *cc)
{
        unsigned i;

        if (!cc->tfms)
                return;

        for (i = 0; i < cc->tfms_count; i++)
                if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) {
                        crypto_free_ablkcipher(cc->tfms[i]);
                        cc->tfms[i] = NULL;
                }

        kfree(cc->tfms);
        cc->tfms = NULL;
}
static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
{
        unsigned i;
        int err;

        cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *),
                           GFP_KERNEL);
        if (!cc->tfms)
                return -ENOMEM;

        for (i = 0; i < cc->tfms_count; i++) {
                cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
                if (IS_ERR(cc->tfms[i])) {
                        err = PTR_ERR(cc->tfms[i]);
                        crypt_free_tfms(cc);
                        return err;
                }
        }

        return 0;
}
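
/*
 * Program every tfm with its slice of the key material. For example
 * (illustrative): "aes:2-xts-plain64" with a 128-hex-digit key yields
 * two tfms, each keyed with a 32-byte half of the decoded 64-byte key.
 */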
static int crypt_setkey_allcpus(struct crypt_config *cc)
{
        unsigned subkey_size;
        int err = 0, i, r;

        /* Ignore extra keys (which are used for IV etc) */
        subkey_size = (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);

        for (i = 0; i < cc->tfms_count; i++) {
                r = crypto_ablkcipher_setkey(cc->tfms[i],
                                             cc->key + (i * subkey_size),
                                             subkey_size);
                if (r)
                        err = r;
        }

        return err;
}
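
/*
 * Decode the hex key string supplied on the table line (or via a
 * "key set" message) into cc->key and (re)program all tfms. The
 * caller's string is wiped afterwards so the plaintext hex does not
 * linger in memory.
 */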
static int crypt_set_key(struct crypt_config *cc, char *key)
{
        int r = -EINVAL;
        int key_string_len = strlen(key);

        /* The key size may not be changed. */
        if (cc->key_size != (key_string_len >> 1))
                goto out;

        /* Hyphen (which gives a key_size of zero) means there is no key. */
        if (!cc->key_size && strcmp(key, "-"))
                goto out;

        if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
                goto out;

        set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

        r = crypt_setkey_allcpus(cc);

out:
        /* Hex key string not needed after here, so wipe it. */
        memset(key, '0', key_string_len);

        return r;
}
static int crypt_wipe_key(struct crypt_config *cc)
{
        clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
        memset(&cc->key, 0, cc->key_size * sizeof(u8));

        return crypt_setkey_allcpus(cc);
}
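
/*
 * Destroy the workqueues first so no work item can still be executing
 * when the pools, tfms and IV state are released below.
 */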
static void crypt_dtr(struct dm_target *ti)
{
        struct crypt_config *cc = ti->private;

        ti->private = NULL;

        if (!cc)
                return;

        if (cc->io_queue)
                destroy_workqueue(cc->io_queue);
        if (cc->crypt_queue)
                destroy_workqueue(cc->crypt_queue);

        crypt_free_tfms(cc);

        if (cc->bs)
                bioset_free(cc->bs);

        if (cc->page_pool)
                mempool_destroy(cc->page_pool);
        if (cc->req_pool)
                mempool_destroy(cc->req_pool);

        if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
                cc->iv_gen_ops->dtr(cc);

        if (cc->dev)
                dm_put_device(ti, cc->dev);

        kzfree(cc->cipher);
        kzfree(cc->cipher_string);

        /* Must zero key material before freeing */
        kzfree(cc);
}
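
/*
 * Parse the cipher specification from the table line, e.g. (illustrative
 * values) "aes-cbc-essiv:sha256", "aes-xts-plain64" or the multi-key form
 * "aes:64-cbc-lmk", and turn it into a crypto API string such as
 * "cbc(aes)" before allocating the tfms and setting the key.
 */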
static int crypt_ctr_cipher(struct dm_target *ti,
                            char *cipher_in, char *key)
{
        struct crypt_config *cc = ti->private;
        char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
        char *cipher_api = NULL;
        int ret = -EINVAL;
        char dummy;

        /* Convert to crypto api definition? */
        if (strchr(cipher_in, '(')) {
                ti->error = "Bad cipher specification";
                return -EINVAL;
        }

        cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
        if (!cc->cipher_string)
                goto bad_mem;

        /*
         * Legacy dm-crypt cipher specification
         * cipher[:keycount]-mode-iv:ivopts
         */
        tmp = cipher_in;
        keycount = strsep(&tmp, "-");
        cipher = strsep(&keycount, ":");

        if (!keycount)
                cc->tfms_count = 1;
        else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
                 !is_power_of_2(cc->tfms_count)) {
                ti->error = "Bad cipher key count specification";
                return -EINVAL;
        }
        cc->key_parts = cc->tfms_count;
        cc->key_extra_size = 0;

        cc->cipher = kstrdup(cipher, GFP_KERNEL);
        if (!cc->cipher)
                goto bad_mem;

        chainmode = strsep(&tmp, "-");
        ivopts = strsep(&tmp, "-");
        ivmode = strsep(&ivopts, ":");

        if (tmp)
                DMWARN("Ignoring unexpected additional cipher options");

        /*
         * For compatibility with the original dm-crypt mapping format, if
         * only the cipher name is supplied, use cbc-plain.
         */
        if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
                chainmode = "cbc";
                ivmode = "plain";
        }

        if (strcmp(chainmode, "ecb") && !ivmode) {
                ti->error = "IV mechanism required";
                return -EINVAL;
        }

        cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
        if (!cipher_api)
                goto bad_mem;

        ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
                       "%s(%s)", chainmode, cipher);
        if (ret < 0) {
                kfree(cipher_api);
                goto bad_mem;
        }

        /* Allocate cipher */
        ret = crypt_alloc_tfms(cc, cipher_api);
        if (ret < 0) {
                ti->error = "Error allocating crypto tfm";
                goto bad;
        }

        /* Initialize IV */
        cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
        if (cc->iv_size)
                /* at least a 64 bit sector number should fit in our buffer */
                cc->iv_size = max(cc->iv_size,
                                  (unsigned int)(sizeof(u64) / sizeof(u8)));
        else if (ivmode) {
                DMWARN("Selected cipher does not support IVs");
                ivmode = NULL;
        }

        /* Choose ivmode, see comments at iv code. */
        if (ivmode == NULL)
                cc->iv_gen_ops = NULL;
        else if (strcmp(ivmode, "plain") == 0)
                cc->iv_gen_ops = &crypt_iv_plain_ops;
        else if (strcmp(ivmode, "plain64") == 0)
                cc->iv_gen_ops = &crypt_iv_plain64_ops;
        else if (strcmp(ivmode, "essiv") == 0)
                cc->iv_gen_ops = &crypt_iv_essiv_ops;
        else if (strcmp(ivmode, "benbi") == 0)
                cc->iv_gen_ops = &crypt_iv_benbi_ops;
        else if (strcmp(ivmode, "null") == 0)
                cc->iv_gen_ops = &crypt_iv_null_ops;
        else if (strcmp(ivmode, "lmk") == 0) {
                cc->iv_gen_ops = &crypt_iv_lmk_ops;
                /*
                 * Versions 2 and 3 are recognised according to the length
                 * of the provided multi-key string. If present (version 3),
                 * the last key is used as the IV seed. All keys (including
                 * the IV seed) are always the same size.
                 */
                if (cc->key_size % cc->key_parts) {
                        cc->key_parts++;
                        cc->key_extra_size = cc->key_size / cc->key_parts;
                }
        } else if (strcmp(ivmode, "tcw") == 0) {
                cc->iv_gen_ops = &crypt_iv_tcw_ops;
                cc->key_parts += 2; /* IV + whitening */
                cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
        } else {
                ret = -EINVAL;
                ti->error = "Invalid IV mode";
                goto bad;
        }

        /* Initialize and set key */
        ret = crypt_set_key(cc, key);
        if (ret < 0) {
                ti->error = "Error decoding and setting key";
                goto bad;
        }

        /* Allocate IV */
        if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
                ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
                if (ret < 0) {
                        ti->error = "Error creating IV";
                        goto bad;
                }
        }

        /* Initialize IV (set keys for ESSIV etc) */
        if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
                ret = cc->iv_gen_ops->init(cc);
                if (ret < 0) {
                        ti->error = "Error initialising IV";
                        goto bad;
                }
        }

        ret = 0;
bad:
        kfree(cipher_api);
        return ret;

bad_mem:
        ti->error = "Cannot allocate cipher strings";
        return -ENOMEM;
}

/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
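/*
 * An illustrative dmsetup table line for this target (example values
 * only):
 *   0 409600 crypt aes-cbc-essiv:sha256 <64-hex-digit-key> 0 /dev/sdb 0
 */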
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        struct crypt_config *cc;
        unsigned int key_size, opt_params;
        unsigned long long tmpll;
        int ret;
        size_t iv_size_padding;
        struct dm_arg_set as;
        const char *opt_string;
        char dummy;

        static struct dm_arg _args[] = {
                {0, 2, "Invalid number of feature args"},
        };

        if (argc < 5) {
                ti->error = "Not enough arguments";
                return -EINVAL;
        }

        key_size = strlen(argv[1]) >> 1;

        cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
        if (!cc) {
                ti->error = "Cannot allocate encryption context";
                return -ENOMEM;
        }
        cc->key_size = key_size;

        ti->private = cc;
        ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
        if (ret < 0)
                goto bad;

        cc->dmreq_start = sizeof(struct ablkcipher_request);
        cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
        cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
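
        /*
         * Informational sketch of each req_pool element's layout as set
         * up by this code:
         *   | ablkcipher_request | tfm request ctx | (align) |
         *   | dm_crypt_request | iv_size_padding | IV |
         * iv_size_padding makes the IV land on the alignment the cipher
         * demands.
         */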
        if (crypto_ablkcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
                /* Allocate the padding exactly */
                iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
                                & crypto_ablkcipher_alignmask(any_tfm(cc));
        } else {
                /*
                 * If the cipher requires greater alignment than kmalloc
                 * alignment, we don't know the exact position of the
                 * initialization vector. We must assume worst case.
                 */
                iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
        }

        ret = -ENOMEM;
        cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
                        sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size);
        if (!cc->req_pool) {
                ti->error = "Cannot allocate crypt request mempool";
                goto bad;
        }

        cc->per_bio_data_size = ti->per_bio_data_size =
                ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start +
                      sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size,
                      ARCH_KMALLOC_MINALIGN);

        cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
        if (!cc->page_pool) {
                ti->error = "Cannot allocate page mempool";
                goto bad;
        }

        cc->bs = bioset_create(MIN_IOS, 0);
        if (!cc->bs) {
                ti->error = "Cannot allocate crypt bioset";
                goto bad;
        }

        mutex_init(&cc->bio_alloc_lock);

        ret = -EINVAL;
        if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
                ti->error = "Invalid iv_offset sector";
                goto bad;
        }
        cc->iv_offset = tmpll;

        if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
                ti->error = "Device lookup failed";
                goto bad;
        }

        if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
                ti->error = "Invalid device sector";
                goto bad;
        }
        cc->start = tmpll;

        argv += 5;
        argc -= 5;
        /* Optional parameters */
        if (argc) {
                as.argc = argc;
                as.argv = argv;

                ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
                if (ret)
                        goto bad;

                ret = -EINVAL;
                while (opt_params--) {
                        opt_string = dm_shift_arg(&as);
                        if (!opt_string) {
                                ti->error = "Not enough feature arguments";
                                goto bad;
                        }

                        if (!strcasecmp(opt_string, "allow_discards"))
                                ti->num_discard_bios = 1;

                        else if (!strcasecmp(opt_string, "same_cpu_crypt"))
                                set_bit(DM_CRYPT_SAME_CPU, &cc->flags);

                        else {
                                ti->error = "Invalid feature arguments";
                                goto bad;
                        }
                }
        }
        ret = -ENOMEM;
        cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1);
        if (!cc->io_queue) {
                ti->error = "Couldn't create kcryptd io queue";
                goto bad;
        }

        if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
                cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
        else
                cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
                                                  num_online_cpus());
        if (!cc->crypt_queue) {
                ti->error = "Couldn't create kcryptd queue";
                goto bad;
        }

        ti->num_flush_bios = 1;
        ti->discard_zeroes_data_unsupported = true;

        return 0;

bad:
        crypt_dtr(ti);
        return ret;
}
static int crypt_map(struct dm_target *ti, struct bio *bio)
{
        struct dm_crypt_io *io;
        struct crypt_config *cc = ti->private;

        /*
         * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues.
         * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight
         * - for REQ_DISCARD caller must use flush if IO ordering matters
         */
        if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
                bio->bi_bdev = cc->dev->bdev;
                if (bio_sectors(bio))
                        bio->bi_iter.bi_sector = cc->start +
                                dm_target_offset(ti, bio->bi_iter.bi_sector);
                return DM_MAPIO_REMAPPED;
        }

        io = dm_per_bio_data(bio, cc->per_bio_data_size);
        crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
        io->ctx.req = (struct ablkcipher_request *)(io + 1);

        if (bio_data_dir(io->base_bio) == READ) {
                if (kcryptd_io_read(io, GFP_NOWAIT))
                        kcryptd_queue_io(io);
        } else
                kcryptd_queue_crypt(io);

        return DM_MAPIO_SUBMITTED;
}
static void crypt_status(struct dm_target *ti, status_type_t type,
                         unsigned status_flags, char *result, unsigned maxlen)
{
        struct crypt_config *cc = ti->private;
        unsigned i, sz = 0;
        int num_feature_args = 0;

        switch (type) {
        case STATUSTYPE_INFO:
                result[0] = '\0';
                break;

        case STATUSTYPE_TABLE:
                DMEMIT("%s ", cc->cipher_string);

                if (cc->key_size > 0)
                        for (i = 0; i < cc->key_size; i++)
                                DMEMIT("%02x", cc->key[i]);
                else
                        DMEMIT("-");

                DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
                                cc->dev->name, (unsigned long long)cc->start);

                num_feature_args += !!ti->num_discard_bios;
                num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
                if (num_feature_args) {
                        DMEMIT(" %d", num_feature_args);
                        if (ti->num_discard_bios)
                                DMEMIT(" allow_discards");
                        if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
                                DMEMIT(" same_cpu_crypt");
                }

                break;
        }
}
static void crypt_postsuspend(struct dm_target *ti)
{
        struct crypt_config *cc = ti->private;

        set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
        struct crypt_config *cc = ti->private;

        if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
                DMERR("aborting resume - crypt key is not set.");
                return -EAGAIN;
        }

        return 0;
}

static void crypt_resume(struct dm_target *ti)
{
        struct crypt_config *cc = ti->private;

        clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}
/* Message interface
 * key set <key>
 * key wipe
 */
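/*
 * Typically driven through dmsetup while the device is suspended, e.g.
 * (illustrative): dmsetup message <device> 0 key wipe
 */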
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
        struct crypt_config *cc = ti->private;
        int ret = -EINVAL;

        if (argc < 2)
                goto error;

        if (!strcasecmp(argv[0], "key")) {
                if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
                        DMWARN("not suspended during key manipulation.");
                        return -EINVAL;
                }
                if (argc == 3 && !strcasecmp(argv[1], "set")) {
                        ret = crypt_set_key(cc, argv[2]);
                        if (ret)
                                return ret;
                        if (cc->iv_gen_ops && cc->iv_gen_ops->init)
                                ret = cc->iv_gen_ops->init(cc);
                        return ret;
                }
                if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
                        if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
                                ret = cc->iv_gen_ops->wipe(cc);
                                if (ret)
                                        return ret;
                        }
                        return crypt_wipe_key(cc);
                }
        }

error:
        DMWARN("unrecognised message received.");
        return -EINVAL;
}
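
/*
 * Defer bvec merge decisions to the underlying device's queue, with the
 * sector rewritten to the target's offset, so clones never exceed what
 * the lower driver would accept.
 */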
static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
                       struct bio_vec *biovec, int max_size)
{
        struct crypt_config *cc = ti->private;
        struct request_queue *q = bdev_get_queue(cc->dev->bdev);

        if (!q->merge_bvec_fn)
                return max_size;

        bvm->bi_bdev = cc->dev->bdev;
        bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);

        return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}
static int crypt_iterate_devices(struct dm_target *ti,
                                 iterate_devices_callout_fn fn, void *data)
{
        struct crypt_config *cc = ti->private;

        return fn(ti, cc->dev, cc->start, ti->len, data);
}

static struct target_type crypt_target = {
        .name   = "crypt",
        .version = {1, 14, 0},
        .module = THIS_MODULE,
        .ctr    = crypt_ctr,
        .dtr    = crypt_dtr,
        .map    = crypt_map,
        .status = crypt_status,
        .postsuspend = crypt_postsuspend,
        .preresume = crypt_preresume,
        .resume = crypt_resume,
        .message = crypt_message,
        .merge  = crypt_merge,
        .iterate_devices = crypt_iterate_devices,
};

static int __init dm_crypt_init(void)
{
        int r;

        r = dm_register_target(&crypt_target);
        if (r < 0)
                DMERR("register failed %d", r);

        return r;
}

static void __exit dm_crypt_exit(void)
{
        dm_unregister_target(&crypt_target);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Jana Saout <jana@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");