/*
 * Copyright (C) 2003 Jana Saout <jana@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2013 Milan Broz <gmazyland@gmail.com>
 *
 * This file is released under the GPL.
 */

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/backing-dev.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "crypt"

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	struct bvec_iter iter_in;
	struct bvec_iter iter_out;
	sector_t cc_sector;
	atomic_t cc_pending;
	struct ablkcipher_request *req;
};

/*
 * per bio private data
 */
struct dm_crypt_io {
	struct crypt_config *cc;
	struct bio *base_bio;
	struct work_struct work;

	struct convert_context ctx;

	atomic_t io_pending;
	int error;
	sector_t sector;

	struct list_head list;
} CRYPTO_MINALIGN_ATTR;

struct dm_crypt_request {
	struct convert_context *ctx;
	struct scatterlist sg_in;
	struct scatterlist sg_out;
	sector_t iv_sector;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	int (*init)(struct crypt_config *cc);
	int (*wipe)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv,
			 struct dm_crypt_request *dmreq);
	int (*post)(struct crypt_config *cc, u8 *iv,
		    struct dm_crypt_request *dmreq);
};

struct iv_essiv_private {
	struct crypto_hash *hash_tfm;
	u8 *salt;
};

struct iv_benbi_private {
	int shift;
};

#define LMK_SEED_SIZE 64 /* hash + 0 */
struct iv_lmk_private {
	struct crypto_shash *hash_tfm;
	u8 *seed;
};

#define TCW_WHITENING_SIZE 16
struct iv_tcw_private {
	struct crypto_shash *crc32_tfm;
	u8 *iv_seed;
	u8 *whitening;
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID, DM_CRYPT_SAME_CPU };

/*
 * The fields in here must be read only after initialization.
 */
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data, crypto requests and
	 * encryption requests/buffer pages
	 */
	mempool_t *req_pool;
	mempool_t *page_pool;
	struct bio_set *bs;
	struct mutex bio_alloc_lock;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	struct task_struct *write_thread;
	wait_queue_head_t write_thread_wait;
	struct list_head write_thread_list;

	char *cipher;
	char *cipher_string;

	struct crypt_iv_operations *iv_gen_ops;
	union {
		struct iv_essiv_private essiv;
		struct iv_benbi_private benbi;
		struct iv_lmk_private lmk;
		struct iv_tcw_private tcw;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	/* ESSIV: struct crypto_cipher *essiv_tfm */
	void *iv_private;
	struct crypto_ablkcipher **tfms;
	unsigned tfms_count;

	/*
	 * Layout of each crypto request:
	 *
	 *   struct ablkcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
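	 *
	 * Illustrative example (assumed numbers, the real sizes depend
	 * on the cipher driver): with a cipher whose alignmask is 15,
	 * iv_of_dmreq() rounds the address just past struct
	 * dm_crypt_request up to the next 16-byte boundary, so an
	 * offset of 0x5a within the allocation would place the IV at
	 * 0x60.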
	 */
	unsigned int dmreq_start;

	unsigned int per_bio_data_size;

	unsigned long flags;
	unsigned int key_size;
	unsigned int key_parts;      /* independent parts in key buffer */
	unsigned int key_extra_size; /* additional keys length */
	u8 key[0];
};

#define MIN_IOS 16

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);

/*
 * Use this to access cipher attributes that are the same for each CPU.
 */
static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
{
	return cc->tfms[0];
}

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possible other narrow block modes)
 *
 * null: the initial vector is always zero. Provides compatibility with
 *       obsolete loop_fish2 devices. Do not use for new devices.
 *
 * lmk:  Compatible implementation of the block chaining mode used
 *       by the Loop-AES block device encryption system
 *       designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from the sector number, the data and,
 *       optionally, an extra IV seed.
 *       This means that after decryption the first block of the sector
 *       must be tweaked according to the decrypted data.
 *       Loop-AES can use three encryption schemes:
 *         version 1: plain aes-cbc mode
 *         version 2: a 64-multikey scheme with the lmk IV generator
 *         version 3: the same as version 2, with an additional IV seed
 *                    (it uses 65 keys; the last key is used as the IV seed)
 *
 * tcw:  Compatible implementation of the block chaining mode used
 *       by the TrueCrypt device encryption system (prior to version 4.1).
 *       For more info see: http://www.truecrypt.org
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from the initial key and the sector number.
 *       In addition, a whitening value is applied to every sector; the
 *       whitening is calculated from the initial key and the sector
 *       number and mixed using CRC32.
 *       Note that this encryption scheme is vulnerable to watermarking
 *       attacks and should only be used to access old, compatible containers.
236 *
Linus Torvalds1da177e2005-04-16 15:20:36 -0700237 * plumb: unimplemented, see:
238 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
239 */
240
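/*
 * Worked example (illustrative values only): for sector 0x0000000811223344
 * and a 16-byte IV, plain keeps only the low 32 bits little-endian, giving
 * 44 33 22 11 00 00 ... 00, while plain64 keeps all 64 bits, giving
 * 44 33 22 11 08 00 00 00 00 ... 00.
 */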
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);

	return 0;
}

static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
				struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);

	return 0;
}

/* Initialise ESSIV - compute salt but no local memory allocations */
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	struct hash_desc desc;
	struct scatterlist sg;
	struct crypto_cipher *essiv_tfm;
	int err;

	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = essiv->hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
	if (err)
		return err;

	essiv_tfm = cc->iv_private;

	err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
				   crypto_hash_digestsize(essiv->hash_tfm));
	if (err)
		return err;

	return 0;
}

/* Wipe salt and reset key derived from volume key */
static int crypt_iv_essiv_wipe(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
	struct crypto_cipher *essiv_tfm;
	int r, err = 0;

	memset(essiv->salt, 0, salt_size);

	essiv_tfm = cc->iv_private;
	r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
	if (r)
		err = r;

	return err;
}

/* Set up per cpu cipher state */
static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
					     struct dm_target *ti,
					     u8 *salt, unsigned saltsize)
{
	struct crypto_cipher *essiv_tfm;
	int err;

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		return essiv_tfm;
	}

	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_ablkcipher_ivsize(any_tfm(cc))) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(-EINVAL);
	}

	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(err);
	}

	return essiv_tfm;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	struct crypto_cipher *essiv_tfm;
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;

	crypto_free_hash(essiv->hash_tfm);
	essiv->hash_tfm = NULL;

	kzfree(essiv->salt);
	essiv->salt = NULL;

	essiv_tfm = cc->iv_private;

	if (essiv_tfm)
		crypto_free_cipher(essiv_tfm);

	cc->iv_private = NULL;
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm = NULL;
	struct crypto_hash *hash_tfm = NULL;
	u8 *salt = NULL;
	int err;

	if (!opts) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Allocate hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		err = PTR_ERR(hash_tfm);
		goto bad;
	}

	salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
	if (!salt) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		err = -ENOMEM;
		goto bad;
	}

	cc->iv_gen_private.essiv.salt = salt;
	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

	essiv_tfm = setup_essiv_cpu(cc, ti, salt,
				    crypto_hash_digestsize(hash_tfm));
	if (IS_ERR(essiv_tfm)) {
		crypt_iv_essiv_dtr(cc);
		return PTR_ERR(essiv_tfm);
	}
	cc->iv_private = essiv_tfm;

	return 0;

bad:
	if (hash_tfm && !IS_ERR(hash_tfm))
		crypto_free_hash(hash_tfm);
	kfree(salt);
	return err;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	struct crypto_cipher *essiv_tfm = cc->iv_private;

	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
	crypto_cipher_encrypt_one(essiv_tfm, iv, iv);

	return 0;
}

static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
	int log = ilog2(bs);

	/*
	 * We need to calculate how far we must shift the sector count
	 * to get the cipher block count; we use this shift in _gen.
	 */

	if (1 << log != bs) {
		ti->error = "cipher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cipher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi.shift = 9 - log;

	return 0;
}
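
/*
 * Example (illustrative): for a cipher with 16-byte blocks, log = 4 and
 * shift = 5, so 512-byte sector n starts at narrow block (n << 5) + 1;
 * crypt_iv_benbi_gen() stores that count big-endian in the last 8 bytes
 * of the IV.
 */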

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

	val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}

static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

static void crypt_iv_lmk_dtr(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
		crypto_free_shash(lmk->hash_tfm);
	lmk->hash_tfm = NULL;

	kzfree(lmk->seed);
	lmk->seed = NULL;
}

static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(lmk->hash_tfm)) {
		ti->error = "Error initializing LMK hash";
		return PTR_ERR(lmk->hash_tfm);
	}

	/* No seed in LMK version 2 */
	if (cc->key_parts == cc->tfms_count) {
		lmk->seed = NULL;
		return 0;
	}

	lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
	if (!lmk->seed) {
		crypt_iv_lmk_dtr(cc);
		ti->error = "Error kmallocing seed storage in LMK";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_lmk_init(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	int subkey_size = cc->key_size / cc->key_parts;

	/* The LMK seed is stored at the position of key LMK_KEYS + 1 */
	if (lmk->seed)
		memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
		       crypto_shash_digestsize(lmk->hash_tfm));

	return 0;
}

static int crypt_iv_lmk_wipe(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->seed)
		memset(lmk->seed, 0, LMK_SEED_SIZE);

	return 0;
}

static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq,
			    u8 *data)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	SHASH_DESC_ON_STACK(desc, lmk->hash_tfm);
	struct md5_state md5state;
	__le32 buf[4];
	int i, r;

	desc->tfm = lmk->hash_tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	r = crypto_shash_init(desc);
	if (r)
		return r;

	if (lmk->seed) {
		r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE);
		if (r)
			return r;
	}

	/* Sector is always 512B, block size 16, add data of blocks 1-31 */
	r = crypto_shash_update(desc, data + 16, 16 * 31);
	if (r)
		return r;

	/* Sector is cropped to 56 bits here */
	buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
	buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
	buf[2] = cpu_to_le32(4024);
	buf[3] = 0;
	r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf));
	if (r)
		return r;

	/* No MD5 padding here */
	r = crypto_shash_export(desc, &md5state);
	if (r)
		return r;

	for (i = 0; i < MD5_HASH_WORDS; i++)
		__cpu_to_le32s(&md5state.hash[i]);
	memcpy(iv, &md5state.hash, cc->iv_size);

	return 0;
}

static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	u8 *src;
	int r = 0;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		src = kmap_atomic(sg_page(&dmreq->sg_in));
		r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
		kunmap_atomic(src);
	} else
		memset(iv, 0, cc->iv_size);

	return r;
}

static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
		return 0;

	dst = kmap_atomic(sg_page(&dmreq->sg_out));
	r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);

	/* Tweak the first block of plaintext sector */
	if (!r)
		crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);

	kunmap_atomic(dst);
	return r;
}

static void crypt_iv_tcw_dtr(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	kzfree(tcw->iv_seed);
	tcw->iv_seed = NULL;
	kzfree(tcw->whitening);
	tcw->whitening = NULL;

	if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
		crypto_free_shash(tcw->crc32_tfm);
	tcw->crc32_tfm = NULL;
}

static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
		ti->error = "Wrong key size for TCW";
		return -EINVAL;
	}

	tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tcw->crc32_tfm)) {
		ti->error = "Error initializing CRC32 in TCW";
		return PTR_ERR(tcw->crc32_tfm);
	}

	tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
	tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
	if (!tcw->iv_seed || !tcw->whitening) {
		crypt_iv_tcw_dtr(cc);
		ti->error = "Error allocating seed storage in TCW";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_tcw_init(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;

	memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
	memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
	       TCW_WHITENING_SIZE);

	return 0;
}

static int crypt_iv_tcw_wipe(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	memset(tcw->iv_seed, 0, cc->iv_size);
	memset(tcw->whitening, 0, TCW_WHITENING_SIZE);

	return 0;
}

static int crypt_iv_tcw_whitening(struct crypt_config *cc,
				  struct dm_crypt_request *dmreq,
				  u8 *data)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
	u8 buf[TCW_WHITENING_SIZE];
	SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
	int i, r;

	/* xor whitening with sector number */
	memcpy(buf, tcw->whitening, TCW_WHITENING_SIZE);
	crypto_xor(buf, (u8 *)&sector, 8);
	crypto_xor(&buf[8], (u8 *)&sector, 8);

	/* calculate crc32 for every 32bit part and xor it */
	desc->tfm = tcw->crc32_tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	for (i = 0; i < 4; i++) {
		r = crypto_shash_init(desc);
		if (r)
			goto out;
		r = crypto_shash_update(desc, &buf[i * 4], 4);
		if (r)
			goto out;
		r = crypto_shash_final(desc, &buf[i * 4]);
		if (r)
			goto out;
	}
	crypto_xor(&buf[0], &buf[12], 4);
	crypto_xor(&buf[4], &buf[8], 4);

	/* apply whitening (8 bytes) to whole sector */
	for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
		crypto_xor(data + i * 8, buf, 8);
out:
	memzero_explicit(buf, sizeof(buf));
	return r;
}

static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
	u8 *src;
	int r = 0;

	/* Remove whitening from ciphertext */
	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
		src = kmap_atomic(sg_page(&dmreq->sg_in));
		r = crypt_iv_tcw_whitening(cc, dmreq, src + dmreq->sg_in.offset);
		kunmap_atomic(src);
	}

	/* Calculate IV */
	memcpy(iv, tcw->iv_seed, cc->iv_size);
	crypto_xor(iv, (u8 *)&sector, 8);
	if (cc->iv_size > 8)
		crypto_xor(&iv[8], (u8 *)&sector, cc->iv_size - 8);

	return r;
}

static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
		return 0;

	/* Apply whitening on ciphertext */
	dst = kmap_atomic(sg_page(&dmreq->sg_out));
	r = crypt_iv_tcw_whitening(cc, dmreq, dst + dmreq->sg_out.offset);
	kunmap_atomic(dst);

	return r;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr = crypt_iv_essiv_ctr,
	.dtr = crypt_iv_essiv_dtr,
	.init = crypt_iv_essiv_init,
	.wipe = crypt_iv_essiv_wipe,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr = crypt_iv_benbi_ctr,
	.dtr = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static struct crypt_iv_operations crypt_iv_lmk_ops = {
	.ctr = crypt_iv_lmk_ctr,
	.dtr = crypt_iv_lmk_dtr,
	.init = crypt_iv_lmk_init,
	.wipe = crypt_iv_lmk_wipe,
	.generator = crypt_iv_lmk_gen,
	.post = crypt_iv_lmk_post
};

static struct crypt_iv_operations crypt_iv_tcw_ops = {
	.ctr = crypt_iv_tcw_ctr,
	.dtr = crypt_iv_tcw_dtr,
	.init = crypt_iv_tcw_init,
	.wipe = crypt_iv_tcw_wipe,
	.generator = crypt_iv_tcw_gen,
	.post = crypt_iv_tcw_post
};

static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	if (bio_in)
		ctx->iter_in = bio_in->bi_iter;
	if (bio_out)
		ctx->iter_out = bio_out->bi_iter;
	ctx->cc_sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}

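/*
 * The ablkcipher request, the dm_crypt_request and the IV are laid out
 * consecutively in one allocation (see the layout comment in struct
 * crypt_config); the helpers below convert between the embedded
 * structures.
 */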
static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     struct ablkcipher_request *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
					       struct dm_crypt_request *dmreq)
{
	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
}

static u8 *iv_of_dmreq(struct crypt_config *cc,
		       struct dm_crypt_request *dmreq)
{
	return (u8 *)ALIGN((unsigned long)(dmreq + 1),
		crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
}

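/*
 * Encrypt/decrypt one 512-byte sector from bio_in into bio_out and
 * advance both iterators; the IV is generated (and optionally
 * post-processed) per sector.
 */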
static int crypt_convert_block(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct ablkcipher_request *req)
{
	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
	struct dm_crypt_request *dmreq;
	u8 *iv;
	int r;

	dmreq = dmreq_of_req(cc, req);
	iv = iv_of_dmreq(cc, dmreq);

	dmreq->iv_sector = ctx->cc_sector;
	dmreq->ctx = ctx;
	sg_init_table(&dmreq->sg_in, 1);
	sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,
		    bv_in.bv_offset);

	sg_init_table(&dmreq->sg_out, 1);
	sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,
		    bv_out.bv_offset);

	bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT);
	bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT);

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, dmreq);
		if (r < 0)
			return r;
	}

	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
				     1 << SECTOR_SHIFT, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_ablkcipher_encrypt(req);
	else
		r = crypto_ablkcipher_decrypt(req);

	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, iv, dmreq);

	return r;
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error);

static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);

	if (!ctx->req)
		ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);

	ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
	ablkcipher_request_set_callback(ctx->req,
	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
	    kcryptd_async_done, dmreq_of_req(cc, ctx->req));
}

static void crypt_free_req(struct crypt_config *cc,
			   struct ablkcipher_request *req, struct bio *base_bio)
{
	struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);

	if ((struct ablkcipher_request *)(io + 1) != req)
		mempool_free(req, cc->req_pool);
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	int r;

	atomic_set(&ctx->cc_pending, 1);

	while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {

		crypt_alloc_req(cc, ctx);

		atomic_inc(&ctx->cc_pending);

		r = crypt_convert_block(cc, ctx, ctx->req);

		switch (r) {
		/* async */
		case -EBUSY:
			wait_for_completion(&ctx->restart);
			reinit_completion(&ctx->restart);
			/* fall through */
		case -EINPROGRESS:
			ctx->req = NULL;
			ctx->cc_sector++;
			continue;

		/* sync */
		case 0:
			atomic_dec(&ctx->cc_pending);
			ctx->cc_sector++;
			cond_resched();
			continue;

		/* error */
		default:
			atomic_dec(&ctx->cc_pending);
			return r;
		}
	}

	return 0;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);

/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 *
 * This function may be called concurrently. If we allocate from the mempool
 * concurrently, there is a possibility of deadlock. For example, if we have
 * a mempool of 256 pages and two processes, each wanting 256 pages, allocate
 * from the mempool concurrently, it may deadlock in a situation where both
 * processes have allocated 128 pages and the mempool is exhausted.
 *
 * In order to avoid this scenario we allocate the pages under a mutex.
 *
 * In order to not degrade performance with excessive locking, we try
 * non-blocking allocations without a mutex first but on failure we fallback
 * to blocking allocations with a mutex.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
	unsigned i, len, remaining_size;
	struct page *page;
	struct bio_vec *bvec;

retry:
	if (unlikely(gfp_mask & __GFP_WAIT))
		mutex_lock(&cc->bio_alloc_lock);

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		goto return_clone;

	clone_init(io, clone);

	remaining_size = size;

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page) {
			crypt_free_buffer_pages(cc, clone);
			bio_put(clone);
			gfp_mask |= __GFP_WAIT;
			goto retry;
		}

		len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;

		bvec = &clone->bi_io_vec[clone->bi_vcnt++];
		bvec->bv_page = page;
		bvec->bv_len = len;
		bvec->bv_offset = 0;

		clone->bi_iter.bi_size += len;

		remaining_size -= len;
	}

return_clone:
	if (unlikely(gfp_mask & __GFP_WAIT))
		mutex_unlock(&cc->bio_alloc_lock);

	return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, clone, i) {
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}

static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
			  struct bio *bio, sector_t sector)
{
	io->cc = cc;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	io->ctx.req = NULL;
	atomic_set(&io->io_pending, 0);
}

static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->io_pending);
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *base_bio = io->base_bio;
	int error = io->error;

	if (!atomic_dec_and_test(&io->io_pending))
		return;

	if (io->ctx.req)
		crypt_free_req(cc, io->ctx.req, base_bio);

	bio_endio(base_bio, error);
}

/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 *
 * The work is done per CPU global for all dm-crypt instances.
 * They should not depend on each other and do not block.
 */
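
/*
 * In short: for reads, kcryptd_io submits the cloned bio and decryption
 * then runs from the completion path via kcryptd; for writes, kcryptd
 * encrypts into a freshly allocated bio first and a dedicated per-device
 * thread (dmcrypt_write) submits the result.
 */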
static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->cc;
	unsigned rw = bio_data_dir(clone);

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		error = -EIO;

	/*
	 * free the processed pages
	 */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->cc;

	clone->bi_private = io;
	clone->bi_end_io = crypt_endio;
	clone->bi_bdev = cc->dev->bdev;
	clone->bi_rw = io->base_bio->bi_rw;
}

static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
	struct crypt_config *cc = io->cc;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;

	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */
	clone = bio_clone_bioset(base_bio, gfp, cc->bs);
	if (!clone)
		return 1;

	crypt_inc_pending(io);

	clone_init(io, clone);
	clone->bi_iter.bi_sector = cc->start + io->sector;

	generic_make_request(clone);
	return 0;
}

static void kcryptd_io_read_work(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	crypt_inc_pending(io);
	if (kcryptd_io_read(io, GFP_NOIO))
		io->error = -ENOMEM;
	crypt_dec_pending(io);
}

static void kcryptd_queue_read(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_io_read_work);
	queue_work(cc->io_queue, &io->work);
}

static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;

	generic_make_request(clone);
}

static int dmcrypt_write(void *data)
{
	struct crypt_config *cc = data;
	while (1) {
		struct list_head local_list;
		struct blk_plug plug;

		DECLARE_WAITQUEUE(wait, current);

		spin_lock_irq(&cc->write_thread_wait.lock);
continue_locked:

		if (!list_empty(&cc->write_thread_list))
			goto pop_from_list;

		__set_current_state(TASK_INTERRUPTIBLE);
		__add_wait_queue(&cc->write_thread_wait, &wait);

		spin_unlock_irq(&cc->write_thread_wait.lock);

		if (unlikely(kthread_should_stop())) {
			set_task_state(current, TASK_RUNNING);
			remove_wait_queue(&cc->write_thread_wait, &wait);
			break;
		}

		schedule();

		set_task_state(current, TASK_RUNNING);
		spin_lock_irq(&cc->write_thread_wait.lock);
		__remove_wait_queue(&cc->write_thread_wait, &wait);
		goto continue_locked;

pop_from_list:
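		/*
		 * Splice the whole list onto the stack-local head so that
		 * the lock can be dropped while the bios are submitted.
		 */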
1205 local_list = cc->write_thread_list;
1206 local_list.next->prev = &local_list;
1207 local_list.prev->next = &local_list;
1208 INIT_LIST_HEAD(&cc->write_thread_list);
1209
1210 spin_unlock_irq(&cc->write_thread_wait.lock);
1211
1212 blk_start_plug(&plug);
1213 do {
1214 struct dm_crypt_io *io = container_of(local_list.next,
1215 struct dm_crypt_io, list);
1216 list_del(&io->list);
1217 kcryptd_io_write(io);
1218 } while (!list_empty(&local_list));
1219 blk_finish_plug(&plug);
1220 }
1221 return 0;
1222}
1223
Mikulas Patocka72c6e7a2012-03-28 18:41:22 +01001224static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
Milan Broz4e4eef62008-02-08 02:10:49 +00001225{
Milan Brozdec1ced2008-02-08 02:10:57 +00001226 struct bio *clone = io->ctx.bio_out;
Alasdair G Kergon49a8a922012-07-27 15:08:05 +01001227 struct crypt_config *cc = io->cc;
Mikulas Patockadc267622015-02-13 08:25:59 -05001228 unsigned long flags;
Milan Brozdec1ced2008-02-08 02:10:57 +00001229
Mikulas Patocka72c6e7a2012-03-28 18:41:22 +01001230 if (unlikely(io->error < 0)) {
Milan Brozdec1ced2008-02-08 02:10:57 +00001231 crypt_free_buffer_pages(cc, clone);
1232 bio_put(clone);
Milan Broz6c031f42008-10-10 13:37:06 +01001233 crypt_dec_pending(io);
Milan Brozdec1ced2008-02-08 02:10:57 +00001234 return;
1235 }
1236
1237 /* crypt_convert should have filled the clone bio */
Kent Overstreet003b5c52013-10-11 15:45:43 -07001238 BUG_ON(io->ctx.iter_out.bi_size);
Milan Brozdec1ced2008-02-08 02:10:57 +00001239
Kent Overstreet4f024f32013-10-11 15:44:27 -07001240 clone->bi_iter.bi_sector = cc->start + io->sector;
Milan Broz899c95d2008-02-08 02:11:02 +00001241
Mikulas Patockadc267622015-02-13 08:25:59 -05001242 spin_lock_irqsave(&cc->write_thread_wait.lock, flags);
1243 list_add_tail(&io->list, &cc->write_thread_list);
1244 wake_up_locked(&cc->write_thread_wait);
1245 spin_unlock_irqrestore(&cc->write_thread_wait.lock, flags);
Milan Broz4e4eef62008-02-08 02:10:49 +00001246}
1247
static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	int crypt_finished;
	sector_t sector = io->sector;
	int r;

	/*
	 * Prevent io from disappearing until this function completes.
	 */
	crypt_inc_pending(io);
	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);

	clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
	if (unlikely(!clone)) {
		io->error = -EIO;
		goto dec;
	}

	io->ctx.bio_out = clone;
	io->ctx.iter_out = clone->bi_iter;

	sector += bio_sectors(clone);

	crypt_inc_pending(io);
	r = crypt_convert(cc, &io->ctx);
	if (r)
		io->error = -EIO;
	crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);

	/* Encryption was already finished, submit io now */
	if (crypt_finished) {
		kcryptd_crypt_write_io_submit(io, 0);
		io->sector = sector;
	}

dec:
	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
{
	crypt_dec_pending(io);
}

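/*
 * Decrypt a READ in place: bio_in and bio_out both point at base_bio,
 * so the ciphertext read from the device is overwritten with plaintext.
 */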
static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	int r = 0;

	crypt_inc_pending(io);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx);
	if (r < 0)
		io->error = -EIO;

	if (atomic_dec_and_test(&io->ctx.cc_pending))
		kcryptd_crypt_read_done(io);

	crypt_dec_pending(io);
}

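/*
 * Completion callback for async ablkcipher requests. -EINPROGRESS
 * here means a previously backlogged request has been accepted; wake
 * the waiter in crypt_convert() and keep the request alive. Otherwise
 * run the IV post-processing hook (lmk/tcw use it), record any error,
 * and finish the io when its last outstanding request completes.
 */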
static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error)
{
	struct dm_crypt_request *dmreq = async_req->data;
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->cc;

	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
		error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);

	if (error < 0)
		io->error = -EIO;

	crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);

	if (!atomic_dec_and_test(&ctx->cc_pending))
		return;

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_done(io);
	else
		kcryptd_crypt_write_io_submit(io, 1);
}

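/*
 * Work item run on cc->crypt_queue: dispatch to the read (decrypt) or
 * write (encrypt) conversion path based on the original bio direction.
 */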
static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}

/*
 * Decode key from its hex representation
 */
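/*
 * Two hex digits encode one byte, e.g. "cafe" yields key[0] = 0xca,
 * key[1] = 0xfe; anything else (including a trailing odd digit) is
 * rejected with -EINVAL.
 */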
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		if (kstrtou8(buffer, 16, &key[i]))
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}

static void crypt_free_tfms(struct crypt_config *cc)
{
	unsigned i;

	if (!cc->tfms)
		return;

	for (i = 0; i < cc->tfms_count; i++)
		if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) {
			crypto_free_ablkcipher(cc->tfms[i]);
			cc->tfms[i] = NULL;
		}

	kfree(cc->tfms);
	cc->tfms = NULL;
}

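/*
 * Allocate one transform per key: multi-key mappings (keycount > 1,
 * e.g. for loop-AES compatibility) cycle through tfms[], while an
 * ordinary mapping uses a single tfm.
 */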
static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
{
	unsigned i;
	int err;

	cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *),
			   GFP_KERNEL);
	if (!cc->tfms)
		return -ENOMEM;

	for (i = 0; i < cc->tfms_count; i++) {
		cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
		if (IS_ERR(cc->tfms[i])) {
			err = PTR_ERR(cc->tfms[i]);
			crypt_free_tfms(cc);
			return err;
		}
	}

	return 0;
}

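/*
 * Split the key material evenly among the transforms, ignoring any
 * key_extra_size tail reserved for the IV generator. For example, a
 * 64-byte key with tfms_count == 2 and no extra bytes gives each tfm
 * a 32-byte (AES-256 sized) subkey.
 */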
static int crypt_setkey_allcpus(struct crypt_config *cc)
{
	unsigned subkey_size;
	int err = 0, i, r;

	/* Ignore extra keys (which are used for IV etc) */
	subkey_size = (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);

	for (i = 0; i < cc->tfms_count; i++) {
		r = crypto_ablkcipher_setkey(cc->tfms[i],
					     cc->key + (i * subkey_size),
					     subkey_size);
		if (r)
			err = r;
	}

	return err;
}

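/*
 * Install a key supplied as a hex string ("-" means an empty key).
 * The caller's string is wiped afterwards so the hex form of the key
 * does not linger in memory.
 */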
static int crypt_set_key(struct crypt_config *cc, char *key)
{
	int r = -EINVAL;
	int key_string_len = strlen(key);

	/* The key size may not be changed. */
	if (cc->key_size != (key_string_len >> 1))
		goto out;

	/* Hyphen (which gives a key_size of zero) means there is no key. */
	if (!cc->key_size && strcmp(key, "-"))
		goto out;

	if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
		goto out;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	r = crypt_setkey_allcpus(cc);

out:
	/* Hex key string not needed after here, so wipe it. */
	memset(key, '0', key_string_len);

	return r;
}

static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));

	return crypt_setkey_allcpus(cc);
}

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	ti->private = NULL;

	if (!cc)
		return;

	if (cc->write_thread)
		kthread_stop(cc->write_thread);

	if (cc->io_queue)
		destroy_workqueue(cc->io_queue);
	if (cc->crypt_queue)
		destroy_workqueue(cc->crypt_queue);

	crypt_free_tfms(cc);

	if (cc->bs)
		bioset_free(cc->bs);

	if (cc->page_pool)
		mempool_destroy(cc->page_pool);
	if (cc->req_pool)
		mempool_destroy(cc->req_pool);

	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);

	if (cc->dev)
		dm_put_device(ti, cc->dev);

	kzfree(cc->cipher);
	kzfree(cc->cipher_string);

	/* Must zero key material before freeing */
	kzfree(cc);
}

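/*
 * Parse the legacy cipher specification cipher[:keycount]-mode-iv:ivopts
 * into a crypto API algorithm name: e.g. "aes-cbc-essiv:sha256" becomes
 * "cbc(aes)" with the essiv generator hashing the key with sha256, and
 * a bare cipher name such as "aes" falls back to cbc with plain IVs.
 */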
static int crypt_ctr_cipher(struct dm_target *ti,
			    char *cipher_in, char *key)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
	char *cipher_api = NULL;
	int ret = -EINVAL;
	char dummy;

	/* Convert to crypto api definition? */
	if (strchr(cipher_in, '(')) {
		ti->error = "Bad cipher specification";
		return -EINVAL;
	}

	cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
	if (!cc->cipher_string)
		goto bad_mem;

	/*
	 * Legacy dm-crypt cipher specification
	 * cipher[:keycount]-mode-iv:ivopts
	 */
	tmp = cipher_in;
	keycount = strsep(&tmp, "-");
	cipher = strsep(&keycount, ":");

	if (!keycount)
		cc->tfms_count = 1;
	else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
		 !is_power_of_2(cc->tfms_count)) {
		ti->error = "Bad cipher key count specification";
		return -EINVAL;
	}
	cc->key_parts = cc->tfms_count;
	cc->key_extra_size = 0;

	cc->cipher = kstrdup(cipher, GFP_KERNEL);
	if (!cc->cipher)
		goto bad_mem;

	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Ignoring unexpected additional cipher options");

	/*
	 * For compatibility with the original dm-crypt mapping format, if
	 * only the cipher name is supplied, use cbc-plain.
	 */
	if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "IV mechanism required";
		return -EINVAL;
	}

	cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
	if (!cipher_api)
		goto bad_mem;

	ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
		       "%s(%s)", chainmode, cipher);
	if (ret < 0) {
		kfree(cipher_api);
		goto bad_mem;
	}

	/* Allocate cipher */
	ret = crypt_alloc_tfms(cc, cipher_api);
	if (ret < 0) {
		ti->error = "Error allocating crypto tfm";
		goto bad;
	}

	/* Initialize IV */
	cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else if (ivmode) {
		DMWARN("Selected cipher does not support IVs");
		ivmode = NULL;
	}

	/* Choose ivmode, see comments at iv code. */
	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "plain64") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else if (strcmp(ivmode, "lmk") == 0) {
		cc->iv_gen_ops = &crypt_iv_lmk_ops;
		/*
		 * Versions 2 and 3 are recognised according
		 * to the length of the provided multi-key string.
		 * If present (version 3), the last key is used as IV seed.
		 * All keys (including the IV seed) are always the same size.
		 */
		if (cc->key_size % cc->key_parts) {
			cc->key_parts++;
			cc->key_extra_size = cc->key_size / cc->key_parts;
		}
	} else if (strcmp(ivmode, "tcw") == 0) {
		cc->iv_gen_ops = &crypt_iv_tcw_ops;
		cc->key_parts += 2; /* IV + whitening */
		cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
	} else {
		ret = -EINVAL;
		ti->error = "Invalid IV mode";
		goto bad;
	}

	/* Initialize and set key */
	ret = crypt_set_key(cc, key);
	if (ret < 0) {
		ti->error = "Error decoding and setting key";
		goto bad;
	}

	/* Allocate IV */
	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
		if (ret < 0) {
			ti->error = "Error creating IV";
			goto bad;
		}
	}

	/* Initialize IV (set keys for ESSIV etc) */
	if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
		ret = cc->iv_gen_ops->init(cc);
		if (ret < 0) {
			ti->error = "Error initialising IV";
			goto bad;
		}
	}

	ret = 0;
bad:
	kfree(cipher_api);
	return ret;

bad_mem:
	ti->error = "Cannot allocate cipher strings";
	return -ENOMEM;
}

/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
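/*
 * An illustrative table line (device and key made up):
 *   0 2097152 crypt aes-cbc-essiv:sha256 <64 hex digits> 0 /dev/sdb 0
 * optionally followed by feature args, e.g. "1 allow_discards".
 */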
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	unsigned int key_size, opt_params;
	unsigned long long tmpll;
	int ret;
	size_t iv_size_padding;
	struct dm_arg_set as;
	const char *opt_string;
	char dummy;

	static struct dm_arg _args[] = {
		{0, 2, "Invalid number of feature args"},
	};

	if (argc < 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	key_size = strlen(argv[1]) >> 1;

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (!cc) {
		ti->error = "Cannot allocate encryption context";
		return -ENOMEM;
	}
	cc->key_size = key_size;

	ti->private = cc;
	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
	if (ret < 0)
		goto bad;

	cc->dmreq_start = sizeof(struct ablkcipher_request);
	cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
	cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));

	if (crypto_ablkcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
		/* Allocate the padding exactly */
		iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
				& crypto_ablkcipher_alignmask(any_tfm(cc));
	} else {
		/*
		 * If the cipher requires greater alignment than kmalloc
		 * alignment, we don't know the exact position of the
		 * initialization vector. We must assume worst case.
		 */
		iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
	}

	ret = -ENOMEM;
	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
		sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size);
	if (!cc->req_pool) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad;
	}

	cc->per_bio_data_size = ti->per_bio_data_size =
		ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start +
		      sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size,
		      ARCH_KMALLOC_MINALIGN);

	cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad;
	}

	cc->bs = bioset_create(MIN_IOS, 0);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad;
	}

	mutex_init(&cc->bio_alloc_lock);

	ret = -EINVAL;
	if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad;
	}
	cc->iv_offset = tmpll;

	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	cc->start = tmpll;

	argv += 5;
	argc -= 5;

	/* Optional parameters */
	if (argc) {
		as.argc = argc;
		as.argv = argv;

		ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
		if (ret)
			goto bad;

		while (opt_params--) {
			opt_string = dm_shift_arg(&as);
			if (!opt_string) {
				ti->error = "Not enough feature arguments";
				goto bad;
			}

			if (!strcasecmp(opt_string, "allow_discards"))
				ti->num_discard_bios = 1;

			else if (!strcasecmp(opt_string, "same_cpu_crypt"))
				set_bit(DM_CRYPT_SAME_CPU, &cc->flags);

			else {
				ti->error = "Invalid feature arguments";
				goto bad;
			}
		}
	}

	ret = -ENOMEM;
	cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1);
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad;
	}

	if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
		cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
	else
		cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
						  num_online_cpus());
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad;
	}

	init_waitqueue_head(&cc->write_thread_wait);
	INIT_LIST_HEAD(&cc->write_thread_list);

	cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write");
	if (IS_ERR(cc->write_thread)) {
		ret = PTR_ERR(cc->write_thread);
		cc->write_thread = NULL;
		ti->error = "Couldn't spawn write thread";
		goto bad;
	}
	wake_up_process(cc->write_thread);

	ti->num_flush_bios = 1;
	ti->discard_zeroes_data_unsupported = true;

	return 0;

bad:
	crypt_dtr(ti);
	return ret;
}

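/*
 * Per-bio entry point. The dm_crypt_io and its ablkcipher request live
 * in the per-bio data that device-mapper reserves with each bio
 * (per_bio_data_size, sized in crypt_ctr), so no allocation is needed
 * here.
 */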
static int crypt_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc = ti->private;

	/*
	 * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues.
	 * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight
	 * - for REQ_DISCARD caller must use flush if IO ordering matters
	 */
	if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
		bio->bi_bdev = cc->dev->bdev;
		if (bio_sectors(bio))
			bio->bi_iter.bi_sector = cc->start +
				dm_target_offset(ti, bio->bi_iter.bi_sector);
		return DM_MAPIO_REMAPPED;
	}

	io = dm_per_bio_data(bio, cc->per_bio_data_size);
	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
	io->ctx.req = (struct ablkcipher_request *)(io + 1);

	if (bio_data_dir(io->base_bio) == READ) {
		if (kcryptd_io_read(io, GFP_NOWAIT))
			kcryptd_queue_read(io);
	} else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}

static void crypt_status(struct dm_target *ti, status_type_t type,
			 unsigned status_flags, char *result, unsigned maxlen)
{
	struct crypt_config *cc = ti->private;
	unsigned i, sz = 0;
	int num_feature_args = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s ", cc->cipher_string);

		if (cc->key_size > 0)
			for (i = 0; i < cc->key_size; i++)
				DMEMIT("%02x", cc->key[i]);
		else
			DMEMIT("-");

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
				cc->dev->name, (unsigned long long)cc->start);

		num_feature_args += !!ti->num_discard_bios;
		num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
		if (num_feature_args) {
			DMEMIT(" %d", num_feature_args);
			if (ti->num_discard_bios)
				DMEMIT(" allow_discards");
			if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
				DMEMIT(" same_cpu_crypt");
		}

		break;
	}
}

static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 * key set <key>
 * key wipe
 */
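/*
 * The target must be suspended for key manipulation, e.g. (device name
 * illustrative):
 *   dmsetup suspend cryptdev
 *   dmsetup message cryptdev 0 key set <new key in hex>
 *   dmsetup resume cryptdev
 */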
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	int ret = -EINVAL;

	if (argc < 2)
		goto error;

	if (!strcasecmp(argv[0], "key")) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strcasecmp(argv[1], "set")) {
			ret = crypt_set_key(cc, argv[2]);
			if (ret)
				return ret;
			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
				ret = cc->iv_gen_ops->init(cc);
			return ret;
		}
		if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
			if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
				ret = cc->iv_gen_ops->wipe(cc);
				if (ret)
					return ret;
			}
			return crypt_wipe_key(cc);
		}
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
		       struct bio_vec *biovec, int max_size)
{
	struct crypt_config *cc = ti->private;
	struct request_queue *q = bdev_get_queue(cc->dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = cc->dev->bdev;
	bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static int crypt_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct crypt_config *cc = ti->private;

	return fn(ti, cc->dev, cc->start, ti->len, data);
}

static struct target_type crypt_target = {
	.name   = "crypt",
	.version = {1, 14, 0},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
	.merge  = crypt_merge,
	.iterate_devices = crypt_iterate_devices,
};

static int __init dm_crypt_init(void)
{
	int r;

	r = dm_register_target(&crypt_target);
	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

static void __exit dm_crypt_exit(void)
{
	dm_unregister_target(&crypt_target);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Jana Saout <jana@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");