/*
 * Asynchronous block chaining cipher operations.
 *
 * This is the asynchronous version of blkcipher.c, indicating completion
 * via a callback.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/skcipher.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include <crypto/scatterwalk.h>

#include "internal.h"

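/*
 * When the walker hits a chunk that is misaligned or straddles a page,
 * the data is bounced through a kmalloc'd buffer.  Each such buffer is
 * tracked by an ablkcipher_buffer so that it can be copied back out to
 * the destination scatterlist once processing completes.
 */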
struct ablkcipher_buffer {
	struct list_head	entry;
	struct scatter_walk	dst;
	unsigned int		len;
	void			*data;
};

enum {
	ABLKCIPHER_WALK_SLOW = 1 << 0,
};

static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
{
	scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
}

void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
	struct ablkcipher_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		ablkcipher_buffer_write(p);
		list_del(&p->entry);
		kfree(p);
	}
}
EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);

static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
					  struct ablkcipher_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	/* If [start, start + len) crosses a page boundary, end_page is the
	 * start of the page holding the last byte; max() moves the spot
	 * there so it fits entirely within one page.
	 */
	return max(start, end_page);
}

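/*
 * Walk-completion helpers.  The fast path advances both scatterwalks in
 * place; the slow path advances only the output walk here, since the
 * bounced data is written back to the destination scatterlist later by
 * __ablkcipher_walk_complete().
 */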
static inline void ablkcipher_done_slow(struct ablkcipher_walk *walk,
					unsigned int n)
{
	for (;;) {
		unsigned int len_this_page = scatterwalk_pagelen(&walk->out);

		if (len_this_page > n)
			len_this_page = n;
		scatterwalk_advance(&walk->out, n);
		if (n == len_this_page)
			break;
		n -= len_this_page;
		scatterwalk_start(&walk->out, sg_next(walk->out.sg));
	}
}

static inline void ablkcipher_done_fast(struct ablkcipher_walk *walk,
					unsigned int n)
{
	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk);

int ablkcipher_walk_done(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk, int err)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int n; /* bytes processed */
	bool more;

	if (unlikely(err < 0))
		goto finish;

	n = walk->nbytes - err;
	walk->total -= n;
	more = (walk->total != 0);

	if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) {
		ablkcipher_done_fast(walk, n);
	} else {
		if (WARN_ON(err)) {
			/* unexpected case; didn't process all bytes */
			err = -EINVAL;
			goto finish;
		}
		ablkcipher_done_slow(walk, n);
	}

	scatterwalk_done(&walk->in, 0, more);
	scatterwalk_done(&walk->out, 1, more);

	if (more) {
		crypto_yield(req->base.flags);
		return ablkcipher_walk_next(req, walk);
	}
	err = 0;
finish:
	walk->nbytes = 0;
	if (walk->iv != req->info)
		memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
	kfree(walk->iv_buffer);
	return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);

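/*
 * Slow path: the next block is misaligned or straddles a page, so stage
 * it in a freshly allocated bounce buffer and queue that buffer on
 * walk->buffers for eventual write-back to the destination.
 */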
static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk,
				       unsigned int bsize,
				       unsigned int alignmask,
				       void **src_p, void **dst_p)
{
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
	struct ablkcipher_buffer *p;
	void *src, *dst, *base;
	unsigned int n;

	/* Room for the buffer header plus enough slack to carve out an
	 * aligned copy of the block that does not straddle a page (see
	 * ablkcipher_get_spot()).
	 */
	n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
	n += (aligned_bsize * 3 - (alignmask + 1) +
	      (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));

	p = kmalloc(n, GFP_ATOMIC);
	if (!p)
		return ablkcipher_walk_done(req, walk, -ENOMEM);

	base = p + 1;

	dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
	src = dst = ablkcipher_get_spot(dst, bsize);

	p->len = bsize;
	p->data = dst;

	scatterwalk_copychunks(src, &walk->in, bsize, 0);

	ablkcipher_queue_write(walk, p);

	walk->nbytes = bsize;
	walk->flags |= ABLKCIPHER_WALK_SLOW;

	*src_p = src;
	*dst_p = dst;

	return 0;
}

static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
				     struct crypto_tfm *tfm,
				     unsigned int alignmask)
{
	unsigned bs = walk->blocksize;
	unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
			    (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->iv_buffer)
		return -ENOMEM;

	/* Skip past two aligned blocks and place the IV copy in an aligned
	 * spot that does not straddle a page.
	 */
	iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}

static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk)
{
	walk->src.page = scatterwalk_page(&walk->in);
	walk->src.offset = offset_in_page(walk->in.offset);
	walk->dst.page = scatterwalk_page(&walk->out);
	walk->dst.offset = offset_in_page(walk->out.offset);

	return 0;
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask, bsize, n;
	void *src, *dst;
	int err;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	n = walk->total;
	if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
		req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return ablkcipher_walk_done(req, walk, -EINVAL);
	}

	walk->flags &= ~ABLKCIPHER_WALK_SLOW;
	src = dst = NULL;

	bsize = min(walk->blocksize, n);
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (n < bsize ||
	    !scatterwalk_aligned(&walk->in, alignmask) ||
	    !scatterwalk_aligned(&walk->out, alignmask)) {
		err = ablkcipher_next_slow(req, walk, bsize, alignmask,
					   &src, &dst);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return ablkcipher_next_fast(req, walk);

set_phys_lowmem:
	if (err >= 0) {
		walk->src.page = virt_to_page(src);
		walk->dst.page = virt_to_page(dst);
		walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
		walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
	}

	return err;
}

static int ablkcipher_walk_first(struct ablkcipher_request *req,
				 struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->iv = req->info;
	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->iv_buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & alignmask))) {
		int err = ablkcipher_copy_iv(walk, tfm, alignmask);

		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);

	return ablkcipher_walk_next(req, walk);
}

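/*
 * A minimal usage sketch for the walker (illustrative only; error
 * handling and the driver's actual cipher step are elided):
 *
 *	struct ablkcipher_walk walk;
 *	int err;
 *
 *	ablkcipher_walk_init(&walk, req->dst, req->src, req->nbytes);
 *	err = ablkcipher_walk_phys(req, &walk);
 *	while (!err && walk.nbytes) {
 *		(process walk.nbytes bytes via walk.src.page/walk.dst.page)
 *		err = ablkcipher_walk_done(req, &walk, 0);
 *	}
 *
 * The third argument to ablkcipher_walk_done() is the number of bytes
 * left unprocessed; passing 0 means the whole chunk was handled.
 */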
int ablkcipher_walk_phys(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk)
{
	walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
	return ablkcipher_walk_first(req, walk);
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);

static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}

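/*
 * Key lengths are validated against the algorithm's declared bounds,
 * and misaligned keys are bounced through setkey_unaligned() above,
 * which zeroes its temporary copy before freeing it.
 */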
static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		  unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					      u32 mask)
{
	return alg->cra_ctxsize;
}

int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req)
{
	return crypto_ablkcipher_encrypt(&req->creq);
}

int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req)
{
	return crypto_ablkcipher_decrypt(&req->creq);
}

static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
				      u32 mask)
{
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	if (!alg->ivsize) {
		crt->givencrypt = skcipher_null_givencrypt;
		crt->givdecrypt = skcipher_null_givdecrypt;
	}
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

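/*
 * Report this algorithm's parameters over the crypto_user netlink
 * interface; stubbed out to return -ENOSYS when CONFIG_NET is off.
 */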
#ifdef CONFIG_NET
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
		sizeof(rblkcipher.geniv));
	rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

	seq_printf(m, "type         : ablkcipher\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
	seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<default>");
}

const struct crypto_type crypto_ablkcipher_type = {
	.ctxsize = crypto_ablkcipher_ctxsize,
	.init = crypto_init_ablkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_ablkcipher_show,
#endif
	.report = crypto_ablkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);

static int no_givdecrypt(struct skcipher_givcrypt_request *req)
{
	return -ENOSYS;
}

static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
				     u32 mask)
{
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ?
		      alg->setkey : setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	/* Both giv hooks fall back to the same -ENOSYS stub. */
	crt->givencrypt = alg->givencrypt ?: no_givdecrypt;
	crt->givdecrypt = alg->givdecrypt ?: no_givdecrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

#ifdef CONFIG_NET
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
		sizeof(rblkcipher.geniv));
	rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

	seq_printf(m, "type         : givcipher\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
	seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<built-in>");
}

const struct crypto_type crypto_givcipher_type = {
	.ctxsize = crypto_ablkcipher_ctxsize,
	.init = crypto_init_givcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_givcipher_show,
#endif
	.report = crypto_givcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_givcipher_type);

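/*
 * Pick the default IV generator: modes whose IV size differs from the
 * block size get "chainiv"; otherwise "eseqiv" is used.
 */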
const char *crypto_default_geniv(const struct crypto_alg *alg)
{
	if (((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	     CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
					 alg->cra_ablkcipher.ivsize) !=
	    alg->cra_blocksize)
		return "chainiv";

	return "eseqiv";
}

static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
{
	struct rtattr *tb[3];
	struct {
		struct rtattr attr;
		struct crypto_attr_type data;
	} ptype;
	struct {
		struct rtattr attr;
		struct crypto_attr_alg data;
	} palg;
	struct crypto_template *tmpl;
	struct crypto_instance *inst;
	struct crypto_alg *larval;
	const char *geniv;
	int err;

	larval = crypto_larval_lookup(alg->cra_driver_name,
				      (type & ~CRYPTO_ALG_TYPE_MASK) |
				      CRYPTO_ALG_TYPE_GIVCIPHER,
				      mask | CRYPTO_ALG_TYPE_MASK);
	err = PTR_ERR(larval);
	if (IS_ERR(larval))
		goto out;

	err = -EAGAIN;
	if (!crypto_is_larval(larval))
		goto drop_larval;

	ptype.attr.rta_len = sizeof(ptype);
	ptype.attr.rta_type = CRYPTOA_TYPE;
	ptype.data.type = type | CRYPTO_ALG_GENIV;
	/* GENIV tells the template that we're making a default geniv. */
	ptype.data.mask = mask | CRYPTO_ALG_GENIV;
	tb[0] = &ptype.attr;

	palg.attr.rta_len = sizeof(palg);
	palg.attr.rta_type = CRYPTOA_ALG;
	/* Must use the exact name to locate ourselves. */
	memcpy(palg.data.name, alg->cra_driver_name, CRYPTO_MAX_ALG_NAME);
	tb[1] = &palg.attr;

	tb[2] = NULL;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_BLKCIPHER)
		geniv = alg->cra_blkcipher.geniv;
	else
		geniv = alg->cra_ablkcipher.geniv;

	if (!geniv)
		geniv = crypto_default_geniv(alg);

	tmpl = crypto_lookup_template(geniv);
	err = -ENOENT;
	if (!tmpl)
		goto kill_larval;

	if (tmpl->create) {
		err = tmpl->create(tmpl, tb);
		if (err)
			goto put_tmpl;
		goto ok;
	}

	inst = tmpl->alloc(tb);
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto put_tmpl;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		tmpl->free(inst);
		goto put_tmpl;
	}

ok:
	/* Redo the lookup to use the instance we just registered. */
	err = -EAGAIN;

put_tmpl:
	crypto_tmpl_put(tmpl);
kill_larval:
	crypto_larval_kill(larval);
drop_larval:
	crypto_mod_put(larval);
out:
	crypto_mod_put(alg);
	return err;
}

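/*
 * Look up an skcipher algorithm by name.  Algorithms that take an IV
 * but lack a built-in generator are wrapped in a default geniv instance
 * via crypto_givcipher_default() above.
 */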
struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_alg_mod_lookup(name, type, mask);
	if (IS_ERR(alg))
		return alg;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_GIVCIPHER)
		return alg;

	if (!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	      CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
					  alg->cra_ablkcipher.ivsize))
		return alg;

	crypto_mod_put(alg);
	alg = crypto_alg_mod_lookup(name, type | CRYPTO_ALG_TESTED,
				    mask & ~CRYPTO_ALG_TESTED);
	if (IS_ERR(alg))
		return alg;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_GIVCIPHER) {
		if (~alg->cra_flags & (type ^ ~mask) & CRYPTO_ALG_TESTED) {
			crypto_mod_put(alg);
			alg = ERR_PTR(-ENOENT);
		}
		return alg;
	}

	BUG_ON(!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
		 CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
					     alg->cra_ablkcipher.ivsize));

	return ERR_PTR(crypto_givcipher_default(alg, type, mask));
}
EXPORT_SYMBOL_GPL(crypto_lookup_skcipher);

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
			 u32 type, u32 mask)
{
	struct crypto_alg *alg;
	int err;

	type = crypto_skcipher_type(type);
	mask = crypto_skcipher_mask(mask);

	alg = crypto_lookup_skcipher(name, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
	crypto_mod_put(alg);
	return err;
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

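/*
 * Allocation example (a minimal sketch; "cbc(aes)" is illustrative):
 *
 *	struct crypto_ablkcipher *tfm;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_ablkcipher_setkey(tfm, key, keylen);
 *	...
 *	crypto_free_ablkcipher(tfm);
 */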
struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask)
{
	struct crypto_tfm *tfm;
	int err;

	type = crypto_skcipher_type(type);
	mask = crypto_skcipher_mask(mask);

	for (;;) {
		struct crypto_alg *alg;

		alg = crypto_lookup_skcipher(alg_name, type, mask);
		if (IS_ERR(alg)) {
			err = PTR_ERR(alg);
			goto err;
		}

		tfm = __crypto_alloc_tfm(alg, type, mask);
		if (!IS_ERR(tfm))
			return __crypto_ablkcipher_cast(tfm);

		crypto_mod_put(alg);
		err = PTR_ERR(tfm);

err:
		if (err != -EAGAIN)
			break;
		if (fatal_signal_pending(current)) {
			err = -EINTR;
			break;
		}
	}

	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher);