author     Linus Torvalds <torvalds@linux-foundation.org>   2009-06-21 13:14:07 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-06-21 13:14:07 -0700
commit     00d94a6a5e3d6a44818e2911a4d606e28e29fecb (patch)
tree       15a524318349cb4075f6dd69d87e4414ba54ed31
parent     8b12e2505ad8c5010922e45f896d908fd1436709 (diff)
parent     b6f34d44cb341ad32f08717d1a2c418e6053a031 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: aes-ni - Remove CRYPTO_TFM_REQ_MAY_SLEEP from fpu template
  crypto: aes-ni - Do not sleep when using the FPU
  crypto: aes-ni - Fix cbc mode IV saving
  crypto: padlock-aes - work around Nano CPU errata in CBC mode
  crypto: padlock-aes - work around Nano CPU errata in ECB mode
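The aes-ni fixes in this pull share one rule: kernel_fpu_begin() disables
preemption, so nothing between kernel_fpu_begin() and kernel_fpu_end() may
sleep. The blkcipher walk code makes GFP_KERNEL allocations while
CRYPTO_TFM_REQ_MAY_SLEEP is set, so the flag has to be cleared before
walking under the FPU. A minimal sketch of the pattern the glue patches
below apply (my_fpu_crypt() is a hypothetical illustration, not a function
from this diff):

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/crypto.h>
#include <asm/i387.h>

static int my_fpu_crypt(struct blkcipher_desc *desc,
			struct scatterlist *dst, struct scatterlist *src,
			unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	/* Later walk steps allocate; with CRYPTO_TFM_REQ_MAY_SLEEP still
	 * set they could sleep, which is forbidden once kernel_fpu_begin()
	 * has disabled preemption. */
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		/* ... process walk.src.virt.addr into walk.dst.virt.addr ... */
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}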
-rw-r--r--  arch/x86/crypto/aesni-intel_asm.S     5
-rw-r--r--  arch/x86/crypto/aesni-intel_glue.c    4
-rw-r--r--  arch/x86/crypto/fpu.c                 4
-rw-r--r--  drivers/crypto/padlock-aes.c        138
4 files changed, 107 insertions(+), 44 deletions(-)
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index caba9960170..eb0566e8331 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -845,7 +845,7 @@ ENTRY(aesni_cbc_enc)
*/
ENTRY(aesni_cbc_dec)
cmp $16, LEN
- jb .Lcbc_dec_ret
+ jb .Lcbc_dec_just_ret
mov 480(KEYP), KLEN
add $240, KEYP
movups (IVP), IV
@@ -891,6 +891,7 @@ ENTRY(aesni_cbc_dec)
add $16, OUTP
cmp $16, LEN
jge .Lcbc_dec_loop1
- movups IV, (IVP)
.Lcbc_dec_ret:
+ movups IV, (IVP)
+.Lcbc_dec_just_ret:
ret
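The hunk context suggests the IV-saving fix works like this: every exit
path that consumed at least one block must store the updated IV back
through IVP, so the store moves under .Lcbc_dec_ret: (which exits from the
multi-block loop elsewhere in the file can jump to), while the new
.Lcbc_dec_just_ret: lets a request shorter than one block return without
touching the IV. A toy C model of the chaining rule being preserved
(xor_dec_block() is an invented stand-in for the real cipher):

#include <string.h>

#define BLK 16

static void xor_dec_block(unsigned char *dst, const unsigned char *src,
			  const unsigned char *iv)
{
	int i;

	for (i = 0; i < BLK; i++)
		dst[i] = src[i] ^ iv[i];	/* "decrypt" + CBC chaining xor */
}

/* Decrypt n blocks; on return iv holds the last ciphertext block, the
 * equivalent of aesni_cbc_dec's "movups IV, (IVP)" write-back. */
static void cbc_dec(unsigned char *dst, const unsigned char *src,
		    unsigned int n, unsigned char *iv)
{
	while (n--) {
		unsigned char next_iv[BLK];

		memcpy(next_iv, src, BLK);	/* save first: dst may alias src */
		xor_dec_block(dst, src, iv);
		memcpy(iv, next_iv, BLK);
		dst += BLK;
		src += BLK;
	}
}

Because iv is kept current, cbc_dec(out, in, 2, iv) and the pair
cbc_dec(out, in, 1, iv); cbc_dec(out + BLK, in + BLK, 1, iv) produce
identical output, which is exactly what a chunked blkcipher walk relies on.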
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 4e663398f77..c580c5ec1ca 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -198,6 +198,7 @@ static int ecb_encrypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
@@ -221,6 +222,7 @@ static int ecb_decrypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
@@ -266,6 +268,7 @@ static int cbc_encrypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
@@ -289,6 +292,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
diff --git a/arch/x86/crypto/fpu.c b/arch/x86/crypto/fpu.c
index 5f9781a3815..daef6cd2b45 100644
--- a/arch/x86/crypto/fpu.c
+++ b/arch/x86/crypto/fpu.c
@@ -48,7 +48,7 @@ static int crypto_fpu_encrypt(struct blkcipher_desc *desc_in,
struct blkcipher_desc desc = {
.tfm = child,
.info = desc_in->info,
- .flags = desc_in->flags,
+ .flags = desc_in->flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
};
kernel_fpu_begin();
@@ -67,7 +67,7 @@ static int crypto_fpu_decrypt(struct blkcipher_desc *desc_in,
struct blkcipher_desc desc = {
.tfm = child,
.info = desc_in->info,
- .flags = desc_in->flags,
+ .flags = desc_in->flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
};
kernel_fpu_begin();
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 87f92c39b5f..a9952b1236b 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -18,9 +18,22 @@
#include <linux/percpu.h>
#include <linux/smp.h>
#include <asm/byteorder.h>
+#include <asm/processor.h>
#include <asm/i387.h>
#include "padlock.h"
+/*
+ * Number of data blocks actually fetched for each xcrypt insn.
+ * Processors with prefetch errata will fetch extra blocks.
+ */
+static unsigned int ecb_fetch_blocks = 2;
+#define MAX_ECB_FETCH_BLOCKS (8)
+#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE)
+
+static unsigned int cbc_fetch_blocks = 1;
+#define MAX_CBC_FETCH_BLOCKS (4)
+#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE)
+
/* Control word. */
struct cword {
unsigned int __attribute__ ((__packed__))
@@ -172,73 +185,111 @@ static inline void padlock_store_cword(struct cword *cword)
* should be used only inside the irq_ts_save/restore() context
*/
-static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
- struct cword *control_word)
+static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
+ struct cword *control_word, int count)
{
asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
: "+S"(input), "+D"(output)
- : "d"(control_word), "b"(key), "c"(1));
+ : "d"(control_word), "b"(key), "c"(count));
+}
+
+static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key,
+ u8 *iv, struct cword *control_word, int count)
+{
+ asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
+ : "+S" (input), "+D" (output), "+a" (iv)
+ : "d" (control_word), "b" (key), "c" (count));
+ return iv;
}
-static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword)
+static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key,
+ struct cword *cword, int count)
{
- u8 buf[AES_BLOCK_SIZE * 2 + PADLOCK_ALIGNMENT - 1];
+ /*
+ * Padlock prefetches extra data so we must provide mapped input buffers.
+ * Assume there are at least 16 bytes of stack already in use.
+ */
+ u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
+ u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
+
+ memcpy(tmp, in, count * AES_BLOCK_SIZE);
+ rep_xcrypt_ecb(tmp, out, key, cword, count);
+}
+
+static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key,
+ u8 *iv, struct cword *cword, int count)
+{
+ /*
+ * Padlock prefetches extra data so we must provide mapped input buffers.
+ * Assume there are at least 16 bytes of stack already in use.
+ */
+ u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
- memcpy(tmp, in, AES_BLOCK_SIZE);
- padlock_xcrypt(tmp, out, key, cword);
+ memcpy(tmp, in, count * AES_BLOCK_SIZE);
+ return rep_xcrypt_cbc(tmp, out, key, iv, cword, count);
}
-static inline void aes_crypt(const u8 *in, u8 *out, u32 *key,
- struct cword *cword)
+static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
+ struct cword *cword, int count)
{
- /* padlock_xcrypt requires at least two blocks of data. */
- if (unlikely(!(((unsigned long)in ^ (PAGE_SIZE - AES_BLOCK_SIZE)) &
- (PAGE_SIZE - 1)))) {
- aes_crypt_copy(in, out, key, cword);
+ /* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
+ * We could avoid some copying here but it's probably not worth it.
+ */
+ if (unlikely(((unsigned long)in & ~PAGE_MASK) + ecb_fetch_bytes > PAGE_SIZE)) {
+ ecb_crypt_copy(in, out, key, cword, count);
return;
}
- padlock_xcrypt(in, out, key, cword);
+ rep_xcrypt_ecb(in, out, key, cword, count);
+}
+
+static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
+ u8 *iv, struct cword *cword, int count)
+{
+ /* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */
+ if (unlikely(((unsigned long)in & ~PAGE_MASK) + cbc_fetch_bytes > PAGE_SIZE))
+ return cbc_crypt_copy(in, out, key, iv, cword, count);
+
+ return rep_xcrypt_cbc(in, out, key, iv, cword, count);
}
static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
void *control_word, u32 count)
{
- if (count == 1) {
- aes_crypt(input, output, key, control_word);
+ u32 initial = count & (ecb_fetch_blocks - 1);
+
+ if (count < ecb_fetch_blocks) {
+ ecb_crypt(input, output, key, control_word, count);
return;
}
- asm volatile ("test $1, %%cl;"
- "je 1f;"
-#ifndef CONFIG_X86_64
- "lea -1(%%ecx), %%eax;"
- "mov $1, %%ecx;"
-#else
- "lea -1(%%rcx), %%rax;"
- "mov $1, %%rcx;"
-#endif
- ".byte 0xf3,0x0f,0xa7,0xc8;" /* rep xcryptecb */
-#ifndef CONFIG_X86_64
- "mov %%eax, %%ecx;"
-#else
- "mov %%rax, %%rcx;"
-#endif
- "1:"
- ".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
+ if (initial)
+ asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
+ : "+S"(input), "+D"(output)
+ : "d"(control_word), "b"(key), "c"(initial));
+
+ asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
: "+S"(input), "+D"(output)
- : "d"(control_word), "b"(key), "c"(count)
- : "ax");
+ : "d"(control_word), "b"(key), "c"(count - initial));
}
static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
u8 *iv, void *control_word, u32 count)
{
- /* rep xcryptcbc */
- asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"
+ u32 initial = count & (cbc_fetch_blocks - 1);
+
+ if (count < cbc_fetch_blocks)
+ return cbc_crypt(input, output, key, iv, control_word, count);
+
+ if (initial)
+ asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
+ : "+S" (input), "+D" (output), "+a" (iv)
+ : "d" (control_word), "b" (key), "c" (count));
+
+ asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
: "+S" (input), "+D" (output), "+a" (iv)
- : "d" (control_word), "b" (key), "c" (count));
+ : "d" (control_word), "b" (key), "c" (count-initial));
return iv;
}
@@ -249,7 +300,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
padlock_reset_key(&ctx->cword.encrypt);
ts_state = irq_ts_save();
- aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
+ ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
irq_ts_restore(ts_state);
padlock_store_cword(&ctx->cword.encrypt);
}
@@ -261,7 +312,7 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
padlock_reset_key(&ctx->cword.encrypt);
ts_state = irq_ts_save();
- aes_crypt(in, out, ctx->D, &ctx->cword.decrypt);
+ ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
irq_ts_restore(ts_state);
padlock_store_cword(&ctx->cword.encrypt);
}
@@ -454,6 +505,7 @@ static struct crypto_alg cbc_aes_alg = {
static int __init padlock_init(void)
{
int ret;
+ struct cpuinfo_x86 *c = &cpu_data(0);
if (!cpu_has_xcrypt) {
printk(KERN_NOTICE PFX "VIA PadLock not detected.\n");
@@ -476,6 +528,12 @@ static int __init padlock_init(void)
printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
+ if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
+ ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
+ cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
+ printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
+ }
+
out:
return ret;
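The padlock workaround hinges on one check: if a prefetching xcrypt
starting at "in" would read past the end of in's page, the data is first
copied into an aligned stack buffer that is guaranteed to be mapped. A
standalone sketch of the same arithmetic (user-space C, names invented for
illustration; with the Nano workaround active, ECB fetches
MAX_ECB_FETCH_BLOCKS * 16 = 128 bytes per xcrypt):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define AES_BLOCK_SIZE	16

/* Same test as ecb_crypt()/cbc_crypt() above: does a fetch of
 * fetch_bytes starting at addr run past the end of addr's page? */
static int needs_copy(const void *addr, unsigned long fetch_bytes)
{
	return (((uintptr_t)addr & ~PAGE_MASK) + fetch_bytes) > PAGE_SIZE;
}

int main(void)
{
	unsigned long ecb_fetch_bytes = 8 * AES_BLOCK_SIZE;	/* 128 */

	/* Page-aligned input: the whole fetch stays inside one page. */
	printf("%d\n", needs_copy((void *)0x1000, ecb_fetch_bytes));	/* 0 */
	/* 0xf90 bytes into a page: 3984 + 128 > 4096, so copy first. */
	printf("%d\n", needs_copy((void *)0x1f90, ecb_fetch_bytes));	/* 1 */
	return 0;
}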