From 5a83d60c074ddf4f6364be25654a643d0e941824 Mon Sep 17 00:00:00 2001
From: Andy Lutomirski
Date: Mon, 31 Oct 2016 15:18:44 -0700
Subject: x86/fpu: Remove irq_ts_save() and irq_ts_restore()

Now that lazy FPU is gone, we don't use CR0.TS (except possibly in
KVM guest mode). Remove irq_ts_save(), irq_ts_restore(), and all of
their callers.

Signed-off-by: Andy Lutomirski
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: Dave Hansen
Cc: Denys Vlasenko
Cc: Fenghua Yu
Cc: H. Peter Anvin
Cc: Josh Poimboeuf
Cc: Linus Torvalds
Cc: Oleg Nesterov
Cc: Paolo Bonzini
Cc: Peter Zijlstra
Cc: Quentin Casasnovas
Cc: Rik van Riel
Cc: Rusty Russell
Cc: Thomas Gleixner
Cc: kvm list
Link: http://lkml.kernel.org/r/70b9b9e7ba70659bedcb08aba63d0f9214f338f2.1477951965.git.luto@kernel.org
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/fpu/api.h   | 10 ----------
 arch/x86/kernel/fpu/core.c       | 29 -----------------------------
 drivers/char/hw_random/via-rng.c |  8 ++------
 drivers/crypto/padlock-aes.c     | 23 ++---------------------
 drivers/crypto/padlock-sha.c     | 18 ------------------
 5 files changed, 4 insertions(+), 84 deletions(-)
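For reference (a sketch, not part of the diff below): after this change,
PadLock-style instructions need no wrapper at all, while code that really
does touch FPU/SIMD registers still uses the kernel_fpu_begin() /
kernel_fpu_end() API declared in <asm/fpu/api.h>, which this patch leaves
untouched. The example_fpu_user() caller below is made up for illustration:

	#include <asm/fpu/api.h>

	/* Hypothetical call site, for illustration only. */
	static void example_fpu_user(void)
	{
		/*
		 * REP XSTORE/XCRYPT/XSHA* don't modify FPU/SSE registers,
		 * and the kernel no longer sets CR0.TS, so they can simply
		 * be executed: no irq_ts_save()/irq_ts_restore() bracket.
		 */

		/*
		 * Code that really does use FPU/SIMD registers still takes
		 * the kernel_fpu_begin()/kernel_fpu_end() path, and only
		 * where irq_fpu_usable() says it is safe to do so.
		 */
		if (irq_fpu_usable()) {
			kernel_fpu_begin();
			/* ... touch XMM/YMM state here ... */
			kernel_fpu_end();
		}
	}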
diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
index 1429a7c736db..0877ae018fc9 100644
--- a/arch/x86/include/asm/fpu/api.h
+++ b/arch/x86/include/asm/fpu/api.h
@@ -26,16 +26,6 @@ extern void kernel_fpu_begin(void);
 extern void kernel_fpu_end(void);
 extern bool irq_fpu_usable(void);
 
-/*
- * Some instructions like VIA's padlock instructions generate a spurious
- * DNA fault but don't modify SSE registers. And these instructions
- * get used from interrupt context as well. To prevent these kernel instructions
- * in interrupt context interacting wrongly with other user/kernel fpu usage, we
- * should use them only in the context of irq_ts_save/restore()
- */
-extern int irq_ts_save(void);
-extern void irq_ts_restore(int TS_state);
-
 /*
  * Query the presence of one or more xfeatures. Works on any legacy CPU as well.
  *
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 52f5684405c1..7d8e2628e82c 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -137,35 +137,6 @@ void kernel_fpu_end(void)
 }
 EXPORT_SYMBOL_GPL(kernel_fpu_end);
 
-/*
- * CR0::TS save/restore functions:
- */
-int irq_ts_save(void)
-{
-	/*
-	 * If in process context and not atomic, we can take a spurious DNA fault.
-	 * Otherwise, doing clts() in process context requires disabling preemption
-	 * or some heavy lifting like kernel_fpu_begin()
-	 */
-	if (!in_atomic())
-		return 0;
-
-	if (read_cr0() & X86_CR0_TS) {
-		clts();
-		return 1;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(irq_ts_save);
-
-void irq_ts_restore(int TS_state)
-{
-	if (TS_state)
-		stts();
-}
-EXPORT_SYMBOL_GPL(irq_ts_restore);
-
 /*
  * Save the FPU state (mark it for reload if necessary):
  *
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index 44ce80606944..d1f5bb534e0e 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -70,21 +70,17 @@ enum {
  * until we have 4 bytes, thus returning a u32 at a time,
  * instead of the current u8-at-a-time.
  *
- * Padlock instructions can generate a spurious DNA fault, so
- * we have to call them in the context of irq_ts_save/restore()
+ * Padlock instructions can generate a spurious DNA fault, but the
+ * kernel doesn't use CR0.TS, so this doesn't matter.
  */
 static inline u32 xstore(u32 *addr, u32 edx_in)
 {
 	u32 eax_out;
-	int ts_state;
-
-	ts_state = irq_ts_save();
 
 	asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */"
 		: "=m" (*addr), "=a" (eax_out), "+d" (edx_in), "+D" (addr));
 
-	irq_ts_restore(ts_state);
 	return eax_out;
 }
 
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 441e86b23571..b3869748cc6b 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -183,8 +183,8 @@ static inline void padlock_store_cword(struct cword *cword)
 
 /*
  * While the padlock instructions don't use FP/SSE registers, they
- * generate a spurious DNA fault when cr0.ts is '1'. These instructions
- * should be used only inside the irq_ts_save/restore() context
+ * generate a spurious DNA fault when CR0.TS is '1'. Fortunately,
+ * the kernel doesn't use CR0.TS.
  */
 
 static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
@@ -298,24 +298,18 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
 static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
 	struct aes_ctx *ctx = aes_ctx(tfm);
-	int ts_state;
 
 	padlock_reset_key(&ctx->cword.encrypt);
-	ts_state = irq_ts_save();
 	ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
-	irq_ts_restore(ts_state);
 	padlock_store_cword(&ctx->cword.encrypt);
 }
 
 static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
 	struct aes_ctx *ctx = aes_ctx(tfm);
-	int ts_state;
 
 	padlock_reset_key(&ctx->cword.encrypt);
-	ts_state = irq_ts_save();
 	ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
-	irq_ts_restore(ts_state);
 	padlock_store_cword(&ctx->cword.encrypt);
 }
 
@@ -346,14 +340,12 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
 	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 	int err;
-	int ts_state;
 
 	padlock_reset_key(&ctx->cword.encrypt);
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
 
-	ts_state = irq_ts_save();
 	while ((nbytes = walk.nbytes)) {
 		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
 				   ctx->E, &ctx->cword.encrypt,
@@ -361,7 +353,6 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
-	irq_ts_restore(ts_state);
 
 	padlock_store_cword(&ctx->cword.encrypt);
 
@@ -375,14 +366,12 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
 	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 	int err;
-	int ts_state;
 
 	padlock_reset_key(&ctx->cword.decrypt);
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
 
-	ts_state = irq_ts_save();
 	while ((nbytes = walk.nbytes)) {
 		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
 				   ctx->D, &ctx->cword.decrypt,
@@ -390,7 +379,6 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
-	irq_ts_restore(ts_state);
 
 	padlock_store_cword(&ctx->cword.encrypt);
 
@@ -425,14 +413,12 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
 	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 	int err;
-	int ts_state;
 
 	padlock_reset_key(&ctx->cword.encrypt);
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
 
-	ts_state = irq_ts_save();
 	while ((nbytes = walk.nbytes)) {
 		u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
 					    walk.dst.virt.addr, ctx->E,
@@ -442,7 +428,6 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
-	irq_ts_restore(ts_state);
 
 	padlock_store_cword(&ctx->cword.decrypt);
 
@@ -456,14 +441,12 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
 	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 	int err;
-	int ts_state;
 
 	padlock_reset_key(&ctx->cword.encrypt);
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
 
-	ts_state = irq_ts_save();
 	while ((nbytes = walk.nbytes)) {
 		padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
 				   ctx->D, walk.iv, &ctx->cword.decrypt,
@@ -472,8 +455,6 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
 
-	irq_ts_restore(ts_state);
-
 	padlock_store_cword(&ctx->cword.encrypt);
 
 	return err;
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 8c5f90647b7a..bc72d20c32c3 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -89,7 +89,6 @@ static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
 	struct sha1_state state;
 	unsigned int space;
 	unsigned int leftover;
-	int ts_state;
 	int err;
 
 	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -120,14 +119,11 @@ static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
 
 	memcpy(result, &state.state, SHA1_DIGEST_SIZE);
 
-	/* prevent taking the spurious DNA fault with padlock. */
-	ts_state = irq_ts_save();
 	asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */ : \
 		      : "c"((unsigned long)state.count + count), \
 			"a"((unsigned long)state.count), \
 			"S"(in), "D"(result));
-	irq_ts_restore(ts_state);
 
 	padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
 
@@ -155,7 +151,6 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
 	struct sha256_state state;
 	unsigned int space;
 	unsigned int leftover;
-	int ts_state;
 	int err;
 
 	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -186,14 +181,11 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
 
 	memcpy(result, &state.state, SHA256_DIGEST_SIZE);
 
-	/* prevent taking the spurious DNA fault with padlock. */
-	ts_state = irq_ts_save();
 	asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */ : \
 		      : "c"((unsigned long)state.count + count), \
 			"a"((unsigned long)state.count), \
 			"S"(in), "D"(result));
-	irq_ts_restore(ts_state);
 
 	padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
 
@@ -312,7 +304,6 @@ static int padlock_sha1_update_nano(struct shash_desc *desc,
 	u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
 		((aligned(STACK_ALIGN)));
 	u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
-	int ts_state;
 
 	partial = sctx->count & 0x3f;
 	sctx->count += len;
@@ -328,23 +319,19 @@ static int padlock_sha1_update_nano(struct shash_desc *desc,
 			memcpy(sctx->buffer + partial, data,
 				done + SHA1_BLOCK_SIZE);
 			src = sctx->buffer;
-			ts_state = irq_ts_save();
 			asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
 			: "+S"(src), "+D"(dst) \
 			: "a"((long)-1), "c"((unsigned long)1));
-			irq_ts_restore(ts_state);
 			done += SHA1_BLOCK_SIZE;
 			src = data + done;
 		}
 
 		/* Process the left bytes from the input data */
 		if (len - done >= SHA1_BLOCK_SIZE) {
-			ts_state = irq_ts_save();
 			asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
 			: "+S"(src), "+D"(dst)
 			: "a"((long)-1),
 			"c"((unsigned long)((len - done) / SHA1_BLOCK_SIZE)));
-			irq_ts_restore(ts_state);
 			done += ((len - done) - (len - done) % SHA1_BLOCK_SIZE);
 			src = data + done;
 		}
@@ -401,7 +388,6 @@ static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *data,
 	u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
 		((aligned(STACK_ALIGN)));
 	u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
-	int ts_state;
 
 	partial = sctx->count & 0x3f;
 	sctx->count += len;
@@ -417,23 +403,19 @@ static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *data,
 			memcpy(sctx->buf + partial, data,
 				done + SHA256_BLOCK_SIZE);
 			src = sctx->buf;
-			ts_state = irq_ts_save();
 			asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
 			: "+S"(src), "+D"(dst)
 			: "a"((long)-1), "c"((unsigned long)1));
-			irq_ts_restore(ts_state);
 			done += SHA256_BLOCK_SIZE;
 			src = data + done;
 		}
 
 		/* Process the left bytes from input data*/
 		if (len - done >= SHA256_BLOCK_SIZE) {
-			ts_state = irq_ts_save();
 			asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
 			: "+S"(src), "+D"(dst)
 			: "a"((long)-1),
 			"c"((unsigned long)((len - done) / 64)));
-			irq_ts_restore(ts_state);
 			done += ((len - done) - (len - done) % 64);
 			src = data + done;
 		}
--
cgit v1.2.3