From 6cb9c3697585c47977c42c5cc1b9fc49247ac530 Mon Sep 17 00:00:00 2001
From: Dave Kleikamp
Date: Mon, 17 Dec 2012 11:52:47 -0600
Subject: sparc: huge_ptep_set_* functions need to call set_huge_pte_at()

Modifying a huge PTE requires that all of the underlying PTEs be
modified.

Version 2: added missing flush_tlb_page()

Signed-off-by: Dave Kleikamp
Cc: "David S. Miller"
Cc: sparclinux@vger.kernel.org
Signed-off-by: David S. Miller
---
 arch/sparc/include/asm/hugetlb.h | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h
index 8c5eed6d267f..9661e9bc7bb6 100644
--- a/arch/sparc/include/asm/hugetlb.h
+++ b/arch/sparc/include/asm/hugetlb.h
@@ -61,14 +61,20 @@ static inline pte_t huge_pte_wrprotect(pte_t pte)

 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
 					   unsigned long addr, pte_t *ptep)
 {
-	ptep_set_wrprotect(mm, addr, ptep);
+	pte_t old_pte = *ptep;
+	set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
 }

 static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 					     unsigned long addr, pte_t *ptep,
 					     pte_t pte, int dirty)
 {
-	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+	int changed = !pte_same(*ptep, pte);
+	if (changed) {
+		set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+		flush_tlb_page(vma, addr);
+	}
+	return changed;
 }

 static inline pte_t huge_ptep_get(pte_t *ptep)
--
cgit v1.2.3

From 4a9d1946b0135b15d901d7e7c9796d36f352aaea Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Tue, 18 Dec 2012 16:06:16 -0800
Subject: sparc64: Define pte_accessible()

We can elide flush_tlb_*() calls when _PAGE_VALID is clear, as that is
the test used to determine whether or not to queue up a TLB flush in
set_pte_at().

Signed-off-by: David S. Miller
---
 arch/sparc/include/asm/pgtable_64.h | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 95515f1e7cef..7870be0f5adc 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -617,6 +617,12 @@ static inline unsigned long pte_present(pte_t pte)
 	return val;
 }

+#define pte_accessible pte_accessible
+static inline unsigned long pte_accessible(pte_t a)
+{
+	return pte_val(a) & _PAGE_VALID;
+}
+
 static inline unsigned long pte_special(pte_t pte)
 {
 	return pte_val(pte) & _PAGE_SPECIAL;
@@ -802,7 +808,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
 	 * and SUN4V pte layout, so this inline test is fine.
 	 */
-	if (likely(mm != &init_mm) && (pte_val(orig) & _PAGE_VALID))
+	if (likely(mm != &init_mm) && pte_accessible(orig))
 		tlb_batch_add(mm, addr, ptep, orig, fullmm);
 }
--
cgit v1.2.3
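The elision logic is easy to model outside the kernel: only a PTE that
was previously valid can be cached in the TLB, so only then does a
flush need to be queued. A minimal userspace C sketch, for
illustration only (pte_accessible() and _PAGE_VALID mirror the patch;
the bit position, the simplified pte_t, and the counting
tlb_batch_add() stand-in are assumptions, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t pte_t;
	#define _PAGE_VALID (1UL << 0)	/* bit position is an assumption */

	static inline unsigned long pte_val(pte_t p) { return p; }

	static inline unsigned long pte_accessible(pte_t a)
	{
		return pte_val(a) & _PAGE_VALID;
	}

	/* Stand-in for tlb_batch_add(): just count queued flushes. */
	static unsigned long queued_flushes;
	static void tlb_batch_add(unsigned long addr)
	{
		queued_flushes++;
		printf("queue TLB flush for %#lx\n", addr);
	}

	/* Model of __set_pte_at(): only a previously-accessible PTE
	 * can be in the TLB, so only then is a flush queued. */
	static void set_pte_at(pte_t *ptep, unsigned long addr, pte_t new)
	{
		pte_t orig = *ptep;

		*ptep = new;
		if (pte_accessible(orig))
			tlb_batch_add(addr);
	}

	int main(void)
	{
		pte_t pte = 0;				/* invalid: no flush */
		set_pte_at(&pte, 0x1000, _PAGE_VALID);
		set_pte_at(&pte, 0x1000, 0);		/* was valid: flush */
		return queued_flushes == 1 ? 0 : 1;
	}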
From 9f28ffc03e93343ac04874fda9edb7affea45165 Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Wed, 19 Dec 2012 15:19:11 -0800
Subject: sparc64: Fix unrolled AES 256-bit key loops.

The basic scheme of the block mode assembler is that we start by
enabling the FPU, loading the key into the floating point registers,
then iterate calling the encrypt/decrypt routine for each block.

For the 256-bit key cases, we run short on registers in the unrolled
loops.  So the unrolled {ENCRYPT,DECRYPT}_256_2() macros reload the
key registers that get clobbered.  The single-block
{ENCRYPT,DECRYPT}_256() macros are not mindful of this.

So if we have a mix of multi-block and single-block calls, the
single-block 256-bit encrypt/decrypt can run with some of the key
registers clobbered.

Handle this by always explicitly loading those registers before using
the non-unrolled 256-bit macro.

This was discovered thanks to all of the new test cases added by
Jussi Kivilinna.

Signed-off-by: David S. Miller
---
 arch/sparc/crypto/aes_asm.S | 20 ++++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)

diff --git a/arch/sparc/crypto/aes_asm.S b/arch/sparc/crypto/aes_asm.S
index 23f6cbb910d3..1cda8aa7cb85 100644
--- a/arch/sparc/crypto/aes_asm.S
+++ b/arch/sparc/crypto/aes_asm.S
@@ -1024,7 +1024,11 @@ ENTRY(aes_sparc64_ecb_encrypt_256)
 	add	%o2, 0x20, %o2
 	brlz,pt	%o3, 11f
 	 nop
-10:	ldx	[%o1 + 0x00], %g3
+10:	ldd	[%o0 + 0xd0], %f56
+	ldd	[%o0 + 0xd8], %f58
+	ldd	[%o0 + 0xe0], %f60
+	ldd	[%o0 + 0xe8], %f62
+	ldx	[%o1 + 0x00], %g3
 	ldx	[%o1 + 0x08], %g7
 	xor	%g1, %g3, %g3
 	xor	%g2, %g7, %g7
@@ -1128,9 +1132,9 @@ ENTRY(aes_sparc64_ecb_decrypt_256)
 	/* %o0=&key[key_len], %o1=input, %o2=output, %o3=len */
 	ldx	[%o0 - 0x10], %g1
 	subcc	%o3, 0x10, %o3
+	ldx	[%o0 - 0x08], %g2
 	be	10f
-	 ldx	[%o0 - 0x08], %g2
-	sub	%o0, 0xf0, %o0
+	 sub	%o0, 0xf0, %o0
 1:	ldx	[%o1 + 0x00], %g3
 	ldx	[%o1 + 0x08], %g7
 	ldx	[%o1 + 0x10], %o4
@@ -1154,7 +1158,11 @@ ENTRY(aes_sparc64_ecb_decrypt_256)
 	add	%o2, 0x20, %o2
 	brlz,pt	%o3, 11f
 	 nop
-10:	ldx	[%o1 + 0x00], %g3
+10:	ldd	[%o0 + 0x18], %f56
+	ldd	[%o0 + 0x10], %f58
+	ldd	[%o0 + 0x08], %f60
+	ldd	[%o0 + 0x00], %f62
+	ldx	[%o1 + 0x00], %g3
 	ldx	[%o1 + 0x08], %g7
 	xor	%g1, %g3, %g3
 	xor	%g2, %g7, %g7
@@ -1511,11 +1519,11 @@ ENTRY(aes_sparc64_ctr_crypt_256)
 	add	%o2, 0x20, %o2
 	brlz,pt	%o3, 11f
 	 nop
-	ldd	[%o0 + 0xd0], %f56
+10:	ldd	[%o0 + 0xd0], %f56
 	ldd	[%o0 + 0xd8], %f58
 	ldd	[%o0 + 0xe0], %f60
 	ldd	[%o0 + 0xe8], %f62
-10:	xor	%g1, %g3, %o5
+	xor	%g1, %g3, %o5
 	MOVXTOD_O5_F0
 	xor	%g2, %g7, %o5
 	MOVXTOD_O5_F2
--
cgit v1.2.3
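The hazard can be modeled in plain C. In the sketch below (a toy
illustration, not the real assembler: the "float registers" are an
array, the key tail lives in the even slots 56..62, and all function
names are invented), the two-block routine borrows the key-tail slots
as scratch just as the _256_2 macros do, so a following single-block
call must reload them first:

	#include <assert.h>

	#define NREGS 64
	static unsigned long freg[NREGS];	/* model of %f0..%f62 */

	static const unsigned long key_tail[4] = { 0xd0, 0xd8, 0xe0, 0xe8 };

	/* Model of the _256_2 path: short on registers, it uses the
	 * key-tail slots 56..62 as scratch and leaves them clobbered. */
	static void encrypt_two_blocks(void)
	{
		for (int i = 0; i < 4; i++)
			freg[56 + 2 * i] = 0xdead;	/* scratch use */
	}

	/* Model of the fix at label 10: reload the key tail before the
	 * non-unrolled path consumes those slots as key material. */
	static void reload_key_tail(void)
	{
		for (int i = 0; i < 4; i++)
			freg[56 + 2 * i] = key_tail[i];
	}

	static void encrypt_one_block(void)
	{
		/* The single-block macro assumes the whole key is resident. */
		for (int i = 0; i < 4; i++)
			assert(freg[56 + 2 * i] == key_tail[i]);
	}

	int main(void)
	{
		reload_key_tail();	/* initial key load */
		encrypt_two_blocks();	/* clobbers slots 56..62 */
		reload_key_tail();	/* the added ldd's; drop this to hit the bug */
		encrypt_one_block();
		return 0;
	}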
From a8d97cef2168ffe5af1aeed6bf6cdc3ce53f3d0b Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Wed, 19 Dec 2012 15:20:23 -0800
Subject: sparc64: Fix AES ctr mode block size.

Like the generic versions, we need to support a block size
of '1' for CTR mode AES.

This was discovered thanks to all of the new test cases added by
Jussi Kivilinna.

Signed-off-by: David S. Miller
---
 arch/sparc/crypto/aes_glue.c | 27 ++++++++++++++++++++++++---
 1 file changed, 24 insertions(+), 3 deletions(-)

diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c
index 3965d1d36dfa..d26e75126fb5 100644
--- a/arch/sparc/crypto/aes_glue.c
+++ b/arch/sparc/crypto/aes_glue.c
@@ -329,6 +329,22 @@ static int cbc_decrypt(struct blkcipher_desc *desc,
 	return err;
 }

+static void ctr_crypt_final(struct crypto_sparc64_aes_ctx *ctx,
+			    struct blkcipher_walk *walk)
+{
+	u8 *ctrblk = walk->iv;
+	u64 keystream[AES_BLOCK_SIZE / sizeof(u64)];
+	u8 *src = walk->src.virt.addr;
+	u8 *dst = walk->dst.virt.addr;
+	unsigned int nbytes = walk->nbytes;
+
+	ctx->ops->ecb_encrypt(&ctx->key[0], (const u64 *)ctrblk,
+			      keystream, AES_BLOCK_SIZE);
+	crypto_xor((u8 *) keystream, src, nbytes);
+	memcpy(dst, keystream, nbytes);
+	crypto_inc(ctrblk, AES_BLOCK_SIZE);
+}
+
 static int ctr_crypt(struct blkcipher_desc *desc,
 		     struct scatterlist *dst, struct scatterlist *src,
 		     unsigned int nbytes)
@@ -338,10 +354,11 @@ static int ctr_crypt(struct blkcipher_desc *desc,
 	int err;

 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

 	ctx->ops->load_encrypt_keys(&ctx->key[0]);
-	while ((nbytes = walk.nbytes)) {
+	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
 		unsigned int block_len = nbytes & AES_BLOCK_MASK;

 		if (likely(block_len)) {
@@ -353,6 +370,10 @@ static int ctr_crypt(struct blkcipher_desc *desc,
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
+	if (walk.nbytes) {
+		ctr_crypt_final(ctx, &walk);
+		err = blkcipher_walk_done(desc, &walk, 0);
+	}
 	fprs_write(0);
 	return err;
 }
@@ -418,7 +439,7 @@ static struct crypto_alg algs[] = { {
 	.cra_driver_name	= "ctr-aes-sparc64",
 	.cra_priority		= SPARC_CR_OPCODE_PRIORITY,
 	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
-	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_blocksize		= 1,
 	.cra_ctxsize		= sizeof(struct crypto_sparc64_aes_ctx),
 	.cra_alignmask		= 7,
 	.cra_type		= &crypto_blkcipher_type,
--
cgit v1.2.3
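The tail handling follows the standard CTR construction: encrypt the
counter block to produce one block of keystream, XOR only the
remaining nbytes into the output, and bump the counter. A
self-contained C sketch of the same shape, with a toy block "cipher"
standing in for AES (the real code calls ctx->ops->ecb_encrypt; every
name below is invented for illustration, and the toy transform is not
real crypto):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define BLOCK_SIZE 16

	/* Toy stand-in for one-block AES encryption. */
	static void toy_encrypt_block(const uint8_t key[BLOCK_SIZE],
				      const uint8_t in[BLOCK_SIZE],
				      uint8_t out[BLOCK_SIZE])
	{
		for (int i = 0; i < BLOCK_SIZE; i++)
			out[i] = (uint8_t)(in[i] ^ key[i] ^ (i * 37));
	}

	/* Model of crypto_inc(): big-endian counter increment. */
	static void ctr_inc(uint8_t ctr[BLOCK_SIZE])
	{
		for (int i = BLOCK_SIZE - 1; i >= 0; i--)
			if (++ctr[i])
				break;
	}

	/* Model of ctr_crypt_final(): handle a trailing partial block. */
	static void ctr_crypt_final(const uint8_t key[BLOCK_SIZE],
				    uint8_t ctr[BLOCK_SIZE],
				    const uint8_t *src, uint8_t *dst,
				    unsigned int nbytes)
	{
		uint8_t keystream[BLOCK_SIZE];

		toy_encrypt_block(key, ctr, keystream);
		for (unsigned int i = 0; i < nbytes; i++)
			dst[i] = src[i] ^ keystream[i];	/* XOR only the tail */
		ctr_inc(ctr);
	}

	int main(void)
	{
		uint8_t key[BLOCK_SIZE] = { 1 }, ctr[BLOCK_SIZE] = { 0 };
		uint8_t ctr2[BLOCK_SIZE] = { 0 };
		const uint8_t msg[5] = "tail";	/* 5-byte final block */
		uint8_t ct[5], pt[5];

		ctr_crypt_final(key, ctr, msg, ct, sizeof(ct));
		ctr_crypt_final(key, ctr2, ct, pt, sizeof(pt)); /* decrypt == encrypt */
		puts(memcmp(pt, msg, sizeof(msg)) ? "FAIL" : "OK");
		return 0;
	}

Because a partial final block is now handled, the algorithm can
advertise .cra_blocksize = 1, matching the generic CTR template.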
From b35d282ef7345320b594d48d8d70caedfa962a01 Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Wed, 19 Dec 2012 15:22:03 -0800
Subject: sparc64: Set CRYPTO_TFM_REQ_MAY_SLEEP consistently in AES code.

We use the FPU and therefore cannot sleep during the crypto
loops.

Signed-off-by: David S. Miller
---
 arch/sparc/crypto/aes_glue.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c
index d26e75126fb5..503e6d96ad4e 100644
--- a/arch/sparc/crypto/aes_glue.c
+++ b/arch/sparc/crypto/aes_glue.c
@@ -222,6 +222,7 @@ static int ecb_encrypt(struct blkcipher_desc *desc,

 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

 	ctx->ops->load_encrypt_keys(&ctx->key[0]);
 	while ((nbytes = walk.nbytes)) {
@@ -251,6 +252,7 @@ static int ecb_decrypt(struct blkcipher_desc *desc,

 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

 	ctx->ops->load_decrypt_keys(&ctx->key[0]);
 	key_end = &ctx->key[ctx->expanded_key_length / sizeof(u64)];
@@ -280,6 +282,7 @@ static int cbc_encrypt(struct blkcipher_desc *desc,

 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

 	ctx->ops->load_encrypt_keys(&ctx->key[0]);
 	while ((nbytes = walk.nbytes)) {
@@ -309,6 +312,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc,

 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

 	ctx->ops->load_decrypt_keys(&ctx->key[0]);
 	key_end = &ctx->key[ctx->expanded_key_length / sizeof(u64)];
--
cgit v1.2.3

From ce6889515d5d481a5bd8ce5913dfed18f08310ea Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Wed, 19 Dec 2012 15:30:07 -0800
Subject: sparc64: Fix ECB looping constructs in AES code.

Things work better when you increment the source buffer pointer
properly.

Signed-off-by: David S. Miller
---
 arch/sparc/crypto/des_asm.S | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/sparc/crypto/des_asm.S b/arch/sparc/crypto/des_asm.S
index 30b6e90b28b2..b5c8fc269b5f 100644
--- a/arch/sparc/crypto/des_asm.S
+++ b/arch/sparc/crypto/des_asm.S
@@ -376,6 +376,7 @@ ENTRY(des3_ede_sparc64_ecb_crypt)
 1:	ldd	[%o1 + 0x00], %f60
 	DES3_LOOP_BODY(60)
 	std	%f60, [%o2 + 0x00]
+	add	%o1, 0x08, %o1
 	subcc	%o3, 0x08, %o3
 	bne,pt	%icc, 1b
 	 add	%o2, 0x08, %o2
--
cgit v1.2.3
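The underlying bug pattern — advancing the output pointer each
iteration but never the input pointer, so every output block is the
transform of block 0 — is easy to reproduce in C. An illustrative
sketch (all names invented; the XOR transform stands in for
DES3_LOOP_BODY):

	#include <stdio.h>
	#include <string.h>

	/* Toy per-block transform standing in for the cipher body. */
	static unsigned long long crypt_block(unsigned long long in)
	{
		return in ^ 0x0123456789abcdefULL;
	}

	static void ecb_crypt(const unsigned long long *src,
			      unsigned long long *dst, size_t nblocks)
	{
		while (nblocks--) {
			*dst = crypt_block(*src);
			src++;		/* the missing "add %o1, 0x08, %o1" */
			dst++;
		}
	}

	int main(void)
	{
		unsigned long long in[4] = { 1, 2, 3, 4 }, out[4], back[4];

		ecb_crypt(in, out, 4);
		ecb_crypt(out, back, 4);	/* XOR toy: decrypt == encrypt */
		puts(memcmp(in, back, sizeof(in)) ? "FAIL" : "OK");
		return 0;
	}

Without the src++ the round trip still "works" for the first block,
which is why single-block tests passed and only the new multi-block
test vectors exposed the bug.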
From b3a37947074fa0a488d6c7ede58125b2278ab4e8 Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Wed, 19 Dec 2012 15:43:38 -0800
Subject: sparc64: Set CRYPTO_TFM_REQ_MAY_SLEEP consistently in DES code.

We use the FPU and therefore cannot sleep during the crypto
loops.

Signed-off-by: David S. Miller
---
 arch/sparc/crypto/des_glue.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
index 41524cebcc49..3065bc61f9d3 100644
--- a/arch/sparc/crypto/des_glue.c
+++ b/arch/sparc/crypto/des_glue.c
@@ -100,6 +100,7 @@ static int __ecb_crypt(struct blkcipher_desc *desc,

 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

 	if (encrypt)
 		des_sparc64_load_keys(&ctx->encrypt_expkey[0]);
@@ -147,6 +148,7 @@ static int cbc_encrypt(struct blkcipher_desc *desc,

 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

 	des_sparc64_load_keys(&ctx->encrypt_expkey[0]);
 	while ((nbytes = walk.nbytes)) {
@@ -177,6 +179,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc,

 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

 	des_sparc64_load_keys(&ctx->decrypt_expkey[0]);
 	while ((nbytes = walk.nbytes)) {
@@ -266,6 +269,7 @@ static int __ecb3_crypt(struct blkcipher_desc *desc,

 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

 	if (encrypt)
 		K = &ctx->encrypt_expkey[0];
@@ -317,6 +321,7 @@ static int cbc3_encrypt(struct blkcipher_desc *desc,

 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

 	K = &ctx->encrypt_expkey[0];
 	des3_ede_sparc64_load_keys(K);
@@ -352,6 +357,7 @@ static int cbc3_decrypt(struct blkcipher_desc *desc,

 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

 	K = &ctx->decrypt_expkey[0];
 	des3_ede_sparc64_load_keys(K);
--
cgit v1.2.3

From 62ba63dc892cf836ecb9ce4fdb7644d45c95070b Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Wed, 19 Dec 2012 15:44:31 -0800
Subject: sparc64: Set CRYPTO_TFM_REQ_MAY_SLEEP consistently in CAMELLIA code.

We use the FPU and therefore cannot sleep during the crypto
loops.

Signed-off-by: David S. Miller
---
 arch/sparc/crypto/camellia_glue.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c
index 62c89af3fd3f..888f6260b4ec 100644
--- a/arch/sparc/crypto/camellia_glue.c
+++ b/arch/sparc/crypto/camellia_glue.c
@@ -98,6 +98,7 @@ static int __ecb_crypt(struct blkcipher_desc *desc,

 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

 	if (encrypt)
 		key = &ctx->encrypt_key[0];
@@ -160,6 +161,7 @@ static int cbc_encrypt(struct blkcipher_desc *desc,

 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

 	key = &ctx->encrypt_key[0];
 	camellia_sparc64_load_keys(key, ctx->key_len);
@@ -198,6 +200,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc,

 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

 	key = &ctx->decrypt_key[0];
 	camellia_sparc64_load_keys(key, ctx->key_len);
--
cgit v1.2.3
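All three MAY_SLEEP patches apply the same one-line pattern: once the
FPU is claimed for the walk, any helper that might sleep (such as an
allocation inside blkcipher_walk_done()) must be told not to, so the
request bit is cleared up front. A rough C model of why (the flag
name mirrors the patches; the desc struct, the fpu_section marker,
and the allocator stand-in are assumed scaffolding, not the crypto
API):

	#include <assert.h>
	#include <stdio.h>

	#define REQ_MAY_SLEEP (1u << 0)

	struct desc { unsigned int flags; };

	static int fpu_section;	/* nonzero while the FPU is claimed */

	/* Stand-in allocator: sleeping inside an FPU section is a bug. */
	static void maybe_sleep_alloc(const struct desc *d)
	{
		if (d->flags & REQ_MAY_SLEEP) {
			assert(!fpu_section && "slept with the FPU held");
			/* ... would block here ... */
		}
	}

	static void crypt(struct desc *d)
	{
		d->flags &= ~REQ_MAY_SLEEP;	/* the one-line fix */
		fpu_section = 1;		/* load keys, claim FPU */
		for (int i = 0; i < 3; i++)
			maybe_sleep_alloc(d);	/* per-step walk bookkeeping */
		fpu_section = 0;
		puts("ok");
	}

	int main(void)
	{
		struct desc d = { .flags = REQ_MAY_SLEEP };
		crypt(&d);
		return 0;
	}

Clearing the flag at the top of every entry point, rather than at each
call site, keeps the no-sleep guarantee even as new walk steps are
added inside the keys-loaded region.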