author      Romain Perier <romain.perier@free-electrons.com>  2016-06-21 10:08:39 +0200
committer   Herbert Xu <herbert@gondor.apana.org.au>  2016-06-23 18:29:51 +0800
commit      85030c5168f1df03a164d47254cc785331f1dfe2 (patch)
tree        0d8f0077d43d48359535f064cdb0626069c66592 /drivers/crypto/marvell/cesa.c
parent      bf8f91e711926c1fb57629338e30164ecfba9700 (diff)
download    linux-85030c5168f1df03a164d47254cc785331f1dfe2.tar.gz
crypto: marvell - Add support for chaining crypto requests in TDMA mode
The Cryptographic Engines and Security Accelerators (CESA) support the Multi-Packet Chain Mode. With this mode enabled, multiple TDMA requests can be chained and processed by the hardware without software intervention. This mode was already activated, but the crypto requests were not chained together. Chaining them significantly reduces the number of IRQs: instead of being interrupted at the end of each crypto request, we are interrupted only at the end of the last cryptographic request processed by the engine.

This commit refactors the code, changes the code architecture and adds the required data structures to chain cryptographic requests together before sending them to an engine (stopped or possibly already running).

Signed-off-by: Romain Perier <romain.perier@free-electrons.com>
Acked-by: Boris Brezillon <boris.brezillon@free-electrons.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
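As a rough illustration of what chaining buys here, the stand-alone C model below links two requests' TDMA descriptor lists so the engine can walk from one request's last descriptor straight into the next request's first one without stopping for an IRQ. The types and the chain_append() helper are simplified, hypothetical stand-ins, not the driver's real structures; in the driver the equivalent work is done by mv_cesa_tdma_chain(), which this file only calls.

/*
 * Hypothetical stand-alone model of TDMA request chaining.
 * Simplified types for illustration only; NOT the driver's layout.
 */
#include <stdio.h>

struct tdma_desc {
	unsigned int cur_dma;   /* DMA address of this descriptor */
	unsigned int next_dma;  /* DMA link the engine follows; 0 ends the chain */
	struct tdma_desc *next; /* CPU-side link mirroring next_dma */
};

struct tdma_chain {
	struct tdma_desc *first;
	struct tdma_desc *last;
};

/* Append a new request's descriptor list to the engine-wide chain,
 * so the hardware flows into the next request without raising an IRQ. */
static void chain_append(struct tdma_chain *engine_chain,
			 const struct tdma_chain *req_chain)
{
	if (!engine_chain->first) {
		engine_chain->first = req_chain->first;
	} else {
		engine_chain->last->next = req_chain->first;
		engine_chain->last->next_dma = req_chain->first->cur_dma;
	}
	engine_chain->last = req_chain->last;
}

int main(void)
{
	struct tdma_desc a = { .cur_dma = 0x1000 };
	struct tdma_desc b = { .cur_dma = 0x2000 };
	struct tdma_chain engine = { 0 };
	struct tdma_chain req1 = { &a, &a };
	struct tdma_chain req2 = { &b, &b };

	chain_append(&engine, &req1);
	chain_append(&engine, &req2); /* a.next_dma now points at b */

	printf("first=0x%x last=0x%x link=0x%x\n",
	       engine.first->cur_dma, engine.last->cur_dma, a.next_dma);
	return 0;
}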
Diffstat (limited to 'drivers/crypto/marvell/cesa.c')
-rw-r--r--  drivers/crypto/marvell/cesa.c | 115
1 file changed, 90 insertions(+), 25 deletions(-)
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index 26624f176c39..218c497f2ddb 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -40,14 +40,33 @@ MODULE_PARM_DESC(allhwsupport, "Enable support for all hardware (even it if over
struct mv_cesa_dev *cesa_dev;
-static void mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine)
+struct crypto_async_request *
+mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
+ struct crypto_async_request **backlog)
{
- struct crypto_async_request *req, *backlog;
- struct mv_cesa_ctx *ctx;
+ struct crypto_async_request *req;
- backlog = crypto_get_backlog(&engine->queue);
+ *backlog = crypto_get_backlog(&engine->queue);
req = crypto_dequeue_request(&engine->queue);
- engine->req = req;
+
+ if (!req)
+ return NULL;
+
+ return req;
+}
+
+static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine)
+{
+ struct crypto_async_request *req = NULL, *backlog = NULL;
+ struct mv_cesa_ctx *ctx;
+
+
+ spin_lock_bh(&engine->lock);
+ if (!engine->req) {
+ req = mv_cesa_dequeue_req_locked(engine, &backlog);
+ engine->req = req;
+ }
+ spin_unlock_bh(&engine->lock);
if (!req)
return;
@@ -57,6 +76,46 @@ static void mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine)
ctx = crypto_tfm_ctx(req->tfm);
ctx->ops->step(req);
+
+ return;
+}
+
+static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
+{
+ struct crypto_async_request *req;
+ struct mv_cesa_ctx *ctx;
+ int res;
+
+ req = engine->req;
+ ctx = crypto_tfm_ctx(req->tfm);
+ res = ctx->ops->process(req, status);
+
+ if (res == 0) {
+ ctx->ops->complete(req);
+ mv_cesa_engine_enqueue_complete_request(engine, req);
+ } else if (res == -EINPROGRESS) {
+ ctx->ops->step(req);
+ }
+
+ return res;
+}
+
+static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status)
+{
+ if (engine->chain.first && engine->chain.last)
+ return mv_cesa_tdma_process(engine, status);
+
+ return mv_cesa_std_process(engine, status);
+}
+
+static inline void
+mv_cesa_complete_req(struct mv_cesa_ctx *ctx, struct crypto_async_request *req,
+ int res)
+{
+ ctx->ops->cleanup(req);
+ local_bh_disable();
+ req->complete(req, res);
+ local_bh_enable();
}
static irqreturn_t mv_cesa_int(int irq, void *priv)
@@ -83,26 +142,31 @@ static irqreturn_t mv_cesa_int(int irq, void *priv)
writel(~status, engine->regs + CESA_SA_FPGA_INT_STATUS);
writel(~status, engine->regs + CESA_SA_INT_STATUS);
+ /* Process fetched requests */
+ res = mv_cesa_int_process(engine, status & mask);
ret = IRQ_HANDLED;
+
spin_lock_bh(&engine->lock);
req = engine->req;
+ if (res != -EINPROGRESS)
+ engine->req = NULL;
spin_unlock_bh(&engine->lock);
- if (req) {
- ctx = crypto_tfm_ctx(req->tfm);
- res = ctx->ops->process(req, status & mask);
- if (res != -EINPROGRESS) {
- spin_lock_bh(&engine->lock);
- engine->req = NULL;
- mv_cesa_dequeue_req_locked(engine);
- spin_unlock_bh(&engine->lock);
- ctx->ops->complete(req);
- ctx->ops->cleanup(req);
- local_bh_disable();
- req->complete(req, res);
- local_bh_enable();
- } else {
- ctx->ops->step(req);
- }
+
+ ctx = crypto_tfm_ctx(req->tfm);
+
+ if (res && res != -EINPROGRESS)
+ mv_cesa_complete_req(ctx, req, res);
+
+ /* Launch the next pending request */
+ mv_cesa_rearm_engine(engine);
+
+ /* Iterate over the complete queue */
+ while (true) {
+ req = mv_cesa_engine_dequeue_complete_request(engine);
+ if (!req)
+ break;
+
+ mv_cesa_complete_req(ctx, req, 0);
}
}
@@ -116,16 +180,16 @@ int mv_cesa_queue_req(struct crypto_async_request *req,
struct mv_cesa_engine *engine = creq->engine;
spin_lock_bh(&engine->lock);
+ if (mv_cesa_req_get_type(creq) == CESA_DMA_REQ)
+ mv_cesa_tdma_chain(engine, creq);
+
ret = crypto_enqueue_request(&engine->queue, req);
spin_unlock_bh(&engine->lock);
if (ret != -EINPROGRESS)
return ret;
- spin_lock_bh(&engine->lock);
- if (!engine->req)
- mv_cesa_dequeue_req_locked(engine);
- spin_unlock_bh(&engine->lock);
+ mv_cesa_rearm_engine(engine);
return -EINPROGRESS;
}
@@ -496,6 +560,7 @@ static int mv_cesa_probe(struct platform_device *pdev)
crypto_init_queue(&engine->queue, CESA_CRYPTO_DEFAULT_MAX_QLEN);
atomic_set(&engine->load, 0);
+ INIT_LIST_HEAD(&engine->complete_queue);
}
cesa_dev = cesa;
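After this patch, the interrupt handler follows a fixed order: process the request that just finished, park it on engine->complete_queue (via mv_cesa_engine_enqueue_complete_request()), rearm the engine with the next pending request so the hardware keeps working, and only then invoke the crypto API completion callbacks. The stand-alone sketch below models that ordering with simplified, hypothetical types and no locking; it is the control flow only, not the driver's code.

/*
 * Hypothetical model of the IRQ-path ordering: rearm the engine
 * before running potentially slow ->complete() callbacks.
 */
#include <stdio.h>

struct req {
	const char *name;
	struct req *next;
};

struct engine {
	struct req *complete_head; /* models engine->complete_queue */
	struct req *complete_tail;
	struct req *pending;       /* next request waiting to run */
};

static void enqueue_complete(struct engine *e, struct req *r)
{
	r->next = NULL;
	if (e->complete_tail)
		e->complete_tail->next = r;
	else
		e->complete_head = r;
	e->complete_tail = r;
}

static struct req *dequeue_complete(struct engine *e)
{
	struct req *r = e->complete_head;

	if (r) {
		e->complete_head = r->next;
		if (!e->complete_head)
			e->complete_tail = NULL;
	}
	return r;
}

static void irq_handler(struct engine *e, struct req *finished)
{
	struct req *r;

	enqueue_complete(e, finished); /* park the finished request */

	if (e->pending) {              /* rearm: hardware runs again first */
		printf("engine restarted with %s\n", e->pending->name);
		e->pending = NULL;
	}

	while ((r = dequeue_complete(e)) != NULL) /* callbacks run last */
		printf("completing %s\n", r->name);
}

int main(void)
{
	struct req done = { .name = "req0" };
	struct req waiting = { .name = "req1" };
	struct engine e = { .pending = &waiting };

	irq_handler(&e, &done);
	return 0;
}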