crypto: marvell/cesa - Do not chain submitted requests
commit 0413bcf0fc460a68a2a7a8354aee833293d7d693 upstream.
This driver tries to chain requests together before submitting them
to hardware in order to reduce completion interrupts.
However, it even extends chains that have already been submitted
to hardware. This is dangerous because there is no way of knowing
whether the hardware has already read the DMA memory in question
or not.
Fix this by splitting the chain list into two. One for submitted
requests and one for requests that have not yet been submitted.
Only extend the latter.
Reported-by: Klaus Kudielka <klaus.kudielka@gmail.com>
Fixes: 85030c5168 ("crypto: marvell - Add support for chaining crypto requests in TDMA mode")
Cc: <stable@vger.kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
committed by
Greg Kroah-Hartman
parent
0df5e4c7de
commit
f5b9144715
@@ -94,7 +94,7 @@ static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
|
|||||||
|
|
||||||
static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status)
|
static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status)
|
||||||
{
|
{
|
||||||
if (engine->chain.first && engine->chain.last)
|
if (engine->chain_hw.first && engine->chain_hw.last)
|
||||||
return mv_cesa_tdma_process(engine, status);
|
return mv_cesa_tdma_process(engine, status);
|
||||||
|
|
||||||
return mv_cesa_std_process(engine, status);
|
return mv_cesa_std_process(engine, status);
|
||||||
|
@@ -440,8 +440,10 @@ struct mv_cesa_dev {
|
|||||||
* SRAM
|
* SRAM
|
||||||
* @queue: fifo of the pending crypto requests
|
* @queue: fifo of the pending crypto requests
|
||||||
* @load: engine load counter, useful for load balancing
|
* @load: engine load counter, useful for load balancing
|
||||||
* @chain: list of the current tdma descriptors being processed
|
* @chain_hw: list of the current tdma descriptors being processed
|
||||||
* by this engine.
|
* by the hardware.
|
||||||
|
* @chain_sw: list of the current tdma descriptors that will be
|
||||||
|
* submitted to the hardware.
|
||||||
* @complete_queue: fifo of the processed requests by the engine
|
* @complete_queue: fifo of the processed requests by the engine
|
||||||
*
|
*
|
||||||
* Structure storing CESA engine information.
|
* Structure storing CESA engine information.
|
||||||
@@ -463,7 +465,8 @@ struct mv_cesa_engine {
|
|||||||
struct gen_pool *pool;
|
struct gen_pool *pool;
|
||||||
struct crypto_queue queue;
|
struct crypto_queue queue;
|
||||||
atomic_t load;
|
atomic_t load;
|
||||||
struct mv_cesa_tdma_chain chain;
|
struct mv_cesa_tdma_chain chain_hw;
|
||||||
|
struct mv_cesa_tdma_chain chain_sw;
|
||||||
struct list_head complete_queue;
|
struct list_head complete_queue;
|
||||||
int irq;
|
int irq;
|
||||||
};
|
};
|
||||||
|
@@ -38,6 +38,15 @@ void mv_cesa_dma_step(struct mv_cesa_req *dreq)
|
|||||||
{
|
{
|
||||||
struct mv_cesa_engine *engine = dreq->engine;
|
struct mv_cesa_engine *engine = dreq->engine;
|
||||||
|
|
||||||
|
spin_lock_bh(&engine->lock);
|
||||||
|
if (engine->chain_sw.first == dreq->chain.first) {
|
||||||
|
engine->chain_sw.first = NULL;
|
||||||
|
engine->chain_sw.last = NULL;
|
||||||
|
}
|
||||||
|
engine->chain_hw.first = dreq->chain.first;
|
||||||
|
engine->chain_hw.last = dreq->chain.last;
|
||||||
|
spin_unlock_bh(&engine->lock);
|
||||||
|
|
||||||
writel_relaxed(0, engine->regs + CESA_SA_CFG);
|
writel_relaxed(0, engine->regs + CESA_SA_CFG);
|
||||||
|
|
||||||
mv_cesa_set_int_mask(engine, CESA_SA_INT_ACC0_IDMA_DONE);
|
mv_cesa_set_int_mask(engine, CESA_SA_INT_ACC0_IDMA_DONE);
|
||||||
@@ -96,26 +105,28 @@ void mv_cesa_dma_prepare(struct mv_cesa_req *dreq,
|
|||||||
void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
|
void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
|
||||||
struct mv_cesa_req *dreq)
|
struct mv_cesa_req *dreq)
|
||||||
{
|
{
|
||||||
if (engine->chain.first == NULL && engine->chain.last == NULL) {
|
struct mv_cesa_tdma_desc *last = engine->chain_sw.last;
|
||||||
engine->chain.first = dreq->chain.first;
|
|
||||||
engine->chain.last = dreq->chain.last;
|
|
||||||
} else {
|
|
||||||
struct mv_cesa_tdma_desc *last;
|
|
||||||
|
|
||||||
last = engine->chain.last;
|
|
||||||
last->next = dreq->chain.first;
|
|
||||||
engine->chain.last = dreq->chain.last;
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on
|
* Break the DMA chain if the request being queued needs the IV
|
||||||
* the last element of the current chain, or if the request
|
* regs to be set before launching the request.
|
||||||
* being queued needs the IV regs to be set before launching
|
|
||||||
* the request.
|
|
||||||
*/
|
*/
|
||||||
if (!(last->flags & CESA_TDMA_BREAK_CHAIN) &&
|
if (!last || dreq->chain.first->flags & CESA_TDMA_SET_STATE)
|
||||||
!(dreq->chain.first->flags & CESA_TDMA_SET_STATE))
|
engine->chain_sw.first = dreq->chain.first;
|
||||||
|
else {
|
||||||
|
last->next = dreq->chain.first;
|
||||||
last->next_dma = cpu_to_le32(dreq->chain.first->cur_dma);
|
last->next_dma = cpu_to_le32(dreq->chain.first->cur_dma);
|
||||||
}
|
}
|
||||||
|
last = dreq->chain.last;
|
||||||
|
engine->chain_sw.last = last;
|
||||||
|
/*
|
||||||
|
* Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on
|
||||||
|
* the last element of the current chain.
|
||||||
|
*/
|
||||||
|
if (last->flags & CESA_TDMA_BREAK_CHAIN) {
|
||||||
|
engine->chain_sw.first = NULL;
|
||||||
|
engine->chain_sw.last = NULL;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
|
int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
|
||||||
@@ -127,7 +138,7 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
|
|||||||
|
|
||||||
tdma_cur = readl(engine->regs + CESA_TDMA_CUR);
|
tdma_cur = readl(engine->regs + CESA_TDMA_CUR);
|
||||||
|
|
||||||
for (tdma = engine->chain.first; tdma; tdma = next) {
|
for (tdma = engine->chain_hw.first; tdma; tdma = next) {
|
||||||
spin_lock_bh(&engine->lock);
|
spin_lock_bh(&engine->lock);
|
||||||
next = tdma->next;
|
next = tdma->next;
|
||||||
spin_unlock_bh(&engine->lock);
|
spin_unlock_bh(&engine->lock);
|
||||||
@@ -149,12 +160,12 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
|
|||||||
&backlog);
|
&backlog);
|
||||||
|
|
||||||
/* Re-chaining to the next request */
|
/* Re-chaining to the next request */
|
||||||
engine->chain.first = tdma->next;
|
engine->chain_hw.first = tdma->next;
|
||||||
tdma->next = NULL;
|
tdma->next = NULL;
|
||||||
|
|
||||||
/* If this is the last request, clear the chain */
|
/* If this is the last request, clear the chain */
|
||||||
if (engine->chain.first == NULL)
|
if (engine->chain_hw.first == NULL)
|
||||||
engine->chain.last = NULL;
|
engine->chain_hw.last = NULL;
|
||||||
spin_unlock_bh(&engine->lock);
|
spin_unlock_bh(&engine->lock);
|
||||||
|
|
||||||
ctx = crypto_tfm_ctx(req->tfm);
|
ctx = crypto_tfm_ctx(req->tfm);
|
||||||
|
Reference in New Issue
Block a user