-rw-r--r--   crypto/async_tx/async_tx.c | 46
-rw-r--r--   drivers/dma/dmaengine.c    | 16
-rw-r--r--   include/linux/dmaengine.h  | 60
 3 files changed, 88 insertions(+), 34 deletions(-)
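
For orientation (a sketch, not part of the patch): the change below replaces every direct use of the descriptor's ->parent, ->next and ->lock fields with txd_*() accessors, so those fields and their spinlock can be compiled out entirely when CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH is set. The snippet condenses the chaining idiom the accessors wrap; the helper name chain_or_submit() is illustrative only and does not exist in the tree, and the real async_tx_submit() additionally handles the cross-channel case via async_tx_channel_switch(), omitted here.

/*
 * Sketch only -- not code from this patch.  Take the descriptor lock,
 * append the new transaction behind its dependency if that dependency
 * has not run yet, otherwise submit directly.
 */
static void chain_or_submit(struct dma_async_tx_descriptor *depend_tx,
			    struct dma_async_tx_descriptor *tx)
{
	bool chained = false;

	txd_lock(depend_tx);
	if (txd_parent(depend_tx) && depend_tx->chan == tx->chan) {
		/* depend_tx still has a parent, so it has not run yet: safe to append */
		txd_chain(depend_tx, tx);	/* sets depend_tx->next and tx->parent */
		chained = true;
	}
	txd_unlock(depend_tx);

	if (!chained) {
		/* no pending dependency on this channel: submit to hardware now */
		txd_clear_parent(tx);
		tx->tx_submit(tx);
	}
}
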
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index f9cdf04fe7c..7f2c00a4520 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -81,18 +81,13 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
struct dma_device *device = chan->device;
struct dma_async_tx_descriptor *intr_tx = (void *) ~0;
- #ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
- BUG();
- #endif
-
/* first check to see if we can still append to depend_tx */
- spin_lock_bh(&depend_tx->lock);
- if (depend_tx->parent && depend_tx->chan == tx->chan) {
- tx->parent = depend_tx;
- depend_tx->next = tx;
+ txd_lock(depend_tx);
+ if (txd_parent(depend_tx) && depend_tx->chan == tx->chan) {
+ txd_chain(depend_tx, tx);
intr_tx = NULL;
}
- spin_unlock_bh(&depend_tx->lock);
+ txd_unlock(depend_tx);
/* attached dependency, flush the parent channel */
if (!intr_tx) {
@@ -111,24 +106,22 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
if (intr_tx) {
intr_tx->callback = NULL;
intr_tx->callback_param = NULL;
- tx->parent = intr_tx;
- /* safe to set ->next outside the lock since we know we are
+ /* safe to chain outside the lock since we know we are
* not submitted yet
*/
- intr_tx->next = tx;
+ txd_chain(intr_tx, tx);
/* check if we need to append */
- spin_lock_bh(&depend_tx->lock);
- if (depend_tx->parent) {
- intr_tx->parent = depend_tx;
- depend_tx->next = intr_tx;
+ txd_lock(depend_tx);
+ if (txd_parent(depend_tx)) {
+ txd_chain(depend_tx, intr_tx);
async_tx_ack(intr_tx);
intr_tx = NULL;
}
- spin_unlock_bh(&depend_tx->lock);
+ txd_unlock(depend_tx);
if (intr_tx) {
- intr_tx->parent = NULL;
+ txd_clear_parent(intr_tx);
intr_tx->tx_submit(intr_tx);
async_tx_ack(intr_tx);
}
@@ -176,21 +169,20 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
* 2/ dependencies are 1:1 i.e. two transactions can
* not depend on the same parent
*/
- BUG_ON(async_tx_test_ack(depend_tx) || depend_tx->next ||
- tx->parent);
+ BUG_ON(async_tx_test_ack(depend_tx) || txd_next(depend_tx) ||
+ txd_parent(tx));
/* the lock prevents async_tx_run_dependencies from missing
* the setting of ->next when ->parent != NULL
*/
- spin_lock_bh(&depend_tx->lock);
- if (depend_tx->parent) {
+ txd_lock(depend_tx);
+ if (txd_parent(depend_tx)) {
/* we have a parent so we can not submit directly
* if we are staying on the same channel: append
* else: channel switch
*/
if (depend_tx->chan == chan) {
- tx->parent = depend_tx;
- depend_tx->next = tx;
+ txd_chain(depend_tx, tx);
s = ASYNC_TX_SUBMITTED;
} else
s = ASYNC_TX_CHANNEL_SWITCH;
@@ -203,7 +195,7 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
else
s = ASYNC_TX_CHANNEL_SWITCH;
}
- spin_unlock_bh(&depend_tx->lock);
+ txd_unlock(depend_tx);
switch (s) {
case ASYNC_TX_SUBMITTED:
@@ -212,12 +204,12 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
async_tx_channel_switch(depend_tx, tx);
break;
case ASYNC_TX_DIRECT_SUBMIT:
- tx->parent = NULL;
+ txd_clear_parent(tx);
tx->tx_submit(tx);
break;
}
} else {
- tx->parent = NULL;
+ txd_clear_parent(tx);
tx->tx_submit(tx);
}
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index d18b5d069d7..fcfe1a62ffa 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -978,7 +978,9 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
struct dma_chan *chan)
{
tx->chan = chan;
+ #ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
spin_lock_init(&tx->lock);
+ #endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);
@@ -1011,7 +1013,7 @@ EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
*/
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
- struct dma_async_tx_descriptor *dep = tx->next;
+ struct dma_async_tx_descriptor *dep = txd_next(tx);
struct dma_async_tx_descriptor *dep_next;
struct dma_chan *chan;
@@ -1019,7 +1021,7 @@ void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
return;
/* we'll submit tx->next now, so clear the link */
- tx->next = NULL;
+ txd_clear_next(tx);
chan = dep->chan;
/* keep submitting up until a channel switch is detected
@@ -1027,14 +1029,14 @@ void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
* processing the interrupt from async_tx_channel_switch
*/
for (; dep; dep = dep_next) {
- spin_lock_bh(&dep->lock);
- dep->parent = NULL;
- dep_next = dep->next;
+ txd_lock(dep);
+ txd_clear_parent(dep);
+ dep_next = txd_next(dep);
if (dep_next && dep_next->chan == chan)
- dep->next = NULL; /* ->next will be submitted */
+ txd_clear_next(dep); /* ->next will be submitted */
else
dep_next = NULL; /* submit current dep and terminate */
- spin_unlock_bh(&dep->lock);
+ txd_unlock(dep);
dep->tx_submit(dep);
}
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 20ea12c86fd..cb234979fc6 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -230,11 +230,71 @@ struct dma_async_tx_descriptor {
dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
dma_async_tx_callback callback;
void *callback_param;
+#ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
struct dma_async_tx_descriptor *next;
struct dma_async_tx_descriptor *parent;
spinlock_t lock;
+#endif
};
+#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
+static inline void txd_lock(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
+{
+ BUG();
+}
+static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
+{
+ return NULL;
+}
+static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
+{
+ return NULL;
+}
+
+#else
+static inline void txd_lock(struct dma_async_tx_descriptor *txd)
+{
+ spin_lock_bh(&txd->lock);
+}
+static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
+{
+ spin_unlock_bh(&txd->lock);
+}
+static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
+{
+ txd->next = next;
+ next->parent = txd;
+}
+static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
+{
+ txd->parent = NULL;
+}
+static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
+{
+ txd->next = NULL;
+}
+static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
+{
+ return txd->parent;
+}
+static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
+{
+ return txd->next;
+}
+#endif
+
/**
* struct dma_device - info on the entity supplying DMA services
* @chancnt: how many DMA channels are supported
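
A closing sketch, derived from the dmaengine.h hunk above rather than additional patch content: with CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH set, the descriptor loses its channel-switch bookkeeping and the stub accessors make chaining impossible.

/*
 * Effective descriptor layout when CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
 * is set (fields before tx_submit elided with "..."): ->next, ->parent and
 * ->lock are gone, txd_lock()/txd_unlock() are empty, txd_parent() and
 * txd_next() always return NULL, and txd_chain() BUG()s.
 */
struct dma_async_tx_descriptor {
	...
	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
	dma_async_tx_callback callback;
	void *callback_param;
	/* no next/parent/lock: channel-switch support compiled out */
};
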