author     Linus Torvalds <torvalds@linux-foundation.org>  2011-09-08 16:49:32 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-09-08 16:49:32 -0700
commit     e4e436e0bd480668834fe6849a52c5397b7be4fb (patch)
tree       c2beb41548c9879c55811e8e027c9c89d23e3fdc /drivers
parent     9ba365438a532436ecd96a089fb29b01516bed33 (diff)
parent     82babbb361f207a80cffa8ac34c2b6a0b62acc88 (diff)
Merge branch 'fixes' of git://git.infradead.org/users/vkoul/slave-dma
* 'fixes' of git://git.infradead.org/users/vkoul/slave-dma:
  dmaengine/ste_dma40: fix memory leak due to prepared descriptors
  dmaengine/ste_dma40: fix Oops due to double free of client descriptor
  dmaengine/ste_dma40: remove duplicate call to d40_pool_lli_free().
  dmaengine/ste_dma40: add missing kernel doc for pending_queue
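The common thread in these fixes is descriptor lifecycle: descriptors returned by the prep calls were not tracked anywhere until issue_pending(), so terminating the channel early leaked them, and a descriptor still sitting on the client list could also be freed through another path, causing the double-free Oops. As a rough userspace sketch of the pattern the patch moves to (all names below are illustrative, not the ste_dma40 API): every prepared descriptor goes onto a dedicated list, submission moves it between lists rather than adding a second reference, and terminate walks every list and frees each entry exactly once.

/*
 * Hypothetical sketch of the descriptor lifecycle handled in this merge:
 * a descriptor lives on exactly one list at a time (prepare -> pending),
 * and terminate releases whatever is still queued, so nothing leaks and
 * nothing is freed twice.  Names (desc, chan, *_queue) are illustrative.
 */
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *prev, *next; };

static void list_init(struct node *h) { h->prev = h->next = h; }
static void list_add_tail(struct node *n, struct node *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}
static void list_del(struct node *n)
{
	n->prev->next = n->next; n->next->prev = n->prev;
	n->prev = n->next = n;          /* self-pointing: a repeated del is harmless */
}
static int list_empty(const struct node *h) { return h->next == h; }

struct desc { struct node node; int id; };     /* node must stay the first member */
struct chan { struct node prepare_queue, pending_queue; };

static struct desc *desc_prep(struct chan *c, int id)
{
	struct desc *d = calloc(1, sizeof(*d));
	d->id = id;
	list_init(&d->node);
	/* track prepared work so terminate can find it (the leak fix) */
	list_add_tail(&d->node, &c->prepare_queue);
	return d;
}

static void desc_submit(struct chan *c, struct desc *d)
{
	/* move, never copy: unlink from the current list first (the double-free fix) */
	list_del(&d->node);
	list_add_tail(&d->node, &c->pending_queue);
}

static void chan_terminate(struct chan *c)
{
	struct node *lists[] = { &c->prepare_queue, &c->pending_queue };
	for (unsigned i = 0; i < 2; i++)
		while (!list_empty(lists[i])) {
			/* cast is valid because node is the first member of desc */
			struct desc *d = (struct desc *)lists[i]->next;
			list_del(&d->node);
			printf("freeing desc %d\n", d->id);
			free(d);
		}
}

int main(void)
{
	struct chan c;
	list_init(&c.prepare_queue);
	list_init(&c.pending_queue);

	struct desc *a = desc_prep(&c, 1);
	desc_prep(&c, 2);               /* prepared but never submitted */
	desc_submit(&c, a);             /* 1 moves prepare -> pending */

	chan_terminate(&c);             /* frees both, each exactly once */
	return 0;
}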
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/dma/ste_dma40.c | 42
1 file changed, 29 insertions(+), 13 deletions(-)
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index cd3a7c726bf..467e4dcb20a 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -174,8 +174,10 @@ struct d40_base;
* @tasklet: Tasklet that gets scheduled from interrupt context to complete a
* transfer and call client callback.
* @client: Cliented owned descriptor list.
+ * @pending_queue: Submitted jobs, to be issued by issue_pending()
* @active: Active descriptor.
* @queue: Queued jobs.
+ * @prepare_queue: Prepared jobs.
* @dma_cfg: The client configuration of this dma channel.
* @configured: whether the dma_cfg configuration is valid
* @base: Pointer to the device instance struct.
@@ -203,6 +205,7 @@ struct d40_chan {
struct list_head pending_queue;
struct list_head active;
struct list_head queue;
+ struct list_head prepare_queue;
struct stedma40_chan_cfg dma_cfg;
bool configured;
struct d40_base *base;
@@ -477,7 +480,6 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
list_for_each_entry_safe(d, _d, &d40c->client, node)
if (async_tx_test_ack(&d->txd)) {
- d40_pool_lli_free(d40c, d);
d40_desc_remove(d);
desc = d;
memset(desc, 0, sizeof(*desc));
@@ -644,8 +646,11 @@ static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
return d;
}
+/* remove desc from current queue and add it to the pending_queue */
static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
+ d40_desc_remove(desc);
+ desc->is_in_client_list = false;
list_add_tail(&desc->node, &d40c->pending_queue);
}
@@ -803,6 +808,7 @@ done:
static void d40_term_all(struct d40_chan *d40c)
{
struct d40_desc *d40d;
+ struct d40_desc *_d;
/* Release active descriptors */
while ((d40d = d40_first_active_get(d40c))) {
@@ -822,6 +828,21 @@ static void d40_term_all(struct d40_chan *d40c)
d40_desc_free(d40c, d40d);
}
+ /* Release client owned descriptors */
+ if (!list_empty(&d40c->client))
+ list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
+ d40_desc_remove(d40d);
+ d40_desc_free(d40c, d40d);
+ }
+
+ /* Release descriptors in prepare queue */
+ if (!list_empty(&d40c->prepare_queue))
+ list_for_each_entry_safe(d40d, _d,
+ &d40c->prepare_queue, node) {
+ d40_desc_remove(d40d);
+ d40_desc_free(d40c, d40d);
+ }
+
d40c->pending_tx = 0;
d40c->busy = false;
}
@@ -1208,7 +1229,6 @@ static void dma_tasklet(unsigned long data)
if (!d40d->cyclic) {
if (async_tx_test_ack(&d40d->txd)) {
- d40_pool_lli_free(d40c, d40d);
d40_desc_remove(d40d);
d40_desc_free(d40c, d40d);
} else {
@@ -1595,21 +1615,10 @@ static int d40_free_dma(struct d40_chan *d40c)
u32 event;
struct d40_phy_res *phy = d40c->phy_chan;
bool is_src;
- struct d40_desc *d;
- struct d40_desc *_d;
-
/* Terminate all queued and active transfers */
d40_term_all(d40c);
- /* Release client owned descriptors */
- if (!list_empty(&d40c->client))
- list_for_each_entry_safe(d, _d, &d40c->client, node) {
- d40_pool_lli_free(d40c, d);
- d40_desc_remove(d);
- d40_desc_free(d40c, d);
- }
-
if (phy == NULL) {
chan_err(d40c, "phy == null\n");
return -EINVAL;
@@ -1911,6 +1920,12 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
goto err;
}
+ /*
+ * add descriptor to the prepare queue in order to be able
+ * to free them later in terminate_all
+ */
+ list_add_tail(&desc->node, &chan->prepare_queue);
+
spin_unlock_irqrestore(&chan->lock, flags);
return &desc->txd;
@@ -2400,6 +2415,7 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
INIT_LIST_HEAD(&d40c->queue);
INIT_LIST_HEAD(&d40c->pending_queue);
INIT_LIST_HEAD(&d40c->client);
+ INIT_LIST_HEAD(&d40c->prepare_queue);
tasklet_init(&d40c->tasklet, dma_tasklet,
(unsigned long) d40c);
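One detail worth noting in the cleanup loops added to d40_term_all() above: they use list_for_each_entry_safe(), because the loop body frees the entry it is visiting; the plain iterator would then chase a next pointer read from freed memory. A minimal, hypothetical userspace sketch of the same idea (illustrative names, not kernel code):

/*
 * Why a "safe" iterator is needed when the loop frees entries:
 * the next pointer is cached before the body runs.
 */
#include <stdio.h>
#include <stdlib.h>

struct item { int id; struct item *next; };

/* cache 'next' before the body runs, so freeing 'it' is safe */
#define for_each_item_safe(it, tmp, head) \
	for ((it) = (head); (it) && ((tmp) = (it)->next, 1); (it) = (tmp))

int main(void)
{
	/* build a three-element list: 1 -> 2 -> 3 */
	struct item *head = NULL;
	for (int id = 3; id >= 1; id--) {
		struct item *n = malloc(sizeof(*n));
		n->id = id;
		n->next = head;
		head = n;
	}

	struct item *it, *tmp;
	for_each_item_safe(it, tmp, head) {
		printf("releasing item %d\n", it->id);
		free(it);               /* safe: iterator already saved it->next */
	}
	return 0;
}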