path: root/drivers/dma/dmaengine.c
Diffstat (limited to 'drivers/dma/dmaengine.c')
-rw-r--r--  drivers/dma/dmaengine.c  628
1 file changed, 406 insertions(+), 222 deletions(-)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 03ac4b96117c..f3ef4edd4de1 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -60,6 +60,8 @@ static long dmaengine_ref_count;
/* --- sysfs implementation --- */
+#define DMA_SLAVE_NAME "slave"
+
/**
* dev_to_dma_chan - convert a device pointer to its sysfs container object
* @dev - device node
@@ -164,11 +166,152 @@ static struct class dma_devclass = {
/* --- client and device registration --- */
-#define dma_device_satisfies_mask(device, mask) \
- __dma_device_satisfies_mask((device), &(mask))
-static int
-__dma_device_satisfies_mask(struct dma_device *device,
- const dma_cap_mask_t *want)
+/**
+ * dma_cap_mask_all - enable iteration over all operation types
+ */
+static dma_cap_mask_t dma_cap_mask_all;
+
+/**
+ * dma_chan_tbl_ent - tracks channel allocations per core/operation
+ * @chan - associated channel for this entry
+ */
+struct dma_chan_tbl_ent {
+ struct dma_chan *chan;
+};
+
+/**
+ * channel_table - percpu lookup table for memory-to-memory offload providers
+ */
+static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
+
+static int __init dma_channel_table_init(void)
+{
+ enum dma_transaction_type cap;
+ int err = 0;
+
+ bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
+
+ /* 'interrupt', 'private', and 'slave' are channel capabilities,
+ * but are not associated with an operation so they do not need
+ * an entry in the channel_table
+ */
+ clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
+ clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
+ clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
+
+ for_each_dma_cap_mask(cap, dma_cap_mask_all) {
+ channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
+ if (!channel_table[cap]) {
+ err = -ENOMEM;
+ break;
+ }
+ }
+
+ if (err) {
+ pr_err("dmaengine dma_channel_table_init failure: %d\n", err);
+ for_each_dma_cap_mask(cap, dma_cap_mask_all)
+ free_percpu(channel_table[cap]);
+ }
+
+ return err;
+}
+arch_initcall(dma_channel_table_init);
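
For context, this per-cpu table is what the hot-path helper dma_find_channel() (unchanged further down in this file) reads with a single this_cpu_read(). A minimal consumer sketch, assuming the client has already taken a dmaengine reference:

	/* Sketch: mem-to-mem client consuming the per-cpu table above. */
	struct dma_chan *chan;

	dmaengine_get();			/* take a client reference; triggers a rebalance */
	chan = dma_find_channel(DMA_MEMCPY);	/* this_cpu_read(channel_table[DMA_MEMCPY]->chan) */
	if (chan)
		dma_async_issue_pending(chan);	/* kick the channel assigned to this cpu */
	dmaengine_put();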
+
+/**
+ * dma_chan_is_local - returns true if the channel is in the same numa-node as
+ * the cpu
+ */
+static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
+{
+ int node = dev_to_node(chan->device->dev);
+ return node == NUMA_NO_NODE ||
+ cpumask_test_cpu(cpu, cpumask_of_node(node));
+}
+
+/**
+ * min_chan - returns the channel with min count and in the same numa-node as
+ * the cpu
+ * @cap: capability to match
+ * @cpu: cpu index which the channel should be close to
+ *
+ * If some channels are close to the given cpu, the one with the lowest
+ * reference count is returned. Otherwise, cpu is ignored and only the
+ * reference count is taken into account.
+ * Must be called under dma_list_mutex.
+ */
+static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
+{
+ struct dma_device *device;
+ struct dma_chan *chan;
+ struct dma_chan *min = NULL;
+ struct dma_chan *localmin = NULL;
+
+ list_for_each_entry(device, &dma_device_list, global_node) {
+ if (!dma_has_cap(cap, device->cap_mask) ||
+ dma_has_cap(DMA_PRIVATE, device->cap_mask))
+ continue;
+ list_for_each_entry(chan, &device->channels, device_node) {
+ if (!chan->client_count)
+ continue;
+ if (!min || chan->table_count < min->table_count)
+ min = chan;
+
+ if (dma_chan_is_local(chan, cpu))
+ if (!localmin ||
+ chan->table_count < localmin->table_count)
+ localmin = chan;
+ }
+ }
+
+ chan = localmin ? localmin : min;
+
+ if (chan)
+ chan->table_count++;
+
+ return chan;
+}
+
+/**
+ * dma_channel_rebalance - redistribute the available channels
+ *
+ * Optimize for cpu isolation (each cpu gets a dedicated channel for an
+ * operation type) in the SMP case, and operation isolation (avoid
+ * multi-tasking channels) in the non-SMP case. Must be called under
+ * dma_list_mutex.
+ */
+static void dma_channel_rebalance(void)
+{
+ struct dma_chan *chan;
+ struct dma_device *device;
+ int cpu;
+ int cap;
+
+ /* undo the last distribution */
+ for_each_dma_cap_mask(cap, dma_cap_mask_all)
+ for_each_possible_cpu(cpu)
+ per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
+
+ list_for_each_entry(device, &dma_device_list, global_node) {
+ if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+ continue;
+ list_for_each_entry(chan, &device->channels, device_node)
+ chan->table_count = 0;
+ }
+
+ /* don't populate the channel_table if no clients are available */
+ if (!dmaengine_ref_count)
+ return;
+
+ /* redistribute available channels */
+ for_each_dma_cap_mask(cap, dma_cap_mask_all)
+ for_each_online_cpu(cpu) {
+ chan = min_chan(cap, cpu);
+ per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
+ }
+}
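
To make the policy concrete: with four online cpus and two public DMA_MEMCPY channels, min_chan() runs once per cpu and always picks the channel with the lowest table_count (preferring a NUMA-local one), so each channel ends up serving two cpus. With a single cpu and several capabilities, successive capabilities instead land on different channels, giving the operation isolation described above.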
+
+static int dma_device_satisfies_mask(struct dma_device *device,
+ const dma_cap_mask_t *want)
{
dma_cap_mask_t has;
@@ -179,7 +322,7 @@ __dma_device_satisfies_mask(struct dma_device *device,
static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
- return chan->device->dev->driver->owner;
+ return chan->device->owner;
}
/**
@@ -198,6 +341,23 @@ static void balance_ref_count(struct dma_chan *chan)
}
}
+static void dma_device_release(struct kref *ref)
+{
+ struct dma_device *device = container_of(ref, struct dma_device, ref);
+
+ list_del_rcu(&device->global_node);
+ dma_channel_rebalance();
+
+ if (device->device_release)
+ device->device_release(device);
+}
+
+static void dma_device_put(struct dma_device *device)
+{
+ lockdep_assert_held(&dma_list_mutex);
+ kref_put(&device->ref, dma_device_release);
+}
+
/**
* dma_chan_get - try to grab a dma channel's parent driver module
* @chan - channel to grab
@@ -218,6 +378,12 @@ static int dma_chan_get(struct dma_chan *chan)
if (!try_module_get(owner))
return -ENODEV;
+ ret = kref_get_unless_zero(&chan->device->ref);
+ if (!ret) {
+ ret = -ENODEV;
+ goto module_put_out;
+ }
+
/* allocate upon first client reference */
if (chan->device->device_alloc_chan_resources) {
ret = chan->device->device_alloc_chan_resources(chan);
@@ -233,6 +399,8 @@ out:
return 0;
err_out:
+ dma_device_put(chan->device);
+module_put_out:
module_put(owner);
return ret;
}
@@ -250,7 +418,6 @@ static void dma_chan_put(struct dma_chan *chan)
return;
chan->client_count--;
- module_put(dma_chan_to_owner(chan));
/* This channel is not in use anymore, free it */
if (!chan->client_count && chan->device->device_free_chan_resources) {
@@ -265,6 +432,9 @@ static void dma_chan_put(struct dma_chan *chan)
chan->router = NULL;
chan->route_data = NULL;
}
+
+ dma_device_put(chan->device);
+ module_put(dma_chan_to_owner(chan));
}
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
@@ -289,57 +459,6 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
EXPORT_SYMBOL(dma_sync_wait);
/**
- * dma_cap_mask_all - enable iteration over all operation types
- */
-static dma_cap_mask_t dma_cap_mask_all;
-
-/**
- * dma_chan_tbl_ent - tracks channel allocations per core/operation
- * @chan - associated channel for this entry
- */
-struct dma_chan_tbl_ent {
- struct dma_chan *chan;
-};
-
-/**
- * channel_table - percpu lookup table for memory-to-memory offload providers
- */
-static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
-
-static int __init dma_channel_table_init(void)
-{
- enum dma_transaction_type cap;
- int err = 0;
-
- bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
-
- /* 'interrupt', 'private', and 'slave' are channel capabilities,
- * but are not associated with an operation so they do not need
- * an entry in the channel_table
- */
- clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
- clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
- clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
-
- for_each_dma_cap_mask(cap, dma_cap_mask_all) {
- channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
- if (!channel_table[cap]) {
- err = -ENOMEM;
- break;
- }
- }
-
- if (err) {
- pr_err("initialization failure\n");
- for_each_dma_cap_mask(cap, dma_cap_mask_all)
- free_percpu(channel_table[cap]);
- }
-
- return err;
-}
-arch_initcall(dma_channel_table_init);
-
-/**
* dma_find_channel - find a channel to carry out the operation
* @tx_type: transaction type
*/
@@ -369,97 +488,6 @@ void dma_issue_pending_all(void)
}
EXPORT_SYMBOL(dma_issue_pending_all);
-/**
- * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
- */
-static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
-{
- int node = dev_to_node(chan->device->dev);
- return node == NUMA_NO_NODE ||
- cpumask_test_cpu(cpu, cpumask_of_node(node));
-}
-
-/**
- * min_chan - returns the channel with min count and in the same numa-node as the cpu
- * @cap: capability to match
- * @cpu: cpu index which the channel should be close to
- *
- * If some channels are close to the given cpu, the one with the lowest
- * reference count is returned. Otherwise, cpu is ignored and only the
- * reference count is taken into account.
- * Must be called under dma_list_mutex.
- */
-static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
-{
- struct dma_device *device;
- struct dma_chan *chan;
- struct dma_chan *min = NULL;
- struct dma_chan *localmin = NULL;
-
- list_for_each_entry(device, &dma_device_list, global_node) {
- if (!dma_has_cap(cap, device->cap_mask) ||
- dma_has_cap(DMA_PRIVATE, device->cap_mask))
- continue;
- list_for_each_entry(chan, &device->channels, device_node) {
- if (!chan->client_count)
- continue;
- if (!min || chan->table_count < min->table_count)
- min = chan;
-
- if (dma_chan_is_local(chan, cpu))
- if (!localmin ||
- chan->table_count < localmin->table_count)
- localmin = chan;
- }
- }
-
- chan = localmin ? localmin : min;
-
- if (chan)
- chan->table_count++;
-
- return chan;
-}
-
-/**
- * dma_channel_rebalance - redistribute the available channels
- *
- * Optimize for cpu isolation (each cpu gets a dedicated channel for an
- * operation type) in the SMP case, and operation isolation (avoid
- * multi-tasking channels) in the non-SMP case. Must be called under
- * dma_list_mutex.
- */
-static void dma_channel_rebalance(void)
-{
- struct dma_chan *chan;
- struct dma_device *device;
- int cpu;
- int cap;
-
- /* undo the last distribution */
- for_each_dma_cap_mask(cap, dma_cap_mask_all)
- for_each_possible_cpu(cpu)
- per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
-
- list_for_each_entry(device, &dma_device_list, global_node) {
- if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
- continue;
- list_for_each_entry(chan, &device->channels, device_node)
- chan->table_count = 0;
- }
-
- /* don't populate the channel_table if no clients are available */
- if (!dmaengine_ref_count)
- return;
-
- /* redistribute available channels */
- for_each_dma_cap_mask(cap, dma_cap_mask_all)
- for_each_online_cpu(cpu) {
- chan = min_chan(cap, cpu);
- per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
- }
-}
-
int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
struct dma_device *device;
@@ -502,7 +530,7 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
{
struct dma_chan *chan;
- if (mask && !__dma_device_satisfies_mask(dev, mask)) {
+ if (mask && !dma_device_satisfies_mask(dev, mask)) {
dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
return NULL;
}
@@ -704,11 +732,11 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name)
if (has_acpi_companion(dev) && !chan)
chan = acpi_dma_request_slave_chan_by_name(dev, name);
- if (chan) {
- /* Valid channel found or requester needs to be deferred */
- if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
- return chan;
- }
+ if (PTR_ERR(chan) == -EPROBE_DEFER)
+ return chan;
+
+ if (!IS_ERR_OR_NULL(chan))
+ goto found;
/* Try to find the channel via the DMA filter map(s) */
mutex_lock(&dma_list_mutex);
@@ -728,7 +756,23 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name)
}
mutex_unlock(&dma_list_mutex);
- return chan ? chan : ERR_PTR(-EPROBE_DEFER);
+ if (!IS_ERR_OR_NULL(chan))
+ goto found;
+
+ return ERR_PTR(-EPROBE_DEFER);
+
+found:
+ chan->slave = dev;
+ chan->name = kasprintf(GFP_KERNEL, "dma:%s", name);
+ if (!chan->name)
+ return ERR_PTR(-ENOMEM);
+
+ if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj,
+ DMA_SLAVE_NAME))
+ dev_err(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME);
+ if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name))
+ dev_err(dev, "Cannot create DMA %s symlink\n", chan->name);
+ return chan;
}
EXPORT_SYMBOL_GPL(dma_request_chan);
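
A hedged consumer sketch of the contract preserved above ("rx" and foo_probe() are hypothetical names); note that dma_release_channel() also tears down the two sysfs links created here:

	/* Sketch: slave-channel consumer; "rx" is a made-up dma-names entry. */
	static int foo_probe(struct platform_device *pdev)
	{
		struct dma_chan *chan;

		chan = dma_request_chan(&pdev->dev, "rx");
		if (IS_ERR(chan))
			return PTR_ERR(chan);	/* may be -EPROBE_DEFER; propagate as-is */

		/* ... configure and use the channel ... */

		dma_release_channel(chan);	/* also removes the "slave" and "dma:rx" links */
		return 0;
	}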
@@ -786,6 +830,13 @@ void dma_release_channel(struct dma_chan *chan)
/* drop PRIVATE cap enabled by __dma_request_channel() */
if (--chan->device->privatecnt == 0)
dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
+ if (chan->slave) {
+ sysfs_remove_link(&chan->slave->kobj, chan->name);
+ kfree(chan->name);
+ chan->name = NULL;
+ chan->slave = NULL;
+ }
+ sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME);
mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);
@@ -834,14 +885,14 @@ EXPORT_SYMBOL(dmaengine_get);
*/
void dmaengine_put(void)
{
- struct dma_device *device;
+ struct dma_device *device, *_d;
struct dma_chan *chan;
mutex_lock(&dma_list_mutex);
dmaengine_ref_count--;
BUG_ON(dmaengine_ref_count < 0);
/* drop channel references */
- list_for_each_entry(device, &dma_device_list, global_node) {
+ list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
continue;
list_for_each_entry(chan, &device->channels, device_node)
@@ -900,15 +951,115 @@ static int get_dma_id(struct dma_device *device)
return 0;
}
+static int __dma_async_device_channel_register(struct dma_device *device,
+ struct dma_chan *chan,
+ int chan_id)
+{
+ int rc = 0;
+ int chancnt = device->chancnt;
+ atomic_t *idr_ref;
+ struct dma_chan *tchan;
+
+ tchan = list_first_entry_or_null(&device->channels,
+ struct dma_chan, device_node);
+ if (tchan->dev) {
+ idr_ref = tchan->dev->idr_ref;
+ } else {
+ idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
+ if (!idr_ref)
+ return -ENOMEM;
+ atomic_set(idr_ref, 0);
+ }
+
+ chan->local = alloc_percpu(typeof(*chan->local));
+ if (!chan->local)
+ goto err_out;
+ chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
+ if (!chan->dev) {
+ free_percpu(chan->local);
+ chan->local = NULL;
+ goto err_out;
+ }
+
+ /*
+ * When the chan_id is a negative value, we are dynamically adding
+ * the channel. Otherwise we are statically enumerating.
+ */
+ chan->chan_id = chan_id < 0 ? chancnt : chan_id;
+ chan->dev->device.class = &dma_devclass;
+ chan->dev->device.parent = device->dev;
+ chan->dev->chan = chan;
+ chan->dev->idr_ref = idr_ref;
+ chan->dev->dev_id = device->dev_id;
+ atomic_inc(idr_ref);
+ dev_set_name(&chan->dev->device, "dma%dchan%d",
+ device->dev_id, chan->chan_id);
+
+ rc = device_register(&chan->dev->device);
+ if (rc)
+ goto err_out;
+ chan->client_count = 0;
+ device->chancnt = chan->chan_id + 1;
+
+ return 0;
+
+ err_out:
+ free_percpu(chan->local);
+ kfree(chan->dev);
+ if (atomic_dec_return(idr_ref) == 0)
+ kfree(idr_ref);
+ return rc;
+}
+
+int dma_async_device_channel_register(struct dma_device *device,
+ struct dma_chan *chan)
+{
+ int rc;
+
+ rc = __dma_async_device_channel_register(device, chan, -1);
+ if (rc < 0)
+ return rc;
+
+ dma_channel_rebalance();
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dma_async_device_channel_register);
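
A provider-side sketch of the new dynamic hook (struct foo_dev and the surrounding locking discipline are hypothetical); the device is assumed to be registered already, so an existing channel's idr_ref can be borrowed:

	/* Sketch: hot-adding a channel to an already-registered device. */
	static int foo_add_channel(struct foo_dev *fdev, struct dma_chan *chan)
	{
		chan->device = &fdev->dma_dev;
		list_add_tail(&chan->device_node, &fdev->dma_dev.channels);

		/* assigns chan_id = chancnt, registers sysfs, then rebalances */
		return dma_async_device_channel_register(&fdev->dma_dev, chan);
	}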
+
+static void __dma_async_device_channel_unregister(struct dma_device *device,
+ struct dma_chan *chan)
+{
+ WARN_ONCE(!device->device_release && chan->client_count,
+ "%s called while %d clients hold a reference\n",
+ __func__, chan->client_count);
+ mutex_lock(&dma_list_mutex);
+ list_del(&chan->device_node);
+ device->chancnt--;
+ chan->dev->chan = NULL;
+ mutex_unlock(&dma_list_mutex);
+ device_unregister(&chan->dev->device);
+ free_percpu(chan->local);
+}
+
+void dma_async_device_channel_unregister(struct dma_device *device,
+ struct dma_chan *chan)
+{
+ __dma_async_device_channel_unregister(device, chan);
+ dma_channel_rebalance();
+}
+EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
+
/**
* dma_async_device_register - registers DMA devices found
* @device: &dma_device
+ *
+ * After calling this routine the structure should not be freed except from
+ * the device_release() callback, which is invoked once
+ * dma_async_device_unregister() has run and the last reference is dropped.
*/
int dma_async_device_register(struct dma_device *device)
{
- int chancnt = 0, rc;
+ int rc, i = 0;
struct dma_chan* chan;
- atomic_t *idr_ref;
if (!device)
return -ENODEV;
@@ -919,6 +1070,8 @@ int dma_async_device_register(struct dma_device *device)
return -EIO;
}
+ device->owner = device->dev->driver->owner;
+
if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
dev_err(device->dev,
"Device claims capability %s, but op is not defined\n",
@@ -994,65 +1147,29 @@ int dma_async_device_register(struct dma_device *device)
return -EIO;
}
+ if (!device->device_release)
+ dev_warn(device->dev,
+ "WARN: Device release is not defined so it is not safe to unbind this driver while in use\n");
+
+ kref_init(&device->ref);
+
/* note: this only matters in the
* CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
*/
if (device_has_all_tx_types(device))
dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
- idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
- if (!idr_ref)
- return -ENOMEM;
rc = get_dma_id(device);
- if (rc != 0) {
- kfree(idr_ref);
+ if (rc != 0)
return rc;
- }
-
- atomic_set(idr_ref, 0);
/* represent channels in sysfs. Probably want devs too */
list_for_each_entry(chan, &device->channels, device_node) {
- rc = -ENOMEM;
- chan->local = alloc_percpu(typeof(*chan->local));
- if (chan->local == NULL)
- goto err_out;
- chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
- if (chan->dev == NULL) {
- free_percpu(chan->local);
- chan->local = NULL;
+ rc = __dma_async_device_channel_register(device, chan, i++);
+ if (rc < 0)
goto err_out;
- }
-
- chan->chan_id = chancnt++;
- chan->dev->device.class = &dma_devclass;
- chan->dev->device.parent = device->dev;
- chan->dev->chan = chan;
- chan->dev->idr_ref = idr_ref;
- chan->dev->dev_id = device->dev_id;
- atomic_inc(idr_ref);
- dev_set_name(&chan->dev->device, "dma%dchan%d",
- device->dev_id, chan->chan_id);
-
- rc = device_register(&chan->dev->device);
- if (rc) {
- free_percpu(chan->local);
- chan->local = NULL;
- kfree(chan->dev);
- atomic_dec(idr_ref);
- goto err_out;
- }
- chan->client_count = 0;
- }
-
- if (!chancnt) {
- dev_err(device->dev, "%s: device has no channels!\n", __func__);
- rc = -ENODEV;
- goto err_out;
}
- device->chancnt = chancnt;
-
mutex_lock(&dma_list_mutex);
/* take references on public channels */
if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
@@ -1080,9 +1197,8 @@ int dma_async_device_register(struct dma_device *device)
err_out:
/* if we never registered a channel just release the idr */
- if (atomic_read(idr_ref) == 0) {
+ if (!device->chancnt) {
ida_free(&dma_ida, device->dev_id);
- kfree(idr_ref);
return rc;
}
@@ -1108,23 +1224,20 @@ EXPORT_SYMBOL(dma_async_device_register);
*/
void dma_async_device_unregister(struct dma_device *device)
{
- struct dma_chan *chan;
+ struct dma_chan *chan, *n;
+
+ list_for_each_entry_safe(chan, n, &device->channels, device_node)
+ __dma_async_device_channel_unregister(device, chan);
mutex_lock(&dma_list_mutex);
- list_del_rcu(&device->global_node);
+ /*
+ * setting DMA_PRIVATE ensures the device being torn down will not
+ * be used in the channel_table
+ */
+ dma_cap_set(DMA_PRIVATE, device->cap_mask);
dma_channel_rebalance();
+ dma_device_put(device);
mutex_unlock(&dma_list_mutex);
-
- list_for_each_entry(chan, &device->channels, device_node) {
- WARN_ONCE(chan->client_count,
- "%s called while %d clients hold a reference\n",
- __func__, chan->client_count);
- mutex_lock(&dma_list_mutex);
- chan->dev->chan = NULL;
- mutex_unlock(&dma_list_mutex);
- device_unregister(&chan->dev->device);
- free_percpu(chan->local);
- }
}
EXPORT_SYMBOL(dma_async_device_unregister);
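
With the kref conversion above, a provider that wants to support unbinding while channels are in use supplies device_release() and frees its containing object only from there. A hedged sketch (foo_* names hypothetical):

	/* Sketch: release callback paired with the kref added above. */
	static void foo_dma_release(struct dma_device *dma_dev)
	{
		struct foo_dev *fdev = container_of(dma_dev, struct foo_dev, dma_dev);

		/* runs only after the last dma_device_put(); safe to free now */
		kfree(fdev);
	}

	/* at probe time, before dma_async_device_register(&fdev->dma_dev): */
	fdev->dma_dev.device_release = foo_dma_release;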
@@ -1302,6 +1415,79 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);
+static inline int desc_check_and_set_metadata_mode(
+ struct dma_async_tx_descriptor *desc, enum dma_desc_metadata_mode mode)
+{
+ /* Make sure that the metadata mode is not mixed */
+ if (!desc->desc_metadata_mode) {
+ if (dmaengine_is_metadata_mode_supported(desc->chan, mode))
+ desc->desc_metadata_mode = mode;
+ else
+ return -ENOTSUPP;
+ } else if (desc->desc_metadata_mode != mode) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
+ void *data, size_t len)
+{
+ int ret;
+
+ if (!desc)
+ return -EINVAL;
+
+ ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_CLIENT);
+ if (ret)
+ return ret;
+
+ if (!desc->metadata_ops || !desc->metadata_ops->attach)
+ return -ENOTSUPP;
+
+ return desc->metadata_ops->attach(desc, data, len);
+}
+EXPORT_SYMBOL_GPL(dmaengine_desc_attach_metadata);
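
A client-metadata sketch (chan, buf, md and the prep parameters are illustrative): in DESC_METADATA_CLIENT mode the client owns the metadata buffer and must attach it before submitting the descriptor:

	/* Sketch: DESC_METADATA_CLIENT flow; md/md_len are caller-owned. */
	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV, 0);
	if (!desc)
		return -ENOMEM;

	ret = dmaengine_desc_attach_metadata(desc, md, md_len);
	if (ret)
		return ret;	/* -ENOTSUPP: mode unsupported; -EINVAL: modes mixed */

	cookie = dmaengine_submit(desc);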
+
+void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
+ size_t *payload_len, size_t *max_len)
+{
+ int ret;
+
+ if (!desc)
+ return ERR_PTR(-EINVAL);
+
+ ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (!desc->metadata_ops || !desc->metadata_ops->get_ptr)
+ return ERR_PTR(-ENOTSUPP);
+
+ return desc->metadata_ops->get_ptr(desc, payload_len, max_len);
+}
+EXPORT_SYMBOL_GPL(dmaengine_desc_get_metadata_ptr);
+
+int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
+ size_t payload_len)
+{
+ int ret;
+
+ if (!desc)
+ return -EINVAL;
+
+ ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
+ if (ret)
+ return ret;
+
+ if (!desc->metadata_ops || !desc->metadata_ops->set_len)
+ return -ENOTSUPP;
+
+ return desc->metadata_ops->set_len(desc, payload_len);
+}
+EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len);
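
And the engine-metadata counterpart, where the client writes into the engine's buffer and records the payload length before submit (my_md/my_len illustrative):

	/* Sketch: DESC_METADATA_ENGINE flow for a MEM_TO_DEV transfer. */
	ptr = dmaengine_desc_get_metadata_ptr(desc, &payload_len, &max_len);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	memcpy(ptr, my_md, my_len);		/* my_len must not exceed max_len */
	ret = dmaengine_desc_set_metadata_len(desc, my_len);
	if (ret)
		return ret;

	dmaengine_submit(desc);			/* metadata travels with the descriptor */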
+
/* dma_wait_for_async_tx - spin wait for a transaction to complete
* @tx: in-flight transaction to wait on
*/
@@ -1373,5 +1559,3 @@ static int __init dma_bus_init(void)
return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);
-
-