regmap: debugfs: Cache offsets of valid regions for dump
Avoid doing a linear scan of the entire register map for each read() of
the debugfs register dump by recording the offsets where valid registers
exist when we first read the registers file. This assumes that the set of
valid registers never changes; if that is not the case, the cache will need
to be invalidated.
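
For reference, the cache entries referred to below are assumed to look
roughly like this. This is only a sketch inferred from the fields the diff
touches; the actual definition lives in the regmap internals rather than in
the hunks shown here, so the types and comments are assumptions:

/* Sketch only; relies on <linux/list.h> and <linux/types.h>. */
struct regmap_debugfs_off_cache {
        struct list_head list;  /* entry in map->debugfs_off_cache */
        off_t min;              /* first file offset covered by the block */
        off_t max;              /* last file offset covered by the block */
        unsigned int base_reg;  /* register backing offset 'min' */
};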
This could be further improved for large blocks of contiguous registers by
calculating the target register within the block directly from the offset;
currently we do a linear scan of the block. An rbtree for the block lookup
may also be worthwhile.
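
One possible shape for that refinement, assuming each register in a block
occupies a fixed map->debugfs_tot_len bytes of the dump (an untested sketch
using the names from the diff below; not part of this patch):

        /* Hypothetical direct seek within a cached block: compute which
         * entry contains *pos instead of walking forward from base_reg.
         */
        if (*pos >= c->min && *pos <= c->max) {
                unsigned int n = (*pos - c->min) / map->debugfs_tot_len;

                *pos = c->min + n * map->debugfs_tot_len;
                return c->base_reg + n * map->reg_stride;
        }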
Signed-off-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 749a1dc..07aad78 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -65,25 +65,53 @@
                                                   loff_t from,
                                                   loff_t *pos)
 {
-        loff_t p = *pos;
-        unsigned int i;
+        struct regmap_debugfs_off_cache *c = NULL;
+        loff_t p = 0;
+        unsigned int i, ret;
 
-        for (i = base; i <= map->max_register; i += map->reg_stride) {
-                if (!regmap_readable(map, i))
-                        continue;
+        /*
+         * If we don't have a cache build one so we don't have to do a
+         * linear scan each time.
+         */
+        if (list_empty(&map->debugfs_off_cache)) {
+                for (i = base; i <= map->max_register; i += map->reg_stride) {
+                        /* Skip unprinted registers, closing off cache entry */
+                        if (!regmap_readable(map, i) ||
+                            regmap_precious(map, i)) {
+                                if (c) {
+                                        c->max = p - 1;
+                                        list_add_tail(&c->list,
+                                                      &map->debugfs_off_cache);
+                                        c = NULL;
+                                }
 
-                if (regmap_precious(map, i))
-                        continue;
+                                continue;
+                        }
 
-                if (i >= from) {
-                        *pos = p;
-                        return i;
+                        /* No cache entry? Start a new one */
+                        if (!c) {
+                                c = kzalloc(sizeof(*c), GFP_KERNEL);
+                                if (!c)
+                                        break;
+                                c->min = p;
+                                c->base_reg = i;
+                        }
+
+                        p += map->debugfs_tot_len;
                 }
-
-                p += map->debugfs_tot_len;
         }
 
-        return base;
+        /* Find the relevant block */
+        list_for_each_entry(c, &map->debugfs_off_cache, list) {
+                if (*pos >= c->min && *pos <= c->max) {
+                        *pos = c->min;
+                        return c->base_reg;
+                }
+
+                ret = c->max;
+        }
+
+        return ret;
 }
 
 static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
@@ -309,6 +337,8 @@
         struct rb_node *next;
         struct regmap_range_node *range_node;
 
+        INIT_LIST_HEAD(&map->debugfs_off_cache);
+
         if (name) {
                 map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
                                               dev_name(map->dev), name);
@@ -357,7 +387,16 @@
 
 void regmap_debugfs_exit(struct regmap *map)
 {
+        struct regmap_debugfs_off_cache *c;
+
         debugfs_remove_recursive(map->debugfs);
+        while (!list_empty(&map->debugfs_off_cache)) {
+                c = list_first_entry(&map->debugfs_off_cache,
+                                     struct regmap_debugfs_off_cache,
+                                     list);
+                list_del(&c->list);
+                kfree(c);
+        }
         kfree(map->debugfs_name);
 }
 