Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/Kconfig                  |  28
-rw-r--r--  drivers/base/Makefile                 |   2
-rw-r--r--  drivers/base/core.c                   |  18
-rw-r--r--  drivers/base/cpu.c                    |  52
-rw-r--r--  drivers/base/dd.c                     |  19
-rw-r--r--  drivers/base/devres.c                 |  31
-rw-r--r--  drivers/base/dma-contiguous.c         | 119
-rw-r--r--  drivers/base/firmware_class.c         |   3
-rw-r--r--  drivers/base/memory.c                 |   2
-rw-r--r--  drivers/base/power/main.c             |   6
-rw-r--r--  drivers/base/regmap/regcache-rbtree.c |   2
-rw-r--r--  drivers/base/regmap/regcache.c        |   3
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c  |   8
-rw-r--r--  drivers/base/regmap/regmap.c          |   8
-rw-r--r--  drivers/base/topology.c               |   3
15 files changed, 175 insertions(+), 129 deletions(-)
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 07abd9d76f7f..4fe77b887457 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -187,6 +187,14 @@ config GENERIC_CPU_DEVICES
bool
default n
+config HAVE_CPU_AUTOPROBE
+ def_bool ARCH_HAS_CPU_AUTOPROBE
+
+config GENERIC_CPU_AUTOPROBE
+ bool
+ depends on !ARCH_HAS_CPU_AUTOPROBE
+ select HAVE_CPU_AUTOPROBE
+
config SOC_BUS
bool
@@ -202,11 +210,9 @@ config DMA_SHARED_BUFFER
APIs extension; the file's descriptor can then be passed on to other
drivers.
-config CMA
- bool "Contiguous Memory Allocator"
- depends on HAVE_DMA_CONTIGUOUS && HAVE_MEMBLOCK
- select MIGRATION
- select MEMORY_ISOLATION
+config DMA_CMA
+ bool "DMA Contiguous Memory Allocator"
+ depends on HAVE_DMA_CONTIGUOUS && CMA
help
This enables the Contiguous Memory Allocator which allows drivers
to allocate big physically-contiguous blocks of memory for use with
@@ -215,17 +221,7 @@ config CMA
For more information see <include/linux/dma-contiguous.h>.
If unsure, say "n".
-if CMA
-
-config CMA_DEBUG
- bool "CMA debug messages (DEVELOPMENT)"
- depends on DEBUG_KERNEL
- help
- Turns on debug messages in CMA. This produces KERN_DEBUG
- messages for every CMA call as well as various messages while
- processing calls such as dma_alloc_from_contiguous().
- This option does not affect warning and error messages.
-
+if DMA_CMA
comment "Default contiguous memory area size:"
config CMA_SIZE_MBYTES
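The help text above (truncated by the hunk) describes what DMA_CMA buys a
driver: multi-megabyte physically-contiguous allocations through the ordinary
DMA API. A minimal consumer sketch, assuming a hypothetical platform driver
("foo" names are illustrative, not part of this patch):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/platform_device.h>

#define FOO_BUF_SIZE	(16 << 20)	/* 16 MiB: far beyond what the page allocator can hand out */

static int foo_probe(struct platform_device *pdev)
{
	dma_addr_t dma;
	void *vaddr;

	/* With CONFIG_DMA_CMA=y this can be satisfied from the CMA region. */
	vaddr = dma_alloc_coherent(&pdev->dev, FOO_BUF_SIZE, &dma, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	/* ... program the device with the bus address in "dma" ... */

	dma_free_coherent(&pdev->dev, FOO_BUF_SIZE, vaddr, dma);
	return 0;
}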
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 4e22ce3ed73d..5d93bb519753 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -6,7 +6,7 @@ obj-y := core.o bus.o dd.o syscore.o \
attribute_container.o transport_class.o \
topology.o
obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
-obj-$(CONFIG_CMA) += dma-contiguous.o
+obj-$(CONFIG_DMA_CMA) += dma-contiguous.o
obj-y += power/
obj-$(CONFIG_HAS_DMA) += dma-mapping.o
obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 2499cefdcdf2..2a19097a7cb1 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -765,12 +765,12 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
return &dir->kobj;
}
+static DEFINE_MUTEX(gdp_mutex);
static struct kobject *get_device_parent(struct device *dev,
struct device *parent)
{
if (dev->class) {
- static DEFINE_MUTEX(gdp_mutex);
struct kobject *kobj = NULL;
struct kobject *parent_kobj;
struct kobject *k;
@@ -834,7 +834,9 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
glue_dir->kset != &dev->class->p->glue_dirs)
return;
+ mutex_lock(&gdp_mutex);
kobject_put(glue_dir);
+ mutex_unlock(&gdp_mutex);
}
static void cleanup_device_parent(struct device *dev)
@@ -1839,7 +1841,7 @@ EXPORT_SYMBOL_GPL(device_move);
*/
void device_shutdown(void)
{
- struct device *dev;
+ struct device *dev, *parent;
spin_lock(&devices_kset->list_lock);
/*
@@ -1856,7 +1858,7 @@ void device_shutdown(void)
* prevent it from being freed because parent's
* lock is to be held
*/
- get_device(dev->parent);
+ parent = get_device(dev->parent);
get_device(dev);
/*
* Make sure the device is off the kset list, in the
@@ -1866,8 +1868,8 @@ void device_shutdown(void)
spin_unlock(&devices_kset->list_lock);
/* hold lock to avoid race with probe/release */
- if (dev->parent)
- device_lock(dev->parent);
+ if (parent)
+ device_lock(parent);
device_lock(dev);
/* Don't allow any more runtime suspends */
@@ -1885,11 +1887,11 @@ void device_shutdown(void)
}
device_unlock(dev);
- if (dev->parent)
- device_unlock(dev->parent);
+ if (parent)
+ device_unlock(parent);
put_device(dev);
- put_device(dev->parent);
+ put_device(parent);
spin_lock(&devices_kset->list_lock);
}
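Both core.c changes above close reference/locking races. The device_shutdown()
fix caches get_device(dev->parent) once and then uses only that pointer, since
re-reading dev->parent after the device may have been torn down can yield a
stale or freed object; the gdp_mutex change makes the final kobject_put() on a
class glue directory atomic with its lookup/creation. A reduced sketch of the
cached-parent pattern (hypothetical walker, not the kernel's exact code):

#include <linux/device.h>

static void foo_shutdown_one(struct device *dev)
{
	struct device *parent = get_device(dev->parent);	/* may be NULL */

	if (parent)
		device_lock(parent);
	device_lock(dev);

	/* ... quiesce the device ... */

	device_unlock(dev);
	if (parent)
		device_unlock(parent);

	/* put_device(dev->parent) here would be wrong: dev->parent may no
	 * longer be the object the reference was taken on. */
	put_device(parent);
}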
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 3d48fc887ef4..607efe6b7dc8 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -13,6 +13,9 @@
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/percpu.h>
+#include <linux/acpi.h>
+#include <linux/of.h>
+#include <linux/cpufeature.h>
#include "base.h"
@@ -260,6 +263,45 @@ static void cpu_device_release(struct device *dev)
*/
}
+#ifdef CONFIG_HAVE_CPU_AUTOPROBE
+#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
+static ssize_t print_cpu_modalias(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t n;
+ u32 i;
+
+ n = sprintf(buf, "cpu:type:" CPU_FEATURE_TYPEFMT ":feature:",
+ CPU_FEATURE_TYPEVAL);
+
+ for (i = 0; i < MAX_CPU_FEATURES; i++)
+ if (cpu_have_feature(i)) {
+ if (PAGE_SIZE < n + sizeof(",XXXX\n")) {
+ WARN(1, "CPU features overflow page\n");
+ break;
+ }
+ n += sprintf(&buf[n], ",%04X", i);
+ }
+ buf[n++] = '\n';
+ return n;
+}
+#else
+#define print_cpu_modalias arch_print_cpu_modalias
+#endif
+
+static int cpu_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (buf) {
+ print_cpu_modalias(NULL, NULL, buf);
+ add_uevent_var(env, "MODALIAS=%s", buf);
+ kfree(buf);
+ }
+ return 0;
+}
+#endif
+
/*
* register_cpu - Setup a sysfs device for a CPU.
* @cpu - cpu->hotpluggable field set to 1 will generate a control file in
@@ -277,8 +319,8 @@ int __cpuinit register_cpu(struct cpu *cpu, int num)
cpu->dev.id = num;
cpu->dev.bus = &cpu_subsys;
cpu->dev.release = cpu_device_release;
-#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
- cpu->dev.bus->uevent = arch_cpu_uevent;
+#ifdef CONFIG_HAVE_CPU_AUTOPROBE
+ cpu->dev.bus->uevent = cpu_uevent;
#endif
error = device_register(&cpu->dev);
if (!error && cpu->hotpluggable)
@@ -307,8 +349,8 @@ struct device *get_cpu_device(unsigned cpu)
}
EXPORT_SYMBOL_GPL(get_cpu_device);
-#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
-static DEVICE_ATTR(modalias, 0444, arch_print_cpu_modalias, NULL);
+#ifdef CONFIG_HAVE_CPU_AUTOPROBE
+static DEVICE_ATTR(modalias, 0444, print_cpu_modalias, NULL);
#endif
static struct attribute *cpu_root_attrs[] = {
@@ -321,7 +363,7 @@ static struct attribute *cpu_root_attrs[] = {
&cpu_attrs[2].attr.attr,
&dev_attr_kernel_max.attr,
&dev_attr_offline.attr,
-#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
+#ifdef CONFIG_HAVE_CPU_AUTOPROBE
&dev_attr_modalias.attr,
#endif
NULL
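The modalias generated above ("cpu:type:<arch type>:feature:,XXXX,...") lets
udev autoload modules keyed on CPU features. On the consumer side, a hedged
sketch of a module using the generic helper from <linux/cpufeature.h>; "FOO"
stands in for an arch-defined feature bit and is purely illustrative:

#include <linux/cpufeature.h>
#include <linux/module.h>

static int __init foo_mod_init(void)
{
	pr_info("foo: CPU feature present, accelerated path enabled\n");
	return 0;
}
/* Declares the cpu device table and module_init() in one go, so the
 * module loads only when the CPU reports the feature. */
module_cpu_feature_match(FOO, foo_mod_init);

MODULE_LICENSE("GPL");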
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 35fa36898916..8a8d611f2021 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -52,6 +52,7 @@ static DEFINE_MUTEX(deferred_probe_mutex);
static LIST_HEAD(deferred_probe_pending_list);
static LIST_HEAD(deferred_probe_active_list);
static struct workqueue_struct *deferred_wq;
+static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
/**
* deferred_probe_work_func() - Retry probing devices in the active list.
@@ -135,6 +136,17 @@ static bool driver_deferred_probe_enable = false;
* This function moves all devices from the pending list to the active
* list and schedules the deferred probe workqueue to process them. It
* should be called anytime a driver is successfully bound to a device.
+ *
+ * Note, there is a race condition in multi-threaded probe. In the case where
+ * more than one device is probing at the same time, it is possible for one
+ * probe to complete successfully while another is about to defer. If the second
+ * depends on the first, then it will get put on the pending list after the
+ * trigger event has already occurred and will be stuck there.
+ *
+ * The atomic 'deferred_trigger_count' is used to determine if a successful
+ * trigger has occurred in the midst of probing a driver. If the trigger count
+ * changes in the midst of a probe, then deferred processing should be triggered
+ * again.
*/
static void driver_deferred_probe_trigger(void)
{
@@ -147,6 +159,7 @@ static void driver_deferred_probe_trigger(void)
* into the active list so they can be retried by the workqueue
*/
mutex_lock(&deferred_probe_mutex);
+ atomic_inc(&deferred_trigger_count);
list_splice_tail_init(&deferred_probe_pending_list,
&deferred_probe_active_list);
mutex_unlock(&deferred_probe_mutex);
@@ -265,6 +278,7 @@ static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);
static int really_probe(struct device *dev, struct device_driver *drv)
{
int ret = 0;
+ int local_trigger_count = atomic_read(&deferred_trigger_count);
atomic_inc(&probe_count);
pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
@@ -310,6 +324,9 @@ probe_failed:
/* Driver requested deferred probing */
dev_info(dev, "Driver %s requests probe deferral\n", drv->name);
driver_deferred_probe_add(dev);
+ /* Did a trigger occur while probing? Need to re-trigger if yes */
+ if (local_trigger_count != atomic_read(&deferred_trigger_count))
+ driver_deferred_probe_trigger();
} else if (ret != -ENODEV && ret != -ENXIO) {
/* driver matched but the probe failed */
printk(KERN_WARNING
@@ -499,7 +516,7 @@ static void __device_release_driver(struct device *dev)
BUS_NOTIFY_UNBIND_DRIVER,
dev);
- pm_runtime_put(dev);
+ pm_runtime_put_sync(dev);
if (dev->bus && dev->bus->remove)
dev->bus->remove(dev);
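The comment block added above describes the race in prose; at its core it is a
generation-counter handshake. A reduced sketch of the same idea (simplified,
not the kernel's exact code paths):

#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/errno.h>

static atomic_t example_trigger_count = ATOMIC_INIT(0);

/* Called whenever any probe succeeds: bump the generation and kick
 * the deferred list, as driver_deferred_probe_trigger() does. */
static void example_trigger(void)
{
	atomic_inc(&example_trigger_count);
	/* ... splice pending -> active, queue the retry work ... */
}

static int example_probe_one(struct device *dev, struct device_driver *drv)
{
	/* Snapshot the generation before the slow, racy part. */
	int generation = atomic_read(&example_trigger_count);
	int ret = drv->probe(dev);

	if (ret == -EPROBE_DEFER) {
		/* ... queue dev on the pending list ... */
		if (generation != atomic_read(&example_trigger_count))
			example_trigger();	/* a success raced us: replay */
	}
	return ret;
}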
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 507379e7b763..545c4de412c3 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -91,7 +91,8 @@ static __always_inline struct devres * alloc_dr(dr_release_t release,
if (unlikely(!dr))
return NULL;
- memset(dr, 0, tot_size);
+ memset(dr, 0, offsetof(struct devres, data));
+
INIT_LIST_HEAD(&dr->node.entry);
dr->node.release = release;
return dr;
@@ -110,7 +111,7 @@ void * __devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
{
struct devres *dr;
- dr = alloc_dr(release, size, gfp);
+ dr = alloc_dr(release, size, gfp | __GFP_ZERO);
if (unlikely(!dr))
return NULL;
set_node_dbginfo(&dr->node, name, size);
@@ -135,7 +136,7 @@ void * devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
{
struct devres *dr;
- dr = alloc_dr(release, size, gfp);
+ dr = alloc_dr(release, size, gfp | __GFP_ZERO);
if (unlikely(!dr))
return NULL;
return dr->data;
@@ -745,58 +746,62 @@ void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
EXPORT_SYMBOL_GPL(devm_remove_action);
/*
- * Managed kzalloc/kfree
+ * Managed kmalloc/kfree
*/
-static void devm_kzalloc_release(struct device *dev, void *res)
+static void devm_kmalloc_release(struct device *dev, void *res)
{
/* noop */
}
-static int devm_kzalloc_match(struct device *dev, void *res, void *data)
+static int devm_kmalloc_match(struct device *dev, void *res, void *data)
{
return res == data;
}
/**
- * devm_kzalloc - Resource-managed kzalloc
+ * devm_kmalloc - Resource-managed kmalloc
* @dev: Device to allocate memory for
* @size: Allocation size
* @gfp: Allocation gfp flags
*
- * Managed kzalloc. Memory allocated with this function is
+ * Managed kmalloc. Memory allocated with this function is
* automatically freed on driver detach. Like all other devres
* resources, guaranteed alignment is unsigned long long.
*
* RETURNS:
* Pointer to allocated memory on success, NULL on failure.
*/
-void * devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
+void * devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
{
struct devres *dr;
/* use raw alloc_dr for kmalloc caller tracing */
- dr = alloc_dr(devm_kzalloc_release, size, gfp);
+ dr = alloc_dr(devm_kmalloc_release, size, gfp);
if (unlikely(!dr))
return NULL;
+ /*
+ * This is named devm_kzalloc_release for historical reasons.
+ * The initial implementation did not support kmalloc, only kzalloc.
+ */
set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
devres_add(dev, dr->data);
return dr->data;
}
-EXPORT_SYMBOL_GPL(devm_kzalloc);
+EXPORT_SYMBOL_GPL(devm_kmalloc);
/**
* devm_kfree - Resource-managed kfree
* @dev: Device this memory belongs to
* @p: Memory to free
*
- * Free memory allocated with devm_kzalloc().
+ * Free memory allocated with devm_kmalloc().
*/
void devm_kfree(struct device *dev, void *p)
{
int rc;
- rc = devres_destroy(dev, devm_kzalloc_release, devm_kzalloc_match, p);
+ rc = devres_destroy(dev, devm_kmalloc_release, devm_kmalloc_match, p);
WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_kfree);
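With the rename, devm_kmalloc() becomes the base managed allocator; zeroing
callers pass __GFP_ZERO (the header-side devm_kzalloc() wrapper is not shown
in this hunk). A short usage sketch with hypothetical driver names — both
allocations are released automatically on unbind, so no kfree() on any path:

#include <linux/device.h>
#include <linux/slab.h>

struct foo_state {
	u32 regs_cache[8];
};

static int foo_probe(struct device *dev)
{
	struct foo_state *st;
	u8 *scratch;

	st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);	/* zeroed */
	if (!st)
		return -ENOMEM;

	scratch = devm_kmalloc(dev, 64, GFP_KERNEL);		/* uninitialised */
	if (!scratch)
		return -ENOMEM;

	dev_set_drvdata(dev, st);
	return 0;
}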
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 0ca54421ce97..99802d6f3c60 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -96,7 +96,7 @@ static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
#endif
/**
- * dma_contiguous_reserve() - reserve area for contiguous memory handling
+ * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
* @limit: End address of the reserved memory (optional, 0 for any).
*
* This function reserves memory from the early allocator. It should be
@@ -124,22 +124,29 @@ void __init dma_contiguous_reserve(phys_addr_t limit)
#endif
}
- if (selected_size) {
+ if (selected_size && !dma_contiguous_default_area) {
pr_debug("%s: reserving %ld MiB for global area\n", __func__,
(unsigned long)selected_size / SZ_1M);
- dma_declare_contiguous(NULL, selected_size, 0, limit);
+ dma_contiguous_reserve_area(selected_size, 0, limit,
+ &dma_contiguous_default_area);
}
};
static DEFINE_MUTEX(cma_mutex);
-static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
+static int __init cma_activate_area(struct cma *cma)
{
- unsigned long pfn = base_pfn;
- unsigned i = count >> pageblock_order;
+ int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
+ unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
+ unsigned i = cma->count >> pageblock_order;
struct zone *zone;
+ cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+
+ if (!cma->bitmap)
+ return -ENOMEM;
+
WARN_ON_ONCE(!pfn_valid(pfn));
zone = page_zone(pfn_to_page(pfn));
@@ -153,92 +160,53 @@ static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
}
init_cma_reserved_pageblock(pfn_to_page(base_pfn));
} while (--i);
- return 0;
-}
-
-static __init struct cma *cma_create_area(unsigned long base_pfn,
- unsigned long count)
-{
- int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
- struct cma *cma;
- int ret = -ENOMEM;
-
- pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count);
-
- cma = kmalloc(sizeof *cma, GFP_KERNEL);
- if (!cma)
- return ERR_PTR(-ENOMEM);
-
- cma->base_pfn = base_pfn;
- cma->count = count;
- cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
- if (!cma->bitmap)
- goto no_mem;
-
- ret = cma_activate_area(base_pfn, count);
- if (ret)
- goto error;
-
- pr_debug("%s: returned %p\n", __func__, (void *)cma);
- return cma;
-
-error:
- kfree(cma->bitmap);
-no_mem:
- kfree(cma);
- return ERR_PTR(ret);
+ return 0;
}
-static struct cma_reserved {
- phys_addr_t start;
- unsigned long size;
- struct device *dev;
-} cma_reserved[MAX_CMA_AREAS] __initdata;
-static unsigned cma_reserved_count __initdata;
+static struct cma cma_areas[MAX_CMA_AREAS];
+static unsigned cma_area_count;
static int __init cma_init_reserved_areas(void)
{
- struct cma_reserved *r = cma_reserved;
- unsigned i = cma_reserved_count;
-
- pr_debug("%s()\n", __func__);
+ int i;
- for (; i; --i, ++r) {
- struct cma *cma;
- cma = cma_create_area(PFN_DOWN(r->start),
- r->size >> PAGE_SHIFT);
- if (!IS_ERR(cma))
- dev_set_cma_area(r->dev, cma);
+ for (i = 0; i < cma_area_count; i++) {
+ int ret = cma_activate_area(&cma_areas[i]);
+ if (ret)
+ return ret;
}
+
return 0;
}
core_initcall(cma_init_reserved_areas);
/**
- * dma_declare_contiguous() - reserve area for contiguous memory handling
- * for particular device
- * @dev: Pointer to device structure.
- * @size: Size of the reserved memory.
- * @base: Start address of the reserved memory (optional, 0 for any).
+ * dma_contiguous_reserve_area() - reserve custom contiguous area
+ * @size: Size of the reserved area (in bytes).
+ * @base: Base address of the reserved area (optional, use 0 for any).
* @limit: End address of the reserved memory (optional, 0 for any).
+ * @res_cma: Pointer to store the created cma region.
*
- * This function reserves memory for specified device. It should be
- * called by board specific code when early allocator (memblock or bootmem)
- * is still activate.
+ * This function reserves memory from the early allocator. It should be
+ * called by arch-specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory. It allows the creation of custom reserved areas for specific
+ * devices.
*/
-int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
- phys_addr_t base, phys_addr_t limit)
+int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
+ phys_addr_t limit, struct cma **res_cma)
{
- struct cma_reserved *r = &cma_reserved[cma_reserved_count];
+ struct cma *cma = &cma_areas[cma_area_count];
phys_addr_t alignment;
+ int ret = 0;
pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
(unsigned long)size, (unsigned long)base,
(unsigned long)limit);
/* Sanity checks */
- if (cma_reserved_count == ARRAY_SIZE(cma_reserved)) {
+ if (cma_area_count == ARRAY_SIZE(cma_areas)) {
pr_err("Not enough slots for CMA reserved regions!\n");
return -ENOSPC;
}
@@ -256,7 +224,7 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
if (base) {
if (memblock_is_region_reserved(base, size) ||
memblock_reserve(base, size) < 0) {
- base = -EBUSY;
+ ret = -EBUSY;
goto err;
}
} else {
@@ -266,7 +234,7 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
*/
phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
if (!addr) {
- base = -ENOMEM;
+ ret = -ENOMEM;
goto err;
} else {
base = addr;
@@ -277,10 +245,11 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
* Each reserved area must be initialised later, when more kernel
* subsystems (like slab allocator) are available.
*/
- r->start = base;
- r->size = size;
- r->dev = dev;
- cma_reserved_count++;
+ cma->base_pfn = PFN_DOWN(base);
+ cma->count = size >> PAGE_SHIFT;
+ *res_cma = cma;
+ cma_area_count++;
+
pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
(unsigned long)base);
@@ -289,7 +258,7 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
return 0;
err:
pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
- return base;
+ return ret;
}
/**
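With the per-device cma_reserved[] list gone, arch code now reserves areas
directly and attaches them to devices itself. A minimal sketch of the new
flow, assuming hypothetical hook names and a 64 MiB area:

#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/init.h>
#include <linux/printk.h>
#include <linux/sizes.h>

static struct cma *foo_cma;	/* filled in by the reservation */

/* Runs from the arch's early reserve hook, while memblock is live. */
void __init foo_reserve(void)
{
	/* 64 MiB, no base or limit constraint (0 means "anywhere"). */
	if (dma_contiguous_reserve_area(SZ_64M, 0, 0, &foo_cma))
		pr_warn("foo: no CMA area reserved\n");
}

/* Later, once the device exists, bind the area to it. */
void foo_attach(struct device *dev)
{
	if (foo_cma)
		dev_set_cma_area(dev, foo_cma);
}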
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 01e21037d8fe..00a565676583 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -1021,6 +1021,9 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
if (!firmware_p)
return -EINVAL;
+ if (!name || name[0] == '\0')
+ return -EINVAL;
+
ret = _request_firmware_prepare(&fw, name, device);
if (ret <= 0) /* error or already assigned */
goto out;
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 14f8a6954da0..86abbff912ec 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -152,6 +152,8 @@ static ssize_t show_mem_removable(struct device *dev,
container_of(dev, struct memory_block, dev);
for (i = 0; i < sections_per_block; i++) {
+ if (!present_section_nr(mem->start_section_nr + i))
+ continue;
pfn = section_nr_to_pfn(mem->start_section_nr + i);
ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
}
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 5a9b6569dd74..220ec3a3ca75 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -28,6 +28,8 @@
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
+#include <trace/events/power.h>
+#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include "../base.h"
#include "power.h"
@@ -713,6 +715,8 @@ void dpm_resume(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, NULL);
+
+ cpufreq_resume();
}
/**
@@ -1177,6 +1181,8 @@ int dpm_suspend(pm_message_t state)
might_sleep();
+ cpufreq_suspend();
+
mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 02f490bad30f..bb8c3bbc7812 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -362,7 +362,7 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL);
if (!rbnode)
return -ENOMEM;
- rbnode->blklen = sizeof(*rbnode);
+ rbnode->blklen = 1;
rbnode->base_reg = reg;
rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size,
GFP_KERNEL);
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 507ee2da0f6e..46283fd3c4c0 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -644,7 +644,8 @@ static int regcache_sync_block_raw(struct regmap *map, void *block,
}
}
- return regcache_sync_block_raw_flush(map, &data, base, regtmp);
+ return regcache_sync_block_raw_flush(map, &data, base, regtmp +
+ map->reg_stride);
}
int regcache_sync_block(struct regmap *map, void *block,
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 975719bc3450..b41994fd8460 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -460,16 +460,20 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
{
struct rb_node *next;
struct regmap_range_node *range_node;
+ const char *devname = "dummy";
INIT_LIST_HEAD(&map->debugfs_off_cache);
mutex_init(&map->cache_lock);
+ if (map->dev)
+ devname = dev_name(map->dev);
+
if (name) {
map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
- dev_name(map->dev), name);
+ devname, name);
name = map->debugfs_name;
} else {
- name = dev_name(map->dev);
+ name = devname;
}
map->debugfs = debugfs_create_dir(name, regmap_debugfs_root);
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index a941dcfe7590..6a66f0b7d3d4 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -114,7 +114,7 @@ bool regmap_readable(struct regmap *map, unsigned int reg)
bool regmap_volatile(struct regmap *map, unsigned int reg)
{
- if (!regmap_readable(map, reg))
+ if (!map->format.format_write && !regmap_readable(map, reg))
return false;
if (map->volatile_reg)
@@ -1177,7 +1177,7 @@ int _regmap_write(struct regmap *map, unsigned int reg,
}
#ifdef LOG_DEVICE
- if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
+ if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
dev_info(map->dev, "%x <= %x\n", reg, val);
#endif
@@ -1437,7 +1437,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
ret = map->reg_read(context, reg, val);
if (ret == 0) {
#ifdef LOG_DEVICE
- if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
+ if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
dev_info(map->dev, "%x => %x\n", reg, *val);
#endif
@@ -1717,7 +1717,7 @@ int regmap_async_complete(struct regmap *map)
int ret;
/* Nothing to do with no async support */
- if (!map->bus->async_write)
+ if (!map->bus || !map->bus->async_write)
return 0;
trace_regmap_async_complete_start(map->dev);
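Several of the NULL checks above exist because a regmap may now be created
with no struct device and no bus, doing I/O through reg_read/reg_write
callbacks in the config. A hedged sketch of such a "no-bus" map (callback
bodies are placeholders):

#include <linux/regmap.h>

static int foo_reg_read(void *context, unsigned int reg, unsigned int *val)
{
	*val = 0;	/* e.g. poke a firmware mailbox or MMIO window */
	return 0;
}

static int foo_reg_write(void *context, unsigned int reg, unsigned int val)
{
	return 0;
}

static const struct regmap_config foo_regmap_config = {
	.reg_bits  = 32,
	.val_bits  = 32,
	.reg_read  = foo_reg_read,
	.reg_write = foo_reg_write,
};

static struct regmap *foo_create_map(void)
{
	/* dev and bus are both NULL: the checks patched above keep the
	 * core from dereferencing them. */
	return regmap_init(NULL, NULL, NULL, &foo_regmap_config);
}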
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index ae989c57cd5e..bcd19886fa1a 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -40,8 +40,7 @@
static ssize_t show_##name(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
- unsigned int cpu = dev->id; \
- return sprintf(buf, "%d\n", topology_##name(cpu)); \
+ return sprintf(buf, "%d\n", topology_##name(dev->id)); \
}
#if defined(topology_thread_cpumask) || defined(topology_core_cpumask) || \