Diffstat (limited to 'drivers/thermal')
-rw-r--r--  drivers/thermal/Kconfig                              40
-rw-r--r--  drivers/thermal/Makefile                              4
-rw-r--r--  drivers/thermal/cpu_cooling.c                       585
-rw-r--r--  drivers/thermal/db8500_thermal.c                      2
-rw-r--r--  drivers/thermal/devfreq_cooling.c                   573
-rw-r--r--  drivers/thermal/fair_share.c                         39
-rw-r--r--  drivers/thermal/imx_thermal.c                         3
-rw-r--r--  drivers/thermal/of-thermal.c                         41
-rw-r--r--  drivers/thermal/power_allocator.c                   544
-rw-r--r--  drivers/thermal/thermal_core.c                      314
-rw-r--r--  drivers/thermal/thermal_core.h                       11
-rw-r--r--  drivers/thermal/ti-soc-thermal/ti-thermal-common.c    3
12 files changed, 2100 insertions, 59 deletions
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index af40db0df58e..0ae6bbf0a89b 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -42,6 +42,17 @@ config THERMAL_OF
Say 'Y' here if you need to build thermal infrastructure
based on device tree.
+config THERMAL_WRITABLE_TRIPS
+ bool "Enable writable trip points"
+ help
+ This option allows the system integrator to choose whether
+ trip temperatures can be changed from userspace. The
+ writable trips need to be specified when setting up the
+ thermal zone but the choice here takes precedence.
+
+ Say 'Y' here if you would like to allow userspace tools to
+ change trip temperatures.
+
choice
prompt "Default Thermal governor"
default THERMAL_DEFAULT_GOV_STEP_WISE
@@ -71,6 +82,14 @@ config THERMAL_DEFAULT_GOV_USER_SPACE
Select this if you want to let the user space manage the
platform thermals.
+config THERMAL_DEFAULT_GOV_POWER_ALLOCATOR
+ bool "power_allocator"
+ select THERMAL_GOV_POWER_ALLOCATOR
+ help
+ Select this if you want to control temperature based on
+ system and device power allocation. This governor can only
+ operate on cooling devices that implement the power API.
+
endchoice
config THERMAL_GOV_FAIR_SHARE
@@ -99,6 +118,13 @@ config THERMAL_GOV_USER_SPACE
help
Enable this to let the user space manage the platform thermals.
+config THERMAL_GOV_POWER_ALLOCATOR
+ bool "Power allocator thermal governor"
+ select THERMAL_POWER_ACTOR
+ help
+ Enable this to manage platform thermals by dynamically
+ allocating and limiting power to devices.
+
config CPU_THERMAL
bool "generic cpu cooling support"
depends on CPU_FREQ
@@ -122,6 +148,20 @@ config CLOCK_THERMAL
device that is configured to use this cooling mechanism will be
controlled to reduce clock frequency whenever temperature is high.
+config DEVFREQ_THERMAL
+ bool "Generic device cooling support"
+ depends on PM_DEVFREQ
+ depends on PM_OPP
+ help
+ This implements the generic devfreq cooling mechanism through
+ frequency reduction for devices using devfreq.
+
+ This will throttle the device by limiting the maximum allowed DVFS
+ frequency corresponding to the cooling level.
+
+ In order to use the power extensions of the cooling device,
+ devfreq should use the simple_ondemand governor.
+
If you want this support, you should say Y here.
config THERMAL_EMULATION
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index fa0dc486790f..cb8d8a5961e3 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -14,6 +14,7 @@ thermal_sys-$(CONFIG_THERMAL_GOV_FAIR_SHARE) += fair_share.o
thermal_sys-$(CONFIG_THERMAL_GOV_BANG_BANG) += gov_bang_bang.o
thermal_sys-$(CONFIG_THERMAL_GOV_STEP_WISE) += step_wise.o
thermal_sys-$(CONFIG_THERMAL_GOV_USER_SPACE) += user_space.o
+thermal_sys-$(CONFIG_THERMAL_GOV_POWER_ALLOCATOR) += power_allocator.o
# cpufreq cooling
thermal_sys-$(CONFIG_CPU_THERMAL) += cpu_cooling.o
@@ -21,6 +22,9 @@ thermal_sys-$(CONFIG_CPU_THERMAL) += cpu_cooling.o
# clock cooling
thermal_sys-$(CONFIG_CLOCK_THERMAL) += clock_cooling.o
+# devfreq cooling
+thermal_sys-$(CONFIG_DEVFREQ_THERMAL) += devfreq_cooling.o
+
# platform thermal drivers
obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o
obj-$(CONFIG_ROCKCHIP_THERMAL) += rockchip_thermal.o
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 5db5e91fd12f..54c18b2b91e0 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -26,10 +26,13 @@
#include <linux/thermal.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
+#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/cpu_cooling.h>
+#include <trace/events/thermal.h>
+
/*
* Cooling state <-> CPUFreq frequency
*
@@ -45,6 +48,19 @@
*/
/**
+ * struct power_table - frequency to power conversion
+ * @frequency: frequency in KHz
+ * @power: power in mW
+ *
+ * This structure is built when the cooling device registers and helps
+ * in translating frequency to power and vice versa.
+ */
+struct power_table {
+ u32 frequency;
+ u32 power;
+};
+
+/**
* struct cpufreq_cooling_device - data for cooling device with cpufreq
* @id: unique integer value corresponding to each cpufreq_cooling_device
* registered.
@@ -58,6 +74,15 @@
* cpufreq frequencies.
* @allowed_cpus: all the cpus involved for this cpufreq_cooling_device.
* @node: list_head to link all cpufreq_cooling_device together.
+ * @last_load: load measured by the latest call to cpufreq_get_requested_power()
+ * @time_in_idle: previous reading of the absolute time that this cpu was idle
+ * @time_in_idle_timestamp: wall time of the last invocation of
+ * get_cpu_idle_time_us()
+ * @dyn_power_table: array of struct power_table for frequency to power
+ * conversion, sorted in ascending order.
+ * @dyn_power_table_entries: number of entries in the @dyn_power_table array
+ * @cpu_dev: the first cpu_device from @allowed_cpus that has OPPs registered
+ * @plat_get_static_power: callback to calculate the static power
*
* This structure is required for keeping information of each registered
* cpufreq_cooling_device.
@@ -71,6 +96,13 @@ struct cpufreq_cooling_device {
unsigned int *freq_table; /* In descending order */
struct cpumask allowed_cpus;
struct list_head node;
+ u32 last_load;
+ u64 *time_in_idle;
+ u64 *time_in_idle_timestamp;
+ struct power_table *dyn_power_table;
+ int dyn_power_table_entries;
+ struct device *cpu_dev;
+ get_static_t plat_get_static_power;
};
static DEFINE_IDR(cpufreq_idr);
static DEFINE_MUTEX(cooling_cpufreq_lock);
@@ -186,23 +218,237 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
unsigned long max_freq = 0;
struct cpufreq_cooling_device *cpufreq_dev;
- if (event != CPUFREQ_ADJUST)
- return 0;
+ switch (event) {
- mutex_lock(&cooling_cpufreq_lock);
- list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
- if (!cpumask_test_cpu(policy->cpu,
- &cpufreq_dev->allowed_cpus))
+ case CPUFREQ_ADJUST:
+ mutex_lock(&cooling_cpufreq_lock);
+ list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
+ if (!cpumask_test_cpu(policy->cpu,
+ &cpufreq_dev->allowed_cpus))
+ continue;
+
+ max_freq = cpufreq_dev->clipped_freq;
+
+ if (policy->max != max_freq)
+ cpufreq_verify_within_limits(policy, 0,
+ max_freq);
+ }
+ mutex_unlock(&cooling_cpufreq_lock);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+/**
+ * build_dyn_power_table() - create a dynamic power to frequency table
+ * @cpufreq_device: the cpufreq cooling device in which to store the table
+ * @capacitance: dynamic power coefficient for these cpus
+ *
+ * Build a dynamic power to frequency table for this cpu and store it
+ * in @cpufreq_device. This table will be used in cpu_power_to_freq() and
+ * cpu_freq_to_power() to convert between power and frequency
+ * efficiently. Power is stored in mW, frequency in KHz. The
+ * resulting table is in ascending order.
+ *
+ * Return: 0 on success, -E* on error.
+ */
+static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
+ u32 capacitance)
+{
+ struct power_table *power_table;
+ struct dev_pm_opp *opp;
+ struct device *dev = NULL;
+ int num_opps = 0, cpu, i, ret = 0;
+ unsigned long freq;
+
+ rcu_read_lock();
+
+ for_each_cpu(cpu, &cpufreq_device->allowed_cpus) {
+ dev = get_cpu_device(cpu);
+ if (!dev) {
+ dev_warn(&cpufreq_device->cool_dev->device,
+ "No cpu device for cpu %d\n", cpu);
continue;
+ }
+
+ num_opps = dev_pm_opp_get_opp_count(dev);
+ if (num_opps > 0) {
+ break;
+ } else if (num_opps < 0) {
+ ret = num_opps;
+ goto unlock;
+ }
+ }
- max_freq = cpufreq_dev->clipped_freq;
+ if (num_opps == 0) {
+ ret = -EINVAL;
+ goto unlock;
+ }
- if (policy->max != max_freq)
- cpufreq_verify_within_limits(policy, 0, max_freq);
+ power_table = kcalloc(num_opps, sizeof(*power_table), GFP_KERNEL);
+ if (!power_table) {
+ ret = -ENOMEM;
+ goto unlock;
}
- mutex_unlock(&cooling_cpufreq_lock);
- return 0;
+ for (freq = 0, i = 0;
+ opp = dev_pm_opp_find_freq_ceil(dev, &freq), !IS_ERR(opp);
+ freq++, i++) {
+ u32 freq_mhz, voltage_mv;
+ u64 power;
+
+ freq_mhz = freq / 1000000;
+ voltage_mv = dev_pm_opp_get_voltage(opp) / 1000;
+
+ /*
+ * Do the multiplication with MHz and millivolt so as
+ * to not overflow.
+ */
+ power = (u64)capacitance * freq_mhz * voltage_mv * voltage_mv;
+ do_div(power, 1000000000);
+
+ /* frequency is stored in power_table in KHz */
+ power_table[i].frequency = freq / 1000;
+
+ /* power is stored in mW */
+ power_table[i].power = power;
+ }
+
+ if (i == 0) {
+ ret = PTR_ERR(opp);
+ goto unlock;
+ }
+
+ cpufreq_device->cpu_dev = dev;
+ cpufreq_device->dyn_power_table = power_table;
+ cpufreq_device->dyn_power_table_entries = i;
+
+unlock:
+ rcu_read_unlock();
+ return ret;
+}
+
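To make the overflow-avoiding arithmetic above concrete, here is a worked example with made-up numbers (a capacitance of 100 and an OPP at 1000 MHz / 900 mV):

    power = (100 * 1000 * 900 * 900) / 10^9 = 81 mW    /* C * f_MHz * V_mV^2 */

which is what the loop stores in that entry's power field, next to the frequency in KHz.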
+static u32 cpu_freq_to_power(struct cpufreq_cooling_device *cpufreq_device,
+ u32 freq)
+{
+ int i;
+ struct power_table *pt = cpufreq_device->dyn_power_table;
+
+ for (i = 1; i < cpufreq_device->dyn_power_table_entries; i++)
+ if (freq < pt[i].frequency)
+ break;
+
+ return pt[i - 1].power;
+}
+
+static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_device,
+ u32 power)
+{
+ int i;
+ struct power_table *pt = cpufreq_device->dyn_power_table;
+
+ for (i = 1; i < cpufreq_device->dyn_power_table_entries; i++)
+ if (power < pt[i].power)
+ break;
+
+ return pt[i - 1].frequency;
+}
+
+/**
+ * get_load() - get load for a cpu since last updated
+ * @cpufreq_device: &struct cpufreq_cooling_device for this cpu
+ * @cpu: cpu number
+ *
+ * Return: The average load of cpu @cpu in percentage since this
+ * function was last called.
+ */
+static u32 get_load(struct cpufreq_cooling_device *cpufreq_device, int cpu)
+{
+ u32 load;
+ u64 now, now_idle, delta_time, delta_idle;
+
+ now_idle = get_cpu_idle_time(cpu, &now, 0);
+ delta_idle = now_idle - cpufreq_device->time_in_idle[cpu];
+ delta_time = now - cpufreq_device->time_in_idle_timestamp[cpu];
+
+ if (delta_time <= delta_idle)
+ load = 0;
+ else
+ load = div64_u64(100 * (delta_time - delta_idle), delta_time);
+
+ cpufreq_device->time_in_idle[cpu] = now_idle;
+ cpufreq_device->time_in_idle_timestamp[cpu] = now;
+
+ return load;
+}
+
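A worked example of the load calculation above, with made-up numbers: if 80 ms of wall time have elapsed since the previous call and the cpu was idle for 20 ms of it, the function reports load = 100 * (80 - 20) / 80 = 75, i.e. 75%. The bookkeeping at the end makes the next call measure from this point onwards.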
+/**
+ * get_static_power() - calculate the static power consumed by the cpus
+ * @cpufreq_device: &struct cpufreq_cooling_device for this cpu cdev
+ * @tz: thermal zone device in which we're operating
+ * @freq: frequency in KHz
+ * @power: pointer in which to store the calculated static power
+ *
+ * Calculate the static power consumed by the cpus described by
+ * @cpufreq_device running at frequency @freq. This function relies on a
+ * platform-specific function that should have been provided when the
+ * actor was registered. If it wasn't, the static power is assumed to
+ * be negligible. The calculated static power is stored in @power.
+ *
+ * Return: 0 on success, -E* on failure.
+ */
+static int get_static_power(struct cpufreq_cooling_device *cpufreq_device,
+ struct thermal_zone_device *tz, unsigned long freq,
+ u32 *power)
+{
+ struct dev_pm_opp *opp;
+ unsigned long voltage;
+ struct cpumask *cpumask = &cpufreq_device->allowed_cpus;
+ unsigned long freq_hz = freq * 1000;
+
+ if (!cpufreq_device->plat_get_static_power ||
+ !cpufreq_device->cpu_dev) {
+ *power = 0;
+ return 0;
+ }
+
+ rcu_read_lock();
+
+ opp = dev_pm_opp_find_freq_exact(cpufreq_device->cpu_dev, freq_hz,
+ true);
+ voltage = dev_pm_opp_get_voltage(opp);
+
+ rcu_read_unlock();
+
+ if (voltage == 0) {
+ dev_warn_ratelimited(cpufreq_device->cpu_dev,
+ "Failed to get voltage for frequency %lu: %ld\n",
+ freq_hz, IS_ERR(opp) ? PTR_ERR(opp) : 0);
+ return -EINVAL;
+ }
+
+ return cpufreq_device->plat_get_static_power(cpumask, tz->passive_delay,
+ voltage, power);
+}
+
+/**
+ * get_dynamic_power() - calculate the dynamic power
+ * @cpufreq_device: &cpufreq_cooling_device for this cdev
+ * @freq: current frequency
+ *
+ * Return: the dynamic power consumed by the cpus described by
+ * @cpufreq_device.
+ */
+static u32 get_dynamic_power(struct cpufreq_cooling_device *cpufreq_device,
+ unsigned long freq)
+{
+ u32 raw_cpu_power;
+
+ raw_cpu_power = cpu_freq_to_power(cpufreq_device, freq);
+ return (raw_cpu_power * cpufreq_device->last_load) / 100;
}
/* cpufreq cooling device callback functions are defined below */
@@ -280,8 +526,205 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
return 0;
}
+/**
+ * cpufreq_get_requested_power() - get the current power
+ * @cdev: &thermal_cooling_device pointer
+ * @tz: a valid thermal zone device pointer
+ * @power: pointer in which to store the resulting power
+ *
+ * Calculate the current power consumption of the cpus in milliwatts
+ * and store it in @power. This function should actually calculate
+ * the requested power, but it's hard to get the frequency that
+ * cpufreq would have assigned if there were no thermal limits.
+ * Instead, we calculate the current power on the assumption that the
+ * immediate future will look like the immediate past.
+ *
+ * We use the current frequency and the average load since this
+ * function was last called. In reality, the cpus may have run at
+ * several different OPPs since this function was last called, which
+ * affects the load calculation. While it's not perfectly accurate, this
+ * simplification is good enough and works. REVISIT this, as more
+ * complex code may be needed if experiments show that it's not
+ * accurate enough.
+ *
+ * Return: 0 on success, -E* if getting the static power failed.
+ */
+static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
+ struct thermal_zone_device *tz,
+ u32 *power)
+{
+ unsigned long freq;
+ int i = 0, cpu, ret;
+ u32 static_power, dynamic_power, total_load = 0;
+ struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
+ u32 *load_cpu = NULL;
+
+ cpu = cpumask_any_and(&cpufreq_device->allowed_cpus, cpu_online_mask);
+
+ /*
+ * All the CPUs are offline, thus the requested power by
+ * the cdev is 0
+ */
+ if (cpu >= nr_cpu_ids) {
+ *power = 0;
+ return 0;
+ }
+
+ freq = cpufreq_quick_get(cpu);
+
+ if (trace_thermal_power_cpu_get_power_enabled()) {
+ u32 ncpus = cpumask_weight(&cpufreq_device->allowed_cpus);
+
+ load_cpu = devm_kcalloc(&cdev->device, ncpus, sizeof(*load_cpu),
+ GFP_KERNEL);
+ }
+
+ for_each_cpu(cpu, &cpufreq_device->allowed_cpus) {
+ u32 load;
+
+ if (cpu_online(cpu))
+ load = get_load(cpufreq_device, cpu);
+ else
+ load = 0;
+
+ total_load += load;
+ if (trace_thermal_power_cpu_limit_enabled() && load_cpu)
+ load_cpu[i] = load;
+
+ i++;
+ }
+
+ cpufreq_device->last_load = total_load;
+
+ dynamic_power = get_dynamic_power(cpufreq_device, freq);
+ ret = get_static_power(cpufreq_device, tz, freq, &static_power);
+ if (ret) {
+ if (load_cpu)
+ devm_kfree(&cdev->device, load_cpu);
+ return ret;
+ }
+
+ if (load_cpu) {
+ trace_thermal_power_cpu_get_power(
+ &cpufreq_device->allowed_cpus,
+ freq, load_cpu, i, dynamic_power, static_power);
+
+ devm_kfree(&cdev->device, load_cpu);
+ }
+
+ *power = static_power + dynamic_power;
+ return 0;
+}
+
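Putting the pieces together with illustrative numbers: if the dynamic power table gives 500 mW for the current frequency and the four allowed cpus report loads of 80, 40, 20 and 20 (so last_load = 160), the dynamic power is 500 * 160 / 100 = 800 mW; the static power returned by the platform callback (or 0 when none was provided) is then added and the sum stored in *power.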
+/**
+ * cpufreq_state2power() - convert a cpu cdev state to power consumed
+ * @cdev: &thermal_cooling_device pointer
+ * @tz: a valid thermal zone device pointer
+ * @state: cooling device state to be converted
+ * @power: pointer in which to store the resulting power
+ *
+ * Convert cooling device state @state into power consumption in
+ * milliwatts assuming 100% load. Store the calculated power in
+ * @power.
+ *
+ * Return: 0 on success, -EINVAL if the cooling device state could not
+ * be converted into a frequency or other -E* if there was an error
+ * when calculating the static power.
+ */
+static int cpufreq_state2power(struct thermal_cooling_device *cdev,
+ struct thermal_zone_device *tz,
+ unsigned long state, u32 *power)
+{
+ unsigned int freq, num_cpus;
+ cpumask_t cpumask;
+ u32 static_power, dynamic_power;
+ int ret;
+ struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
+
+ cpumask_and(&cpumask, &cpufreq_device->allowed_cpus, cpu_online_mask);
+ num_cpus = cpumask_weight(&cpumask);
+
+ /* None of our cpus are online, so no power */
+ if (num_cpus == 0) {
+ *power = 0;
+ return 0;
+ }
+
+ freq = cpufreq_device->freq_table[state];
+ if (!freq)
+ return -EINVAL;
+
+ dynamic_power = cpu_freq_to_power(cpufreq_device, freq) * num_cpus;
+ ret = get_static_power(cpufreq_device, tz, freq, &static_power);
+ if (ret)
+ return ret;
+
+ *power = static_power + dynamic_power;
+ return 0;
+}
+
+/**
+ * cpufreq_power2state() - convert power to a cooling device state
+ * @cdev: &thermal_cooling_device pointer
+ * @tz: a valid thermal zone device pointer
+ * @power: power in milliwatts to be converted
+ * @state: pointer in which to store the resulting state
+ *
+ * Calculate a cooling device state for the cpus described by @cdev
+ * that would allow them to consume at most @power mW and store it in
+ * @state. Note that this calculation depends on external factors
+ * such as the cpu load or the current static power. Calling this
+ * function with the same power as input can yield different cooling
+ * device states depending on those external factors.
+ *
+ * Return: 0 on success, -ENODEV if no cpus are online or -EINVAL if
+ * the calculated frequency could not be converted to a valid state.
+ * The latter should not happen unless the frequencies available to
+ * cpufreq have changed since the initialization of the cpu cooling
+ * device.
+ */
+static int cpufreq_power2state(struct thermal_cooling_device *cdev,
+ struct thermal_zone_device *tz, u32 power,
+ unsigned long *state)
+{
+ unsigned int cpu, cur_freq, target_freq;
+ int ret;
+ s32 dyn_power;
+ u32 last_load, normalised_power, static_power;
+ struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
+
+ cpu = cpumask_any_and(&cpufreq_device->allowed_cpus, cpu_online_mask);
+
+ /* None of our cpus are online */
+ if (cpu >= nr_cpu_ids)
+ return -ENODEV;
+
+ cur_freq = cpufreq_quick_get(cpu);
+ ret = get_static_power(cpufreq_device, tz, cur_freq, &static_power);
+ if (ret)
+ return ret;
+
+ dyn_power = power - static_power;
+ dyn_power = dyn_power > 0 ? dyn_power : 0;
+ last_load = cpufreq_device->last_load ?: 1;
+ normalised_power = (dyn_power * 100) / last_load;
+ target_freq = cpu_power_to_freq(cpufreq_device, normalised_power);
+
+ *state = cpufreq_cooling_get_level(cpu, target_freq);
+ if (*state == THERMAL_CSTATE_INVALID) {
+ dev_warn_ratelimited(&cdev->device,
+ "Failed to convert %dKHz for cpu %d into a cdev state\n",
+ target_freq, cpu);
+ return -EINVAL;
+ }
+
+ trace_thermal_power_cpu_limit(&cpufreq_device->allowed_cpus,
+ target_freq, *state, power);
+ return 0;
+}
+
/* Bind cpufreq callbacks to thermal cooling device ops */
-static struct thermal_cooling_device_ops const cpufreq_cooling_ops = {
+static struct thermal_cooling_device_ops cpufreq_cooling_ops = {
.get_max_state = cpufreq_get_max_state,
.get_cur_state = cpufreq_get_cur_state,
.set_cur_state = cpufreq_set_cur_state,
@@ -311,6 +754,9 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table,
* @np: a valid struct device_node to the cooling device device tree node
* @clip_cpus: cpumask of cpus where the frequency constraints will happen.
* Normally this should be same as cpufreq policy->related_cpus.
+ * @capacitance: dynamic power coefficient for these cpus
+ * @plat_static_func: function to calculate the static power consumed by these
+ * cpus (optional)
*
* This interface function registers the cpufreq cooling device with the name
* "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
@@ -322,13 +768,14 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table,
*/
static struct thermal_cooling_device *
__cpufreq_cooling_register(struct device_node *np,
- const struct cpumask *clip_cpus)
+ const struct cpumask *clip_cpus, u32 capacitance,
+ get_static_t plat_static_func)
{
struct thermal_cooling_device *cool_dev;
struct cpufreq_cooling_device *cpufreq_dev;
char dev_name[THERMAL_NAME_LENGTH];
struct cpufreq_frequency_table *pos, *table;
- unsigned int freq, i;
+ unsigned int freq, i, num_cpus;
int ret;
table = cpufreq_frequency_get_table(cpumask_first(clip_cpus));
@@ -341,6 +788,23 @@ __cpufreq_cooling_register(struct device_node *np,
if (!cpufreq_dev)
return ERR_PTR(-ENOMEM);
+ num_cpus = cpumask_weight(clip_cpus);
+ cpufreq_dev->time_in_idle = kcalloc(num_cpus,
+ sizeof(*cpufreq_dev->time_in_idle),
+ GFP_KERNEL);
+ if (!cpufreq_dev->time_in_idle) {
+ cool_dev = ERR_PTR(-ENOMEM);
+ goto free_cdev;
+ }
+
+ cpufreq_dev->time_in_idle_timestamp =
+ kcalloc(num_cpus, sizeof(*cpufreq_dev->time_in_idle_timestamp),
+ GFP_KERNEL);
+ if (!cpufreq_dev->time_in_idle_timestamp) {
+ cool_dev = ERR_PTR(-ENOMEM);
+ goto free_time_in_idle;
+ }
+
/* Find max levels */
cpufreq_for_each_valid_entry(pos, table)
cpufreq_dev->max_level++;
@@ -349,7 +813,7 @@ __cpufreq_cooling_register(struct device_node *np,
cpufreq_dev->max_level, GFP_KERNEL);
if (!cpufreq_dev->freq_table) {
cool_dev = ERR_PTR(-ENOMEM);
- goto free_cdev;
+ goto free_time_in_idle_timestamp;
}
/* max_level is an index, not a counter */
@@ -357,6 +821,20 @@ __cpufreq_cooling_register(struct device_node *np,
cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus);
+ if (capacitance) {
+ cpufreq_cooling_ops.get_requested_power =
+ cpufreq_get_requested_power;
+ cpufreq_cooling_ops.state2power = cpufreq_state2power;
+ cpufreq_cooling_ops.power2state = cpufreq_power2state;
+ cpufreq_dev->plat_get_static_power = plat_static_func;
+
+ ret = build_dyn_power_table(cpufreq_dev, capacitance);
+ if (ret) {
+ cool_dev = ERR_PTR(ret);
+ goto free_table;
+ }
+ }
+
ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
if (ret) {
cool_dev = ERR_PTR(ret);
@@ -402,6 +880,10 @@ remove_idr:
release_idr(&cpufreq_idr, cpufreq_dev->id);
free_table:
kfree(cpufreq_dev->freq_table);
+free_time_in_idle_timestamp:
+ kfree(cpufreq_dev->time_in_idle_timestamp);
+free_time_in_idle:
+ kfree(cpufreq_dev->time_in_idle);
free_cdev:
kfree(cpufreq_dev);
@@ -422,7 +904,7 @@ free_cdev:
struct thermal_cooling_device *
cpufreq_cooling_register(const struct cpumask *clip_cpus)
{
- return __cpufreq_cooling_register(NULL, clip_cpus);
+ return __cpufreq_cooling_register(NULL, clip_cpus, 0, NULL);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
@@ -446,11 +928,78 @@ of_cpufreq_cooling_register(struct device_node *np,
if (!np)
return ERR_PTR(-EINVAL);
- return __cpufreq_cooling_register(np, clip_cpus);
+ return __cpufreq_cooling_register(np, clip_cpus, 0, NULL);
}
EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
/**
+ * cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions
+ * @clip_cpus: cpumask of cpus where the frequency constraints will happen
+ * @capacitance: dynamic power coefficient for these cpus
+ * @plat_static_func: function to calculate the static power consumed by these
+ * cpus (optional)
+ *
+ * This interface function registers the cpufreq cooling device with
+ * the name "thermal-cpufreq-%x". This api can support multiple
+ * instances of cpufreq cooling devices. Using this function, the
+ * cooling device will implement the power extensions by using a
+ * simple cpu power model. The cpus must have registered their OPPs
+ * using the OPP library.
+ *
+ * An optional @plat_static_func may be provided to calculate the
+ * static power consumed by these cpus. If the platform's static
+ * power consumption is unknown or negligible, pass NULL instead.
+ *
+ * Return: a valid struct thermal_cooling_device pointer on success,
+ * on failure, it returns a corresponding ERR_PTR().
+ */
+struct thermal_cooling_device *
+cpufreq_power_cooling_register(const struct cpumask *clip_cpus, u32 capacitance,
+ get_static_t plat_static_func)
+{
+ return __cpufreq_cooling_register(NULL, clip_cpus, capacitance,
+ plat_static_func);
+}
+EXPORT_SYMBOL(cpufreq_power_cooling_register);
+
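A usage sketch, not part of the patch: a platform driver that knows its cluster's dynamic power coefficient could register the power-aware cooling device roughly as below. The coefficient, the leakage figure and the function names are assumptions; the callback signature is inferred from the call in get_static_power() above.

    #include <linux/cpu_cooling.h>
    #include <linux/cpumask.h>

    /* Hypothetical platform callback; the leakage figure is made up. */
    static int board_get_static_power(cpumask_t *cpumask, int interval,
                                      unsigned long voltage, u32 *power)
    {
            *power = 10;    /* ~10 mW of leakage, illustrative only */
            return 0;
    }

    static struct thermal_cooling_device *board_register_cpu_cooling(void)
    {
            /* 100 is an assumed dynamic power coefficient (capacitance) */
            return cpufreq_power_cooling_register(cpu_possible_mask, 100,
                                                  board_get_static_power);
    }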
+/**
+ * of_cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions
+ * @np: a valid struct device_node to the cooling device device tree node
+ * @clip_cpus: cpumask of cpus where the frequency constraints will happen
+ * @capacitance: dynamic power coefficient for these cpus
+ * @plat_static_func: function to calculate the static power consumed by these
+ * cpus (optional)
+ *
+ * This interface function registers the cpufreq cooling device with
+ * the name "thermal-cpufreq-%x". This api can support multiple
+ * instances of cpufreq cooling devices. Using this API, the cpufreq
+ * cooling device will be linked to the device tree node provided.
+ * Using this function, the cooling device will implement the power
+ * extensions by using a simple cpu power model. The cpus must have
+ * registered their OPPs using the OPP library.
+ *
+ * An optional @plat_static_func may be provided to calculate the
+ * static power consumed by these cpus. If the platform's static
+ * power consumption is unknown or negligible, pass NULL instead.
+ *
+ * Return: a valid struct thermal_cooling_device pointer on success,
+ * on failure, it returns a corresponding ERR_PTR().
+ */
+struct thermal_cooling_device *
+of_cpufreq_power_cooling_register(struct device_node *np,
+ const struct cpumask *clip_cpus,
+ u32 capacitance,
+ get_static_t plat_static_func)
+{
+ if (!np)
+ return ERR_PTR(-EINVAL);
+
+ return __cpufreq_cooling_register(np, clip_cpus, capacitance,
+ plat_static_func);
+}
+EXPORT_SYMBOL(of_cpufreq_power_cooling_register);
+
+/**
* cpufreq_cooling_unregister - function to remove cpufreq cooling device.
* @cdev: thermal cooling device pointer.
*
@@ -475,6 +1024,8 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
release_idr(&cpufreq_idr, cpufreq_dev->id);
+ kfree(cpufreq_dev->time_in_idle_timestamp);
+ kfree(cpufreq_dev->time_in_idle);
kfree(cpufreq_dev->freq_table);
kfree(cpufreq_dev);
}
diff --git a/drivers/thermal/db8500_thermal.c b/drivers/thermal/db8500_thermal.c
index 20adfbe27df1..2fb273c4baa9 100644
--- a/drivers/thermal/db8500_thermal.c
+++ b/drivers/thermal/db8500_thermal.c
@@ -76,7 +76,7 @@ static int db8500_cdev_bind(struct thermal_zone_device *thermal,
upper = lower = i > max_state ? max_state : i;
ret = thermal_zone_bind_cooling_device(thermal, i, cdev,
- upper, lower);
+ upper, lower, THERMAL_WEIGHT_DEFAULT);
dev_info(&cdev->device, "%s bind to %d: %d-%s\n", cdev->type,
i, ret, ret ? "fail" : "succeed");
diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c
new file mode 100644
index 000000000000..01f0015f80dc
--- /dev/null
+++ b/drivers/thermal/devfreq_cooling.c
@@ -0,0 +1,573 @@
+/*
+ * devfreq_cooling: Thermal cooling device implementation for devices using
+ * devfreq
+ *
+ * Copyright (C) 2014-2015 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * TODO:
+ * - If OPPs are added or removed after devfreq cooling has
+ * registered, the devfreq cooling won't react to it.
+ */
+
+#include <linux/devfreq.h>
+#include <linux/devfreq_cooling.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/pm_opp.h>
+#include <linux/thermal.h>
+
+#include <trace/events/thermal.h>
+
+static DEFINE_MUTEX(devfreq_lock);
+static DEFINE_IDR(devfreq_idr);
+
+/**
+ * struct devfreq_cooling_device - Devfreq cooling device
+ * @id: unique integer value corresponding to each
+ * devfreq_cooling_device registered.
+ * @cdev: Pointer to associated thermal cooling device.
+ * @devfreq: Pointer to associated devfreq device.
+ * @cooling_state: Current cooling state.
+ * @power_table: Pointer to table with maximum power draw for each
+ * cooling state. State is the index into the table, and
+ * the power is in mW.
+ * @freq_table: Pointer to a table with the frequencies sorted in descending
+ * order. You can index the table by cooling device state
+ * @freq_table_size: Size of the @freq_table and @power_table
+ * @power_ops: Pointer to devfreq_cooling_power, used to generate the
+ * @power_table.
+ */
+struct devfreq_cooling_device {
+ int id;
+ struct thermal_cooling_device *cdev;
+ struct devfreq *devfreq;
+ unsigned long cooling_state;
+ u32 *power_table;
+ u32 *freq_table;
+ size_t freq_table_size;
+ struct devfreq_cooling_power *power_ops;
+};
+
+/**
+ * get_idr - function to get a unique id.
+ * @idr: struct idr * handle used to create a id.
+ * @id: int * value generated by this function.
+ *
+ * This function will populate @id with a unique
+ * id, using the idr API.
+ *
+ * Return: 0 on success, an error code on failure.
+ */
+static int get_idr(struct idr *idr, int *id)
+{
+ int ret;
+
+ mutex_lock(&devfreq_lock);
+ ret = idr_alloc(idr, NULL, 0, 0, GFP_KERNEL);
+ mutex_unlock(&devfreq_lock);
+ if (unlikely(ret < 0))
+ return ret;
+ *id = ret;
+
+ return 0;
+}
+
+/**
+ * release_idr - function to free the unique id.
+ * @idr: struct idr * handle used for creating the id.
+ * @id: int value representing the unique id.
+ */
+static void release_idr(struct idr *idr, int id)
+{
+ mutex_lock(&devfreq_lock);
+ idr_remove(idr, id);
+ mutex_unlock(&devfreq_lock);
+}
+
+/**
+ * partition_enable_opps() - disable all opps above a given state
+ * @dfc: Pointer to devfreq we are operating on
+ * @cdev_state: cooling device state we're setting
+ *
+ * Go through the OPPs of the device, enabling all OPPs until
+ * @cdev_state and disabling those frequencies above it.
+ */
+static int partition_enable_opps(struct devfreq_cooling_device *dfc,
+ unsigned long cdev_state)
+{
+ int i;
+ struct device *dev = dfc->devfreq->dev.parent;
+
+ for (i = 0; i < dfc->freq_table_size; i++) {
+ struct dev_pm_opp *opp;
+ int ret = 0;
+ unsigned int freq = dfc->freq_table[i];
+ bool want_enable = i >= cdev_state ? true : false;
+
+ rcu_read_lock();
+ opp = dev_pm_opp_find_freq_exact(dev, freq, !want_enable);
+ rcu_read_unlock();
+
+ if (PTR_ERR(opp) == -ERANGE)
+ continue;
+ else if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ if (want_enable)
+ ret = dev_pm_opp_enable(dev, freq);
+ else
+ ret = dev_pm_opp_disable(dev, freq);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int devfreq_cooling_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct devfreq_cooling_device *dfc = cdev->devdata;
+
+ *state = dfc->freq_table_size - 1;
+
+ return 0;
+}
+
+static int devfreq_cooling_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct devfreq_cooling_device *dfc = cdev->devdata;
+
+ *state = dfc->cooling_state;
+
+ return 0;
+}
+
+static int devfreq_cooling_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ struct devfreq_cooling_device *dfc = cdev->devdata;
+ struct devfreq *df = dfc->devfreq;
+ struct device *dev = df->dev.parent;
+ int ret;
+
+ if (state == dfc->cooling_state)
+ return 0;
+
+ dev_dbg(dev, "Setting cooling state %lu\n", state);
+
+ if (state >= dfc->freq_table_size)
+ return -EINVAL;
+
+ ret = partition_enable_opps(dfc, state);
+ if (ret)
+ return ret;
+
+ dfc->cooling_state = state;
+
+ return 0;
+}
+
+/**
+ * freq_get_state() - get the cooling state corresponding to a frequency
+ * @dfc: Pointer to devfreq cooling device
+ * @freq: frequency in Hz
+ *
+ * Return: the cooling state associated with the @freq, or
+ * THERMAL_CSTATE_INVALID if it wasn't found.
+ */
+static unsigned long
+freq_get_state(struct devfreq_cooling_device *dfc, unsigned long freq)
+{
+ int i;
+
+ for (i = 0; i < dfc->freq_table_size; i++) {
+ if (dfc->freq_table[i] == freq)
+ return i;
+ }
+
+ return THERMAL_CSTATE_INVALID;
+}
+
+/**
+ * get_static_power() - calculate the static power
+ * @dfc: Pointer to devfreq cooling device
+ * @freq: Frequency in Hz
+ *
+ * Calculate the static power in milliwatts using the supplied
+ * get_static_power(). The current voltage is calculated using the
+ * OPP library. If no get_static_power() was supplied, assume the
+ * static power is negligible.
+ */
+static unsigned long
+get_static_power(struct devfreq_cooling_device *dfc, unsigned long freq)
+{
+ struct devfreq *df = dfc->devfreq;
+ struct device *dev = df->dev.parent;
+ unsigned long voltage;
+ struct dev_pm_opp *opp;
+
+ if (!dfc->power_ops->get_static_power)
+ return 0;
+
+ rcu_read_lock();
+
+ opp = dev_pm_opp_find_freq_exact(dev, freq, true);
+ if (IS_ERR(opp) && (PTR_ERR(opp) == -ERANGE))
+ opp = dev_pm_opp_find_freq_exact(dev, freq, false);
+
+ voltage = dev_pm_opp_get_voltage(opp) / 1000; /* mV */
+
+ rcu_read_unlock();
+
+ if (voltage == 0) {
+ dev_warn_ratelimited(dev,
+ "Failed to get voltage for frequency %lu: %ld\n",
+ freq, IS_ERR(opp) ? PTR_ERR(opp) : 0);
+ return 0;
+ }
+
+ return dfc->power_ops->get_static_power(voltage);
+}
+
+/**
+ * get_dynamic_power - calculate the dynamic power
+ * @dfc: Pointer to devfreq cooling device
+ * @freq: Frequency in Hz
+ * @voltage: Voltage in millivolts
+ *
+ * Calculate the dynamic power in milliwatts consumed by the device at
+ * frequency @freq and voltage @voltage. If the get_dynamic_power()
+ * was supplied as part of the devfreq_cooling_power struct, then that
+ * function is used. Otherwise, a simple power model (Pdyn = Coeff *
+ * Voltage^2 * Frequency) is used.
+ */
+static unsigned long
+get_dynamic_power(struct devfreq_cooling_device *dfc, unsigned long freq,
+ unsigned long voltage)
+{
+ u64 power;
+ u32 freq_mhz;
+ struct devfreq_cooling_power *dfc_power = dfc->power_ops;
+
+ if (dfc_power->get_dynamic_power)
+ return dfc_power->get_dynamic_power(freq, voltage);
+
+ freq_mhz = freq / 1000000;
+ power = (u64)dfc_power->dyn_power_coeff * freq_mhz * voltage * voltage;
+ do_div(power, 1000000000);
+
+ return power;
+}
+
+static int devfreq_cooling_get_requested_power(struct thermal_cooling_device *cdev,
+ struct thermal_zone_device *tz,
+ u32 *power)
+{
+ struct devfreq_cooling_device *dfc = cdev->devdata;
+ struct devfreq *df = dfc->devfreq;
+ struct devfreq_dev_status *status = &df->last_status;
+ unsigned long state;
+ unsigned long freq = status->current_frequency;
+ u32 dyn_power, static_power;
+
+ /* Get dynamic power for state */
+ state = freq_get_state(dfc, freq);
+ if (state == THERMAL_CSTATE_INVALID)
+ return -EAGAIN;
+
+ dyn_power = dfc->power_table[state];
+
+ /* Scale dynamic power for utilization */
+ dyn_power = (dyn_power * status->busy_time) / status->total_time;
+
+ /* Get static power */
+ static_power = get_static_power(dfc, freq);
+
+ trace_thermal_power_devfreq_get_power(cdev, status, freq, dyn_power,
+ static_power);
+
+ *power = dyn_power + static_power;
+
+ return 0;
+}
+
+static int devfreq_cooling_state2power(struct thermal_cooling_device *cdev,
+ struct thermal_zone_device *tz,
+ unsigned long state,
+ u32 *power)
+{
+ struct devfreq_cooling_device *dfc = cdev->devdata;
+ unsigned long freq;
+ u32 static_power;
+
+ if (state < 0 || state >= dfc->freq_table_size)
+ return -EINVAL;
+
+ freq = dfc->freq_table[state];
+ static_power = get_static_power(dfc, freq);
+
+ *power = dfc->power_table[state] + static_power;
+ return 0;
+}
+
+static int devfreq_cooling_power2state(struct thermal_cooling_device *cdev,
+ struct thermal_zone_device *tz,
+ u32 power, unsigned long *state)
+{
+ struct devfreq_cooling_device *dfc = cdev->devdata;
+ struct devfreq *df = dfc->devfreq;
+ struct devfreq_dev_status *status = &df->last_status;
+ unsigned long freq = status->current_frequency;
+ unsigned long busy_time;
+ s32 dyn_power;
+ u32 static_power;
+ int i;
+
+ static_power = get_static_power(dfc, freq);
+
+ dyn_power = power - static_power;
+ dyn_power = dyn_power > 0 ? dyn_power : 0;
+
+ /* Scale dynamic power for utilization */
+ busy_time = status->busy_time ?: 1;
+ dyn_power = (dyn_power * status->total_time) / busy_time;
+
+ /*
+ * Find the first cooling state that is within the power
+ * budget for dynamic power.
+ */
+ for (i = 0; i < dfc->freq_table_size - 1; i++)
+ if (dyn_power >= dfc->power_table[i])
+ break;
+
+ *state = i;
+ trace_thermal_power_devfreq_limit(cdev, freq, *state, power);
+ return 0;
+}
+
+static struct thermal_cooling_device_ops devfreq_cooling_ops = {
+ .get_max_state = devfreq_cooling_get_max_state,
+ .get_cur_state = devfreq_cooling_get_cur_state,
+ .set_cur_state = devfreq_cooling_set_cur_state,
+};
+
+/**
+ * devfreq_cooling_gen_tables() - Generate power and freq tables.
+ * @dfc: Pointer to devfreq cooling device.
+ *
+ * Generate power and frequency tables: the power table holds the
+ * device's maximum power usage at each cooling state (OPP). The
+ * static and dynamic power, using the appropriate voltage and
+ * frequency for the state, are acquired from the struct
+ * devfreq_cooling_power and summed to make the maximum power draw.
+ *
+ * The frequency table holds the frequencies in descending order.
+ * That way it is indexed by cooling device state.
+ *
+ * The tables are malloced, and the pointers are stored in @dfc. They must be
+ * freed when unregistering the devfreq cooling device.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int devfreq_cooling_gen_tables(struct devfreq_cooling_device *dfc)
+{
+ struct devfreq *df = dfc->devfreq;
+ struct device *dev = df->dev.parent;
+ int ret, num_opps;
+ unsigned long freq;
+ u32 *power_table = NULL;
+ u32 *freq_table;
+ int i;
+
+ num_opps = dev_pm_opp_get_opp_count(dev);
+
+ if (dfc->power_ops) {
+ power_table = kcalloc(num_opps, sizeof(*power_table),
+ GFP_KERNEL);
+ if (!power_table)
+ return -ENOMEM;
+ }
+
+ freq_table = kcalloc(num_opps, sizeof(*freq_table),
+ GFP_KERNEL);
+ if (!freq_table) {
+ ret = -ENOMEM;
+ goto free_power_table;
+ }
+
+ for (i = 0, freq = ULONG_MAX; i < num_opps; i++, freq--) {
+ unsigned long power_dyn, voltage;
+ struct dev_pm_opp *opp;
+
+ rcu_read_lock();
+
+ opp = dev_pm_opp_find_freq_floor(dev, &freq);
+ if (IS_ERR(opp)) {
+ rcu_read_unlock();
+ ret = PTR_ERR(opp);
+ goto free_tables;
+ }
+
+ voltage = dev_pm_opp_get_voltage(opp) / 1000; /* mV */
+
+ rcu_read_unlock();
+
+ if (dfc->power_ops) {
+ power_dyn = get_dynamic_power(dfc, freq, voltage);
+
+ dev_dbg(dev, "Dynamic power table: %lu MHz @ %lu mV: %lu = %lu mW\n",
+ freq / 1000000, voltage, power_dyn, power_dyn);
+
+ power_table[i] = power_dyn;
+ }
+
+ freq_table[i] = freq;
+ }
+
+ if (dfc->power_ops)
+ dfc->power_table = power_table;
+
+ dfc->freq_table = freq_table;
+ dfc->freq_table_size = num_opps;
+
+ return 0;
+
+free_tables:
+ kfree(freq_table);
+free_power_table:
+ kfree(power_table);
+
+ return ret;
+}
+
+/**
+ * of_devfreq_cooling_register_power() - Register devfreq cooling device,
+ * with OF and power information.
+ * @np: Pointer to OF device_node.
+ * @df: Pointer to devfreq device.
+ * @dfc_power: Pointer to devfreq_cooling_power.
+ *
+ * Register a devfreq cooling device. The available OPPs must be
+ * registered on the device.
+ *
+ * If @dfc_power is provided, the cooling device is registered with the
+ * power extensions. For the power extensions to work correctly,
+ * devfreq should use the simple_ondemand governor; other governors
+ * are not currently supported.
+ */
+struct thermal_cooling_device *
+of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df,
+ struct devfreq_cooling_power *dfc_power)
+{
+ struct thermal_cooling_device *cdev;
+ struct devfreq_cooling_device *dfc;
+ char dev_name[THERMAL_NAME_LENGTH];
+ int err;
+
+ dfc = kzalloc(sizeof(*dfc), GFP_KERNEL);
+ if (!dfc)
+ return ERR_PTR(-ENOMEM);
+
+ dfc->devfreq = df;
+
+ if (dfc_power) {
+ dfc->power_ops = dfc_power;
+
+ devfreq_cooling_ops.get_requested_power =
+ devfreq_cooling_get_requested_power;
+ devfreq_cooling_ops.state2power = devfreq_cooling_state2power;
+ devfreq_cooling_ops.power2state = devfreq_cooling_power2state;
+ }
+
+ err = devfreq_cooling_gen_tables(dfc);
+ if (err)
+ goto free_dfc;
+
+ err = get_idr(&devfreq_idr, &dfc->id);
+ if (err)
+ goto free_tables;
+
+ snprintf(dev_name, sizeof(dev_name), "thermal-devfreq-%d", dfc->id);
+
+ cdev = thermal_of_cooling_device_register(np, dev_name, dfc,
+ &devfreq_cooling_ops);
+ if (IS_ERR(cdev)) {
+ err = PTR_ERR(cdev);
+ dev_err(df->dev.parent,
+ "Failed to register devfreq cooling device (%d)\n",
+ err);
+ goto release_idr;
+ }
+
+ dfc->cdev = cdev;
+
+ return cdev;
+
+release_idr:
+ release_idr(&devfreq_idr, dfc->id);
+free_tables:
+ kfree(dfc->power_table);
+ kfree(dfc->freq_table);
+free_dfc:
+ kfree(dfc);
+
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(of_devfreq_cooling_register_power);
+
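For illustration only: a devfreq driver (say, a GPU) with an estimated dynamic power coefficient might hook itself up as sketched below. The driver context, names and the coefficient value are assumptions; only the fields this file dereferences (dyn_power_coeff, plus the optional get_static_power() and get_dynamic_power() callbacks) matter.

    #include <linux/devfreq.h>
    #include <linux/devfreq_cooling.h>

    /* Assumed coefficient C for the Pdyn = C * V^2 * f model used above. */
    static struct devfreq_cooling_power gpu_cooling_power = {
            .dyn_power_coeff = 120,
    };

    static struct thermal_cooling_device *
    gpu_register_cooling(struct device *dev, struct devfreq *devfreq)
    {
            return of_devfreq_cooling_register_power(dev->of_node, devfreq,
                                                     &gpu_cooling_power);
    }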
+/**
+ * of_devfreq_cooling_register() - Register devfreq cooling device,
+ * with OF information.
+ * @np: Pointer to OF device_node.
+ * @df: Pointer to devfreq device.
+ */
+struct thermal_cooling_device *
+of_devfreq_cooling_register(struct device_node *np, struct devfreq *df)
+{
+ return of_devfreq_cooling_register_power(np, df, NULL);
+}
+EXPORT_SYMBOL_GPL(of_devfreq_cooling_register);
+
+/**
+ * devfreq_cooling_register() - Register devfreq cooling device.
+ * @df: Pointer to devfreq device.
+ */
+struct thermal_cooling_device *devfreq_cooling_register(struct devfreq *df)
+{
+ return of_devfreq_cooling_register(NULL, df);
+}
+EXPORT_SYMBOL_GPL(devfreq_cooling_register);
+
+/**
+ * devfreq_cooling_unregister() - Unregister devfreq cooling device.
+ * @cdev: Pointer to the cooling device to unregister.
+ */
+void devfreq_cooling_unregister(struct thermal_cooling_device *cdev)
+{
+ struct devfreq_cooling_device *dfc;
+
+ if (!cdev)
+ return;
+
+ dfc = cdev->devdata;
+
+ thermal_cooling_device_unregister(dfc->cdev);
+ release_idr(&devfreq_idr, dfc->id);
+ kfree(dfc->power_table);
+ kfree(dfc->freq_table);
+
+ kfree(dfc);
+}
+EXPORT_SYMBOL_GPL(devfreq_cooling_unregister);
diff --git a/drivers/thermal/fair_share.c b/drivers/thermal/fair_share.c
index 6e0a3fbfae86..8c50b8d6afb7 100644
--- a/drivers/thermal/fair_share.c
+++ b/drivers/thermal/fair_share.c
@@ -59,13 +59,13 @@ static int get_trip_level(struct thermal_zone_device *tz)
}
static long get_target_state(struct thermal_zone_device *tz,
- struct thermal_cooling_device *cdev, int weight, int level)
+ struct thermal_cooling_device *cdev, int percentage, int level)
{
unsigned long max_state;
cdev->ops->get_max_state(cdev, &max_state);
- return (long)(weight * level * max_state) / (100 * tz->trips);
+ return (long)(percentage * level * max_state) / (100 * tz->trips);
}
/**
@@ -77,7 +77,7 @@ static long get_target_state(struct thermal_zone_device *tz,
*
* Parameters used for Throttling:
* P1. max_state: Maximum throttle state exposed by the cooling device.
- * P2. weight[i]/100:
+ * P2. percentage[i]/100:
* How 'effective' the 'i'th device is, in cooling the given zone.
* P3. cur_trip_level/max_no_of_trips:
* This describes the extent to which the devices should be throttled.
@@ -88,28 +88,33 @@ static long get_target_state(struct thermal_zone_device *tz,
*/
static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
{
- const struct thermal_zone_params *tzp;
- struct thermal_cooling_device *cdev;
struct thermal_instance *instance;
- int i;
+ int total_weight = 0;
+ int total_instance = 0;
int cur_trip_level = get_trip_level(tz);
- if (!tz->tzp || !tz->tzp->tbp)
- return -EINVAL;
+ list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ if (instance->trip != trip)
+ continue;
+
+ total_weight += instance->weight;
+ total_instance++;
+ }
- tzp = tz->tzp;
+ list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ int percentage;
+ struct thermal_cooling_device *cdev = instance->cdev;
- for (i = 0; i < tzp->num_tbps; i++) {
- if (!tzp->tbp[i].cdev)
+ if (instance->trip != trip)
continue;
- cdev = tzp->tbp[i].cdev;
- instance = get_thermal_instance(tz, cdev, trip);
- if (!instance)
- continue;
+ if (!total_weight)
+ percentage = 100 / total_instance;
+ else
+ percentage = (instance->weight * 100) / total_weight;
- instance->target = get_target_state(tz, cdev,
- tzp->tbp[i].weight, cur_trip_level);
+ instance->target = get_target_state(tz, cdev, percentage,
+ cur_trip_level);
instance->cdev->updated = false;
thermal_cdev_update(cdev);
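A worked example of the reworked fair-share computation, with made-up numbers: in a zone with 4 trip points currently sitting at trip level 2, an instance whose weight amounts to a 50% share of a cooling device with max_state 10 gets target = (50 * 2 * 10) / (100 * 4) = 2 (integer division), i.e. state 2 out of 10. The share now comes from the per-instance weights registered at bind time (split evenly when every weight is zero) rather than from the platform's thermal_bind_params table.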
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index 2ccbc0788353..fde4c2876d14 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -306,7 +306,8 @@ static int imx_bind(struct thermal_zone_device *tz,
ret = thermal_zone_bind_cooling_device(tz, IMX_TRIP_PASSIVE, cdev,
THERMAL_NO_LIMIT,
- THERMAL_NO_LIMIT);
+ THERMAL_NO_LIMIT,
+ THERMAL_WEIGHT_DEFAULT);
if (ret) {
dev_err(&tz->device,
"binding zone %s with cdev %s failed:%d\n",
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 668fb1bdea9e..b295b2b6c191 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -58,6 +58,8 @@ struct __thermal_bind_params {
* @mode: current thermal zone device mode (enabled/disabled)
* @passive_delay: polling interval while passive cooling is activated
* @polling_delay: zone polling interval
+ * @slope: slope of the temperature adjustment curve
+ * @offset: offset of the temperature adjustment curve
* @ntrips: number of trip points
* @trips: an array of trip points (0..ntrips - 1)
* @num_tbps: number of thermal bind params
@@ -70,6 +72,8 @@ struct __thermal_zone {
enum thermal_device_mode mode;
int passive_delay;
int polling_delay;
+ int slope;
+ int offset;
/* trip data */
int ntrips;
@@ -227,7 +231,8 @@ static int of_thermal_bind(struct thermal_zone_device *thermal,
ret = thermal_zone_bind_cooling_device(thermal,
tbp->trip_id, cdev,
tbp->max,
- tbp->min);
+ tbp->min,
+ tbp->usage);
if (ret)
return ret;
}
@@ -581,7 +586,7 @@ static int thermal_of_populate_bind_params(struct device_node *np,
u32 prop;
/* Default weight. Usage is optional */
- __tbp->usage = 0;
+ __tbp->usage = THERMAL_WEIGHT_DEFAULT;
ret = of_property_read_u32(np, "contribution", &prop);
if (ret == 0)
__tbp->usage = prop;
@@ -715,7 +720,7 @@ static int thermal_of_populate_trip(struct device_node *np,
* @np parameter and fills the read data into a __thermal_zone data structure
* and return this pointer.
*
- * TODO: Missing properties to parse: thermal-sensor-names and coefficients
+ * TODO: Missing properties to parse: thermal-sensor-names
*
* Return: On success returns a valid struct __thermal_zone,
* otherwise, it returns a corresponding ERR_PTR(). Caller must
@@ -727,7 +732,7 @@ thermal_of_build_thermal_zone(struct device_node *np)
struct device_node *child = NULL, *gchild;
struct __thermal_zone *tz;
int ret, i;
- u32 prop;
+ u32 prop, coef[2];
if (!np) {
pr_err("no thermal zone np\n");
@@ -752,6 +757,20 @@ thermal_of_build_thermal_zone(struct device_node *np)
}
tz->polling_delay = prop;
+ /*
+ * REVISIT: for now, the thermal framework supports only
+ * one sensor per thermal zone. Thus, we are considering
+ * only the first two values as slope and offset.
+ */
+ ret = of_property_read_u32_array(np, "coefficients", coef, 2);
+ if (ret == 0) {
+ tz->slope = coef[0];
+ tz->offset = coef[1];
+ } else {
+ tz->slope = 1;
+ tz->offset = 0;
+ }
+
/* trips */
child = of_get_child_by_name(np, "trips");
@@ -865,6 +884,8 @@ int __init of_parse_thermal_zones(void)
for_each_child_of_node(np, child) {
struct thermal_zone_device *zone;
struct thermal_zone_params *tzp;
+ int i, mask = 0;
+ u32 prop;
/* Check whether child is enabled or not */
if (!of_device_is_available(child))
@@ -891,8 +912,18 @@ int __init of_parse_thermal_zones(void)
/* No hwmon because there might be hwmon drivers registering */
tzp->no_hwmon = true;
+ if (!of_property_read_u32(child, "sustainable-power", &prop))
+ tzp->sustainable_power = prop;
+
+ for (i = 0; i < tz->ntrips; i++)
+ mask |= 1 << i;
+
+ /* these two are left for temperature drivers to use */
+ tzp->slope = tz->slope;
+ tzp->offset = tz->offset;
+
zone = thermal_zone_device_register(child->name, tz->ntrips,
- 0, tz,
+ mask, tz,
ops, tzp,
tz->passive_delay,
tz->polling_delay);
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
new file mode 100644
index 000000000000..251676902869
--- /dev/null
+++ b/drivers/thermal/power_allocator.c
@@ -0,0 +1,544 @@
+/*
+ * A power allocator to manage temperature
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "Power allocator: " fmt
+
+#include <linux/rculist.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/thermal_power_allocator.h>
+
+#include "thermal_core.h"
+
+#define FRAC_BITS 10
+#define int_to_frac(x) ((x) << FRAC_BITS)
+#define frac_to_int(x) ((x) >> FRAC_BITS)
+
+/**
+ * mul_frac() - multiply two fixed-point numbers
+ * @x: first multiplicand
+ * @y: second multiplicand
+ *
+ * Return: the result of multiplying two fixed-point numbers. The
+ * result is also a fixed-point number.
+ */
+static inline s64 mul_frac(s64 x, s64 y)
+{
+ return (x * y) >> FRAC_BITS;
+}
+
+/**
+ * div_frac() - divide two fixed-point numbers
+ * @x: the dividend
+ * @y: the divisor
+ *
+ * Return: the result of dividing two fixed-point numbers. The
+ * result is also a fixed-point number.
+ */
+static inline s64 div_frac(s64 x, s64 y)
+{
+ return div_s64(x << FRAC_BITS, y);
+}
+
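With FRAC_BITS = 10, values carry ten fractional bits: int_to_frac(3) == 3072 and frac_to_int(3072) == 3. As a quick check of the helpers above, mul_frac(int_to_frac(3), int_to_frac(2)) == (3072 * 2048) >> 10 == 6144 == int_to_frac(6), and div_frac(int_to_frac(3), int_to_frac(2)) == 1536, i.e. 1.5 in this fixed-point representation.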
+/**
+ * struct power_allocator_params - parameters for the power allocator governor
+ * @err_integral: accumulated error in the PID controller.
+ * @prev_err: error in the previous iteration of the PID controller.
+ * Used to calculate the derivative term.
+ * @trip_switch_on: first passive trip point of the thermal zone. The
+ * governor switches on when this trip point is crossed.
+ * @trip_max_desired_temperature: last passive trip point of the thermal
+ * zone. The temperature we are
+ * controlling for.
+ */
+struct power_allocator_params {
+ s64 err_integral;
+ s32 prev_err;
+ int trip_switch_on;
+ int trip_max_desired_temperature;
+};
+
+/**
+ * pid_controller() - PID controller
+ * @tz: thermal zone we are operating in
+ * @current_temp: the current temperature in millicelsius
+ * @control_temp: the target temperature in millicelsius
+ * @max_allocatable_power: maximum allocatable power for this thermal zone
+ *
+ * This PID controller increases the available power budget so that the
+ * temperature of the thermal zone gets as close as possible to
+ * @control_temp and limits the power if it exceeds it. k_po is the
+ * proportional term when we are overshooting, k_pu is the
+ * proportional term when we are undershooting. integral_cutoff is a
+ * threshold above which we stop accumulating the error. The
+ * accumulated error is only valid if the requested power will make
+ * the system warmer. If the system is mostly idle, there's no point
+ * in accumulating positive error.
+ *
+ * Return: The power budget for the next period.
+ */
+static u32 pid_controller(struct thermal_zone_device *tz,
+ unsigned long current_temp,
+ unsigned long control_temp,
+ u32 max_allocatable_power)
+{
+ s64 p, i, d, power_range;
+ s32 err, max_power_frac;
+ struct power_allocator_params *params = tz->governor_data;
+
+ max_power_frac = int_to_frac(max_allocatable_power);
+
+ err = ((s32)control_temp - (s32)current_temp);
+ err = int_to_frac(err);
+
+ /* Calculate the proportional term */
+ p = mul_frac(err < 0 ? tz->tzp->k_po : tz->tzp->k_pu, err);
+
+ /*
+ * Calculate the integral term
+ *
+ * if the error is less than the cutoff, allow integration (but
+ * the integral is limited to max power)
+ */
+ i = mul_frac(tz->tzp->k_i, params->err_integral);
+
+ if (err < int_to_frac(tz->tzp->integral_cutoff)) {
+ s64 i_next = i + mul_frac(tz->tzp->k_i, err);
+
+ if (abs64(i_next) < max_power_frac) {
+ i = i_next;
+ params->err_integral += err;
+ }
+ }
+
+ /*
+ * Calculate the derivative term
+ *
+ * We do err - prev_err, so with a positive k_d, a decreasing
+ * error (i.e. driving closer to the line) results in less
+ * power being applied, slowing down the controller.
+ */
+ d = mul_frac(tz->tzp->k_d, err - params->prev_err);
+ d = div_frac(d, tz->passive_delay);
+ params->prev_err = err;
+
+ power_range = p + i + d;
+
+ /* feed-forward the known sustainable dissipatable power */
+ power_range = tz->tzp->sustainable_power + frac_to_int(power_range);
+
+ power_range = clamp(power_range, (s64)0, (s64)max_allocatable_power);
+
+ trace_thermal_power_allocator_pid(tz, frac_to_int(err),
+ frac_to_int(params->err_integral),
+ frac_to_int(p), frac_to_int(i),
+ frac_to_int(d), power_range);
+
+ return power_range;
+}
+
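In other words, the budget computed above for the next period is

    power_range = clamp(sustainable_power + frac_to_int(P + I + D), 0, max_allocatable_power)

with err = control_temp - current_temp, P = k_po * err when overshooting or k_pu * err when undershooting, I = k_i * err_integral (err is only accumulated while err < integral_cutoff and the integral stays below the maximum power), and D = k_d * (err - prev_err) / passive_delay.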
+/**
+ * divvy_up_power() - divvy the allocated power between the actors
+ * @req_power: each actor's requested power
+ * @max_power: each actor's maximum available power
+ * @num_actors: size of the @req_power, @max_power and @granted_power's array
+ * @total_req_power: sum of @req_power
+ * @power_range: total allocated power
+ * @granted_power: output array: each actor's granted power
+ * @extra_actor_power: an appropriately sized array to be used in the
+ * function as temporary storage of the extra power given
+ * to the actors
+ *
+ * This function divides the total allocated power (@power_range)
+ * fairly between the actors. It first tries to give each actor a
+ * share of the @power_range according to how much power it requested
+ * compared to the rest of the actors. For example, if only one actor
+ * requests power, then it receives all the @power_range. If
+ * three actors each request 1mW, each receives a third of the
+ * @power_range.
+ *
+ * If any actor received more than their maximum power, then that
+ * surplus is re-divvied among the actors based on how far they are
+ * from their respective maximums.
+ *
+ * Granted power for each actor is written to @granted_power, which
+ * should've been allocated by the calling function.
+ */
+static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors,
+ u32 total_req_power, u32 power_range,
+ u32 *granted_power, u32 *extra_actor_power)
+{
+ u32 extra_power, capped_extra_power;
+ int i;
+
+ /*
+ * Prevent division by 0 if none of the actors request power.
+ */
+ if (!total_req_power)
+ total_req_power = 1;
+
+ capped_extra_power = 0;
+ extra_power = 0;
+ for (i = 0; i < num_actors; i++) {
+ u64 req_range = req_power[i] * power_range;
+
+ granted_power[i] = DIV_ROUND_CLOSEST_ULL(req_range,
+ total_req_power);
+
+ if (granted_power[i] > max_power[i]) {
+ extra_power += granted_power[i] - max_power[i];
+ granted_power[i] = max_power[i];
+ }
+
+ extra_actor_power[i] = max_power[i] - granted_power[i];
+ capped_extra_power += extra_actor_power[i];
+ }
+
+ if (!extra_power)
+ return;
+
+ /*
+ * Re-divvy the reclaimed extra among actors based on
+ * how far they are from the max
+ */
+ extra_power = min(extra_power, capped_extra_power);
+ if (capped_extra_power > 0)
+ for (i = 0; i < num_actors; i++)
+ granted_power[i] += (extra_actor_power[i] *
+ extra_power) / capped_extra_power;
+}
+
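A worked example with made-up numbers: two actors request 300 mW and 100 mW and power_range is 200 mW, so they are granted 150 mW and 50 mW respectively. If the second actor's maximum power were only 30 mW, the 20 mW surplus would be re-divvied to the first actor (assuming its own maximum is well above 150 mW), giving 170 mW and 30 mW.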
+static int allocate_power(struct thermal_zone_device *tz,
+ unsigned long current_temp,
+ unsigned long control_temp)
+{
+ struct thermal_instance *instance;
+ struct power_allocator_params *params = tz->governor_data;
+ u32 *req_power, *max_power, *granted_power, *extra_actor_power;
+ u32 *weighted_req_power;
+ u32 total_req_power, max_allocatable_power, total_weighted_req_power;
+ u32 total_granted_power, power_range;
+ int i, num_actors, total_weight, ret = 0;
+ int trip_max_desired_temperature = params->trip_max_desired_temperature;
+
+ mutex_lock(&tz->lock);
+
+ num_actors = 0;
+ total_weight = 0;
+ list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ if ((instance->trip == trip_max_desired_temperature) &&
+ cdev_is_power_actor(instance->cdev)) {
+ num_actors++;
+ total_weight += instance->weight;
+ }
+ }
+
+ /*
+ * We need to allocate five arrays of the same size:
+ * req_power, max_power, granted_power, extra_actor_power and
+ * weighted_req_power. They are going to be needed until this
+ * function returns. Allocate them all in one go to simplify
+ * the allocation and deallocation logic.
+ */
+ BUILD_BUG_ON(sizeof(*req_power) != sizeof(*max_power));
+ BUILD_BUG_ON(sizeof(*req_power) != sizeof(*granted_power));
+ BUILD_BUG_ON(sizeof(*req_power) != sizeof(*extra_actor_power));
+ BUILD_BUG_ON(sizeof(*req_power) != sizeof(*weighted_req_power));
+ req_power = kcalloc(num_actors * 5, sizeof(*req_power), GFP_KERNEL);
+ if (!req_power) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ max_power = &req_power[num_actors];
+ granted_power = &req_power[2 * num_actors];
+ extra_actor_power = &req_power[3 * num_actors];
+ weighted_req_power = &req_power[4 * num_actors];
+
+ i = 0;
+ total_weighted_req_power = 0;
+ total_req_power = 0;
+ max_allocatable_power = 0;
+
+ list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ int weight;
+ struct thermal_cooling_device *cdev = instance->cdev;
+
+ if (instance->trip != trip_max_desired_temperature)
+ continue;
+
+ if (!cdev_is_power_actor(cdev))
+ continue;
+
+ if (cdev->ops->get_requested_power(cdev, tz, &req_power[i]))
+ continue;
+
+ if (!total_weight)
+ weight = 1 << FRAC_BITS;
+ else
+ weight = instance->weight;
+
+ weighted_req_power[i] = frac_to_int(weight * req_power[i]);
+
+ if (power_actor_get_max_power(cdev, tz, &max_power[i]))
+ continue;
+
+ total_req_power += req_power[i];
+ max_allocatable_power += max_power[i];
+ total_weighted_req_power += weighted_req_power[i];
+
+ i++;
+ }
+
+ power_range = pid_controller(tz, current_temp, control_temp,
+ max_allocatable_power);
+
+ divvy_up_power(weighted_req_power, max_power, num_actors,
+ total_weighted_req_power, power_range, granted_power,
+ extra_actor_power);
+
+ total_granted_power = 0;
+ i = 0;
+ list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ if (instance->trip != trip_max_desired_temperature)
+ continue;
+
+ if (!cdev_is_power_actor(instance->cdev))
+ continue;
+
+ power_actor_set_power(instance->cdev, instance,
+ granted_power[i]);
+ total_granted_power += granted_power[i];
+
+ i++;
+ }
+
+ trace_thermal_power_allocator(tz, req_power, total_req_power,
+ granted_power, total_granted_power,
+ num_actors, power_range,
+ max_allocatable_power, current_temp,
+ (s32)control_temp - (s32)current_temp);
+
+ kfree(req_power);
+unlock:
+ mutex_unlock(&tz->lock);
+
+ return ret;
+}
+
+static int get_governor_trips(struct thermal_zone_device *tz,
+ struct power_allocator_params *params)
+{
+ int i, ret, last_passive;
+ bool found_first_passive;
+
+ found_first_passive = false;
+ last_passive = -1;
+ ret = -EINVAL;
+
+ for (i = 0; i < tz->trips; i++) {
+ enum thermal_trip_type type;
+
+ ret = tz->ops->get_trip_type(tz, i, &type);
+ if (ret)
+ return ret;
+
+ if (!found_first_passive) {
+ if (type == THERMAL_TRIP_PASSIVE) {
+ params->trip_switch_on = i;
+ found_first_passive = true;
+ }
+ } else if (type == THERMAL_TRIP_PASSIVE) {
+ last_passive = i;
+ } else {
+ break;
+ }
+ }
+
+ if (last_passive != -1) {
+ params->trip_max_desired_temperature = last_passive;
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
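+
+/*
+ * Illustrative trip layout (hypothetical zone): with trips
+ * {0: ACTIVE, 1: PASSIVE, 2: PASSIVE, 3: CRITICAL}, the loop above
+ * selects trip 1 as trip_switch_on and trip 2, the last passive trip
+ * before a non-passive one, as trip_max_desired_temperature.
+ */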
+
+static void reset_pid_controller(struct power_allocator_params *params)
+{
+ params->err_integral = 0;
+ params->prev_err = 0;
+}
+
+static void allow_maximum_power(struct thermal_zone_device *tz)
+{
+ struct thermal_instance *instance;
+ struct power_allocator_params *params = tz->governor_data;
+
+ list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ if ((instance->trip != params->trip_max_desired_temperature) ||
+ (!cdev_is_power_actor(instance->cdev)))
+ continue;
+
+ instance->target = 0;
+ instance->cdev->updated = false;
+ thermal_cdev_update(instance->cdev);
+ }
+}
+
+/**
+ * power_allocator_bind() - bind the power_allocator governor to a thermal zone
+ * @tz: thermal zone to bind it to
+ *
+ * Check that the thermal zone is valid for this governor, that is, it
+ * has at least two passive trip points: a "switch on" trip and a
+ * "maximum desired temperature" trip. If so, initialize the PID
+ * controller parameters and bind the governor to the thermal zone.
+ *
+ * Return: 0 on success, -EINVAL if the trips were invalid or -ENOMEM
+ * if we ran out of memory.
+ */
+static int power_allocator_bind(struct thermal_zone_device *tz)
+{
+ int ret;
+ struct power_allocator_params *params;
+ unsigned long switch_on_temp, control_temp;
+ u32 temperature_threshold;
+
+ if (!tz->tzp || !tz->tzp->sustainable_power) {
+ dev_err(&tz->device,
+ "power_allocator: missing sustainable_power\n");
+ return -EINVAL;
+ }
+
+ params = kzalloc(sizeof(*params), GFP_KERNEL);
+ if (!params)
+ return -ENOMEM;
+
+ ret = get_governor_trips(tz, params);
+ if (ret) {
+ dev_err(&tz->device,
+ "thermal zone %s has wrong trip setup for power allocator\n",
+ tz->type);
+ goto free;
+ }
+
+ ret = tz->ops->get_trip_temp(tz, params->trip_switch_on,
+ &switch_on_temp);
+ if (ret)
+ goto free;
+
+ ret = tz->ops->get_trip_temp(tz, params->trip_max_desired_temperature,
+ &control_temp);
+ if (ret)
+ goto free;
+
+ temperature_threshold = control_temp - switch_on_temp;
+
+ tz->tzp->k_po = tz->tzp->k_po ?:
+ int_to_frac(tz->tzp->sustainable_power) / temperature_threshold;
+ tz->tzp->k_pu = tz->tzp->k_pu ?:
+ int_to_frac(2 * tz->tzp->sustainable_power) /
+ temperature_threshold;
+ tz->tzp->k_i = tz->tzp->k_i ?: int_to_frac(10) / 1000;
+ /*
+ * The default for k_d and integral_cutoff is 0, so we can
+ * leave them as they are.
+ */
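+
+	/*
+	 * Illustrative defaults (hypothetical numbers): with
+	 * sustainable_power = 3500 and a 10000 gap between the control
+	 * and switch-on trip temperatures, k_po defaults to
+	 * 3500/10000 = 0.35 and k_pu to twice that, 0.70, both stored
+	 * in the governor's fixed-point format; k_i defaults to 0.01.
+	 */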
+
+ reset_pid_controller(params);
+
+ tz->governor_data = params;
+
+ return 0;
+
+free:
+ kfree(params);
+ return ret;
+}
+
+static void power_allocator_unbind(struct thermal_zone_device *tz)
+{
+ dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id);
+ kfree(tz->governor_data);
+ tz->governor_data = NULL;
+}
+
+static int power_allocator_throttle(struct thermal_zone_device *tz, int trip)
+{
+ int ret;
+ unsigned long switch_on_temp, control_temp, current_temp;
+ struct power_allocator_params *params = tz->governor_data;
+
+ /*
+ * We get called for every trip point but we only need to do
+ * our calculations once
+ */
+ if (trip != params->trip_max_desired_temperature)
+ return 0;
+
+ ret = thermal_zone_get_temp(tz, &current_temp);
+ if (ret) {
+ dev_warn(&tz->device, "Failed to get temperature: %d\n", ret);
+ return ret;
+ }
+
+ ret = tz->ops->get_trip_temp(tz, params->trip_switch_on,
+ &switch_on_temp);
+ if (ret) {
+ dev_warn(&tz->device,
+ "Failed to get switch on temperature: %d\n", ret);
+ return ret;
+ }
+
+ if (current_temp < switch_on_temp) {
+ tz->passive = 0;
+ reset_pid_controller(params);
+ allow_maximum_power(tz);
+ return 0;
+ }
+
+ tz->passive = 1;
+
+ ret = tz->ops->get_trip_temp(tz, params->trip_max_desired_temperature,
+ &control_temp);
+ if (ret) {
+ dev_warn(&tz->device,
+ "Failed to get the maximum desired temperature: %d\n",
+ ret);
+ return ret;
+ }
+
+ return allocate_power(tz, current_temp, control_temp);
+}
+
+static struct thermal_governor thermal_gov_power_allocator = {
+ .name = "power_allocator",
+ .bind_to_tz = power_allocator_bind,
+ .unbind_from_tz = power_allocator_unbind,
+ .throttle = power_allocator_throttle,
+};
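+
+/*
+ * Once registered, the governor can be selected per thermal zone from
+ * userspace through the zone's policy attribute, e.g. (illustrative
+ * path and zone number):
+ *
+ *   echo power_allocator > /sys/class/thermal/thermal_zone0/policy
+ */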
+
+int thermal_gov_power_allocator_register(void)
+{
+ return thermal_register_governor(&thermal_gov_power_allocator);
+}
+
+void thermal_gov_power_allocator_unregister(void)
+{
+ thermal_unregister_governor(&thermal_gov_power_allocator);
+}
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index a61386ce41ee..20b115711a0c 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -78,6 +78,58 @@ static struct thermal_governor *__find_governor(const char *name)
return NULL;
}
+/**
+ * bind_previous_governor() - bind the previous governor of the thermal zone
+ * @tz: a valid pointer to a struct thermal_zone_device
+ * @failed_gov_name: the name of the governor that failed to register
+ *
+ * Try to bind the thermal zone's previous governor back after a new
+ * governor has failed to bind to it.
+ */
+static void bind_previous_governor(struct thermal_zone_device *tz,
+ const char *failed_gov_name)
+{
+ if (tz->governor && tz->governor->bind_to_tz) {
+ if (tz->governor->bind_to_tz(tz)) {
+ dev_err(&tz->device,
+ "governor %s failed to bind and the previous one (%s) failed to bind again, thermal zone %s has no governor\n",
+ failed_gov_name, tz->governor->name, tz->type);
+ tz->governor = NULL;
+ }
+ }
+}
+
+/**
+ * thermal_set_governor() - Switch to another governor
+ * @tz: a valid pointer to a struct thermal_zone_device
+ * @new_gov: pointer to the new governor
+ *
+ * Change the governor of thermal zone @tz.
+ *
+ * Return: 0 on success, an error if the new governor's bind_to_tz() failed.
+ */
+static int thermal_set_governor(struct thermal_zone_device *tz,
+ struct thermal_governor *new_gov)
+{
+ int ret = 0;
+
+ if (tz->governor && tz->governor->unbind_from_tz)
+ tz->governor->unbind_from_tz(tz);
+
+ if (new_gov && new_gov->bind_to_tz) {
+ ret = new_gov->bind_to_tz(tz);
+ if (ret) {
+ bind_previous_governor(tz, new_gov->name);
+
+ return ret;
+ }
+ }
+
+ tz->governor = new_gov;
+
+ return ret;
+}
+
int thermal_register_governor(struct thermal_governor *governor)
{
int err;
@@ -110,8 +162,15 @@ int thermal_register_governor(struct thermal_governor *governor)
name = pos->tzp->governor_name;
- if (!strncasecmp(name, governor->name, THERMAL_NAME_LENGTH))
- pos->governor = governor;
+ if (!strncasecmp(name, governor->name, THERMAL_NAME_LENGTH)) {
+ int ret;
+
+ ret = thermal_set_governor(pos, governor);
+ if (ret)
+ dev_err(&pos->device,
+ "Failed to set governor %s for thermal zone %s: %d\n",
+ governor->name, pos->type, ret);
+ }
}
mutex_unlock(&thermal_list_lock);
@@ -137,7 +196,7 @@ void thermal_unregister_governor(struct thermal_governor *governor)
list_for_each_entry(pos, &thermal_tz_list, node) {
if (!strncasecmp(pos->governor->name, governor->name,
THERMAL_NAME_LENGTH))
- pos->governor = NULL;
+ thermal_set_governor(pos, NULL);
}
mutex_unlock(&thermal_list_lock);
@@ -221,7 +280,8 @@ static void print_bind_err_msg(struct thermal_zone_device *tz,
static void __bind(struct thermal_zone_device *tz, int mask,
struct thermal_cooling_device *cdev,
- unsigned long *limits)
+ unsigned long *limits,
+ unsigned int weight)
{
int i, ret;
@@ -236,7 +296,8 @@ static void __bind(struct thermal_zone_device *tz, int mask,
upper = limits[i * 2 + 1];
}
ret = thermal_zone_bind_cooling_device(tz, i, cdev,
- upper, lower);
+ upper, lower,
+ weight);
if (ret)
print_bind_err_msg(tz, cdev, ret);
}
@@ -283,7 +344,8 @@ static void bind_cdev(struct thermal_cooling_device *cdev)
continue;
tzp->tbp[i].cdev = cdev;
__bind(pos, tzp->tbp[i].trip_mask, cdev,
- tzp->tbp[i].binding_limits);
+ tzp->tbp[i].binding_limits,
+ tzp->tbp[i].weight);
}
}
@@ -322,7 +384,8 @@ static void bind_tz(struct thermal_zone_device *tz)
continue;
tzp->tbp[i].cdev = pos;
__bind(tz, tzp->tbp[i].trip_mask, pos,
- tzp->tbp[i].binding_limits);
+ tzp->tbp[i].binding_limits,
+ tzp->tbp[i].weight);
}
}
exit:
@@ -737,7 +800,8 @@ passive_store(struct device *dev, struct device_attribute *attr,
thermal_zone_bind_cooling_device(tz,
THERMAL_TRIPS_NONE, cdev,
THERMAL_NO_LIMIT,
- THERMAL_NO_LIMIT);
+ THERMAL_NO_LIMIT,
+ THERMAL_WEIGHT_DEFAULT);
}
mutex_unlock(&thermal_list_lock);
if (!tz->passive_delay)
@@ -789,8 +853,9 @@ policy_store(struct device *dev, struct device_attribute *attr,
if (!gov)
goto exit;
- tz->governor = gov;
- ret = count;
+ ret = thermal_set_governor(tz, gov);
+ if (!ret)
+ ret = count;
exit:
mutex_unlock(&tz->lock);
@@ -834,6 +899,158 @@ emul_temp_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(emul_temp, S_IWUSR, NULL, emul_temp_store);
#endif/*CONFIG_THERMAL_EMULATION*/
+static ssize_t
+sustainable_power_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+
+ if (tz->tzp)
+ return sprintf(buf, "%u\n", tz->tzp->sustainable_power);
+ else
+ return -EIO;
+}
+
+static ssize_t
+sustainable_power_store(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+ u32 sustainable_power;
+
+ if (!tz->tzp)
+ return -EIO;
+
+ if (kstrtou32(buf, 10, &sustainable_power))
+ return -EINVAL;
+
+ tz->tzp->sustainable_power = sustainable_power;
+
+ return count;
+}
+static DEVICE_ATTR(sustainable_power, S_IWUSR | S_IRUGO, sustainable_power_show,
+ sustainable_power_store);
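+
+/*
+ * Illustrative usage (hypothetical zone and value): the power_allocator
+ * governor refuses to bind to a zone whose sustainable_power is zero.
+ * For zones registered with thermal_zone_params, the value can also be
+ * set from userspace before selecting the governor:
+ *
+ *   echo 3500 > /sys/class/thermal/thermal_zone0/sustainable_power
+ */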
+
+#define create_s32_tzp_attr(name) \
+ static ssize_t \
+ name##_show(struct device *dev, struct device_attribute *devattr, \
+ char *buf) \
+ { \
+ struct thermal_zone_device *tz = to_thermal_zone(dev); \
+ \
+ if (tz->tzp) \
+ return sprintf(buf, "%d\n", tz->tzp->name); \
+ else \
+ return -EIO; \
+ } \
+ \
+ static ssize_t \
+ name##_store(struct device *dev, struct device_attribute *devattr, \
+ const char *buf, size_t count) \
+ { \
+ struct thermal_zone_device *tz = to_thermal_zone(dev); \
+ s32 value; \
+ \
+ if (!tz->tzp) \
+ return -EIO; \
+ \
+ if (kstrtos32(buf, 10, &value)) \
+ return -EINVAL; \
+ \
+ tz->tzp->name = value; \
+ \
+ return count; \
+ } \
+ static DEVICE_ATTR(name, S_IWUSR | S_IRUGO, name##_show, name##_store)
+
+create_s32_tzp_attr(k_po);
+create_s32_tzp_attr(k_pu);
+create_s32_tzp_attr(k_i);
+create_s32_tzp_attr(k_d);
+create_s32_tzp_attr(integral_cutoff);
+create_s32_tzp_attr(slope);
+create_s32_tzp_attr(offset);
+#undef create_s32_tzp_attr
+
+static struct device_attribute *dev_tzp_attrs[] = {
+ &dev_attr_sustainable_power,
+ &dev_attr_k_po,
+ &dev_attr_k_pu,
+ &dev_attr_k_i,
+ &dev_attr_k_d,
+ &dev_attr_integral_cutoff,
+ &dev_attr_slope,
+ &dev_attr_offset,
+};
+
+static int create_tzp_attrs(struct device *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev_tzp_attrs); i++) {
+ int ret;
+ struct device_attribute *dev_attr = dev_tzp_attrs[i];
+
+ ret = device_create_file(dev, dev_attr);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * power_actor_get_max_power() - get the maximum power that a cdev can consume
+ * @cdev: pointer to &thermal_cooling_device
+ * @tz: a valid thermal zone device pointer
+ * @max_power: pointer in which to store the maximum power
+ *
+ * Calculate the maximum power in milliwatts that the cooling device
+ * can currently consume and store it in @max_power.
+ *
+ * Return: 0 on success, -EINVAL if @cdev doesn't support the
+ * power_actor API or -E* on other error.
+ */
+int power_actor_get_max_power(struct thermal_cooling_device *cdev,
+ struct thermal_zone_device *tz, u32 *max_power)
+{
+ if (!cdev_is_power_actor(cdev))
+ return -EINVAL;
+
+ return cdev->ops->state2power(cdev, tz, 0, max_power);
+}
+
+/**
+ * power_actor_set_power() - limit the maximum power that a cooling device can consume
+ * @cdev: pointer to &thermal_cooling_device
+ * @instance: thermal instance to update
+ * @power: the power in milliwatts
+ *
+ * Set the cooling device to consume at most @power milliwatts.
+ *
+ * Return: 0 on success, -EINVAL if the cooling device does not
+ * implement the power actor API or -E* for other failures.
+ */
+int power_actor_set_power(struct thermal_cooling_device *cdev,
+ struct thermal_instance *instance, u32 power)
+{
+ unsigned long state;
+ int ret;
+
+ if (!cdev_is_power_actor(cdev))
+ return -EINVAL;
+
+ ret = cdev->ops->power2state(cdev, instance->tz, power, &state);
+ if (ret)
+ return ret;
+
+ instance->target = state;
+ cdev->updated = false;
+ thermal_cdev_update(cdev);
+
+ return 0;
+}
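+
+/*
+ * A cooling device becomes a power actor by implementing the power
+ * callbacks used above in its ops.  A minimal sketch with hypothetical
+ * driver names:
+ *
+ *   static struct thermal_cooling_device_ops foo_cooling_ops = {
+ *           .get_max_state       = foo_get_max_state,
+ *           .get_cur_state       = foo_get_cur_state,
+ *           .set_cur_state       = foo_set_cur_state,
+ *           .get_requested_power = foo_get_requested_power,
+ *           .state2power         = foo_state2power,
+ *           .power2state         = foo_power2state,
+ *   };
+ */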
+
static DEVICE_ATTR(type, 0444, type_show, NULL);
static DEVICE_ATTR(temp, 0444, temp_show, NULL);
static DEVICE_ATTR(mode, 0644, mode_show, mode_store);
@@ -941,6 +1158,34 @@ static const struct attribute_group *cooling_device_attr_groups[] = {
NULL,
};
+static ssize_t
+thermal_cooling_device_weight_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct thermal_instance *instance;
+
+ instance = container_of(attr, struct thermal_instance, weight_attr);
+
+ return sprintf(buf, "%d\n", instance->weight);
+}
+
+static ssize_t
+thermal_cooling_device_weight_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct thermal_instance *instance;
+ int ret, weight;
+
+ ret = kstrtoint(buf, 0, &weight);
+ if (ret)
+ return ret;
+
+ instance = container_of(attr, struct thermal_instance, weight_attr);
+ instance->weight = weight;
+
+ return count;
+}
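+
+/*
+ * The weight is exposed per bound instance as a cdevN_weight attribute
+ * under the thermal zone (created in thermal_zone_bind_cooling_device
+ * below).  A larger weight biases more of the power_allocator budget
+ * towards that cooling device, e.g. (illustrative path and value):
+ *
+ *   echo 512 > /sys/class/thermal/thermal_zone0/cdev0_weight
+ */
+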
/* Device management */
/**
@@ -955,6 +1200,9 @@ static const struct attribute_group *cooling_device_attr_groups[] = {
 * @lower: the minimum cooling state that can be used for this trip point.
* THERMAL_NO_LIMIT means no lower limit,
* and the cooling device can be in cooling state 0.
+ * @weight: The weight of the cooling device to be bound to the
+ * thermal zone. Use THERMAL_WEIGHT_DEFAULT for the
+ * default value
*
 * This interface function binds a thermal cooling device to a given trip
* point of a thermal zone device.
@@ -965,7 +1213,8 @@ static const struct attribute_group *cooling_device_attr_groups[] = {
int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
int trip,
struct thermal_cooling_device *cdev,
- unsigned long upper, unsigned long lower)
+ unsigned long upper, unsigned long lower,
+ unsigned int weight)
{
struct thermal_instance *dev;
struct thermal_instance *pos;
@@ -1010,6 +1259,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
dev->upper = upper;
dev->lower = lower;
dev->target = THERMAL_NO_TARGET;
+ dev->weight = weight;
result = get_idr(&tz->idr, &tz->lock, &dev->id);
if (result)
@@ -1030,6 +1280,16 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
if (result)
goto remove_symbol_link;
+ sprintf(dev->weight_attr_name, "cdev%d_weight", dev->id);
+ sysfs_attr_init(&dev->weight_attr.attr);
+ dev->weight_attr.attr.name = dev->weight_attr_name;
+ dev->weight_attr.attr.mode = S_IWUSR | S_IRUGO;
+ dev->weight_attr.show = thermal_cooling_device_weight_show;
+ dev->weight_attr.store = thermal_cooling_device_weight_store;
+ result = device_create_file(&tz->device, &dev->weight_attr);
+ if (result)
+ goto remove_trip_file;
+
mutex_lock(&tz->lock);
mutex_lock(&cdev->lock);
list_for_each_entry(pos, &tz->thermal_instances, tz_node)
@@ -1048,6 +1308,8 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
if (!result)
return 0;
+ device_remove_file(&tz->device, &dev->weight_attr);
+remove_trip_file:
device_remove_file(&tz->device, &dev->attr);
remove_symbol_link:
sysfs_remove_link(&tz->device.kobj, dev->name);
@@ -1409,7 +1671,8 @@ static int create_trip_attrs(struct thermal_zone_device *tz, int mask)
tz->trip_temp_attrs[indx].name;
tz->trip_temp_attrs[indx].attr.attr.mode = S_IRUGO;
tz->trip_temp_attrs[indx].attr.show = trip_point_temp_show;
- if (mask & (1 << indx)) {
+ if (IS_ENABLED(CONFIG_THERMAL_WRITABLE_TRIPS) &&
+ mask & (1 << indx)) {
tz->trip_temp_attrs[indx].attr.attr.mode |= S_IWUSR;
tz->trip_temp_attrs[indx].attr.store =
trip_point_temp_store;
@@ -1486,7 +1749,7 @@ static void remove_trip_attrs(struct thermal_zone_device *tz)
struct thermal_zone_device *thermal_zone_device_register(const char *type,
int trips, int mask, void *devdata,
struct thermal_zone_device_ops *ops,
- const struct thermal_zone_params *tzp,
+ struct thermal_zone_params *tzp,
int passive_delay, int polling_delay)
{
struct thermal_zone_device *tz;
@@ -1495,6 +1758,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
int result;
int count;
int passive = 0;
+ struct thermal_governor *governor;
if (type && strlen(type) >= THERMAL_NAME_LENGTH)
return ERR_PTR(-EINVAL);
@@ -1589,13 +1853,24 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
if (result)
goto unregister;
+ /* Add thermal zone params */
+ result = create_tzp_attrs(&tz->device);
+ if (result)
+ goto unregister;
+
/* Update 'this' zone's governor information */
mutex_lock(&thermal_governor_lock);
if (tz->tzp)
- tz->governor = __find_governor(tz->tzp->governor_name);
+ governor = __find_governor(tz->tzp->governor_name);
else
- tz->governor = def_governor;
+ governor = def_governor;
+
+ result = thermal_set_governor(tz, governor);
+ if (result) {
+ mutex_unlock(&thermal_governor_lock);
+ goto unregister;
+ }
mutex_unlock(&thermal_governor_lock);
@@ -1687,7 +1962,7 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
device_remove_file(&tz->device, &dev_attr_mode);
device_remove_file(&tz->device, &dev_attr_policy);
remove_trip_attrs(tz);
- tz->governor = NULL;
+ thermal_set_governor(tz, NULL);
thermal_remove_hwmon_sysfs(tz);
release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id);
@@ -1843,7 +2118,11 @@ static int __init thermal_register_governors(void)
if (result)
return result;
- return thermal_gov_user_space_register();
+ result = thermal_gov_user_space_register();
+ if (result)
+ return result;
+
+ return thermal_gov_power_allocator_register();
}
static void thermal_unregister_governors(void)
@@ -1852,6 +2131,7 @@ static void thermal_unregister_governors(void)
thermal_gov_fair_share_unregister();
thermal_gov_bang_bang_unregister();
thermal_gov_user_space_unregister();
+ thermal_gov_power_allocator_unregister();
}
static int thermal_pm_notify(struct notifier_block *nb,
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index dce86ee8e9d7..749d41abfbab 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -47,8 +47,11 @@ struct thermal_instance {
unsigned long target; /* expected cooling state */
char attr_name[THERMAL_NAME_LENGTH];
struct device_attribute attr;
+ char weight_attr_name[THERMAL_NAME_LENGTH];
+ struct device_attribute weight_attr;
struct list_head tz_node; /* node in tz->thermal_instances */
struct list_head cdev_node; /* node in cdev->thermal_instances */
+ unsigned int weight; /* The weight of the cooling device */
};
int thermal_register_governor(struct thermal_governor *);
@@ -86,6 +89,14 @@ static inline int thermal_gov_user_space_register(void) { return 0; }
static inline void thermal_gov_user_space_unregister(void) {}
#endif /* CONFIG_THERMAL_GOV_USER_SPACE */
+#ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR
+int thermal_gov_power_allocator_register(void);
+void thermal_gov_power_allocator_unregister(void);
+#else
+static inline int thermal_gov_power_allocator_register(void) { return 0; }
+static inline void thermal_gov_power_allocator_unregister(void) {}
+#endif /* CONFIG_THERMAL_GOV_POWER_ALLOCATOR */
+
/* device tree support */
#ifdef CONFIG_THERMAL_OF
int of_parse_thermal_zones(void);
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index a38c1756442a..cb45e729adb5 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -146,7 +146,8 @@ static int ti_thermal_bind(struct thermal_zone_device *thermal,
return thermal_zone_bind_cooling_device(thermal, 0, cdev,
/* bind with min and max states defined by cpu_cooling */
THERMAL_NO_LIMIT,
- THERMAL_NO_LIMIT);
+ THERMAL_NO_LIMIT,
+ THERMAL_WEIGHT_DEFAULT);
}
/* Unbind callback functions for thermal zone */