Diffstat (limited to 'drivers')
-rw-r--r--  drivers/cpufreq/cpufreq_interactive.c  132
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c        224
-rw-r--r--  drivers/misc/uid_cputime.c              14
-rw-r--r--  drivers/mmc/core/core.c                 23
-rw-r--r--  drivers/mmc/core/host.c                  3
-rw-r--r--  drivers/usb/gadget/f_accessory.c         3
-rw-r--r--  drivers/usb/gadget/u_ether.c            22
7 files changed, 258 insertions, 163 deletions
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
index b84f709c760e..39e62187cdbc 100644
--- a/drivers/cpufreq/cpufreq_interactive.c
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -47,9 +47,10 @@ struct cpufreq_interactive_cpuinfo {
spinlock_t target_freq_lock; /*protects target freq */
unsigned int target_freq;
unsigned int floor_freq;
- unsigned int max_freq;
- u64 floor_validate_time;
- u64 hispeed_validate_time;
+ u64 pol_floor_val_time; /* policy floor_validate_time */
+ u64 loc_floor_val_time; /* per-cpu floor_validate_time */
+ u64 pol_hispeed_val_time; /* policy hispeed_validate_time */
+ u64 loc_hispeed_val_time; /* per-cpu hispeed_validate_time */
struct rw_semaphore enable_sem;
int governor_enabled;
};
@@ -345,6 +346,7 @@ static void cpufreq_interactive_timer(unsigned long data)
unsigned int loadadjfreq;
unsigned int index;
unsigned long flags;
+ u64 max_fvtime;
if (!down_read_trylock(&pcpu->enable_sem))
return;
@@ -367,7 +369,7 @@ static void cpufreq_interactive_timer(unsigned long data)
tunables->boosted = tunables->boost_val || now < tunables->boostpulse_endtime;
if (cpu_load >= tunables->go_hispeed_load || tunables->boosted) {
- if (pcpu->target_freq < tunables->hispeed_freq) {
+ if (pcpu->policy->cur < tunables->hispeed_freq) {
new_freq = tunables->hispeed_freq;
} else {
new_freq = choose_freq(pcpu, loadadjfreq);
@@ -378,14 +380,14 @@ static void cpufreq_interactive_timer(unsigned long data)
} else {
new_freq = choose_freq(pcpu, loadadjfreq);
if (new_freq > tunables->hispeed_freq &&
- pcpu->target_freq < tunables->hispeed_freq)
+ pcpu->policy->cur < tunables->hispeed_freq)
new_freq = tunables->hispeed_freq;
}
- if (pcpu->target_freq >= tunables->hispeed_freq &&
- new_freq > pcpu->target_freq &&
- now - pcpu->hispeed_validate_time <
- freq_to_above_hispeed_delay(tunables, pcpu->target_freq)) {
+ if (pcpu->policy->cur >= tunables->hispeed_freq &&
+ new_freq > pcpu->policy->cur &&
+ now - pcpu->pol_hispeed_val_time <
+ freq_to_above_hispeed_delay(tunables, pcpu->policy->cur)) {
trace_cpufreq_interactive_notyet(
data, cpu_load, pcpu->target_freq,
pcpu->policy->cur, new_freq);
@@ -393,7 +395,7 @@ static void cpufreq_interactive_timer(unsigned long data)
goto rearm;
}
- pcpu->hispeed_validate_time = now;
+ pcpu->loc_hispeed_val_time = now;
if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
new_freq, CPUFREQ_RELATION_L,
@@ -408,9 +410,10 @@ static void cpufreq_interactive_timer(unsigned long data)
* Do not scale below floor_freq unless we have been at or above the
* floor frequency for the minimum sample time since last validated.
*/
- if (new_freq < pcpu->floor_freq) {
- if (now - pcpu->floor_validate_time <
- tunables->min_sample_time) {
+ max_fvtime = max(pcpu->pol_floor_val_time, pcpu->loc_floor_val_time);
+ if (new_freq < pcpu->floor_freq &&
+ pcpu->target_freq >= pcpu->policy->cur) {
+ if (now - max_fvtime < tunables->min_sample_time) {
trace_cpufreq_interactive_notyet(
data, cpu_load, pcpu->target_freq,
pcpu->policy->cur, new_freq);
@@ -429,7 +432,9 @@ static void cpufreq_interactive_timer(unsigned long data)
if (!tunables->boosted || new_freq > tunables->hispeed_freq) {
pcpu->floor_freq = new_freq;
- pcpu->floor_validate_time = now;
+ if (pcpu->target_freq >= pcpu->policy->cur ||
+ new_freq >= pcpu->policy->cur)
+ pcpu->loc_floor_val_time = now;
}
if (pcpu->target_freq == new_freq &&
@@ -438,7 +443,7 @@ static void cpufreq_interactive_timer(unsigned long data)
data, cpu_load, pcpu->target_freq,
pcpu->policy->cur, new_freq);
spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
- goto rearm_if_notmax;
+ goto rearm;
}
trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
@@ -451,14 +456,6 @@ static void cpufreq_interactive_timer(unsigned long data)
spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
wake_up_process(speedchange_task);
-rearm_if_notmax:
- /*
- * Already set max speed and don't see a need to change that,
- * wait until next idle to re-evaluate, don't need timer.
- */
- if (pcpu->target_freq == pcpu->policy->max)
- goto exit;
-
rearm:
if (!timer_pending(&pcpu->cpu_timer))
cpufreq_interactive_timer_resched(pcpu);
@@ -468,37 +465,6 @@ exit:
return;
}
-static void cpufreq_interactive_idle_start(void)
-{
- struct cpufreq_interactive_cpuinfo *pcpu =
- &per_cpu(cpuinfo, smp_processor_id());
- int pending;
-
- if (!down_read_trylock(&pcpu->enable_sem))
- return;
- if (!pcpu->governor_enabled) {
- up_read(&pcpu->enable_sem);
- return;
- }
-
- pending = timer_pending(&pcpu->cpu_timer);
-
- if (pcpu->target_freq != pcpu->policy->min) {
- /*
- * Entering idle while not at lowest speed. On some
- * platforms this can hold the other CPU(s) at that speed
- * even though the CPU is idle. Set a timer to re-evaluate
- * speed so this idle CPU doesn't hold the other CPUs above
- * min indefinitely. This should probably be a quirk of
- * the CPUFreq driver.
- */
- if (!pending)
- cpufreq_interactive_timer_resched(pcpu);
- }
-
- up_read(&pcpu->enable_sem);
-}
-
static void cpufreq_interactive_idle_end(void)
{
struct cpufreq_interactive_cpuinfo *pcpu =
@@ -553,6 +519,8 @@ static int cpufreq_interactive_speedchange_task(void *data)
for_each_cpu(cpu, &tmp_mask) {
unsigned int j;
unsigned int max_freq = 0;
+ struct cpufreq_interactive_cpuinfo *pjcpu;
+ u64 hvt = ~0ULL, fvt = 0;
pcpu = &per_cpu(cpuinfo, cpu);
if (!down_read_trylock(&pcpu->enable_sem))
@@ -563,17 +531,30 @@ static int cpufreq_interactive_speedchange_task(void *data)
}
for_each_cpu(j, pcpu->policy->cpus) {
- struct cpufreq_interactive_cpuinfo *pjcpu =
- &per_cpu(cpuinfo, j);
+ pjcpu = &per_cpu(cpuinfo, j);
- if (pjcpu->target_freq > max_freq)
+ fvt = max(fvt, pjcpu->loc_floor_val_time);
+ if (pjcpu->target_freq > max_freq) {
max_freq = pjcpu->target_freq;
+ hvt = pjcpu->loc_hispeed_val_time;
+ } else if (pjcpu->target_freq == max_freq) {
+ hvt = min(hvt, pjcpu->loc_hispeed_val_time);
+ }
+ }
+ for_each_cpu(j, pcpu->policy->cpus) {
+ pjcpu = &per_cpu(cpuinfo, j);
+ pjcpu->pol_floor_val_time = fvt;
}
- if (max_freq != pcpu->policy->cur)
+ if (max_freq != pcpu->policy->cur) {
__cpufreq_driver_target(pcpu->policy,
max_freq,
CPUFREQ_RELATION_H);
+ for_each_cpu(j, pcpu->policy->cpus) {
+ pjcpu = &per_cpu(cpuinfo, j);
+ pjcpu->pol_hispeed_val_time = hvt;
+ }
+ }
trace_cpufreq_interactive_setspeed(cpu,
pcpu->target_freq,
pcpu->policy->cur);
@@ -605,7 +586,7 @@ static void cpufreq_interactive_boost(struct cpufreq_interactive_tunables *tunab
if (pcpu->target_freq < tunables->hispeed_freq) {
pcpu->target_freq = tunables->hispeed_freq;
cpumask_set_cpu(i, &speedchange_cpumask);
- pcpu->hispeed_validate_time =
+ pcpu->pol_hispeed_val_time =
ktime_to_us(ktime_get());
anyboost = 1;
}
@@ -1108,14 +1089,8 @@ static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
unsigned long val,
void *data)
{
- switch (val) {
- case IDLE_START:
- cpufreq_interactive_idle_start();
- break;
- case IDLE_END:
+ if (val == IDLE_END)
cpufreq_interactive_idle_end();
- break;
- }
return 0;
}
@@ -1232,11 +1207,11 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
pcpu->target_freq = policy->cur;
pcpu->freq_table = freq_table;
pcpu->floor_freq = pcpu->target_freq;
- pcpu->floor_validate_time =
+ pcpu->pol_floor_val_time =
ktime_to_us(ktime_get());
- pcpu->hispeed_validate_time =
- pcpu->floor_validate_time;
- pcpu->max_freq = policy->max;
+ pcpu->loc_floor_val_time = pcpu->pol_floor_val_time;
+ pcpu->pol_hispeed_val_time = pcpu->pol_floor_val_time;
+ pcpu->loc_hispeed_val_time = pcpu->pol_floor_val_time;
down_write(&pcpu->enable_sem);
del_timer_sync(&pcpu->cpu_timer);
del_timer_sync(&pcpu->cpu_slack_timer);
@@ -1286,23 +1261,6 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
up_read(&pcpu->enable_sem);
-
- /* Reschedule timer only if policy->max is raised.
- * Delete the timers, else the timer callback may
- * return without re-arm the timer when failed
- * acquire the semaphore. This race may cause timer
- * stopped unexpectedly.
- */
-
- if (policy->max > pcpu->max_freq) {
- down_write(&pcpu->enable_sem);
- del_timer_sync(&pcpu->cpu_timer);
- del_timer_sync(&pcpu->cpu_slack_timer);
- cpufreq_interactive_timer_start(tunables, j);
- up_write(&pcpu->enable_sem);
- }
-
- pcpu->max_freq = policy->max;
}
break;
}
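
The interesting part of the interactive-governor change is how the new per-policy timestamps are derived from the per-cpu ones in the speedchange task: the policy floor_validate_time becomes the newest per-cpu floor time, while the policy hispeed_validate_time is taken from the CPU that set the policy's highest target frequency (the oldest such time when several CPUs tie). A minimal standalone sketch of that aggregation, with made-up frequencies and timestamps:

/*
 * Userspace sketch (not kernel code) of the aggregation in
 * cpufreq_interactive_speedchange_task above. Values are illustrative.
 */
#include <stdio.h>
#include <stdint.h>

struct cpuinfo {
	unsigned int target_freq;
	uint64_t loc_floor_val_time;
	uint64_t loc_hispeed_val_time;
};

static void aggregate(const struct cpuinfo *cpus, int n,
		      unsigned int *max_freq, uint64_t *fvt, uint64_t *hvt)
{
	*max_freq = 0;
	*fvt = 0;		/* policy floor time: max over CPUs */
	*hvt = ~0ULL;		/* policy hispeed time: from the max-freq CPU */

	for (int j = 0; j < n; j++) {
		if (cpus[j].loc_floor_val_time > *fvt)
			*fvt = cpus[j].loc_floor_val_time;
		if (cpus[j].target_freq > *max_freq) {
			*max_freq = cpus[j].target_freq;
			*hvt = cpus[j].loc_hispeed_val_time;
		} else if (cpus[j].target_freq == *max_freq &&
			   cpus[j].loc_hispeed_val_time < *hvt) {
			*hvt = cpus[j].loc_hispeed_val_time;
		}
	}
}

int main(void)
{
	const struct cpuinfo cpus[] = {
		{ 1200000, 100, 400 },
		{ 1800000, 250, 300 },
		{ 1800000, 150, 200 },
	};
	unsigned int max_freq;
	uint64_t fvt, hvt;

	aggregate(cpus, 3, &max_freq, &fvt, &hvt);
	/* prints: max_freq=1800000 fvt=250 hvt=200 */
	printf("max_freq=%u fvt=%llu hvt=%llu\n", max_freq,
	       (unsigned long long)fvt, (unsigned long long)hvt);
	return 0;
}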
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 1337d99f0c83..3811168bf28d 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -14,7 +14,8 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sort.h>
-#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/sched.h>
#include <asm/cputime.h>
static spinlock_t cpufreq_stats_lock;
@@ -39,6 +40,12 @@ struct all_cpufreq_stats {
unsigned int *freq_table;
};
+struct cpufreq_power_stats {
+ unsigned int state_num;
+ unsigned int *curr;
+ unsigned int *freq_table;
+};
+
struct all_freq_table {
unsigned int *freq_table;
unsigned int table_size;
@@ -48,6 +55,7 @@ static struct all_freq_table *all_freq_table;
static DEFINE_PER_CPU(struct all_cpufreq_stats *, all_cpufreq_stats);
static DEFINE_PER_CPU(struct cpufreq_stats *, cpufreq_stats_table);
+static DEFINE_PER_CPU(struct cpufreq_power_stats *, cpufreq_power_stats);
struct cpufreq_stats_attribute {
struct attribute attr;
@@ -118,6 +126,47 @@ static int get_index_all_cpufreq_stat(struct all_cpufreq_stats *all_stat,
return -1;
}
+void acct_update_power(struct task_struct *task, cputime_t cputime) {
+ struct cpufreq_power_stats *powerstats;
+ struct cpufreq_stats *stats;
+ unsigned int cpu_num, curr;
+
+ if (!task)
+ return;
+ cpu_num = task_cpu(task);
+ powerstats = per_cpu(cpufreq_power_stats, cpu_num);
+ stats = per_cpu(cpufreq_stats_table, cpu_num);
+ if (!powerstats || !stats)
+ return;
+
+ curr = powerstats->curr[stats->last_index];
+ task->cpu_power += curr * cputime_to_usecs(cputime);
+}
+EXPORT_SYMBOL_GPL(acct_update_power);
+
+static ssize_t show_current_in_state(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ ssize_t len = 0;
+ unsigned int i, cpu;
+ struct cpufreq_power_stats *powerstats;
+
+ spin_lock(&cpufreq_stats_lock);
+ for_each_possible_cpu(cpu) {
+ powerstats = per_cpu(cpufreq_power_stats, cpu);
+ if (!powerstats)
+ continue;
+ len += scnprintf(buf + len, PAGE_SIZE - len, "CPU%d:", cpu);
+ for (i = 0; i < powerstats->state_num; i++)
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%d=%d ", powerstats->freq_table[i],
+ powerstats->curr[i]);
+ len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+ }
+ spin_unlock(&cpufreq_stats_lock);
+ return len;
+}
+
static ssize_t show_all_time_in_state(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -227,6 +276,9 @@ static struct attribute_group stats_attr_group = {
static struct kobj_attribute _attr_all_time_in_state = __ATTR(all_time_in_state,
0444, show_all_time_in_state, NULL);
+static struct kobj_attribute _attr_current_in_state = __ATTR(current_in_state,
+ 0444, show_current_in_state, NULL);
+
static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
{
int index;
@@ -288,10 +340,27 @@ static void cpufreq_allstats_free(void)
}
}
+static void cpufreq_powerstats_free(void)
+{
+ int cpu;
+ struct cpufreq_power_stats *powerstats;
+
+ sysfs_remove_file(cpufreq_global_kobject, &_attr_current_in_state.attr);
+
+ for_each_possible_cpu(cpu) {
+ powerstats = per_cpu(cpufreq_power_stats, cpu);
+ if (!powerstats)
+ continue;
+ kfree(powerstats->curr);
+ kfree(powerstats);
+ per_cpu(cpufreq_power_stats, cpu) = NULL;
+ }
+}
+
static int __cpufreq_stats_create_table(struct cpufreq_policy *policy,
- struct cpufreq_frequency_table *table)
+ struct cpufreq_frequency_table *table, int count)
{
- unsigned int i, j, count = 0, ret = 0;
+ unsigned int i, j, ret = 0;
struct cpufreq_stats *stat;
struct cpufreq_policy *current_policy;
unsigned int alloc_size;
@@ -315,12 +384,6 @@ static int __cpufreq_stats_create_table(struct cpufreq_policy *policy,
stat->cpu = cpu;
per_cpu(cpufreq_stats_table, cpu) = stat;
- for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
- unsigned int freq = table[i].frequency;
- if (freq == CPUFREQ_ENTRY_INVALID)
- continue;
- count++;
- }
alloc_size = count * sizeof(int) + count * sizeof(u64);
@@ -361,26 +424,6 @@ error_get_fail:
return ret;
}
-static void cpufreq_stats_create_table(unsigned int cpu)
-{
- struct cpufreq_policy *policy;
- struct cpufreq_frequency_table *table;
-
- /*
- * "likely(!policy)" because normally cpufreq_stats will be registered
- * before cpufreq driver
- */
- policy = cpufreq_cpu_get(cpu);
- if (likely(!policy))
- return;
-
- table = cpufreq_frequency_get_table(policy->cpu);
- if (likely(table))
- __cpufreq_stats_create_table(policy, table);
-
- cpufreq_cpu_put(policy);
-}
-
static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
{
struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table,
@@ -394,6 +437,54 @@ static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
stat->cpu = policy->cpu;
}
+static void cpufreq_powerstats_create(unsigned int cpu,
+ struct cpufreq_frequency_table *table, int count) {
+ unsigned int alloc_size, i = 0, j = 0, ret = 0;
+ struct cpufreq_power_stats *powerstats;
+ struct device_node *cpu_node;
+ char device_path[16];
+
+ powerstats = kzalloc(sizeof(struct cpufreq_power_stats),
+ GFP_KERNEL);
+ if (!powerstats)
+ return;
+
+ /* Allocate memory for freq table per cpu as well as clockticks per
+ * freq*/
+ alloc_size = count * sizeof(unsigned int) +
+ count * sizeof(unsigned int);
+ powerstats->curr = kzalloc(alloc_size, GFP_KERNEL);
+ if (!powerstats->curr) {
+ kfree(powerstats);
+ return;
+ }
+ powerstats->freq_table = powerstats->curr + count;
+
+ spin_lock(&cpufreq_stats_lock);
+ for (i = 0; table[i].frequency != CPUFREQ_TABLE_END && j < count; i++) {
+ unsigned int freq = table[i].frequency;
+
+ if (freq == CPUFREQ_ENTRY_INVALID)
+ continue;
+ powerstats->freq_table[j++] = freq;
+ }
+ powerstats->state_num = j;
+
+ snprintf(device_path, sizeof(device_path), "/cpus/cpu@%d", cpu);
+ cpu_node = of_find_node_by_path(device_path);
+ if (cpu_node) {
+ ret = of_property_read_u32_array(cpu_node, "current",
+ powerstats->curr, count);
+ if (ret) {
+ kfree(powerstats->curr);
+ kfree(powerstats);
+ powerstats = NULL;
+ }
+ }
+ per_cpu(cpufreq_power_stats, cpu) = powerstats;
+ spin_unlock(&cpufreq_stats_lock);
+}
+
static int compare_for_sort(const void *lhs_ptr, const void *rhs_ptr)
{
unsigned int lhs = *(const unsigned int *)(lhs_ptr);
@@ -438,24 +529,14 @@ static void add_all_freq_table(unsigned int freq)
all_freq_table->freq_table[all_freq_table->table_size++] = freq;
}
-static void cpufreq_allstats_create(unsigned int cpu)
+static void cpufreq_allstats_create(unsigned int cpu,
+ struct cpufreq_frequency_table *table, int count)
{
int i , j = 0;
- unsigned int alloc_size, count = 0;
- struct cpufreq_frequency_table *table = cpufreq_frequency_get_table(cpu);
+ unsigned int alloc_size;
struct all_cpufreq_stats *all_stat;
bool sort_needed = false;
- if (!table)
- return;
-
- for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
- unsigned int freq = table[i].frequency;
- if (freq == CPUFREQ_ENTRY_INVALID)
- continue;
- count++;
- }
-
all_stat = kzalloc(sizeof(struct all_cpufreq_stats),
GFP_KERNEL);
if (!all_stat) {
@@ -494,10 +575,44 @@ static void cpufreq_allstats_create(unsigned int cpu)
spin_unlock(&cpufreq_stats_lock);
}
+static void cpufreq_stats_create_table(unsigned int cpu)
+{
+ struct cpufreq_policy *policy;
+ struct cpufreq_frequency_table *table;
+ int i, count = 0;
+ /*
+ * "likely(!policy)" because normally cpufreq_stats will be registered
+ * before cpufreq driver
+ */
+ policy = cpufreq_cpu_get(cpu);
+ if (likely(!policy))
+ return;
+
+ table = cpufreq_frequency_get_table(policy->cpu);
+ if (likely(table)) {
+ for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
+ unsigned int freq = table[i].frequency;
+
+ if (freq == CPUFREQ_ENTRY_INVALID)
+ continue;
+ count++;
+ }
+
+ if (!per_cpu(all_cpufreq_stats, cpu))
+ cpufreq_allstats_create(cpu, table, count);
+
+ if (!per_cpu(cpufreq_power_stats, cpu))
+ cpufreq_powerstats_create(cpu, table, count);
+
+ __cpufreq_stats_create_table(policy, table, count);
+ }
+ cpufreq_cpu_put(policy);
+}
+
static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
unsigned long val, void *data)
{
- int ret = 0;
+ int ret = 0, count = 0, i;
struct cpufreq_policy *policy = data;
struct cpufreq_frequency_table *table;
unsigned int cpu = policy->cpu;
@@ -511,11 +626,22 @@ static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
if (!table)
return 0;
+ for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
+ unsigned int freq = table[i].frequency;
+
+ if (freq == CPUFREQ_ENTRY_INVALID)
+ continue;
+ count++;
+ }
+
if (!per_cpu(all_cpufreq_stats, cpu))
- cpufreq_allstats_create(cpu);
+ cpufreq_allstats_create(cpu, table, count);
+
+ if (!per_cpu(cpufreq_power_stats, cpu))
+ cpufreq_powerstats_create(cpu, table, count);
if (val == CPUFREQ_CREATE_POLICY)
- ret = __cpufreq_stats_create_table(policy, table);
+ ret = __cpufreq_stats_create_table(policy, table, count);
else if (val == CPUFREQ_REMOVE_POLICY)
__cpufreq_stats_free_table(policy);
@@ -595,7 +721,12 @@ static int __init cpufreq_stats_init(void)
ret = sysfs_create_file(cpufreq_global_kobject,
&_attr_all_time_in_state.attr);
if (ret)
- pr_warn("Error creating sysfs file for cpufreq stats\n");
+ pr_warn("Cannot create sysfs file for cpufreq stats\n");
+
+ ret = sysfs_create_file(cpufreq_global_kobject,
+ &_attr_current_in_state.attr);
+ if (ret)
+ pr_warn("Cannot create sysfs file for cpufreq current stats\n");
return 0;
}
@@ -610,6 +741,7 @@ static void __exit cpufreq_stats_exit(void)
for_each_online_cpu(cpu)
cpufreq_stats_free_table(cpu);
cpufreq_allstats_free();
+ cpufreq_powerstats_free();
cpufreq_put_global_kobject();
}
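
The cpufreq_stats change exposes per-frequency current values (read from the devicetree "current" property) through a new current_in_state file in the global cpufreq sysfs directory. A hedged userspace sketch of a reader follows; the /sys path is an assumption (cpufreq_global_kobject normally surfaces as /sys/devices/system/cpu/cpufreq), and the current values are in whatever unit the platform devicetree uses, typically mA:

/*
 * Sketch: parse current_in_state lines of the form
 * "CPU<n>: <freq>=<curr> <freq>=<curr> ..." as emitted by
 * show_current_in_state() above. Path is an assumption.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/system/cpu/cpufreq/current_in_state", "r");
	unsigned int cpu, freq, curr;
	char line[4096];

	if (!f) {
		perror("current_in_state");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		int pos = -1, adv;
		char *p;

		if (sscanf(line, "CPU%u:%n", &cpu, &pos) != 1 || pos < 0)
			continue;
		for (p = line + pos;
		     sscanf(p, "%u=%u%n", &freq, &curr, &adv) == 2; p += adv)
			printf("cpu%u %u kHz -> %u\n", cpu, freq, curr);
	}
	fclose(f);
	return 0;
}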
diff --git a/drivers/misc/uid_cputime.c b/drivers/misc/uid_cputime.c
index acd7046ce497..89bfba6c5b6a 100644
--- a/drivers/misc/uid_cputime.c
+++ b/drivers/misc/uid_cputime.c
@@ -38,6 +38,8 @@ struct uid_entry {
cputime_t stime;
cputime_t active_utime;
cputime_t active_stime;
+ unsigned long long active_power;
+ unsigned long long power;
struct hlist_node hash;
};
@@ -83,6 +85,7 @@ static int uid_stat_show(struct seq_file *m, void *v)
hash_for_each(hash_table, bkt, uid_entry, hash) {
uid_entry->active_stime = 0;
uid_entry->active_utime = 0;
+ uid_entry->active_power = 0;
}
read_lock(&tasklist_lock);
@@ -100,6 +103,7 @@ static int uid_stat_show(struct seq_file *m, void *v)
task_cputime_adjusted(task, &utime, &stime);
uid_entry->active_utime += utime;
uid_entry->active_stime += stime;
+ uid_entry->active_power += task->cpu_power;
}
read_unlock(&tasklist_lock);
@@ -108,9 +112,12 @@ static int uid_stat_show(struct seq_file *m, void *v)
uid_entry->active_utime;
cputime_t total_stime = uid_entry->stime +
uid_entry->active_stime;
- seq_printf(m, "%d: %u %u\n", uid_entry->uid,
+ unsigned long long total_power = uid_entry->power +
+ uid_entry->active_power;
+ seq_printf(m, "%d: %u %u %llu\n", uid_entry->uid,
cputime_to_usecs(total_utime),
- cputime_to_usecs(total_stime));
+ cputime_to_usecs(total_stime),
+ total_power);
}
mutex_unlock(&uid_lock);
@@ -203,6 +210,7 @@ static int process_notifier(struct notifier_block *self,
task_cputime_adjusted(task, &utime, &stime);
uid_entry->utime += utime;
uid_entry->stime += stime;
+ uid_entry->power += task->cpu_power;
exit:
mutex_unlock(&uid_lock);
@@ -226,7 +234,7 @@ static int __init proc_uid_cputime_init(void)
proc_create_data("remove_uid_range", S_IWUGO, parent, &uid_remove_fops,
NULL);
- proc_create_data("show_uid_stat", S_IWUGO, parent, &uid_stat_fops,
+ proc_create_data("show_uid_stat", S_IRUGO, parent, &uid_stat_fops,
NULL);
profile_event_register(PROFILE_TASK_EXIT, &process_notifier_block);
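
With the power accumulator added above, each show_uid_stat line becomes "<uid>: <utime_us> <stime_us> <power>", where power is the sum of current * cputime contributions made by acct_update_power(). A small reader sketch; the /proc/uid_cputime path is assumed from the AOSP driver, since the parent directory is created outside this hunk:

/*
 * Sketch: read the extended show_uid_stat format. The power field is
 * an opaque accumulator (current value times cputime in usecs), not a
 * calibrated energy unit.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/uid_cputime/show_uid_stat", "r");
	unsigned int uid, utime, stime;
	unsigned long long power;

	if (!f) {
		perror("show_uid_stat");
		return 1;
	}
	while (fscanf(f, "%u: %u %u %llu", &uid, &utime, &stime, &power) == 4)
		printf("uid=%u user=%uus sys=%uus power=%llu\n",
		       uid, utime, stime, power);
	fclose(f);
	return 0;
}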
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 2a1cc36b3ace..11655e90be53 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -63,6 +63,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(mmc_blk_rw_end);
#define MMC_BKOPS_MAX_TIMEOUT (4 * 60 * 1000) /* max time to wait in ms */
static struct workqueue_struct *workqueue;
+static struct wake_lock mmc_delayed_work_wake_lock;
static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
/*
@@ -96,6 +97,7 @@ MODULE_PARM_DESC(
static int mmc_schedule_delayed_work(struct delayed_work *work,
unsigned long delay)
{
+ wake_lock(&mmc_delayed_work_wake_lock);
return queue_delayed_work(workqueue, work, delay);
}
@@ -1726,8 +1728,6 @@ static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
pm_wakeup_event(mmc_dev(host), 5000);
host->detect_change = 1;
-
- wake_lock(&host->detect_wake_lock);
mmc_schedule_delayed_work(&host->detect, delay);
}
@@ -2511,13 +2511,11 @@ void mmc_rescan(struct work_struct *work)
out:
if (extend_wakelock)
- wake_lock_timeout(&host->detect_wake_lock, HZ / 2);
+ wake_lock_timeout(&mmc_delayed_work_wake_lock, HZ / 2);
else
- wake_unlock(&host->detect_wake_lock);
- if (host->caps & MMC_CAP_NEEDS_POLL) {
- wake_lock(&host->detect_wake_lock);
+ wake_unlock(&mmc_delayed_work_wake_lock);
+ if (host->caps & MMC_CAP_NEEDS_POLL)
mmc_schedule_delayed_work(&host->detect, HZ);
- }
}
void mmc_start_host(struct mmc_host *host)
@@ -2541,8 +2539,7 @@ void mmc_stop_host(struct mmc_host *host)
#endif
host->rescan_disable = 1;
- if (cancel_delayed_work_sync(&host->detect))
- wake_unlock(&host->detect_wake_lock);
+ cancel_delayed_work_sync(&host->detect);
mmc_flush_scheduled_work();
/* clear pm flags now and let card drivers set them as needed */
@@ -2699,8 +2696,7 @@ int mmc_pm_notify(struct notifier_block *notify_block,
spin_lock_irqsave(&host->lock, flags);
host->rescan_disable = 1;
spin_unlock_irqrestore(&host->lock, flags);
- if (cancel_delayed_work_sync(&host->detect))
- wake_unlock(&host->detect_wake_lock);
+ cancel_delayed_work_sync(&host->detect);
if (!host->bus_ops)
break;
@@ -2776,6 +2772,9 @@ static int __init mmc_init(void)
if (!workqueue)
return -ENOMEM;
+ wake_lock_init(&mmc_delayed_work_wake_lock, WAKE_LOCK_SUSPEND,
+ "mmc_delayed_work");
+
ret = mmc_register_bus();
if (ret)
goto destroy_workqueue;
@@ -2796,6 +2795,7 @@ unregister_bus:
mmc_unregister_bus();
destroy_workqueue:
destroy_workqueue(workqueue);
+ wake_lock_destroy(&mmc_delayed_work_wake_lock);
return ret;
}
@@ -2806,6 +2806,7 @@ static void __exit mmc_exit(void)
mmc_unregister_host_class();
mmc_unregister_bus();
destroy_workqueue(workqueue);
+ wake_lock_destroy(&mmc_delayed_work_wake_lock);
}
subsys_initcall(mmc_init);
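
The mmc change replaces the per-host detect_wake_lock with one static wakelock that is taken whenever detect work is queued and released (or downgraded to a half-second timeout) when mmc_rescan() finishes. A stubbed userspace sketch of that lifecycle, with printf stand-ins for the wakelock API, just to make the ordering explicit:

/*
 * Sketch only: stubbed wake_lock API showing the take-on-schedule,
 * release-after-rescan pattern the patch establishes.
 */
#include <stdio.h>

struct wake_lock { const char *name; };

static void wake_lock(struct wake_lock *l)
{
	printf("wake_lock(%s)\n", l->name);	/* keep system awake */
}

static void wake_unlock(struct wake_lock *l)
{
	printf("wake_unlock(%s)\n", l->name);
}

static void wake_lock_timeout(struct wake_lock *l, int jiffies)
{
	printf("wake_lock_timeout(%s, %d)\n", l->name, jiffies);
}

static struct wake_lock mmc_delayed_work_wake_lock = { "mmc_delayed_work" };

static void schedule_detect_work(void)
{
	/* as in mmc_schedule_delayed_work(): lock before queueing */
	wake_lock(&mmc_delayed_work_wake_lock);
	/* queue_delayed_work(...) would run rescan() later */
}

static void rescan(int extend_wakelock)
{
	/* ... card detection work ... */
	if (extend_wakelock)
		wake_lock_timeout(&mmc_delayed_work_wake_lock, 50); /* HZ / 2 */
	else
		wake_unlock(&mmc_delayed_work_wake_lock);
}

int main(void)
{
	schedule_detect_work();
	rescan(0);
	return 0;
}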
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 2c886ae34198..cc3164a939eb 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -481,8 +481,6 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
spin_lock_init(&host->lock);
init_waitqueue_head(&host->wq);
- wake_lock_init(&host->detect_wake_lock, WAKE_LOCK_SUSPEND,
- kasprintf(GFP_KERNEL, "%s_detect", mmc_hostname(host)));
INIT_DELAYED_WORK(&host->detect, mmc_rescan);
#ifdef CONFIG_PM
host->pm_notify.notifier_call = mmc_pm_notify;
@@ -582,7 +580,6 @@ void mmc_free_host(struct mmc_host *host)
spin_lock(&mmc_host_lock);
idr_remove(&mmc_host_idr, host->index);
spin_unlock(&mmc_host_lock);
- wake_lock_destroy(&host->detect_wake_lock);
put_device(&host->class_dev);
}
diff --git a/drivers/usb/gadget/f_accessory.c b/drivers/usb/gadget/f_accessory.c
index dcedeb4b2fef..0237f1e059b4 100644
--- a/drivers/usb/gadget/f_accessory.c
+++ b/drivers/usb/gadget/f_accessory.c
@@ -1204,8 +1204,7 @@ err:
static void acc_disconnect(void)
{
/* unregister all HID devices if USB is disconnected */
- if (_acc_dev)
- kill_all_hid_devices(_acc_dev);
+ kill_all_hid_devices(_acc_dev);
}
static void acc_cleanup(void)
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index bf6589eb7cb5..28b291a4e0e5 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -875,17 +875,6 @@ static int get_ether_addr(const char *str, u8 *dev_addr)
return 1;
}
-static int get_host_ether_addr(u8 *str, u8 *dev_addr)
-{
- memcpy(dev_addr, str, ETH_ALEN);
- if (is_valid_ether_addr(dev_addr))
- return 0;
-
- random_ether_addr(dev_addr);
- memcpy(str, dev_addr, ETH_ALEN);
- return 1;
-}
-
static int get_ether_addr_str(u8 dev_addr[ETH_ALEN], char *str, int len)
{
if (len < 18)
@@ -897,6 +886,17 @@ static int get_ether_addr_str(u8 dev_addr[ETH_ALEN], char *str, int len)
return 18;
}
+static int get_host_ether_addr(u8 *str, u8 *dev_addr)
+{
+ memcpy(dev_addr, str, ETH_ALEN);
+ if (is_valid_ether_addr(dev_addr))
+ return 0;
+
+ random_ether_addr(dev_addr);
+ memcpy(str, dev_addr, ETH_ALEN);
+ return 1;
+}
+
static const struct net_device_ops eth_netdev_ops = {
.ndo_open = eth_open,
.ndo_stop = eth_stop,
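
The u_ether change only moves get_host_ether_addr() below get_ether_addr_str(); its fallback logic is unchanged: use the host-supplied MAC if it is valid, otherwise generate a random one and write it back. A standalone sketch with userspace stand-ins for the kernel helpers (is_valid_ether_addr() treats non-zero unicast addresses as valid; random_ether_addr() sets the locally-administered bit and clears the multicast bit):

/*
 * Sketch of the get_host_ether_addr() fallback logic shown above,
 * with simplified userspace versions of the kernel helpers.
 */
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

#define ETH_ALEN 6

static int is_valid_ether_addr(const unsigned char *a)
{
	static const unsigned char zero[ETH_ALEN];

	return !(a[0] & 0x01) && memcmp(a, zero, ETH_ALEN) != 0;
}

static void random_ether_addr(unsigned char *a)
{
	for (int i = 0; i < ETH_ALEN; i++)
		a[i] = rand() & 0xff;
	a[0] &= 0xfe;	/* clear multicast bit */
	a[0] |= 0x02;	/* set locally administered bit */
}

static int get_host_ether_addr(unsigned char *str, unsigned char *dev_addr)
{
	memcpy(dev_addr, str, ETH_ALEN);
	if (is_valid_ether_addr(dev_addr))
		return 0;

	random_ether_addr(dev_addr);
	memcpy(str, dev_addr, ETH_ALEN);
	return 1;
}

int main(void)
{
	unsigned char host[ETH_ALEN] = { 0 };	/* all-zero: invalid */
	unsigned char dev[ETH_ALEN];
	int randomized = get_host_ether_addr(host, dev);

	printf("randomized=%d addr=%02x:%02x:%02x:%02x:%02x:%02x\n",
	       randomized, dev[0], dev[1], dev[2], dev[3], dev[4], dev[5]);
	return 0;
}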