diff options
author | Tuukka Tikkanen <tuukka.tikkanen@linaro.org> | 2015-01-02 09:29:10 +0200 |
---|---|---|
committer | Tuukka Tikkanen <tuukka.tikkanen@linaro.org> | 2015-01-05 10:21:01 +0200 |
commit | 550333633430c2463de676295284c52a79700f74 (patch) | |
tree | 60a7b87326c6d187689cdf65bf0bc3f79bd78f9f | |
parent | dd32f2784f2276ba00f6cd4532b9050f2fe1a995 (diff) |
Topology: Add helper functions for cpu interactions in cluster/core
This patch adds the following functions/macros:
cpu_to_cluster(cpuid, topo) maps cpu index into containing cluster,
cpu_to_core(cpuid, topo) maps cpu index into containing core,
core_for_each_cpu(cpu, core) iterates all cpus in a core,
cluster_for_each_core(core, clust) iterates all cores in a cluster,
cluster_for_each_cpu(cpu, clust) iterates all cpus in a cluster and
topo_for_each_cluster(clust, topo) iterates all clusters in a topology.
core_get_least_cstate(core) returns the smallest cstate index of all
cpus in a core.
core_get_highest_freq(core) returns the highest known frequency of
all cpus in a core. C-states are ignored. If the frequency is not
known for any of the cpus, the function returns 0.
get_affected_core_least_cstate(cpuid, topo) and
get_affected_core_highest_freq(cpuid, topo) return results similar to the
two functions just above. The only difference is that they first
map the cpuid into the containing core.
For the above 4 functions/macros, there are cluster variants:
cluster_get_least_cstate(clust),
cluster_get_highest_freq(clust),
get_affected_cluster_least_cstate(cpuid, topo) and
get_affected_cluster_highest_freq(cpuid, topo).
Signed-off-by: Tuukka Tikkanen <tuukka.tikkanen@linaro.org>
-rw-r--r-- | topology.c | 138 | ||||
-rw-r--r-- | topology.h | 31 |
2 files changed, 153 insertions, 16 deletions
@@ -95,14 +95,14 @@ int add_topo_info(struct cpu_topology *topo_list, struct topology_info *info) s_phy->core_num = 0; s_phy->physical_id = info->physical_id; INIT_LIST_HEAD(&s_phy->core_head); + INIT_LIST_HEAD(&s_phy->cpu_enum_head); ptr = check_pos_from_head(&topo_list->physical_head, s_phy->physical_id); list_add_tail(&s_phy->list_physical, ptr); topo_list->physical_num++; } else { - s_phy = list_entry(ptr, struct cpu_physical, - list_physical); + s_phy = list_entry(ptr, struct cpu_physical, list_physical); } /* add cpu core info */ @@ -117,8 +117,7 @@ int add_topo_info(struct cpu_topology *topo_list, struct topology_info *info) s_core->core_id = info->core_id; INIT_LIST_HEAD(&s_core->cpu_head); - ptr = check_pos_from_head(&s_phy->core_head, - s_core->core_id); + ptr = check_pos_from_head(&s_phy->core_head, s_core->core_id); list_add_tail(&s_core->list_core, ptr); s_phy->core_num++; @@ -127,30 +126,63 @@ int add_topo_info(struct cpu_topology *topo_list, struct topology_info *info) } /* add cpu info */ - ptr = check_exist_from_head(&s_core->cpu_head, info->cpu_id); - if (!ptr) { - s_cpu = calloc(sizeof(struct cpu_cpu), 1); - if (!s_cpu) - return -1; + if (check_exist_from_head(&s_core->cpu_head, info->cpu_id) != NULL) + return 0; - s_cpu->cpu_id = info->cpu_id; + s_cpu = calloc(sizeof(struct cpu_cpu), 1); + if (!s_cpu) + return -1; - ptr = check_pos_from_head(&s_core->cpu_head, s_cpu->cpu_id); - list_add_tail(&s_cpu->list_cpu, ptr); - s_core->cpu_num++; - if (s_core->cpu_num > 1) - s_core->is_ht = true; - } + s_cpu->cpu_id = info->cpu_id; + + ptr = check_pos_from_head(&s_core->cpu_head, s_cpu->cpu_id); + list_add_tail(&s_cpu->list_cpu, ptr); + s_core->cpu_num++; + if (s_core->cpu_num > 1) + s_core->is_ht = true; + + /* Assumption: Same cpuid cannot exist in 2 different cores */ + assert(!check_exist_from_head(&s_phy->cpu_enum_head, info->cpu_id)); + + /* Add to the list (really a set) of all contained cpus in s_phy */ + list_add_tail(&s_cpu->list_phy_enum, 
&s_phy->cpu_enum_head); return 0; } +struct cpu_physical *cpu_to_cluster(int cpuid, struct cpu_topology *topo) +{ + struct cpu_physical *phy; + struct cpu_cpu *cpu; + + topo_for_each_cluster(phy, topo) + cluster_for_each_cpu(cpu, phy) + if (cpu->cpu_id == cpuid) + return phy; + return NULL; +} + +struct cpu_core *cpu_to_core(int cpuid, struct cpu_topology *topo) +{ + struct cpu_physical *phy; + struct cpu_core *core; + struct cpu_cpu *cpu; + + topo_for_each_cluster(phy, topo) + cluster_for_each_core(core, phy) + core_for_each_cpu(cpu, core) + if (cpu->cpu_id == cpuid) + return core; + return NULL; +} + void free_cpu_cpu_list(struct list_head *head) { struct cpu_cpu *lcpu, *n; list_for_each_entry_safe(lcpu, n, head, list_cpu) { list_del(&lcpu->list_cpu); + list_del(&lcpu->list_phy_enum); free(lcpu); } } @@ -559,3 +591,77 @@ int release_cpu_topo_cstates(struct cpu_topology *topo) return 0; } + +int cluster_get_least_cstate(struct cpu_physical *clust) +{ + struct cpu_cpu *cpu; + int cpu_cstate; + int ret = MAXCSTATE; + + cluster_for_each_cpu(cpu, clust) { + cpu_cstate = cpu->cstates->current_cstate; + if (cpu_cstate < ret) + ret = cpu_cstate; + } + return ret; +} + +int cluster_get_highest_freq(struct cpu_physical *clust) +{ + struct cpu_cpu *cpu; + int cpu_pstate_index; + unsigned int cpu_freq; + unsigned int ret = ~0; + + cluster_for_each_cpu(cpu, clust) { + cpu_pstate_index = cpu->pstates->current; + if (cpu_pstate_index < 0) + continue; + cpu_freq = cpu->pstates->pstate[cpu_pstate_index].freq; + if (cpu_freq < ret) + ret = cpu_freq; + } + + /* It is possible we don't know anything near the start of trace */ + if (ret == ~0) + ret = 0; + + return ret; +} + +int core_get_least_cstate(struct cpu_core *core) +{ + struct cpu_cpu *cpu; + int cpu_cstate; + int ret = MAXCSTATE; + + core_for_each_cpu(cpu, core) { + cpu_cstate = cpu->cstates->current_cstate; + if (cpu_cstate < ret) + ret = cpu_cstate; + } + return ret; +} + +int core_get_highest_freq(struct cpu_core *core) 
+{ + struct cpu_cpu *cpu; + int cpu_pstate_index; + unsigned int cpu_freq; + unsigned int ret = ~0; + + core_for_each_cpu(cpu, core) { + cpu_pstate_index = cpu->pstates->current; + if (cpu_pstate_index < 0) + continue; + cpu_freq = cpu->pstates->pstate[cpu_pstate_index].freq; + if (cpu_freq < ret) + ret = cpu_freq; + } + + /* It is possible we don't know anything near the start of trace */ + if (ret == ~0) + ret = 0; + + return ret; +} @@ -36,6 +36,7 @@ struct report_ops; struct cpu_cpu { struct list_head list_cpu; int cpu_id; + struct list_head list_phy_enum; struct cpuidle_cstates *cstates; struct cpufreq_pstates *pstates; struct cpuidle_cstates *base_cstates; @@ -56,6 +57,7 @@ struct cpu_physical { int physical_id; struct list_head core_head; int core_num; + struct list_head cpu_enum_head; struct cpuidle_cstates *cstates; }; @@ -77,4 +79,33 @@ extern struct cpuidle_cstates *core_cluster_data(struct cpu_core *s_core); extern struct cpuidle_cstates * physical_cluster_data(struct cpu_physical *s_phy); +extern struct cpu_physical *cpu_to_cluster(int cpuid, struct cpu_topology *topo); +extern struct cpu_core *cpu_to_core(int cpuid, struct cpu_topology *topo); + +#define core_for_each_cpu(cpu, core) \ + list_for_each_entry(cpu, &core->cpu_head, list_cpu) + +#define cluster_for_each_core(core, clust) \ + list_for_each_entry(core, &clust->core_head, list_core) + +#define cluster_for_each_cpu(cpu, clust) \ + list_for_each_entry(cpu, &clust->cpu_enum_head, list_phy_enum) + +#define topo_for_each_cluster(clust, topo) \ + list_for_each_entry(clust, &topo->physical_head, list_physical) + +extern int cluster_get_least_cstate(struct cpu_physical *clust); +extern int cluster_get_highest_freq(struct cpu_physical *clust); +#define get_affected_cluster_least_cstate(cpuid, topo) \ + cluster_get_least_cstate(cpu_to_cluster(cpuid, topo)) +#define get_affected_cluster_highest_freq(cpuid, topo) \ + cluster_get_highest_freq(cpu_to_cluster(cpuid, topo)) + +extern int 
core_get_least_cstate(struct cpu_core *core); +extern int core_get_highest_freq(struct cpu_core *core); +#define get_affected_core_least_cstate(cpuid, topo) \ + core_get_least_cstate(cpu_to_core(cpuid, topo)) +#define get_affected_core_highest_freq(cpuid, topo) \ + core_get_highest_freq(cpu_to_core(cpuid, topo)) + #endif |