/*
 * Copyright (c) 2012, ARM Limited. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with
 * or without modification, are permitted provided that the
 * following conditions are met:
 *
 * Redistributions of source code must retain the above
 * copyright notice, this list of conditions and the
 * following disclaimer.
 *
 * Redistributions in binary form must reproduce the
 * above copyright notice, this list of conditions and
 * the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its
 * contributors may be used to endorse or promote products
 * derived from this software without specific prior written
 * permission.
 */

#include "misc.h"
#include "virt_helpers.h"
#include "context.h"

/* Size (in words) of the per-cpu saved PMU register context. */
#define REGS		32

/*
 * Indices into a per-cpu PMU context array (clusters_ctx[...][...] /
 * migration_ctx[...]).  Event counter n uses the PMXEVTYPEn/PMXEVCNTn
 * pair at (PMXEVTYPE0_IDX + 2 * n) / (PMXEVCNT0_IDX + 2 * n).
 */
#define PMCR_IDX	0
#define PMSELR_IDX	1
#define PMCNTENSET_IDX	2
#define PMCNTENCLR_IDX	3
#define PMCCNTR_IDX	4
#define PMOVSR_IDX	5
#define PMINTENSET_IDX	6
#define PMINTENCLR_IDX	7
#define PMXEVTYPE0_IDX	8
#define PMXEVCNT0_IDX	9
#define PMXEVTYPE1_IDX	10
#define PMXEVCNT1_IDX	11
#define PMXEVTYPE2_IDX	12
#define PMXEVCNT2_IDX	13
#define PMXEVTYPE3_IDX	14
#define PMXEVCNT3_IDX	15

/* Shadow PMU context for each (cluster, cpu): holds the PMU state of the
 * cluster a cpu is NOT currently executing on. */
unsigned int clusters_ctx[MAX_CLUSTERS][MAX_CORES][REGS];
/* PMU context carried across a cluster migration (PMU_STATE1). */
unsigned int migration_ctx[MAX_CORES][REGS];

/*
 * Defines for PMU states
 */
static int pmu_mode = PMU_STATE0;
/* Number of event counters exposed to the guest (programmed into
 * HDCR.HPMN by set_pmu_vcnt). */
static int pmu_counters;

/* Maximum number of counter descriptors in a sync buffer. */
#define ENTRIES 15

/*
 * One entry of the HVC_PMU_SYNC_PMU_COUNTERS buffer: the first entry is
 * a header, each following entry describes one counter request.
 */
struct descriptor {
	union {
		struct header_ {
			unsigned int entries;
			unsigned int active_cluster_id;
		} header;
		struct counter_ {
			unsigned int cluster_id;
			unsigned int selected_counter;
			unsigned int event_type;
			unsigned int counter_value;
			unsigned int reset_value;
			unsigned int request_code;
		} counter;
	} u;
};

enum {
	PMU_CLUSTER_A15 = 0x00,
	PMU_CLUSTER_A7 = 0x01,
};

enum {
	PMU_CNT_CYCLE_COUNTER = 0x00,
	PMU_CNT_OVERFLOW_FLAG = 0x01,
	PMU_CNT_EVENT_COUNTER_0 = 0x02,
	PMU_CNT_EVENT_COUNTER_1 = 0x03,
	PMU_CNT_EVENT_COUNTER_2 = 0x04,
	PMU_CNT_EVENT_COUNTER_3 = 0x05,
	PMU_CNT_EVENT_COUNTER_4 = 0x06,
	PMU_CNT_EVENT_COUNTER_5 = 0x07,
};

enum {
	PMU_REQ_DISABLE_COUNTER = 0x01,
	PMU_REQ_CONF_COUNTER = 0x02,
	PMU_REQ_CONF_RESET_COUNTER = 0x03,
	PMU_REQ_READ_COUNTER = 0x04,
	PMU_REQ_READ_RESET_COUNTER = 0x05,
};

/*
 * Program HDCR.HPMN with the number of PMU event counters visible to
 * the guest, and remember the value so shadow PMCR words can be
 * synthesised with a matching counter count.
 */
void set_pmu_vcnt(unsigned vcnts)
{
#define HDCR_HPMN_MASK 0x1F
	unsigned long hdcr = read_hdcr();

	hdcr = (hdcr & ~HDCR_HPMN_MASK) | vcnts;
	pmu_counters = vcnts;
	write_hdcr(hdcr);
#undef HDCR_HPMN_MASK
}

/*
 * Switch the PMU virtualization mode:
 *  PMU_STATE0 - PMU accesses trapped (HDCR.TPM set), virtualization off.
 *  PMU_STATE1 - PMU accesses untrapped; context follows cpu migration.
 *  PMU_STATE2 - PMU accesses trapped and emulated against the shadow
 *               per-cluster context (see handle_pmu).
 */
void set_pmu_state(unsigned new)
{
#define HDCR_TPM	(1 << 6)
#define HDCR_TPMCR	(1 << 5)
	unsigned long hdcr;

	switch (new) {
	case PMU_STATE0:
		hdcr = read_hdcr();
		hdcr |= HDCR_TPM;
		write_hdcr(hdcr);
		pmu_mode = PMU_STATE0;
		break;
	case PMU_STATE1:
		hdcr = read_hdcr();
		hdcr &= ~HDCR_TPM;
		write_hdcr(hdcr);
		pmu_mode = PMU_STATE1;
		break;
	case PMU_STATE2:
		hdcr = read_hdcr();
		hdcr |= HDCR_TPM;
		write_hdcr(hdcr);
		pmu_mode = PMU_STATE2;
		break;
	default:
		break;
	}
#undef HDCR_TPM
#undef HDCR_TPMCR
}

/*
 * Service one counter descriptor of a sync buffer.
 *
 * If the descriptor targets the cluster this cpu is currently running
 * on, operate on the real PMU registers; otherwise operate on the
 * shadow context (clusters_ctx) of the target cluster.  Read requests
 * write their results back into *desc.
 */
static void handle_desc(struct descriptor *desc, unsigned cluster_id,
			unsigned cpu_id)
{
	unsigned entry_cluster = desc->u.counter.cluster_id;
	unsigned selected_counter = desc->u.counter.selected_counter;
	unsigned event_type = desc->u.counter.event_type;
	unsigned reset_value = desc->u.counter.reset_value;
	unsigned request_code = desc->u.counter.request_code;
	unsigned tmp = 0;

	switch (request_code) {
	case PMU_REQ_DISABLE_COUNTER:
		if (cluster_id == entry_cluster) {
			switch (selected_counter) {
			case PMU_CNT_CYCLE_COUNTER:
				/* Bit 31 of PMCNTEN* is the cycle counter. */
				write_pmcntenclr(1UL << 31);
				break;
			case PMU_CNT_OVERFLOW_FLAG:
				/* Can't disable overflow flags. */
				break;
			case PMU_CNT_EVENT_COUNTER_0:
			case PMU_CNT_EVENT_COUNTER_1:
			case PMU_CNT_EVENT_COUNTER_2:
			case PMU_CNT_EVENT_COUNTER_3:
				selected_counter -= PMU_CNT_EVENT_COUNTER_0;
				write_pmcntenclr(1UL << selected_counter);
				break;
			default:
				break;
			}
		} else {
			switch (selected_counter) {
			case PMU_CNT_CYCLE_COUNTER:
				clusters_ctx[entry_cluster][cpu_id]
					[PMCNTENSET_IDX] &= 0x7FFFFFFF;
				break;
			case PMU_CNT_OVERFLOW_FLAG:
				/* Can't disable overflow flags. */
				break;
			case PMU_CNT_EVENT_COUNTER_0:
			case PMU_CNT_EVENT_COUNTER_1:
			case PMU_CNT_EVENT_COUNTER_2:
			case PMU_CNT_EVENT_COUNTER_3:
				selected_counter -= PMU_CNT_EVENT_COUNTER_0;
				clusters_ctx[entry_cluster][cpu_id]
					[PMCNTENSET_IDX] &=
					~(1 << selected_counter);
				break;
			default:
				break;
			}
		}
		break;
	case PMU_REQ_CONF_COUNTER:
		if (cluster_id == entry_cluster) {
			/* Toggle global enable bit. */
			tmp = read_pmcr();
			tmp |= 1;
			write_pmcr(tmp);
			switch (selected_counter) {
			case PMU_CNT_CYCLE_COUNTER:
				write_pmcntenset(1UL << 31);
				break;
			case PMU_CNT_OVERFLOW_FLAG:
				/* Can't configure overflow flags. */
				break;
			case PMU_CNT_EVENT_COUNTER_0:
			case PMU_CNT_EVENT_COUNTER_1:
			case PMU_CNT_EVENT_COUNTER_2:
			case PMU_CNT_EVENT_COUNTER_3:
				selected_counter -= PMU_CNT_EVENT_COUNTER_0;
				write_pmselr(selected_counter);
				write_pmxevtyper(event_type);
				write_pmcntenset(1UL << selected_counter);
				break;
			default:
				break;
			}
		} else {
			/* Shadow PMCR: counter count in N field + enable. */
			clusters_ctx[entry_cluster][cpu_id][PMCR_IDX] |=
				(pmu_counters << 11) | 1;
			switch (selected_counter) {
			case PMU_CNT_CYCLE_COUNTER:
				clusters_ctx[entry_cluster][cpu_id]
					[PMCNTENSET_IDX] |= 0x80000000;
				break;
			case PMU_CNT_OVERFLOW_FLAG:
				/* Can't configure overflow flags. */
				break;
			case PMU_CNT_EVENT_COUNTER_0:
			case PMU_CNT_EVENT_COUNTER_1:
			case PMU_CNT_EVENT_COUNTER_2:
			case PMU_CNT_EVENT_COUNTER_3:
				selected_counter -= PMU_CNT_EVENT_COUNTER_0;
				clusters_ctx[entry_cluster][cpu_id]
					[PMXEVTYPE0_IDX +
					 (selected_counter * 2)] = event_type;
				clusters_ctx[entry_cluster][cpu_id]
					[PMCNTENSET_IDX] |=
					(1 << selected_counter);
				break;
			default:
				break;
			}
		}
		break;
	case PMU_REQ_CONF_RESET_COUNTER:
		if (cluster_id == entry_cluster) {
			/* Toggle global enable bit. */
			tmp = read_pmcr();
			tmp |= 1;
			write_pmcr(tmp);
			switch (selected_counter) {
			case PMU_CNT_CYCLE_COUNTER:
				write_pmccntr(reset_value);
				write_pmcntenset(1UL << 31);
				break;
			case PMU_CNT_OVERFLOW_FLAG:
				/* Can't configure overflow flags. */
				break;
			case PMU_CNT_EVENT_COUNTER_0:
			case PMU_CNT_EVENT_COUNTER_1:
			case PMU_CNT_EVENT_COUNTER_2:
			case PMU_CNT_EVENT_COUNTER_3:
				selected_counter -= PMU_CNT_EVENT_COUNTER_0;
				write_pmselr(selected_counter);
				write_pmxevtyper(event_type);
				write_pmxevcntr(reset_value);
				write_pmcntenset(1UL << selected_counter);
				break;
			default:
				break;
			}
		} else {
			clusters_ctx[entry_cluster][cpu_id][PMCR_IDX] |=
				(pmu_counters << 11) | 1;
			switch (selected_counter) {
			case PMU_CNT_CYCLE_COUNTER:
				clusters_ctx[entry_cluster][cpu_id]
					[PMCCNTR_IDX] = reset_value;
				clusters_ctx[entry_cluster][cpu_id]
					[PMCNTENSET_IDX] |= 0x80000000;
				break;
			case PMU_CNT_OVERFLOW_FLAG:
				break;
			case PMU_CNT_EVENT_COUNTER_0:
			case PMU_CNT_EVENT_COUNTER_1:
			case PMU_CNT_EVENT_COUNTER_2:
			case PMU_CNT_EVENT_COUNTER_3:
				selected_counter -= PMU_CNT_EVENT_COUNTER_0;
				clusters_ctx[entry_cluster][cpu_id]
					[PMXEVTYPE0_IDX +
					 (selected_counter * 2)] = event_type;
				clusters_ctx[entry_cluster][cpu_id]
					[PMXEVCNT0_IDX +
					 (selected_counter * 2)] = reset_value;
				clusters_ctx[entry_cluster][cpu_id]
					[PMCNTENSET_IDX] |=
					(1 << selected_counter);
				break;
			default:
				break;
			}
		}
		break;
	case PMU_REQ_READ_COUNTER:
		if (cluster_id == entry_cluster) {
			switch (selected_counter) {
			case PMU_CNT_CYCLE_COUNTER:
				desc->u.counter.counter_value = read_pmccntr();
				break;
			case PMU_CNT_OVERFLOW_FLAG:
				desc->u.counter.counter_value = read_pmovsr();
				break;
			case PMU_CNT_EVENT_COUNTER_0:
			case PMU_CNT_EVENT_COUNTER_1:
			case PMU_CNT_EVENT_COUNTER_2:
			case PMU_CNT_EVENT_COUNTER_3:
				selected_counter -= PMU_CNT_EVENT_COUNTER_0;
				write_pmselr(selected_counter);
				desc->u.counter.event_type = read_pmxevtyper();
				desc->u.counter.counter_value =
					read_pmxevcntr();
				break;
			default:
				break;
			}
		} else {
			switch (selected_counter) {
			case PMU_CNT_CYCLE_COUNTER:
				desc->u.counter.counter_value =
					clusters_ctx[entry_cluster][cpu_id]
					[PMCCNTR_IDX];
				break;
			case PMU_CNT_OVERFLOW_FLAG:
				desc->u.counter.counter_value =
					clusters_ctx[entry_cluster][cpu_id]
					[PMOVSR_IDX];
				break;
			case PMU_CNT_EVENT_COUNTER_0:
			case PMU_CNT_EVENT_COUNTER_1:
			case PMU_CNT_EVENT_COUNTER_2:
			case PMU_CNT_EVENT_COUNTER_3:
				selected_counter -= PMU_CNT_EVENT_COUNTER_0;
				desc->u.counter.event_type =
					clusters_ctx[entry_cluster][cpu_id]
					[PMXEVTYPE0_IDX +
					 (selected_counter * 2)];
				desc->u.counter.counter_value =
					clusters_ctx[entry_cluster][cpu_id]
					[PMXEVCNT0_IDX +
					 (selected_counter * 2)];
				break;
			default:
				break;
			}
		}
		break;
	case PMU_REQ_READ_RESET_COUNTER:
		if (cluster_id == entry_cluster) {
			switch (selected_counter) {
			case PMU_CNT_CYCLE_COUNTER:
				desc->u.counter.counter_value = read_pmccntr();
				write_pmccntr(reset_value);
				break;
			case PMU_CNT_OVERFLOW_FLAG:
				desc->u.counter.counter_value = read_pmovsr();
				write_pmovsr(reset_value);
				break;
			case PMU_CNT_EVENT_COUNTER_0:
			case PMU_CNT_EVENT_COUNTER_1:
			case PMU_CNT_EVENT_COUNTER_2:
			case PMU_CNT_EVENT_COUNTER_3:
				selected_counter -= PMU_CNT_EVENT_COUNTER_0;
				write_pmselr(selected_counter);
				desc->u.counter.event_type = read_pmxevtyper();
				desc->u.counter.counter_value =
					read_pmxevcntr();
				write_pmxevcntr(reset_value);
				break;
			default:
				break;
			}
		} else {
			switch (selected_counter) {
			case PMU_CNT_CYCLE_COUNTER:
				desc->u.counter.counter_value =
					clusters_ctx[entry_cluster][cpu_id]
					[PMCCNTR_IDX];
				/*
				 * Bug fixes: the reset must target the
				 * remote cluster's shadow context
				 * (entry_cluster, not cluster_id), and a
				 * break was missing here, so control fell
				 * through and clobbered counter_value with
				 * the overflow flags.
				 */
				clusters_ctx[entry_cluster][cpu_id]
					[PMCCNTR_IDX] = reset_value;
				break;
			case PMU_CNT_OVERFLOW_FLAG:
				desc->u.counter.counter_value =
					clusters_ctx[entry_cluster][cpu_id]
					[PMOVSR_IDX];
				clusters_ctx[entry_cluster][cpu_id]
					[PMOVSR_IDX] = reset_value;
				break;
			case PMU_CNT_EVENT_COUNTER_0:
			case PMU_CNT_EVENT_COUNTER_1:
			case PMU_CNT_EVENT_COUNTER_2:
			case PMU_CNT_EVENT_COUNTER_3:
				selected_counter -= PMU_CNT_EVENT_COUNTER_0;
				desc->u.counter.event_type =
					clusters_ctx[entry_cluster][cpu_id]
					[PMXEVTYPE0_IDX +
					 (selected_counter * 2)];
				desc->u.counter.counter_value =
					clusters_ctx[entry_cluster][cpu_id]
					[PMXEVCNT0_IDX +
					 (selected_counter * 2)];
				clusters_ctx[entry_cluster][cpu_id]
					[PMXEVCNT0_IDX +
					 (selected_counter * 2)] = reset_value;
				break;
			default:
				break;
			}
		}
		break;
	default:
		break;
	}
}
unsigned handle_pmu(unsigned opcode, unsigned first, unsigned second) { unsigned cluster_id = read_clusterid(); unsigned cpu_id = read_cpuid(); unsigned ret = 0; unsigned tmp; struct descriptor *desc; if ((pmu_mode != PMU_STATE2) && (opcode != HVC_PMU_SWITCH)) return 0; switch (opcode) { case HVC_PMU_PMCR_READ: if (cluster_id == first) ret = read_pmcr(); else ret = clusters_ctx[first][cpu_id][PMCR_IDX]; break; case HVC_PMU_PMCR_WRITE: if (cluster_id == first) write_pmcr(second); else clusters_ctx[first][cpu_id][PMCR_IDX] = second; break; case HVC_PMU_PMSELR_READ: if (cluster_id == first) ret = read_pmselr(); else ret = clusters_ctx[first][cpu_id][PMSELR_IDX]; break; case HVC_PMU_PMSELR_WRITE: if (cluster_id == first) write_pmselr(second); else clusters_ctx[first][cpu_id][PMSELR_IDX] = second; break; case HVC_PMU_PMXEVTYPER_READ: if (cluster_id == first) { ret = read_pmxevtyper(); } else { tmp = clusters_ctx[first][cpu_id][PMSELR_IDX]; ret = clusters_ctx[first] [cpu_id] [PMXEVTYPE0_IDX + (tmp * 2)]; } break; case HVC_PMU_PMXEVTYPER_WRITE: if (cluster_id == first) { write_pmxevtyper(second); } else { tmp = clusters_ctx[first][cpu_id][PMSELR_IDX]; clusters_ctx[first] [cpu_id] [PMXEVTYPE0_IDX + (tmp * 2)] = second; } break; case HVC_PMU_PMCNTENSET_READ: if (cluster_id == first) ret = read_pmcntenset(); else ret = clusters_ctx[first][cpu_id][PMCNTENSET_IDX]; break; case HVC_PMU_PMCNTENSET_WRITE: if (cluster_id == first) write_pmcntenset(second); else clusters_ctx[first][cpu_id][PMCNTENSET_IDX] = second; break; case HVC_PMU_PMCNTENCLR_READ: if (cluster_id == first) ret = read_pmcntenclr(); else ret = clusters_ctx[first][cpu_id][PMCNTENCLR_IDX]; break; case HVC_PMU_PMCNTENCLR_WRITE: if (cluster_id == first) write_pmcntenclr(second); else clusters_ctx[first][cpu_id][PMCNTENCLR_IDX] = second; break; case HVC_PMU_PMCCNTR_READ: if (cluster_id == first) ret = read_pmccntr(); else ret = clusters_ctx[first][cpu_id][PMCCNTR_IDX]; break; case HVC_PMU_PMCCNTR_WRITE: if (cluster_id 
== first) write_pmccntr(second); else clusters_ctx[first][cpu_id][PMCCNTR_IDX] = second; break; case HVC_PMU_PMOVSR_READ: if (cluster_id == first) ret = read_pmovsr(); else ret = clusters_ctx[first][cpu_id][PMOVSR_IDX]; break; case HVC_PMU_PMOVSR_WRITE: if (cluster_id == first) write_pmovsr(second); else clusters_ctx[first][cpu_id][PMOVSR_IDX] = second; break; case HVC_PMU_PMXEVCNTR_READ: if (cluster_id == first) { ret = read_pmxevcntr(); } else { tmp = clusters_ctx[first][cpu_id][PMSELR_IDX]; ret = clusters_ctx[first] [cpu_id] [PMXEVCNT0_IDX + (tmp * 2)]; } break; case HVC_PMU_PMXEVCNTR_WRITE: if (cluster_id == first) { write_pmxevcntr(second); } else { tmp = clusters_ctx[first][cpu_id][PMSELR_IDX]; clusters_ctx[first] [cpu_id] [PMXEVCNT0_IDX + (tmp * 2)] = second; } break; case HVC_PMU_PMINTENSET_READ: if (cluster_id == first) ret = read_pmintenset(); else ret = clusters_ctx[first][cpu_id][PMINTENSET_IDX]; break; case HVC_PMU_PMINTENSET_WRITE: if (cluster_id == first) write_pmintenset(second); else clusters_ctx[first][cpu_id][PMINTENSET_IDX] = second; break; case HVC_PMU_PMINTENCLR_READ: if (cluster_id == first) ret = read_pmintenclr(); else ret = clusters_ctx[first][cpu_id][PMINTENCLR_IDX]; break; case HVC_PMU_PMINTENCLR_WRITE: if (cluster_id == first) write_pmintenclr(second); else clusters_ctx[first][cpu_id][PMINTENCLR_IDX] = second; break; case HVC_PMU_SWITCH: if (first) set_pmu_state(PMU_STATE2); else set_pmu_state(PMU_STATE0); break; case HVC_PMU_GET_COUNTERS_SIZE: ret = sizeof(struct descriptor) * ENTRIES; break; case HVC_PMU_SYNC_PMU_COUNTERS: { int i; int entries; unsigned int *pentries; desc = (struct descriptor *)first; pentries = &desc->u.header.entries; entries = *pentries; desc->u.header.active_cluster_id = cluster_id; for (i = 0, desc++; i < entries; i++, desc++) { handle_desc(desc, cluster_id, cpu_id); } } break; } return ret; } void save_pmu_context(unsigned cluster_id, unsigned cpu_id) { switch (pmu_mode) { case PMU_STATE1: 
save_performance_monitors(migration_ctx[cpu_id]); break; case PMU_STATE2: save_performance_monitors(clusters_ctx[cluster_id][cpu_id]); break; case PMU_STATE0: default: break; }; } void restore_pmu_context(unsigned cluster_id, unsigned cpu_id) { switch (pmu_mode) { case PMU_STATE1: restore_performance_monitors(migration_ctx[cpu_id]); break; case PMU_STATE2: restore_performance_monitors(clusters_ctx[cluster_id][cpu_id]); break; case PMU_STATE0: default: break; }; }