diff options
author | Giuseppe Calderaro <giuseppe.calderaro@arm.com> | 2012-05-03 17:39:18 +0100 |
---|---|---|
committer | Dietmar Eggemann <dietmar.eggemann@arm.com> | 2012-05-23 12:44:34 +0100 |
commit | 44f2df4f1dd3bb520bcd26c59fac091a7d1af4ab (patch) | |
tree | 72c4d40cda4d38c985ce7f9b3e1b98626f485b88 /big-little/virtualisor | |
parent | de800aa53df31a5048d3b7f9740300f3ed87419b (diff) |
Completed HVC interface implementation.
Completed PMU state handling.
Diffstat (limited to 'big-little/virtualisor')
-rw-r--r-- | big-little/virtualisor/pmu_trap_handler.c | 773 | ||||
-rw-r--r-- | big-little/virtualisor/virt_handle.c | 69 |
2 files changed, 492 insertions, 350 deletions
diff --git a/big-little/virtualisor/pmu_trap_handler.c b/big-little/virtualisor/pmu_trap_handler.c index 9e711d1..2aa28e3 100644 --- a/big-little/virtualisor/pmu_trap_handler.c +++ b/big-little/virtualisor/pmu_trap_handler.c @@ -46,9 +46,6 @@ unsigned int migration_ctx[MAX_CORES][REGS]; /* * Defines for PMU states */ -#define PMU_STATE0 0 -#define PMU_STATE1 1 -#define PMU_STATE2 2 static int pmu_mode = PMU_STATE0; #define ENTRIES 15 @@ -70,440 +67,568 @@ struct descriptor { }; enum { - A15_CLUSTER = 0x00, - A7_CLUSTER = 0x01, + PMU_CLUSTER_A15 = 0x00, + PMU_CLUSTER_A7 = 0x01, }; enum { - PMU_CYCLE_COUNTER = 0x00, - PMU_OVERFLOW_FLAG = 0x01, - PMU_EVENT_COUNTER_0 = 0x02, - PMU_EVENT_COUNTER_1 = 0x03, - PMU_EVENT_COUNTER_2 = 0x04, - PMU_EVENT_COUNTER_3 = 0x05, - PMU_EVENT_COUNTER_4 = 0x06, - PMU_EVENT_COUNTER_5 = 0x07, + PMU_CNT_CYCLE_COUNTER = 0x00, + PMU_CNT_OVERFLOW_FLAG = 0x01, + PMU_CNT_EVENT_COUNTER_0 = 0x02, + PMU_CNT_EVENT_COUNTER_1 = 0x03, + PMU_CNT_EVENT_COUNTER_2 = 0x04, + PMU_CNT_EVENT_COUNTER_3 = 0x05, + PMU_CNT_EVENT_COUNTER_4 = 0x06, + PMU_CNT_EVENT_COUNTER_5 = 0x07, }; enum { - PMU_DISABLE_COUNTER = 0x01, - PMU_CONF_COUNTER = 0x02, - PMU_CONF_RESET_COUNTER = 0x03, - PMU_READ_COUNTER = 0x04, - PMU_READ_RESET_COUNTER = 0x05, + PMU_REQ_DISABLE_COUNTER = 0x01, + PMU_REQ_CONF_COUNTER = 0x02, + PMU_REQ_CONF_RESET_COUNTER = 0x03, + PMU_REQ_READ_COUNTER = 0x04, + PMU_REQ_READ_RESET_COUNTER = 0x05, }; +void set_pmu_vcnt(unsigned vcnts) +{ +#define HDCR_HPMN_MASK 0x1F + + unsigned long hdcr = read_hdcr(); + hdcr |= vcnts & HDCR_HPMN_MASK; + write_hdcr(hdcr); + +#undef HDCR_HPMN_MASK +} + void set_pmu_state(unsigned new) { +#define HDCR_TPM (1 << 6) +#define HDCR_TPMCR (1 << 5) + + unsigned long hdcr; + switch (new) { case PMU_STATE0: - // Enable traps + hdcr = read_hdcr(); + hdcr |= HDCR_TPM; + write_hdcr(hdcr); pmu_mode = PMU_STATE0; break; case PMU_STATE1: - // Disable traps + hdcr = read_hdcr(); + hdcr &= ~HDCR_TPM; + write_hdcr(hdcr); pmu_mode = 
PMU_STATE1; break; case PMU_STATE2: - // Enable traps + hdcr = read_hdcr(); + hdcr |= HDCR_TPM; + write_hdcr(hdcr); pmu_mode = PMU_STATE2; break; default: break; } + +#undef HDCR_TPM +#undef HDCR_TPMCR +} + +static void handle_desc(struct descriptor *desc, + unsigned cluster_id, + unsigned cpu_id) +{ + unsigned entry_cluster = desc->u.counter.cluster_id; + unsigned selected_counter = desc->u.counter.selected_counter; + unsigned event_type = desc->u.counter.event_type; + unsigned reset_value = desc->u.counter.reset_value; + unsigned request_code = desc->u.counter.request_code; + unsigned tmp = 0; + + switch (request_code) { + case PMU_REQ_DISABLE_COUNTER: + if (cluster_id == entry_cluster) { + switch (selected_counter) { + case PMU_CNT_CYCLE_COUNTER: + write_pmcntenclr(1UL << 31); + break; + case PMU_CNT_OVERFLOW_FLAG: + /* Can't disable overflow flags. */ + break; + case PMU_CNT_EVENT_COUNTER_0: + case PMU_CNT_EVENT_COUNTER_1: + case PMU_CNT_EVENT_COUNTER_2: + case PMU_CNT_EVENT_COUNTER_3: + selected_counter -= PMU_CNT_EVENT_COUNTER_0; + write_pmcntenclr(1UL << (selected_counter)); + break; + default: + break; + }; + } else { + switch (selected_counter) { + case PMU_CNT_CYCLE_COUNTER: + clusters_ctx + [cluster_id] + [cpu_id] + [PMCNTENSET_IDX] &= 0x7FFFFFFF; + break; + case PMU_CNT_OVERFLOW_FLAG: + /* Can't disable overflow flags. */ + break; + case PMU_CNT_EVENT_COUNTER_0: + case PMU_CNT_EVENT_COUNTER_1: + case PMU_CNT_EVENT_COUNTER_2: + case PMU_CNT_EVENT_COUNTER_3: + selected_counter -= PMU_CNT_EVENT_COUNTER_0; + clusters_ctx + [cluster_id] + [cpu_id] + [PMCNTENSET_IDX] &= + ~(1 << selected_counter); + break; + default: + break; + }; + } + break; + case PMU_REQ_CONF_COUNTER: + if (cluster_id == entry_cluster) { + /* Toggle global enable bit. */ + tmp = read_pmcr(); + tmp |= 1; + write_pmcr(tmp); + + switch (selected_counter) { + case PMU_CNT_CYCLE_COUNTER: + write_pmcntenset(1UL << 31); + break; + case PMU_CNT_OVERFLOW_FLAG: + /* Can't configure overflow flags. 
*/ + break; + case PMU_CNT_EVENT_COUNTER_0: + case PMU_CNT_EVENT_COUNTER_1: + case PMU_CNT_EVENT_COUNTER_2: + case PMU_CNT_EVENT_COUNTER_3: + selected_counter -= PMU_CNT_EVENT_COUNTER_0; + write_pmselr(selected_counter); + write_pmxevtyper(event_type); + write_pmcntenset(1UL << (selected_counter)); + break; + default: + break; + }; + } else { + clusters_ctx[cluster_id][cpu_id][PMCR_IDX] |= 1; + + switch (selected_counter) { + case PMU_CNT_CYCLE_COUNTER: + clusters_ctx + [cluster_id] + [cpu_id] + [PMCNTENSET_IDX] |= 0x80000000; + break; + case PMU_CNT_OVERFLOW_FLAG: + /* Can't configure overflow flags. */ + break; + case PMU_CNT_EVENT_COUNTER_0: + case PMU_CNT_EVENT_COUNTER_1: + case PMU_CNT_EVENT_COUNTER_2: + case PMU_CNT_EVENT_COUNTER_3: + selected_counter -= PMU_CNT_EVENT_COUNTER_0; + clusters_ctx + [cluster_id] + [cpu_id] + [PMXEVTYPE0_IDX + + (selected_counter *2)] = + event_type; + clusters_ctx + [cluster_id] + [cpu_id] + [PMCNTENSET_IDX] |= + (1 << selected_counter); + break; + default: + break; + }; + } + break; + case PMU_REQ_CONF_RESET_COUNTER: + if (cluster_id == entry_cluster) { + switch (selected_counter) { + case PMU_CNT_CYCLE_COUNTER: + write_pmccntr(reset_value); + write_pmcntenset(1UL << 31); + break; + case PMU_CNT_OVERFLOW_FLAG: + /* Can't configure overflow flags. 
*/ + break; + case PMU_CNT_EVENT_COUNTER_0: + case PMU_CNT_EVENT_COUNTER_1: + case PMU_CNT_EVENT_COUNTER_2: + case PMU_CNT_EVENT_COUNTER_3: + selected_counter -= PMU_CNT_EVENT_COUNTER_0; + write_pmselr(selected_counter); + write_pmxevtyper(event_type); + write_pmxevcntr(reset_value); + write_pmcntenset(1UL << (selected_counter)); + break; + default: + break; + }; + } else { + switch (selected_counter) { + case PMU_CNT_CYCLE_COUNTER: + clusters_ctx + [cluster_id] + [cpu_id] + [PMCCNTR_IDX] = reset_value; + clusters_ctx + [cluster_id] + [cpu_id] + [PMCNTENSET_IDX] |= + 0x80000000; + break; + case PMU_CNT_OVERFLOW_FLAG: + break; + case PMU_CNT_EVENT_COUNTER_0: + case PMU_CNT_EVENT_COUNTER_1: + case PMU_CNT_EVENT_COUNTER_2: + case PMU_CNT_EVENT_COUNTER_3: + selected_counter -= PMU_CNT_EVENT_COUNTER_0; + clusters_ctx + [cluster_id] + [cpu_id] + [PMXEVTYPE0_IDX + + (selected_counter * 2)] = event_type; + clusters_ctx + [cluster_id] + [cpu_id] + [PMXEVCNT0_IDX + + (selected_counter * 2)] = reset_value; + clusters_ctx + [cluster_id] + [cpu_id] + [PMCNTENSET_IDX] |= + (1 << selected_counter); + break; + default: + break; + }; + } + break; + case PMU_REQ_READ_COUNTER: + if (cluster_id == entry_cluster) { + switch (selected_counter) { + case PMU_CNT_CYCLE_COUNTER: + desc->u.counter.counter_value = read_pmccntr(); + break; + case PMU_CNT_OVERFLOW_FLAG: + desc->u.counter.counter_value = read_pmovsr(); + break; + case PMU_CNT_EVENT_COUNTER_0: + case PMU_CNT_EVENT_COUNTER_1: + case PMU_CNT_EVENT_COUNTER_2: + case PMU_CNT_EVENT_COUNTER_3: + selected_counter -= PMU_CNT_EVENT_COUNTER_0; + write_pmselr(selected_counter); + desc->u.counter.event_type=read_pmxevtyper(); + desc->u.counter.counter_value=read_pmxevcntr(); + break; + default: + break; + }; + } else { + switch (selected_counter) { + case PMU_CNT_CYCLE_COUNTER: + desc->u.counter.counter_value = + clusters_ctx + [cluster_id] + [cpu_id] + [PMCCNTR_IDX]; + break; + case PMU_CNT_OVERFLOW_FLAG: + desc->u.counter.counter_value = + 
clusters_ctx + [cluster_id] + [cpu_id] + [PMOVSR_IDX]; + break; + case PMU_CNT_EVENT_COUNTER_0: + case PMU_CNT_EVENT_COUNTER_1: + case PMU_CNT_EVENT_COUNTER_2: + case PMU_CNT_EVENT_COUNTER_3: + selected_counter -= PMU_CNT_EVENT_COUNTER_0; + desc->u.counter.event_type = + clusters_ctx + [cluster_id] + [cpu_id] + [PMXEVTYPE0_IDX + + (selected_counter * 2)]; + desc->u.counter.counter_value = + clusters_ctx + [cluster_id] + [cpu_id] + [PMXEVCNT0_IDX + + (selected_counter * 2)]; + break; + default: + break; + }; + } + break; + case PMU_REQ_READ_RESET_COUNTER: + if (cluster_id == entry_cluster) { + switch (selected_counter) { + case PMU_CNT_CYCLE_COUNTER: + desc->u.counter.counter_value = read_pmccntr(); + write_pmccntr(reset_value); + break; + case PMU_CNT_OVERFLOW_FLAG: + desc->u.counter.counter_value = read_pmovsr(); + write_pmovsr(reset_value); + break; + case PMU_CNT_EVENT_COUNTER_0: + case PMU_CNT_EVENT_COUNTER_1: + case PMU_CNT_EVENT_COUNTER_2: + case PMU_CNT_EVENT_COUNTER_3: + selected_counter -= PMU_CNT_EVENT_COUNTER_0; + write_pmselr(selected_counter); + desc->u.counter.event_type=read_pmxevtyper(); + desc->u.counter.counter_value=read_pmxevcntr(); + write_pmxevcntr(reset_value); + break; + default: + break; + } + } else { + switch (selected_counter) { + case PMU_CNT_CYCLE_COUNTER: + desc->u.counter.counter_value = + clusters_ctx + [cluster_id] + [cpu_id] + [PMCCNTR_IDX]; + clusters_ctx + [cluster_id] + [cpu_id] + [PMCCNTR_IDX] = reset_value; + case PMU_CNT_OVERFLOW_FLAG: + desc->u.counter.counter_value = + clusters_ctx + [cluster_id] + [cpu_id] + [PMOVSR_IDX]; + clusters_ctx + [cluster_id] + [cpu_id] + [PMOVSR_IDX] = reset_value; + break; + case PMU_CNT_EVENT_COUNTER_0: + case PMU_CNT_EVENT_COUNTER_1: + case PMU_CNT_EVENT_COUNTER_2: + case PMU_CNT_EVENT_COUNTER_3: + selected_counter -= PMU_CNT_EVENT_COUNTER_0; + desc->u.counter.event_type = + clusters_ctx + [cluster_id] + [cpu_id] + [PMXEVTYPE0_IDX + + (selected_counter * 2)]; + desc->u.counter.counter_value = 
+ clusters_ctx + [cluster_id] + [cpu_id] + [PMXEVCNT0_IDX + + (selected_counter * 2)]; + clusters_ctx + [cluster_id] + [cpu_id] + [PMXEVCNT0_IDX + + (selected_counter * 2)] = reset_value; + break; + default: + break; + } + } + break; + } } unsigned handle_pmu(unsigned opcode, unsigned first, unsigned second) { - unsigned cluster = read_clusterid(); - unsigned cpu = read_cpuid(); + unsigned cluster_id = read_clusterid(); + unsigned cpu_id = read_cpuid(); unsigned ret = 0; unsigned tmp; struct descriptor *desc; - if ((pmu_mode != PMU_STATE2) && (opcode != HVC_SWITCH)) + if ((pmu_mode != PMU_STATE2) && (opcode != HVC_PMU_SWITCH)) return 0; switch (opcode) { - case PMCR_READ: - if (cluster == first) + case HVC_PMU_PMCR_READ: + if (cluster_id == first) ret = read_pmcr(); else - ret = clusters_ctx[first][cpu][PMCR_IDX]; + ret = clusters_ctx[first][cpu_id][PMCR_IDX]; break; - case PMCR_WRITE: - if (cluster == first) + case HVC_PMU_PMCR_WRITE: + if (cluster_id == first) write_pmcr(second); else - clusters_ctx[first][cpu][PMCR_IDX] = second; + clusters_ctx[first][cpu_id][PMCR_IDX] = second; break; - case PMSELR_READ: - if (cluster == first) + case HVC_PMU_PMSELR_READ: + if (cluster_id == first) ret = read_pmselr(); else - ret = clusters_ctx[first][cpu][PMSELR_IDX]; + ret = clusters_ctx[first][cpu_id][PMSELR_IDX]; break; - case PMSELR_WRITE: - if (cluster == first) + case HVC_PMU_PMSELR_WRITE: + if (cluster_id == first) write_pmselr(second); else - clusters_ctx[first][cpu][PMSELR_IDX] = second; + clusters_ctx[first][cpu_id][PMSELR_IDX] = second; break; - case PMXEVTYPER_READ: - if (cluster == first) { + case HVC_PMU_PMXEVTYPER_READ: + if (cluster_id == first) { ret = read_pmxevtyper(); } else { - tmp = clusters_ctx[first][cpu][PMSELR_IDX]; - ret = clusters_ctx[first][cpu][PMXEVTYPE0_IDX + (tmp * 2)]; + tmp = clusters_ctx[first][cpu_id][PMSELR_IDX]; + ret = clusters_ctx + [first] + [cpu_id] + [PMXEVTYPE0_IDX + (tmp * 2)]; } break; - case PMXEVTYPER_WRITE: - if (cluster == 
first) { + case HVC_PMU_PMXEVTYPER_WRITE: + if (cluster_id == first) { write_pmxevtyper(second); } else { - tmp = clusters_ctx[first][cpu][PMSELR_IDX]; - clusters_ctx[first][cpu][PMXEVTYPE0_IDX + (tmp * 2)] = second; + tmp = clusters_ctx[first][cpu_id][PMSELR_IDX]; + clusters_ctx + [first] + [cpu_id] + [PMXEVTYPE0_IDX + (tmp * 2)] = second; } break; - case PMCNTENSET_READ: - if (cluster == first) + case HVC_PMU_PMCNTENSET_READ: + if (cluster_id == first) ret = read_pmcntenset(); else - ret = clusters_ctx[first][cpu][PMCNTENSET_IDX]; + ret = clusters_ctx[first][cpu_id][PMCNTENSET_IDX]; break; - case PMCNTENSET_WRITE: - if (cluster == first) + case HVC_PMU_PMCNTENSET_WRITE: + if (cluster_id == first) write_pmcntenset(second); else - clusters_ctx[first][cpu][PMCNTENSET_IDX] = second; + clusters_ctx[first][cpu_id][PMCNTENSET_IDX] = second; break; - case PMCNTENCLR_READ: - if (cluster == first) + case HVC_PMU_PMCNTENCLR_READ: + if (cluster_id == first) ret = read_pmcntenclr(); else - ret = clusters_ctx[first][cpu][PMCNTENCLR_IDX]; + ret = clusters_ctx[first][cpu_id][PMCNTENCLR_IDX]; break; - case PMCNTENCLR_WRITE: - if (cluster == first) + case HVC_PMU_PMCNTENCLR_WRITE: + if (cluster_id == first) write_pmcntenclr(second); else - clusters_ctx[first][cpu][PMCNTENCLR_IDX] = second; + clusters_ctx[first][cpu_id][PMCNTENCLR_IDX] = second; break; - case PMCCNTR_READ: - if (cluster == first) + case HVC_PMU_PMCCNTR_READ: + if (cluster_id == first) ret = read_pmccntr(); else - ret = clusters_ctx[first][cpu][PMCCNTR_IDX]; + ret = clusters_ctx[first][cpu_id][PMCCNTR_IDX]; break; - case PMCCNTR_WRITE: - if (cluster == first) + case HVC_PMU_PMCCNTR_WRITE: + if (cluster_id == first) write_pmccntr(second); else - clusters_ctx[first][cpu][PMCCNTR_IDX] = second; + clusters_ctx[first][cpu_id][PMCCNTR_IDX] = second; break; - case PMOVSR_READ: - if (cluster == first) + case HVC_PMU_PMOVSR_READ: + if (cluster_id == first) ret = read_pmovsr(); else - ret = 
clusters_ctx[first][cpu][PMOVSR_IDX]; + ret = clusters_ctx[first][cpu_id][PMOVSR_IDX]; break; - case PMOVSR_WRITE: - if (cluster == first) + case HVC_PMU_PMOVSR_WRITE: + if (cluster_id == first) write_pmovsr(second); else - clusters_ctx[first][cpu][PMOVSR_IDX] = second; + clusters_ctx[first][cpu_id][PMOVSR_IDX] = second; break; - case PMXEVCNTR_READ: - if (cluster == first) { + case HVC_PMU_PMXEVCNTR_READ: + if (cluster_id == first) { ret = read_pmxevcntr(); } else { - tmp = clusters_ctx[first][cpu][PMSELR_IDX]; - ret = clusters_ctx[first][cpu][PMXEVCNT0_IDX + (tmp * 2)]; + tmp = clusters_ctx[first][cpu_id][PMSELR_IDX]; + ret = clusters_ctx + [first] + [cpu_id] + [PMXEVCNT0_IDX + (tmp * 2)]; } break; - case PMXEVCNTR_WRITE: - if (cluster == first) { + case HVC_PMU_PMXEVCNTR_WRITE: + if (cluster_id == first) { write_pmxevcntr(second); } else { - tmp = clusters_ctx[first][cpu][PMSELR_IDX]; - clusters_ctx[first][cpu][PMXEVCNT0_IDX + (tmp * 2)] = second; + tmp = clusters_ctx[first][cpu_id][PMSELR_IDX]; + clusters_ctx + [first] + [cpu_id] + [PMXEVCNT0_IDX + (tmp * 2)] = second; } break; - case PMINTENSET_READ: - if (cluster == first) + case HVC_PMU_PMINTENSET_READ: + if (cluster_id == first) ret = read_pmintenset(); else - ret = clusters_ctx[first][cpu][PMINTENSET_IDX]; + ret = clusters_ctx[first][cpu_id][PMINTENSET_IDX]; break; - case PMINTENSET_WRITE: - if (cluster == first) + case HVC_PMU_PMINTENSET_WRITE: + if (cluster_id == first) write_pmintenset(second); else - clusters_ctx[first][cpu][PMINTENSET_IDX] = second; + clusters_ctx[first][cpu_id][PMINTENSET_IDX] = second; break; - case PMINTENCLR_READ: - if (cluster == first) + case HVC_PMU_PMINTENCLR_READ: + if (cluster_id == first) ret = read_pmintenclr(); else - ret = clusters_ctx[first][cpu][PMINTENCLR_IDX]; + ret = clusters_ctx[first][cpu_id][PMINTENCLR_IDX]; break; - case PMINTENCLR_WRITE: - if (cluster == first) + case HVC_PMU_PMINTENCLR_WRITE: + if (cluster_id == first) write_pmintenclr(second); else - 
clusters_ctx[first][cpu][PMINTENCLR_IDX] = second; + clusters_ctx[first][cpu_id][PMINTENCLR_IDX] = second; break; - case HVC_SWITCH: + case HVC_PMU_SWITCH: if (first) set_pmu_state(PMU_STATE2); else set_pmu_state(PMU_STATE0); break; - case GET_COUNTERS_SIZE: + case HVC_PMU_GET_COUNTERS_SIZE: ret = sizeof(struct descriptor) * ENTRIES; break; - case SYNC_PMU_COUNTERS: + case HVC_PMU_SYNC_PMU_COUNTERS: { int i; int entries; - unsigned cluster_id; - unsigned selected_counter; - unsigned event_type; - unsigned reset_value; - unsigned request_code; - unsigned tmp = 0; unsigned int *pentries; + desc = (struct descriptor *)first; pentries = &desc->u.header.entries; entries = *pentries; - desc->u.header.active_cluster_id = cluster; + desc->u.header.active_cluster_id = cluster_id; for (i = 0, desc++; i < entries; i++, desc++) { - cluster_id = desc->u.counter.cluster_id; - selected_counter = desc->u.counter.selected_counter; - event_type = desc->u.counter.event_type; - reset_value = desc->u.counter.reset_value; - request_code = desc->u.counter.request_code; - switch (request_code) { - case PMU_DISABLE_COUNTER: - if (cluster == cluster_id) { - switch (selected_counter) { - case PMU_CYCLE_COUNTER: - write_pmcntenclr(1UL << 31); - break; - case PMU_OVERFLOW_FLAG: - break; - case PMU_EVENT_COUNTER_0: - case PMU_EVENT_COUNTER_1: - case PMU_EVENT_COUNTER_2: - case PMU_EVENT_COUNTER_3: - case PMU_EVENT_COUNTER_4: - case PMU_EVENT_COUNTER_5: - selected_counter -= PMU_EVENT_COUNTER_0; - write_pmcntenclr(1UL << (selected_counter)); - break; - default: - break; - }; - } else { - switch (selected_counter) { - case PMU_CYCLE_COUNTER: - clusters_ctx[cluster_id][cpu][PMCCNTR_IDX] &= 0x7FFFFFFF; - break; - case PMU_OVERFLOW_FLAG: - break; - case PMU_EVENT_COUNTER_0: - case PMU_EVENT_COUNTER_1: - case PMU_EVENT_COUNTER_2: - case PMU_EVENT_COUNTER_3: - case PMU_EVENT_COUNTER_4: - case PMU_EVENT_COUNTER_5: - selected_counter -= PMU_EVENT_COUNTER_0; - clusters_ctx[cluster_id][cpu][PMCCNTR_IDX] 
&= ~(1 << selected_counter); - break; - default: - break; - }; - } - break; - case PMU_CONF_COUNTER: - if (cluster == cluster_id) { - /* Toggle global enable bit. */ - tmp = read_pmcr(); - tmp |= 1; - write_pmcr(tmp); - - switch (selected_counter) { - case PMU_CYCLE_COUNTER: - write_pmcntenset(1UL << 31); - break; - case PMU_OVERFLOW_FLAG: - break; - case PMU_EVENT_COUNTER_1: - case PMU_EVENT_COUNTER_2: - case PMU_EVENT_COUNTER_3: - case PMU_EVENT_COUNTER_4: - case PMU_EVENT_COUNTER_5: - selected_counter -= PMU_EVENT_COUNTER_0; - write_pmselr(selected_counter); - write_pmxevtyper(event_type); - write_pmcntenset(1UL << (selected_counter)); - break; - default: - break; - }; - } else { - switch (selected_counter) { - case PMU_CYCLE_COUNTER: - clusters_ctx[cluster_id][cpu][PMCCNTR_IDX] |= 0x80000000; - break; - case PMU_OVERFLOW_FLAG: - break; - case PMU_EVENT_COUNTER_1: - case PMU_EVENT_COUNTER_2: - case PMU_EVENT_COUNTER_3: - case PMU_EVENT_COUNTER_4: - case PMU_EVENT_COUNTER_5: - selected_counter -= PMU_EVENT_COUNTER_0; - clusters_ctx[cluster_id][cpu][PMCCNTR_IDX] |= (1 << selected_counter); - break; - default: - break; - }; - } - break; - case PMU_CONF_RESET_COUNTER: - if (cluster == cluster_id) { - switch (selected_counter) { - case PMU_CYCLE_COUNTER: - write_pmccntr(reset_value); - write_pmcntenset(1UL << 31); - break; - case PMU_OVERFLOW_FLAG: - break; - case PMU_EVENT_COUNTER_1: - case PMU_EVENT_COUNTER_2: - case PMU_EVENT_COUNTER_3: - case PMU_EVENT_COUNTER_4: - case PMU_EVENT_COUNTER_5: - selected_counter -= PMU_EVENT_COUNTER_0; - write_pmselr(selected_counter); - write_pmxevtyper(event_type); - write_pmxevcntr(reset_value); - write_pmcntenset(1UL << (selected_counter)); - break; - default: - break; - }; - } else { - switch (selected_counter) { - case PMU_CYCLE_COUNTER: - // Conf reset cycle counter - break; - case PMU_OVERFLOW_FLAG: - break; - case PMU_EVENT_COUNTER_1: - case PMU_EVENT_COUNTER_2: - case PMU_EVENT_COUNTER_3: - case PMU_EVENT_COUNTER_4: - case 
PMU_EVENT_COUNTER_5: - selected_counter -= PMU_EVENT_COUNTER_0; - // Conf reset event counter - break; - default: - break; - }; - } - break; - case PMU_READ_COUNTER: - if (cluster == cluster_id) { - switch (selected_counter) { - case PMU_CYCLE_COUNTER: - desc->u.counter.counter_value = read_pmccntr(); - break; - case PMU_OVERFLOW_FLAG: - // read flag - break; - case PMU_EVENT_COUNTER_1: - case PMU_EVENT_COUNTER_2: - case PMU_EVENT_COUNTER_3: - case PMU_EVENT_COUNTER_4: - case PMU_EVENT_COUNTER_5: - selected_counter -= PMU_EVENT_COUNTER_0; - write_pmselr(selected_counter); - desc->u.counter.counter_value = read_pmxevcntr(); - break; - default: - break; - }; - } else { - switch (selected_counter) { - case PMU_CYCLE_COUNTER: - desc->u.counter.counter_value = clusters_ctx[cluster_id][cpu][PMCCNTR_IDX]; - break; - case PMU_OVERFLOW_FLAG: - // read flag - break; - case PMU_EVENT_COUNTER_1: - case PMU_EVENT_COUNTER_2: - case PMU_EVENT_COUNTER_3: - case PMU_EVENT_COUNTER_4: - case PMU_EVENT_COUNTER_5: - selected_counter -= PMU_EVENT_COUNTER_0; - tmp = clusters_ctx[first][cpu][PMSELR_IDX]; - desc->u.counter.counter_value = clusters_ctx[cluster_id][cpu][PMXEVCNT0_IDX + (tmp * 2)]; - break; - default: - break; - }; - } - break; - case PMU_READ_RESET_COUNTER: - if (cluster == cluster_id) { - switch (selected_counter) { - case PMU_CYCLE_COUNTER: - desc->u.counter.counter_value = read_pmccntr(); - write_pmccntr(reset_value); - break; - case PMU_OVERFLOW_FLAG: - // read flag - break; - case PMU_EVENT_COUNTER_1: - case PMU_EVENT_COUNTER_2: - case PMU_EVENT_COUNTER_3: - case PMU_EVENT_COUNTER_4: - case PMU_EVENT_COUNTER_5: - selected_counter -= PMU_EVENT_COUNTER_0; - write_pmselr(selected_counter); - desc->u.counter.counter_value = read_pmxevcntr(); - write_pmxevcntr(reset_value); - break; - default: - break; - } - } else { - switch (selected_counter) { - case PMU_CYCLE_COUNTER: - desc->u.counter.counter_value = 0; - desc->u.counter.counter_value = 
clusters_ctx[cluster_id][cpu][PMCCNTR_IDX]; - clusters_ctx[cluster_id][cpu][PMCCNTR_IDX] = desc->u.counter.reset_value; - case PMU_OVERFLOW_FLAG: - // read flag - break; - case PMU_EVENT_COUNTER_1: - case PMU_EVENT_COUNTER_2: - case PMU_EVENT_COUNTER_3: - case PMU_EVENT_COUNTER_4: - case PMU_EVENT_COUNTER_5: - selected_counter -= PMU_EVENT_COUNTER_0; - tmp = clusters_ctx[first][cpu][PMSELR_IDX]; - desc->u.counter.counter_value = clusters_ctx[cluster_id][cpu][PMXEVCNT0_IDX + (tmp * 2)]; - break; - default: - break; - } - } - break; - } + handle_desc(desc, cluster_id, cpu_id); } } break; diff --git a/big-little/virtualisor/virt_handle.c b/big-little/virtualisor/virt_handle.c index 684dacb..abdf604 100644 --- a/big-little/virtualisor/virt_handle.c +++ b/big-little/virtualisor/virt_handle.c @@ -27,6 +27,7 @@ #include "mem_trap.h" #include "gic_registers.h" #include "bl.h" +#include "misc.h" extern cache_geometry host_cache_geometry[]; extern cache_geometry target_cache_geometry[]; @@ -35,6 +36,7 @@ extern cache_diff cache_delta[NUM_CPUS][MAX_CACHE_LEVELS]; extern void signal_switchover(void); extern unsigned cluster_reset_status(unsigned); extern unsigned handle_pmu(unsigned, unsigned, unsigned); +extern void set_pmu_state(unsigned); void trap_cp15_mrc_mcr_handle(unsigned hsr, gp_regs * regs) { @@ -411,24 +413,28 @@ void trap_cp15_mrc_mcr_handle(unsigned hsr, gp_regs * regs) write_pmuserenr(regs->r[Rt]); else regs->r[Rt] = read_pmuserenr(); + set_pmu_state(PMU_STATE1); break; case 1: if (write) write_pmintenset(regs->r[Rt]); else regs->r[Rt] = read_pmintenset(); + set_pmu_state(PMU_STATE1); break; case 2: if (write) write_pmintenclr(regs->r[Rt]); else regs->r[Rt] = read_pmintenclr(); + set_pmu_state(PMU_STATE1); break; case 3: if (write) write_pmovsset(regs->r[Rt]); else regs->r[Rt] = read_pmovsset(); + set_pmu_state(PMU_STATE1); break; default: goto error; @@ -442,18 +448,21 @@ void trap_cp15_mrc_mcr_handle(unsigned hsr, gp_regs * regs) write_pmccntr(regs->r[Rt]); 
else regs->r[Rt] = read_pmccntr(); + set_pmu_state(PMU_STATE1); break; case 1: if (write) write_pmxevtyper(regs->r[Rt]); else regs->r[Rt] = read_pmxevtyper(); + set_pmu_state(PMU_STATE1); break; case 2: if (write) write_pmxevcntr(regs->r[Rt]); else regs->r[Rt] = read_pmxevcntr(); + set_pmu_state(PMU_STATE1); break; default: goto error; @@ -467,48 +476,56 @@ void trap_cp15_mrc_mcr_handle(unsigned hsr, gp_regs * regs) write_pmcr(regs->r[Rt]); else regs->r[Rt] = read_pmcr(); + set_pmu_state(PMU_STATE1); break; case 1: if (write) write_pmcntenset(regs->r[Rt]); else regs->r[Rt] = read_pmcntenset(); + set_pmu_state(PMU_STATE1); break; case 2: if (write) write_pmcntenclr(regs->r[Rt]); else regs->r[Rt] = read_pmcntenclr(); + set_pmu_state(PMU_STATE1); break; case 3: if (write) write_pmovsr(regs->r[Rt]); else regs->r[Rt] = read_pmovsr(); + set_pmu_state(PMU_STATE1); break; case 4: if (write) write_pmswinc(regs->r[Rt]); else regs->r[Rt] = read_pmswinc(); + set_pmu_state(PMU_STATE1); break; case 5: if (write) write_pmselr(regs->r[Rt]); else regs->r[Rt] = read_pmselr(); + set_pmu_state(PMU_STATE1); break; case 6: if (write) write_pmceid0(regs->r[Rt]); else regs->r[Rt] = read_pmceid0(); + set_pmu_state(PMU_STATE1); break; case 7: if (write) write_pmceid1(regs->r[Rt]); else regs->r[Rt] = read_pmceid1(); + set_pmu_state(PMU_STATE1); break; default: goto error; @@ -568,8 +585,8 @@ void trap_hvc_handle(unsigned hsr, gp_regs *regs) * HVC call to switch to the other cluster. This is done * by sending a switchover IPI to all the cores in the cluster. 
*/ - case SYNC_SWITCHOVER: - /* Do not switch till previous one has not completed */ + case HVC_SWITCHER_CLUSTER_SWITCH: + /* Do not switch till previous one has completed */ while (FALSE == cluster_reset_status(!read_clusterid())); signal_switchover(); break; @@ -577,33 +594,33 @@ void trap_hvc_handle(unsigned hsr, gp_regs *regs) /* * HVC call to return the physical MPIDR */ - case READ_MPIDR: + case HVC_VIRT_MPIDR_READ: regs->r[0] = read_mpidr(); break; - case PMCR_READ: - case PMCR_WRITE: - case PMSELR_READ: - case PMSELR_WRITE: - case PMXEVTYPER_READ: - case PMXEVTYPER_WRITE: - case PMCNTENSET_READ: - case PMCNTENSET_WRITE: - case PMCNTENCLR_READ: - case PMCNTENCLR_WRITE: - case PMCCNTR_READ: - case PMCCNTR_WRITE: - case PMOVSR_READ: - case PMOVSR_WRITE: - case PMXEVCNTR_READ: - case PMXEVCNTR_WRITE: - case PMINTENSET_READ: - case PMINTENSET_WRITE: - case PMINTENCLR_READ: - case PMINTENCLR_WRITE: - case HVC_SWITCH: - case GET_COUNTERS_SIZE: - case SYNC_PMU_COUNTERS: + case HVC_PMU_PMCR_READ: + case HVC_PMU_PMCR_WRITE: + case HVC_PMU_PMSELR_READ: + case HVC_PMU_PMSELR_WRITE: + case HVC_PMU_PMXEVTYPER_READ: + case HVC_PMU_PMXEVTYPER_WRITE: + case HVC_PMU_PMCNTENSET_READ: + case HVC_PMU_PMCNTENSET_WRITE: + case HVC_PMU_PMCNTENCLR_READ: + case HVC_PMU_PMCNTENCLR_WRITE: + case HVC_PMU_PMCCNTR_READ: + case HVC_PMU_PMCCNTR_WRITE: + case HVC_PMU_PMOVSR_READ: + case HVC_PMU_PMOVSR_WRITE: + case HVC_PMU_PMXEVCNTR_READ: + case HVC_PMU_PMXEVCNTR_WRITE: + case HVC_PMU_PMINTENSET_READ: + case HVC_PMU_PMINTENSET_WRITE: + case HVC_PMU_PMINTENCLR_READ: + case HVC_PMU_PMINTENCLR_WRITE: + case HVC_PMU_SWITCH: + case HVC_PMU_GET_COUNTERS_SIZE: + case HVC_PMU_SYNC_PMU_COUNTERS: regs->r[0] = handle_pmu(opcode, regs->r[1], regs->r[2]); break; default: |