/*
 * Copyright (c) 2012, ARM Limited. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with
 * or without modification, are permitted provided that the
 * following conditions are met:
 *
 * Redistributions of source code must retain the above
 * copyright notice, this list of conditions and the
 * following disclaimer.
 *
 * Redistributions in binary form must reproduce the
 * above copyright notice, this list of conditions and
 * the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its
 * contributors may be used to endorse or promote products
 * derived from this software without specific prior written
 * permission.
 */

#include "secure_world.h"
#include "events.h"
#include "bakery.h"

/* Bakery lock to serialize access to the tube. */
bakery_t lock_tube0 __attribute__ ((section("BL_SEC_DV_PAGE"))) = { 0 };

/*
 * Compile time switch to decide whether the outbound
 * L2 will be kept on always for inbound cache warming
 * or it will be flushed and reset after the BL context
 * has been picked up.
 */
static unsigned flush_ob_l2 = FLUSH_OB_L2;

#if FLUSH_L2_FIX
/*
 * TODO:
 * Dirty hack for backward compatibility. This
 * variable helps determine whether this is the
 * first switch.
 */
static unsigned switch_count = 0;
#endif

#if FM_BETA
/*
 * Variable in secure world to indicate the
 * reset type i.e. cold (0) or warm reset (!0).
 */
unsigned ve_reset_type[NUM_CPUS];
#endif

/*
 * Allocate secure events in our device page.
 */
unsigned event[MAX_CORES][MAX_EVENTS] __attribute__ ((section("BL_SEC_DV_PAGE")));

/*
 * Normal spinlock to guard inbound cluster registers
 * in the KFSCB. It will always be used when the MMU
 * is on. Each cluster will anyway use it sequentially.
 */
static unsigned lock_ib_kfscb;

/*
 * Bakery lock to guard outbound cluster registers in
 * the KFSCB. It will always be used when the MMU is off.
 * Each cluster will anyway use it sequentially.
 */
static bakery_t lock_ob_kfscb __attribute__ ((section("BL_SEC_DV_PAGE"))) = { 0 };

/*
 * Small stacks for after we have turned our caches off.
 */
static unsigned long long powerdown_stacks[NUM_CPUS][32] __attribute__ ((section("BL_SEC_DV_PAGE")));

unsigned long long *get_powerdown_stack(unsigned cpu_id)
{
        /* Stacks descend, so hand back the address just past this cpu's slot. */
        return &powerdown_stacks[cpu_id + 1][0];
}

static void (*get_reset_handler(unsigned cluster_id, unsigned cpu_id)) (void)
{
#if FM_BETA
        return (void (*)(void))ve_reset_type[cpu_id];
#else
        return (void (*)(void))read32(KFSCB_BASE + RST_HANDLER0 +
                                      ((cpu_id + (cluster_id << 2)) << 3));
#endif
}
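/*
 * Illustrative sketch only, not used by the switcher: the address
 * arithmetic in get_reset_handler() above and set_reset_handler()
 * below assumes the KFSCB keeps one 8-byte reset handler slot per
 * cpu, laid out cluster-major with 4 cpus per cluster. The helper
 * merely names that calculation; 'rst_handler_offset' is a
 * hypothetical name.
 */
static inline unsigned __attribute__ ((unused)) rst_handler_offset(unsigned cluster_id,
                                                                   unsigned cpu_id)
{
        /* slot index = (cluster_id * 4) + cpu_id, each slot 8 bytes wide */
        return RST_HANDLER0 + ((cpu_id + (cluster_id << 2)) << 3);
}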
/*
 * The way a warm reset is detected has changed in the post beta FastModels.
 * The following workarounds make the earlier approach coexist with the
 * new one. Instead of dealing with a function pointer, they manipulate a
 * variable.
 */
void set_reset_handler(unsigned cluster_id, unsigned cpu_id, void (*handler)(void))
{
        void (*prev_reset_handler)(void) = get_reset_handler(cluster_id, cpu_id);

        if (prev_reset_handler != handler) {
#if FM_BETA
                ve_reset_type[cpu_id]++;
                cln_dcache_mva_poc(&ve_reset_type[cpu_id]);
#else
                write32(KFSCB_BASE + RST_HANDLER0 + ((cpu_id + (cluster_id << 2)) << 3),
                        (unsigned) handler);
                dsb();
#endif
        }

        return;
}

unsigned get_inbound()
{
        return !read_clusterid();
}

/*
 * Simple function which will bring our corresponding core out of reset.
 */
void powerup_ib_core(unsigned cluster_id, unsigned cpu_id)
{
        unsigned rst_stat_reg = 0x0;
        unsigned cpu_mask = 0x0;

#if FLUSH_L2_FIX
        if (0 == switch_count) {
                set_event(FLUSH_L2, cpu_id);
                /*
                 * We really do not care about a race to update
                 * this variable as long as it has a non-zero value
                 * after a switch.
                 */
                switch_count++;
        }
#endif

        write_trace(&lock_tube0, SEC_TUBE0, "Powerup Inbound", read_cntpct(), 0x0, 0x0);

        spin_lock(&lock_ib_kfscb);
        rst_stat_reg = read32(KFSCB_BASE + RST_STAT0 + (cluster_id << 2));
        cpu_mask = 1 << 8 | (1 << 4) << cpu_id | 1 << cpu_id;
        rst_stat_reg &= ~cpu_mask;
        write32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2), rst_stat_reg);
        spin_unlock(&lock_ib_kfscb);

        return;
}

/*
 * Simple function to place a core in the outbound cluster
 * in reset.
 */
void powerdown_ob_core(unsigned cluster_id, unsigned cpu_id)
{
        unsigned val = 0x0;
        unsigned mask = 0x0;

        get_bakery_spinlock(cpu_id, &lock_ob_kfscb);

        val = read32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2));
        mask = (1 << cpu_id) << 4;
        val |= mask;
        write32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2), val);

        release_bakery_spinlock(cpu_id, &lock_ob_kfscb);

        return;
}

/*
 * Simple function to place the outbound cluster in reset.
 */
void powerdown_ob_cluster(unsigned cluster_id, unsigned cpu_id)
{
        unsigned val = 0x0;
        unsigned mask = 0x0;

        get_bakery_spinlock(cpu_id, &lock_ob_kfscb);

        val = read32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2));
        mask = 1 << 8;
        val |= mask;
        write32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2), val);

        release_bakery_spinlock(cpu_id, &lock_ob_kfscb);

        return;
}

/*
 * Do not use this function for Read-Modify-Write of KFSCB registers
 * as it does not hold a lock.
 */
unsigned reset_status(unsigned cluster_id, unsigned rst_level, unsigned cpu_mask)
{
        unsigned rst_stat_reg = 0x0;

        rst_stat_reg = read32(KFSCB_BASE + RST_STAT0 + (cluster_id << 2));

        switch (rst_level) {
        case CLUSTER_RESET:
                return rst_stat_reg >> 8;
        case CORE_PORESET:
                return ((rst_stat_reg >> 4) & 0xf) & cpu_mask;
        case CORE_RESET:
                return (rst_stat_reg & 0xf) & cpu_mask;
        default:
                return 0;
        }
}
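/*
 * Illustrative sketch only, not called by the code in this file: the
 * per-cluster reset words are assumed to pack the cluster reset in
 * bit 8, the per-cpu power-on resets in bits [7:4] and the per-cpu
 * resets in bits [3:0]. That is the layout powerup_ib_core() builds
 * its cpu_mask from and reset_status() decodes; 'rst_bits_for_cpu'
 * is a hypothetical name.
 */
static inline unsigned __attribute__ ((unused)) rst_bits_for_cpu(unsigned cpu_id)
{
        /* cluster reset | core power-on reset | core reset */
        return (1 << 8) | ((1 << 4) << cpu_id) | (1 << cpu_id);
}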
void do_power_op(unsigned cpu_mask, unsigned op_type)
{
        unsigned cpu_id = read_cpuid();
        unsigned cluster_id = read_clusterid();
        unsigned secondary_mask = 0x0;
        unsigned first_cpu = find_first_cpu();

        /*
         * Brute force way of cleaning the L1 and L2 caches of the outbound cluster.
         * All cpus flush their L1 caches. The 'first_cpu' waits for the others to
         * finish this operation before flushing the L2.
         */
        write_trace(&lock_tube0, SEC_TUBE0, "L1 Flush Begin", read_cntpct(), 0x0, 0x0);
        /* Disable the data cache and MMU before the flush */
        write_sctlr(read_sctlr() & ~CR_C & ~CR_M);
        dsb();
        isb();
        inv_icache_all();
        cache_maint_op(L1, CLN_INV);
        disable_coherency();
        write_trace(&lock_tube0, SEC_TUBE0, "L1 Flush End", read_cntpct(), 0x0, 0x0);

        if (OP_TYPE_HP != op_type)
                set_event(SEC_L1_DONE, cpu_id);

        /* This code will never be executed for hotplug */
        if (cpu_id == first_cpu) {
                wait_for_events(SEC_L1_DONE, cpu_mask);

                if (flush_ob_l2) {
#if FLUSH_L2_FIX
                        wait_for_event(FLUSH_L2, cpu_id);
                        reset_event(FLUSH_L2, cpu_id);
#endif
                        write_trace(&lock_tube0, SEC_TUBE0, "L2 Flush Begin", read_cntpct(), 0x0, 0x0);
                        cache_maint_op(L2, CLN_INV);
                        write_trace(&lock_tube0, SEC_TUBE0, "L2 Flush End", read_cntpct(), 0x0, 0x0);

                        /* Turn off CCI snoops & DVM messages */
                        if (cluster_id)
                                write32(A7_SL_IFACE_BASE + SNOOP_CTLR_REG, 0x0);
                        else
                                write32(A15_SL_IFACE_BASE + SNOOP_CTLR_REG, 0x0);

                        dsb();

                        /* Wait for the dust to settle down */
                        while (read32(CCI_BASE + STATUS_REG) & 0x1)
                                ;
                }

                /********************* RESET HANDLING **************************************
                 * Secondaries place themselves in reset while the 'first_cpu' waits for
                 * them to do so.
                 ***************************************************************************/
                secondary_mask = cpu_mask & ~(1 << cpu_id);

                /* Wait for other cpus to enter reset */
                while (secondary_mask != reset_status(cluster_id, CORE_PORESET, secondary_mask))
                        ;

                if (flush_ob_l2)
                        powerdown_ob_cluster(cluster_id, cpu_id);
                else
                        powerdown_ob_core(cluster_id, cpu_id);
        } else {
                switch (op_type) {
                case (OP_TYPE_HP):
                        get_bakery_spinlock(cpu_id, &lock_ob_kfscb);
                        write32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2), cpu_mask);
                        release_bakery_spinlock(cpu_id, &lock_ob_kfscb);
                        break;
                case (OP_TYPE_SWITCH):
                        powerdown_ob_core(cluster_id, cpu_id);
                        break;
                default:
                        panic();
                }
        }

        write_trace(&lock_tube0, SEC_TUBE0, "Reset Initiated", read_cntpct(), 0x0, 0x0);

        return;
}
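/*
 * Minimal usage sketch, not called from this file: it strings the
 * primitives above together in the order a single outbound cpu might
 * use them during a cluster switch. This is an assumption drawn from
 * the functions defined here rather than the switcher's actual
 * sequencing, and 'example_switch_sequence', 'warm_reset_entry' and
 * 'ob_cpu_mask' are hypothetical names.
 */
static void __attribute__ ((unused)) example_switch_sequence(void (*warm_reset_entry)(void),
                                                             unsigned ob_cpu_mask)
{
        unsigned cpu_id = read_cpuid();
        unsigned ib_cluster = get_inbound();

        /* Point the inbound core's reset vector at the warm boot entry */
        set_reset_handler(ib_cluster, cpu_id, warm_reset_entry);

        /* Release the inbound core from reset so it can begin warming its caches */
        powerup_ib_core(ib_cluster, cpu_id);

        /* Flush this outbound core's caches and place it (or the whole cluster) in reset */
        do_power_op(ob_cpu_mask, OP_TYPE_SWITCH);
}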