author    Catalin Marinas <catalin.marinas@arm.com>    2011-11-28 13:53:28 +0000
committer Catalin Marinas <catalin.marinas@arm.com>    2012-04-17 15:29:32 +0100
commit    7fec1b57b8a925d83c194f995f83d9f8442fd48e (patch)
tree      320c333459779e1388f5aae50ae50edb4482e82c /arch/arm/include/asm/mmu.h
parent    3c5f7e7b4a0346de670b08f595bd15e7eec91f97 (diff)
ARM: Remove __ARCH_WANT_INTERRUPTS_ON_CTXSW on ASID-capable CPUs
Since the ASIDs must be unique to an mm across all the CPUs in a system,
the __new_context() function needs to broadcast a context reset event to
all the CPUs during ASID allocation if a roll-over occurred. Such IPIs
cannot be issued with interrupts disabled and ARM had to define
__ARCH_WANT_INTERRUPTS_ON_CTXSW.

This patch changes the check_context() function to
check_and_switch_context() called from switch_mm(). In the case of
ASID-capable CPUs (ARMv6 onwards), if a new ASID is needed and
interrupts are disabled, it defers the __new_context() and
cpu_switch_mm() calls to the post-lock switch hook where interrupts
are enabled. Setting the reserved TTBR0 was also moved to
check_and_switch_context() from cpu_v7_switch_mm().

Reviewed-by: Will Deacon <will.deacon@arm.com>
Tested-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
Tested-by: Marc Zyngier <Marc.Zyngier@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
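For illustration only, below is a minimal C sketch of the deferral pattern the
commit message describes. It is not the code from this patch: the helper names
(need_new_asid, mm_pending, finish_post_lock_switch) and the stub bodies are
hypothetical stand-ins for the real __new_context()/cpu_switch_mm() machinery.

    #include <stdbool.h>
    #include <stddef.h>

    struct mm;                        /* stand-in for struct mm_struct */

    static struct mm *mm_pending;     /* mm whose switch was deferred */

    /* Hypothetical stubs standing in for the real kernel helpers. */
    static bool irqs_disabled(void) { return true; }
    static bool need_new_asid(struct mm *mm) { (void)mm; return true; }
    static void set_reserved_ttbr0(void) { }         /* reserved TTBR0 setup */
    static void new_context(struct mm *mm) { (void)mm; } /* may broadcast IPIs */
    static void cpu_switch_mm(struct mm *mm) { (void)mm; }

    /* Called from switch_mm() with the runqueue lock held and IRQs off. */
    static void check_and_switch_context(struct mm *next)
    {
    	if (need_new_asid(next) && irqs_disabled()) {
    		/* Cannot broadcast the ASID-reset IPI now: point TTBR0 at
    		 * the reserved tables and defer the real switch. */
    		set_reserved_ttbr0();
    		mm_pending = next;
    		return;
    	}
    	new_context(next);
    	cpu_switch_mm(next);
    }

    /* Post-lock switch hook: runs after the runqueue lock is dropped and
     * interrupts are re-enabled, so the IPI broadcast is safe here. */
    static void finish_post_lock_switch(void)
    {
    	if (mm_pending) {
    		new_context(mm_pending);
    		cpu_switch_mm(mm_pending);
    		mm_pending = NULL;
    	}
    }

The hunk shown below covers only mmu.h (see the diffstat note); the deferral
logic itself lives in other files of the full patch.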
Diffstat (limited to 'arch/arm/include/asm/mmu.h')
-rw-r--r--  arch/arm/include/asm/mmu.h  2
1 file changed, 2 insertions(+), 0 deletions(-)
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index b8e580a297e4..20b43d6f23b3 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -39,6 +39,8 @@ typedef struct {
* so enable interrupts over the context switch to avoid high
* latency.
*/
+#ifndef CONFIG_CPU_HAS_ASID
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW
+#endif
#endif