author     Will Deacon <will.deacon@arm.com>    2015-10-06 18:46:27 +0100
committer  Alex Shi <alex.shi@linaro.org>       2016-05-19 16:44:44 +0800
commit     9ff946691f00ae631bc3b743e6e88a0e79e32036 (patch)
tree       8b5ec0172ba3f2ffda7a7dd85d06a320f3d30182
parent     99324769e801c42ff36e801559decc795ac7cab5 (diff)
branch     v3.18/topic/mm-kaslr

arm64: switch_mm: simplify mm and CPU checks
switch_mm performs some checks to try and avoid entering the ASID
allocator:

  (1) If we're switching to the init_mm (no user mappings), then simply
      set a reserved TTBR0 value with no page table (the zero page)

  (2) If prev == next *and* the mm_cpumask indicates that we've run on
      this CPU before, then we can skip the allocator.

However, there is plenty of redundancy here. With the new ASID
allocator, if prev == next, then we know that our ASID is valid and do
not need to worry about re-allocation. Consequently, we can drop the
mm_cpumask check in (2) and move the prev == next check before the
init_mm check, since if prev == next == init_mm then there's nothing to
do.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
(cherry picked from commit c2775b2ee5caca19f661ee2ab5af92462596db71)
Signed-off-by: Alex Shi <alex.shi@linaro.org>
-rw-r--r--   arch/arm64/include/asm/mmu_context.h   6
-rw-r--r--   arch/arm64/mm/context.c                2

2 files changed, 5 insertions, 3 deletions
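For readability, here is a sketch of how switch_mm() ends up looking
with this patch applied. It is pieced together from the hunks below
rather than quoted from the tree, and the explanatory comments on the
prev == next and final checks are ours, so treat it as illustrative:

static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * Same mm: with the new allocator our ASID is known to be
	 * valid, so there is nothing to re-check or re-allocate.
	 */
	if (prev == next)
		return;

	/*
	 * init_mm.pgd does not contain any user mappings and it is always
	 * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
	 */
	if (next == &init_mm) {
		cpu_set_reserved_ttbr0();
		return;
	}

	/* Different mm: validate (or allocate) the ASID and switch. */
	check_and_switch_context(next, cpu);
}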
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 70b1d831f12c..341c6703c9db 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -185,6 +185,9 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 {
 	unsigned int cpu = smp_processor_id();
 
+	if (prev == next)
+		return;
+
 	/*
 	 * init_mm.pgd does not contain any user mappings and it is always
 	 * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
@@ -194,8 +197,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		return;
 	}
 
-	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
-		check_and_switch_context(next, tsk);
+	check_and_switch_context(next, cpu);
 }
 
 #define deactivate_mm(tsk,mm)	do { } while (0)
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index df9970ef0537..2ab1da942efc 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -167,10 +167,10 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
 		local_flush_tlb_all();
 
 	atomic64_set(&per_cpu(active_asids, cpu), asid);
-	cpumask_set_cpu(cpu, mm_cpumask(mm));
 	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
 
 switch_mm_fastpath:
+	cpumask_set_cpu(cpu, mm_cpumask(mm));
 	cpu_switch_mm(mm->pgd, mm);
 }
 
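The second hunk is what makes the first one safe: switch_mm() no longer
touches mm_cpumask at all, and the slow path of check_and_switch_context()
falls through to the switch_mm_fastpath label, so setting the bit at the
label covers both the fast and slow paths. A minimal sketch of that
control flow, with the allocator details reduced to the hypothetical
helpers asid_still_valid() and allocate_new_asid() (not the real
function bodies):

void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long flags;

	/* Lock-free fast path: the mm's ASID is still current. */
	if (asid_still_valid(mm, cpu))		/* hypothetical helper */
		goto switch_mm_fastpath;

	/*
	 * Slow path: allocate a fresh ASID under cpu_asid_lock, possibly
	 * flushing the TLB, then fall through to the label below.
	 */
	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	allocate_new_asid(mm, cpu);		/* hypothetical helper */
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
	/* Reached on both paths, so mm_cpumask is maintained here. */
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	cpu_switch_mm(mm->pgd, mm);
}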