author:    Matt Evans <matt@ozlabs.org>  2011-04-06 19:48:50 +0000
committer: Benjamin Herrenschmidt <benh@kernel.crashing.org>  2011-04-27 14:18:52 +1000
commit:    44ae3ab3358e962039c36ad4ae461ae9fb29596c (patch)
tree:      08c0628a5226c0535b7fe236be64b48e5eb0fbd6 /arch/powerpc/mm/hash_native_64.c
parent:    eca590f402332ab873d13f2d8d00fa0b91cfff36 (diff)
powerpc: Free up some CPU feature bits by moving out MMU-related features
Some of the 64-bit PPC CPU features are MMU-related, so this patch moves them to MMU_FTR_ bits. All cpu_has_feature()-style tests are moved to mmu_has_feature(), and seven feature bits are freed as a result.

Signed-off-by: Matt Evans <matt@ozlabs.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
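Both helpers boil down to a mask test against a feature word; the patch simply switches which word (and which bit namespace) the hash TLB-flush paths consult. The standalone C sketch below illustrates that pattern. It is only an illustration: the DEMO_* masks, the demo_mmu_features word and demo_mmu_has_feature() are made-up stand-ins for the kernel's MMU_FTR_* definitions and mmu_has_feature() in arch/powerpc/include/asm/mmu.h.

#include <stdio.h>

/* Illustrative stand-ins only; the real MMU_FTR_* masks and the per-CPU
 * MMU feature word live in the kernel's powerpc mmu/cputable code. */
#define DEMO_MMU_FTR_TLBIEL		0x00000001UL
#define DEMO_MMU_FTR_LOCKLESS_TLBIE	0x00000002UL

/* Feature word describing what this (hypothetical) MMU supports. */
static unsigned long demo_mmu_features = DEMO_MMU_FTR_TLBIEL;

/* Same shape as a feature test: a bitwise mask against the feature word. */
static inline int demo_mmu_has_feature(unsigned long feature)
{
	return (demo_mmu_features & feature) != 0;
}

int main(void)
{
	/* Mirrors the gating logic in tlbie(): use the local form only if
	 * the MMU advertises tlbiel, and take a lock unless tlbie can be
	 * issued without one. */
	int use_local = demo_mmu_has_feature(DEMO_MMU_FTR_TLBIEL);
	int lock_tlbie = !demo_mmu_has_feature(DEMO_MMU_FTR_LOCKLESS_TLBIE);

	printf("use_local=%d lock_tlbie=%d\n", use_local, lock_tlbie);
	return 0;
}

Keeping MMU capabilities in a feature word of their own is what frees the seven CPU feature bits mentioned in the commit message.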
Diffstat (limited to 'arch/powerpc/mm/hash_native_64.c')
-rw-r--r--  arch/powerpc/mm/hash_native_64.c  8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 784a400e0781..c23eef2b81a6 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -98,8 +98,8 @@ static inline void __tlbiel(unsigned long va, int psize, int ssize)
 
 static inline void tlbie(unsigned long va, int psize, int ssize, int local)
 {
-	unsigned int use_local = local && cpu_has_feature(CPU_FTR_TLBIEL);
-	int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
+	unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
+	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
 
 	if (use_local)
 		use_local = mmu_psize_defs[psize].tlbiel;
@@ -503,7 +503,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 		} pte_iterate_hashed_end();
 	}
 
-	if (cpu_has_feature(CPU_FTR_TLBIEL) &&
+	if (mmu_has_feature(MMU_FTR_TLBIEL) &&
 	    mmu_psize_defs[psize].tlbiel && local) {
 		asm volatile("ptesync":::"memory");
 		for (i = 0; i < number; i++) {
@@ -517,7 +517,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 		}
 		asm volatile("ptesync":::"memory");
 	} else {
-		int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
+		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
 
 		if (lock_tlbie)
 			raw_spin_lock(&native_tlbie_lock);