Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig | 3
-rw-r--r--  arch/powerpc/include/asm/exception-64s.h | 10
-rw-r--r--  arch/powerpc/include/asm/jump_label.h | 2
-rw-r--r--  arch/powerpc/include/asm/module.h | 5
-rw-r--r--  arch/powerpc/include/asm/page.h | 10
-rw-r--r--  arch/powerpc/include/asm/pgalloc-32.h | 6
-rw-r--r--  arch/powerpc/include/asm/pgalloc-64.h | 6
-rw-r--r--  arch/powerpc/include/asm/processor.h | 4
-rw-r--r--  arch/powerpc/include/asm/prom.h | 3
-rw-r--r--  arch/powerpc/include/asm/reg.h | 32
-rw-r--r--  arch/powerpc/include/asm/smp.h | 4
-rw-r--r--  arch/powerpc/include/asm/switch_to.h | 9
-rw-r--r--  arch/powerpc/include/asm/topology.h | 10
-rw-r--r--  arch/powerpc/kernel/align.c | 10
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 3
-rw-r--r--  arch/powerpc/kernel/cacheinfo.c | 3
-rw-r--r--  arch/powerpc/kernel/crash_dump.c | 8
-rw-r--r--  arch/powerpc/kernel/entry_64.S | 36
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 57
-rw-r--r--  arch/powerpc/kernel/head_64.S | 1
-rw-r--r--  arch/powerpc/kernel/hw_breakpoint.c | 3
-rw-r--r--  arch/powerpc/kernel/iommu.c | 2
-rw-r--r--  arch/powerpc/kernel/lparcfg.c | 22
-rw-r--r--  arch/powerpc/kernel/process.c | 10
-rw-r--r--  arch/powerpc/kernel/prom.c | 43
-rw-r--r--  arch/powerpc/kernel/ptrace.c | 4
-rw-r--r--  arch/powerpc/kernel/reloc_64.S | 1
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 2
-rw-r--r--  arch/powerpc/kernel/signal_32.c | 76
-rw-r--r--  arch/powerpc/kernel/signal_64.c | 14
-rw-r--r--  arch/powerpc/kernel/sysfs.c | 18
-rw-r--r--  arch/powerpc/kernel/tm.S | 88
-rw-r--r--  arch/powerpc/kernel/traps.c | 55
-rw-r--r--  arch/powerpc/kernel/vio.c | 12
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S | 3
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c | 6
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 7
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c | 4
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 2
-rw-r--r--  arch/powerpc/kvm/book3s_xics.c | 1
-rw-r--r--  arch/powerpc/kvm/e500_mmu.c | 2
-rw-r--r--  arch/powerpc/lib/checksum_64.S | 58
-rw-r--r--  arch/powerpc/mm/numa.c | 134
-rw-r--r--  arch/powerpc/mm/slice.c | 2
-rw-r--r--  arch/powerpc/net/bpf_jit_comp.c | 7
-rw-r--r--  arch/powerpc/perf/core-book3s.c | 46
-rw-r--r--  arch/powerpc/perf/power8-pmu.c | 22
-rw-r--r--  arch/powerpc/platforms/52xx/Kconfig | 2
-rw-r--r--  arch/powerpc/platforms/powernv/pci-ioda.c | 33
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-cpu.c | 22
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c | 31
51 files changed, 676 insertions, 278 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index c33e3ad2c8f..fe404e77246 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -572,7 +572,7 @@ config SCHED_SMT
config PPC_DENORMALISATION
bool "PowerPC denormalisation exception handling"
depends on PPC_BOOK3S_64
- default "n"
+ default "y" if PPC_POWERNV
---help---
Add support for handling denormalisation of single precision
values. Useful for bare metal only. If unsure say Y here.
@@ -986,6 +986,7 @@ config RELOCATABLE
must live at a different physical address than the primary
kernel.
+# This value must have zeroes in the bottom 60 bits otherwise lots will break
config PAGE_OFFSET
hex
default "0xc000000000000000"
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 46793b58a76..e17d94d429a 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -264,7 +264,7 @@ do_kvm_##n: \
subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
beq- 1f; \
ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
-1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \
+1: cmpdi cr1,r1,-INT_FRAME_SIZE; /* check if r1 is in userspace */ \
blt+ cr1,3f; /* abort if it is */ \
li r1,(n); /* will be reloaded later */ \
sth r1,PACA_TRAP_SAVE(r13); \
@@ -358,12 +358,12 @@ label##_relon_pSeries: \
/* No guest interrupts come through here */ \
SET_SCRATCH0(r13); /* save r13 */ \
EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
- EXC_STD, KVMTEST_PR, vec)
+ EXC_STD, NOTEST, vec)
#define STD_RELON_EXCEPTION_PSERIES_OOL(vec, label) \
.globl label##_relon_pSeries; \
label##_relon_pSeries: \
- EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, vec); \
+ EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, vec); \
EXCEPTION_RELON_PROLOG_PSERIES_1(label##_common, EXC_STD)
#define STD_RELON_EXCEPTION_HV(loc, vec, label) \
@@ -374,12 +374,12 @@ label##_relon_hv: \
/* No guest interrupts come through here */ \
SET_SCRATCH0(r13); /* save r13 */ \
EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
- EXC_HV, KVMTEST, vec)
+ EXC_HV, NOTEST, vec)
#define STD_RELON_EXCEPTION_HV_OOL(vec, label) \
.globl label##_relon_hv; \
label##_relon_hv: \
- EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST, vec); \
+ EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, vec); \
EXCEPTION_RELON_PROLOG_PSERIES_1(label##_common, EXC_HV)
/* This associates vector numbers with bits in paca->irq_happened */
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
index ae098c438f0..f016bb699b5 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -19,7 +19,7 @@
static __always_inline bool arch_static_branch(struct static_key *key)
{
- asm goto("1:\n\t"
+ asm_volatile_goto("1:\n\t"
"nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
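The asm goto -> asm_volatile_goto change works around a GCC miscompilation (GCC PR 58670) in which plain "asm goto" blocks could be moved or dropped by the optimiser. As a rough sketch of how the workaround macro is typically defined in the generic compiler headers (illustrative, not part of this hunk):

/* The empty asm() after the goto acts as an optimisation barrier that
 * keeps GCC from mishandling the asm goto block; treat this as an
 * approximation of the definition in the compiler headers. */
#define asm_volatile_goto(x...)	do { asm goto(x); asm (""); } while (0)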
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index c1df590ec44..49fa55bfbac 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -82,10 +82,9 @@ struct exception_table_entry;
void sort_ex_table(struct exception_table_entry *start,
struct exception_table_entry *finish);
-#ifdef CONFIG_MODVERSIONS
+#if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64)
#define ARCH_RELOCATES_KCRCTAB
-
-extern const unsigned long reloc_start[];
+#define reloc_start PHYSICAL_START
#endif
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_MODULE_H */
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 988c812aab5..b9f426212d3 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -211,9 +211,19 @@ extern long long virt_phys_offset;
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
#define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET)
#else
+#ifdef CONFIG_PPC64
+/*
+ * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
+ * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
+ */
+#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET))
+#define __pa(x) ((unsigned long)(x) & 0x0fffffffffffffffUL)
+
+#else /* 32-bit, non book E */
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
#endif
+#endif
/*
* Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
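A quick sanity check on why OR/AND can replace +/- here: PAGE_OFFSET is 0xc000000000000000 with zeroes in its bottom 60 bits (as the Kconfig comment earlier in this patch insists), and kernel physical addresses live entirely in those low bits, so the addition never carries into the top nibble. A minimal standalone demo with a made-up physical address:

#include <stdio.h>

#define PAGE_OFFSET	0xc000000000000000UL

int main(void)
{
	unsigned long pa = 0x0000000001234000UL;	/* hypothetical physical address */
	unsigned long va = pa | PAGE_OFFSET;		/* __va(): OR == + when bits don't overlap */
	unsigned long back = va & 0x0fffffffffffffffUL;	/* __pa(): AND == - for the same reason */

	printf("va=0x%lx pa=0x%lx\n", va, back);	/* back == pa */
	return 0;
}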
diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h
index 27b2386f738..842846c1b71 100644
--- a/arch/powerpc/include/asm/pgalloc-32.h
+++ b/arch/powerpc/include/asm/pgalloc-32.h
@@ -84,10 +84,8 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
unsigned long address)
{
- struct page *page = page_address(table);
-
tlb_flush_pgtable(tlb, address);
- pgtable_page_dtor(page);
- pgtable_free_tlb(tlb, page, 0);
+ pgtable_page_dtor(table);
+ pgtable_free_tlb(tlb, page_address(table), 0);
}
#endif /* _ASM_POWERPC_PGALLOC_32_H */
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index b66ae722a8e..64aaf016b47 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -144,11 +144,9 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
unsigned long address)
{
- struct page *page = page_address(table);
-
tlb_flush_pgtable(tlb, address);
- pgtable_page_dtor(page);
- pgtable_free_tlb(tlb, page, 0);
+ pgtable_page_dtor(table);
+ pgtable_free_tlb(tlb, page_address(table), 0);
}
#else /* if CONFIG_PPC_64K_PAGES */
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 14a65836369..419e7125cce 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -247,6 +247,10 @@ struct thread_struct {
unsigned long tm_orig_msr; /* Thread's MSR on ctx switch */
struct pt_regs ckpt_regs; /* Checkpointed registers */
+ unsigned long tm_tar;
+ unsigned long tm_ppr;
+ unsigned long tm_dscr;
+
/*
* Transactional FP and VSX 0-31 register set.
* NOTE: the sense of these is the opposite of the integer ckpt_regs!
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index bc2da154f68..ac204e02292 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -43,9 +43,6 @@ void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
extern void kdump_move_device_tree(void);
-/* CPU OF node matching */
-struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
-
/* cache lookup */
struct device_node *of_find_next_cache_node(struct device_node *np);
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 4a9e408644f..e1fb161252e 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -254,19 +254,28 @@
#define SPRN_HRMOR 0x139 /* Real mode offset register */
#define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */
#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
+/* HFSCR and FSCR bit numbers are the same */
+#define FSCR_TAR_LG 8 /* Enable Target Address Register */
+#define FSCR_EBB_LG 7 /* Enable Event Based Branching */
+#define FSCR_TM_LG 5 /* Enable Transactional Memory */
+#define FSCR_PM_LG 4 /* Enable prob/priv access to PMU SPRs */
+#define FSCR_BHRB_LG 3 /* Enable Branch History Rolling Buffer*/
+#define FSCR_DSCR_LG 2 /* Enable Data Stream Control Register */
+#define FSCR_VECVSX_LG 1 /* Enable VMX/VSX */
+#define FSCR_FP_LG 0 /* Enable Floating Point */
#define SPRN_FSCR 0x099 /* Facility Status & Control Register */
-#define FSCR_TAR (1 << (63-55)) /* Enable Target Address Register */
-#define FSCR_EBB (1 << (63-56)) /* Enable Event Based Branching */
-#define FSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */
+#define FSCR_TAR __MASK(FSCR_TAR_LG)
+#define FSCR_EBB __MASK(FSCR_EBB_LG)
+#define FSCR_DSCR __MASK(FSCR_DSCR_LG)
#define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */
-#define HFSCR_TAR (1 << (63-55)) /* Enable Target Address Register */
-#define HFSCR_EBB (1 << (63-56)) /* Enable Event Based Branching */
-#define HFSCR_TM (1 << (63-58)) /* Enable Transactional Memory */
-#define HFSCR_PM (1 << (63-60)) /* Enable prob/priv access to PMU SPRs */
-#define HFSCR_BHRB (1 << (63-59)) /* Enable Branch History Rolling Buffer*/
-#define HFSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */
-#define HFSCR_VECVSX (1 << (63-62)) /* Enable VMX/VSX */
-#define HFSCR_FP (1 << (63-63)) /* Enable Floating Point */
+#define HFSCR_TAR __MASK(FSCR_TAR_LG)
+#define HFSCR_EBB __MASK(FSCR_EBB_LG)
+#define HFSCR_TM __MASK(FSCR_TM_LG)
+#define HFSCR_PM __MASK(FSCR_PM_LG)
+#define HFSCR_BHRB __MASK(FSCR_BHRB_LG)
+#define HFSCR_DSCR __MASK(FSCR_DSCR_LG)
+#define HFSCR_VECVSX __MASK(FSCR_VECVSX_LG)
+#define HFSCR_FP __MASK(FSCR_FP_LG)
#define SPRN_TAR 0x32f /* Target Address Register */
#define SPRN_LPCR 0x13E /* LPAR Control Register */
#define LPCR_VPM0 (1ul << (63-0))
@@ -626,6 +635,7 @@
#define MMCR0_TRIGGER 0x00002000UL /* TRIGGER enable */
#define MMCR0_PMAO 0x00000080UL /* performance monitor alert has occurred, set to 0 after handling exception */
#define MMCR0_SHRFC 0x00000040UL /* SHRre freeze conditions between threads */
+#define MMCR0_FC56 0x00000010UL /* freeze counters 5 and 6 */
#define MMCR0_FCTI 0x00000008UL /* freeze counters in tags inactive mode */
#define MMCR0_FCTA 0x00000004UL /* freeze counters in tags active mode */
#define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */
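The _LG macros record bit numbers and __MASK() turns them into masks, so each new definition expands to the same value as the old IBM-numbered form; since HFSCR and FSCR share the layout, one set of _LG constants serves both registers. For example, assuming __MASK(n) expands to (1UL << (n)) as it does elsewhere in these headers:

/* FSCR_TAR_LG == 8, so the new and old spellings agree: */
/* FSCR_TAR == __MASK(FSCR_TAR_LG) == (1UL << 8) == (1 << (63 - 55)) */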
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index ffbaabebcdc..48cfc858abd 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -145,6 +145,10 @@ extern void __cpu_die(unsigned int cpu);
#define smp_setup_cpu_maps()
static inline void inhibit_secondary_onlining(void) {}
static inline void uninhibit_secondary_onlining(void) {}
+static inline const struct cpumask *cpu_sibling_mask(int cpu)
+{
+ return cpumask_of(cpu);
+}
#endif /* CONFIG_SMP */
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 200d763a0a6..685ecc86aa8 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -15,6 +15,15 @@ extern struct task_struct *__switch_to(struct task_struct *,
struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
struct thread_struct *next);
+#ifdef CONFIG_PPC_BOOK3S_64
+static inline void save_tar(struct thread_struct *prev)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_207S))
+ prev->tar = mfspr(SPRN_TAR);
+}
+#else
+static inline void save_tar(struct thread_struct *prev) {}
+#endif
extern void giveup_fpu(struct task_struct *);
extern void load_up_fpu(void);
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 161ab662843..884b001bafc 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -22,7 +22,15 @@ struct device_node;
static inline int cpu_to_node(int cpu)
{
- return numa_cpu_lookup_table[cpu];
+ int nid;
+
+ nid = numa_cpu_lookup_table[cpu];
+
+ /*
+ * During early boot, the numa-cpu lookup table might not have been
+ * setup for all CPUs yet. In such cases, default to node 0.
+ */
+ return (nid < 0) ? 0 : nid;
}
#define parent_node(node) (node)
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index ee5b690a0be..52e5758ea36 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -764,6 +764,16 @@ int fix_alignment(struct pt_regs *regs)
nb = aligninfo[instr].len;
flags = aligninfo[instr].flags;
+ /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
+ if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
+ nb = 8;
+ flags = LD+SW;
+ } else if (IS_XFORM(instruction) &&
+ ((instruction >> 1) & 0x3ff) == 660) {
+ nb = 8;
+ flags = ST+SW;
+ }
+
/* Byteswap little endian loads and stores */
swiz = 0;
if (regs->msr & MSR_LE) {
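The magic numbers fall out of the instruction encoding: an X-form opcode keeps its 10-bit extended opcode in bits 21-30, which is exactly what (instruction >> 1) & 0x3ff extracts, and ldbrx/stdbrx carry extended opcodes 532 and 660. A small helper showing the same decode (illustrative only, not part of the patch):

/* Extract the 10-bit extended opcode of an X-form instruction. */
static unsigned int xform_xop(unsigned int instruction)
{
	return (instruction >> 1) & 0x3ff;	/* 532 == ldbrx, 660 == stdbrx */
}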
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 6f16ffafa6f..302886b77de 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -139,6 +139,9 @@ int main(void)
DEFINE(THREAD_TM_TFHAR, offsetof(struct thread_struct, tm_tfhar));
DEFINE(THREAD_TM_TEXASR, offsetof(struct thread_struct, tm_texasr));
DEFINE(THREAD_TM_TFIAR, offsetof(struct thread_struct, tm_tfiar));
+ DEFINE(THREAD_TM_TAR, offsetof(struct thread_struct, tm_tar));
+ DEFINE(THREAD_TM_PPR, offsetof(struct thread_struct, tm_ppr));
+ DEFINE(THREAD_TM_DSCR, offsetof(struct thread_struct, tm_dscr));
DEFINE(PT_CKPT_REGS, offsetof(struct thread_struct, ckpt_regs));
DEFINE(THREAD_TRANSACT_VR0, offsetof(struct thread_struct,
transact_vr[0]));
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
index 92c6b008dd2..b4437e8a7a8 100644
--- a/arch/powerpc/kernel/cacheinfo.c
+++ b/arch/powerpc/kernel/cacheinfo.c
@@ -788,6 +788,9 @@ static void remove_cache_dir(struct cache_dir *cache_dir)
{
remove_index_dirs(cache_dir);
+ /* Remove cache dir from sysfs */
+ kobject_del(cache_dir->kobj);
+
kobject_put(cache_dir->kobj);
kfree(cache_dir);
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 9ec3fe174cb..555ae67e408 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -108,17 +108,19 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
size_t csize, unsigned long offset, int userbuf)
{
void *vaddr;
+ phys_addr_t paddr;
if (!csize)
return 0;
csize = min_t(size_t, csize, PAGE_SIZE);
+ paddr = pfn << PAGE_SHIFT;
- if ((min_low_pfn < pfn) && (pfn < max_pfn)) {
- vaddr = __va(pfn << PAGE_SHIFT);
+ if (memblock_is_region_memory(paddr, csize)) {
+ vaddr = __va(paddr);
csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
} else {
- vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0);
+ vaddr = __ioremap(paddr, PAGE_SIZE, 0);
csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
iounmap(vaddr);
}
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 8741c854e03..38847767012 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -449,15 +449,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
#ifdef CONFIG_PPC_BOOK3S_64
BEGIN_FTR_SECTION
- /*
- * Back up the TAR across context switches. Note that the TAR is not
- * available for use in the kernel. (To provide this, the TAR should
- * be backed up/restored on exception entry/exit instead, and be in
- * pt_regs. FIXME, this should be in pt_regs anyway (for debug).)
- */
- mfspr r0,SPRN_TAR
- std r0,THREAD_TAR(r3)
-
/* Event based branch registers */
mfspr r0, SPRN_BESCR
std r0, THREAD_BESCR(r3)
@@ -584,9 +575,34 @@ BEGIN_FTR_SECTION
ld r7,DSCR_DEFAULT@toc(2)
ld r0,THREAD_DSCR(r4)
cmpwi r6,0
+ li r8, FSCR_DSCR
bne 1f
ld r0,0(r7)
-1: cmpd r0,r25
+ b 3f
+1:
+ BEGIN_FTR_SECTION_NESTED(70)
+ mfspr r6, SPRN_FSCR
+ or r6, r6, r8
+ mtspr SPRN_FSCR, r6
+ BEGIN_FTR_SECTION_NESTED(69)
+ mfspr r6, SPRN_HFSCR
+ or r6, r6, r8
+ mtspr SPRN_HFSCR, r6
+ END_FTR_SECTION_NESTED(CPU_FTR_HVMODE, CPU_FTR_HVMODE, 69)
+ b 4f
+ END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
+3:
+ BEGIN_FTR_SECTION_NESTED(70)
+ mfspr r6, SPRN_FSCR
+ andc r6, r6, r8
+ mtspr SPRN_FSCR, r6
+ BEGIN_FTR_SECTION_NESTED(69)
+ mfspr r6, SPRN_HFSCR
+ andc r6, r6, r8
+ mtspr SPRN_HFSCR, r6
+ END_FTR_SECTION_NESTED(CPU_FTR_HVMODE, CPU_FTR_HVMODE, 69)
+ END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
+4: cmpd r0,r25
beq 2f
mtspr SPRN_DSCR,r0
2:
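In C terms, the new assembly toggles the DSCR facility bit on >= 2.07S CPUs: a thread with its own DSCR gets FSCR_DSCR (and HFSCR_DSCR in hypervisor mode) set so userspace can write the SPR directly, while a thread on the default DSCR has the bit cleared so user accesses trap to the facility-unavailable handler added in traps.c below. A rough restatement, where uses_custom_dscr stands in for the "cmpwi r6,0; bne" test (a sketch, not the generated code):

if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
	if (uses_custom_dscr)
		fscr |= FSCR_DSCR;	/* allow direct mtspr from userspace */
	else
		fscr &= ~FSCR_DSCR;	/* default DSCR: user access traps */
}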
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 40e4a17c8ba..902ca3c6b4b 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -341,10 +341,17 @@ vsx_unavailable_pSeries_1:
EXCEPTION_PROLOG_0(PACA_EXGEN)
b vsx_unavailable_pSeries
+facility_unavailable_trampoline:
. = 0xf60
SET_SCRATCH0(r13)
EXCEPTION_PROLOG_0(PACA_EXGEN)
- b tm_unavailable_pSeries
+ b facility_unavailable_pSeries
+
+hv_facility_unavailable_trampoline:
+ . = 0xf80
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXGEN)
+ b facility_unavailable_hv
#ifdef CONFIG_CBE_RAS
STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
@@ -522,8 +529,10 @@ denorm_done:
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
STD_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
- STD_EXCEPTION_PSERIES_OOL(0xf60, tm_unavailable)
+ STD_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf60)
+ STD_EXCEPTION_HV_OOL(0xf82, facility_unavailable)
+ KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xf82)
/*
* An interrupt came in while soft-disabled. We set paca->irq_happened, then:
@@ -793,14 +802,10 @@ system_call_relon_pSeries:
STD_RELON_EXCEPTION_PSERIES(0x4d00, 0xd00, single_step)
. = 0x4e00
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXGEN)
- b h_data_storage_relon_hv
+ b . /* Can't happen, see v2.07 Book III-S section 6.5 */
. = 0x4e20
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXGEN)
- b h_instr_storage_relon_hv
+ b . /* Can't happen, see v2.07 Book III-S section 6.5 */
. = 0x4e40
SET_SCRATCH0(r13)
@@ -808,9 +813,7 @@ system_call_relon_pSeries:
b emulation_assist_relon_hv
. = 0x4e60
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXGEN)
- b hmi_exception_relon_hv
+ b . /* Can't happen, see v2.07 Book III-S section 6.5 */
. = 0x4e80
SET_SCRATCH0(r13)
@@ -835,11 +838,17 @@ vsx_unavailable_relon_pSeries_1:
EXCEPTION_PROLOG_0(PACA_EXGEN)
b vsx_unavailable_relon_pSeries
-tm_unavailable_relon_pSeries_1:
+facility_unavailable_relon_trampoline:
. = 0x4f60
SET_SCRATCH0(r13)
EXCEPTION_PROLOG_0(PACA_EXGEN)
- b tm_unavailable_relon_pSeries
+ b facility_unavailable_relon_pSeries
+
+hv_facility_unavailable_relon_trampoline:
+ . = 0x4f80
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXGEN)
+ b hv_facility_unavailable_relon_hv
STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
#ifdef CONFIG_PPC_DENORMALISATION
@@ -1165,36 +1174,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
bl .vsx_unavailable_exception
b .ret_from_except
- .align 7
- .globl tm_unavailable_common
-tm_unavailable_common:
- EXCEPTION_PROLOG_COMMON(0xf60, PACA_EXGEN)
- bl .save_nvgprs
- DISABLE_INTS
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl .tm_unavailable_exception
- b .ret_from_except
+ STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception)
+ STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, .facility_unavailable_exception)
.align 7
.globl __end_handlers
__end_handlers:
/* Equivalents to the above handlers for relocation-on interrupt vectors */
- STD_RELON_EXCEPTION_HV_OOL(0xe00, h_data_storage)
- KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe00)
- STD_RELON_EXCEPTION_HV_OOL(0xe20, h_instr_storage)
- KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe20)
STD_RELON_EXCEPTION_HV_OOL(0xe40, emulation_assist)
- KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe40)
- STD_RELON_EXCEPTION_HV_OOL(0xe60, hmi_exception)
- KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe60)
MASKABLE_RELON_EXCEPTION_HV_OOL(0xe80, h_doorbell)
- KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe80)
STD_RELON_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
- STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, tm_unavailable)
+ STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
+ STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable)
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index b61363d557b..192a3f562bd 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -467,6 +467,7 @@ _STATIC(__after_prom_start)
mtctr r8
bctr
+.balign 8
p_end: .llong _end - _stext
4: /* Now copy the rest of the kernel up to _end */
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index a949bdfc962..f0b47d1a6b0 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -176,7 +176,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
length_max = 512 ; /* 64 doublewords */
/* DAWR region can't cross 512 boundary */
if ((bp->attr.bp_addr >> 10) !=
- ((bp->attr.bp_addr + bp->attr.bp_len) >> 10))
+ ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 10))
return -EINVAL;
}
if (info->len >
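The -1 matters because the boundary test must look at the last byte the watchpoint covers, not the first byte past it. Worked example with the >> 10 (1KB) granularity used here: bp_addr = 0x3fc, bp_len = 4 covers bytes 0x3fc-0x3ff and crosses no boundary, yet:

/* old: (0x3fc >> 10) != ((0x3fc + 4) >> 10)      -> 0 != 1, falsely rejected  */
/* new: (0x3fc >> 10) != ((0x3fc + 4 - 1) >> 10)  -> 0 != 0, correctly accepted */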
@@ -250,6 +250,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args)
* we still need to single-step the instruction, but we don't
* generate an event.
*/
+ info->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;
if (!((bp->attr.bp_addr <= dar) &&
(dar - bp->attr.bp_addr < bp->attr.bp_len)))
info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index c0d0dbddfba..93d8d96840b 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -658,7 +658,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
/* number of bytes needed for the bitmap */
sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
- page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
+ page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
if (!page)
panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
tbl->it_map = page_address(page);
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index d92f3871e9c..e2a0a162299 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -35,7 +35,13 @@
#include <asm/vdso_datapage.h>
#include <asm/vio.h>
#include <asm/mmu.h>
+#include <asm/machdep.h>
+
+/*
+ * This isn't a module, but we expose it to userspace
+ * via /proc, so leave the definitions here.
+ */
#define MODULE_VERS "1.9"
#define MODULE_NAME "lparcfg"
@@ -418,7 +424,8 @@ static void parse_em_data(struct seq_file *m)
{
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
- if (plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)
+ if (firmware_has_feature(FW_FEATURE_LPAR) &&
+ plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)
seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]);
}
@@ -677,7 +684,6 @@ static int lparcfg_open(struct inode *inode, struct file *file)
}
static const struct file_operations lparcfg_fops = {
- .owner = THIS_MODULE,
.read = seq_read,
.write = lparcfg_write,
.open = lparcfg_open,
@@ -699,14 +705,4 @@ static int __init lparcfg_init(void)
}
return 0;
}
-
-static void __exit lparcfg_cleanup(void)
-{
- remove_proc_subtree("powerpc/lparcfg", NULL);
-}
-
-module_init(lparcfg_init);
-module_exit(lparcfg_cleanup);
-MODULE_DESCRIPTION("Interface for LPAR configuration data");
-MODULE_AUTHOR("Dave Engebretsen");
-MODULE_LICENSE("GPL");
+machine_device_initcall(pseries, lparcfg_init);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 076d1242507..7baa27b7abb 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -600,6 +600,16 @@ struct task_struct *__switch_to(struct task_struct *prev,
struct ppc64_tlb_batch *batch;
#endif
+ /* Back up the TAR across context switches.
+ * Note that the TAR is not available for use in the kernel. (To
+ * provide this, the TAR should be backed up/restored on exception
+ * entry/exit instead, and be in pt_regs. FIXME, this should be in
+ * pt_regs anyway (for debug).)
+ * Save the TAR here before we do treclaim/trecheckpoint as these
+ * will change the TAR.
+ */
+ save_tar(&prev->thread);
+
__switch_to_tm(prev);
#ifdef CONFIG_SMP
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 8b6f7a99cce..00610a34fb9 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -827,49 +827,10 @@ static int __init prom_reconfig_setup(void)
__initcall(prom_reconfig_setup);
#endif
-/* Find the device node for a given logical cpu number, also returns the cpu
- * local thread number (index in ibm,interrupt-server#s) if relevant and
- * asked for (non NULL)
- */
-struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
- int hardid;
- struct device_node *np;
-
- hardid = get_hard_smp_processor_id(cpu);
-
- for_each_node_by_type(np, "cpu") {
- const u32 *intserv;
- unsigned int plen, t;
-
- /* Check for ibm,ppc-interrupt-server#s. If it doesn't exist
- * fallback to "reg" property and assume no threads
- */
- intserv = of_get_property(np, "ibm,ppc-interrupt-server#s",
- &plen);
- if (intserv == NULL) {
- const u32 *reg = of_get_property(np, "reg", NULL);
- if (reg == NULL)
- continue;
- if (*reg == hardid) {
- if (thread)
- *thread = 0;
- return np;
- }
- } else {
- plen /= sizeof(u32);
- for (t = 0; t < plen; t++) {
- if (hardid == intserv[t]) {
- if (thread)
- *thread = t;
- return np;
- }
- }
- }
- }
- return NULL;
+ return (int)phys_id == get_hard_smp_processor_id(cpu);
}
-EXPORT_SYMBOL(of_get_cpu_node);
#if defined(CONFIG_DEBUG_FS) && defined(DEBUG)
static struct debugfs_blob_wrapper flat_dt_blob;
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 98c2fc19871..64f7bd5b1b0 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1449,7 +1449,9 @@ static long ppc_set_hwdebug(struct task_struct *child,
*/
if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE) {
len = bp_info->addr2 - bp_info->addr;
- } else if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
+ } else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
+ len = 1;
+ else {
ptrace_put_breakpoints(child);
return -EINVAL;
}
diff --git a/arch/powerpc/kernel/reloc_64.S b/arch/powerpc/kernel/reloc_64.S
index b47a0e1ab00..c712ecec13b 100644
--- a/arch/powerpc/kernel/reloc_64.S
+++ b/arch/powerpc/kernel/reloc_64.S
@@ -81,6 +81,7 @@ _GLOBAL(relocate)
6: blr
+.balign 8
p_dyn: .llong __dynamic_start - 0b
p_rela: .llong __rela_dyn_start - 0b
p_st: .llong _stext - 0b
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index e379d3fd169..389fb8077cc 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -76,7 +76,7 @@
#endif
int boot_cpuid = 0;
-int __initdata spinning_secondaries;
+int spinning_secondaries;
u64 ppc64_pft_size;
/* Pick defaults since we might want to patch instructions
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 201385c3a1a..7e9dff80e1d 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -407,7 +407,8 @@ inline unsigned long copy_transact_fpr_from_user(struct task_struct *task,
* altivec/spe instructions at some point.
*/
static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
- int sigret, int ctx_has_vsx_region)
+ struct mcontext __user *tm_frame, int sigret,
+ int ctx_has_vsx_region)
{
unsigned long msr = regs->msr;
@@ -441,6 +442,12 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
#endif /* CONFIG_ALTIVEC */
if (copy_fpr_to_user(&frame->mc_fregs, current))
return 1;
+
+ /*
+ * Clear the MSR VSX bit to indicate there is no valid state attached
+ * to this context, except in the specific case below where we set it.
+ */
+ msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
/*
* Copy VSR 0-31 upper half from thread_struct to local
@@ -475,6 +482,12 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
return 1;
+ /* We need to write 0 to the MSR top 32 bits in the tm frame so that we
+ * can check it on the restore to see if TM is active
+ */
+ if (tm_frame && __put_user(0, &tm_frame->mc_gregs[PT_MSR]))
+ return 1;
+
if (sigret) {
/* Set up the sigreturn trampoline: li r0,sigret; sc */
if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
@@ -747,7 +760,7 @@ static long restore_tm_user_regs(struct pt_regs *regs,
struct mcontext __user *tm_sr)
{
long err;
- unsigned long msr;
+ unsigned long msr, msr_hi;
#ifdef CONFIG_VSX
int i;
#endif
@@ -852,8 +865,11 @@ static long restore_tm_user_regs(struct pt_regs *regs,
tm_enable();
/* This loads the checkpointed FP/VEC state, if used */
tm_recheckpoint(&current->thread, msr);
- /* The task has moved into TM state S, so ensure MSR reflects this */
- regs->msr = (regs->msr & ~MSR_TS_MASK) | MSR_TS_S;
+ /* Get the top half of the MSR */
+ if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
+ return 1;
+ /* Pull in MSR TM from user context */
+ regs->msr = (regs->msr & ~MSR_TS_MASK) | ((msr_hi<<32) & MSR_TS_MASK);
/* This loads the speculative FP/VEC state, if used */
if (msr & MSR_FP) {
@@ -952,6 +968,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
{
struct rt_sigframe __user *rt_sf;
struct mcontext __user *frame;
+ struct mcontext __user *tm_frame = NULL;
void __user *addr;
unsigned long newsp = 0;
int sigret;
@@ -985,23 +1002,24 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ tm_frame = &rt_sf->uc_transact.uc_mcontext;
if (MSR_TM_ACTIVE(regs->msr)) {
- if (save_tm_user_regs(regs, &rt_sf->uc.uc_mcontext,
- &rt_sf->uc_transact.uc_mcontext, sigret))
+ if (save_tm_user_regs(regs, frame, tm_frame, sigret))
goto badframe;
}
else
#endif
- if (save_user_regs(regs, frame, sigret, 1))
+ {
+ if (save_user_regs(regs, frame, tm_frame, sigret, 1))
goto badframe;
+ }
regs->link = tramp;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (MSR_TM_ACTIVE(regs->msr)) {
if (__put_user((unsigned long)&rt_sf->uc_transact,
&rt_sf->uc.uc_link)
- || __put_user(to_user_ptr(&rt_sf->uc_transact.uc_mcontext),
- &rt_sf->uc_transact.uc_regs))
+ || __put_user((unsigned long)tm_frame, &rt_sf->uc_transact.uc_regs))
goto badframe;
}
else
@@ -1170,7 +1188,7 @@ long sys_swapcontext(struct ucontext __user *old_ctx,
mctx = (struct mcontext __user *)
((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
- || save_user_regs(regs, mctx, 0, ctx_has_vsx_region)
+ || save_user_regs(regs, mctx, NULL, 0, ctx_has_vsx_region)
|| put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
|| __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
return -EFAULT;
@@ -1233,7 +1251,7 @@ long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
goto bad;
- if (MSR_TM_SUSPENDED(msr_hi<<32)) {
+ if (MSR_TM_ACTIVE(msr_hi<<32)) {
/* We only recheckpoint on return if we're
* in a transaction.
*/
@@ -1392,6 +1410,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
{
struct sigcontext __user *sc;
struct sigframe __user *frame;
+ struct mcontext __user *tm_mctx = NULL;
unsigned long newsp = 0;
int sigret;
unsigned long tramp;
@@ -1425,6 +1444,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ tm_mctx = &frame->mctx_transact;
if (MSR_TM_ACTIVE(regs->msr)) {
if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
sigret))
@@ -1432,8 +1452,10 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
}
else
#endif
- if (save_user_regs(regs, &frame->mctx, sigret, 1))
+ {
+ if (save_user_regs(regs, &frame->mctx, tm_mctx, sigret, 1))
goto badframe;
+ }
regs->link = tramp;
@@ -1481,16 +1503,22 @@ badframe:
long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
struct pt_regs *regs)
{
+ struct sigframe __user *sf;
struct sigcontext __user *sc;
struct sigcontext sigctx;
struct mcontext __user *sr;
void __user *addr;
sigset_t set;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ struct mcontext __user *mcp, *tm_mcp;
+ unsigned long msr_hi;
+#endif
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
- sc = (struct sigcontext __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
+ sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
+ sc = &sf->sctx;
addr = sc;
if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
goto badframe;
@@ -1507,11 +1535,25 @@ long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
#endif
set_current_blocked(&set);
- sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
- addr = sr;
- if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
- || restore_user_regs(regs, sr, 1))
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ mcp = (struct mcontext __user *)&sf->mctx;
+ tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
+ if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
goto badframe;
+ if (MSR_TM_ACTIVE(msr_hi<<32)) {
+ if (!cpu_has_feature(CPU_FTR_TM))
+ goto badframe;
+ if (restore_tm_user_regs(regs, mcp, tm_mcp))
+ goto badframe;
+ } else
+#endif
+ {
+ sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
+ addr = sr;
+ if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
+ || restore_user_regs(regs, sr, 1))
+ goto badframe;
+ }
set_thread_flag(TIF_RESTOREALL);
return 0;
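The msr_hi shuffling exists because a 32-bit signal frame only has 32-bit register slots: the transactional-state bits (MSR_TS_MASK) live in the upper half of the 64-bit MSR, so the save side stores them (or an explicit 0) in the transactional frame's PT_MSR slot, and sigreturn shifts them back up to decide whether to recheckpoint. Schematically (restating the lines above, not new logic):

/* PT_MSR in the transactional frame carries the *top* 32 MSR bits */
if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
	goto badframe;
if (MSR_TM_ACTIVE(msr_hi << 32))	/* TS bits back in their 64-bit position */
	restore_tm_user_regs(regs, mcp, tm_mcp);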
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 345947367ec..35c20a1fb36 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -121,6 +121,12 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
flush_fp_to_thread(current);
/* copy fpr regs and fpscr */
err |= copy_fpr_to_user(&sc->fp_regs, current);
+
+ /*
+ * Clear the MSR VSX bit to indicate there is no valid state attached
+ * to this context, except in the specific case below where we set it.
+ */
+ msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
/*
* Copy VSX low doubleword to local buffer for formatting,
@@ -410,6 +416,10 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
/* get MSR separately, transfer the LE bit if doing signal return */
err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
+ /* pull in MSR TM from user context */
+ regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
+
+ /* pull in MSR LE from user context */
regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
/* The following non-GPR non-FPR non-VR state is also checkpointed: */
@@ -505,8 +515,6 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
tm_enable();
/* This loads the checkpointed FP/VEC state, if used */
tm_recheckpoint(&current->thread, msr);
- /* The task has moved into TM state S, so ensure MSR reflects this: */
- regs->msr = (regs->msr & ~MSR_TS_MASK) | __MASK(33);
/* This loads the speculative FP/VEC state, if used */
if (msr & MSR_FP) {
@@ -654,7 +662,7 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
goto badframe;
- if (MSR_TM_SUSPENDED(msr)) {
+ if (MSR_TM_ACTIVE(msr)) {
/* We recheckpoint on return. */
struct ucontext __user *uc_transact;
if (__get_user(uc_transact, &uc->uc_link))
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index e68a84568b8..a15fd1a0690 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -17,6 +17,7 @@
#include <asm/machdep.h>
#include <asm/smp.h>
#include <asm/pmc.h>
+#include <asm/firmware.h>
#include "cacheinfo.h"
@@ -179,15 +180,25 @@ SYSFS_PMCSETUP(spurr, SPRN_SPURR);
SYSFS_PMCSETUP(dscr, SPRN_DSCR);
SYSFS_PMCSETUP(pir, SPRN_PIR);
+/*
Let's only enable read for phyp resources and
enable write when needed with a separate function.
Let's be conservative and default to pseries.
+*/
static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
static DEVICE_ATTR(spurr, 0400, show_spurr, NULL);
static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr);
-static DEVICE_ATTR(purr, 0600, show_purr, store_purr);
+static DEVICE_ATTR(purr, 0400, show_purr, store_purr);
static DEVICE_ATTR(pir, 0400, show_pir, NULL);
unsigned long dscr_default = 0;
EXPORT_SYMBOL(dscr_default);
+static void add_write_permission_dev_attr(struct device_attribute *attr)
+{
+ attr->attr.mode |= 0200;
+}
+
static ssize_t show_dscr_default(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -394,8 +405,11 @@ static void __cpuinit register_cpu_online(unsigned int cpu)
if (cpu_has_feature(CPU_FTR_MMCRA))
device_create_file(s, &dev_attr_mmcra);
- if (cpu_has_feature(CPU_FTR_PURR))
+ if (cpu_has_feature(CPU_FTR_PURR)) {
+ if (!firmware_has_feature(FW_FEATURE_LPAR))
+ add_write_permission_dev_attr(&dev_attr_purr);
device_create_file(s, &dev_attr_purr);
+ }
if (cpu_has_feature(CPU_FTR_SPURR))
device_create_file(s, &dev_attr_spurr);
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 2da67e7a16d..f2abb219a17 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -79,6 +79,11 @@ _GLOBAL(tm_abort)
TABORT(R3)
blr
+ .section ".toc","aw"
+DSCR_DEFAULT:
+ .tc dscr_default[TC],dscr_default
+
+ .section ".text"
/* void tm_reclaim(struct thread_struct *thread,
* unsigned long orig_msr,
@@ -178,11 +183,18 @@ dont_backup_fp:
std r1, PACATMSCRATCH(r13)
ld r1, PACAR1(r13)
+ /* Store the PPR in r11 and reset it to a decent value */
+ std r11, GPR11(r1) /* Temporary stash */
+ mfspr r11, SPRN_PPR
+ HMT_MEDIUM
+
/* Now get some more GPRS free */
std r7, GPR7(r1) /* Temporary stash */
std r12, GPR12(r1) /* '' '' '' */
ld r12, STACK_PARAM(0)(r1) /* Param 0, thread_struct * */
+ std r11, THREAD_TM_PPR(r12) /* Store PPR and free r11 */
+
addi r7, r12, PT_CKPT_REGS /* Thread's ckpt_regs */
/* Make r7 look like an exception frame so that we
@@ -194,15 +206,19 @@ dont_backup_fp:
SAVE_GPR(0, r7) /* user r0 */
SAVE_GPR(2, r7) /* user r2 */
SAVE_4GPRS(3, r7) /* user r3-r6 */
- SAVE_4GPRS(8, r7) /* user r8-r11 */
+ SAVE_GPR(8, r7) /* user r8 */
+ SAVE_GPR(9, r7) /* user r9 */
+ SAVE_GPR(10, r7) /* user r10 */
ld r3, PACATMSCRATCH(r13) /* user r1 */
ld r4, GPR7(r1) /* user r7 */
- ld r5, GPR12(r1) /* user r12 */
- GET_SCRATCH0(6) /* user r13 */
+ ld r5, GPR11(r1) /* user r11 */
+ ld r6, GPR12(r1) /* user r12 */
+ GET_SCRATCH0(8) /* user r13 */
std r3, GPR1(r7)
std r4, GPR7(r7)
- std r5, GPR12(r7)
- std r6, GPR13(r7)
+ std r5, GPR11(r7)
+ std r6, GPR12(r7)
+ std r8, GPR13(r7)
SAVE_NVGPRS(r7) /* user r14-r31 */
@@ -224,6 +240,14 @@ dont_backup_fp:
std r5, _CCR(r7)
std r6, _XER(r7)
+
+ /* ******************** TAR, DSCR ********** */
+ mfspr r3, SPRN_TAR
+ mfspr r4, SPRN_DSCR
+
+ std r3, THREAD_TM_TAR(r12)
+ std r4, THREAD_TM_DSCR(r12)
+
/* MSR and flags: We don't change CRs, and we don't need to alter
* MSR.
*/
@@ -239,7 +263,7 @@ dont_backup_fp:
std r3, THREAD_TM_TFHAR(r12)
std r4, THREAD_TM_TFIAR(r12)
- /* AMR and PPR are checkpointed too, but are unsupported by Linux. */
+ /* AMR is checkpointed too, but is unsupported by Linux. */
/* Restore original MSR/IRQ state & clear TM mode */
ld r14, TM_FRAME_L0(r1) /* Orig MSR */
@@ -255,6 +279,12 @@ dont_backup_fp:
mtcr r4
mtlr r0
ld r2, 40(r1)
+
+ /* Load system default DSCR */
+ ld r4, DSCR_DEFAULT@toc(r2)
+ ld r0, 0(r4)
+ mtspr SPRN_DSCR, r0
+
blr
@@ -338,35 +368,51 @@ dont_restore_fp:
mtmsr r6 /* FP/Vec off again! */
restore_gprs:
+
/* ******************** CR,LR,CCR,MSR ********** */
- ld r3, _CTR(r7)
- ld r4, _LINK(r7)
- ld r5, _CCR(r7)
- ld r6, _XER(r7)
+ ld r4, _CTR(r7)
+ ld r5, _LINK(r7)
+ ld r6, _CCR(r7)
+ ld r8, _XER(r7)
- mtctr r3
- mtlr r4
- mtcr r5
- mtxer r6
+ mtctr r4
+ mtlr r5
+ mtcr r6
+ mtxer r8
+
+ /* ******************** TAR ******************** */
+ ld r4, THREAD_TM_TAR(r3)
+ mtspr SPRN_TAR, r4
+
+ /* Load up the PPR and DSCR in GPRs only at this stage */
+ ld r5, THREAD_TM_DSCR(r3)
+ ld r6, THREAD_TM_PPR(r3)
/* MSR and flags: We don't change CRs, and we don't need to alter
* MSR.
*/
REST_4GPRS(0, r7) /* GPR0-3 */
- REST_GPR(4, r7) /* GPR4-6 */
- REST_GPR(5, r7)
- REST_GPR(6, r7)
+ REST_GPR(4, r7) /* GPR4 */
REST_4GPRS(8, r7) /* GPR8-11 */
REST_2GPRS(12, r7) /* GPR12-13 */
REST_NVGPRS(r7) /* GPR14-31 */
- ld r7, GPR7(r7) /* GPR7 */
+ /* Load up PPR and DSCR here so we don't run with user values for long
+ */
+ mtspr SPRN_DSCR, r5
+ mtspr SPRN_PPR, r6
+
+ REST_GPR(5, r7) /* GPR5-7 */
+ REST_GPR(6, r7)
+ ld r7, GPR7(r7)
/* Commit register state as checkpointed state: */
TRECHKPT
+ HMT_MEDIUM
+
/* Our transactional state has now changed.
*
* Now just get out of here. Transactional (current) state will be
@@ -385,6 +431,12 @@ restore_gprs:
mtcr r4
mtlr r0
ld r2, 40(r1)
+
+ /* Load system default DSCR */
+ ld r4, DSCR_DEFAULT@toc(r2)
+ ld r0, 0(r4)
+ mtspr SPRN_DSCR, r0
+
blr
/* ****************************************************************** */
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index c0e5caf8ccc..88929b1f4f7 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -44,9 +44,7 @@
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/pmc.h>
-#ifdef CONFIG_PPC32
#include <asm/reg.h>
-#endif
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
@@ -1282,26 +1280,63 @@ void vsx_unavailable_exception(struct pt_regs *regs)
die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}
-void tm_unavailable_exception(struct pt_regs *regs)
+#ifdef CONFIG_PPC64
+void facility_unavailable_exception(struct pt_regs *regs)
{
+ static char *facility_strings[] = {
+ [FSCR_FP_LG] = "FPU",
+ [FSCR_VECVSX_LG] = "VMX/VSX",
+ [FSCR_DSCR_LG] = "DSCR",
+ [FSCR_PM_LG] = "PMU SPRs",
+ [FSCR_BHRB_LG] = "BHRB",
+ [FSCR_TM_LG] = "TM",
+ [FSCR_EBB_LG] = "EBB",
+ [FSCR_TAR_LG] = "TAR",
+ };
+ char *facility = "unknown";
+ u64 value;
+ u8 status;
+ bool hv;
+
+ hv = (regs->trap == 0xf80);
+ if (hv)
+ value = mfspr(SPRN_HFSCR);
+ else
+ value = mfspr(SPRN_FSCR);
+
+ status = value >> 56;
+ if (status == FSCR_DSCR_LG) {
+ /* User is accessing the DSCR. Set the inherit bit and allow
+ * the user to set it directly in future by setting via the
+ * H/FSCR DSCR bit.
+ */
+ current->thread.dscr_inherit = 1;
+ if (hv)
+ mtspr(SPRN_HFSCR, value | HFSCR_DSCR);
+ else
+ mtspr(SPRN_FSCR, value | FSCR_DSCR);
+ return;
+ }
+
+ if ((status < ARRAY_SIZE(facility_strings)) &&
+ facility_strings[status])
+ facility = facility_strings[status];
+
/* We restore the interrupt state now */
if (!arch_irq_disabled_regs(regs))
local_irq_enable();
- /* Currently we never expect a TMU exception. Catch
- * this and kill the process!
- */
- printk(KERN_EMERG "Unexpected TM unavailable exception at %lx "
- "(msr %lx)\n",
- regs->nip, regs->msr);
+ pr_err("%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n",
+ hv ? "Hypervisor " : "", facility, regs->nip, regs->msr);
if (user_mode(regs)) {
_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
return;
}
- die("Unexpected TM unavailable exception", regs, SIGABRT);
+ die("Unexpected facility unavailable exception", regs, SIGABRT);
}
+#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
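The single handler works for both interrupt vectors because the FSCR and HFSCR report the failing facility in their top byte: shifting right by 56 yields exactly the FSCR_*_LG bit numbers defined in reg.h earlier in this patch, which is what lets one string table decode either register. Schematically:

u64 value = hv ? mfspr(SPRN_HFSCR) : mfspr(SPRN_FSCR);
u8 status = value >> 56;	/* e.g. 2 -> "DSCR", 8 -> "TAR" */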
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 536016d792b..56d2e72c85d 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -1529,11 +1529,15 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
const char *cp;
dn = dev->of_node;
- if (!dn)
- return -ENODEV;
+ if (!dn) {
+ strcpy(buf, "\n");
+ return strlen(buf);
+ }
cp = of_get_property(dn, "compatible", NULL);
- if (!cp)
- return -ENODEV;
+ if (!cp) {
+ strcpy(buf, "\n");
+ return strlen(buf);
+ }
return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
}
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 654e479802f..f096e72262f 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -38,9 +38,6 @@ jiffies = jiffies_64 + 4;
#endif
SECTIONS
{
- . = 0;
- reloc_start = .;
-
. = KERNELBASE;
/*
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 5880dfb3107..b616e364dbe 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -473,11 +473,14 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
slb_v = vcpu->kvm->arch.vrma_slb_v;
}
+ preempt_disable();
/* Find the HPTE in the hash table */
index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
HPTE_V_VALID | HPTE_V_ABSENT);
- if (index < 0)
+ if (index < 0) {
+ preempt_enable();
return -ENOENT;
+ }
hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
v = hptep[0] & ~HPTE_V_HVLOCK;
gr = kvm->arch.revmap[index].guest_rpte;
@@ -485,6 +488,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
/* Unlock the HPTE */
asm volatile("lwsync" : : : "memory");
hptep[0] = v;
+ preempt_enable();
gpte->eaddr = eaddr;
gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 550f5928b39..102ad8a255f 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -82,10 +82,13 @@ void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
/* CPU points to the first thread of the core */
if (cpu != me && cpu >= 0 && cpu < nr_cpu_ids) {
+#ifdef CONFIG_KVM_XICS
int real_cpu = cpu + vcpu->arch.ptid;
if (paca[real_cpu].kvm_hstate.xics_phys)
xics_wake_cpu(real_cpu);
- else if (cpu_online(cpu))
+ else
+#endif
+ if (cpu_online(cpu))
smp_send_reschedule(cpu);
}
put_cpu();
@@ -1090,7 +1093,9 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
smp_wmb();
#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
if (vcpu->arch.ptid) {
+#ifdef CONFIG_KVM_XICS
xics_wake_cpu(cpu);
+#endif
++vc->n_woken;
}
#endif
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 6dcbb49105a..049b899e40e 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -724,6 +724,10 @@ static int slb_base_page_shift[4] = {
20, /* 1M, unsupported */
};
+/* When called from virtmode, this function must be protected by
+ * preempt_disable(); otherwise, holding HPTE_V_HVLOCK
+ * can trigger a deadlock.
+ */
long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
unsigned long valid)
{
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index b02f91e4c70..7bcd4d6e177 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1054,7 +1054,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
BEGIN_FTR_SECTION
mfspr r8, SPRN_DSCR
ld r7, HSTATE_DSCR(r13)
- std r8, VCPU_DSCR(r7)
+ std r8, VCPU_DSCR(r9)
mtspr SPRN_DSCR, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index 94c1dd46b83..a3a5cb8ee7e 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -19,6 +19,7 @@
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/debug.h>
+#include <asm/time.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c
index 6d6f153b6c1..c17600de7d5 100644
--- a/arch/powerpc/kvm/e500_mmu.c
+++ b/arch/powerpc/kvm/e500_mmu.c
@@ -127,7 +127,7 @@ static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
}
static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
- unsigned int eaddr, int as)
+ gva_t eaddr, int as)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
unsigned int victim, tsized;
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
index 167f72555d6..57a07206505 100644
--- a/arch/powerpc/lib/checksum_64.S
+++ b/arch/powerpc/lib/checksum_64.S
@@ -226,19 +226,35 @@ _GLOBAL(csum_partial)
blr
- .macro source
+ .macro srcnr
100:
.section __ex_table,"a"
.align 3
- .llong 100b,.Lsrc_error
+ .llong 100b,.Lsrc_error_nr
.previous
.endm
- .macro dest
+ .macro source
+150:
+ .section __ex_table,"a"
+ .align 3
+ .llong 150b,.Lsrc_error
+ .previous
+ .endm
+
+ .macro dstnr
200:
.section __ex_table,"a"
.align 3
- .llong 200b,.Ldest_error
+ .llong 200b,.Ldest_error_nr
+ .previous
+ .endm
+
+ .macro dest
+250:
+ .section __ex_table,"a"
+ .align 3
+ .llong 250b,.Ldest_error
.previous
.endm
@@ -269,16 +285,16 @@ _GLOBAL(csum_partial_copy_generic)
rldicl. r6,r3,64-1,64-2 /* r6 = (r3 & 0x3) >> 1 */
beq .Lcopy_aligned
- li r7,4
- sub r6,r7,r6
+ li r9,4
+ sub r6,r9,r6
mtctr r6
1:
-source; lhz r6,0(r3) /* align to doubleword */
+srcnr; lhz r6,0(r3) /* align to doubleword */
subi r5,r5,2
addi r3,r3,2
adde r0,r0,r6
-dest; sth r6,0(r4)
+dstnr; sth r6,0(r4)
addi r4,r4,2
bdnz 1b
@@ -392,10 +408,10 @@ dest; std r16,56(r4)
mtctr r6
3:
-source; ld r6,0(r3)
+srcnr; ld r6,0(r3)
addi r3,r3,8
adde r0,r0,r6
-dest; std r6,0(r4)
+dstnr; std r6,0(r4)
addi r4,r4,8
bdnz 3b
@@ -405,10 +421,10 @@ dest; std r6,0(r4)
srdi. r6,r5,2
beq .Lcopy_tail_halfword
-source; lwz r6,0(r3)
+srcnr; lwz r6,0(r3)
addi r3,r3,4
adde r0,r0,r6
-dest; stw r6,0(r4)
+dstnr; stw r6,0(r4)
addi r4,r4,4
subi r5,r5,4
@@ -416,10 +432,10 @@ dest; stw r6,0(r4)
srdi. r6,r5,1
beq .Lcopy_tail_byte
-source; lhz r6,0(r3)
+srcnr; lhz r6,0(r3)
addi r3,r3,2
adde r0,r0,r6
-dest; sth r6,0(r4)
+dstnr; sth r6,0(r4)
addi r4,r4,2
subi r5,r5,2
@@ -427,10 +443,10 @@ dest; sth r6,0(r4)
andi. r6,r5,1
beq .Lcopy_finish
-source; lbz r6,0(r3)
+srcnr; lbz r6,0(r3)
sldi r9,r6,8 /* Pad the byte out to 16 bits */
adde r0,r0,r9
-dest; stb r6,0(r4)
+dstnr; stb r6,0(r4)
.Lcopy_finish:
addze r0,r0 /* add in final carry */
@@ -440,6 +456,11 @@ dest; stb r6,0(r4)
blr
.Lsrc_error:
+ ld r14,STK_REG(R14)(r1)
+ ld r15,STK_REG(R15)(r1)
+ ld r16,STK_REG(R16)(r1)
+ addi r1,r1,STACKFRAMESIZE
+.Lsrc_error_nr:
cmpdi 0,r7,0
beqlr
li r6,-EFAULT
@@ -447,6 +468,11 @@ dest; stb r6,0(r4)
blr
.Ldest_error:
+ ld r14,STK_REG(R14)(r1)
+ ld r15,STK_REG(R15)(r1)
+ ld r16,STK_REG(R16)(r1)
+ addi r1,r1,STACKFRAMESIZE
+.Ldest_error_nr:
cmpdi 0,r8,0
beqlr
li r6,-EFAULT
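The reason for two flavours of each macro: csum_partial_copy_generic saves r14-r16 and allocates its stack frame only around the unrolled main loop, so a fault taken there must restore those registers and pop the frame before returning -EFAULT, while the head and tail copies (now tagged srcnr/dstnr, "nr" presumably for "no restore") have nothing to unwind. Conceptually, each macro just emits an exception-table entry:

/* what each macro emits, conceptually (not real code):
 *   { address_of_faulting_load_or_store, fixup_label }
 * source/dest -> .Lsrc_error/.Ldest_error: restore r14-r16, pop frame, then report
 * srcnr/dstnr -> .Lsrc_error_nr/.Ldest_error_nr: report straight away
 */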
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 88c0425dc0a..b7293bba006 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -27,9 +27,12 @@
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
+#include <asm/cputhreads.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/smp.h>
+#include <asm/cputhreads.h>
+#include <asm/topology.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
@@ -151,9 +154,22 @@ static void __init get_node_active_region(unsigned long pfn,
}
}
-static void map_cpu_to_node(int cpu, int node)
+static void reset_numa_cpu_lookup_table(void)
+{
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu)
+ numa_cpu_lookup_table[cpu] = -1;
+}
+
+static void update_numa_cpu_lookup_table(unsigned int cpu, int node)
{
numa_cpu_lookup_table[cpu] = node;
+}
+
+static void map_cpu_to_node(int cpu, int node)
+{
+ update_numa_cpu_lookup_table(cpu, node);
dbg("adding cpu %d to node %d\n", cpu, node);
@@ -518,11 +534,24 @@ static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
*/
static int __cpuinit numa_setup_cpu(unsigned long lcpu)
{
- int nid = 0;
- struct device_node *cpu = of_get_cpu_node(lcpu, NULL);
+ int nid;
+ struct device_node *cpu;
+
+ /*
+ * If a valid cpu-to-node mapping is already available, use it
+ * directly instead of querying the firmware, since it represents
+ * the most recent mapping notified to us by the platform (eg: VPHN).
+ */
+ if ((nid = numa_cpu_lookup_table[lcpu]) >= 0) {
+ map_cpu_to_node(lcpu, nid);
+ return nid;
+ }
+
+ cpu = of_get_cpu_node(lcpu, NULL);
if (!cpu) {
WARN_ON(1);
+ nid = 0;
goto out;
}
@@ -1065,6 +1094,7 @@ void __init do_init_bootmem(void)
*/
setup_node_to_cpumask_map();
+ reset_numa_cpu_lookup_table();
register_cpu_notifier(&ppc64_numa_nb);
cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
(void *)(unsigned long)boot_cpuid);
@@ -1319,7 +1349,8 @@ static int update_cpu_associativity_changes_mask(void)
}
}
if (changed) {
- cpumask_set_cpu(cpu, changes);
+ cpumask_or(changes, changes, cpu_sibling_mask(cpu));
+ cpu = cpu_last_thread_sibling(cpu);
}
}
@@ -1427,17 +1458,42 @@ static int update_cpu_topology(void *data)
if (!data)
return -EINVAL;
- cpu = get_cpu();
+ cpu = smp_processor_id();
for (update = data; update; update = update->next) {
if (cpu != update->cpu)
continue;
- unregister_cpu_under_node(update->cpu, update->old_nid);
unmap_cpu_from_node(update->cpu);
map_cpu_to_node(update->cpu, update->new_nid);
vdso_getcpu_init();
- register_cpu_under_node(update->cpu, update->new_nid);
+ }
+
+ return 0;
+}
+
+static int update_lookup_table(void *data)
+{
+ struct topology_update_data *update;
+
+ if (!data)
+ return -EINVAL;
+
+ /*
+ * Upon topology update, the numa-cpu lookup table needs to be updated
+ * for all threads in the core, including offline CPUs, to ensure that
+ * future hotplug operations respect the cpu-to-node associativity
+ * properly.
+ */
+ for (update = data; update; update = update->next) {
+ int nid, base, j;
+
+ nid = update->new_nid;
+ base = cpu_first_thread_sibling(update->cpu);
+
+ for (j = 0; j < threads_per_core; j++) {
+ update_numa_cpu_lookup_table(base + j, nid);
+ }
}
return 0;
@@ -1449,12 +1505,12 @@ static int update_cpu_topology(void *data)
*/
int arch_update_cpu_topology(void)
{
- unsigned int cpu, changed = 0;
+ unsigned int cpu, sibling, changed = 0;
struct topology_update_data *updates, *ud;
unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
cpumask_t updated_cpus;
struct device *dev;
- int weight, i = 0;
+ int weight, new_nid, i = 0;
weight = cpumask_weight(&cpu_associativity_changes_mask);
if (!weight)
@@ -1467,24 +1523,62 @@ int arch_update_cpu_topology(void)
cpumask_clear(&updated_cpus);
for_each_cpu(cpu, &cpu_associativity_changes_mask) {
- ud = &updates[i++];
- ud->cpu = cpu;
- vphn_get_associativity(cpu, associativity);
- ud->new_nid = associativity_to_nid(associativity);
-
- if (ud->new_nid < 0 || !node_online(ud->new_nid))
- ud->new_nid = first_online_node;
+ /*
+ * If siblings aren't flagged for changes, updates list
+ * will be too short. Skip on this update and set for next
+ * update.
+ */
+ if (!cpumask_subset(cpu_sibling_mask(cpu),
+ &cpu_associativity_changes_mask)) {
+ pr_info("Sibling bits not set for associativity "
+ "change, cpu%d\n", cpu);
+ cpumask_or(&cpu_associativity_changes_mask,
+ &cpu_associativity_changes_mask,
+ cpu_sibling_mask(cpu));
+ cpu = cpu_last_thread_sibling(cpu);
+ continue;
+ }
- ud->old_nid = numa_cpu_lookup_table[cpu];
- cpumask_set_cpu(cpu, &updated_cpus);
+ /* Use associativity from first thread for all siblings */
+ vphn_get_associativity(cpu, associativity);
+ new_nid = associativity_to_nid(associativity);
+ if (new_nid < 0 || !node_online(new_nid))
+ new_nid = first_online_node;
+
+ if (new_nid == numa_cpu_lookup_table[cpu]) {
+ cpumask_andnot(&cpu_associativity_changes_mask,
+ &cpu_associativity_changes_mask,
+ cpu_sibling_mask(cpu));
+ cpu = cpu_last_thread_sibling(cpu);
+ continue;
+ }
- if (i < weight)
- ud->next = &updates[i];
+ for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
+ ud = &updates[i++];
+ ud->cpu = sibling;
+ ud->new_nid = new_nid;
+ ud->old_nid = numa_cpu_lookup_table[sibling];
+ cpumask_set_cpu(sibling, &updated_cpus);
+ if (i < weight)
+ ud->next = &updates[i];
+ }
+ cpu = cpu_last_thread_sibling(cpu);
}
stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
+	/*
+	 * Update the numa-cpu lookup table with the new mappings, even for
+	 * offline CPUs. Doing this under stop_machine() keeps the global
+	 * table consistent while the other CPUs are quiesced.
+	 */
+ stop_machine(update_lookup_table, &updates[0],
+ cpumask_of(raw_smp_processor_id()));
+
for (ud = &updates[0]; ud; ud = ud->next) {
+ unregister_cpu_under_node(ud->cpu, ud->old_nid);
+ register_cpu_under_node(ud->cpu, ud->new_nid);
+
dev = get_cpu_device(ud->cpu);
if (dev)
kobject_uevent(&dev->kobj, KOBJ_CHANGE);
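
arch_update_cpu_topology() now works in two stop_machine() phases: the first remaps every CPU named in updated_cpus, the second rewrites the global lookup table from the calling CPU alone, and the sysfs re-registration then runs in normal context, where it may sleep. A minimal sketch of the stop_machine() calling convention, with hypothetical helper names:

    /* Sketch: stop_machine() runs the callback on each CPU in the mask
     * while every other CPU spins with interrupts disabled. */
    static int touch_global_table(void *data)
    {
            int *table = data;

            table[0] = 1;           /* safe: nothing else is running */
            return 0;
    }

    static int run_on_this_cpu_only(int *table)
    {
            return stop_machine(touch_global_table, table,
                                cpumask_of(raw_smp_processor_id()));
    }
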
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 3e99c149271..7ce9cf3b698 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -258,7 +258,7 @@ static bool slice_scan_available(unsigned long addr,
slice = GET_HIGH_SLICE_INDEX(addr);
*boundary_addr = (slice + end) ?
((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
- return !!(available.high_slices & (1u << slice));
+ return !!(available.high_slices & (1ul << slice));
}
}
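
This one-character fix matters because high_slices is a 64-bit mask once the address space grows past 32 high slices: for slice >= 32, `1u << slice` shifts a 32-bit value by at least its width, which is undefined. A worked example, assuming an LP64 build where unsigned long is 64 bits:

    /* Worked example: test availability of high slice 40. */
    static bool slice_40_available(u64 high_slices)
    {
            /* (1u << 40) is undefined: 32-bit constant, 40-bit shift. */
            return !!(high_slices & (1ul << 40));   /* tests bit 40 */
    }
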
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index c427ae36374..a012a9747cd 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -209,10 +209,11 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
}
PPC_DIVWU(r_A, r_A, r_X);
break;
- case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
+ case BPF_S_ALU_DIV_K: /* A /= K */
+ if (K == 1)
+ break;
PPC_LI32(r_scratch1, K);
- /* Top 32 bits of 64bit result -> A */
- PPC_MULHWU(r_A, r_A, r_scratch1);
+ PPC_DIVWU(r_A, r_A, r_scratch1);
break;
case BPF_S_ALU_AND_X:
ctx->seen |= SEEN_XREG;
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 29c6482890c..d3ee2e50a3a 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -75,6 +75,8 @@ static unsigned int freeze_events_kernel = MMCR0_FCS;
#define MMCR0_FCHV 0
#define MMCR0_PMCjCE MMCR0_PMCnCE
+#define MMCR0_FC56 0
+#define MMCR0_PMAO 0
#define SPRN_MMCRA SPRN_MMCR2
#define MMCRA_SAMPLE_ENABLE 0
@@ -852,7 +854,7 @@ static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
static void power_pmu_disable(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw;
- unsigned long flags;
+ unsigned long flags, val;
if (!ppmu)
return;
@@ -860,9 +862,6 @@ static void power_pmu_disable(struct pmu *pmu)
cpuhw = &__get_cpu_var(cpu_hw_events);
if (!cpuhw->disabled) {
- cpuhw->disabled = 1;
- cpuhw->n_added = 0;
-
/*
* Check if we ever enabled the PMU on this cpu.
*/
@@ -872,6 +871,21 @@ static void power_pmu_disable(struct pmu *pmu)
}
/*
+ * Set the 'freeze counters' bit, clear PMAO/FC56.
+ */
+ val = mfspr(SPRN_MMCR0);
+ val |= MMCR0_FC;
+ val &= ~(MMCR0_PMAO | MMCR0_FC56);
+
+	/*
+	 * The barrier makes sure the mtspr has been executed and
+	 * the PMU has actually frozen the events before we return.
+	 */
+ write_mmcr0(cpuhw, val);
+ mb();
+
+ /*
* Disable instruction sampling if it was enabled
*/
if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
@@ -880,14 +894,8 @@ static void power_pmu_disable(struct pmu *pmu)
mb();
}
- /*
- * Set the 'freeze counters' bit.
- * The barrier is to make sure the mtspr has been
- * executed and the PMU has frozen the events
- * before we return.
- */
- write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
- mb();
+ cpuhw->disabled = 1;
+ cpuhw->n_added = 0;
}
local_irq_restore(flags);
}
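
The reordering is deliberate: freeze the hardware with a read-modify-write of the live MMCR0 (clearing stale PMAO/FC56 bits on the way), let the barrier guarantee the freeze has taken effect, and only then flip the software state. A condensed sketch of the resulting order, reusing the identifiers above:

    /* Sketch: hardware frozen first, software state flipped last. */
    static void pmu_freeze_order(struct cpu_hw_events *cpuhw)
    {
            unsigned long val = mfspr(SPRN_MMCR0);  /* live register value */

            val |= MMCR0_FC;                        /* freeze all counters */
            val &= ~(MMCR0_PMAO | MMCR0_FC56);
            write_mmcr0(cpuhw, val);
            mb();                                   /* freeze is visible   */

            cpuhw->disabled = 1;
            cpuhw->n_added = 0;
    }
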
@@ -911,12 +919,18 @@ static void power_pmu_enable(struct pmu *pmu)
if (!ppmu)
return;
+
local_irq_save(flags);
+
cpuhw = &__get_cpu_var(cpu_hw_events);
- if (!cpuhw->disabled) {
- local_irq_restore(flags);
- return;
+ if (!cpuhw->disabled)
+ goto out;
+
+ if (cpuhw->n_events == 0) {
+ ppc_set_pmu_inuse(0);
+ goto out;
}
+
cpuhw->disabled = 0;
/*
@@ -928,8 +942,6 @@ static void power_pmu_enable(struct pmu *pmu)
if (!cpuhw->n_added) {
mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
- if (cpuhw->n_events == 0)
- ppc_set_pmu_inuse(0);
goto out_enable;
}
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index f7d1c4fff30..9aefaebedef 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -109,6 +109,16 @@
#define EVENT_IS_MARKED (EVENT_MARKED_MASK << EVENT_MARKED_SHIFT)
#define EVENT_PSEL_MASK 0xff /* PMCxSEL value */
+#define EVENT_VALID_MASK \
+ ((EVENT_THRESH_MASK << EVENT_THRESH_SHIFT) | \
+ (EVENT_SAMPLE_MASK << EVENT_SAMPLE_SHIFT) | \
+ (EVENT_CACHE_SEL_MASK << EVENT_CACHE_SEL_SHIFT) | \
+ (EVENT_PMC_MASK << EVENT_PMC_SHIFT) | \
+ (EVENT_UNIT_MASK << EVENT_UNIT_SHIFT) | \
+ (EVENT_COMBINE_MASK << EVENT_COMBINE_SHIFT) | \
+ (EVENT_MARKED_MASK << EVENT_MARKED_SHIFT) | \
+ EVENT_PSEL_MASK)
+
/* MMCRA IFM bits - POWER8 */
#define POWER8_MMCRA_IFM1 0x0000000040000000UL
#define POWER8_MMCRA_IFM2 0x0000000080000000UL
@@ -184,6 +194,7 @@
#define MMCR1_UNIT_SHIFT(pmc) (60 - (4 * ((pmc) - 1)))
#define MMCR1_COMBINE_SHIFT(pmc) (35 - ((pmc) - 1))
#define MMCR1_PMCSEL_SHIFT(pmc) (24 - (((pmc) - 1)) * 8)
+#define MMCR1_FAB_SHIFT 36
#define MMCR1_DC_QUAL_SHIFT 47
#define MMCR1_IC_QUAL_SHIFT 46
@@ -212,6 +223,9 @@ static int power8_get_constraint(u64 event, unsigned long *maskp, unsigned long
mask = value = 0;
+ if (event & ~EVENT_VALID_MASK)
+ return -1;
+
pmc = (event >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
unit = (event >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK;
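
With the new mask, any raw event carrying bits outside the defined fields is rejected before constraint matching starts. A sketch with a hypothetical event value:

    /* Sketch: a stray high bit makes the event unschedulable. */
    static int event_sanity(u64 event)
    {
            /* e.g. event = 0x1e | (1ull << 63): bit 63 is in no field */
            if (event & ~EVENT_VALID_MASK)
                    return -1;      /* rejected up front */
            return 0;
    }
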
@@ -354,8 +368,8 @@ static int power8_compute_mmcr(u64 event[], int n_ev,
* the threshold bits are used for the match value.
*/
if (event_is_fab_match(event[i])) {
- mmcr1 |= (event[i] >> EVENT_THR_CTL_SHIFT) &
- EVENT_THR_CTL_MASK;
+ mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) &
+ EVENT_THR_CTL_MASK) << MMCR1_FAB_SHIFT;
} else {
val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
mmcra |= val << MMCRA_THR_CTL_SHIFT;
@@ -378,6 +392,10 @@ static int power8_compute_mmcr(u64 event[], int n_ev,
if (pmc_inuse & 0x7c)
mmcr[0] |= MMCR0_PMCjCE;
+ /* If we're not using PMC 5 or 6, freeze them */
+ if (!(pmc_inuse & 0x60))
+ mmcr[0] |= MMCR0_FC56;
+
mmcr[1] = mmcr1;
mmcr[2] = mmcra;
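
For fab-match events the threshold-control bits carry the match value, and that value belongs at bit 36 of MMCR1; the old code OR-ed it into the low bits of the register, programming the wrong field. A worked example with a match value of 0x5:

    /* Worked example: the fab match value must land at MMCR1 bit 36. */
    static u64 place_fab_match(u64 event)
    {
            u64 thr = (event >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;

            return thr << MMCR1_FAB_SHIFT;  /* 0x5 -> 0x0000005000000000 */
    }
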
diff --git a/arch/powerpc/platforms/52xx/Kconfig b/arch/powerpc/platforms/52xx/Kconfig
index 90f4496017e..af54174801f 100644
--- a/arch/powerpc/platforms/52xx/Kconfig
+++ b/arch/powerpc/platforms/52xx/Kconfig
@@ -57,5 +57,5 @@ config PPC_MPC5200_BUGFIX
config PPC_MPC5200_LPBFIFO
tristate "MPC5200 LocalPlus bus FIFO driver"
- depends on PPC_MPC52xx
+ depends on PPC_MPC52xx && PPC_BESTCOMM
select PPC_BESTCOMM_GEN_BD
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 9c9d15e4cdf..f75607c93e8 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -151,13 +151,23 @@ static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
rid_end = pe->rid + 1;
}
- /* Associate PE in PELT */
+	/*
+	 * Associate the PE in the PELT. We also need to add the PE
+	 * to the corresponding PELT-V; otherwise, errors originating
+	 * from the PE might spread to and affect other PEs.
+	 */
rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
bcomp, dcomp, fcomp, OPAL_MAP_PE);
if (rc) {
pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
return -ENXIO;
}
+
+ rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number,
+ pe->pe_number, OPAL_ADD_PE_TO_DOMAIN);
+ if (rc)
+		pe_warn(pe, "OPAL error %ld adding self to PELTV\n", rc);
opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
@@ -441,6 +451,17 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev
set_iommu_table_base(&pdev->dev, &pe->tce32_table);
}
+static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ set_iommu_table_base(&dev->dev, &pe->tce32_table);
+ if (dev->subordinate)
+ pnv_ioda_setup_bus_dma(pe, dev->subordinate);
+ }
+}
+
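
When a PE covers a whole bus rather than a single function, every device on that bus, and on any bus below a bridge, must inherit the PE's 32-bit TCE table, hence the recursion over dev->subordinate. The same traversal in isolation, with a hypothetical dev_info() standing in for the table assignment:

    /* Sketch: depth-first walk over a bus and its subordinate buses. */
    static void walk_bus(struct pci_bus *bus)
    {
            struct pci_dev *dev;

            list_for_each_entry(dev, &bus->devices, bus_list) {
                    dev_info(&dev->dev, "would inherit the PE TCE table\n");
                    if (dev->subordinate)           /* bridge: descend */
                            walk_bus(dev->subordinate);
            }
    }
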
static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl,
u64 *startp, u64 *endp)
{
@@ -596,6 +617,11 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
}
iommu_init_table(tbl, phb->hose->node);
+ if (pe->pdev)
+ set_iommu_table_base(&pe->pdev->dev, tbl);
+ else
+ pnv_ioda_setup_bus_dma(pe, pe->pbus);
+
return;
fail:
/* XXX Failure: Try to fallback to 64-bit only ? */
@@ -667,6 +693,11 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
}
iommu_init_table(tbl, phb->hose->node);
+ if (pe->pdev)
+ set_iommu_table_base(&pe->pdev->dev, tbl);
+ else
+ pnv_ioda_setup_bus_dma(pe, pe->pbus);
+
return;
fail:
if (pe->tce32_seg >= 0)
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index 217ca5c75b2..2882d614221 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -34,12 +34,7 @@
#include "offline_states.h"
/* This version can't take the spinlock, because it never returns */
-static struct rtas_args rtas_stop_self_args = {
- .token = RTAS_UNKNOWN_SERVICE,
- .nargs = 0,
- .nret = 1,
- .rets = &rtas_stop_self_args.args[0],
-};
+static int rtas_stop_self_token = RTAS_UNKNOWN_SERVICE;
static DEFINE_PER_CPU(enum cpu_state_vals, preferred_offline_state) =
CPU_STATE_OFFLINE;
@@ -92,15 +87,20 @@ void set_default_offline_state(int cpu)
static void rtas_stop_self(void)
{
- struct rtas_args *args = &rtas_stop_self_args;
+ struct rtas_args args = {
+ .token = cpu_to_be32(rtas_stop_self_token),
+		.nargs = cpu_to_be32(0),
+		.nret = cpu_to_be32(1),
+ .rets = &args.args[0],
+ };
local_irq_disable();
- BUG_ON(args->token == RTAS_UNKNOWN_SERVICE);
+ BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);
printk("cpu %u (hwid %u) Ready to die...\n",
smp_processor_id(), hard_smp_processor_id());
- enter_rtas(__pa(args));
+ enter_rtas(__pa(&args));
panic("Alas, I survived.\n");
}
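
Building the rtas_args on the stack gives each dying CPU a private argument block, where the old static block could be trampled when several CPUs were offlined concurrently; the token is byte-swapped because the RTAS argument buffer is big-endian by ABI. A sketch of the per-call pattern under those assumptions:

    /* Sketch: per-call RTAS arguments with a self-referential rets slot. */
    static void stop_self_sketch(void)
    {
            struct rtas_args args = {
                    .token = cpu_to_be32(rtas_stop_self_token),
                    .nargs = cpu_to_be32(0),
                    .nret  = cpu_to_be32(1),
                    .rets  = &args.args[0],  /* return value lands in args */
            };

            enter_rtas(__pa(&args));         /* physical address; no return */
    }
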
@@ -391,10 +391,10 @@ static int __init pseries_cpu_hotplug_init(void)
}
}
- rtas_stop_self_args.token = rtas_token("stop-self");
+ rtas_stop_self_token = rtas_token("stop-self");
qcss_tok = rtas_token("query-cpu-stopped-state");
- if (rtas_stop_self_args.token == RTAS_UNKNOWN_SERVICE ||
+ if (rtas_stop_self_token == RTAS_UNKNOWN_SERVICE ||
qcss_tok == RTAS_UNKNOWN_SERVICE) {
printk(KERN_INFO "CPU Hotplug not supported by firmware "
"- disabling.\n");
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index c11c8238797..54b998f2750 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -354,7 +354,7 @@ static int alloc_dispatch_log_kmem_cache(void)
}
early_initcall(alloc_dispatch_log_kmem_cache);
-static void pSeries_idle(void)
+static void pseries_lpar_idle(void)
{
	/* This calls into the cpuidle framework, whose pseries
	 * back-end driver enters the platform idle states
@@ -362,10 +362,22 @@ static void pSeries_idle(void)
if (cpuidle_idle_call()) {
/* On error, execute default handler
* to go into low thread priority and possibly
- * low power mode.
+		 * low power mode by ceding the processor to the hypervisor
*/
- HMT_low();
- HMT_very_low();
+
+ /* Indicate to hypervisor that we are idle. */
+ get_lppaca()->idle = 1;
+
+			/*
+			 * Yield the processor to the hypervisor. We return
+			 * when an external interrupt occurs (interrupts are
+			 * delivered before we get back here) or when another
+			 * processor prods us. By the time we return here,
+			 * external interrupts are enabled again.
+			 */
+ cede_processor();
+
+ get_lppaca()->idle = 0;
}
}
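
The new idle fallback replaces plain thread-priority hints with the shared-processor handshake: advertise idleness in the lppaca, cede the virtual processor to the hypervisor, and clear the flag on wakeup. Condensed:

    /* Sketch: the shared-processor idle handshake. */
    static void cede_idle_sketch(void)
    {
            get_lppaca()->idle = 1;  /* tell the hypervisor we have no work */
            cede_processor();        /* H_CEDE; interrupt or prod wakes us  */
            get_lppaca()->idle = 0;
    }
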
@@ -456,15 +468,14 @@ static void __init pSeries_setup_arch(void)
pSeries_nvram_init();
- if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
+ if (firmware_has_feature(FW_FEATURE_LPAR)) {
vpa_init(boot_cpuid);
- ppc_md.power_save = pSeries_idle;
- }
-
- if (firmware_has_feature(FW_FEATURE_LPAR))
+ ppc_md.power_save = pseries_lpar_idle;
ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
- else
+ } else {
+ /* No special idle routine */
ppc_md.enable_pmcs = power4_enable_pmcs;
+ }
ppc_md.pcibios_root_bridge_prepare = pseries_root_bridge_prepare;