Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/align.c  10
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c  3
-rw-r--r--  arch/powerpc/kernel/cacheinfo.c  3
-rw-r--r--  arch/powerpc/kernel/crash_dump.c  8
-rw-r--r--  arch/powerpc/kernel/entry_64.S  36
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S  57
-rw-r--r--  arch/powerpc/kernel/head_64.S  1
-rw-r--r--  arch/powerpc/kernel/hw_breakpoint.c  3
-rw-r--r--  arch/powerpc/kernel/iommu.c  2
-rw-r--r--  arch/powerpc/kernel/lparcfg.c  22
-rw-r--r--  arch/powerpc/kernel/process.c  10
-rw-r--r--  arch/powerpc/kernel/prom.c  43
-rw-r--r--  arch/powerpc/kernel/ptrace.c  4
-rw-r--r--  arch/powerpc/kernel/setup_64.c  2
-rw-r--r--  arch/powerpc/kernel/signal_32.c  76
-rw-r--r--  arch/powerpc/kernel/signal_64.c  14
-rw-r--r--  arch/powerpc/kernel/sysfs.c  18
-rw-r--r--  arch/powerpc/kernel/tm.S  88
-rw-r--r--  arch/powerpc/kernel/traps.c  55
-rw-r--r--  arch/powerpc/kernel/vio.c  12
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S  3
21 files changed, 311 insertions, 159 deletions
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index ee5b690a0bed..52e5758ea368 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -764,6 +764,16 @@ int fix_alignment(struct pt_regs *regs)
nb = aligninfo[instr].len;
flags = aligninfo[instr].flags;
+ /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
+ if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
+ nb = 8;
+ flags = LD+SW;
+ } else if (IS_XFORM(instruction) &&
+ ((instruction >> 1) & 0x3ff) == 660) {
+ nb = 8;
+ flags = ST+SW;
+ }
+
/* Byteswap little endian loads and stores */
swiz = 0;
if (regs->msr & MSR_LE) {
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 6f16ffafa6f0..302886b77de2 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -139,6 +139,9 @@ int main(void)
DEFINE(THREAD_TM_TFHAR, offsetof(struct thread_struct, tm_tfhar));
DEFINE(THREAD_TM_TEXASR, offsetof(struct thread_struct, tm_texasr));
DEFINE(THREAD_TM_TFIAR, offsetof(struct thread_struct, tm_tfiar));
+ DEFINE(THREAD_TM_TAR, offsetof(struct thread_struct, tm_tar));
+ DEFINE(THREAD_TM_PPR, offsetof(struct thread_struct, tm_ppr));
+ DEFINE(THREAD_TM_DSCR, offsetof(struct thread_struct, tm_dscr));
DEFINE(PT_CKPT_REGS, offsetof(struct thread_struct, ckpt_regs));
DEFINE(THREAD_TRANSACT_VR0, offsetof(struct thread_struct,
transact_vr[0]));
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
index 92c6b008dd2b..b4437e8a7a8f 100644
--- a/arch/powerpc/kernel/cacheinfo.c
+++ b/arch/powerpc/kernel/cacheinfo.c
@@ -788,6 +788,9 @@ static void remove_cache_dir(struct cache_dir *cache_dir)
{
remove_index_dirs(cache_dir);
+ /* Remove cache dir from sysfs */
+ kobject_del(cache_dir->kobj);
+
kobject_put(cache_dir->kobj);
kfree(cache_dir);
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 9ec3fe174cba..555ae67e4086 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -108,17 +108,19 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
size_t csize, unsigned long offset, int userbuf)
{
void *vaddr;
+ phys_addr_t paddr;
if (!csize)
return 0;
csize = min_t(size_t, csize, PAGE_SIZE);
+ paddr = pfn << PAGE_SHIFT;
- if ((min_low_pfn < pfn) && (pfn < max_pfn)) {
- vaddr = __va(pfn << PAGE_SHIFT);
+ if (memblock_is_region_memory(paddr, csize)) {
+ vaddr = __va(paddr);
csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
} else {
- vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0);
+ vaddr = __ioremap(paddr, PAGE_SIZE, 0);
csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
iounmap(vaddr);
}
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 8741c854e03d..38847767012d 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -449,15 +449,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
#ifdef CONFIG_PPC_BOOK3S_64
BEGIN_FTR_SECTION
- /*
- * Back up the TAR across context switches. Note that the TAR is not
- * available for use in the kernel. (To provide this, the TAR should
- * be backed up/restored on exception entry/exit instead, and be in
- * pt_regs. FIXME, this should be in pt_regs anyway (for debug).)
- */
- mfspr r0,SPRN_TAR
- std r0,THREAD_TAR(r3)
-
/* Event based branch registers */
mfspr r0, SPRN_BESCR
std r0, THREAD_BESCR(r3)
@@ -584,9 +575,34 @@ BEGIN_FTR_SECTION
ld r7,DSCR_DEFAULT@toc(2)
ld r0,THREAD_DSCR(r4)
cmpwi r6,0
+ li r8, FSCR_DSCR
bne 1f
ld r0,0(r7)
-1: cmpd r0,r25
+ b 3f
+1:
+ BEGIN_FTR_SECTION_NESTED(70)
+ mfspr r6, SPRN_FSCR
+ or r6, r6, r8
+ mtspr SPRN_FSCR, r6
+ BEGIN_FTR_SECTION_NESTED(69)
+ mfspr r6, SPRN_HFSCR
+ or r6, r6, r8
+ mtspr SPRN_HFSCR, r6
+ END_FTR_SECTION_NESTED(CPU_FTR_HVMODE, CPU_FTR_HVMODE, 69)
+ b 4f
+ END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
+3:
+ BEGIN_FTR_SECTION_NESTED(70)
+ mfspr r6, SPRN_FSCR
+ andc r6, r6, r8
+ mtspr SPRN_FSCR, r6
+ BEGIN_FTR_SECTION_NESTED(69)
+ mfspr r6, SPRN_HFSCR
+ andc r6, r6, r8
+ mtspr SPRN_HFSCR, r6
+ END_FTR_SECTION_NESTED(CPU_FTR_HVMODE, CPU_FTR_HVMODE, 69)
+ END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
+4: cmpd r0,r25
beq 2f
mtspr SPRN_DSCR,r0
2:
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 40e4a17c8ba0..902ca3c6b4b6 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -341,10 +341,17 @@ vsx_unavailable_pSeries_1:
EXCEPTION_PROLOG_0(PACA_EXGEN)
b vsx_unavailable_pSeries
+facility_unavailable_trampoline:
. = 0xf60
SET_SCRATCH0(r13)
EXCEPTION_PROLOG_0(PACA_EXGEN)
- b tm_unavailable_pSeries
+ b facility_unavailable_pSeries
+
+hv_facility_unavailable_trampoline:
+ . = 0xf80
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXGEN)
+ b facility_unavailable_hv
#ifdef CONFIG_CBE_RAS
STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
@@ -522,8 +529,10 @@ denorm_done:
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
STD_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
- STD_EXCEPTION_PSERIES_OOL(0xf60, tm_unavailable)
+ STD_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf60)
+ STD_EXCEPTION_HV_OOL(0xf82, facility_unavailable)
+ KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xf82)
/*
* An interrupt came in while soft-disabled. We set paca->irq_happened, then:
@@ -793,14 +802,10 @@ system_call_relon_pSeries:
STD_RELON_EXCEPTION_PSERIES(0x4d00, 0xd00, single_step)
. = 0x4e00
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXGEN)
- b h_data_storage_relon_hv
+ b . /* Can't happen, see v2.07 Book III-S section 6.5 */
. = 0x4e20
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXGEN)
- b h_instr_storage_relon_hv
+ b . /* Can't happen, see v2.07 Book III-S section 6.5 */
. = 0x4e40
SET_SCRATCH0(r13)
@@ -808,9 +813,7 @@ system_call_relon_pSeries:
b emulation_assist_relon_hv
. = 0x4e60
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXGEN)
- b hmi_exception_relon_hv
+ b . /* Can't happen, see v2.07 Book III-S section 6.5 */
. = 0x4e80
SET_SCRATCH0(r13)
@@ -835,11 +838,17 @@ vsx_unavailable_relon_pSeries_1:
EXCEPTION_PROLOG_0(PACA_EXGEN)
b vsx_unavailable_relon_pSeries
-tm_unavailable_relon_pSeries_1:
+facility_unavailable_relon_trampoline:
. = 0x4f60
SET_SCRATCH0(r13)
EXCEPTION_PROLOG_0(PACA_EXGEN)
- b tm_unavailable_relon_pSeries
+ b facility_unavailable_relon_pSeries
+
+hv_facility_unavailable_relon_trampoline:
+ . = 0x4f80
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXGEN)
+ b hv_facility_unavailable_relon_hv
STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
#ifdef CONFIG_PPC_DENORMALISATION
@@ -1165,36 +1174,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
bl .vsx_unavailable_exception
b .ret_from_except
- .align 7
- .globl tm_unavailable_common
-tm_unavailable_common:
- EXCEPTION_PROLOG_COMMON(0xf60, PACA_EXGEN)
- bl .save_nvgprs
- DISABLE_INTS
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl .tm_unavailable_exception
- b .ret_from_except
+ STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception)
+ STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, .facility_unavailable_exception)
.align 7
.globl __end_handlers
__end_handlers:
/* Equivalents to the above handlers for relocation-on interrupt vectors */
- STD_RELON_EXCEPTION_HV_OOL(0xe00, h_data_storage)
- KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe00)
- STD_RELON_EXCEPTION_HV_OOL(0xe20, h_instr_storage)
- KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe20)
STD_RELON_EXCEPTION_HV_OOL(0xe40, emulation_assist)
- KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe40)
- STD_RELON_EXCEPTION_HV_OOL(0xe60, hmi_exception)
- KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe60)
MASKABLE_RELON_EXCEPTION_HV_OOL(0xe80, h_doorbell)
- KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe80)
STD_RELON_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
- STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, tm_unavailable)
+ STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
+ STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable)
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index b61363d557b5..192a3f562bdb 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -467,6 +467,7 @@ _STATIC(__after_prom_start)
mtctr r8
bctr
+.balign 8
p_end: .llong _end - _stext
4: /* Now copy the rest of the kernel up to _end */
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index a949bdfc9623..f0b47d1a6b0e 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -176,7 +176,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
length_max = 512 ; /* 64 doublewords */
/* DAWR region can't cross 512 boundary */
if ((bp->attr.bp_addr >> 10) !=
- ((bp->attr.bp_addr + bp->attr.bp_len) >> 10))
+ ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 10))
return -EINVAL;
}
if (info->len >
@@ -250,6 +250,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args)
* we still need to single-step the instruction, but we don't
* generate an event.
*/
+ info->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;
if (!((bp->attr.bp_addr <= dar) &&
(dar - bp->attr.bp_addr < bp->attr.bp_len)))
info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index c0d0dbddfba1..93d8d96840b5 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -658,7 +658,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
/* number of bytes needed for the bitmap */
sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
- page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
+ page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
if (!page)
panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
tbl->it_map = page_address(page);
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index d92f3871e9cf..e2a0a162299b 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -35,7 +35,13 @@
#include <asm/vdso_datapage.h>
#include <asm/vio.h>
#include <asm/mmu.h>
+#include <asm/machdep.h>
+
+/*
+ * This isn't a module but we expose that to userspace
+ * via /proc so leave the definitions here
+ */
#define MODULE_VERS "1.9"
#define MODULE_NAME "lparcfg"
@@ -418,7 +424,8 @@ static void parse_em_data(struct seq_file *m)
{
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
- if (plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)
+ if (firmware_has_feature(FW_FEATURE_LPAR) &&
+ plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)
seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]);
}
@@ -677,7 +684,6 @@ static int lparcfg_open(struct inode *inode, struct file *file)
}
static const struct file_operations lparcfg_fops = {
- .owner = THIS_MODULE,
.read = seq_read,
.write = lparcfg_write,
.open = lparcfg_open,
@@ -699,14 +705,4 @@ static int __init lparcfg_init(void)
}
return 0;
}
-
-static void __exit lparcfg_cleanup(void)
-{
- remove_proc_subtree("powerpc/lparcfg", NULL);
-}
-
-module_init(lparcfg_init);
-module_exit(lparcfg_cleanup);
-MODULE_DESCRIPTION("Interface for LPAR configuration data");
-MODULE_AUTHOR("Dave Engebretsen");
-MODULE_LICENSE("GPL");
+machine_device_initcall(pseries, lparcfg_init);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 076d1242507a..7baa27b7abbe 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -600,6 +600,16 @@ struct task_struct *__switch_to(struct task_struct *prev,
struct ppc64_tlb_batch *batch;
#endif
+ /* Back up the TAR across context switches.
+ * Note that the TAR is not available for use in the kernel. (To
+ * provide this, the TAR should be backed up/restored on exception
+ * entry/exit instead, and be in pt_regs. FIXME, this should be in
+ * pt_regs anyway (for debug).)
+ * Save the TAR here before we do treclaim/trecheckpoint as these
+ * will change the TAR.
+ */
+ save_tar(&prev->thread);
+
__switch_to_tm(prev);
#ifdef CONFIG_SMP
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 8b6f7a99cce2..00610a34fb9a 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -827,49 +827,10 @@ static int __init prom_reconfig_setup(void)
__initcall(prom_reconfig_setup);
#endif
-/* Find the device node for a given logical cpu number, also returns the cpu
- * local thread number (index in ibm,interrupt-server#s) if relevant and
- * asked for (non NULL)
- */
-struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
- int hardid;
- struct device_node *np;
-
- hardid = get_hard_smp_processor_id(cpu);
-
- for_each_node_by_type(np, "cpu") {
- const u32 *intserv;
- unsigned int plen, t;
-
- /* Check for ibm,ppc-interrupt-server#s. If it doesn't exist
- * fallback to "reg" property and assume no threads
- */
- intserv = of_get_property(np, "ibm,ppc-interrupt-server#s",
- &plen);
- if (intserv == NULL) {
- const u32 *reg = of_get_property(np, "reg", NULL);
- if (reg == NULL)
- continue;
- if (*reg == hardid) {
- if (thread)
- *thread = 0;
- return np;
- }
- } else {
- plen /= sizeof(u32);
- for (t = 0; t < plen; t++) {
- if (hardid == intserv[t]) {
- if (thread)
- *thread = t;
- return np;
- }
- }
- }
- }
- return NULL;
+ return (int)phys_id == get_hard_smp_processor_id(cpu);
}
-EXPORT_SYMBOL(of_get_cpu_node);
#if defined(CONFIG_DEBUG_FS) && defined(DEBUG)
static struct debugfs_blob_wrapper flat_dt_blob;
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 98c2fc198712..64f7bd5b1b0f 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1449,7 +1449,9 @@ static long ppc_set_hwdebug(struct task_struct *child,
*/
if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE) {
len = bp_info->addr2 - bp_info->addr;
- } else if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
+ } else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
+ len = 1;
+ else {
ptrace_put_breakpoints(child);
return -EINVAL;
}
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index e379d3fd1694..389fb8077cc9 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -76,7 +76,7 @@
#endif
int boot_cpuid = 0;
-int __initdata spinning_secondaries;
+int spinning_secondaries;
u64 ppc64_pft_size;
/* Pick defaults since we might want to patch instructions
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 201385c3a1ae..7e9dff80e1dc 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -407,7 +407,8 @@ inline unsigned long copy_transact_fpr_from_user(struct task_struct *task,
* altivec/spe instructions at some point.
*/
static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
- int sigret, int ctx_has_vsx_region)
+ struct mcontext __user *tm_frame, int sigret,
+ int ctx_has_vsx_region)
{
unsigned long msr = regs->msr;
@@ -441,6 +442,12 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
#endif /* CONFIG_ALTIVEC */
if (copy_fpr_to_user(&frame->mc_fregs, current))
return 1;
+
+ /*
+ * Clear the MSR VSX bit to indicate there is no valid state attached
+ * to this context, except in the specific case below where we set it.
+ */
+ msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
/*
* Copy VSR 0-31 upper half from thread_struct to local
@@ -475,6 +482,12 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
return 1;
+ /* We need to write 0 to the MSR top 32 bits in the tm frame so that we
+ * can check it on the restore to see if TM is active
+ */
+ if (tm_frame && __put_user(0, &tm_frame->mc_gregs[PT_MSR]))
+ return 1;
+
if (sigret) {
/* Set up the sigreturn trampoline: li r0,sigret; sc */
if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
@@ -747,7 +760,7 @@ static long restore_tm_user_regs(struct pt_regs *regs,
struct mcontext __user *tm_sr)
{
long err;
- unsigned long msr;
+ unsigned long msr, msr_hi;
#ifdef CONFIG_VSX
int i;
#endif
@@ -852,8 +865,11 @@ static long restore_tm_user_regs(struct pt_regs *regs,
tm_enable();
/* This loads the checkpointed FP/VEC state, if used */
tm_recheckpoint(&current->thread, msr);
- /* The task has moved into TM state S, so ensure MSR reflects this */
- regs->msr = (regs->msr & ~MSR_TS_MASK) | MSR_TS_S;
+ /* Get the top half of the MSR */
+ if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
+ return 1;
+ /* Pull in MSR TM from user context */
+ regs->msr = (regs->msr & ~MSR_TS_MASK) | ((msr_hi<<32) & MSR_TS_MASK);
/* This loads the speculative FP/VEC state, if used */
if (msr & MSR_FP) {
@@ -952,6 +968,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
{
struct rt_sigframe __user *rt_sf;
struct mcontext __user *frame;
+ struct mcontext __user *tm_frame = NULL;
void __user *addr;
unsigned long newsp = 0;
int sigret;
@@ -985,23 +1002,24 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ tm_frame = &rt_sf->uc_transact.uc_mcontext;
if (MSR_TM_ACTIVE(regs->msr)) {
- if (save_tm_user_regs(regs, &rt_sf->uc.uc_mcontext,
- &rt_sf->uc_transact.uc_mcontext, sigret))
+ if (save_tm_user_regs(regs, frame, tm_frame, sigret))
goto badframe;
}
else
#endif
- if (save_user_regs(regs, frame, sigret, 1))
+ {
+ if (save_user_regs(regs, frame, tm_frame, sigret, 1))
goto badframe;
+ }
regs->link = tramp;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (MSR_TM_ACTIVE(regs->msr)) {
if (__put_user((unsigned long)&rt_sf->uc_transact,
&rt_sf->uc.uc_link)
- || __put_user(to_user_ptr(&rt_sf->uc_transact.uc_mcontext),
- &rt_sf->uc_transact.uc_regs))
+ || __put_user((unsigned long)tm_frame, &rt_sf->uc_transact.uc_regs))
goto badframe;
}
else
@@ -1170,7 +1188,7 @@ long sys_swapcontext(struct ucontext __user *old_ctx,
mctx = (struct mcontext __user *)
((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
- || save_user_regs(regs, mctx, 0, ctx_has_vsx_region)
+ || save_user_regs(regs, mctx, NULL, 0, ctx_has_vsx_region)
|| put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
|| __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
return -EFAULT;
@@ -1233,7 +1251,7 @@ long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
goto bad;
- if (MSR_TM_SUSPENDED(msr_hi<<32)) {
+ if (MSR_TM_ACTIVE(msr_hi<<32)) {
/* We only recheckpoint on return if we're
* transaction.
*/
@@ -1392,6 +1410,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
{
struct sigcontext __user *sc;
struct sigframe __user *frame;
+ struct mcontext __user *tm_mctx = NULL;
unsigned long newsp = 0;
int sigret;
unsigned long tramp;
@@ -1425,6 +1444,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ tm_mctx = &frame->mctx_transact;
if (MSR_TM_ACTIVE(regs->msr)) {
if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
sigret))
@@ -1432,8 +1452,10 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
}
else
#endif
- if (save_user_regs(regs, &frame->mctx, sigret, 1))
+ {
+ if (save_user_regs(regs, &frame->mctx, tm_mctx, sigret, 1))
goto badframe;
+ }
regs->link = tramp;
@@ -1481,16 +1503,22 @@ badframe:
long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
struct pt_regs *regs)
{
+ struct sigframe __user *sf;
struct sigcontext __user *sc;
struct sigcontext sigctx;
struct mcontext __user *sr;
void __user *addr;
sigset_t set;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ struct mcontext __user *mcp, *tm_mcp;
+ unsigned long msr_hi;
+#endif
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
- sc = (struct sigcontext __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
+ sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
+ sc = &sf->sctx;
addr = sc;
if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
goto badframe;
@@ -1507,11 +1535,25 @@ long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
#endif
set_current_blocked(&set);
- sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
- addr = sr;
- if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
- || restore_user_regs(regs, sr, 1))
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ mcp = (struct mcontext __user *)&sf->mctx;
+ tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
+ if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
goto badframe;
+ if (MSR_TM_ACTIVE(msr_hi<<32)) {
+ if (!cpu_has_feature(CPU_FTR_TM))
+ goto badframe;
+ if (restore_tm_user_regs(regs, mcp, tm_mcp))
+ goto badframe;
+ } else
+#endif
+ {
+ sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
+ addr = sr;
+ if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
+ || restore_user_regs(regs, sr, 1))
+ goto badframe;
+ }
set_thread_flag(TIF_RESTOREALL);
return 0;
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 345947367ec0..35c20a1fb365 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -121,6 +121,12 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
flush_fp_to_thread(current);
/* copy fpr regs and fpscr */
err |= copy_fpr_to_user(&sc->fp_regs, current);
+
+ /*
+ * Clear the MSR VSX bit to indicate there is no valid state attached
+ * to this context, except in the specific case below where we set it.
+ */
+ msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
/*
* Copy VSX low doubleword to local buffer for formatting,
@@ -410,6 +416,10 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
/* get MSR separately, transfer the LE bit if doing signal return */
err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
+ /* pull in MSR TM from user context */
+ regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
+
+ /* pull in MSR LE from user context */
regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
/* The following non-GPR non-FPR non-VR state is also checkpointed: */
@@ -505,8 +515,6 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
tm_enable();
/* This loads the checkpointed FP/VEC state, if used */
tm_recheckpoint(&current->thread, msr);
- /* The task has moved into TM state S, so ensure MSR reflects this: */
- regs->msr = (regs->msr & ~MSR_TS_MASK) | __MASK(33);
/* This loads the speculative FP/VEC state, if used */
if (msr & MSR_FP) {
@@ -654,7 +662,7 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
goto badframe;
- if (MSR_TM_SUSPENDED(msr)) {
+ if (MSR_TM_ACTIVE(msr)) {
/* We recheckpoint on return. */
struct ucontext __user *uc_transact;
if (__get_user(uc_transact, &uc->uc_link))
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index e68a84568b8b..a15fd1a0690c 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -17,6 +17,7 @@
#include <asm/machdep.h>
#include <asm/smp.h>
#include <asm/pmc.h>
+#include <asm/firmware.h>
#include "cacheinfo.h"
@@ -179,15 +180,25 @@ SYSFS_PMCSETUP(spurr, SPRN_SPURR);
SYSFS_PMCSETUP(dscr, SPRN_DSCR);
SYSFS_PMCSETUP(pir, SPRN_PIR);
+/*
+ Let's only enable read for phyp resources and
+ enable write when needed with a separate function.
+ Let's be conservative and default to pseries.
+*/
static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
static DEVICE_ATTR(spurr, 0400, show_spurr, NULL);
static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr);
-static DEVICE_ATTR(purr, 0600, show_purr, store_purr);
+static DEVICE_ATTR(purr, 0400, show_purr, store_purr);
static DEVICE_ATTR(pir, 0400, show_pir, NULL);
unsigned long dscr_default = 0;
EXPORT_SYMBOL(dscr_default);
+static void add_write_permission_dev_attr(struct device_attribute *attr)
+{
+ attr->attr.mode |= 0200;
+}
+
static ssize_t show_dscr_default(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -394,8 +405,11 @@ static void __cpuinit register_cpu_online(unsigned int cpu)
if (cpu_has_feature(CPU_FTR_MMCRA))
device_create_file(s, &dev_attr_mmcra);
- if (cpu_has_feature(CPU_FTR_PURR))
+ if (cpu_has_feature(CPU_FTR_PURR)) {
+ if (!firmware_has_feature(FW_FEATURE_LPAR))
+ add_write_permission_dev_attr(&dev_attr_purr);
device_create_file(s, &dev_attr_purr);
+ }
if (cpu_has_feature(CPU_FTR_SPURR))
device_create_file(s, &dev_attr_spurr);
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 2da67e7a16d5..f2abb219a17b 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -79,6 +79,11 @@ _GLOBAL(tm_abort)
TABORT(R3)
blr
+ .section ".toc","aw"
+DSCR_DEFAULT:
+ .tc dscr_default[TC],dscr_default
+
+ .section ".text"
/* void tm_reclaim(struct thread_struct *thread,
* unsigned long orig_msr,
@@ -178,11 +183,18 @@ dont_backup_fp:
std r1, PACATMSCRATCH(r13)
ld r1, PACAR1(r13)
+ /* Store the PPR in r11 and reset to decent value */
+ std r11, GPR11(r1) /* Temporary stash */
+ mfspr r11, SPRN_PPR
+ HMT_MEDIUM
+
/* Now get some more GPRS free */
std r7, GPR7(r1) /* Temporary stash */
std r12, GPR12(r1) /* '' '' '' */
ld r12, STACK_PARAM(0)(r1) /* Param 0, thread_struct * */
+ std r11, THREAD_TM_PPR(r12) /* Store PPR and free r11 */
+
addi r7, r12, PT_CKPT_REGS /* Thread's ckpt_regs */
/* Make r7 look like an exception frame so that we
@@ -194,15 +206,19 @@ dont_backup_fp:
SAVE_GPR(0, r7) /* user r0 */
SAVE_GPR(2, r7) /* user r2 */
SAVE_4GPRS(3, r7) /* user r3-r6 */
- SAVE_4GPRS(8, r7) /* user r8-r11 */
+ SAVE_GPR(8, r7) /* user r8 */
+ SAVE_GPR(9, r7) /* user r9 */
+ SAVE_GPR(10, r7) /* user r10 */
ld r3, PACATMSCRATCH(r13) /* user r1 */
ld r4, GPR7(r1) /* user r7 */
- ld r5, GPR12(r1) /* user r12 */
- GET_SCRATCH0(6) /* user r13 */
+ ld r5, GPR11(r1) /* user r11 */
+ ld r6, GPR12(r1) /* user r12 */
+ GET_SCRATCH0(8) /* user r13 */
std r3, GPR1(r7)
std r4, GPR7(r7)
- std r5, GPR12(r7)
- std r6, GPR13(r7)
+ std r5, GPR11(r7)
+ std r6, GPR12(r7)
+ std r8, GPR13(r7)
SAVE_NVGPRS(r7) /* user r14-r31 */
@@ -224,6 +240,14 @@ dont_backup_fp:
std r5, _CCR(r7)
std r6, _XER(r7)
+
+ /* ******************** TAR, DSCR ********** */
+ mfspr r3, SPRN_TAR
+ mfspr r4, SPRN_DSCR
+
+ std r3, THREAD_TM_TAR(r12)
+ std r4, THREAD_TM_DSCR(r12)
+
/* MSR and flags: We don't change CRs, and we don't need to alter
* MSR.
*/
@@ -239,7 +263,7 @@ dont_backup_fp:
std r3, THREAD_TM_TFHAR(r12)
std r4, THREAD_TM_TFIAR(r12)
- /* AMR and PPR are checkpointed too, but are unsupported by Linux. */
+ /* AMR is checkpointed too, but is unsupported by Linux. */
/* Restore original MSR/IRQ state & clear TM mode */
ld r14, TM_FRAME_L0(r1) /* Orig MSR */
@@ -255,6 +279,12 @@ dont_backup_fp:
mtcr r4
mtlr r0
ld r2, 40(r1)
+
+ /* Load system default DSCR */
+ ld r4, DSCR_DEFAULT@toc(r2)
+ ld r0, 0(r4)
+ mtspr SPRN_DSCR, r0
+
blr
@@ -338,35 +368,51 @@ dont_restore_fp:
mtmsr r6 /* FP/Vec off again! */
restore_gprs:
+
/* ******************** CR,LR,CCR,MSR ********** */
- ld r3, _CTR(r7)
- ld r4, _LINK(r7)
- ld r5, _CCR(r7)
- ld r6, _XER(r7)
+ ld r4, _CTR(r7)
+ ld r5, _LINK(r7)
+ ld r6, _CCR(r7)
+ ld r8, _XER(r7)
- mtctr r3
- mtlr r4
- mtcr r5
- mtxer r6
+ mtctr r4
+ mtlr r5
+ mtcr r6
+ mtxer r8
+
+ /* ******************** TAR ******************** */
+ ld r4, THREAD_TM_TAR(r3)
+ mtspr SPRN_TAR, r4
+
+ /* Load up the PPR and DSCR in GPRs only at this stage */
+ ld r5, THREAD_TM_DSCR(r3)
+ ld r6, THREAD_TM_PPR(r3)
/* MSR and flags: We don't change CRs, and we don't need to alter
* MSR.
*/
REST_4GPRS(0, r7) /* GPR0-3 */
- REST_GPR(4, r7) /* GPR4-6 */
- REST_GPR(5, r7)
- REST_GPR(6, r7)
+ REST_GPR(4, r7) /* GPR4 */
REST_4GPRS(8, r7) /* GPR8-11 */
REST_2GPRS(12, r7) /* GPR12-13 */
REST_NVGPRS(r7) /* GPR14-31 */
- ld r7, GPR7(r7) /* GPR7 */
+ /* Load up PPR and DSCR here so we don't run with user values for long
+ */
+ mtspr SPRN_DSCR, r5
+ mtspr SPRN_PPR, r6
+
+ REST_GPR(5, r7) /* GPR5-7 */
+ REST_GPR(6, r7)
+ ld r7, GPR7(r7)
/* Commit register state as checkpointed state: */
TRECHKPT
+ HMT_MEDIUM
+
/* Our transactional state has now changed.
*
* Now just get out of here. Transactional (current) state will be
@@ -385,6 +431,12 @@ restore_gprs:
mtcr r4
mtlr r0
ld r2, 40(r1)
+
+ /* Load system default DSCR */
+ ld r4, DSCR_DEFAULT@toc(r2)
+ ld r0, 0(r4)
+ mtspr SPRN_DSCR, r0
+
blr
/* ****************************************************************** */
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index c0e5caf8ccc7..88929b1f4f77 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -44,9 +44,7 @@
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/pmc.h>
-#ifdef CONFIG_PPC32
#include <asm/reg.h>
-#endif
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
@@ -1282,26 +1280,63 @@ void vsx_unavailable_exception(struct pt_regs *regs)
die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}
-void tm_unavailable_exception(struct pt_regs *regs)
+#ifdef CONFIG_PPC64
+void facility_unavailable_exception(struct pt_regs *regs)
{
+ static char *facility_strings[] = {
+ [FSCR_FP_LG] = "FPU",
+ [FSCR_VECVSX_LG] = "VMX/VSX",
+ [FSCR_DSCR_LG] = "DSCR",
+ [FSCR_PM_LG] = "PMU SPRs",
+ [FSCR_BHRB_LG] = "BHRB",
+ [FSCR_TM_LG] = "TM",
+ [FSCR_EBB_LG] = "EBB",
+ [FSCR_TAR_LG] = "TAR",
+ };
+ char *facility = "unknown";
+ u64 value;
+ u8 status;
+ bool hv;
+
+ hv = (regs->trap == 0xf80);
+ if (hv)
+ value = mfspr(SPRN_HFSCR);
+ else
+ value = mfspr(SPRN_FSCR);
+
+ status = value >> 56;
+ if (status == FSCR_DSCR_LG) {
+ /* User is accessing the DSCR. Set the inherit bit and allow
+ * the user to set it directly in future by setting via the
+ * H/FSCR DSCR bit.
+ */
+ current->thread.dscr_inherit = 1;
+ if (hv)
+ mtspr(SPRN_HFSCR, value | HFSCR_DSCR);
+ else
+ mtspr(SPRN_FSCR, value | FSCR_DSCR);
+ return;
+ }
+
+ if ((status < ARRAY_SIZE(facility_strings)) &&
+ facility_strings[status])
+ facility = facility_strings[status];
+
/* We restore the interrupt state now */
if (!arch_irq_disabled_regs(regs))
local_irq_enable();
- /* Currently we never expect a TMU exception. Catch
- * this and kill the process!
- */
- printk(KERN_EMERG "Unexpected TM unavailable exception at %lx "
- "(msr %lx)\n",
- regs->nip, regs->msr);
+ pr_err("%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n",
+ hv ? "Hypervisor " : "", facility, regs->nip, regs->msr);
if (user_mode(regs)) {
_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
return;
}
- die("Unexpected TM unavailable exception", regs, SIGABRT);
+ die("Unexpected facility unavailable exception", regs, SIGABRT);
}
+#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 536016d792ba..56d2e72c85de 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -1529,11 +1529,15 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
const char *cp;
dn = dev->of_node;
- if (!dn)
- return -ENODEV;
+ if (!dn) {
+ strcpy(buf, "\n");
+ return strlen(buf);
+ }
cp = of_get_property(dn, "compatible", NULL);
- if (!cp)
- return -ENODEV;
+ if (!cp) {
+ strcpy(buf, "\n");
+ return strlen(buf);
+ }
return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
}
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 654e479802f2..f096e72262f4 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -38,9 +38,6 @@ jiffies = jiffies_64 + 4;
#endif
SECTIONS
{
- . = 0;
- reloc_start = .;
-
. = KERNELBASE;
/*