Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/align.c             | 189
-rw-r--r--  arch/powerpc/kernel/cpu_setup_power4.S  |  17
-rw-r--r--  arch/powerpc/kernel/cputable.c          | 117
-rw-r--r--  arch/powerpc/kernel/crash.c             |  13
-rw-r--r--  arch/powerpc/kernel/crash_dump.c        |  11
-rw-r--r--  arch/powerpc/kernel/iomap.c             |   2
-rw-r--r--  arch/powerpc/kernel/iommu.c             |  14
-rw-r--r--  arch/powerpc/kernel/lparcfg.c           |   4
-rw-r--r--  arch/powerpc/kernel/machine_kexec_64.c  |  99
-rw-r--r--  arch/powerpc/kernel/misc_32.S           |   2
-rw-r--r--  arch/powerpc/kernel/misc_64.S           |   5
-rw-r--r--  arch/powerpc/kernel/nvram_64.c          |   2
-rw-r--r--  arch/powerpc/kernel/pci_64.c            |  47
-rw-r--r--  arch/powerpc/kernel/pci_dn.c            |   6
-rw-r--r--  arch/powerpc/kernel/pci_iommu.c         |  23
-rw-r--r--  arch/powerpc/kernel/proc_ppc64.c        |   2
-rw-r--r--  arch/powerpc/kernel/process.c           |  55
-rw-r--r--  arch/powerpc/kernel/prom.c              | 100
-rw-r--r--  arch/powerpc/kernel/prom_init.c         | 118
-rw-r--r--  arch/powerpc/kernel/prom_parse.c        |  25
-rw-r--r--  arch/powerpc/kernel/ptrace.c            |   2
-rw-r--r--  arch/powerpc/kernel/rtas-rtc.c          |  30
-rw-r--r--  arch/powerpc/kernel/rtas.c              |  85
-rw-r--r--  arch/powerpc/kernel/rtas_flash.c        |  25
-rw-r--r--  arch/powerpc/kernel/setup-common.c      |  20
-rw-r--r--  arch/powerpc/kernel/setup.h             |   3
-rw-r--r--  arch/powerpc/kernel/setup_32.c          |  18
-rw-r--r--  arch/powerpc/kernel/setup_64.c          |  31
-rw-r--r--  arch/powerpc/kernel/signal_32.c         |  23
-rw-r--r--  arch/powerpc/kernel/signal_64.c         |  16
-rw-r--r--  arch/powerpc/kernel/time.c              |  10
-rw-r--r--  arch/powerpc/kernel/traps.c             |   8
-rw-r--r--  arch/powerpc/kernel/udbg.c              |   7
-rw-r--r--  arch/powerpc/kernel/vdso.c              |  57
-rw-r--r--  arch/powerpc/kernel/vio.c               | 344
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S       |   5
36 files changed, 947 insertions(+), 588 deletions(-)
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index faaec9c6f78..4734b5de599 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -35,17 +35,19 @@ struct aligninfo {
#define INVALID { 0, 0 }
-#define LD 1 /* load */
-#define ST 2 /* store */
-#define SE 4 /* sign-extend value */
-#define F 8 /* to/from fp regs */
-#define U 0x10 /* update index register */
-#define M 0x20 /* multiple load/store */
-#define SW 0x40 /* byte swap int or ... */
-#define S 0x40 /* ... single-precision fp */
-#define SX 0x40 /* byte count in XER */
+/* Bits in the flags field */
+#define LD 0 /* load */
+#define ST 1 /* store */
+#define SE 2 /* sign-extend value */
+#define F 4 /* to/from fp regs */
+#define U 8 /* update index register */
+#define M 0x10 /* multiple load/store */
+#define SW 0x20 /* byte swap */
+#define S 0x40 /* single-precision fp or... */
+#define SX 0x40 /* ... byte count in XER */
#define HARD 0x80 /* string, stwcx. */
+/* DSISR bits reported for a DCBZ instruction: */
#define DCBZ 0x5f /* 8xx/82xx dcbz faults when cache not enabled */
#define SWAP(a, b) (t = (a), (a) = (b), (b) = t)
@@ -256,12 +258,16 @@ static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr)
#define REG_BYTE(rp, i) *((u8 *)(rp) + (i))
#endif
+#define SWIZ_PTR(p) ((unsigned char __user *)((p) ^ swiz))
+
static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
unsigned int reg, unsigned int nb,
- unsigned int flags, unsigned int instr)
+ unsigned int flags, unsigned int instr,
+ unsigned long swiz)
{
unsigned long *rptr;
- unsigned int nb0, i;
+ unsigned int nb0, i, bswiz;
+ unsigned long p;
/*
* We do not try to emulate 8 bytes multiple as they aren't really
@@ -280,9 +286,12 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
if (nb == 0)
return 1;
} else {
- if (__get_user(instr,
- (unsigned int __user *)regs->nip))
+ unsigned long pc = regs->nip ^ (swiz & 4);
+
+ if (__get_user(instr, (unsigned int __user *)pc))
return -EFAULT;
+ if (swiz == 0 && (flags & SW))
+ instr = cpu_to_le32(instr);
nb = (instr >> 11) & 0x1f;
if (nb == 0)
nb = 32;
@@ -300,7 +309,10 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
return -EFAULT; /* bad address */
rptr = &regs->gpr[reg];
- if (flags & LD) {
+ p = (unsigned long) addr;
+ bswiz = (flags & SW)? 3: 0;
+
+ if (!(flags & ST)) {
/*
* This zeroes the top 4 bytes of the affected registers
* in 64-bit mode, and also zeroes out any remaining
@@ -311,26 +323,28 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
memset(&regs->gpr[0], 0,
((nb0 + 3) / 4) * sizeof(unsigned long));
- for (i = 0; i < nb; ++i)
- if (__get_user(REG_BYTE(rptr, i), addr + i))
+ for (i = 0; i < nb; ++i, ++p)
+ if (__get_user(REG_BYTE(rptr, i ^ bswiz), SWIZ_PTR(p)))
return -EFAULT;
if (nb0 > 0) {
rptr = &regs->gpr[0];
addr += nb;
- for (i = 0; i < nb0; ++i)
- if (__get_user(REG_BYTE(rptr, i), addr + i))
+ for (i = 0; i < nb0; ++i, ++p)
+ if (__get_user(REG_BYTE(rptr, i ^ bswiz),
+ SWIZ_PTR(p)))
return -EFAULT;
}
} else {
- for (i = 0; i < nb; ++i)
- if (__put_user(REG_BYTE(rptr, i), addr + i))
+ for (i = 0; i < nb; ++i, ++p)
+ if (__put_user(REG_BYTE(rptr, i ^ bswiz), SWIZ_PTR(p)))
return -EFAULT;
if (nb0 > 0) {
rptr = &regs->gpr[0];
addr += nb;
- for (i = 0; i < nb0; ++i)
- if (__put_user(REG_BYTE(rptr, i), addr + i))
+ for (i = 0; i < nb0; ++i, ++p)
+ if (__put_user(REG_BYTE(rptr, i ^ bswiz),
+ SWIZ_PTR(p)))
return -EFAULT;
}
}
@@ -352,7 +366,7 @@ int fix_alignment(struct pt_regs *regs)
unsigned int reg, areg;
unsigned int dsisr;
unsigned char __user *addr;
- unsigned char __user *p;
+ unsigned long p, swiz;
int ret, t;
union {
u64 ll;
@@ -380,11 +394,15 @@ int fix_alignment(struct pt_regs *regs)
* let's make one up from the instruction
*/
if (cpu_has_feature(CPU_FTR_NODSISRALIGN)) {
- unsigned int real_instr;
- if (unlikely(__get_user(real_instr,
- (unsigned int __user *)regs->nip)))
+ unsigned long pc = regs->nip;
+
+ if (cpu_has_feature(CPU_FTR_PPC_LE) && (regs->msr & MSR_LE))
+ pc ^= 4;
+ if (unlikely(__get_user(instr, (unsigned int __user *)pc)))
return -EFAULT;
- dsisr = make_dsisr(real_instr);
+ if (cpu_has_feature(CPU_FTR_REAL_LE) && (regs->msr & MSR_LE))
+ instr = cpu_to_le32(instr);
+ dsisr = make_dsisr(instr);
}
/* extract the operation and registers from the dsisr */
@@ -397,6 +415,24 @@ int fix_alignment(struct pt_regs *regs)
nb = aligninfo[instr].len;
flags = aligninfo[instr].flags;
+ /* Byteswap little endian loads and stores */
+ swiz = 0;
+ if (regs->msr & MSR_LE) {
+ flags ^= SW;
+ /*
+ * So-called "PowerPC little endian" mode works by
+ * swizzling addresses rather than by actually doing
+ * any byte-swapping. To emulate this, we XOR each
+ * byte address with 7. We also byte-swap, because
+ * the processor's address swizzling depends on the
+ * operand size (it xors the address with 7 for bytes,
+ * 6 for halfwords, 4 for words, 0 for doublewords) but
+ * we will xor with 7 and load/store each byte separately.
+ */
+ if (cpu_has_feature(CPU_FTR_PPC_LE))
+ swiz = 7;
+ }
+
/* DAR has the operand effective address */
addr = (unsigned char __user *)regs->dar;
@@ -412,7 +448,8 @@ int fix_alignment(struct pt_regs *regs)
* function
*/
if (flags & M)
- return emulate_multiple(regs, addr, reg, nb, flags, instr);
+ return emulate_multiple(regs, addr, reg, nb,
+ flags, instr, swiz);
/* Verify the address of the operand */
if (unlikely(user_mode(regs) &&
@@ -431,51 +468,71 @@ int fix_alignment(struct pt_regs *regs)
/* If we are loading, get the data from user space, else
* get it from register values
*/
- if (flags & LD) {
+ if (!(flags & ST)) {
data.ll = 0;
ret = 0;
- p = addr;
+ p = (unsigned long) addr;
switch (nb) {
case 8:
- ret |= __get_user(data.v[0], p++);
- ret |= __get_user(data.v[1], p++);
- ret |= __get_user(data.v[2], p++);
- ret |= __get_user(data.v[3], p++);
+ ret |= __get_user(data.v[0], SWIZ_PTR(p++));
+ ret |= __get_user(data.v[1], SWIZ_PTR(p++));
+ ret |= __get_user(data.v[2], SWIZ_PTR(p++));
+ ret |= __get_user(data.v[3], SWIZ_PTR(p++));
case 4:
- ret |= __get_user(data.v[4], p++);
- ret |= __get_user(data.v[5], p++);
+ ret |= __get_user(data.v[4], SWIZ_PTR(p++));
+ ret |= __get_user(data.v[5], SWIZ_PTR(p++));
case 2:
- ret |= __get_user(data.v[6], p++);
- ret |= __get_user(data.v[7], p++);
+ ret |= __get_user(data.v[6], SWIZ_PTR(p++));
+ ret |= __get_user(data.v[7], SWIZ_PTR(p++));
if (unlikely(ret))
return -EFAULT;
}
- } else if (flags & F)
+ } else if (flags & F) {
data.dd = current->thread.fpr[reg];
- else
+ if (flags & S) {
+ /* Single-precision FP store requires conversion... */
+#ifdef CONFIG_PPC_FPU
+ preempt_disable();
+ enable_kernel_fp();
+ cvt_df(&data.dd, (float *)&data.v[4], &current->thread);
+ preempt_enable();
+#else
+ return 0;
+#endif
+ }
+ } else
data.ll = regs->gpr[reg];
- /* Perform other misc operations like sign extension, byteswap,
+ if (flags & SW) {
+ switch (nb) {
+ case 8:
+ SWAP(data.v[0], data.v[7]);
+ SWAP(data.v[1], data.v[6]);
+ SWAP(data.v[2], data.v[5]);
+ SWAP(data.v[3], data.v[4]);
+ break;
+ case 4:
+ SWAP(data.v[4], data.v[7]);
+ SWAP(data.v[5], data.v[6]);
+ break;
+ case 2:
+ SWAP(data.v[6], data.v[7]);
+ break;
+ }
+ }
+
+ /* Perform other misc operations like sign extension
* or floating point single precision conversion
*/
- switch (flags & ~U) {
+ switch (flags & ~(U|SW)) {
case LD+SE: /* sign extend */
if ( nb == 2 )
data.ll = data.x16.low16;
else /* nb must be 4 */
data.ll = data.x32.low32;
break;
- case LD+S: /* byte-swap */
- case ST+S:
- if (nb == 2) {
- SWAP(data.v[6], data.v[7]);
- } else {
- SWAP(data.v[4], data.v[7]);
- SWAP(data.v[5], data.v[6]);
- }
- break;
- /* Single-precision FP load and store require conversions... */
+ /* Single-precision FP load requires conversion... */
case LD+F+S:
#ifdef CONFIG_PPC_FPU
preempt_disable();
@@ -486,34 +543,24 @@ int fix_alignment(struct pt_regs *regs)
return 0;
#endif
break;
- case ST+F+S:
-#ifdef CONFIG_PPC_FPU
- preempt_disable();
- enable_kernel_fp();
- cvt_df(&data.dd, (float *)&data.v[4], &current->thread);
- preempt_enable();
-#else
- return 0;
-#endif
- break;
}
/* Store result to memory or update registers */
if (flags & ST) {
ret = 0;
- p = addr;
+ p = (unsigned long) addr;
switch (nb) {
case 8:
- ret |= __put_user(data.v[0], p++);
- ret |= __put_user(data.v[1], p++);
- ret |= __put_user(data.v[2], p++);
- ret |= __put_user(data.v[3], p++);
+ ret |= __put_user(data.v[0], SWIZ_PTR(p++));
+ ret |= __put_user(data.v[1], SWIZ_PTR(p++));
+ ret |= __put_user(data.v[2], SWIZ_PTR(p++));
+ ret |= __put_user(data.v[3], SWIZ_PTR(p++));
case 4:
- ret |= __put_user(data.v[4], p++);
- ret |= __put_user(data.v[5], p++);
+ ret |= __put_user(data.v[4], SWIZ_PTR(p++));
+ ret |= __put_user(data.v[5], SWIZ_PTR(p++));
case 2:
- ret |= __put_user(data.v[6], p++);
- ret |= __put_user(data.v[7], p++);
+ ret |= __put_user(data.v[6], SWIZ_PTR(p++));
+ ret |= __put_user(data.v[7], SWIZ_PTR(p++));
}
if (unlikely(ret))
return -EFAULT;
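
[Illustration, not part of the patch] The align.c changes above emulate "PowerPC
little endian" mode by fetching each byte through an address XORed with 7
(SWIZ_PTR) and then byte-reversing the assembled operand (the SW flag). A
minimal userspace sketch of that byte-at-a-time scheme; the buffer contents and
the helper name load32_swizzled() are made-up illustrations, not kernel code:

#include <stdint.h>
#include <stdio.h>

/* Fetch a 32-bit operand the way the emulation above does it. */
static uint32_t load32_swizzled(const unsigned char *mem, unsigned long ea)
{
        unsigned char v[4];
        unsigned long p = ea;
        int i;

        for (i = 0; i < 4; i++, p++)
                v[i] = mem[p ^ 7];              /* SWIZ_PTR(p) analogue */

        /* byte swap, as done for a 4-byte operand when SW is set */
        return (uint32_t)v[3] | (uint32_t)v[2] << 8 |
               (uint32_t)v[1] << 16 | (uint32_t)v[0] << 24;
}

int main(void)
{
        unsigned char mem[16] = { 0 };

        mem[5 ^ 7] = 0x12;
        mem[6 ^ 7] = 0x34;
        mem[7 ^ 7] = 0x56;
        mem[8 ^ 7] = 0x78;

        printf("0x%08x\n", load32_swizzled(mem, 5));    /* prints 0x12345678 */
        return 0;
}
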
diff --git a/arch/powerpc/kernel/cpu_setup_power4.S b/arch/powerpc/kernel/cpu_setup_power4.S
index b61d86e7ceb..271418308d5 100644
--- a/arch/powerpc/kernel/cpu_setup_power4.S
+++ b/arch/powerpc/kernel/cpu_setup_power4.S
@@ -73,23 +73,6 @@ _GLOBAL(__970_cpu_preinit)
isync
blr
-_GLOBAL(__setup_cpu_power4)
- blr
-
-_GLOBAL(__setup_cpu_be)
- /* Set large page sizes LP=0: 16MB, LP=1: 64KB */
- addi r3, 0, 0
- ori r3, r3, HID6_LB
- sldi r3, r3, 32
- nor r3, r3, r3
- mfspr r4, SPRN_HID6
- and r4, r4, r3
- addi r3, 0, 0x02000
- sldi r3, r3, 32
- or r4, r4, r3
- mtspr SPRN_HID6, r4
- blr
-
_GLOBAL(__setup_cpu_ppc970)
mfspr r0,SPRN_HID0
li r11,5 /* clear DOZE and SLEEP */
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 3f7182db9ed..dfe2fcfb20a 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -30,11 +30,7 @@ EXPORT_SYMBOL(cur_cpu_spec);
* part of the cputable though. That has to be fixed for both ppc32
* and ppc64
*/
-#ifdef CONFIG_PPC64
-extern void __setup_cpu_power3(unsigned long offset, struct cpu_spec* spec);
-extern void __setup_cpu_power4(unsigned long offset, struct cpu_spec* spec);
-extern void __setup_cpu_be(unsigned long offset, struct cpu_spec* spec);
-#else
+#ifdef CONFIG_PPC32
extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_604(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_750(unsigned long offset, struct cpu_spec* spec);
@@ -58,7 +54,8 @@ extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
#define COMMON_USER_POWER5_PLUS (COMMON_USER_PPC64 | PPC_FEATURE_POWER5_PLUS|\
PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP)
#define COMMON_USER_POWER6 (COMMON_USER_PPC64 | PPC_FEATURE_ARCH_2_05 |\
- PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP)
+ PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \
+ PPC_FEATURE_TRUE_LE)
#define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \
PPC_FEATURE_BOOKE)
@@ -78,11 +75,10 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00400000,
.cpu_name = "POWER3 (630)",
.cpu_features = CPU_FTRS_POWER3,
- .cpu_user_features = COMMON_USER_PPC64,
+ .cpu_user_features = COMMON_USER_PPC64|PPC_FEATURE_PPC_LE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
- .cpu_setup = __setup_cpu_power3,
.oprofile_cpu_type = "ppc64/power3",
.oprofile_type = PPC_OPROFILE_RS64,
.platform = "power3",
@@ -92,11 +88,10 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00410000,
.cpu_name = "POWER3 (630+)",
.cpu_features = CPU_FTRS_POWER3,
- .cpu_user_features = COMMON_USER_PPC64,
+ .cpu_user_features = COMMON_USER_PPC64|PPC_FEATURE_PPC_LE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
- .cpu_setup = __setup_cpu_power3,
.oprofile_cpu_type = "ppc64/power3",
.oprofile_type = PPC_OPROFILE_RS64,
.platform = "power3",
@@ -110,7 +105,6 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
- .cpu_setup = __setup_cpu_power3,
.oprofile_cpu_type = "ppc64/rs64",
.oprofile_type = PPC_OPROFILE_RS64,
.platform = "rs64",
@@ -124,7 +118,6 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
- .cpu_setup = __setup_cpu_power3,
.oprofile_cpu_type = "ppc64/rs64",
.oprofile_type = PPC_OPROFILE_RS64,
.platform = "rs64",
@@ -138,7 +131,6 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
- .cpu_setup = __setup_cpu_power3,
.oprofile_cpu_type = "ppc64/rs64",
.oprofile_type = PPC_OPROFILE_RS64,
.platform = "rs64",
@@ -152,7 +144,6 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
- .cpu_setup = __setup_cpu_power3,
.oprofile_cpu_type = "ppc64/rs64",
.oprofile_type = PPC_OPROFILE_RS64,
.platform = "rs64",
@@ -166,7 +157,6 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
- .cpu_setup = __setup_cpu_power4,
.oprofile_cpu_type = "ppc64/power4",
.oprofile_type = PPC_OPROFILE_POWER4,
.platform = "power4",
@@ -180,7 +170,6 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
- .cpu_setup = __setup_cpu_power4,
.oprofile_cpu_type = "ppc64/power4",
.oprofile_type = PPC_OPROFILE_POWER4,
.platform = "power4",
@@ -232,6 +221,7 @@ struct cpu_spec cpu_specs[] = {
PPC_FEATURE_HAS_ALTIVEC_COMP,
.icache_bsize = 128,
.dcache_bsize = 128,
+ .num_pmcs = 8,
.cpu_setup = __setup_cpu_ppc970,
.oprofile_cpu_type = "ppc64/970",
.oprofile_type = PPC_OPROFILE_POWER4,
@@ -246,9 +236,13 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
- .cpu_setup = __setup_cpu_power4,
.oprofile_cpu_type = "ppc64/power5",
.oprofile_type = PPC_OPROFILE_POWER4,
+ /* SIHV / SIPR bits are implemented on POWER4+ (GQ)
+ * and above but only works on POWER5 and above
+ */
+ .oprofile_mmcra_sihv = MMCRA_SIHV,
+ .oprofile_mmcra_sipr = MMCRA_SIPR,
.platform = "power5",
},
{ /* Power5 GS */
@@ -260,9 +254,10 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
- .cpu_setup = __setup_cpu_power4,
.oprofile_cpu_type = "ppc64/power5+",
.oprofile_type = PPC_OPROFILE_POWER4,
+ .oprofile_mmcra_sihv = MMCRA_SIHV,
+ .oprofile_mmcra_sipr = MMCRA_SIPR,
.platform = "power5+",
},
{ /* Power6 */
@@ -273,10 +268,13 @@ struct cpu_spec cpu_specs[] = {
.cpu_user_features = COMMON_USER_POWER6,
.icache_bsize = 128,
.dcache_bsize = 128,
- .num_pmcs = 6,
- .cpu_setup = __setup_cpu_power4,
+ .num_pmcs = 8,
.oprofile_cpu_type = "ppc64/power6",
.oprofile_type = PPC_OPROFILE_POWER4,
+ .oprofile_mmcra_sihv = POWER6_MMCRA_SIHV,
+ .oprofile_mmcra_sipr = POWER6_MMCRA_SIPR,
+ .oprofile_mmcra_clear = POWER6_MMCRA_THRM |
+ POWER6_MMCRA_OTHER,
.platform = "power6",
},
{ /* Cell Broadband Engine */
@@ -289,7 +287,6 @@ struct cpu_spec cpu_specs[] = {
PPC_FEATURE_SMT,
.icache_bsize = 128,
.dcache_bsize = 128,
- .cpu_setup = __setup_cpu_be,
.platform = "ppc-cell-be",
},
{ /* default match */
@@ -301,7 +298,6 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
- .cpu_setup = __setup_cpu_power4,
.platform = "power4",
}
#endif /* CONFIG_PPC64 */
@@ -323,7 +319,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00030000,
.cpu_name = "603",
.cpu_features = CPU_FTRS_603,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
@@ -334,7 +330,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00060000,
.cpu_name = "603e",
.cpu_features = CPU_FTRS_603,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
@@ -345,7 +341,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00070000,
.cpu_name = "603ev",
.cpu_features = CPU_FTRS_603,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
@@ -356,7 +352,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00040000,
.cpu_name = "604",
.cpu_features = CPU_FTRS_604,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 2,
@@ -368,7 +364,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00090000,
.cpu_name = "604e",
.cpu_features = CPU_FTRS_604,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -380,7 +376,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00090000,
.cpu_name = "604r",
.cpu_features = CPU_FTRS_604,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -392,7 +388,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x000a0000,
.cpu_name = "604ev",
.cpu_features = CPU_FTRS_604,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -404,7 +400,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00084202,
.cpu_name = "740/750",
.cpu_features = CPU_FTRS_740_NOTAU,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -416,7 +412,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00080100,
.cpu_name = "750CX",
.cpu_features = CPU_FTRS_750,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -428,7 +424,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00082200,
.cpu_name = "750CX",
.cpu_features = CPU_FTRS_750,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -440,7 +436,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00082210,
.cpu_name = "750CXe",
.cpu_features = CPU_FTRS_750,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -452,7 +448,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00083214,
.cpu_name = "750CXe",
.cpu_features = CPU_FTRS_750,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -464,7 +460,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00083000,
.cpu_name = "745/755",
.cpu_features = CPU_FTRS_750,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -476,7 +472,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x70000100,
.cpu_name = "750FX",
.cpu_features = CPU_FTRS_750FX1,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -488,7 +484,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x70000200,
.cpu_name = "750FX",
.cpu_features = CPU_FTRS_750FX2,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -500,7 +496,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x70000000,
.cpu_name = "750FX",
.cpu_features = CPU_FTRS_750FX,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -512,7 +508,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x70020000,
.cpu_name = "750GX",
.cpu_features = CPU_FTRS_750GX,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -524,7 +520,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00080000,
.cpu_name = "740/750",
.cpu_features = CPU_FTRS_740,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -536,7 +532,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x000c1101,
.cpu_name = "7400 (1.1)",
.cpu_features = CPU_FTRS_7400_NOTAU,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -548,7 +545,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x000c0000,
.cpu_name = "7400",
.cpu_features = CPU_FTRS_7400,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -560,7 +558,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x800c0000,
.cpu_name = "7410",
.cpu_features = CPU_FTRS_7400,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -572,7 +571,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x80000200,
.cpu_name = "7450",
.cpu_features = CPU_FTRS_7450_20,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -586,7 +586,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x80000201,
.cpu_name = "7450",
.cpu_features = CPU_FTRS_7450_21,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -600,7 +601,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x80000000,
.cpu_name = "7450",
.cpu_features = CPU_FTRS_7450_23,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -614,7 +616,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x80010100,
.cpu_name = "7455",
.cpu_features = CPU_FTRS_7455_1,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -628,7 +631,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x80010200,
.cpu_name = "7455",
.cpu_features = CPU_FTRS_7455_20,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -642,7 +646,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x80010000,
.cpu_name = "7455",
.cpu_features = CPU_FTRS_7455,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -656,7 +661,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x80020100,
.cpu_name = "7447/7457",
.cpu_features = CPU_FTRS_7447_10,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -670,7 +676,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x80020101,
.cpu_name = "7447/7457",
.cpu_features = CPU_FTRS_7447_10,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -684,7 +691,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x80020000,
.cpu_name = "7447/7457",
.cpu_features = CPU_FTRS_7447,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -698,7 +705,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x80030000,
.cpu_name = "7447A",
.cpu_features = CPU_FTRS_7447A,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -712,7 +720,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x80040000,
.cpu_name = "7448",
.cpu_features = CPU_FTRS_7447A,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 778f22fd85d..dbcb85994f4 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -22,6 +22,7 @@
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/init.h>
+#include <linux/irq.h>
#include <linux/types.h>
#include <asm/processor.h>
@@ -174,6 +175,8 @@ static void crash_kexec_prepare_cpus(void)
void default_machine_crash_shutdown(struct pt_regs *regs)
{
+ unsigned int irq;
+
/*
* This function is only called after the system
* has paniced or is otherwise in a critical state.
@@ -186,6 +189,16 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
*/
local_irq_disable();
+ for_each_irq(irq) {
+ struct irq_desc *desc = irq_descp(irq);
+
+ if (desc->status & IRQ_INPROGRESS)
+ desc->handler->end(irq);
+
+ if (!(desc->status & IRQ_DISABLED))
+ desc->handler->disable(irq);
+ }
+
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(1, 0);
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 764d0732971..371973be8d7 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -25,6 +25,11 @@
#define DBG(fmt...)
#endif
+void reserve_kdump_trampoline(void)
+{
+ lmb_reserve(0, KDUMP_RESERVE_LIMIT);
+}
+
static void __init create_trampoline(unsigned long addr)
{
/* The maximum range of a single instruction branch, is the current
@@ -39,11 +44,11 @@ static void __init create_trampoline(unsigned long addr)
create_branch(addr + 4, addr + PHYSICAL_START, 0);
}
-void __init kdump_setup(void)
+void __init setup_kdump_trampoline(void)
{
unsigned long i;
- DBG(" -> kdump_setup()\n");
+ DBG(" -> setup_kdump_trampoline()\n");
for (i = KDUMP_TRAMPOLINE_START; i < KDUMP_TRAMPOLINE_END; i += 8) {
create_trampoline(i);
@@ -52,7 +57,7 @@ void __init kdump_setup(void)
create_trampoline(__pa(system_reset_fwnmi) - PHYSICAL_START);
create_trampoline(__pa(machine_check_fwnmi) - PHYSICAL_START);
- DBG(" <- kdump_setup()\n");
+ DBG(" <- setup_kdump_trampoline()\n");
}
#ifdef CONFIG_PROC_VMCORE
diff --git a/arch/powerpc/kernel/iomap.c b/arch/powerpc/kernel/iomap.c
index fd8214caede..a13a93dfc65 100644
--- a/arch/powerpc/kernel/iomap.c
+++ b/arch/powerpc/kernel/iomap.c
@@ -106,8 +106,6 @@ EXPORT_SYMBOL(iowrite32_rep);
void __iomem *ioport_map(unsigned long port, unsigned int len)
{
- if (!_IO_IS_VALID(port))
- return NULL;
return (void __iomem *) (port+pci_io_base);
}
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 4eba60a3289..cef8cba8329 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -536,11 +536,12 @@ void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
* to the dma address (mapping) of the first page.
*/
void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
- dma_addr_t *dma_handle, unsigned long mask, gfp_t flag)
+ dma_addr_t *dma_handle, unsigned long mask, gfp_t flag, int node)
{
void *ret = NULL;
dma_addr_t mapping;
unsigned int npages, order;
+ struct page *page;
size = PAGE_ALIGN(size);
npages = size >> PAGE_SHIFT;
@@ -560,9 +561,10 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
return NULL;
/* Alloc enough pages (and possibly more) */
- ret = (void *)__get_free_pages(flag, order);
- if (!ret)
+ page = alloc_pages_node(node, flag, order);
+ if (!page)
return NULL;
+ ret = page_address(page);
memset(ret, 0, size);
/* Set up tces to cover the allocated range */
@@ -570,9 +572,9 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
mask >> PAGE_SHIFT, order);
if (mapping == DMA_ERROR_CODE) {
free_pages((unsigned long)ret, order);
- ret = NULL;
- } else
- *dma_handle = mapping;
+ return NULL;
+ }
+ *dma_handle = mapping;
return ret;
}
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 2cbde865d4f..c02deaab26c 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -521,10 +521,10 @@ static ssize_t lparcfg_write(struct file *file, const char __user * buf,
current_weight = (resource >> 5 * 8) & 0xFF;
- pr_debug("%s: current_entitled = %lu, current_weight = %lu\n",
+ pr_debug("%s: current_entitled = %lu, current_weight = %u\n",
__FUNCTION__, current_entitled, current_weight);
- pr_debug("%s: new_entitled = %lu, new_weight = %lu\n",
+ pr_debug("%s: new_entitled = %lu, new_weight = %u\n",
__FUNCTION__, *new_entitled_ptr, *new_weight_ptr);
retval = plpar_hcall_norets(H_SET_PPP, *new_entitled_ptr,
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index ee166c58664..a8fa04ef27c 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -21,6 +21,7 @@
#include <asm/machdep.h>
#include <asm/cacheflush.h>
#include <asm/paca.h>
+#include <asm/lmb.h>
#include <asm/mmu.h>
#include <asm/sections.h> /* _end */
#include <asm/prom.h>
@@ -335,7 +336,105 @@ static void __init export_htab_values(void)
of_node_put(node);
}
+static struct property crashk_base_prop = {
+ .name = "linux,crashkernel-base",
+ .length = sizeof(unsigned long),
+ .value = (unsigned char *)&crashk_res.start,
+};
+
+static unsigned long crashk_size;
+
+static struct property crashk_size_prop = {
+ .name = "linux,crashkernel-size",
+ .length = sizeof(unsigned long),
+ .value = (unsigned char *)&crashk_size,
+};
+
+static void __init export_crashk_values(void)
+{
+ struct device_node *node;
+ struct property *prop;
+
+ node = of_find_node_by_path("/chosen");
+ if (!node)
+ return;
+
+ /* There might be existing crash kernel properties, but we can't
+ * be sure what's in them, so remove them. */
+ prop = of_find_property(node, "linux,crashkernel-base", NULL);
+ if (prop)
+ prom_remove_property(node, prop);
+
+ prop = of_find_property(node, "linux,crashkernel-size", NULL);
+ if (prop)
+ prom_remove_property(node, prop);
+
+ if (crashk_res.start != 0) {
+ prom_add_property(node, &crashk_base_prop);
+ crashk_size = crashk_res.end - crashk_res.start + 1;
+ prom_add_property(node, &crashk_size_prop);
+ }
+
+ of_node_put(node);
+}
+
void __init kexec_setup(void)
{
export_htab_values();
+ export_crashk_values();
+}
+
+static int __init early_parse_crashk(char *p)
+{
+ unsigned long size;
+
+ if (!p)
+ return 1;
+
+ size = memparse(p, &p);
+
+ if (*p == '@')
+ crashk_res.start = memparse(p + 1, &p);
+ else
+ crashk_res.start = KDUMP_KERNELBASE;
+
+ crashk_res.end = crashk_res.start + size - 1;
+
+ return 0;
+}
+early_param("crashkernel", early_parse_crashk);
+
+void __init reserve_crashkernel(void)
+{
+ unsigned long size;
+
+ if (crashk_res.start == 0)
+ return;
+
+ /* We might have got these values via the command line or the
+ * device tree, either way sanitise them now. */
+
+ size = crashk_res.end - crashk_res.start + 1;
+
+ if (crashk_res.start != KDUMP_KERNELBASE)
+ printk("Crash kernel location must be 0x%x\n",
+ KDUMP_KERNELBASE);
+
+ crashk_res.start = KDUMP_KERNELBASE;
+ size = PAGE_ALIGN(size);
+ crashk_res.end = crashk_res.start + size - 1;
+
+ /* Crash kernel trumps memory limit */
+ if (memory_limit && memory_limit <= crashk_res.end) {
+ memory_limit = crashk_res.end + 1;
+ printk("Adjusted memory limit for crashkernel, now 0x%lx\n",
+ memory_limit);
+ }
+
+ lmb_reserve(crashk_res.start, size);
+}
+
+int overlaps_crashkernel(unsigned long start, unsigned long size)
+{
+ return (start + size) > crashk_res.start && start <= crashk_res.end;
}
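
[Illustration, not part of the patch] early_parse_crashk() above accepts
"crashkernel=<size>[@<base>]" from the command line via memparse() and falls
back to KDUMP_KERNELBASE (32 MB) when no base is given. A rough userspace
sketch of that parsing; toy_memparse() is a simplified stand-in for the
kernel's memparse(), and the example string is made up:

#include <stdio.h>
#include <stdlib.h>

static unsigned long toy_memparse(const char *s, char **end)
{
        unsigned long val = strtoul(s, end, 0);

        switch (**end) {                /* suffix multipliers, as in memparse() */
        case 'G': case 'g': val <<= 10; /* fall through */
        case 'M': case 'm': val <<= 10; /* fall through */
        case 'K': case 'k': val <<= 10; (*end)++;
        }
        return val;
}

int main(void)
{
        char *p = "128M@32M";           /* e.g. crashkernel=128M@32M */
        unsigned long size, start;

        size = toy_memparse(p, &p);
        start = (*p == '@') ? toy_memparse(p + 1, &p)
                            : 0x2000000;        /* KDUMP_KERNELBASE */

        printf("crashkernel: %lu bytes at 0x%lx-0x%lx\n",
               size, start, start + size - 1);
        return 0;
}
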
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index be982023409..01d3916c4cb 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -216,7 +216,7 @@ _GLOBAL(call_setup_cpu)
lwz r4,0(r4)
add r4,r4,r3
lwz r5,CPU_SPEC_SETUP(r4)
- cmpi 0,r5,0
+ cmpwi 0,r5,0
add r5,r5,r3
beqlr
mtctr r5
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 2778cce058e..e8883d42c43 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -482,7 +482,9 @@ _GLOBAL(identify_cpu)
sub r0,r3,r5
std r0,0(r4)
ld r4,CPU_SPEC_SETUP(r3)
+ cmpdi 0,r4,0
add r4,r4,r5
+ beqlr
ld r4,0(r4)
add r4,r4,r5
mtctr r4
@@ -768,9 +770,6 @@ _GLOBAL(giveup_altivec)
#endif /* CONFIG_ALTIVEC */
-_GLOBAL(__setup_cpu_power3)
- blr
-
_GLOBAL(execve)
li r0,__NR_execve
sc
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index ada50aa5b60..6960f090991 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -204,7 +204,7 @@ static void nvram_print_partitions(char * label)
printk(KERN_WARNING "indx\t\tsig\tchks\tlen\tname\n");
list_for_each(p, &nvram_part->partition) {
tmp_part = list_entry(p, struct nvram_partition, partition);
- printk(KERN_WARNING "%d \t%02x\t%02x\t%d\t%s\n",
+ printk(KERN_WARNING "%4d \t%02x\t%02x\t%d\t%s\n",
tmp_part->index, tmp_part->header.signature,
tmp_part->header.checksum, tmp_part->header.length,
tmp_part->header.name);
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 4c4449be81c..30a4e6a1368 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -42,14 +42,6 @@
unsigned long pci_probe_only = 1;
int pci_assign_all_buses = 0;
-/*
- * legal IO pages under MAX_ISA_PORT. This is to ensure we don't touch
- * devices we don't have access to.
- */
-unsigned long io_page_mask;
-
-EXPORT_SYMBOL(io_page_mask);
-
#ifdef CONFIG_PPC_MULTIPLATFORM
static void fixup_resource(struct resource *res, struct pci_dev *dev);
static void do_bus_setup(struct pci_bus *bus);
@@ -605,7 +597,7 @@ static int __init pcibios_init(void)
iSeries_pcibios_init();
#endif
- printk("PCI: Probing PCI hardware\n");
+ printk(KERN_DEBUG "PCI: Probing PCI hardware\n");
/* Scan all of the recorded PCI controllers. */
list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
@@ -630,14 +622,14 @@ static int __init pcibios_init(void)
/* Cache the location of the ISA bridge (if we have one) */
ppc64_isabridge_dev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
if (ppc64_isabridge_dev != NULL)
- printk("ISA bridge at %s\n", pci_name(ppc64_isabridge_dev));
+ printk(KERN_DEBUG "ISA bridge at %s\n", pci_name(ppc64_isabridge_dev));
#ifdef CONFIG_PPC_MULTIPLATFORM
/* map in PCI I/O space */
phbs_remap_io();
#endif
- printk("PCI: Probing PCI hardware done\n");
+ printk(KERN_DEBUG "PCI: Probing PCI hardware done\n");
return 0;
}
@@ -804,7 +796,7 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
else
prot |= _PAGE_GUARDED;
- printk("PCI map for %s:%lx, prot: %lx\n", pci_name(dev), rp->start,
+ printk(KERN_DEBUG "PCI map for %s:%lx, prot: %lx\n", pci_name(dev), rp->start,
prot);
return __pgprot(prot);
@@ -894,8 +886,8 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
return ret;
}
-#ifdef CONFIG_PPC_MULTIPLATFORM
-static ssize_t pci_show_devspec(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t pci_show_devspec(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev;
struct device_node *np;
@@ -907,13 +899,10 @@ static ssize_t pci_show_devspec(struct device *dev, struct device_attribute *att
return sprintf(buf, "%s", np->full_name);
}
static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
-#endif /* CONFIG_PPC_MULTIPLATFORM */
void pcibios_add_platform_entries(struct pci_dev *pdev)
{
-#ifdef CONFIG_PPC_MULTIPLATFORM
device_create_file(&pdev->dev, &dev_attr_devspec);
-#endif /* CONFIG_PPC_MULTIPLATFORM */
}
#ifdef CONFIG_PPC_MULTIPLATFORM
@@ -1104,8 +1093,6 @@ void __init pci_setup_phb_io(struct pci_controller *hose, int primary)
pci_process_ISA_OF_ranges(isa_dn, hose->io_base_phys,
hose->io_base_virt);
of_node_put(isa_dn);
- /* Allow all IO */
- io_page_mask = -1;
}
}
@@ -1212,7 +1199,7 @@ int remap_bus_range(struct pci_bus *bus)
return 1;
if (start_phys == 0)
return 1;
- printk("mapping IO %lx -> %lx, size: %lx\n", start_phys, start_virt, size);
+ printk(KERN_DEBUG "mapping IO %lx -> %lx, size: %lx\n", start_phys, start_virt, size);
if (__ioremap_explicit(start_phys, start_virt, size,
_PAGE_NO_CACHE | _PAGE_GUARDED))
return 1;
@@ -1232,27 +1219,13 @@ static void phbs_remap_io(void)
static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
{
struct pci_controller *hose = pci_bus_to_host(dev->bus);
- unsigned long start, end, mask, offset;
+ unsigned long offset;
if (res->flags & IORESOURCE_IO) {
offset = (unsigned long)hose->io_base_virt - pci_io_base;
- start = res->start += offset;
- end = res->end += offset;
-
- /* Need to allow IO access to pages that are in the
- ISA range */
- if (start < MAX_ISA_PORT) {
- if (end > MAX_ISA_PORT)
- end = MAX_ISA_PORT;
-
- start >>= PAGE_SHIFT;
- end >>= PAGE_SHIFT;
-
- /* get the range of pages for the map */
- mask = ((1 << (end+1)) - 1) ^ ((1 << start) - 1);
- io_page_mask |= mask;
- }
+ res->start += offset;
+ res->end += offset;
} else if (res->flags & IORESOURCE_MEM) {
res->start += hose->pci_mem_offset;
res->end += hose->pci_mem_offset;
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index 12c4c9e9bbc..1c18953514c 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -31,6 +31,7 @@
#include <asm/pci-bridge.h>
#include <asm/pSeries_reconfig.h>
#include <asm/ppc-pci.h>
+#include <asm/firmware.h>
/*
* Traverse_func that inits the PCI fields of the device node.
@@ -59,6 +60,11 @@ static void * __devinit update_dn_pci_info(struct device_node *dn, void *data)
pdn->busno = (regs[0] >> 16) & 0xff;
pdn->devfn = (regs[0] >> 8) & 0xff;
}
+ if (firmware_has_feature(FW_FEATURE_ISERIES)) {
+ u32 *busp = (u32 *)get_property(dn, "linux,subbus", NULL);
+ if (busp)
+ pdn->bussubno = *busp;
+ }
pdn->pci_ext_config_space = (type && *type == 1);
return NULL;
diff --git a/arch/powerpc/kernel/pci_iommu.c b/arch/powerpc/kernel/pci_iommu.c
index c1d95e14bbe..7fb4cca021b 100644
--- a/arch/powerpc/kernel/pci_iommu.c
+++ b/arch/powerpc/kernel/pci_iommu.c
@@ -44,16 +44,16 @@
*/
#define PCI_GET_DN(dev) ((struct device_node *)((dev)->sysdata))
-static inline struct iommu_table *devnode_table(struct device *dev)
+static inline struct iommu_table *device_to_table(struct device *hwdev)
{
struct pci_dev *pdev;
- if (!dev) {
+ if (!hwdev) {
pdev = ppc64_isabridge_dev;
if (!pdev)
return NULL;
} else
- pdev = to_pci_dev(dev);
+ pdev = to_pci_dev(hwdev);
return PCI_DN(PCI_GET_DN(pdev))->iommu_table;
}
@@ -85,14 +85,15 @@ static inline unsigned long device_to_mask(struct device *hwdev)
static void *pci_iommu_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
- return iommu_alloc_coherent(devnode_table(hwdev), size, dma_handle,
- device_to_mask(hwdev), flag);
+ return iommu_alloc_coherent(device_to_table(hwdev), size, dma_handle,
+ device_to_mask(hwdev), flag,
+ pcibus_to_node(to_pci_dev(hwdev)->bus));
}
static void pci_iommu_free_coherent(struct device *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
- iommu_free_coherent(devnode_table(hwdev), size, vaddr, dma_handle);
+ iommu_free_coherent(device_to_table(hwdev), size, vaddr, dma_handle);
}
/* Creates TCEs for a user provided buffer. The user buffer must be
@@ -104,7 +105,7 @@ static void pci_iommu_free_coherent(struct device *hwdev, size_t size,
static dma_addr_t pci_iommu_map_single(struct device *hwdev, void *vaddr,
size_t size, enum dma_data_direction direction)
{
- return iommu_map_single(devnode_table(hwdev), vaddr, size,
+ return iommu_map_single(device_to_table(hwdev), vaddr, size,
device_to_mask(hwdev), direction);
}
@@ -112,27 +113,27 @@ static dma_addr_t pci_iommu_map_single(struct device *hwdev, void *vaddr,
static void pci_iommu_unmap_single(struct device *hwdev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction)
{
- iommu_unmap_single(devnode_table(hwdev), dma_handle, size, direction);
+ iommu_unmap_single(device_to_table(hwdev), dma_handle, size, direction);
}
static int pci_iommu_map_sg(struct device *pdev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction)
{
- return iommu_map_sg(pdev, devnode_table(pdev), sglist,
+ return iommu_map_sg(pdev, device_to_table(pdev), sglist,
nelems, device_to_mask(pdev), direction);
}
static void pci_iommu_unmap_sg(struct device *pdev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction)
{
- iommu_unmap_sg(devnode_table(pdev), sglist, nelems, direction);
+ iommu_unmap_sg(device_to_table(pdev), sglist, nelems, direction);
}
/* We support DMA to/from any memory page via the iommu */
static int pci_iommu_dma_supported(struct device *dev, u64 mask)
{
- struct iommu_table *tbl = devnode_table(dev);
+ struct iommu_table *tbl = device_to_table(dev);
if (!tbl || tbl->it_offset > mask) {
printk(KERN_INFO "Warning: IOMMU table offset too big for device mask\n");
diff --git a/arch/powerpc/kernel/proc_ppc64.c b/arch/powerpc/kernel/proc_ppc64.c
index 3c2cf661f6d..2b87f82df13 100644
--- a/arch/powerpc/kernel/proc_ppc64.c
+++ b/arch/powerpc/kernel/proc_ppc64.c
@@ -52,7 +52,7 @@ static int __init proc_ppc64_create(void)
if (!root)
return 1;
- if (!machine_is(pseries) && !machine_is(cell))
+ if (!of_find_node_by_path("/rtas"))
return 0;
if (!proc_mkdir("rtas", root))
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 2dd47d2dd99..e4732459c48 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -708,6 +708,61 @@ int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
return put_user(val, (unsigned int __user *) adr);
}
+int set_endian(struct task_struct *tsk, unsigned int val)
+{
+ struct pt_regs *regs = tsk->thread.regs;
+
+ if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
+ (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
+ return -EINVAL;
+
+ if (regs == NULL)
+ return -EINVAL;
+
+ if (val == PR_ENDIAN_BIG)
+ regs->msr &= ~MSR_LE;
+ else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
+ regs->msr |= MSR_LE;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+int get_endian(struct task_struct *tsk, unsigned long adr)
+{
+ struct pt_regs *regs = tsk->thread.regs;
+ unsigned int val;
+
+ if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
+ !cpu_has_feature(CPU_FTR_REAL_LE))
+ return -EINVAL;
+
+ if (regs == NULL)
+ return -EINVAL;
+
+ if (regs->msr & MSR_LE) {
+ if (cpu_has_feature(CPU_FTR_REAL_LE))
+ val = PR_ENDIAN_LITTLE;
+ else
+ val = PR_ENDIAN_PPC_LITTLE;
+ } else
+ val = PR_ENDIAN_BIG;
+
+ return put_user(val, (unsigned int __user *)adr);
+}
+
+int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
+{
+ tsk->thread.align_ctl = val;
+ return 0;
+}
+
+int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
+{
+ return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
+}
+
#define TRUNC_PTR(x) ((typeof(x))(((unsigned long)(x)) & 0xffffffff))
int sys_clone(unsigned long clone_flags, unsigned long usp,
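
[Illustration, not part of the patch] The new set_endian()/get_endian() and
set/get_unalign_ctl() hooks in process.c are reached from userspace through
prctl() (PR_SET_ENDIAN/PR_GET_ENDIAN and PR_SET_UNALIGN/PR_GET_UNALIGN). A
hedged usage sketch, assuming the PR_* constants are exposed by <sys/prctl.h>
on the target toolchain:

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
        unsigned int mode;

        if (prctl(PR_GET_ENDIAN, &mode) != 0) {
                perror("PR_GET_ENDIAN");        /* EINVAL: CPU has neither LE feature */
                return 1;
        }
        printf("current endian mode: %u\n", mode);      /* a PR_ENDIAN_* value */

        /*
         * A real caller would issue prctl(PR_SET_ENDIAN, PR_ENDIAN_PPC_LITTLE)
         * (or PR_ENDIAN_LITTLE on CPUs with CPU_FTR_REAL_LE) immediately
         * before branching into little-endian code, since the new MSR_LE
         * setting takes effect as soon as the syscall returns.
         */
        return 0;
}
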
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 9a07f97f071..969f4abcc0b 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -50,6 +50,7 @@
#include <asm/machdep.h>
#include <asm/pSeries_reconfig.h>
#include <asm/pci-bridge.h>
+#include <asm/kexec.h>
#ifdef DEBUG
#define DBG(fmt...) printk(KERN_ERR fmt)
@@ -836,6 +837,42 @@ static unsigned long __init unflatten_dt_node(unsigned long mem,
return mem;
}
+static int __init early_parse_mem(char *p)
+{
+ if (!p)
+ return 1;
+
+ memory_limit = PAGE_ALIGN(memparse(p, &p));
+ DBG("memory limit = 0x%lx\n", memory_limit);
+
+ return 0;
+}
+early_param("mem", early_parse_mem);
+
+/*
+ * The device tree may be allocated below our memory limit, or inside the
+ * crash kernel region for kdump. If so, move it out now.
+ */
+static void move_device_tree(void)
+{
+ unsigned long start, size;
+ void *p;
+
+ DBG("-> move_device_tree\n");
+
+ start = __pa(initial_boot_params);
+ size = initial_boot_params->totalsize;
+
+ if ((memory_limit && (start + size) > memory_limit) ||
+ overlaps_crashkernel(start, size)) {
+ p = __va(lmb_alloc_base(size, PAGE_SIZE, lmb.rmo_size));
+ memcpy(p, initial_boot_params, size);
+ initial_boot_params = (struct boot_param_header *)p;
+ DBG("Moved device tree to 0x%p\n", p);
+ }
+
+ DBG("<- move_device_tree\n");
+}
/**
* unflattens the device-tree passed by the firmware, creating the
@@ -1070,6 +1107,7 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
iommu_force_on = 1;
#endif
+ /* mem=x on the command line is the preferred mechanism */
lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
if (lprop)
memory_limit = *lprop;
@@ -1123,17 +1161,6 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
DBG("Command line is: %s\n", cmd_line);
- if (strstr(cmd_line, "mem=")) {
- char *p, *q;
-
- for (q = cmd_line; (p = strstr(q, "mem=")) != 0; ) {
- q = p + 4;
- if (p > cmd_line && p[-1] != ' ')
- continue;
- memory_limit = memparse(q, &q);
- }
- }
-
/* break now */
return 1;
}
@@ -1240,6 +1267,11 @@ static void __init early_reserve_mem(void)
reserve_map = (u64 *)(((unsigned long)initial_boot_params) +
initial_boot_params->off_mem_rsvmap);
+
+ /* before we do anything, lets reserve the dt blob */
+ lmb_reserve(__pa((unsigned long)initial_boot_params),
+ initial_boot_params->totalsize);
+
#ifdef CONFIG_PPC32
/*
* Handle the case where we might be booting from an old kexec
@@ -1292,18 +1324,26 @@ void __init early_init_devtree(void *params)
lmb_init();
of_scan_flat_dt(early_init_dt_scan_root, NULL);
of_scan_flat_dt(early_init_dt_scan_memory, NULL);
- lmb_enforce_memory_limit(memory_limit);
- lmb_analyze();
- DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
+ /* Save command line for /proc/cmdline and then parse parameters */
+ strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
+ parse_early_param();
/* Reserve LMB regions used by kernel, initrd, dt, etc... */
lmb_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
-#ifdef CONFIG_CRASH_DUMP
- lmb_reserve(0, KDUMP_RESERVE_LIMIT);
-#endif
+ reserve_kdump_trampoline();
+ reserve_crashkernel();
early_reserve_mem();
+ lmb_enforce_memory_limit(memory_limit);
+ lmb_analyze();
+
+ DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
+
+ /* We may need to relocate the flat tree, do it now.
+ * FIXME .. and the initrd too? */
+ move_device_tree();
+
DBG("Scanning CPUs ...\n");
/* Retreive CPU related informations from the flat tree
@@ -2053,29 +2093,3 @@ int prom_update_property(struct device_node *np,
return 0;
}
-#ifdef CONFIG_KEXEC
-/* We may have allocated the flat device tree inside the crash kernel region
- * in prom_init. If so we need to move it out into regular memory. */
-void kdump_move_device_tree(void)
-{
- unsigned long start, end;
- struct boot_param_header *new;
-
- start = __pa((unsigned long)initial_boot_params);
- end = start + initial_boot_params->totalsize;
-
- if (end < crashk_res.start || start > crashk_res.end)
- return;
-
- new = (struct boot_param_header*)
- __va(lmb_alloc(initial_boot_params->totalsize, PAGE_SIZE));
-
- memcpy(new, initial_boot_params, initial_boot_params->totalsize);
-
- initial_boot_params = new;
-
- DBG("Flat device tree blob moved to %p\n", initial_boot_params);
-
- /* XXX should we unreserve the old DT? */
-}
-#endif /* CONFIG_KEXEC */
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index f70bd090dac..57d8a16438a 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -194,19 +194,12 @@ static int __initdata of_platform;
static char __initdata prom_cmd_line[COMMAND_LINE_SIZE];
-static unsigned long __initdata prom_memory_limit;
-
static unsigned long __initdata alloc_top;
static unsigned long __initdata alloc_top_high;
static unsigned long __initdata alloc_bottom;
static unsigned long __initdata rmo_top;
static unsigned long __initdata ram_top;
-#ifdef CONFIG_KEXEC
-static unsigned long __initdata prom_crashk_base;
-static unsigned long __initdata prom_crashk_size;
-#endif
-
static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE];
static int __initdata mem_reserve_cnt;
@@ -593,45 +586,6 @@ static void __init early_cmdline_parse(void)
RELOC(iommu_force_on) = 1;
}
#endif
-
- opt = strstr(RELOC(prom_cmd_line), RELOC("mem="));
- if (opt) {
- opt += 4;
- RELOC(prom_memory_limit) = prom_memparse(opt, (const char **)&opt);
-#ifdef CONFIG_PPC64
- /* Align to 16 MB == size of ppc64 large page */
- RELOC(prom_memory_limit) = ALIGN(RELOC(prom_memory_limit), 0x1000000);
-#endif
- }
-
-#ifdef CONFIG_KEXEC
- /*
- * crashkernel=size@addr specifies the location to reserve for
- * crash kernel.
- */
- opt = strstr(RELOC(prom_cmd_line), RELOC("crashkernel="));
- if (opt) {
- opt += 12;
- RELOC(prom_crashk_size) =
- prom_memparse(opt, (const char **)&opt);
-
- if (ALIGN(RELOC(prom_crashk_size), 0x1000000) !=
- RELOC(prom_crashk_size)) {
- prom_printf("Warning: crashkernel size is not "
- "aligned to 16MB\n");
- }
-
- /*
- * At present, the crash kernel always run at 32MB.
- * Just ignore whatever user passed.
- */
- RELOC(prom_crashk_base) = 0x2000000;
- if (*opt == '@') {
- prom_printf("Warning: PPC64 kdump kernel always runs "
- "at 32 MB\n");
- }
- }
-#endif
}
#ifdef CONFIG_PPC_PSERIES
@@ -1116,29 +1070,6 @@ static void __init prom_init_mem(void)
}
/*
- * If prom_memory_limit is set we reduce the upper limits *except* for
- * alloc_top_high. This must be the real top of RAM so we can put
- * TCE's up there.
- */
-
- RELOC(alloc_top_high) = RELOC(ram_top);
-
- if (RELOC(prom_memory_limit)) {
- if (RELOC(prom_memory_limit) <= RELOC(alloc_bottom)) {
- prom_printf("Ignoring mem=%x <= alloc_bottom.\n",
- RELOC(prom_memory_limit));
- RELOC(prom_memory_limit) = 0;
- } else if (RELOC(prom_memory_limit) >= RELOC(ram_top)) {
- prom_printf("Ignoring mem=%x >= ram_top.\n",
- RELOC(prom_memory_limit));
- RELOC(prom_memory_limit) = 0;
- } else {
- RELOC(ram_top) = RELOC(prom_memory_limit);
- RELOC(rmo_top) = min(RELOC(rmo_top), RELOC(prom_memory_limit));
- }
- }
-
- /*
* Setup our top alloc point, that is top of RMO or top of
* segment 0 when running non-LPAR.
* Some RS64 machines have buggy firmware where claims up at
@@ -1150,20 +1081,14 @@ static void __init prom_init_mem(void)
RELOC(rmo_top) = RELOC(ram_top);
RELOC(rmo_top) = min(0x30000000ul, RELOC(rmo_top));
RELOC(alloc_top) = RELOC(rmo_top);
+ RELOC(alloc_top_high) = RELOC(ram_top);
prom_printf("memory layout at init:\n");
- prom_printf(" memory_limit : %x (16 MB aligned)\n", RELOC(prom_memory_limit));
prom_printf(" alloc_bottom : %x\n", RELOC(alloc_bottom));
prom_printf(" alloc_top : %x\n", RELOC(alloc_top));
prom_printf(" alloc_top_hi : %x\n", RELOC(alloc_top_high));
prom_printf(" rmo_top : %x\n", RELOC(rmo_top));
prom_printf(" ram_top : %x\n", RELOC(ram_top));
-#ifdef CONFIG_KEXEC
- if (RELOC(prom_crashk_base)) {
- prom_printf(" crashk_base : %x\n", RELOC(prom_crashk_base));
- prom_printf(" crashk_size : %x\n", RELOC(prom_crashk_size));
- }
-#endif
}
@@ -1349,16 +1274,10 @@ static void __init prom_initialize_tce_table(void)
reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
- if (RELOC(prom_memory_limit)) {
- /*
- * We align the start to a 16MB boundary so we can map
- * the TCE area using large pages if possible.
- * The end should be the top of RAM so no need to align it.
- */
- RELOC(prom_tce_alloc_start) = _ALIGN_DOWN(local_alloc_bottom,
- 0x1000000);
- RELOC(prom_tce_alloc_end) = local_alloc_top;
- }
+ /* These are only really needed if there is a memory limit in
+ * effect, but we don't know so export them always. */
+ RELOC(prom_tce_alloc_start) = local_alloc_bottom;
+ RELOC(prom_tce_alloc_end) = local_alloc_top;
/* Flag the first invalid entry */
prom_debug("ending prom_initialize_tce_table\n");
@@ -2041,11 +1960,7 @@ static void __init flatten_device_tree(void)
/* Version 16 is not backward compatible */
hdr->last_comp_version = 0x10;
- /* Reserve the whole thing and copy the reserve map in, we
- * also bump mem_reserve_cnt to cause further reservations to
- * fail since it's too late.
- */
- reserve_mem(RELOC(dt_header_start), hdr->totalsize);
+ /* Copy the reserve map in */
memcpy(rsvmap, RELOC(mem_reserve_map), sizeof(mem_reserve_map));
#ifdef DEBUG_PROM
@@ -2058,6 +1973,9 @@ static void __init flatten_device_tree(void)
RELOC(mem_reserve_map)[i].size);
}
#endif
+ /* Bump mem_reserve_cnt to cause further reservations to fail
+ * since it's too late.
+ */
RELOC(mem_reserve_cnt) = MEM_RESERVE_MAP_SIZE;
prom_printf("Device tree strings 0x%x -> 0x%x\n",
@@ -2280,10 +2198,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
*/
prom_init_mem();
-#ifdef CONFIG_KEXEC
- if (RELOC(prom_crashk_base))
- reserve_mem(RELOC(prom_crashk_base), RELOC(prom_crashk_size));
-#endif
/*
* Determine which cpu is actually running right _now_
*/
@@ -2317,10 +2231,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
/*
* Fill in some infos for use by the kernel later on
*/
- if (RELOC(prom_memory_limit))
- prom_setprop(_prom->chosen, "/chosen", "linux,memory-limit",
- &RELOC(prom_memory_limit),
- sizeof(prom_memory_limit));
#ifdef CONFIG_PPC64
if (RELOC(ppc64_iommu_off))
prom_setprop(_prom->chosen, "/chosen", "linux,iommu-off",
@@ -2340,16 +2250,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
}
#endif
-#ifdef CONFIG_KEXEC
- if (RELOC(prom_crashk_base)) {
- prom_setprop(_prom->chosen, "/chosen", "linux,crashkernel-base",
- PTRRELOC(&prom_crashk_base),
- sizeof(RELOC(prom_crashk_base)));
- prom_setprop(_prom->chosen, "/chosen", "linux,crashkernel-size",
- PTRRELOC(&prom_crashk_size),
- sizeof(RELOC(prom_crashk_size)));
- }
-#endif
/*
* Fixup any known bugs in the device-tree
*/
diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c
index 3934c227549..45df420383c 100644
--- a/arch/powerpc/kernel/prom_parse.c
+++ b/arch/powerpc/kernel/prom_parse.c
@@ -548,3 +548,28 @@ int of_pci_address_to_resource(struct device_node *dev, int bar,
return __of_address_to_resource(dev, addrp, size, flags, r);
}
EXPORT_SYMBOL_GPL(of_pci_address_to_resource);
+
+void of_parse_dma_window(struct device_node *dn, unsigned char *dma_window_prop,
+ unsigned long *busno, unsigned long *phys, unsigned long *size)
+{
+ u32 *dma_window, cells;
+ unsigned char *prop;
+
+ dma_window = (u32 *)dma_window_prop;
+
+ /* busno is always one cell */
+ *busno = *(dma_window++);
+
+ prop = get_property(dn, "ibm,#dma-address-cells", NULL);
+ if (!prop)
+ prop = get_property(dn, "#address-cells", NULL);
+
+ cells = prop ? *(u32 *)prop : prom_n_addr_cells(dn);
+ *phys = of_read_addr(dma_window, cells);
+
+ dma_window += cells;
+
+ prop = get_property(dn, "ibm,#dma-size-cells", NULL);
+ cells = prop ? *(u32 *)prop : prom_n_size_cells(dn);
+ *size = of_read_addr(dma_window, cells);
+}
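
For context, a minimal kernel-internal sketch of a caller of the new of_parse_dma_window() helper, assuming dn points at a virtual-device node carrying an "ibm,my-dma-window" property (the property name mirrors the vio.c hunk further down; the function name is illustrative only):

	static void example_parse_window(struct device_node *dn)
	{
		unsigned char *dma_window;
		unsigned long busno, phys, size;

		dma_window = get_property(dn, "ibm,my-dma-window", NULL);
		if (!dma_window)
			return;

		/* busno is one cell; the widths of phys and size come from the
		 * node's #address-cells / #size-cells (or the ibm,# variants) */
		of_parse_dma_window(dn, dma_window, &busno, &phys, &size);
	}
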
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 4a677d1bd4e..5563e2e7d89 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -404,7 +404,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
ret = ptrace_detach(child, data);
break;
-#ifdef CONFIG_PPC64
case PPC_PTRACE_GETREGS: { /* Get GPRs 0 - 31. */
int i;
unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
@@ -468,7 +467,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
break;
}
-#endif /* CONFIG_PPC64 */
#ifdef CONFIG_ALTIVEC
case PTRACE_GETVRREGS:
diff --git a/arch/powerpc/kernel/rtas-rtc.c b/arch/powerpc/kernel/rtas-rtc.c
index 34d073fb609..77578c093dd 100644
--- a/arch/powerpc/kernel/rtas-rtc.c
+++ b/arch/powerpc/kernel/rtas-rtc.c
@@ -14,19 +14,20 @@
unsigned long __init rtas_get_boot_time(void)
{
int ret[8];
- int error, wait_time;
+ int error;
+ unsigned int wait_time;
u64 max_wait_tb;
max_wait_tb = get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT;
do {
error = rtas_call(rtas_token("get-time-of-day"), 0, 8, ret);
- if (error == RTAS_CLOCK_BUSY || rtas_is_extended_busy(error)) {
- wait_time = rtas_extended_busy_delay_time(error);
+
+ wait_time = rtas_busy_delay_time(error);
+ if (wait_time) {
/* This is boot time so we spin. */
udelay(wait_time*1000);
- error = RTAS_CLOCK_BUSY;
}
- } while (error == RTAS_CLOCK_BUSY && (get_tb() < max_wait_tb));
+ } while (wait_time && (get_tb() < max_wait_tb));
if (error != 0 && printk_ratelimit()) {
printk(KERN_WARNING "error: reading the clock failed (%d)\n",
@@ -44,24 +45,25 @@ unsigned long __init rtas_get_boot_time(void)
void rtas_get_rtc_time(struct rtc_time *rtc_tm)
{
int ret[8];
- int error, wait_time;
+ int error;
+ unsigned int wait_time;
u64 max_wait_tb;
max_wait_tb = get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT;
do {
error = rtas_call(rtas_token("get-time-of-day"), 0, 8, ret);
- if (error == RTAS_CLOCK_BUSY || rtas_is_extended_busy(error)) {
+
+ wait_time = rtas_busy_delay_time(error);
+ if (wait_time) {
if (in_interrupt() && printk_ratelimit()) {
memset(rtc_tm, 0, sizeof(struct rtc_time));
printk(KERN_WARNING "error: reading clock"
" would delay interrupt\n");
return; /* delay not allowed */
}
- wait_time = rtas_extended_busy_delay_time(error);
msleep(wait_time);
- error = RTAS_CLOCK_BUSY;
}
- } while (error == RTAS_CLOCK_BUSY && (get_tb() < max_wait_tb));
+ } while (wait_time && (get_tb() < max_wait_tb));
if (error != 0 && printk_ratelimit()) {
printk(KERN_WARNING "error: reading the clock failed (%d)\n",
@@ -88,14 +90,14 @@ int rtas_set_rtc_time(struct rtc_time *tm)
tm->tm_year + 1900, tm->tm_mon + 1,
tm->tm_mday, tm->tm_hour, tm->tm_min,
tm->tm_sec, 0);
- if (error == RTAS_CLOCK_BUSY || rtas_is_extended_busy(error)) {
+
+ wait_time = rtas_busy_delay_time(error);
+ if (wait_time) {
if (in_interrupt())
return 1; /* probably decrementer */
- wait_time = rtas_extended_busy_delay_time(error);
msleep(wait_time);
- error = RTAS_CLOCK_BUSY;
}
- } while (error == RTAS_CLOCK_BUSY && (get_tb() < max_wait_tb));
+ } while (wait_time && (get_tb() < max_wait_tb));
if (error != 0 && printk_ratelimit())
printk(KERN_WARNING "error: setting the clock failed (%d)\n",
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 0112318213a..13496f31985 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -370,24 +370,36 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
return ret;
}
-/* Given an RTAS status code of 990n compute the hinted delay of 10^n
- * (last digit) milliseconds. For now we bound at n=5 (100 sec).
+/* For RTAS_BUSY (-2), delay for 1 millisecond. For an extended busy status
+ * code of 990n, perform the hinted delay of 10^n (last digit) milliseconds.
*/
-unsigned int rtas_extended_busy_delay_time(int status)
+unsigned int rtas_busy_delay_time(int status)
{
- int order = status - 9900;
- unsigned long ms;
+ int order;
+ unsigned int ms = 0;
+
+ if (status == RTAS_BUSY) {
+ ms = 1;
+ } else if (status >= 9900 && status <= 9905) {
+ order = status - 9900;
+ for (ms = 1; order > 0; order--)
+ ms *= 10;
+ }
- if (order < 0)
- order = 0; /* RTC depends on this for -2 clock busy */
- else if (order > 5)
- order = 5; /* bound */
+ return ms;
+}
- /* Use microseconds for reasonable accuracy */
- for (ms = 1; order > 0; order--)
- ms *= 10;
+/* For an RTAS busy status code, perform the hinted delay. */
+unsigned int rtas_busy_delay(int status)
+{
+ unsigned int ms;
- return ms;
+ might_sleep();
+ ms = rtas_busy_delay_time(status);
+ if (ms)
+ msleep(ms);
+
+ return ms;
}
int rtas_error_rc(int rtas_rc)
@@ -438,22 +450,14 @@ int rtas_get_power_level(int powerdomain, int *level)
int rtas_set_power_level(int powerdomain, int level, int *setlevel)
{
int token = rtas_token("set-power-level");
- unsigned int wait_time;
int rc;
if (token == RTAS_UNKNOWN_SERVICE)
return -ENOENT;
- while (1) {
+ do {
rc = rtas_call(token, 2, 2, setlevel, powerdomain, level);
- if (rc == RTAS_BUSY)
- udelay(1);
- else if (rtas_is_extended_busy(rc)) {
- wait_time = rtas_extended_busy_delay_time(rc);
- udelay(wait_time * 1000);
- } else
- break;
- }
+ } while (rtas_busy_delay(rc));
if (rc < 0)
return rtas_error_rc(rc);
@@ -463,22 +467,14 @@ int rtas_set_power_level(int powerdomain, int level, int *setlevel)
int rtas_get_sensor(int sensor, int index, int *state)
{
int token = rtas_token("get-sensor-state");
- unsigned int wait_time;
int rc;
if (token == RTAS_UNKNOWN_SERVICE)
return -ENOENT;
- while (1) {
+ do {
rc = rtas_call(token, 2, 2, state, sensor, index);
- if (rc == RTAS_BUSY)
- udelay(1);
- else if (rtas_is_extended_busy(rc)) {
- wait_time = rtas_extended_busy_delay_time(rc);
- udelay(wait_time * 1000);
- } else
- break;
- }
+ } while (rtas_busy_delay(rc));
if (rc < 0)
return rtas_error_rc(rc);
@@ -488,23 +484,14 @@ int rtas_get_sensor(int sensor, int index, int *state)
int rtas_set_indicator(int indicator, int index, int new_value)
{
int token = rtas_token("set-indicator");
- unsigned int wait_time;
int rc;
if (token == RTAS_UNKNOWN_SERVICE)
return -ENOENT;
- while (1) {
+ do {
rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value);
- if (rc == RTAS_BUSY)
- udelay(1);
- else if (rtas_is_extended_busy(rc)) {
- wait_time = rtas_extended_busy_delay_time(rc);
- udelay(wait_time * 1000);
- }
- else
- break;
- }
+ } while (rtas_busy_delay(rc));
if (rc < 0)
return rtas_error_rc(rc);
@@ -555,13 +542,11 @@ void rtas_os_term(char *str)
do {
status = rtas_call(rtas_token("ibm,os-term"), 1, 1, NULL,
__pa(rtas_os_term_buf));
+ } while (rtas_busy_delay(status));
- if (status == RTAS_BUSY)
- udelay(1);
- else if (status != 0)
- printk(KERN_EMERG "ibm,os-term call failed %d\n",
+ if (status != 0)
+ printk(KERN_EMERG "ibm,os-term call failed %d\n",
status);
- } while (status == RTAS_BUSY);
}
static int ibm_suspend_me_token = RTAS_UNKNOWN_SERVICE;
@@ -789,7 +774,7 @@ EXPORT_SYMBOL(rtas_token);
EXPORT_SYMBOL(rtas_call);
EXPORT_SYMBOL(rtas_data_buf);
EXPORT_SYMBOL(rtas_data_buf_lock);
-EXPORT_SYMBOL(rtas_extended_busy_delay_time);
+EXPORT_SYMBOL(rtas_busy_delay_time);
EXPORT_SYMBOL(rtas_get_sensor);
EXPORT_SYMBOL(rtas_get_power_level);
EXPORT_SYMBOL(rtas_set_power_level);
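
Taken together, the two helpers are meant to collapse the old open-coded RTAS_BUSY/extended-busy loops into one idiom. A minimal sketch of a sleeping caller follows (the service name and argument counts are illustrative only); callers that cannot sleep, such as the boot-time RTC path above, keep using rtas_busy_delay_time() with udelay():

	static int example_rtas_retry(void)
	{
		int token = rtas_token("ibm,example-service");	/* illustrative */
		int rc;

		if (token == RTAS_UNKNOWN_SERVICE)
			return -ENOENT;

		do {
			rc = rtas_call(token, 0, 1, NULL);
		} while (rtas_busy_delay(rc));	/* msleep()s 1ms for -2, 10^n ms for 990n */

		return rc;
	}
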
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index aaf384c3f04..1442b63a75d 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -365,20 +365,12 @@ static int rtas_excl_release(struct inode *inode, struct file *file)
static void manage_flash(struct rtas_manage_flash_t *args_buf)
{
- unsigned int wait_time;
s32 rc;
- while (1) {
+ do {
rc = rtas_call(rtas_token("ibm,manage-flash-image"), 1,
1, NULL, args_buf->op);
- if (rc == RTAS_RC_BUSY)
- udelay(1);
- else if (rtas_is_extended_busy(rc)) {
- wait_time = rtas_extended_busy_delay_time(rc);
- udelay(wait_time * 1000);
- } else
- break;
- }
+ } while (rtas_busy_delay(rc));
args_buf->status = rc;
}
@@ -451,27 +443,18 @@ static ssize_t manage_flash_write(struct file *file, const char __user *buf,
static void validate_flash(struct rtas_validate_flash_t *args_buf)
{
int token = rtas_token("ibm,validate-flash-image");
- unsigned int wait_time;
int update_results;
s32 rc;
rc = 0;
- while(1) {
+ do {
spin_lock(&rtas_data_buf_lock);
memcpy(rtas_data_buf, args_buf->buf, VALIDATE_BUF_SIZE);
rc = rtas_call(token, 2, 2, &update_results,
(u32) __pa(rtas_data_buf), args_buf->buf_size);
memcpy(args_buf->buf, rtas_data_buf, VALIDATE_BUF_SIZE);
spin_unlock(&rtas_data_buf_lock);
-
- if (rc == RTAS_RC_BUSY)
- udelay(1);
- else if (rtas_is_extended_busy(rc)) {
- wait_time = rtas_extended_busy_delay_time(rc);
- udelay(wait_time * 1000);
- } else
- break;
- }
+ } while (rtas_busy_delay(rc));
args_buf->status = rc;
args_buf->update_results = update_results;
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 684ab1d49c6..bd328123af7 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -443,6 +443,7 @@ void __init smp_setup_cpu_maps(void)
}
#endif /* CONFIG_SMP */
+int __initdata do_early_xmon;
#ifdef CONFIG_XMON
static int __init early_xmon(char *p)
{
@@ -456,7 +457,7 @@ static int __init early_xmon(char *p)
return 0;
}
xmon_init(1);
- debugger(NULL);
+ do_early_xmon = 1;
return 0;
}
@@ -524,3 +525,20 @@ int check_legacy_ioport(unsigned long base_port)
return ppc_md.check_legacy_ioport(base_port);
}
EXPORT_SYMBOL(check_legacy_ioport);
+
+static int ppc_panic_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ ppc_md.panic(ptr); /* May not return */
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ppc_panic_block = {
+ .notifier_call = ppc_panic_event,
+ .priority = INT_MIN /* may not return; must be done last */
+};
+
+void __init setup_panic(void)
+{
+ atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block);
+}
diff --git a/arch/powerpc/kernel/setup.h b/arch/powerpc/kernel/setup.h
index 2ebba755272..4c67ad7fae0 100644
--- a/arch/powerpc/kernel/setup.h
+++ b/arch/powerpc/kernel/setup.h
@@ -2,5 +2,8 @@
#define _POWERPC_KERNEL_SETUP_H
void check_for_initrd(void);
+void do_init_bootmem(void);
+void setup_panic(void);
+extern int do_early_xmon;
#endif /* _POWERPC_KERNEL_SETUP_H */
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 69ac2570134..e5a44812441 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -131,12 +131,6 @@ void __init machine_init(unsigned long dt_ptr, unsigned long phys)
/* Do some early initialization based on the flat device tree */
early_init_devtree(__va(dt_ptr));
- /* Check default command line */
-#ifdef CONFIG_CMDLINE
- if (cmd_line[0] == 0)
- strlcpy(cmd_line, CONFIG_CMDLINE, sizeof(cmd_line));
-#endif /* CONFIG_CMDLINE */
-
probe_machine();
#ifdef CONFIG_6xx
@@ -235,7 +229,7 @@ arch_initcall(ppc_init);
/* Warning, IO base is not yet inited */
void __init setup_arch(char **cmdline_p)
{
- extern void do_init_bootmem(void);
+ *cmdline_p = cmd_line;
/* so udelay does something sensible, assume <= 1000 bogomips */
loops_per_jiffy = 500000000 / HZ;
@@ -285,16 +279,16 @@ void __init setup_arch(char **cmdline_p)
/* reboot on panic */
panic_timeout = 180;
+ if (ppc_md.panic)
+ setup_panic();
+
init_mm.start_code = PAGE_OFFSET;
init_mm.end_code = (unsigned long) _etext;
init_mm.end_data = (unsigned long) _edata;
init_mm.brk = klimit;
- /* Save unparsed command line copy for /proc/cmdline */
- strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
- *cmdline_p = cmd_line;
-
- parse_early_param();
+ if (do_early_xmon)
+ debugger(NULL);
/* set up the bootmem stuff with available memory */
do_init_bootmem();
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 4467c49903b..78f3a5fd43f 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -100,12 +100,6 @@ unsigned long SYSRQ_KEY;
#endif /* CONFIG_MAGIC_SYSRQ */
-static int ppc64_panic_event(struct notifier_block *, unsigned long, void *);
-static struct notifier_block ppc64_panic_block = {
- .notifier_call = ppc64_panic_event,
- .priority = INT_MIN /* may not return; must be done last */
-};
-
#ifdef CONFIG_SMP
static int smt_enabled_cmdline;
@@ -199,9 +193,7 @@ void __init early_setup(unsigned long dt_ptr)
/* Probe the machine type */
probe_machine();
-#ifdef CONFIG_CRASH_DUMP
- kdump_setup();
-#endif
+ setup_kdump_trampoline();
DBG("Found, Initializing memory management...\n");
@@ -353,9 +345,6 @@ void __init setup_system(void)
{
DBG(" -> setup_system()\n");
-#ifdef CONFIG_KEXEC
- kdump_move_device_tree();
-#endif
/*
* Unflatten the device-tree passed by prom_init or kexec
*/
@@ -420,10 +409,8 @@ void __init setup_system(void)
*/
register_early_udbg_console();
- /* Save unparsed command line copy for /proc/cmdline */
- strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
-
- parse_early_param();
+ if (do_early_xmon)
+ debugger(NULL);
check_smt_enabled();
smp_setup_cpu_maps();
@@ -456,13 +443,6 @@ void __init setup_system(void)
DBG(" <- setup_system()\n");
}
-static int ppc64_panic_event(struct notifier_block *this,
- unsigned long event, void *ptr)
-{
- ppc_md.panic((char *)ptr); /* May not return */
- return NOTIFY_DONE;
-}
-
#ifdef CONFIG_IRQSTACKS
static void __init irqstack_early_init(void)
{
@@ -517,8 +497,6 @@ static void __init emergency_stack_init(void)
*/
void __init setup_arch(char **cmdline_p)
{
- extern void do_init_bootmem(void);
-
ppc64_boot_msg(0x12, "Setup Arch");
*cmdline_p = cmd_line;
@@ -535,8 +513,7 @@ void __init setup_arch(char **cmdline_p)
panic_timeout = 180;
if (ppc_md.panic)
- atomic_notifier_chain_register(&panic_notifier_list,
- &ppc64_panic_block);
+ setup_panic();
init_mm.start_code = PAGE_OFFSET;
init_mm.end_code = (unsigned long) _etext;
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 8fdeca2d459..d73b25e22fc 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -419,9 +419,7 @@ static long restore_user_regs(struct pt_regs *regs,
{
long err;
unsigned int save_r2 = 0;
-#if defined(CONFIG_ALTIVEC) || defined(CONFIG_SPE)
unsigned long msr;
-#endif
/*
* restore general registers but not including MSR or SOFTE. Also
@@ -430,11 +428,16 @@ static long restore_user_regs(struct pt_regs *regs,
if (!sig)
save_r2 = (unsigned int)regs->gpr[2];
err = restore_general_regs(regs, sr);
+ err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
if (!sig)
regs->gpr[2] = (unsigned long) save_r2;
if (err)
return 1;
+ /* if doing signal return, restore the previous little-endian mode */
+ if (sig)
+ regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
+
/*
* Do this before updating the thread state in
* current->thread.fpr/vr/evr. That way, if we get preempted
@@ -455,7 +458,7 @@ static long restore_user_regs(struct pt_regs *regs,
/* force the process to reload the altivec registers from
current->thread when it next does altivec instructions */
regs->msr &= ~MSR_VEC;
- if (!__get_user(msr, &sr->mc_gregs[PT_MSR]) && (msr & MSR_VEC) != 0) {
+ if (msr & MSR_VEC) {
/* restore altivec registers from the stack */
if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
sizeof(sr->mc_vregs)))
@@ -472,7 +475,7 @@ static long restore_user_regs(struct pt_regs *regs,
/* force the process to reload the spe registers from
current->thread when it next does spe instructions */
regs->msr &= ~MSR_SPE;
- if (!__get_user(msr, &sr->mc_gregs[PT_MSR]) && (msr & MSR_SPE) != 0) {
+ if (msr & MSR_SPE) {
/* restore spe registers from the stack */
if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
ELF_NEVRREG * sizeof(u32)))
@@ -757,10 +760,10 @@ static int handle_rt_signal(unsigned long sig, struct k_sigaction *ka,
/* Save user registers on the stack */
frame = &rt_sf->uc.uc_mcontext;
- if (vdso32_rt_sigtramp && current->thread.vdso_base) {
+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
if (save_user_regs(regs, frame, 0))
goto badframe;
- regs->link = current->thread.vdso_base + vdso32_rt_sigtramp;
+ regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
} else {
if (save_user_regs(regs, frame, __NR_rt_sigreturn))
goto badframe;
@@ -777,6 +780,8 @@ static int handle_rt_signal(unsigned long sig, struct k_sigaction *ka,
regs->gpr[5] = (unsigned long) &rt_sf->uc;
regs->gpr[6] = (unsigned long) rt_sf;
regs->nip = (unsigned long) ka->sa.sa_handler;
+ /* enter the signal handler in big-endian mode */
+ regs->msr &= ~MSR_LE;
regs->trap = 0;
return 1;
@@ -1038,10 +1043,10 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka,
|| __put_user(sig, &sc->signal))
goto badframe;
- if (vdso32_sigtramp && current->thread.vdso_base) {
+ if (vdso32_sigtramp && current->mm->context.vdso_base) {
if (save_user_regs(regs, &frame->mctx, 0))
goto badframe;
- regs->link = current->thread.vdso_base + vdso32_sigtramp;
+ regs->link = current->mm->context.vdso_base + vdso32_sigtramp;
} else {
if (save_user_regs(regs, &frame->mctx, __NR_sigreturn))
goto badframe;
@@ -1056,6 +1061,8 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka,
regs->gpr[3] = sig;
regs->gpr[4] = (unsigned long) sc;
regs->nip = (unsigned long) ka->sa.sa_handler;
+ /* enter the signal handler in big-endian mode */
+ regs->msr &= ~MSR_LE;
regs->trap = 0;
return 1;
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index c2db642f4cd..6e75d7ab6d4 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -141,9 +141,7 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
unsigned long err = 0;
unsigned long save_r13 = 0;
elf_greg_t *gregs = (elf_greg_t *)regs;
-#ifdef CONFIG_ALTIVEC
unsigned long msr;
-#endif
int i;
/* If this is not a signal return, we preserve the TLS in r13 */
@@ -154,7 +152,12 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
err |= __copy_from_user(regs, &sc->gp_regs,
PT_MSR*sizeof(unsigned long));
- /* skip MSR and SOFTE */
+ /* get MSR separately, transfer the LE bit if doing signal return */
+ err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
+ if (sig)
+ regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
+
+ /* skip SOFTE */
for (i = PT_MSR+1; i <= PT_RESULT; i++) {
if (i == PT_SOFTE)
continue;
@@ -179,7 +182,6 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
#ifdef CONFIG_ALTIVEC
err |= __get_user(v_regs, &sc->v_regs);
- err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
if (err)
return err;
if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128)))
@@ -396,8 +398,8 @@ static int setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info,
current->thread.fpscr.val = 0;
/* Set up to return from userspace. */
- if (vdso64_rt_sigtramp && current->thread.vdso_base) {
- regs->link = current->thread.vdso_base + vdso64_rt_sigtramp;
+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
+ regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
} else {
err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
if (err)
@@ -412,6 +414,8 @@ static int setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info,
/* Set up "regs" so we "return" to the signal handler. */
err |= get_user(regs->nip, &funct_desc_ptr->entry);
+ /* enter the signal handler in big-endian mode */
+ regs->msr &= ~MSR_LE;
regs->gpr[1] = newsp;
err |= get_user(regs->gpr[2], &funct_desc_ptr->toc);
regs->gpr[3] = signr;
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 24e3ad756de..528e7f84cb6 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -76,7 +76,6 @@
/* keep track of when we need to update the rtc */
time_t last_rtc_update;
-extern int piranha_simulator;
#ifdef CONFIG_PPC_ISERIES
unsigned long iSeries_recal_titan = 0;
unsigned long iSeries_recal_tb = 0;
@@ -945,9 +944,9 @@ void __init time_init(void)
} else {
/* Normal PowerPC with timebase register */
ppc_md.calibrate_decr();
- printk(KERN_INFO "time_init: decrementer frequency = %lu.%.6lu MHz\n",
+ printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
- printk(KERN_INFO "time_init: processor frequency = %lu.%.6lu MHz\n",
+ printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n",
ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
tb_last_stamp = tb_last_jiffy = get_tb();
}
@@ -1010,10 +1009,7 @@ void __init time_init(void)
tb_to_ns_scale = scale;
tb_to_ns_shift = shift;
-#ifdef CONFIG_PPC_ISERIES
- if (!piranha_simulator)
-#endif
- tm = get_boot_time();
+ tm = get_boot_time();
write_seqlock_irqsave(&xtime_lock, flags);
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 064a5256469..91a6e04d974 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -658,7 +658,7 @@ static int emulate_instruction(struct pt_regs *regs)
u32 instword;
u32 rd;
- if (!user_mode(regs))
+ if (!user_mode(regs) || (regs->msr & MSR_LE))
return -EINVAL;
CHECK_FULL_REGS(regs);
@@ -805,9 +805,11 @@ void __kprobes program_check_exception(struct pt_regs *regs)
void alignment_exception(struct pt_regs *regs)
{
- int fixed;
+ int fixed = 0;
- fixed = fix_alignment(regs);
+ /* we don't implement logging of alignment exceptions */
+ if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
+ fixed = fix_alignment(regs);
if (fixed == 1) {
regs->nip += 4; /* skip over emulated instruction */
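
The new align_ctl test honours the existing prctl(PR_SET_UNALIGN) interface, so a task can now actually receive SIGBUS instead of having the access emulated. A user-space sketch of opting in (error handling trimmed; the behaviour assumes this patch is applied):

	#include <stdio.h>
	#include <sys/prctl.h>

	int main(void)
	{
		/* ask the kernel to send SIGBUS rather than fix up
		 * unaligned accesses made by this task */
		if (prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS, 0, 0, 0) != 0)
			perror("prctl");
		return 0;
	}
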
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index 3774e80094f..67d9fd9ae2b 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/console.h>
+#include <linux/init.h>
#include <asm/processor.h>
#include <asm/udbg.h>
@@ -141,12 +142,14 @@ static int early_console_initialized;
void __init disable_early_printk(void)
{
-#if 1
if (!early_console_initialized)
return;
+ if (strstr(saved_command_line, "udbg-immortal")) {
+ printk(KERN_INFO "early console immortal !\n");
+ return;
+ }
unregister_console(&udbg_console);
early_console_initialized = 0;
-#endif
}
/* called by setup_system */
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 573afb68d69..bc3e15be308 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -223,6 +223,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
struct vm_area_struct *vma;
unsigned long vdso_pages;
unsigned long vdso_base;
+ int rc;
#ifdef CONFIG_PPC64
if (test_thread_flag(TIF_32BIT)) {
@@ -237,20 +238,13 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
vdso_base = VDSO32_MBASE;
#endif
- current->thread.vdso_base = 0;
+ current->mm->context.vdso_base = 0;
/* vDSO has a problem and was disabled, just don't "enable" it for the
* process
*/
if (vdso_pages == 0)
return 0;
-
- vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
- if (vma == NULL)
- return -ENOMEM;
-
- memset(vma, 0, sizeof(*vma));
-
/* Add a page to the vdso size for the data page */
vdso_pages ++;
@@ -259,17 +253,23 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
* at vdso_base which is the "natural" base for it, but we might fail
* and end up putting it elsewhere.
*/
+ down_write(&mm->mmap_sem);
vdso_base = get_unmapped_area(NULL, vdso_base,
vdso_pages << PAGE_SHIFT, 0, 0);
- if (vdso_base & ~PAGE_MASK) {
- kmem_cache_free(vm_area_cachep, vma);
- return (int)vdso_base;
+ if (IS_ERR_VALUE(vdso_base)) {
+ rc = vdso_base;
+ goto fail_mmapsem;
}
- current->thread.vdso_base = vdso_base;
+ /* Allocate a VMA structure and fill it up */
+ vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
+ if (vma == NULL) {
+ rc = -ENOMEM;
+ goto fail_mmapsem;
+ }
vma->vm_mm = mm;
- vma->vm_start = current->thread.vdso_base;
+ vma->vm_start = vdso_base;
vma->vm_end = vma->vm_start + (vdso_pages << PAGE_SHIFT);
/*
@@ -282,23 +282,38 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
* It's fine to use that for setting breakpoints in the vDSO code
* pages though
*/
- vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
+ vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC;
vma->vm_flags |= mm->def_flags;
vma->vm_page_prot = protection_map[vma->vm_flags & 0x7];
vma->vm_ops = &vdso_vmops;
- down_write(&mm->mmap_sem);
- if (insert_vm_struct(mm, vma)) {
- up_write(&mm->mmap_sem);
- kmem_cache_free(vm_area_cachep, vma);
- return -ENOMEM;
- }
+ /* Insert new VMA */
+ rc = insert_vm_struct(mm, vma);
+ if (rc)
+ goto fail_vma;
+
+ /* Put vDSO base into mm struct and account for memory usage */
+ current->mm->context.vdso_base = vdso_base;
mm->total_vm += (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
up_write(&mm->mmap_sem);
-
return 0;
+
+ fail_vma:
+ kmem_cache_free(vm_area_cachep, vma);
+ fail_mmapsem:
+ up_write(&mm->mmap_sem);
+ return rc;
+}
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
+ return "[vdso]";
+ return NULL;
}
+
+
static void * __init find_section32(Elf32_Ehdr *ehdr, const char *secname,
unsigned long *size)
{
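
With arch_vma_name() in place the vDSO mapping becomes visible by name; a quick user-space check (illustrative, it simply scans /proc/self/maps for the new "[vdso]" tag):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char line[256];
		FILE *f = fopen("/proc/self/maps", "r");

		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f))
			if (strstr(line, "[vdso]"))
				fputs(line, stdout);	/* the vDSO mapping, if tagged */
		fclose(f);
		return 0;
	}
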
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 971020cf3f7..e746686d48b 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -13,27 +13,116 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/types.h>
+#include <linux/device.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
+#include <linux/kobject.h>
+
#include <asm/iommu.h>
#include <asm/dma.h>
#include <asm/vio.h>
#include <asm/prom.h>
-
-static const struct vio_device_id *vio_match_device(
- const struct vio_device_id *, const struct vio_dev *);
-
-struct vio_dev vio_bus_device = { /* fake "parent" device */
+#include <asm/firmware.h>
+#include <asm/tce.h>
+#include <asm/abs_addr.h>
+#include <asm/page.h>
+#include <asm/hvcall.h>
+#include <asm/iseries/vio.h>
+#include <asm/iseries/hv_types.h>
+#include <asm/iseries/hv_lp_config.h>
+#include <asm/iseries/hv_call_xm.h>
+#include <asm/iseries/iommu.h>
+
+extern struct subsystem devices_subsys; /* needed for vio_find_name() */
+
+static struct vio_dev vio_bus_device = { /* fake "parent" device */
.name = vio_bus_device.dev.bus_id,
.type = "",
.dev.bus_id = "vio",
.dev.bus = &vio_bus_type,
};
-static struct vio_bus_ops vio_bus_ops;
+#ifdef CONFIG_PPC_ISERIES
+struct device *iSeries_vio_dev = &vio_bus_device.dev;
+EXPORT_SYMBOL(iSeries_vio_dev);
+
+static struct iommu_table veth_iommu_table;
+static struct iommu_table vio_iommu_table;
+
+static void __init iommu_vio_init(void)
+{
+ iommu_table_getparms_iSeries(255, 0, 0xff, &veth_iommu_table);
+ veth_iommu_table.it_size /= 2;
+ vio_iommu_table = veth_iommu_table;
+ vio_iommu_table.it_offset += veth_iommu_table.it_size;
+
+ if (!iommu_init_table(&veth_iommu_table))
+ printk("Virtual Bus VETH TCE table failed.\n");
+ if (!iommu_init_table(&vio_iommu_table))
+ printk("Virtual Bus VIO TCE table failed.\n");
+}
+#endif
+
+static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
+{
+#ifdef CONFIG_PPC_ISERIES
+ if (firmware_has_feature(FW_FEATURE_ISERIES)) {
+ if (strcmp(dev->type, "network") == 0)
+ return &veth_iommu_table;
+ return &vio_iommu_table;
+ } else
+#endif
+ {
+ unsigned char *dma_window;
+ struct iommu_table *tbl;
+ unsigned long offset, size;
+
+ dma_window = get_property(dev->dev.platform_data,
+ "ibm,my-dma-window", NULL);
+ if (!dma_window)
+ return NULL;
+
+ tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
+
+ of_parse_dma_window(dev->dev.platform_data, dma_window,
+ &tbl->it_index, &offset, &size);
+
+ /* TCE table size - measured in tce entries */
+ tbl->it_size = size >> PAGE_SHIFT;
+ /* offset for VIO should always be 0 */
+ tbl->it_offset = offset >> PAGE_SHIFT;
+ tbl->it_busno = 0;
+ tbl->it_type = TCE_VB;
+
+ return iommu_init_table(tbl);
+ }
+}
+
+/**
+ * vio_match_device: - Tell if a VIO device has a matching
+ * VIO device id structure.
+ * @ids: array of VIO device id structures to search in
+ * @dev: the VIO device structure to match against
+ *
+ * Used by a driver to check whether a VIO device present in the
+ * system is in its list of supported devices. Returns the matching
+ * vio_device_id structure or NULL if there is no match.
+ */
+static const struct vio_device_id *vio_match_device(
+ const struct vio_device_id *ids, const struct vio_dev *dev)
+{
+ while (ids->type[0] != '\0') {
+ if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
+ device_is_compatible(dev->dev.platform_data, ids->compat))
+ return ids;
+ ids++;
+ }
+ return NULL;
+}
/*
* Convert from struct device to struct vio_dev and pass to driver.
@@ -106,35 +195,110 @@ void vio_unregister_driver(struct vio_driver *viodrv)
}
EXPORT_SYMBOL(vio_unregister_driver);
+/* vio_dev refcount hit 0 */
+static void __devinit vio_dev_release(struct device *dev)
+{
+ if (dev->platform_data) {
+ /* XXX free TCE table */
+ of_node_put(dev->platform_data);
+ }
+ kfree(to_vio_dev(dev));
+}
+
/**
- * vio_match_device: - Tell if a VIO device has a matching
- * VIO device id structure.
- * @ids: array of VIO device id structures to search in
- * @dev: the VIO device structure to match against
+ * vio_register_device_node: - Register a new vio device.
+ * @of_node: The OF node for this device.
*
- * Used by a driver to check whether a VIO device present in the
- * system is in its list of supported devices. Returns the matching
- * vio_device_id structure or NULL if there is no match.
+ * Creates and initializes a vio_dev structure from the data in
+ * of_node (dev.platform_data) and adds it to the list of virtual devices.
+ * Returns a pointer to the created vio_dev or NULL if the node lacks
+ * a 'device_type' or 'reg' property.
*/
-static const struct vio_device_id *vio_match_device(
- const struct vio_device_id *ids, const struct vio_dev *dev)
+struct vio_dev * __devinit vio_register_device_node(struct device_node *of_node)
{
- while (ids->type[0] != '\0') {
- if (vio_bus_ops.match(ids, dev))
- return ids;
- ids++;
+ struct vio_dev *viodev;
+ unsigned int *unit_address;
+ unsigned int *irq_p;
+
+ /* we need the 'device_type' property, in order to match with drivers */
+ if (of_node->type == NULL) {
+ printk(KERN_WARNING "%s: node %s missing 'device_type'\n",
+ __FUNCTION__,
+ of_node->name ? of_node->name : "<unknown>");
+ return NULL;
}
- return NULL;
+
+ unit_address = (unsigned int *)get_property(of_node, "reg", NULL);
+ if (unit_address == NULL) {
+ printk(KERN_WARNING "%s: node %s missing 'reg'\n",
+ __FUNCTION__,
+ of_node->name ? of_node->name : "<unknown>");
+ return NULL;
+ }
+
+ /* allocate a vio_dev for this node */
+ viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL);
+ if (viodev == NULL)
+ return NULL;
+
+ viodev->dev.platform_data = of_node_get(of_node);
+
+ viodev->irq = NO_IRQ;
+ irq_p = (unsigned int *)get_property(of_node, "interrupts", NULL);
+ if (irq_p) {
+ int virq = virt_irq_create_mapping(*irq_p);
+ if (virq == NO_IRQ) {
+ printk(KERN_ERR "Unable to allocate interrupt "
+ "number for %s\n", of_node->full_name);
+ } else
+ viodev->irq = irq_offset_up(virq);
+ }
+
+ snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%x", *unit_address);
+ viodev->name = of_node->name;
+ viodev->type = of_node->type;
+ viodev->unit_address = *unit_address;
+ if (firmware_has_feature(FW_FEATURE_ISERIES)) {
+ unit_address = (unsigned int *)get_property(of_node,
+ "linux,unit_address", NULL);
+ if (unit_address != NULL)
+ viodev->unit_address = *unit_address;
+ }
+ viodev->iommu_table = vio_build_iommu_table(viodev);
+
+ /* init generic 'struct device' fields: */
+ viodev->dev.parent = &vio_bus_device.dev;
+ viodev->dev.bus = &vio_bus_type;
+ viodev->dev.release = vio_dev_release;
+
+ /* register with generic device framework */
+ if (device_register(&viodev->dev)) {
+ printk(KERN_ERR "%s: failed to register device %s\n",
+ __FUNCTION__, viodev->dev.bus_id);
+ /* XXX free TCE table */
+ kfree(viodev);
+ return NULL;
+ }
+
+ return viodev;
}
+EXPORT_SYMBOL(vio_register_device_node);
/**
* vio_bus_init: - Initialize the virtual IO bus
*/
-int __init vio_bus_init(struct vio_bus_ops *ops)
+static int __init vio_bus_init(void)
{
int err;
+ struct device_node *node_vroot;
- vio_bus_ops = *ops;
+#ifdef CONFIG_PPC_ISERIES
+ if (firmware_has_feature(FW_FEATURE_ISERIES)) {
+ iommu_vio_init();
+ vio_bus_device.iommu_table = &vio_iommu_table;
+ iSeries_vio_dev = &vio_bus_device.dev;
+ }
+#endif
err = bus_register(&vio_bus_type);
if (err) {
@@ -153,47 +317,48 @@ int __init vio_bus_init(struct vio_bus_ops *ops)
return err;
}
- return 0;
-}
+ node_vroot = find_devices("vdevice");
+ if (node_vroot) {
+ struct device_node *of_node;
+
+ /*
+ * Create struct vio_devices for each virtual device in
+ * the device tree. Drivers will associate with them later.
+ */
+ for (of_node = node_vroot->child; of_node != NULL;
+ of_node = of_node->sibling) {
+ printk(KERN_DEBUG "%s: processing %p\n",
+ __FUNCTION__, of_node);
+ vio_register_device_node(of_node);
+ }
+ }
-/* vio_dev refcount hit 0 */
-static void __devinit vio_dev_release(struct device *dev)
-{
- if (vio_bus_ops.release_device)
- vio_bus_ops.release_device(dev);
- kfree(to_vio_dev(dev));
+ return 0;
}
+__initcall(vio_bus_init);
-static ssize_t viodev_show_name(struct device *dev,
+static ssize_t name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%s\n", to_vio_dev(dev)->name);
}
-DEVICE_ATTR(name, S_IRUSR | S_IRGRP | S_IROTH, viodev_show_name, NULL);
-struct vio_dev * __devinit vio_register_device(struct vio_dev *viodev)
+static ssize_t devspec_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- /* init generic 'struct device' fields: */
- viodev->dev.parent = &vio_bus_device.dev;
- viodev->dev.bus = &vio_bus_type;
- viodev->dev.release = vio_dev_release;
-
- /* register with generic device framework */
- if (device_register(&viodev->dev)) {
- printk(KERN_ERR "%s: failed to register device %s\n",
- __FUNCTION__, viodev->dev.bus_id);
- return NULL;
- }
- device_create_file(&viodev->dev, &dev_attr_name);
+ struct device_node *of_node = dev->platform_data;
- return viodev;
+ return sprintf(buf, "%s\n", of_node ? of_node->full_name : "none");
}
+static struct device_attribute vio_dev_attrs[] = {
+ __ATTR_RO(name),
+ __ATTR_RO(devspec),
+ __ATTR_NULL
+};
+
void __devinit vio_unregister_device(struct vio_dev *viodev)
{
- if (vio_bus_ops.unregister_device)
- vio_bus_ops.unregister_device(viodev);
- device_remove_file(&viodev->dev, &dev_attr_name);
device_unregister(&viodev->dev);
}
EXPORT_SYMBOL(vio_unregister_device);
@@ -229,7 +394,7 @@ static void *vio_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
return iommu_alloc_coherent(to_vio_dev(dev)->iommu_table, size,
- dma_handle, ~0ul, flag);
+ dma_handle, ~0ul, flag, -1);
}
static void vio_free_coherent(struct device *dev, size_t size,
@@ -267,22 +432,23 @@ static int vio_hotplug(struct device *dev, char **envp, int num_envp,
char *buffer, int buffer_size)
{
const struct vio_dev *vio_dev = to_vio_dev(dev);
+ struct device_node *dn = dev->platform_data;
char *cp;
int length;
if (!num_envp)
return -ENOMEM;
- if (!vio_dev->dev.platform_data)
+ if (!dn)
return -ENODEV;
- cp = (char *)get_property(vio_dev->dev.platform_data, "compatible", &length);
+ cp = (char *)get_property(dn, "compatible", &length);
if (!cp)
return -ENODEV;
envp[0] = buffer;
length = scnprintf(buffer, buffer_size, "MODALIAS=vio:T%sS%s",
vio_dev->type, cp);
- if (buffer_size - length <= 0)
+ if ((buffer_size - length) <= 0)
return -ENOMEM;
envp[1] = NULL;
return 0;
@@ -290,9 +456,81 @@ static int vio_hotplug(struct device *dev, char **envp, int num_envp,
struct bus_type vio_bus_type = {
.name = "vio",
+ .dev_attrs = vio_dev_attrs,
.uevent = vio_hotplug,
.match = vio_bus_match,
.probe = vio_bus_probe,
.remove = vio_bus_remove,
.shutdown = vio_bus_shutdown,
};
+
+/**
+ * vio_get_attribute: - get attribute for virtual device
+ * @vdev: The vio device to get property.
+ * @which: The property/attribute to be extracted.
+ * @length: Pointer to length of returned data size (unused if NULL).
+ *
+ * Calls prom.c's get_property() to return the value of the
+ * attribute specified by @which
+*/
+const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length)
+{
+ return get_property(vdev->dev.platform_data, which, length);
+}
+EXPORT_SYMBOL(vio_get_attribute);
+
+#ifdef CONFIG_PPC_PSERIES
+/* vio_find_name() - internal because only vio.c knows how we formatted the
+ * kobject name
+ * XXX once vio_bus_type.devices is actually used as a kset in
+ * drivers/base/bus.c, this function should be removed in favor of
+ * "device_find(kobj_name, &vio_bus_type)"
+ */
+static struct vio_dev *vio_find_name(const char *kobj_name)
+{
+ struct kobject *found;
+
+ found = kset_find_obj(&devices_subsys.kset, kobj_name);
+ if (!found)
+ return NULL;
+
+ return to_vio_dev(container_of(found, struct device, kobj));
+}
+
+/**
+ * vio_find_node - find an already-registered vio_dev
+ * @vnode: device_node of the virtual device we're looking for
+ */
+struct vio_dev *vio_find_node(struct device_node *vnode)
+{
+ uint32_t *unit_address;
+ char kobj_name[BUS_ID_SIZE];
+
+ /* construct the kobject name from the device node */
+ unit_address = (uint32_t *)get_property(vnode, "reg", NULL);
+ if (!unit_address)
+ return NULL;
+ snprintf(kobj_name, BUS_ID_SIZE, "%x", *unit_address);
+
+ return vio_find_name(kobj_name);
+}
+EXPORT_SYMBOL(vio_find_node);
+
+int vio_enable_interrupts(struct vio_dev *dev)
+{
+ int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE);
+ if (rc != H_SUCCESS)
+ printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc);
+ return rc;
+}
+EXPORT_SYMBOL(vio_enable_interrupts);
+
+int vio_disable_interrupts(struct vio_dev *dev)
+{
+ int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE);
+ if (rc != H_SUCCESS)
+ printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc);
+ return rc;
+}
+EXPORT_SYMBOL(vio_disable_interrupts);
+#endif /* CONFIG_PPC_PSERIES */
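
A minimal sketch of how a pSeries virtual-bus driver might consume the rewritten bus code from its probe routine, using vio_get_attribute() and the per-device iommu_table/irq fields (the function name and property are illustrative; only the vio_* calls come from this patch):

	static int example_probe(struct vio_dev *vdev, const struct vio_device_id *id)
	{
		int len;
		const void *window;

		/* fetch a device-tree property through the vio accessor */
		window = vio_get_attribute(vdev, "ibm,my-dma-window", &len);
		if (!window)
			return -ENODEV;

		/* ... set up DMA via vdev->iommu_table, request vdev->irq, ... */
		return 0;
	}
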
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index fe79c2584cb..8b25953dc4f 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -93,6 +93,11 @@ SECTIONS
__ptov_table_begin = .;
*(.ptov_fixup);
__ptov_table_end = .;
+#ifdef CONFIG_PPC_ISERIES
+ __dt_strings_start = .;
+ *(.dt_strings);
+ __dt_strings_end = .;
+#endif
}
. = ALIGN(16);