author	Linus Torvalds <torvalds@linux-foundation.org>	2011-02-06 12:03:10 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-02-06 12:03:10 -0800
commit	07675f484bb9de0c4bd0722e174cb27043dbacef (patch)
tree	a82f9378da66021881c06a348eb3e767ded4e04f /arch/x86
parent	585a7c666e67b7a6757bd12b734e22f4f76998a2 (diff)
parent	11d4c3f9b671720e80353dd7e433ff2bf65e9500 (diff)
Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86-32: Make sure the stack is set up before we use it
  x86, mtrr: Avoid MTRR reprogramming on BP during boot on UP platforms
  x86, nx: Don't force pages RW when setting NX bits
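Of the three fixes, the MTRR one hinges on mtrr_aps_init() now returning early unless set_mtrr_aps_delayed_init() was called during SMP bring-up. Below is a minimal sketch of that flag handshake in userspace C; the names mirror arch/x86/kernel/cpu/mtrr/main.c, but the bodies are stand-ins and the printf only marks where the real set_mtrr(~0U, 0, 0, 0) rendezvous would run.

/*
 * Minimal userspace sketch (not kernel code) of the delayed-init handshake
 * that the MTRR fix relies on.
 */
#include <stdbool.h>
#include <stdio.h>

static bool mtrr_aps_delayed_init;

static void set_mtrr_aps_delayed_init(void)
{
	mtrr_aps_delayed_init = true;	/* SMP bring-up asks for a deferred MTRR rendezvous */
}

static void mtrr_aps_init(void)
{
	/* After the fix: nothing was deferred (e.g. a UP boot), so do not reprogram MTRRs. */
	if (!mtrr_aps_delayed_init)
		return;

	printf("reprogram MTRRs on all CPUs\n");	/* stand-in for set_mtrr(~0U, 0, 0, 0) */
	mtrr_aps_delayed_init = false;
}

int main(void)
{
	mtrr_aps_init();		/* UP boot path: now a no-op */
	set_mtrr_aps_delayed_init();	/* SMP boot path requests the delayed pass */
	mtrr_aps_init();		/* the rendezvous actually runs here */
	return 0;
}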
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/include/asm/smp.h	5
-rw-r--r--	arch/x86/kernel/acpi/sleep.c	2
-rw-r--r--	arch/x86/kernel/cpu/mtrr/main.c	10
-rw-r--r--	arch/x86/kernel/head_32.S	30
-rw-r--r--	arch/x86/kernel/smpboot.c	4
-rw-r--r--	arch/x86/mm/pageattr.c	8
6 files changed, 26 insertions, 33 deletions
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 4c2f63c7fc1..1f469513677 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
/* Static state in head.S used to set up a CPU */
-extern struct {
- void *sp;
- unsigned short ss;
-} stack_start;
+extern unsigned long stack_start; /* Initial stack pointer address */
struct smp_ops {
void (*smp_prepare_boot_cpu)(void);
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 69fd72aa559..4d9ebbab223 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -100,7 +100,7 @@ int acpi_save_state_mem(void)
#else /* CONFIG_64BIT */
header->trampoline_segment = setup_trampoline() >> 4;
#ifdef CONFIG_SMP
- stack_start.sp = temp_stack + sizeof(temp_stack);
+ stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
early_gdt_descr.address =
(unsigned long)get_cpu_gdt_table(smp_processor_id());
initial_gs = per_cpu_offset(smp_processor_id());
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 01c0f3ee6cc..bebabec5b44 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -793,13 +793,21 @@ void set_mtrr_aps_delayed_init(void)
}
/*
- * MTRR initialization for all AP's
+ * Delayed MTRR initialization for all AP's
*/
void mtrr_aps_init(void)
{
if (!use_intel())
return;
+ /*
+ * Check if someone has requested the delay of AP MTRR initialization,
+ * by doing set_mtrr_aps_delayed_init(), prior to this point. If not,
+ * then we are done.
+ */
+ if (!mtrr_aps_delayed_init)
+ return;
+
set_mtrr(~0U, 0, 0, 0);
mtrr_aps_delayed_init = false;
}
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index fc293dc8dc3..767d6c43de3 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -85,6 +85,8 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
*/
__HEAD
ENTRY(startup_32)
+ movl pa(stack_start),%ecx
+
/* test KEEP_SEGMENTS flag to see if the bootloader is asking
us to not reload segments */
testb $(1<<6), BP_loadflags(%esi)
@@ -99,7 +101,9 @@ ENTRY(startup_32)
movl %eax,%es
movl %eax,%fs
movl %eax,%gs
+ movl %eax,%ss
2:
+ leal -__PAGE_OFFSET(%ecx),%esp
/*
* Clear BSS first so that there are no surprises...
@@ -145,8 +149,6 @@ ENTRY(startup_32)
* _brk_end is set up to point to the first "safe" location.
* Mappings are created both at virtual address 0 (identity mapping)
* and PAGE_OFFSET for up to _end.
- *
- * Note that the stack is not yet set up!
*/
#ifdef CONFIG_X86_PAE
@@ -282,6 +284,9 @@ ENTRY(startup_32_smp)
movl %eax,%es
movl %eax,%fs
movl %eax,%gs
+ movl pa(stack_start),%ecx
+ movl %eax,%ss
+ leal -__PAGE_OFFSET(%ecx),%esp
#endif /* CONFIG_SMP */
default_entry:
@@ -347,8 +352,8 @@ default_entry:
movl %eax,%cr0 /* ..and set paging (PG) bit */
ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
1:
- /* Set up the stack pointer */
- lss stack_start,%esp
+ /* Shift the stack pointer to a virtual address */
+ addl $__PAGE_OFFSET, %esp
/*
* Initialize eflags. Some BIOS's leave bits like NT set. This would
@@ -360,9 +365,7 @@ default_entry:
#ifdef CONFIG_SMP
cmpb $0, ready
- jz 1f /* Initial CPU cleans BSS */
- jmp checkCPUtype
-1:
+ jnz checkCPUtype
#endif /* CONFIG_SMP */
/*
@@ -470,14 +473,7 @@ is386: movl $2,%ecx # set MP
cld # gcc2 wants the direction flag cleared at all times
pushl $0 # fake return address for unwinder
-#ifdef CONFIG_SMP
- movb ready, %cl
movb $1, ready
- cmpb $0,%cl # the first CPU calls start_kernel
- je 1f
- movl (stack_start), %esp
-1:
-#endif /* CONFIG_SMP */
jmp *(initial_code)
/*
@@ -670,15 +666,15 @@ ENTRY(initial_page_table)
#endif
.data
+.balign 4
ENTRY(stack_start)
.long init_thread_union+THREAD_SIZE
- .long __BOOT_DS
-
-ready: .byte 0
early_recursion_flag:
.long 0
+ready: .byte 0
+
int_msg:
.asciz "Unknown interrupt or fault at: %p %p %p\n"
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 0cbe8c0b35e..03273b6c272 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -638,7 +638,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
* target processor state.
*/
startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
- (unsigned long)stack_start.sp);
+ stack_start);
/*
* Run STARTUP IPI loop.
@@ -785,7 +785,7 @@ do_rest:
#endif
early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
initial_code = (unsigned long)start_secondary;
- stack_start.sp = (void *) c_idle.idle->thread.sp;
+ stack_start = c_idle.idle->thread.sp;
/* start_ip had better be page-aligned! */
start_ip = setup_trampoline();
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 8b830ca14ac..d343b3c81f3 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -256,7 +256,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
unsigned long pfn)
{
pgprot_t forbidden = __pgprot(0);
- pgprot_t required = __pgprot(0);
/*
* The BIOS area between 640k and 1Mb needs to be executable for
@@ -282,12 +281,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
__pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
pgprot_val(forbidden) |= _PAGE_RW;
- /*
- * .data and .bss should always be writable.
- */
- if (within(address, (unsigned long)_sdata, (unsigned long)_edata) ||
- within(address, (unsigned long)__bss_start, (unsigned long)__bss_stop))
- pgprot_val(required) |= _PAGE_RW;
#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
/*
@@ -327,7 +320,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
#endif
prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
- prot = __pgprot(pgprot_val(prot) | pgprot_val(required));
return prot;
}