Diffstat (limited to 'arch/x86_64/kernel')
-rw-r--r--  arch/x86_64/kernel/e820.c           18
-rw-r--r--  arch/x86_64/kernel/early-quirks.c   17
-rw-r--r--  arch/x86_64/kernel/entry.S          10
-rw-r--r--  arch/x86_64/kernel/genapic_flat.c    2
-rw-r--r--  arch/x86_64/kernel/i8259.c           7
-rw-r--r--  arch/x86_64/kernel/io_apic.c       197
-rw-r--r--  arch/x86_64/kernel/irq.c             2
-rw-r--r--  arch/x86_64/kernel/pci-calgary.c    44
-rw-r--r--  arch/x86_64/kernel/process.c        29
-rw-r--r--  arch/x86_64/kernel/smp.c             3
-rw-r--r--  arch/x86_64/kernel/smpboot.c         7
-rw-r--r--  arch/x86_64/kernel/time.c           13
-rw-r--r--  arch/x86_64/kernel/traps.c           6
-rw-r--r--  arch/x86_64/kernel/vmlinux.lds.S    12
-rw-r--r--  arch/x86_64/kernel/vsmp.c            1
-rw-r--r--  arch/x86_64/kernel/vsyscall.c       47
16 files changed, 286 insertions, 129 deletions
diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c
index b3f0908668ec..6fe191c58084 100644
--- a/arch/x86_64/kernel/e820.c
+++ b/arch/x86_64/kernel/e820.c
@@ -54,13 +54,13 @@ static inline int bad_addr(unsigned long *addrp, unsigned long size)
/* various gunk below that needed for SMP startup */
if (addr < 0x8000) {
- *addrp = 0x8000;
+ *addrp = PAGE_ALIGN(0x8000);
return 1;
}
/* direct mapping tables of the kernel */
if (last >= table_start<<PAGE_SHIFT && addr < table_end<<PAGE_SHIFT) {
- *addrp = table_end << PAGE_SHIFT;
+ *addrp = PAGE_ALIGN(table_end << PAGE_SHIFT);
return 1;
}
@@ -68,18 +68,18 @@ static inline int bad_addr(unsigned long *addrp, unsigned long size)
#ifdef CONFIG_BLK_DEV_INITRD
if (LOADER_TYPE && INITRD_START && last >= INITRD_START &&
addr < INITRD_START+INITRD_SIZE) {
- *addrp = INITRD_START + INITRD_SIZE;
+ *addrp = PAGE_ALIGN(INITRD_START + INITRD_SIZE);
return 1;
}
#endif
/* kernel code */
- if (last >= __pa_symbol(&_text) && last < __pa_symbol(&_end)) {
- *addrp = __pa_symbol(&_end);
+ if (last >= __pa_symbol(&_text) && addr < __pa_symbol(&_end)) {
+ *addrp = PAGE_ALIGN(__pa_symbol(&_end));
return 1;
}
if (last >= ebda_addr && addr < ebda_addr + ebda_size) {
- *addrp = ebda_addr + ebda_size;
+ *addrp = PAGE_ALIGN(ebda_addr + ebda_size);
return 1;
}
@@ -152,7 +152,7 @@ unsigned long __init find_e820_area(unsigned long start, unsigned long end, unsi
continue;
while (bad_addr(&addr, size) && addr+size <= ei->addr+ei->size)
;
- last = addr + size;
+ last = PAGE_ALIGN(addr) + size;
if (last > ei->addr + ei->size)
continue;
if (last > end)
@@ -278,7 +278,7 @@ e820_register_active_regions(int nid, unsigned long start_pfn,
>> PAGE_SHIFT;
/* Skip map entries smaller than a page */
- if (ei_startpfn > ei_endpfn)
+ if (ei_startpfn >= ei_endpfn)
continue;
/* Check if end_pfn_map should be updated */
@@ -594,7 +594,9 @@ static int __init parse_memmap_opt(char *p)
* size before original memory map is
* reset.
*/
+ e820_register_active_regions(0, 0, -1UL);
saved_max_pfn = e820_end_of_ram();
+ remove_all_active_ranges();
#endif
end_pfn_map = 0;
e820.nr_map = 0;
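
The bad_addr() changes above matter because find_e820_area() treats the
bumped address as its next allocation candidate; if a skip target is not
page aligned, a later allocation can straddle the end of a reserved region.
A simplified sketch (not the kernel code itself) of how the two cooperate,
assuming bad_addr() now always returns page-aligned targets:

    /*
     * Sketch: scan one e820 RAM range for 'size' bytes, letting
     * bad_addr() bump the candidate past reserved areas.  Every
     * bump is PAGE_ALIGN()ed, so the returned area never overlaps
     * the tail of a reserved region.
     */
    unsigned long scan_range(unsigned long start, unsigned long end,
                             unsigned long size)
    {
            unsigned long addr = start;

            while (bad_addr(&addr, size) && addr + size <= end)
                    ;                       /* keep skipping forward */
            if (addr + size > end)
                    return -1UL;            /* no room in this range */
            return addr;
    }
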
diff --git a/arch/x86_64/kernel/early-quirks.c b/arch/x86_64/kernel/early-quirks.c
index 208e38a372c1..68273bff58cc 100644
--- a/arch/x86_64/kernel/early-quirks.c
+++ b/arch/x86_64/kernel/early-quirks.c
@@ -45,7 +45,13 @@ static void nvidia_bugs(void)
/*
* All timer overrides on Nvidia are
* wrong unless HPET is enabled.
+ * Unfortunately that's not true on many Asus boards.
+ * We don't know yet how to detect this automatically, but
+ * at least allow a command line override.
*/
+ if (acpi_use_timer_override)
+ return;
+
nvidia_hpet_detected = 0;
acpi_table_parse(ACPI_HPET, nvidia_hpet_check);
if (nvidia_hpet_detected == 0) {
@@ -53,6 +59,8 @@ static void nvidia_bugs(void)
printk(KERN_INFO "Nvidia board "
"detected. Ignoring ACPI "
"timer override.\n");
+ printk(KERN_INFO "If you got timer trouble "
+ "try acpi_use_timer_override\n");
}
#endif
/* RED-PEN skip them on mptables too? */
@@ -61,10 +69,11 @@ static void nvidia_bugs(void)
static void ati_bugs(void)
{
-#if 1 /* for testing */
- printk("ATI board detected\n");
-#endif
- /* No bugs right now */
+ if (timer_over_8254 == 1) {
+ timer_over_8254 = 0;
+ printk(KERN_INFO
+ "ATI board detected. Disabling timer routing over 8254.\n");
+ }
}
struct chipset {
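
The nvidia_bugs()/ati_bugs() handlers above are dispatched from an early
PCI scan through a vendor table; in this era the table in early-quirks.c
looks roughly like the following (abridged sketch, not part of this diff):

    struct chipset {
            u16 vendor;
            void (*f)(void);
    };

    static struct chipset early_qrk[] = {
            { PCI_VENDOR_ID_NVIDIA, nvidia_bugs },
            { PCI_VENDOR_ID_VIA,    via_bugs },
            { PCI_VENDOR_ID_ATI,    ati_bugs },
            {}                              /* terminator */
    };
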
diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S
index 38a7b2d528e2..7d401b00d822 100644
--- a/arch/x86_64/kernel/entry.S
+++ b/arch/x86_64/kernel/entry.S
@@ -315,6 +315,8 @@ tracesys:
LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
RESTORE_REST
cmpq $__NR_syscall_max,%rax
+ movq $-ENOSYS,%rcx
+ cmova %rcx,%rax
ja 1f
movq %r10,%rcx /* fixup for C */
call *sys_call_table(,%rax,8)
@@ -535,8 +537,6 @@ END(stub_rt_sigreturn)
1: incl %gs:pda_irqcount
cmoveq %gs:pda_irqstackptr,%rsp
push %rbp # backlink for old unwinder
- CFI_ADJUST_CFA_OFFSET 8
- CFI_REL_OFFSET rbp,0
/*
* We entered an interrupt context - irqs are off:
*/
@@ -980,11 +980,6 @@ ENTRY(kernel_thread)
call do_fork
movq %rax,RAX(%rsp)
xorl %edi,%edi
- test %rax,%rax
- jnz 1f
- /* terminate stack in child */
- movq %rdi,RIP(%rsp)
-1:
/*
* It isn't worth to check for reschedule here,
@@ -1176,7 +1171,6 @@ ENTRY(call_softirq)
incl %gs:pda_irqcount
cmove %gs:pda_irqstackptr,%rsp
push %rbp # backlink for old unwinder
- CFI_ADJUST_CFA_OFFSET 8
call __do_softirq
leaveq
CFI_DEF_CFA_REGISTER rsp
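
The tracesys change above fixes the return value for out-of-range syscall
numbers, which ptrace may have rewritten after LOAD_ARGS: cmova loads
-ENOSYS into %rax whenever the compare reports "above", so the ja 1f path
returns -ENOSYS instead of a stale value. An illustrative C equivalent
(sketch only; argument passing elided):

    long traced_syscall(unsigned long nr)
    {
            if (nr > __NR_syscall_max)      /* cmpq + cmova + ja 1f */
                    return -ENOSYS;         /* was: stale %rax leaked out */
            return sys_call_table[nr]();    /* call *sys_call_table(,%rax,8) */
    }
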
diff --git a/arch/x86_64/kernel/genapic_flat.c b/arch/x86_64/kernel/genapic_flat.c
index 0dfc223c1839..7c01db8fa9d1 100644
--- a/arch/x86_64/kernel/genapic_flat.c
+++ b/arch/x86_64/kernel/genapic_flat.c
@@ -153,7 +153,7 @@ struct genapic apic_flat = {
static cpumask_t physflat_target_cpus(void)
{
- return cpumask_of_cpu(0);
+ return cpu_online_map;
}
static cpumask_t physflat_vector_allocation_domain(int cpu)
diff --git a/arch/x86_64/kernel/i8259.c b/arch/x86_64/kernel/i8259.c
index 0612a33bb896..c4ef801b765b 100644
--- a/arch/x86_64/kernel/i8259.c
+++ b/arch/x86_64/kernel/i8259.c
@@ -178,7 +178,8 @@ void make_8259A_irq(unsigned int irq)
{
disable_irq_nosync(irq);
io_apic_irqs &= ~(1<<irq);
- set_irq_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
+ set_irq_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
+ "XT");
enable_irq(irq);
}
@@ -431,8 +432,8 @@ void __init init_ISA_irqs (void)
/*
* 16 old-style INTA-cycle interrupts:
*/
- set_irq_chip_and_handler(i, &i8259A_chip,
- handle_level_irq);
+ set_irq_chip_and_handler_name(i, &i8259A_chip,
+ handle_level_irq, "XT");
} else {
/*
* 'high' PCI IRQs filled in on demand
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index 771bcf77daf2..14654e682411 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -57,13 +57,13 @@ static int no_timer_check;
static int disable_timer_pin_1 __initdata;
-int timer_over_8254 __initdata = 0;
+int timer_over_8254 __initdata = 1;
/* Where if anywhere is the i8259 connect in external int mode */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
static DEFINE_SPINLOCK(ioapic_lock);
-static DEFINE_SPINLOCK(vector_lock);
+DEFINE_SPINLOCK(vector_lock);
/*
* # of IRQ routing registers
@@ -88,6 +88,52 @@ static struct irq_pin_list {
short apic, pin, next;
} irq_2_pin[PIN_MAP_SIZE];
+struct io_apic {
+ unsigned int index;
+ unsigned int unused[3];
+ unsigned int data;
+};
+
+static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
+{
+ return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
+ + (mp_ioapics[idx].mpc_apicaddr & ~PAGE_MASK);
+}
+
+static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
+{
+ struct io_apic __iomem *io_apic = io_apic_base(apic);
+ writel(reg, &io_apic->index);
+ return readl(&io_apic->data);
+}
+
+static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
+{
+ struct io_apic __iomem *io_apic = io_apic_base(apic);
+ writel(reg, &io_apic->index);
+ writel(value, &io_apic->data);
+}
+
+/*
+ * Re-write a value: to be used for read-modify-write
+ * cycles where the read already set up the index register.
+ */
+static inline void io_apic_modify(unsigned int apic, unsigned int value)
+{
+ struct io_apic __iomem *io_apic = io_apic_base(apic);
+ writel(value, &io_apic->data);
+}
+
+/*
+ * Synchronize the IO-APIC and the CPU by doing
+ * a dummy read from the IO-APIC
+ */
+static inline void io_apic_sync(unsigned int apic)
+{
+ struct io_apic __iomem *io_apic = io_apic_base(apic);
+ readl(&io_apic->data);
+}
+
#define __DO_ACTION(R, ACTION, FINAL) \
\
{ \
@@ -126,12 +172,34 @@ static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
return eu.entry;
}
+/*
+ * When we write a new IO APIC routing entry, we need to write the high
+ * word first! If the mask bit in the low word is clear, we will enable
+ * the interrupt, and we need to make sure the entry is fully populated
+ * before that happens.
+ */
static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
unsigned long flags;
union entry_union eu;
eu.entry = e;
spin_lock_irqsave(&ioapic_lock, flags);
+ io_apic_write(apic, 0x11 + 2*pin, eu.w2);
+ io_apic_write(apic, 0x10 + 2*pin, eu.w1);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+}
+
+/*
+ * When we mask an IO APIC routing entry, we need to write the low
+ * word first, in order to set the mask bit before we change the
+ * high bits!
+ */
+static void ioapic_mask_entry(int apic, int pin)
+{
+ unsigned long flags;
+ union entry_union eu = { .entry.mask = 1 };
+
+ spin_lock_irqsave(&ioapic_lock, flags);
io_apic_write(apic, 0x10 + 2*pin, eu.w1);
io_apic_write(apic, 0x11 + 2*pin, eu.w2);
spin_unlock_irqrestore(&ioapic_lock, flags);
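
The ordering rules in the two comments above follow from the layout of
union entry_union, which overlays the 64-bit IO-APIC route entry with the
two 32-bit register words (this union already exists earlier in io_apic.c;
shown here for reference):

    union entry_union {
            struct { u32 w1, w2; };
            struct IO_APIC_route_entry entry;
    };

w1 is the low word and carries the mask bit, so unmasking writes w2
(destination) first and w1 last, while masking writes w1 first.
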
@@ -256,9 +324,7 @@ static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
/*
* Disable it in the IO-APIC irq-routing table:
*/
- memset(&entry, 0, sizeof(entry));
- entry.mask = 1;
- ioapic_write_entry(apic, pin, entry);
+ ioapic_mask_entry(apic, pin);
}
static void clear_IO_APIC (void)
@@ -612,15 +678,15 @@ static int __assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
* Also, we've got to be careful not to trash gate
* 0x80, because int 0x80 is hm, kind of importantish. ;)
*/
- static struct {
- int vector;
- int offset;
- } pos[NR_CPUS] = { [ 0 ... NR_CPUS - 1] = {FIRST_DEVICE_VECTOR, 0} };
+ static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
int old_vector = -1;
int cpu;
BUG_ON((unsigned)irq >= NR_IRQ_VECTORS);
+ /* Only try and allocate irqs on cpus that are present */
+ cpus_and(mask, mask, cpu_online_map);
+
if (irq_vector[irq] > 0)
old_vector = irq_vector[irq];
if (old_vector > 0) {
@@ -630,15 +696,15 @@ static int __assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
}
for_each_cpu_mask(cpu, mask) {
- cpumask_t domain;
- int first, new_cpu;
+ cpumask_t domain, new_mask;
+ int new_cpu;
int vector, offset;
domain = vector_allocation_domain(cpu);
- first = first_cpu(domain);
+ cpus_and(new_mask, domain, cpu_online_map);
- vector = pos[first].vector;
- offset = pos[first].offset;
+ vector = current_vector;
+ offset = current_offset;
next:
vector += 8;
if (vector >= FIRST_SYSTEM_VECTOR) {
@@ -646,24 +712,24 @@ next:
offset = (offset + 1) % 8;
vector = FIRST_DEVICE_VECTOR + offset;
}
- if (unlikely(pos[first].vector == vector))
+ if (unlikely(current_vector == vector))
continue;
if (vector == IA32_SYSCALL_VECTOR)
goto next;
- for_each_cpu_mask(new_cpu, domain)
- if (per_cpu(vector_irq, cpu)[vector] != -1)
+ for_each_cpu_mask(new_cpu, new_mask)
+ if (per_cpu(vector_irq, new_cpu)[vector] != -1)
goto next;
/* Found one! */
- for_each_cpu_mask(new_cpu, domain) {
- pos[cpu].vector = vector;
- pos[cpu].offset = offset;
- }
+ current_vector = vector;
+ current_offset = offset;
if (old_vector >= 0) {
+ cpumask_t old_mask;
int old_cpu;
- for_each_cpu_mask(old_cpu, domain)
+ cpus_and(old_mask, irq_domain[irq], cpu_online_map);
+ for_each_cpu_mask(old_cpu, old_mask)
per_cpu(vector_irq, old_cpu)[old_vector] = -1;
}
- for_each_cpu_mask(new_cpu, domain)
+ for_each_cpu_mask(new_cpu, new_mask)
per_cpu(vector_irq, new_cpu)[vector] = irq;
irq_vector[irq] = vector;
irq_domain[irq] = domain;
@@ -684,6 +750,32 @@ static int assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
return vector;
}
+void __setup_vector_irq(int cpu)
+{
+ /* Initialize vector_irq on a new cpu */
+ /* This function must be called with vector_lock held */
+ unsigned long flags;
+ int irq, vector;
+
+
+ /* Mark the inuse vectors */
+ for (irq = 0; irq < NR_IRQ_VECTORS; ++irq) {
+ if (!cpu_isset(cpu, irq_domain[irq]))
+ continue;
+ vector = irq_vector[irq];
+ per_cpu(vector_irq, cpu)[vector] = irq;
+ }
+ /* Mark the free vectors */
+ for (vector = 0; vector < NR_VECTORS; ++vector) {
+ irq = per_cpu(vector_irq, cpu)[vector];
+ if (irq < 0)
+ continue;
+ if (!cpu_isset(cpu, irq_domain[irq]))
+ per_cpu(vector_irq, cpu)[vector] = -1;
+ }
+}
+
+
extern void (*interrupt[NR_IRQS])(void);
static struct irq_chip ioapic_chip;
@@ -696,11 +788,13 @@ static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
{
if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
trigger == IOAPIC_LEVEL)
- set_irq_chip_and_handler(irq, &ioapic_chip,
- handle_fasteoi_irq);
- else
- set_irq_chip_and_handler(irq, &ioapic_chip,
- handle_edge_irq);
+ set_irq_chip_and_handler_name(irq, &ioapic_chip,
+ handle_fasteoi_irq, "fasteoi");
+ else {
+ irq_desc[irq].status |= IRQ_DELAYED_DISABLE;
+ set_irq_chip_and_handler_name(irq, &ioapic_chip,
+ handle_edge_irq, "edge");
+ }
}
static void __init setup_IO_APIC_irqs(void)
@@ -806,7 +900,7 @@ static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, in
* The timer IRQ doesn't have to know that behind the
* scene we have a 8259A-master in AEOI mode ...
*/
- set_irq_chip_and_handler(0, &ioapic_chip, handle_edge_irq);
+ set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");
/*
* Add it to the IO-APIC irq-routing table:
@@ -1255,12 +1349,15 @@ static int ioapic_retrigger_irq(unsigned int irq)
{
cpumask_t mask;
unsigned vector;
+ unsigned long flags;
+ spin_lock_irqsave(&vector_lock, flags);
vector = irq_vector[irq];
cpus_clear(mask);
- cpu_set(vector >> 8, mask);
+ cpu_set(first_cpu(irq_domain[irq]), mask);
- send_IPI_mask(mask, vector & 0xff);
+ send_IPI_mask(mask, vector);
+ spin_unlock_irqrestore(&vector_lock, flags);
return 1;
}
@@ -1839,7 +1936,7 @@ int arch_setup_msi_irq(unsigned int irq, struct pci_dev *dev)
write_msi_msg(irq, &msg);
- set_irq_chip_and_handler(irq, &msi_chip, handle_edge_irq);
+ set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
return 0;
}
@@ -1860,18 +1957,16 @@ void arch_teardown_msi_irq(unsigned int irq)
static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
{
- u32 low, high;
- low = read_ht_irq_low(irq);
- high = read_ht_irq_high(irq);
+ struct ht_irq_msg msg;
+ fetch_ht_irq_msg(irq, &msg);
- low &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
- high &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
+ msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
+ msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
- low |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
- high |= HT_IRQ_HIGH_DEST_ID(dest);
+ msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
+ msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
- write_ht_irq_low(irq, low);
- write_ht_irq_high(irq, high);
+ write_ht_irq_msg(irq, &msg);
}
static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
@@ -1892,12 +1987,12 @@ static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
dest = cpu_mask_to_apicid(tmp);
- target_ht_irq(irq, dest, vector & 0xff);
+ target_ht_irq(irq, dest, vector);
set_native_irq_info(irq, mask);
}
#endif
-static struct hw_interrupt_type ht_irq_chip = {
+static struct irq_chip ht_irq_chip = {
.name = "PCI-HT",
.mask = mask_ht_irq,
.unmask = unmask_ht_irq,
@@ -1915,14 +2010,15 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
vector = assign_irq_vector(irq, TARGET_CPUS, &tmp);
if (vector >= 0) {
- u32 low, high;
+ struct ht_irq_msg msg;
unsigned dest;
dest = cpu_mask_to_apicid(tmp);
- high = HT_IRQ_HIGH_DEST_ID(dest);
+ msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
- low = HT_IRQ_LOW_BASE |
+ msg.address_lo =
+ HT_IRQ_LOW_BASE |
HT_IRQ_LOW_DEST_ID(dest) |
HT_IRQ_LOW_VECTOR(vector) |
((INT_DEST_MODE == 0) ?
@@ -1931,12 +2027,13 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
HT_IRQ_LOW_RQEOI_EDGE |
((INT_DELIVERY_MODE != dest_LowestPrio) ?
HT_IRQ_LOW_MT_FIXED :
- HT_IRQ_LOW_MT_ARBITRATED);
+ HT_IRQ_LOW_MT_ARBITRATED) |
+ HT_IRQ_LOW_IRQ_MASKED;
- write_ht_irq_low(irq, low);
- write_ht_irq_high(irq, high);
+ write_ht_irq_msg(irq, &msg);
- set_irq_chip_and_handler(irq, &ht_irq_chip, handle_edge_irq);
+ set_irq_chip_and_handler_name(irq, &ht_irq_chip,
+ handle_edge_irq, "edge");
}
return vector;
}
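
The __assign_irq_vector() rework above replaces the per-CPU position table
with a single global cursor (current_vector/current_offset) and considers
only online CPUs. A simplified sketch of the search loop it implements
(illustrative; locking and old-vector cleanup elided):

    /*
     * Advance in strides of 8 through the device vector range,
     * wrapping to the next priority offset, and accept a vector
     * only if it is free on every online CPU in the domain.
     */
    static int pick_vector(cpumask_t new_mask)
    {
            int vector = current_vector, offset = current_offset, cpu;
    next:
            vector += 8;
            if (vector >= FIRST_SYSTEM_VECTOR) {
                    offset = (offset + 1) % 8;
                    vector = FIRST_DEVICE_VECTOR + offset;
            }
            if (vector == current_vector)   /* wrapped: nothing free */
                    return -ENOSPC;
            if (vector == IA32_SYSCALL_VECTOR)
                    goto next;              /* never trash int 0x80 */
            for_each_cpu_mask(cpu, new_mask)
                    if (per_cpu(vector_irq, cpu)[vector] != -1)
                            goto next;      /* busy on some CPU */
            current_vector = vector;
            current_offset = offset;
            return vector;
    }
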
diff --git a/arch/x86_64/kernel/irq.c b/arch/x86_64/kernel/irq.c
index dff68eb2b787..e46c55856d40 100644
--- a/arch/x86_64/kernel/irq.c
+++ b/arch/x86_64/kernel/irq.c
@@ -75,7 +75,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
seq_printf(p, " %8s", irq_desc[i].chip->name);
- seq_printf(p, "-%s", handle_irq_name(irq_desc[i].handle_irq));
+ seq_printf(p, "-%-8s", irq_desc[i].name);
seq_printf(p, " %s", action->name);
for (action=action->next; action; action = action->next)
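
With the flow handler name stored in irq_desc[], /proc/interrupts now
prints it as a fixed-width suffix of the chip name; the output looks
roughly like this (counts illustrative):

      0:    1234567   IO-APIC-edge      timer
     14:       5678   IO-APIC-edge      ide0
    177:        910   IO-APIC-fasteoi   eth0
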
diff --git a/arch/x86_64/kernel/pci-calgary.c b/arch/x86_64/kernel/pci-calgary.c
index b3296cc2f2f2..37a770859e71 100644
--- a/arch/x86_64/kernel/pci-calgary.c
+++ b/arch/x86_64/kernel/pci-calgary.c
@@ -52,7 +52,8 @@
#define ONE_BASED_CHASSIS_NUM 1
/* register offsets inside the host bridge space */
-#define PHB_CSR_OFFSET 0x0110
+#define CALGARY_CONFIG_REG 0x0108
+#define PHB_CSR_OFFSET 0x0110 /* Channel Status */
#define PHB_PLSSR_OFFSET 0x0120
#define PHB_CONFIG_RW_OFFSET 0x0160
#define PHB_IOBASE_BAR_LOW 0x0170
@@ -83,6 +84,8 @@
#define TAR_VALID 0x0000000000000008UL
/* CSR (Channel/DMA Status Register) */
#define CSR_AGENT_MASK 0xffe0ffff
+/* CCR (Calgary Configuration Register) */
+#define CCR_2SEC_TIMEOUT 0x000000000000000EUL
#define MAX_NUM_OF_PHBS 8 /* how many PHBs in total? */
#define MAX_NUM_CHASSIS 8 /* max number of chassis */
@@ -732,6 +735,38 @@ static void calgary_watchdog(unsigned long data)
}
}
+static void __init calgary_increase_split_completion_timeout(void __iomem *bbar,
+ unsigned char busnum)
+{
+ u64 val64;
+ void __iomem *target;
+ unsigned long phb_shift = -1;
+ u64 mask;
+
+ switch (busno_to_phbid(busnum)) {
+ case 0: phb_shift = (63 - 19);
+ break;
+ case 1: phb_shift = (63 - 23);
+ break;
+ case 2: phb_shift = (63 - 27);
+ break;
+ case 3: phb_shift = (63 - 35);
+ break;
+ default:
+ BUG_ON(busno_to_phbid(busnum));
+ }
+
+ target = calgary_reg(bbar, CALGARY_CONFIG_REG);
+ val64 = be64_to_cpu(readq(target));
+
+ /* zero out this PHB's timer bits */
+ mask = ~(0xFUL << phb_shift);
+ val64 &= mask;
+ val64 |= (CCR_2SEC_TIMEOUT << phb_shift);
+ writeq(cpu_to_be64(val64), target);
+ readq(target); /* flush */
+}
+
static void __init calgary_enable_translation(struct pci_dev *dev)
{
u32 val32;
@@ -756,6 +791,13 @@ static void __init calgary_enable_translation(struct pci_dev *dev)
writel(cpu_to_be32(val32), target);
readl(target); /* flush */
+ /*
+ * Give split completion a longer timeout on bus 1 for aic94xx
+ * http://bugzilla.kernel.org/show_bug.cgi?id=7180
+ */
+ if (busnum == 1)
+ calgary_increase_split_completion_timeout(bbar, busnum);
+
init_timer(&tbl->watchdog_timer);
tbl->watchdog_timer.function = &calgary_watchdog;
tbl->watchdog_timer.data = (unsigned long)dev;
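
calgary_increase_split_completion_timeout() above follows the standard
read-modify-write pattern for a big-endian 64-bit MMIO register; the same
shape reduced to its essentials (sketch, helper name hypothetical):

    static void rmw_be64(void __iomem *reg, u64 clear_mask, u64 set_bits)
    {
            u64 v = be64_to_cpu(readq(reg)); /* MMIO is big-endian */

            v &= ~clear_mask;
            v |= set_bits;
            writeq(cpu_to_be64(v), reg);
            readq(reg);                      /* read back to flush */
    }
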
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 5e95b257ee26..7451a4c43c16 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -88,9 +88,8 @@ void enter_idle(void)
static void __exit_idle(void)
{
- if (read_pda(isidle) == 0)
+ if (test_and_clear_bit_pda(0, isidle) == 0)
return;
- write_pda(isidle, 0);
atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}
@@ -145,7 +144,7 @@ static void poll_idle (void)
void cpu_idle_wait(void)
{
unsigned int cpu, this_cpu = get_cpu();
- cpumask_t map;
+ cpumask_t map, tmp = current->cpus_allowed;
set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
put_cpu();
@@ -168,6 +167,8 @@ void cpu_idle_wait(void)
}
cpus_and(map, map, cpu_online_map);
} while (!cpus_empty(map));
+
+ set_cpus_allowed(current, tmp);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
@@ -238,20 +239,28 @@ void cpu_idle (void)
* We execute MONITOR against need_resched and enter optimized wait state
* through MWAIT. Whenever someone changes need_resched, we would be woken
* up from MWAIT (without an IPI).
+ *
+ * New with Core Duo processors, MWAIT can take some hints based on CPU
+ * capability.
*/
-static void mwait_idle(void)
+void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
{
- local_irq_enable();
-
- while (!need_resched()) {
+ if (!need_resched()) {
__monitor((void *)&current_thread_info()->flags, 0, 0);
smp_mb();
- if (need_resched())
- break;
- __mwait(0, 0);
+ if (!need_resched())
+ __mwait(eax, ecx);
}
}
+/* Default MONITOR/MWAIT with no hints, used for default C1 state */
+static void mwait_idle(void)
+{
+ local_irq_enable();
+ while (!need_resched())
+ mwait_idle_with_hints(0,0);
+}
+
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
static int printed;
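
mwait_idle_with_hints() is split out so that C-state code can request
deeper sleep states: eax carries the target state hint and ecx selects
MWAIT extensions. A hypothetical caller (the ACPI idle path is the
intended user; names here are illustrative):

    static void enter_mwait_cstate(unsigned long hint)
    {
            /* hint 0 == C1; deeper states encode sub-state in low bits */
            mwait_idle_with_hints(hint, 0);
    }
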
diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
index 4f67697f5036..9f74c883568c 100644
--- a/arch/x86_64/kernel/smp.c
+++ b/arch/x86_64/kernel/smp.c
@@ -376,9 +376,8 @@ int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
/* prevent preemption and reschedule on another processor */
int me = get_cpu();
if (cpu == me) {
- WARN_ON(1);
put_cpu();
- return -EBUSY;
+ return 0;
}
spin_lock_bh(&call_lock);
__smp_call_function_single(cpu, func, info, nonatomic, wait);
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 7b7a6870288a..62c2e747af58 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -581,12 +581,16 @@ void __cpuinit start_secondary(void)
* smp_call_function().
*/
lock_ipi_call_lock();
+ spin_lock(&vector_lock);
+ /* Setup the per cpu irq handling data structures */
+ __setup_vector_irq(smp_processor_id());
/*
* Allow the master to continue.
*/
cpu_set(smp_processor_id(), cpu_online_map);
per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
+ spin_unlock(&vector_lock);
unlock_ipi_call_lock();
cpu_idle();
@@ -799,7 +803,6 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
cpu, node);
}
-
alternatives_smp_switch(1);
c_idle.idle = get_idle_for_cpu(cpu);
@@ -1246,8 +1249,10 @@ int __cpu_disable(void)
local_irq_disable();
remove_siblinginfo(cpu);
+ spin_lock(&vector_lock);
/* It's now safe to remove this processor from the online map */
cpu_clear(cpu, cpu_online_map);
+ spin_unlock(&vector_lock);
remove_cpu_from_maps();
fixup_irqs(cpu_online_map);
return 0;
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 1ba5a442ac32..e3ef544d2cfb 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -876,15 +876,6 @@ static struct irqaction irq0 = {
timer_interrupt, IRQF_DISABLED, CPU_MASK_NONE, "timer", NULL, NULL
};
-static int __cpuinit
-time_cpu_notifier(struct notifier_block *nb, unsigned long action, void *hcpu)
-{
- unsigned cpu = (unsigned long) hcpu;
- if (action == CPU_ONLINE)
- vsyscall_set_cpu(cpu);
- return NOTIFY_DONE;
-}
-
void __init time_init(void)
{
if (nohpet)
@@ -925,8 +916,6 @@ void __init time_init(void)
vxtime.last_tsc = get_cycles_sync();
set_cyc2ns_scale(cpu_khz);
setup_irq(0, &irq0);
- hotcpu_notifier(time_cpu_notifier, 0);
- time_cpu_notifier(NULL, CPU_ONLINE, (void *)(long)smp_processor_id());
#ifndef CONFIG_SMP
time_init_gtod();
@@ -948,7 +937,7 @@ __cpuinit int unsynchronized_tsc(void)
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
#ifdef CONFIG_ACPI
/* But TSC doesn't tick in C3 so don't use it there */
- if (acpi_fadt.length > 0 && acpi_fadt.plvl3_lat < 100)
+ if (acpi_fadt.length > 0 && acpi_fadt.plvl3_lat < 1000)
return 1;
#endif
return 0;
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 7819022a8db5..a153d0a01b72 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -290,6 +290,12 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s
if (tsk && tsk != current)
stack = (unsigned long *)tsk->thread.rsp;
}
+ /*
+ * Align the stack pointer on word boundary, later loops
+ * rely on that (and corruption / debug info bugs can cause
+ * unaligned values here):
+ */
+ stack = (unsigned long *)((unsigned long)stack & ~(sizeof(long)-1));
/*
* Print function call entries within a stack. 'cond' is the
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index b9df2ab6529f..d9534e750d4f 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
@@ -17,6 +17,7 @@ PHDRS {
text PT_LOAD FLAGS(5); /* R_E */
data PT_LOAD FLAGS(7); /* RWE */
user PT_LOAD FLAGS(7); /* RWE */
+ data.init PT_LOAD FLAGS(7); /* RWE */
note PT_NOTE FLAGS(4); /* R__ */
}
SECTIONS
@@ -59,6 +60,7 @@ SECTIONS
}
#endif
+ . = ALIGN(PAGE_SIZE); /* Align data segment to page size boundary */
/* Data */
.data : AT(ADDR(.data) - LOAD_OFFSET) {
*(.data)
@@ -131,7 +133,7 @@ SECTIONS
. = ALIGN(8192); /* init_task */
.data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
*(.data.init_task)
- } :data
+ }:data.init
. = ALIGN(4096);
.data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
@@ -174,13 +176,7 @@ SECTIONS
__setup_end = .;
__initcall_start = .;
.initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
- *(.initcall1.init)
- *(.initcall2.init)
- *(.initcall3.init)
- *(.initcall4.init)
- *(.initcall5.init)
- *(.initcall6.init)
- *(.initcall7.init)
+ INITCALLS
}
__initcall_end = .;
__con_initcall_start = .;
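
The seven hand-listed initcall sections are replaced by the INITCALLS
macro, which in this era is defined in include/linux/init.h and expands
to the full ordered list of per-level sections (including the new "sync"
levels), so new levels need to be added in only one place. A sketch of
its shape (abridged; see include/linux/init.h for the authoritative list):

    #define INITCALLS                \
            *(.initcall1.init)       \
            *(.initcall2.init)       \
            *(.initcall3.init)       \
            *(.initcall4.init)       \
            *(.initcall4s.init)      \
            /* ... through level 7 and its sync variants ... */
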
diff --git a/arch/x86_64/kernel/vsmp.c b/arch/x86_64/kernel/vsmp.c
index 044e852bd25e..414caf0c5f9a 100644
--- a/arch/x86_64/kernel/vsmp.c
+++ b/arch/x86_64/kernel/vsmp.c
@@ -14,6 +14,7 @@
#include <linux/pci_ids.h>
#include <linux/pci_regs.h>
#include <asm/pci-direct.h>
+#include <asm/io.h>
static int __init vsmp_init(void)
{
diff --git a/arch/x86_64/kernel/vsyscall.c b/arch/x86_64/kernel/vsyscall.c
index a98b460af6a1..92546c1526f1 100644
--- a/arch/x86_64/kernel/vsyscall.c
+++ b/arch/x86_64/kernel/vsyscall.c
@@ -27,6 +27,9 @@
#include <linux/jiffies.h>
#include <linux/sysctl.h>
#include <linux/getcpu.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/notifier.h>
#include <asm/vsyscall.h>
#include <asm/pgtable.h>
@@ -243,32 +246,17 @@ static ctl_table kernel_root_table2[] = {
#endif
-static void __cpuinit write_rdtscp_cb(void *info)
-{
- write_rdtscp_aux((unsigned long)info);
-}
-
-void __cpuinit vsyscall_set_cpu(int cpu)
+/* Assume __initcall executes before all user space. Hopefully kmod
+ doesn't violate that. We'll find out if it does. */
+static void __cpuinit vsyscall_set_cpu(int cpu)
{
unsigned long *d;
unsigned long node = 0;
#ifdef CONFIG_NUMA
node = cpu_to_node[cpu];
#endif
- if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP)) {
- void *info = (void *)((node << 12) | cpu);
- /* Can happen on preemptive kernel */
- if (get_cpu() == cpu)
- write_rdtscp_cb(info);
-#ifdef CONFIG_SMP
- else {
- /* the notifier is unfortunately not executed on the
- target CPU */
- smp_call_function_single(cpu,write_rdtscp_cb,info,0,1);
- }
-#endif
- put_cpu();
- }
+ if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP))
+ write_rdtscp_aux((node << 12) | cpu);
/* Store cpu number in limit so that it can be loaded quickly
in user space in vgetcpu.
@@ -280,6 +268,23 @@ void __cpuinit vsyscall_set_cpu(int cpu)
*d |= (node >> 4) << 48;
}
+static void __cpuinit cpu_vsyscall_init(void *arg)
+{
+ /* preemption should be already off */
+ vsyscall_set_cpu(raw_smp_processor_id());
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int __cpuinit
+cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
+{
+ long cpu = (long)arg;
+ if (action == CPU_ONLINE)
+ smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1);
+ return NOTIFY_DONE;
+}
+#endif
+
static void __init map_vsyscall(void)
{
extern char __vsyscall_0;
@@ -299,6 +304,8 @@ static int __init vsyscall_init(void)
#ifdef CONFIG_SYSCTL
register_sysctl_table(kernel_root_table2, 0);
#endif
+ on_each_cpu(cpu_vsyscall_init, NULL, 0, 1);
+ hotcpu_notifier(cpu_vsyscall_notifier, 0);
return 0;
}
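
The cpu and node numbers written into the per-cpu GDT descriptor limit
above are what the user-space vgetcpu path reads back with a single
unprivileged lsl instruction, without entering the kernel. An illustrative
decode (the segment selector name is hypothetical):

    unsigned int sel = PER_CPU_SELECTOR;    /* hypothetical selector name */
    unsigned int p, cpu, node;

    /* lsl loads the segment limit: (node << 12) | cpu, as stored above */
    asm("lsl %1,%0" : "=r" (p) : "r" (sel));
    cpu  = p & 0xfff;   /* low 12 bits */
    node = p >> 12;     /* remaining bits */
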