Diffstat (limited to 'drivers/acpi/processor_idle.c')
-rw-r--r--  drivers/acpi/processor_idle.c | 177
1 file changed, 127 insertions(+), 50 deletions(-)
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 3f30af21574e..60773005b8af 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -39,6 +39,25 @@
#include <linux/moduleparam.h>
#include <linux/sched.h> /* need_resched() */
#include <linux/latency.h>
+#include <linux/clockchips.h>
+
+/*
+ * Include the apic definitions for x86 to have the APIC timer related defines
+ * available also for UP (on SMP it gets magically included via linux/smp.h).
+ * asm/acpi.h is not an option, as it would require more include magic. Also
+ * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
+ */
+#ifdef CONFIG_X86
+#include <asm/apic.h>
+#endif
#include <asm/io.h>
#include <asm/uaccess.h>
@@ -48,9 +67,8 @@
#define ACPI_PROCESSOR_COMPONENT 0x01000000
#define ACPI_PROCESSOR_CLASS "processor"
-#define ACPI_PROCESSOR_DRIVER_NAME "ACPI Processor Driver"
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
-ACPI_MODULE_NAME("acpi_processor")
+ACPI_MODULE_NAME("processor_idle");
#define ACPI_PROCESSOR_FILE_POWER "power"
#define US_TO_PM_TIMER_TICKS(t) ((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define C2_OVERHEAD 4 /* 1us (3.579 ticks per us) */
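A note on the units used by the two macros above: the ACPI PM timer runs at a fixed 3.579545 MHz, so US_TO_PM_TIMER_TICKS() converts microseconds to timer ticks with integer arithmetic, and the C2/C3 overhead constants are expressed in those ticks. A minimal standalone sketch of the arithmetic, with PM_TIMER_FREQUENCY spelled out here rather than taken from the kernel headers:

    #include <stdio.h>

    #define PM_TIMER_FREQUENCY 3579545  /* Hz; 3.579545 MHz per the ACPI spec */
    #define US_TO_PM_TIMER_TICKS(t) ((t * (PM_TIMER_FREQUENCY / 1000)) / 1000)

    int main(void)
    {
            /* 100 us of C-state latency -> (100 * 3579) / 1000 = 357 PM timer ticks */
            printf("100 us = %d ticks\n", US_TO_PM_TIMER_TICKS(100));
            /* C2_OVERHEAD is 4 ticks, i.e. roughly 4 / 3.579545 ~= 1.1 us */
            printf("4 ticks = %.1f us\n", 4 / 3.579545);
            return 0;
    }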
@@ -160,7 +178,7 @@ static inline u32 ticks_elapsed(u32 t1, u32 t2)
{
if (t2 >= t1)
return (t2 - t1);
- else if (!acpi_fadt.tmr_val_ext)
+ else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
else
return ((0xFFFFFFFF - t1) + t2);
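ticks_elapsed() compensates for PM timer wraparound: unless the FADT advertises ACPI_FADT_32BIT_TIMER (the old tmr_val_ext bit), the counter is only 24 bits wide and wraps at 0x00FFFFFF. A self-contained sketch of the same arithmetic, with the flag passed as a plain parameter instead of being read from acpi_gbl_FADT:

    #include <stdio.h>

    /* Same arithmetic as ticks_elapsed(); timer_is_32bit stands in for the
     * ACPI_FADT_32BIT_TIMER flag test. */
    static unsigned int elapsed(unsigned int t1, unsigned int t2, int timer_is_32bit)
    {
            if (t2 >= t1)
                    return t2 - t1;
            else if (!timer_is_32bit)
                    return ((0x00FFFFFF - t1) + t2) & 0x00FFFFFF;  /* 24-bit counter */
            else
                    return (0xFFFFFFFF - t1) + t2;                 /* 32-bit counter */
    }

    int main(void)
    {
            /* Two reads taken across a 24-bit wrap:
             * (0xFFFFFF - 0xFFFFF0) + 0x10 = 31 ticks. */
            printf("%u\n", elapsed(0x00FFFFF0, 0x00000010, 0));
            return 0;
    }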
@@ -187,8 +205,7 @@ acpi_processor_power_activate(struct acpi_processor *pr,
case ACPI_STATE_C3:
/* Disable bus master reload */
if (new->type != ACPI_STATE_C3 && pr->flags.bm_check)
- acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0,
- ACPI_MTX_DO_NOT_LOCK);
+ acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
break;
}
}
@@ -198,8 +215,7 @@ acpi_processor_power_activate(struct acpi_processor *pr,
case ACPI_STATE_C3:
/* Enable bus master reload */
if (old->type != ACPI_STATE_C3 && pr->flags.bm_check)
- acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1,
- ACPI_MTX_DO_NOT_LOCK);
+ acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
break;
}
@@ -236,10 +252,85 @@ static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
/* Dummy wait op - must do something useless after P_LVL2 read
because chipsets cannot guarantee that STPCLK# signal
gets asserted in time to freeze execution properly. */
- unused = inl(acpi_fadt.xpm_tmr_blk.address);
+ unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
}
}
+#ifdef ARCH_APICTIMER_STOPS_ON_C3
+
+/*
+ * Some BIOS implementations switch to C3 in the published C2 state.
+ * This seems to be a common problem on AMD boxen, but other vendors
+ * are affected too. We pick the most conservative approach: we assume
+ * that the local APIC stops in both C2 and C3.
+ */
+static void acpi_timer_check_state(int state, struct acpi_processor *pr,
+ struct acpi_processor_cx *cx)
+{
+ struct acpi_processor_power *pwr = &pr->power;
+
+ /*
+ * Check, if one of the previous states already marked the lapic
+ * unstable
+ */
+ if (pwr->timer_broadcast_on_state < state)
+ return;
+
+ if (cx->type >= ACPI_STATE_C2)
+ pr->power.timer_broadcast_on_state = state;
+}
+
+static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
+{
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+ unsigned long reason;
+
+ reason = pr->power.timer_broadcast_on_state < INT_MAX ?
+ CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;
+
+ clockevents_notify(reason, &pr->id);
+#else
+ cpumask_t mask = cpumask_of_cpu(pr->id);
+
+ if (pr->power.timer_broadcast_on_state < INT_MAX)
+ on_each_cpu(switch_APIC_timer_to_ipi, &mask, 1, 1);
+ else
+ on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1);
+#endif
+}
+
+/* Power(C) State timer broadcast control */
+static void acpi_state_timer_broadcast(struct acpi_processor *pr,
+ struct acpi_processor_cx *cx,
+ int broadcast)
+{
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+
+ int state = cx - pr->power.states;
+
+ if (state >= pr->power.timer_broadcast_on_state) {
+ unsigned long reason;
+
+ reason = broadcast ? CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
+ CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
+ clockevents_notify(reason, &pr->id);
+ }
+#endif
+}
+
+#else
+
+static void acpi_timer_check_state(int state, struct acpi_processor *pr,
+ struct acpi_processor_cx *cstate) { }
+static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }
+static void acpi_state_timer_broadcast(struct acpi_processor *pr,
+ struct acpi_processor_cx *cx,
+ int broadcast)
+{
+}
+
+#endif
+
static void acpi_processor_idle(void)
{
struct acpi_processor *pr = NULL;
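The ARCH_APICTIMER_STOPS_ON_C3 helpers added in the hunk above replace the old per-vendor special casing: pr->power.timer_broadcast_on_state records the shallowest C-state index at which the local APIC timer is assumed to stop (INT_MAX meaning "never"), acpi_timer_check_state() lowers it while the C-states are being verified, acpi_propagate_timer_broadcast() then switches the CPU over to clockevents broadcast mode (or to the older timer-IPI fallback when CONFIG_GENERIC_CLOCKEVENTS is off), and acpi_state_timer_broadcast() issues the per-entry BROADCAST_ENTER/EXIT notifications around each deep C-state. A toy userspace model of the bookkeeping only; the enum values and state table below are illustrative, not the driver's real data:

    #include <limits.h>
    #include <stdio.h>

    enum { TYPE_C1 = 1, TYPE_C2 = 2, TYPE_C3 = 3 };

    int main(void)
    {
            /* C-state table indexed from 1, as in struct acpi_processor_power */
            int type[] = { 0, TYPE_C1, TYPE_C2, TYPE_C3 };
            int broadcast_on_state = INT_MAX;  /* INT_MAX: lapic timer always usable */
            int i;

            /* acpi_timer_check_state(): remember the shallowest C2-or-deeper state */
            for (i = 1; i <= 3; i++) {
                    if (broadcast_on_state < i)
                            continue;
                    if (type[i] >= TYPE_C2)
                            broadcast_on_state = i;
            }

            /* acpi_propagate_timer_broadcast(): broadcast is needed from state 2 on */
            printf("timer_broadcast_on_state = %d\n", broadcast_on_state);
            return 0;
    }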
@@ -291,12 +382,10 @@ static void acpi_processor_idle(void)
pr->power.bm_activity <<= diff;
- acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS,
- &bm_status, ACPI_MTX_DO_NOT_LOCK);
+ acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
if (bm_status) {
pr->power.bm_activity |= 0x1;
- acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS,
- 1, ACPI_MTX_DO_NOT_LOCK);
+ acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
}
/*
* PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
@@ -338,7 +427,7 @@ static void acpi_processor_idle(void)
* detection phase, to work cleanly with logical CPU hotplug.
*/
if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
- !pr->flags.has_cst && !acpi_fadt.plvl2_up)
+ !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
cx = &pr->power.states[ACPI_STATE_C1];
#endif
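Several hunks in this patch replace the old struct acpi_fadt one-bit fields (plvl2_up, tmr_val_ext, wb_invd) with tests against acpi_gbl_FADT.flags, the single flags word exported by the new ACPICA table manager. A small sketch of the idiom; the mask values follow the FADT fixed-feature-flags layout (P_LVL2_UP is bit 3, TMR_VAL_EXT bit 8), but the authoritative definitions live in the ACPICA headers:

    #include <stdio.h>

    #define ACPI_FADT_C2_MP_SUPPORTED (1 << 3)  /* P_LVL2_UP: C2 valid on MP */
    #define ACPI_FADT_32BIT_TIMER     (1 << 8)  /* TMR_VAL_EXT: 32-bit PM timer */

    int main(void)
    {
            unsigned int flags = ACPI_FADT_32BIT_TIMER;  /* pretend FADT flags word */

            if (!(flags & ACPI_FADT_C2_MP_SUPPORTED))
                    printf("C2 not advertised as MP-safe: restrict SMP boxes to C1\n");
            if (flags & ACPI_FADT_32BIT_TIMER)
                    printf("PM timer value extends to 32 bits\n");
            return 0;
    }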
@@ -384,11 +473,12 @@ static void acpi_processor_idle(void)
case ACPI_STATE_C2:
/* Get start time (ticks) */
- t1 = inl(acpi_fadt.xpm_tmr_blk.address);
+ t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
/* Invoke C2 */
+ acpi_state_timer_broadcast(pr, cx, 1);
acpi_cstate_enter(cx);
/* Get end time (ticks) */
- t2 = inl(acpi_fadt.xpm_tmr_blk.address);
+ t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
#ifdef CONFIG_GENERIC_TIME
/* TSC halts in C2, so notify users */
@@ -400,6 +490,7 @@ static void acpi_processor_idle(void)
/* Compute time (ticks) that we were actually asleep */
sleep_ticks =
ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD;
+ acpi_state_timer_broadcast(pr, cx, 0);
break;
case ACPI_STATE_C3:
@@ -411,8 +502,7 @@ static void acpi_processor_idle(void)
* All CPUs are trying to go to C3
* Disable bus master arbitration
*/
- acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1,
- ACPI_MTX_DO_NOT_LOCK);
+ acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
}
} else {
/* SMP with no shared cache... Invalidate cache */
@@ -420,16 +510,16 @@ static void acpi_processor_idle(void)
}
/* Get start time (ticks) */
- t1 = inl(acpi_fadt.xpm_tmr_blk.address);
+ t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
/* Invoke C3 */
+ acpi_state_timer_broadcast(pr, cx, 1);
acpi_cstate_enter(cx);
/* Get end time (ticks) */
- t2 = inl(acpi_fadt.xpm_tmr_blk.address);
+ t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
if (pr->flags.bm_check) {
/* Enable bus master arbitration */
atomic_dec(&c3_cpu_count);
- acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0,
- ACPI_MTX_DO_NOT_LOCK);
+ acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
}
#ifdef CONFIG_GENERIC_TIME
@@ -442,6 +532,7 @@ static void acpi_processor_idle(void)
/* Compute time (ticks) that we were actually asleep */
sleep_ticks =
ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD;
+ acpi_state_timer_broadcast(pr, cx, 0);
break;
default:
@@ -457,7 +548,7 @@ static void acpi_processor_idle(void)
#ifdef CONFIG_HOTPLUG_CPU
/* Don't do promotion/demotion */
if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
- !pr->flags.has_cst && !acpi_fadt.plvl2_up) {
+ !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
next_state = cx;
goto end;
}
@@ -627,7 +718,8 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
* Check for P_LVL2_UP flag before entering C2 and above on
* an SMP system.
*/
- if ((num_online_cpus() > 1) && !acpi_fadt.plvl2_up)
+ if ((num_online_cpus() > 1) &&
+ !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
return -ENODEV;
#endif
@@ -636,8 +728,8 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;
/* determine latencies from FADT */
- pr->power.states[ACPI_STATE_C2].latency = acpi_fadt.plvl2_lat;
- pr->power.states[ACPI_STATE_C3].latency = acpi_fadt.plvl3_lat;
+ pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
+ pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"lvl2[0x%08x] lvl3[0x%08x]\n",
@@ -883,14 +975,13 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
* WBINVD should be set in fadt, for C3 state to be
* supported on when bm_check is not required.
*/
- if (acpi_fadt.wb_invd != 1) {
+ if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Cache invalidation should work properly"
" for C3 to be enabled on SMP systems\n"));
return;
}
- acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD,
- 0, ACPI_MTX_DO_NOT_LOCK);
+ acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
}
/*
@@ -910,11 +1001,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
unsigned int i;
unsigned int working = 0;
-#ifdef ARCH_APICTIMER_STOPS_ON_C3
- int timer_broadcast = 0;
- cpumask_t mask = cpumask_of_cpu(pr->id);
- on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1);
-#endif
+ pr->power.timer_broadcast_on_state = INT_MAX;
for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
struct acpi_processor_cx *cx = &pr->power.states[i];
@@ -926,21 +1013,14 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
case ACPI_STATE_C2:
acpi_processor_power_verify_c2(cx);
-#ifdef ARCH_APICTIMER_STOPS_ON_C3
- /* Some AMD systems fake C3 as C2, but still
- have timer troubles */
- if (cx->valid &&
- boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
- timer_broadcast++;
-#endif
+ if (cx->valid)
+ acpi_timer_check_state(i, pr, cx);
break;
case ACPI_STATE_C3:
acpi_processor_power_verify_c3(pr, cx);
-#ifdef ARCH_APICTIMER_STOPS_ON_C3
if (cx->valid)
- timer_broadcast++;
-#endif
+ acpi_timer_check_state(i, pr, cx);
break;
}
@@ -948,10 +1028,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
working++;
}
-#ifdef ARCH_APICTIMER_STOPS_ON_C3
- if (timer_broadcast)
- on_each_cpu(switch_APIC_timer_to_ipi, &mask, 1, 1);
-#endif
+ acpi_propagate_timer_broadcast(pr);
return (working);
}
@@ -1096,7 +1173,7 @@ static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
pr->power.states[i].latency,
pr->power.states[i].usage,
- pr->power.states[i].time);
+ (unsigned long long)pr->power.states[i].time);
}
end:
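The cast added to the seq_printf() call above is a portability fix, not a functional change: the per-state residency counter is a 64-bit type, and on LP64 architectures where u64 is typedef'd to unsigned long, passing it straight to a %llu conversion triggers a compiler format warning. The usual pattern, sketched outside the kernel with a stand-in variable:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t time = 123456789ULL;  /* stand-in for pr->power.states[i].time */

            /* Without the cast, %llu vs. a 64-bit 'unsigned long' warns on LP64. */
            printf("duration[%020llu]\n", (unsigned long long)time);
            return 0;
    }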
@@ -1164,9 +1241,9 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
if (!pr)
return -EINVAL;
- if (acpi_fadt.cst_cnt && !nocst) {
+ if (acpi_gbl_FADT.cst_control && !nocst) {
status =
- acpi_os_write_port(acpi_fadt.smi_cmd, acpi_fadt.cst_cnt, 8);
+ acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Notifying BIOS of _CST ability failed"));