author     Daniel Lezcano <daniel.lezcano@linaro.org>  2019-01-17 15:40:35 +0100
committer  Daniel Lezcano <daniel.lezcano@linaro.org>  2019-01-22 22:25:30 +0100
commit     8783e6f4fc5590dff28ed4f0bf80805b5c3f7a47 (patch)
tree       2bba5d562e2f552d3d6f79bed77727a3ba61dc93
parent     b85f87c808a00779d08e89b255f37afbb20391bb (diff)
branch     irq/prediction-beta

Investigating latency impact
Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
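
In short, what the diff below does: the timestamps fed to the irq timings
prediction code are taken at interrupt handler entry, so when the CPU was
idle they also include the time spent exiting the idle state. The patch
exports the scheduler's current idle state through sched_idle_get_state(),
subtracts that state's exit_latency from the recorded timestamp when the
interrupt preempted the idle task, and keeps the idle state pointer set
until the idle loop has re-enabled interrupts so the handler can still see
it. Because the compensated timestamp may now fall behind the previously
stored one, the computed interval is clamped instead of letting the
unsigned subtraction wrap.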
 drivers/cpuidle/cpuidle.c |  2 --
 include/linux/cpuidle.h   |  1 +
 kernel/irq/internals.h    | 13 ++++++++++++-
 kernel/irq/timings.c      |  2 +-
 kernel/sched/idle.c       |  8 ++++++++
 5 files changed, 22 insertions(+), 4 deletions(-)
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 599f23b1e386..f51c5454d853 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -233,8 +233,6 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 	time_end = ns_to_ktime(local_clock());
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
 
-	/* The cpu is no longer idle or about to enter idle. */
-	sched_idle_set_state(NULL);
 
 	if (broadcast) {
 		if (WARN_ON_ONCE(!irqs_disabled()))
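
Note that the clearing of the idle state pointer is not dropped but
deferred: the matching hunk in kernel/sched/idle.c below re-adds
sched_idle_set_state(NULL) after interrupts have been re-enabled in the
idle loop's exit path, so a wakeup interrupt handled during idle exit can
still read the state through sched_idle_get_state().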
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 4dff74f48d4b..7d5af0540db5 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -213,6 +213,7 @@ static inline void cpuidle_use_deepest_state(bool enable)
 
 /* kernel/sched/idle.c */
 extern void sched_idle_set_state(struct cpuidle_state *idle_state);
+extern struct cpuidle_state *sched_idle_get_state(void);
 extern void default_idle_call(void);
 
 #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index ca6afa267070..cad032fcfd09 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -346,6 +346,8 @@ static inline int irq_timing_decode(u64 value, u64 *timestamp)
 	return value & U16_MAX;
 }
 
+#include <linux/cpuidle.h>
+
 /*
  * The function record_irq_time is only called in one place in the
  * interrupt handler. We want this function always inline so the code
@@ -361,9 +363,18 @@ static __always_inline void record_irq_time(struct irq_desc *desc)
 
 	if (desc->istate & IRQS_TIMINGS) {
 		struct irq_timings *timings = this_cpu_ptr(&irq_timings);
+		u64 now = local_clock();
+		u64 latency = 0;
+
+		if (is_idle_task(current)) {
+			struct cpuidle_state *idle = sched_idle_get_state();
+
+			if (idle)
+				latency = idle->exit_latency;
+		}
 
 		timings->values[timings->count & IRQ_TIMINGS_MASK] =
-			irq_timing_encode(local_clock(),
+			irq_timing_encode(now - latency,
 					  irq_desc_get_irq(desc));
 
 		timings->count++;
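
Assuming the layout implied by the irq_timing_decode() helper shown above
(IRQ number in the low 16 bits, timestamp in the bits above it), here is a
minimal userspace sketch of the encode/decode round-trip with the new
compensation; all values are made up and the units are assumed consistent
for the sketch:

#include <stdint.h>
#include <stdio.h>

#define U16_MAX 0xffffU

/* Pack a timestamp and an IRQ number into a single u64. The decode
 * helper above returns "value & U16_MAX" as the IRQ, so the IRQ is
 * assumed to live in the low 16 bits and the timestamp above it. */
static uint64_t irq_timing_encode(uint64_t timestamp, int irq)
{
	return (timestamp << 16) | (unsigned int)irq;
}

static int irq_timing_decode(uint64_t value, uint64_t *timestamp)
{
	*timestamp = value >> 16;
	return value & U16_MAX;
}

int main(void)
{
	uint64_t now = 2000000000ULL;	/* stand-in for local_clock() */
	uint64_t latency = 150000ULL;	/* stand-in for idle->exit_latency */
	uint64_t ts;
	int irq;

	/* As in the hunk above: store the compensated time, i.e. the
	 * (approximate) moment the interrupt fired rather than the
	 * moment the handler finally ran after the idle exit. */
	irq = irq_timing_decode(irq_timing_encode(now - latency, 37), &ts);

	printf("irq=%d ts=%llu\n", irq, (unsigned long long)ts);
	return 0;
}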
diff --git a/kernel/irq/timings.c b/kernel/irq/timings.c
index 29ba9ae532e4..e88cd01f4423 100644
--- a/kernel/irq/timings.c
+++ b/kernel/irq/timings.c
@@ -352,7 +352,7 @@ static inline void irq_timings_store(int irq, struct irqt_stat *irqs, u64 ts)
 	 * type in our computation, which prevents subtle issues with
 	 * overflow, sign and division.
 	 */
-	interval = ts - old_ts;
+	interval = (old_ts > ts) ? 1 : ts - old_ts;
 
 	/*
 	 * The interrupt triggered more than one second apart, that
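
Why the clamp: once exit_latency has been subtracted, the new timestamp
can land behind the one already stored, and with unsigned 64-bit
arithmetic the old "ts - old_ts" would wrap to an enormous interval rather
than a negative one. A minimal demonstration with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* The previously stored timestamp and a new, latency-compensated
	 * one that went backwards (made-up nanosecond values). */
	uint64_t old_ts = 1000000200ULL;
	uint64_t ts = 1000000050ULL;

	uint64_t naive = ts - old_ts;			/* wraps near 2^64 */
	uint64_t clamped = (old_ts > ts) ? 1 : ts - old_ts;

	printf("naive=%llu clamped=%llu\n",
	       (unsigned long long)naive, (unsigned long long)clamped);
	return 0;
}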
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index db71062bb013..b74821ee3465 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -21,6 +21,11 @@ void sched_idle_set_state(struct cpuidle_state *idle_state)
 	idle_set_state(this_rq(), idle_state);
 }
 
+struct cpuidle_state *sched_idle_get_state(void)
+{
+	return idle_get_state(this_rq());
+}
+
 static int __read_mostly cpu_idle_force_poll;
 
 void cpu_idle_poll_ctrl(bool enable)
@@ -213,6 +218,9 @@ exit_idle:
 	if (WARN_ON_ONCE(irqs_disabled()))
 		local_irq_enable();
 
+	/* The cpu is no longer idle or about to enter idle. */
+	sched_idle_set_state(NULL);
+
 	rcu_idle_exit();
 }
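
Taken together, the hunks form a small protocol between the idle loop and
the interrupt path. A minimal userspace sketch of that flow, with
simplified stand-ins for the kernel structures and helpers (only the names
appearing in the diff above are real; everything else here is invented for
illustration):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct cpuidle_state. */
struct cpuidle_state {
	uint64_t exit_latency;
};

/* Stand-in for the per-runqueue idle state pointer. */
static struct cpuidle_state *rq_idle_state;

static void sched_idle_set_state(struct cpuidle_state *s)
{
	rq_idle_state = s;
}

static struct cpuidle_state *sched_idle_get_state(void)
{
	return rq_idle_state;
}

/* Stand-in for record_irq_time(): compensate the timestamp when the
 * interrupt preempted the idle task. */
static uint64_t recorded_irq_time(uint64_t now, int in_idle)
{
	uint64_t latency = 0;
	struct cpuidle_state *idle;

	if (in_idle && (idle = sched_idle_get_state()))
		latency = idle->exit_latency;

	return now - latency;
}

int main(void)
{
	struct cpuidle_state deep = { .exit_latency = 150000ULL };

	sched_idle_set_state(&deep);	/* idle loop: about to enter idle */

	/* A wakeup interrupt fires while the pointer is still set,
	 * which is exactly the window the reordered clearing preserves. */
	printf("stored ts = %llu\n",
	       (unsigned long long)recorded_irq_time(2000000000ULL, 1));

	sched_idle_set_state(NULL);	/* idle loop: fully exited idle */
	return 0;
}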