/*
 * coupled.c - helper functions to enter the same idle state on multiple cpus
 *
 * Copyright (c) 2011 Google, Inc.
 *
 * Author: Colin Cross <ccross@android.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "cpuidle.h"

/**
 * DOC: Coupled cpuidle states
 *
 * On some ARM SMP SoCs (OMAP4460, Tegra 2, and probably more), the
 * cpus cannot be independently powered down, either due to
 * sequencing restrictions (on Tegra 2, cpu 0 must be the last to
 * power down), or due to HW bugs (on OMAP4460, a cpu powering up
 * will corrupt the gic state unless the other cpu runs a
 * workaround).  Each cpu has a power state that it can enter without
 * coordinating with the other cpu (usually Wait For Interrupt, or
 * WFI), and one or more "coupled" power states that affect blocks
 * shared between the cpus (L2 cache, interrupt controller, and
 * sometimes the whole SoC).  Entering a coupled power state must
 * be tightly controlled on both cpus.
 *
 * This file implements a solution where each cpu will wait in the
 * WFI state until all cpus are ready to enter a coupled state, at
 * which point the coupled state function will be called on all
 * cpus at approximately the same time.
 *
 * Once all cpus are ready to enter idle, they are woken by an smp
 * cross call.  At this point, there is a chance that one of the
 * cpus will find work to do, and choose not to enter idle.  A
 * final pass is needed to guarantee that all cpus will call the
 * power state enter function at the same time.  During this pass,
 * each cpu will increment the ready counter, and continue once the
 * ready counter matches the number of online coupled cpus.  If any
 * cpu exits idle, the other cpus will decrement their counter and
 * retry.
 *
 * requested_state stores the deepest coupled idle state each cpu
 * is ready for.  It is assumed that the states are indexed from
 * shallowest (highest power, lowest exit latency) to deepest
 * (lowest power, highest exit latency).  The requested_state
 * variable is not locked.  It is only written from the cpu that
 * it stores (or by the on/offlining cpu if that cpu is offline),
 * and only read after all the cpus are ready for the coupled idle
 * state and are no longer updating it.
 *
 * Three counters are used.  online_count tracks the number of cpus
 * in the coupled set that are currently or soon will be online.
 * waiting_count tracks the number of cpus that are in the waiting
 * loop, in the ready loop, or in the coupled idle state.
 * ready_count tracks the number of cpus that are in the ready loop
 * or in the coupled idle state.  The last two are packed into the
 * single atomic ready_waiting_counts.
 *
 * To use coupled cpuidle states, a cpuidle driver must:
 *
 *    Set struct cpuidle_device.coupled_cpus to the mask of all
 *    coupled cpus, usually the same as cpu_possible_mask if all cpus
 *    are part of the same cluster.  The coupled_cpus mask must be
 *    set in the struct cpuidle_device for each cpu.
 *
 *    Set struct cpuidle_device.safe_state_index to the index of a
 *    state that is not a coupled state.  This is usually WFI.
 *
 *    Set CPUIDLE_FLAG_COUPLED in struct cpuidle_state.flags for each
 *    state that affects multiple cpus.
 *
 *    Provide a struct cpuidle_state.enter function for each state
 *    that affects multiple cpus.  This function is guaranteed to be
 *    called on all cpus at approximately the same time.  The driver
 *    should ensure that the cpus all abort together if any cpu tries
 *    to abort once the function is called.  The function should return
 *    with interrupts still disabled.
 */
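
/*
 * Illustrative sketch of the driver-side setup described above, for a
 * hypothetical two-cpu platform.  All foo_* names (including the per-cpu
 * foo_idle_dev variable) are invented for this example; only the cpuidle
 * structures, fields and flags are real.
 *
 *	static struct cpuidle_driver foo_idle_driver = {
 *		.name		= "foo_idle",
 *		.owner		= THIS_MODULE,
 *		.states[0]	= { ... },	// WFI, the safe state
 *		.states[1]	= {		// state that powers down the SoC
 *			.enter	= foo_enter_coupled,
 *			.flags	= CPUIDLE_FLAG_COUPLED,
 *			...
 *		},
 *		.state_count	= 2,
 *	};
 *
 *	static int __init foo_cpuidle_init(void)
 *	{
 *		int cpu, ret;
 *		struct cpuidle_device *dev;
 *
 *		ret = cpuidle_register_driver(&foo_idle_driver);
 *		if (ret)
 *			return ret;
 *
 *		for_each_possible_cpu(cpu) {
 *			dev = &per_cpu(foo_idle_dev, cpu);
 *			dev->cpu = cpu;
 *			dev->safe_state_index = 0;
 *			// every cpu in the cluster shares the coupled set
 *			cpumask_copy(&dev->coupled_cpus, cpu_possible_mask);
 *			ret = cpuidle_register_device(dev);
 *			if (ret)
 *				return ret;
 *		}
 *		return 0;
 *	}
 */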

/**
 * struct cpuidle_coupled - data for set of cpus that share a coupled idle state
 * @coupled_cpus: mask of cpus that are part of the coupled set
 * @requested_state: array of requested states for cpus in the coupled set
 * @ready_waiting_counts: combined count of cpus in ready or waiting loops
 * @abort_barrier: counter used to synchronize an abort-and-retry across cpus
 * @online_count: count of cpus that are online
 * @refcnt: reference count of cpuidle devices that are using this struct
 * @prevent: flag to prevent coupled idle while a cpu is hotplugging
 */
struct cpuidle_coupled {
	cpumask_t coupled_cpus;
	int requested_state[NR_CPUS];
	atomic_t ready_waiting_counts;
	atomic_t abort_barrier;
	int online_count;
	int refcnt;
	int prevent;
};

#define WAITING_BITS 16
#define MAX_WAITING_CPUS (1 << WAITING_BITS)
#define WAITING_MASK (MAX_WAITING_CPUS - 1)
#define READY_MASK (~WAITING_MASK)
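
/*
 * ready_waiting_counts packs two counters into one atomic_t: bits
 * [WAITING_BITS-1:0] hold the waiting count and the remaining high bits
 * hold the ready count.  For example, the value 0x00020003 means two
 * cpus are in the ready loop and three are waiting (cpus in the ready
 * loop stay counted as waiting until they leave through set_done).
 */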

#define CPUIDLE_COUPLED_NOT_IDLE	(-1)

static DEFINE_MUTEX(cpuidle_coupled_lock);
static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb);

/*
 * The cpuidle_coupled_poke_pending mask is used to avoid calling
 * __smp_call_function_single with the per cpu call_single_data struct already
 * in use.  This prevents a deadlock where two cpus are waiting for each
 * other's call_single_data struct to be available.
 */
static cpumask_t cpuidle_coupled_poke_pending;

/*
 * The cpuidle_coupled_poked mask is used to ensure that each cpu has been poked
 * once to minimize entering the ready loop with a poke pending, which would
 * require aborting and retrying.
 */
static cpumask_t cpuidle_coupled_poked;

/**
 * cpuidle_coupled_parallel_barrier - synchronize all online coupled cpus
 * @dev: cpuidle_device of the calling cpu
 * @a: atomic variable to hold the barrier
 *
 * No caller of this function will return from this function until all online
 * cpus in the same coupled group have called this function.  Once any caller
 * has returned from this function, the barrier is immediately available for
 * reuse.
 *
 * The atomic variable a must be initialized to 0 before any cpu calls
 * this function, and will be reset to 0 before any cpu returns from this
 * function.
 *
 * Must only be called from within a coupled idle state handler
 * (state.enter when state.flags has CPUIDLE_FLAG_COUPLED set).
 *
 * Provides full smp barrier semantics before and after calling.
 */
void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
{
	int n = dev->coupled->online_count;

	smp_mb__before_atomic_inc();
	atomic_inc(a);

	while (atomic_read(a) < n)
		cpu_relax();

	if (atomic_inc_return(a) == n * 2) {
		atomic_set(a, 0);
		return;
	}

	while (atomic_read(a) > n)
		cpu_relax();
}
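
/*
 * Hypothetical usage sketch (the foo_* names are invented, not from any
 * real driver): a coupled state's enter function can use the barrier to
 * make every online coupled cpu rendezvous before one of them touches
 * shared blocks.
 *
 *	static atomic_t foo_barrier;
 *
 *	static int foo_enter_coupled(struct cpuidle_device *dev,
 *			struct cpuidle_driver *drv, int index)
 *	{
 *		// all online coupled cpus must reach this point
 *		cpuidle_coupled_parallel_barrier(dev, &foo_barrier);
 *
 *		if (dev->cpu == 0)
 *			foo_soc_power_down();	// shared blocks, done once
 *		else
 *			foo_cpu_power_down();	// this cpu only
 *
 *		return index;
 *	}
 */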

/**
 * cpuidle_state_is_coupled - check if a state is part of a coupled set
 * @dev: struct cpuidle_device for the current cpu
 * @drv: struct cpuidle_driver for the platform
 * @state: index of the target state in drv->states
 *
 * Returns true if the target state is coupled with cpus besides this one
 */
bool cpuidle_state_is_coupled(struct cpuidle_device *dev,
	struct cpuidle_driver *drv, int state)
{
	return drv->states[state].flags & CPUIDLE_FLAG_COUPLED;
}

/**
 * cpuidle_coupled_set_ready - mark a cpu as ready
 * @coupled: the struct coupled that contains the current cpu
 */
static inline void cpuidle_coupled_set_ready(struct cpuidle_coupled *coupled)
{
	atomic_add(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
}

/**
 * cpuidle_coupled_set_not_ready - mark a cpu as not ready
 * @coupled: the struct coupled that contains the current cpu
 *
 * Decrements the ready counter, unless the ready (and thus the waiting) counter
 * is equal to the number of online cpus.  Prevents a race where one cpu
 * decrements the waiting counter and then re-increments it just before another
 * cpu has decremented its ready counter, leading to the ready counter going
 * down from the number of online cpus without going through the coupled idle
 * state.
 *
 * Returns 0 if the counter was decremented successfully, -EINVAL if the ready
 * counter was equal to the number of online cpus.
 */
static
inline int cpuidle_coupled_set_not_ready(struct cpuidle_coupled *coupled)
{
	int all;
	int ret;

	all = coupled->online_count | (coupled->online_count << WAITING_BITS);
	ret = atomic_add_unless(&coupled->ready_waiting_counts,
		-MAX_WAITING_CPUS, all);

	return ret ? 0 : -EINVAL;
}

/**
 * cpuidle_coupled_no_cpus_ready - check if no cpus in a coupled set are ready
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all of the cpus in a coupled set are out of the ready loop.
 */
static inline int cpuidle_coupled_no_cpus_ready(struct cpuidle_coupled *coupled)
{
	int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;
	return r == 0;
}

/**
 * cpuidle_coupled_cpus_ready - check if all cpus in a coupled set are ready
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all cpus coupled to this target state are in the ready loop
 */
static inline bool cpuidle_coupled_cpus_ready(struct cpuidle_coupled *coupled)
{
	int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;
	return r == coupled->online_count;
}

/**
 * cpuidle_coupled_cpus_waiting - check if all cpus in a coupled set are waiting
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all cpus coupled to this target state are in the wait loop
 */
static inline bool cpuidle_coupled_cpus_waiting(struct cpuidle_coupled *coupled)
{
	int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;
	return w == coupled->online_count;
}

/**
 * cpuidle_coupled_no_cpus_waiting - check if no cpus in coupled set are waiting
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all of the cpus in a coupled set are out of the waiting loop.
 */
static inline int cpuidle_coupled_no_cpus_waiting(struct cpuidle_coupled *coupled)
{
	int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;
	return w == 0;
}

/**
 * cpuidle_coupled_get_state - determine the deepest idle state
 * @dev: struct cpuidle_device for this cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns the deepest idle state that all coupled cpus can enter
 */
static inline int cpuidle_coupled_get_state(struct cpuidle_device *dev,
		struct cpuidle_coupled *coupled)
{
	int i;
	int state = INT_MAX;

	/*
	 * Read barrier ensures that read of requested_state is ordered after
	 * reads of ready_count.  Matches the write barrier in
	 * cpuidle_coupled_set_waiting.
	 */
	smp_rmb();

	for_each_cpu_mask(i, coupled->coupled_cpus)
		if (cpu_online(i) && coupled->requested_state[i] < state)
			state = coupled->requested_state[i];

	return state;
}
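
/*
 * Worked example of the minimum above: if cpu 0 requested state 2 but
 * cpu 1 only requested state 1, cpuidle_coupled_get_state returns 1,
 * the deepest state that every online cpu in the set agreed to enter.
 */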

static void cpuidle_coupled_handle_poke(void *info)
{
	int cpu = (unsigned long)info;
	cpumask_set_cpu(cpu, &cpuidle_coupled_poked);
	cpumask_clear_cpu(cpu, &cpuidle_coupled_poke_pending);
}

/**
 * cpuidle_coupled_poke - wake up a cpu that may be waiting
 * @cpu: target cpu
 *
 * Ensures that the target cpu exits its waiting idle state (if it is in it)
 * and will see updates to waiting_count before it re-enters its waiting idle
 * state.
 *
 * If cpuidle_coupled_poke_pending is already set for the target cpu, that cpu
 * either has or will soon have a pending IPI that will wake it out of idle,
 * or it is currently processing the IPI and is not in idle.
 */
static void cpuidle_coupled_poke(int cpu)
{
	struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);

	if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poke_pending))
		__smp_call_function_single(cpu, csd, 0);
}

/**
 * cpuidle_coupled_poke_others - wake up all other cpus that may be waiting
 * @this_cpu: the current cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Calls cpuidle_coupled_poke on all other online cpus.
 */
static void cpuidle_coupled_poke_others(int this_cpu,
		struct cpuidle_coupled *coupled)
{
	int cpu;

	for_each_cpu_mask(cpu, coupled->coupled_cpus)
		if (cpu != this_cpu && cpu_online(cpu))
			cpuidle_coupled_poke(cpu);
}

/**
 * cpuidle_coupled_set_waiting - mark this cpu as in the wait loop
 * @cpu: the current cpu
 * @coupled: the struct coupled that contains the current cpu
 * @next_state: the index in drv->states of the requested state for this cpu
 *
 * Updates the requested idle state for the specified cpuidle device.
 * Returns the number of waiting cpus.
 */
static int cpuidle_coupled_set_waiting(int cpu,
		struct cpuidle_coupled *coupled, int next_state)
{
	coupled->requested_state[cpu] = next_state;

	/*
	 * The atomic_inc_return provides a write barrier to order the write
	 * to requested_state with the later write that increments ready_count.
	 */
	return atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK;
}

/**
 * cpuidle_coupled_set_not_waiting - mark this cpu as leaving the wait loop
 * @cpu: the current cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Removes the requested idle state for the specified cpuidle device.
 */
static void cpuidle_coupled_set_not_waiting(int cpu,
		struct cpuidle_coupled *coupled)
{
	/*
	 * Decrementing waiting count can race with incrementing it in
	 * cpuidle_coupled_set_waiting, but that's OK.  Worst case, some
	 * cpus will increment ready_count and then spin until they
	 * notice that this cpu has cleared its requested_state.
	 */
	atomic_dec(&coupled->ready_waiting_counts);

	coupled->requested_state[cpu] = CPUIDLE_COUPLED_NOT_IDLE;
}

/**
 * cpuidle_coupled_set_done - mark this cpu as leaving the ready loop
 * @cpu: the current cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Marks this cpu as no longer in the ready and waiting loops.  Decrements
 * the waiting count first to prevent another cpu looping back in and seeing
 * this cpu as waiting just before it exits idle.
 */
static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled)
{
	cpuidle_coupled_set_not_waiting(cpu, coupled);
	atomic_sub(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
}

/**
 * cpuidle_coupled_clear_pokes - spin until the poke interrupt is processed
 * @cpu: this cpu
 *
 * Turns on interrupts and spins until any outstanding poke interrupts have
 * been processed and the poke bit has been cleared.
 *
 * Other interrupts may also be processed while interrupts are enabled, so
 * need_resched() must be tested after turning interrupts off again to make sure
 * the interrupt didn't schedule work that should take the cpu out of idle.
 *
 * Returns 0 if need_resched was false, -EINTR if need_resched was true.
 */
static int cpuidle_coupled_clear_pokes(int cpu)
{
	local_irq_enable();
	while (cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
		cpu_relax();
	local_irq_disable();

	return need_resched() ? -EINTR : 0;
}

static bool cpuidle_coupled_any_pokes_pending(struct cpuidle_coupled *coupled)
{
	cpumask_t cpus;
	int ret;

	cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
	ret = cpumask_and(&cpus, &cpuidle_coupled_poke_pending, &cpus);

	return ret;
}

/**
 * cpuidle_enter_state_coupled - attempt to enter a state with coupled cpus
 * @dev: struct cpuidle_device for the current cpu
 * @drv: struct cpuidle_driver for the platform
 * @next_state: index of the requested state in drv->states
 *
 * Coordinate with coupled cpus to enter the target state.  This is a two
 * stage process.  In the first stage, the cpus are operating independently,
 * and may call into cpuidle_enter_state_coupled at completely different times.
 * To save as much power as possible, the first cpus to call this function will
 * go to an intermediate state (the cpuidle_device's safe state), and wait for
 * all the other cpus to call this function.  Once all coupled cpus are idle,
 * the second stage will start.  Each coupled cpu will spin until all cpus have
 * guaranteed that they will enter the target state.
 *
 * This function must be called with interrupts disabled.  It may enable
 * interrupts while preparing for idle, and it will always return with
 * interrupts enabled.
 */
int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int next_state)
{
	int entered_state = -1;
	struct cpuidle_coupled *coupled = dev->coupled;
	int w;

	if (!coupled)
		return -EINVAL;

	while (coupled->prevent) {
		if (cpuidle_coupled_clear_pokes(dev->cpu)) {
			local_irq_enable();
			return entered_state;
		}
		entered_state = cpuidle_enter_state(dev, drv,
			dev->safe_state_index);
	}

	/* Read barrier ensures online_count is read after prevent is cleared */
	smp_rmb();

reset:
	cpumask_clear_cpu(dev->cpu, &cpuidle_coupled_poked);

	w = cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state);
	/*
	 * If this is the last cpu to enter the waiting state, poke
	 * all the other cpus out of their waiting state so they can
	 * enter a deeper state.  This can race with one of the cpus
	 * exiting the waiting state due to an interrupt and
	 * decrementing waiting_count, see comment below.
	 */
	if (w == coupled->online_count) {
		cpumask_set_cpu(dev->cpu, &cpuidle_coupled_poked);
		cpuidle_coupled_poke_others(dev->cpu, coupled);
	}

retry:
	/*
	 * Wait for all coupled cpus to be idle, using the deepest state
	 * allowed for a single cpu.  If this was not the poking cpu, wait
	 * for at least one poke before leaving to avoid a race where
	 * two cpus could arrive at the waiting loop at the same time,
	 * but the first of the two to arrive could skip the loop without
	 * processing the pokes from the last to arrive.
	 */
	while (!cpuidle_coupled_cpus_waiting(coupled) ||
			!cpumask_test_cpu(dev->cpu, &cpuidle_coupled_poked)) {
		if (cpuidle_coupled_clear_pokes(dev->cpu)) {
			cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
			goto out;
		}

		if (coupled->prevent) {
			cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
			goto out;
		}

		entered_state = cpuidle_enter_state(dev, drv,
			dev->safe_state_index);
	}

	if (cpuidle_coupled_clear_pokes(dev->cpu)) {
		cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
		goto out;
	}

	/*
	 * Make sure final poke status for this cpu is visible before setting
	 * cpu as ready.
	 */
	smp_wmb();

	/*
	 * All coupled cpus are probably idle.  There is a small chance that
	 * one of the other cpus just became active.  Increment the ready count,
	 * and spin until all coupled cpus have incremented the counter.  Once a
	 * cpu has incremented the ready counter, it cannot abort idle and must
	 * spin until either all cpus have incremented the ready counter, or
	 * another cpu leaves idle and decrements the waiting counter.
	 */

	cpuidle_coupled_set_ready(coupled);
	while (!cpuidle_coupled_cpus_ready(coupled)) {
		/* Check if any other cpus bailed out of idle. */
		if (!cpuidle_coupled_cpus_waiting(coupled))
			if (!cpuidle_coupled_set_not_ready(coupled))
				goto retry;

		cpu_relax();
	}

	/*
	 * Make sure read of all cpus ready is done before reading pending pokes
	 */
	smp_rmb();

	/*
	 * There is a small chance that a cpu left and reentered idle after this
	 * cpu saw that all cpus were waiting.  The cpu that reentered idle will
	 * have sent this cpu a poke, which will still be pending after the
	 * ready loop.  The pending interrupt may be lost by the interrupt
	 * controller when entering the deep idle state.  It's not possible to
	 * clear a pending interrupt without turning interrupts on and handling
	 * it, and it's too late to turn on interrupts here, so reset the
	 * coupled idle state of all cpus and retry.
	 */
	if (cpuidle_coupled_any_pokes_pending(coupled)) {
		cpuidle_coupled_set_done(dev->cpu, coupled);
		/* Wait for all cpus to see the pending pokes */
		cpuidle_coupled_parallel_barrier(dev, &coupled->abort_barrier);
		goto reset;
	}

	/* all cpus have acked the coupled state */
	next_state = cpuidle_coupled_get_state(dev, coupled);

	entered_state = cpuidle_enter_state(dev, drv, next_state);

	cpuidle_coupled_set_done(dev->cpu, coupled);

out:
	/*
	 * Normal cpuidle states are expected to return with irqs enabled.
	 * That leads to an inefficiency where a cpu receiving an interrupt
	 * that brings it out of idle will process that interrupt before
	 * exiting the idle enter function and decrementing ready_count.  All
	 * other cpus will need to spin waiting for the cpu that is processing
	 * the interrupt.  If the driver returns with interrupts disabled,
	 * all other cpus will loop back into the safe idle state instead of
	 * spinning, saving power.
	 *
	 * Calling local_irq_enable here allows coupled states to return with
	 * interrupts disabled, but won't cause problems for drivers that
	 * exit with interrupts enabled.
	 */
	local_irq_enable();

	/*
	 * Wait until all coupled cpus have exited idle.  There is no risk that
	 * a cpu exits and re-enters the ready state because this cpu has
	 * already decremented its waiting_count.
	 */
	while (!cpuidle_coupled_no_cpus_ready(coupled))
		cpu_relax();

	return entered_state;
}

static void cpuidle_coupled_update_online_cpus(struct cpuidle_coupled *coupled)
{
	cpumask_t cpus;
	cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
	coupled->online_count = cpumask_weight(&cpus);
}

/**
 * cpuidle_coupled_register_device - register a coupled cpuidle device
 * @dev: struct cpuidle_device for the current cpu
 *
 * Called from cpuidle_register_device to handle coupled idle init.  Finds the
 * cpuidle_coupled struct for this set of coupled cpus, or creates one if none
 * exists yet.
 */
int cpuidle_coupled_register_device(struct cpuidle_device *dev)
{
	int cpu;
	struct cpuidle_device *other_dev;
	struct call_single_data *csd;
	struct cpuidle_coupled *coupled;

	if (cpumask_empty(&dev->coupled_cpus))
		return 0;

	for_each_cpu_mask(cpu, dev->coupled_cpus) {
		other_dev = per_cpu(cpuidle_devices, cpu);
		if (other_dev && other_dev->coupled) {
			coupled = other_dev->coupled;
			goto have_coupled;
		}
	}

	/* No existing coupled info found, create a new one */
	coupled = kzalloc(sizeof(struct cpuidle_coupled), GFP_KERNEL);
	if (!coupled)
		return -ENOMEM;

	coupled->coupled_cpus = dev->coupled_cpus;

have_coupled:
	dev->coupled = coupled;
	if (WARN_ON(!cpumask_equal(&dev->coupled_cpus, &coupled->coupled_cpus)))
		coupled->prevent++;

	cpuidle_coupled_update_online_cpus(coupled);

	coupled->refcnt++;

	csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu);
	csd->func = cpuidle_coupled_handle_poke;
	csd->info = (void *)(unsigned long)dev->cpu;

	return 0;
}

/**
 * cpuidle_coupled_unregister_device - unregister a coupled cpuidle device
 * @dev: struct cpuidle_device for the current cpu
 *
 * Called from cpuidle_unregister_device to tear down coupled idle.  Removes the
 * cpu from the coupled idle set, and frees the cpuidle_coupled struct if this
 * was the last cpu in the set.
 */
void cpuidle_coupled_unregister_device(struct cpuidle_device *dev)
{
	struct cpuidle_coupled *coupled = dev->coupled;

	if (cpumask_empty(&dev->coupled_cpus))
		return;

	/* Free the shared struct only when the last reference is dropped. */
	if (!--coupled->refcnt)
		kfree(coupled);
	dev->coupled = NULL;
}

/**
 * cpuidle_coupled_prevent_idle - prevent cpus from entering a coupled state
 * @coupled: the struct coupled that contains the cpu that is changing state
 *
 * Disables coupled cpuidle on a coupled set of cpus.  Used to ensure that
 * cpu_online_mask doesn't change while cpus are coordinating coupled idle.
 */
static void cpuidle_coupled_prevent_idle(struct cpuidle_coupled *coupled)
{
	int cpu = get_cpu();

	/* Force all cpus out of the waiting loop. */
	coupled->prevent++;
	cpuidle_coupled_poke_others(cpu, coupled);
	put_cpu();
	while (!cpuidle_coupled_no_cpus_waiting(coupled))
		cpu_relax();
}

/**
 * cpuidle_coupled_allow_idle - allows cpus to enter a coupled state
 * @coupled: the struct coupled that contains the cpu that is changing state
 *
 * Enables coupled cpuidle on a coupled set of cpus.  Used to ensure that
 * cpu_online_mask doesn't change while cpus are coordinating coupled idle.
 */
static void cpuidle_coupled_allow_idle(struct cpuidle_coupled *coupled)
{
	int cpu = get_cpu();

	/*
	 * Write barrier ensures readers see the new online_count when they
	 * see prevent == 0.
	 */
	smp_wmb();
	coupled->prevent--;
	/* Force cpus out of the prevent loop. */
	cpuidle_coupled_poke_others(cpu, coupled);
	put_cpu();
}

/**
 * cpuidle_coupled_cpu_notify - notifier called during hotplug transitions
 * @nb: notifier block
 * @action: hotplug transition
 * @hcpu: target cpu number
 *
 * Called when a cpu is brought online or taken offline using hotplug.  Updates
 * the coupled cpu set appropriately.
 */
static int cpuidle_coupled_cpu_notify(struct notifier_block *nb,
		unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	struct cpuidle_device *dev;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
	case CPU_DOWN_PREPARE:
	case CPU_ONLINE:
	case CPU_DEAD:
	case CPU_UP_CANCELED:
	case CPU_DOWN_FAILED:
		break;
	default:
		return NOTIFY_OK;
	}

	mutex_lock(&cpuidle_lock);

	dev = per_cpu(cpuidle_devices, cpu);
	if (!dev || !dev->coupled)
		goto out;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
	case CPU_DOWN_PREPARE:
		cpuidle_coupled_prevent_idle(dev->coupled);
		break;
	case CPU_ONLINE:
	case CPU_DEAD:
		cpuidle_coupled_update_online_cpus(dev->coupled);
		/* Fall through */
	case CPU_UP_CANCELED:
	case CPU_DOWN_FAILED:
		cpuidle_coupled_allow_idle(dev->coupled);
		break;
	}

out:
	mutex_unlock(&cpuidle_lock);
	return NOTIFY_OK;
}

static struct notifier_block cpuidle_coupled_cpu_notifier = {
	.notifier_call = cpuidle_coupled_cpu_notify,
};

static int __init cpuidle_coupled_init(void)
{
	return register_cpu_notifier(&cpuidle_coupled_cpu_notifier);
}
core_initcall(cpuidle_coupled_init);