/*
 * kernel/sched.c
 *
 * Kernel scheduler and related syscalls
 *
 * Copyright (C) 1991-2002  Linus Torvalds
 *
 * 1996-12-23	Modified by Dave Grothe to fix bugs in semaphores and
 *		make semaphores SMP safe
 * 1998-11-19	Implemented schedule_timeout() and related stuff
 *		by Andrea Arcangeli
 * 2002-01-04	New ultra-scalable O(1) scheduler by Ingo Molnar:
 *		hybrid priority-list and round-robin design with
 *		an array-switch method of distributing timeslices
 *		and per-CPU runqueues.  Cleanups and useful suggestions
 *		by Davide Libenzi, preemptible kernel bits by Robert Love.
 * 2003-09-03	Interactivity tuning by Con Kolivas.
 * 2004-04-02	Scheduler domains code by Nick Piggin
 * 2007-04-15	Work begun on replacing all interactivity tuning with a
 *		fair scheduling design by Con Kolivas.
 * 2007-05-05	Load balancing (smp-nice) and other improvements
 *		by Peter Williams
 * 2007-05-06	Interactivity improvements to CFS by Mike Galbraith
 * 2007-07-01	Group scheduling enhancements by Srivatsa Vaddagiri
 * 2007-11-29	RT balancing improvements by Steven Rostedt, Gregory Haskins,
 *		Thomas Gleixner, Mike Kravetz
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/smp_lock.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/perf_event.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/freezer.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/pid_namespace.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>
#include <linux/sysctl.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/debugfs.h>
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/slab.h>

#include <asm/tlb.h>
#include <asm/irq_regs.h>
#include <asm/mutex.h>

#include "sched_cpupri.h"
#include "workqueue_sched.h"
#include "sched_autogroup.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>

/*
 * Convert user-nice values [ -20 ... 0 ... 19 ]
 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
 * and back.
 */
#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)
#define TASK_NICE(p)		PRIO_TO_NICE((p)->static_prio)
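
/*
 * Worked example of the mapping above (assuming the usual
 * MAX_RT_PRIO == 100):
 *
 *	NICE_TO_PRIO(0)   == 120	(the default static priority)
 *	NICE_TO_PRIO(-20) == 100
 *	NICE_TO_PRIO(19)  == 139
 */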

/*
 * 'User priority' is the nice value converted to something we
 * can work with better when scaling various scheduler parameters;
 * it's a [ 0 ... 39 ] range.
 */
#define USER_PRIO(p)		((p)-MAX_RT_PRIO)
#define TASK_USER_PRIO(p)	USER_PRIO((p)->static_prio)
#define MAX_USER_PRIO		(USER_PRIO(MAX_PRIO))
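
/*
 * With the values above (assuming MAX_RT_PRIO == 100 and
 * MAX_PRIO == MAX_RT_PRIO + 40 == 140):
 *
 *	USER_PRIO(120) == 20	(a nice-0 task)
 *	MAX_USER_PRIO  == 40
 */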

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
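
/*
 * Example, assuming HZ == 250: one jiffy is NSEC_PER_SEC / HZ ==
 * 4,000,000 ns, so NS_TO_JIFFIES(10000000) == 2 (integer division
 * truncates the remainder).
 */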

#define NICE_0_LOAD		SCHED_LOAD_SCALE
#define NICE_0_SHIFT		SCHED_LOAD_SHIFT
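
/*
 * SCHED_LOAD_SHIFT is 10 on typical configurations, which makes
 * NICE_0_LOAD == 1024: the load weight of a nice-0 task, against which
 * all other weights are scaled.
 */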

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
 * Timeslices get refilled after they expire.
 */
#define DEF_TIMESLICE		(100 * HZ / 1000)
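
/*
 * E.g. with HZ == 1000 this evaluates to 100 jiffies; with HZ == 250 it
 * evaluates to 25 jiffies. Either way it represents 100ms of wall time
 * per refill.
 */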

/*
 * A single value that denotes runtime == period, i.e. unlimited time.
 */
#define RUNTIME_INF	((u64)~0ULL)

static inline int rt_policy(int policy)
{
	if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
		return 1;
	return 0;
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};
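
/*
 * A sketch of how this queue is typically consumed (hedged: the real
 * lookup lives in sched_rt.c, and the variable names here are
 * illustrative only):
 *
 *	int idx = sched_find_first_bit(array->bitmap);
 *	if (idx < MAX_RT_PRIO)
 *		next = list_first_entry(&array->queue[idx],
 *					struct sched_rt_entity, run_list);
 *
 * The extra delimiter bit stays set so the bitmap search always
 * terminates, even when no task is queued.
 */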

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;
	ktime_t rt_period;
	u64 rt_runtime;
	struct hrtimer rt_period_timer;
};

static struct rt_bandwidth def_rt_bandwidth;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
	struct rt_bandwidth *rt_b =
		container_of(timer, struct rt_bandwidth, rt_period_timer);
	ktime_t now;
	int overrun;
	int idle = 0;

	for (;;) {
		now = hrtimer_cb_get_time(timer);
		overrun = hrtimer_forward(timer, now, rt_b->rt_period);

		if (!overrun)
			break;

		idle = do_sched_rt_period_timer(rt_b, overrun);
	}

	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}
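
/*
 * Note on the loop above: hrtimer_forward() advances the timer expiry
 * by whole rt_period steps and returns how many periods elapsed, so the
 * runtime pool is replenished once per elapsed period. The timer is
 * only rearmed while do_sched_rt_period_timer() reports non-idle
 * runqueues.
 */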

static
void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
	rt_b->rt_period = ns_to_ktime(period);
	rt_b->rt_runtime = runtime;

	raw_spin_lock_init(&rt_b->rt_runtime_lock);

	hrtimer_init(&rt_b->rt_period_timer,
			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rt_b->rt_period_timer.function = sched_rt_period_timer;
}
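
/*
 * A minimal usage sketch, mirroring how the default bandwidth pool is
 * set up at boot (e.g. from sched_init()):
 *
 *	init_rt_bandwidth(&def_rt_bandwidth,
 *			  global_rt_period(), global_rt_runtime());
 */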

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	ktime_t now;

	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return;

	if (hrtimer_active(&rt_b->rt_period_timer))
		return;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	for (;;) {
		unsigned long delta;
		ktime_t soft, hard;

		if (hrtimer_active(&rt_b->rt_period_timer))
			break;

		now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
		hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);

		soft = hrtimer_get_softexpires(&rt_b->rt_period_timer);
		hard = hrtimer_get_expires(&rt_b->rt_period_timer);
		delta = ktime_to_ns(ktime_sub(hard, soft));
		__hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
				HRTIMER_MODE_ABS_PINNED, 0);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	hrtimer_cancel(&rt_b->rt_period_timer);
}
#endif

/*
 * sched_domains_mutex serializes calls to arch_init_sched_domains,
 * detach_destroy_domains and partition_sched_domains.
 */
static DEFINE_MUTEX(sched_domains_mutex);

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;

static LIST_HEAD(task_groups);

/* task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each cpu */
	struct sched_entity **se;
	/* runqueue "owned" by this group on each cpu */
	struct cfs_rq **cfs_rq;
	unsigned long shares;

	atomic_t load_weight;
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity **rt_se;
	struct rt_rq **rt_rq;

	struct rt_bandwidth rt_bandwidth;
#endif

	struct rcu_head rcu;
	struct list_head list;

	struct task_group *parent;
	struct list_head siblings;
	struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif
};

#define root_task_group init_task_group

/* task_group_lock serializes the addition/removal of task groups */
static DEFINE_SPINLOCK(task_group_lock);

#ifdef CONFIG_FAIR_GROUP_SCHED

# define INIT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of an entity should not be too large,
 * and neither should the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 *  limitation from this.)
 */
#define MIN_SHARES	2
#define MAX_SHARES	(1UL << 18)
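
/*
 * With the defines above, group shares are clamped to [2, 262144]
 * (1UL << 18 == 262144); the default of 1024 sits comfortably inside
 * that range.
 */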

static int init_task_group_load = INIT_TASK_GROUP_LOAD;
#endif

/*
 * Default task group.
 * Every task in the system belongs to this group at bootup.
 */
struct task_group init_task_group;

#endif	/* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight load;
	unsigned long nr_running;

	u64 exec_clock;
	u64 min_vruntime;

	struct rb_root tasks_timeline;
	struct rb_node *rb_leftmost;

	struct list_head tasks;
	struct list_head *balance_iterator;

	/*
	 * 'curr' points to the currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e. when none are currently running).
	 */
	struct sched_entity *curr, *next, *last;

	unsigned int nr_spread_over;

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf cfs_rqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together the list of leaf cfs_rq's in a cpu.
	 * This list is used during load balance.
	 */
	int on_list;
	struct list_head leaf_cfs_rq_list;
	struct task_group *tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_SMP
	/*
	 * the part of load.weight contributed by tasks
	 */
	unsigned long task_weight;

	/*
	 *   h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long h_load;

	/*
	 * Maintaining per-cpu shares distribution for group scheduling
	 *
	 * load_stamp is the last time we updated the load average
	 * load_last is the last time we updated the load average and saw load
	 * load_unacc_exec_time is currently unaccounted execution time
	 */
	u64 load_avg;
	u64 load_period;
	u64 load_stamp, load_last, load_unacc_exec_time;

	unsigned long load_contribution;
#endif
#endif
};

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array active;
	unsigned long rt_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long rt_nr_migratory;
	unsigned long rt_nr_total;
	int overloaded;
	struct plist_head pushable_tasks;
#endif
	int rt_throttled;
	u64 rt_time;
	u64 rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long rt_nr_boosted;

	struct rq *rq;
	struct list_head leaf_rt_rq_list;
	struct task_group *tg;
#endif
};

#ifdef CONFIG_SMP

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 */
struct root_domain {
	atomic_t refcount;
	cpumask_var_t span;
	cpumask_var_t online;

	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t rto_mask;
	atomic_t rto_count;
	struct cpupri cpupri;
};

/*
 * By default the system creates a single root-domain with all cpus as
 * members (mimicking the global state we have today).
 */
static struct root_domain def_root_domain;

#endif /* CONFIG_SMP */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: code that wants to lock multiple runqueues (such as the
 * load balancing or the thread migration code) must acquire the locks
 * in ascending &runqueue order.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned long nr_running;
#define CPU_LOAD_IDX_MAX 5
	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
	unsigned long last_load_update_tick;
#ifdef CONFIG_NO_HZ
	u64 nohz_stamp;
	unsigned char nohz_balance_kick;
#endif
	unsigned int skip_clock_update;

	/* capture load from *all* tasks on this cpu: */
	struct load_weight load;
	unsigned long nr_load_updates;
	u64 nr_switches;

	struct cfs_rq cfs;
	struct rt_rq rt;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this cpu: */
	struct list_head leaf_cfs_rq_list;
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	struct list_head leaf_rt_rq_list;
#endif

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	struct task_struct *curr, *idle, *stop;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	u64 clock;
	u64 clock_task;

	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain *rd;
	struct sched_domain *sd;

	unsigned long cpu_power;

	unsigned char idle_at_tick;
	/* For active balancing */
	int post_schedule;
	int active_balance;
	int push_cpu;
	struct cpu_stop_work active_balance_work;
	/* cpu of this runqueue: */
	int cpu;
	int online;

	unsigned long avg_load_per_task;

	u64 rt_avg;
	u64 age_stamp;
	u64 idle_stamp;
	u64 avg_idle;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;
#endif

	/* calc_load related fields */
	unsigned long calc_load_update;
	long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int hrtick_csd_pending;
	struct call_single_data hrtick_csd;
#endif
	struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_switch;
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;

	/* BKL stats */
	unsigned int bkl_count;
#endif
};
| 563 | |
Fenghua Yu | f34e3b6 | 2007-07-19 01:48:13 -0700 | [diff] [blame] | 564 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 565 | |
Mike Galbraith | a64692a | 2010-03-11 17:16:20 +0100 | [diff] [blame] | 566 | |
Peter Zijlstra | 1e5a740 | 2010-10-31 12:37:04 +0100 | [diff] [blame] | 567 | static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 568 | |
Christoph Lameter | 0a2966b | 2006-09-25 23:30:51 -0700 | [diff] [blame] | 569 | static inline int cpu_of(struct rq *rq) |
| 570 | { |
| 571 | #ifdef CONFIG_SMP |
| 572 | return rq->cpu; |
| 573 | #else |
| 574 | return 0; |
| 575 | #endif |
| 576 | } |
| 577 | |
Paul E. McKenney | 497f0ab | 2010-02-22 17:04:51 -0800 | [diff] [blame] | 578 | #define rcu_dereference_check_sched_domain(p) \ |
Paul E. McKenney | d11c563 | 2010-02-22 17:04:50 -0800 | [diff] [blame] | 579 | rcu_dereference_check((p), \ |
| 580 | rcu_read_lock_sched_held() || \ |
| 581 | lockdep_is_held(&sched_domains_mutex)) |
| 582 | |
Ingo Molnar | 20d315d | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 583 | /* |
Nick Piggin | 674311d | 2005-06-25 14:57:27 -0700 | [diff] [blame] | 584 | * The domain tree (rq->sd) is protected by RCU's quiescent state transition. |
Dinakar Guniguntala | 1a20ff2 | 2005-06-25 14:57:33 -0700 | [diff] [blame] | 585 | * See detach_destroy_domains: synchronize_sched for details. |
Nick Piggin | 674311d | 2005-06-25 14:57:27 -0700 | [diff] [blame] | 586 | * |
| 587 | * The domain tree of any CPU may only be accessed from within |
| 588 | * preempt-disabled sections. |
| 589 | */ |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 590 | #define for_each_domain(cpu, __sd) \ |
Paul E. McKenney | 497f0ab | 2010-02-22 17:04:51 -0800 | [diff] [blame] | 591 | for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent) |
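
/*
 * Typical traversal, from the cpu's base domain up to the topmost
 * parent; must run with preemption disabled, per the comment above
 * (a sketch - the domain flag test is illustrative):
 *
 *	for_each_domain(cpu, sd) {
 *		if (sd->flags & SD_LOAD_BALANCE)
 *			;	// balance within this domain
 *	}
 */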

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		(&__get_cpu_var(runqueues))
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		(&__raw_get_cpu_var(runqueues))

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We use task_subsys_state_check() and extend the RCU verification
 * with lockdep_is_held(&task_rq(p)->lock) because cpu_cgroup_attach()
 * holds that lock for each task it moves into the cgroup. Therefore
 * by holding that lock, we pin the task to the current cgroup.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	struct task_group *tg;
	struct cgroup_subsys_state *css;

	css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
			lockdep_is_held(&task_rq(p)->lock));
	tg = container_of(css, struct task_group, css);

	return autogroup_task_group(p, tg);
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
	p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
	p->se.parent = task_group(p)->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq = task_group(p)->rt_rq[cpu];
	p->rt.parent = task_group(p)->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static u64 irq_time_cpu(int cpu);
static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time);

inline void update_rq_clock(struct rq *rq)
{
	if (!rq->skip_clock_update) {
		int cpu = cpu_of(rq);
		u64 irq_time;

		rq->clock = sched_clock_cpu(cpu);
		irq_time = irq_time_cpu(cpu);
		if (rq->clock - irq_time > rq->clock_task)
			rq->clock_task = rq->clock - irq_time;

		sched_irq_time_avg_update(rq, irq_time);
	}
}
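
/*
 * In other words: rq->clock counts all time, while rq->clock_task only
 * advances while no IRQ/softirq time is being accounted, so task
 * runtimes exclude interrupt processing. This only has an effect when
 * CONFIG_IRQ_TIME_ACCOUNTING makes irq_time_cpu() return non-zero.
 */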

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# define const_debug __read_mostly
#else
# define const_debug static const
#endif

/**
 * runqueue_is_locked
 * @cpu: the processor in question.
 *
 * Returns true if the current cpu runqueue is locked.
 * This interface allows printk to be called with the runqueue lock
 * held and know whether or not it is OK to wake up the klogd.
 */
int runqueue_is_locked(int cpu)
{
	return raw_spin_is_locked(&cpu_rq(cpu)->lock);
}

/*
 * Debugging: various feature bits
 */

#define SCHED_FEAT(name, enabled)	\
	__SCHED_FEAT_##name ,

enum {
#include "sched_features.h"
};

#undef SCHED_FEAT

#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |

const_debug unsigned int sysctl_sched_features =
#include "sched_features.h"
	0;

#undef SCHED_FEAT

#ifdef CONFIG_SCHED_DEBUG
#define SCHED_FEAT(name, enabled)	\
	#name ,

static __read_mostly char *sched_feat_names[] = {
#include "sched_features.h"
	NULL
};

#undef SCHED_FEAT

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; sched_feat_names[i]; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int neg = 0;
	int i;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* compare against the stripped string, not the raw buffer */
	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	for (i = 0; sched_feat_names[i]; i++) {
		if (strcmp(cmp, sched_feat_names[i]) == 0) {
			if (neg)
				sysctl_sched_features &= ~(1UL << i);
			else
				sysctl_sched_features |= (1UL << i);
			break;
		}
	}

	if (!sched_feat_names[i])
		return -EINVAL;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	return 0;
}
late_initcall(sched_init_debug);

#endif

#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
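
/*
 * Feature tests then read naturally at the call sites, e.g. (using
 * HRTICK as an example entry from sched_features.h):
 *
 *	if (sched_feat(HRTICK))
 *		;	// take the hrtick-enabled path
 */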

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = 32;

/*
 * period over which we average the RT time consumption, measured
 * in ms.
 *
 * default: 1s
 */
const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;

/*
 * period over which we measure -rt task cpu usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

static __read_mostly int scheduler_running;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;

static inline u64 global_rt_period(void)
{
	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
	if (sysctl_sched_rt_runtime < 0)
		return RUNTIME_INF;

	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}
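
/*
 * With the defaults above: global_rt_period() == 1,000,000,000 ns and
 * global_rt_runtime() == 950,000,000 ns, i.e. rt tasks may consume at
 * most 95% of each 1s period unless configured otherwise.
 */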
Peter Zijlstra | fa85ae2 | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 839 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 840 | #ifndef prepare_arch_switch |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 841 | # define prepare_arch_switch(next) do { } while (0) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 842 | #endif |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 843 | #ifndef finish_arch_switch |
| 844 | # define finish_arch_switch(prev) do { } while (0) |
| 845 | #endif |
| 846 | |
Dmitry Adamushko | 051a1d1 | 2007-12-18 15:21:13 +0100 | [diff] [blame] | 847 | static inline int task_current(struct rq *rq, struct task_struct *p) |
| 848 | { |
| 849 | return rq->curr == p; |
| 850 | } |
| 851 | |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 852 | #ifndef __ARCH_WANT_UNLOCKED_CTXSW |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 853 | static inline int task_running(struct rq *rq, struct task_struct *p) |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 854 | { |
Dmitry Adamushko | 051a1d1 | 2007-12-18 15:21:13 +0100 | [diff] [blame] | 855 | return task_current(rq, p); |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 856 | } |
| 857 | |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 858 | static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 859 | { |
| 860 | } |
| 861 | |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 862 | static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 863 | { |
Ingo Molnar | da04c03 | 2005-09-13 11:17:59 +0200 | [diff] [blame] | 864 | #ifdef CONFIG_DEBUG_SPINLOCK |
| 865 | /* this is a valid case when another task releases the spinlock */ |
| 866 | rq->lock.owner = current; |
| 867 | #endif |
Ingo Molnar | 8a25d5d | 2006-07-03 00:24:54 -0700 | [diff] [blame] | 868 | /* |
| 869 | * If we are tracking spinlock dependencies then we have to |
| 870 | * fix up the runqueue lock - which gets 'carried over' from |
| 871 | * prev into current: |
| 872 | */ |
| 873 | spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); |
| 874 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 875 | raw_spin_unlock_irq(&rq->lock); |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 876 | } |
| 877 | |
| 878 | #else /* __ARCH_WANT_UNLOCKED_CTXSW */ |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 879 | static inline int task_running(struct rq *rq, struct task_struct *p) |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 880 | { |
| 881 | #ifdef CONFIG_SMP |
| 882 | return p->oncpu; |
| 883 | #else |
Dmitry Adamushko | 051a1d1 | 2007-12-18 15:21:13 +0100 | [diff] [blame] | 884 | return task_current(rq, p); |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 885 | #endif |
| 886 | } |
| 887 | |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 888 | static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 889 | { |
| 890 | #ifdef CONFIG_SMP |
| 891 | /* |
| 892 | * We can optimise this out completely for !SMP, because the |
| 893 | * SMP rebalancing from interrupt is the only thing that cares |
| 894 | * here. |
| 895 | */ |
| 896 | next->oncpu = 1; |
| 897 | #endif |
| 898 | #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 899 | raw_spin_unlock_irq(&rq->lock); |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 900 | #else |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 901 | raw_spin_unlock(&rq->lock); |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 902 | #endif |
| 903 | } |
| 904 | |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 905 | static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 906 | { |
| 907 | #ifdef CONFIG_SMP |
| 908 | /* |
| 909 | * After ->oncpu is cleared, the task can be moved to a different CPU. |
| 910 | * We must ensure this doesn't happen until the switch is completely |
| 911 | * finished. |
| 912 | */ |
| 913 | smp_wmb(); |
| 914 | prev->oncpu = 0; |
| 915 | #endif |
| 916 | #ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW |
| 917 | local_irq_enable(); |
| 918 | #endif |
| 919 | } |
| 920 | #endif /* __ARCH_WANT_UNLOCKED_CTXSW */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 921 | |
| 922 | /* |
Peter Zijlstra | 65cc8e4 | 2010-03-25 21:05:16 +0100 | [diff] [blame] | 923 | * Check whether the task is waking; we use this to synchronize ->cpus_allowed |
| 924 | * against ttwu(). |
Peter Zijlstra | 0970d29 | 2010-02-15 14:45:54 +0100 | [diff] [blame] | 925 | */ |
| 926 | static inline int task_is_waking(struct task_struct *p) |
| 927 | { |
Peter Zijlstra | 0017d73 | 2010-03-24 18:34:10 +0100 | [diff] [blame] | 928 | return unlikely(p->state == TASK_WAKING); |
Peter Zijlstra | 0970d29 | 2010-02-15 14:45:54 +0100 | [diff] [blame] | 929 | } |
| 930 | |
| 931 | /* |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 932 | * __task_rq_lock - lock the runqueue a given task resides on. |
| 933 | * Must be called with interrupts disabled. |
| 934 | */ |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 935 | static inline struct rq *__task_rq_lock(struct task_struct *p) |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 936 | __acquires(rq->lock) |
| 937 | { |
Peter Zijlstra | 0970d29 | 2010-02-15 14:45:54 +0100 | [diff] [blame] | 938 | struct rq *rq; |
| 939 | |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 940 | for (;;) { |
Peter Zijlstra | 0970d29 | 2010-02-15 14:45:54 +0100 | [diff] [blame] | 941 | rq = task_rq(p); |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 942 | raw_spin_lock(&rq->lock); |
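		/*
		 * p may have been migrated to a different runqueue between
		 * the task_rq(p) lookup above and taking the lock; if so,
		 * drop the lock and retry until the lookup and the locked
		 * rq agree.
		 */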
Peter Zijlstra | 65cc8e4 | 2010-03-25 21:05:16 +0100 | [diff] [blame] | 943 | if (likely(rq == task_rq(p))) |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 944 | return rq; |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 945 | raw_spin_unlock(&rq->lock); |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 946 | } |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 947 | } |
| 948 | |
| 949 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 950 | * task_rq_lock - lock the runqueue a given task resides on and disable |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 951 | * interrupts. Note the ordering: we can safely look up the task_rq without |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 952 | * explicitly disabling preemption. |
| 953 | */ |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 954 | static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 955 | __acquires(rq->lock) |
| 956 | { |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 957 | struct rq *rq; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 958 | |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 959 | for (;;) { |
| 960 | local_irq_save(*flags); |
| 961 | rq = task_rq(p); |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 962 | raw_spin_lock(&rq->lock); |
Peter Zijlstra | 65cc8e4 | 2010-03-25 21:05:16 +0100 | [diff] [blame] | 963 | if (likely(rq == task_rq(p))) |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 964 | return rq; |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 965 | raw_spin_unlock_irqrestore(&rq->lock, *flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 966 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 967 | } |
| 968 | |
Alexey Dobriyan | a995744 | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 969 | static void __task_rq_unlock(struct rq *rq) |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 970 | __releases(rq->lock) |
| 971 | { |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 972 | raw_spin_unlock(&rq->lock); |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 973 | } |
| 974 | |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 975 | static inline void task_rq_unlock(struct rq *rq, unsigned long *flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 976 | __releases(rq->lock) |
| 977 | { |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 978 | raw_spin_unlock_irqrestore(&rq->lock, *flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 979 | } |
| 980 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 981 | /* |
Robert P. J. Day | cc2a73b | 2006-12-10 02:20:00 -0800 | [diff] [blame] | 982 | * this_rq_lock - lock this runqueue and disable interrupts. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 983 | */ |
Alexey Dobriyan | a995744 | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 984 | static struct rq *this_rq_lock(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 985 | __acquires(rq->lock) |
| 986 | { |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 987 | struct rq *rq; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 988 | |
| 989 | local_irq_disable(); |
| 990 | rq = this_rq(); |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 991 | raw_spin_lock(&rq->lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 992 | |
| 993 | return rq; |
| 994 | } |
| 995 | |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 996 | #ifdef CONFIG_SCHED_HRTICK |
| 997 | /* |
| 998 | * Use HR-timers to deliver accurate preemption points. |
| 999 | * |
| 1000 | * It's all a bit involved since we cannot program an hrtimer while holding the |
| 1001 | * rq->lock. So what we do is store a state in rq->hrtick_* and ask for a |
| 1002 | * reschedule event. |
| 1003 | * |
| 1004 | * When we get rescheduled we reprogram the hrtick_timer outside of the |
| 1005 | * rq->lock. |
| 1006 | */ |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1007 | |
| 1008 | /* |
| 1009 | * Use hrtick when: |
| 1010 | * - enabled by features |
| 1011 | * - hrtimer is actually high res |
| 1012 | */ |
| 1013 | static inline int hrtick_enabled(struct rq *rq) |
| 1014 | { |
| 1015 | if (!sched_feat(HRTICK)) |
| 1016 | return 0; |
Ingo Molnar | ba42059 | 2008-07-20 11:02:06 +0200 | [diff] [blame] | 1017 | if (!cpu_active(cpu_of(rq))) |
Peter Zijlstra | b328ca1 | 2008-04-29 10:02:46 +0200 | [diff] [blame] | 1018 | return 0; |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1019 | return hrtimer_is_hres_active(&rq->hrtick_timer); |
| 1020 | } |
| 1021 | |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1022 | static void hrtick_clear(struct rq *rq) |
| 1023 | { |
| 1024 | if (hrtimer_active(&rq->hrtick_timer)) |
| 1025 | hrtimer_cancel(&rq->hrtick_timer); |
| 1026 | } |
| 1027 | |
| 1028 | /* |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1029 | * High-resolution timer tick. |
| 1030 | * Runs from hardirq context with interrupts disabled. |
| 1031 | */ |
| 1032 | static enum hrtimer_restart hrtick(struct hrtimer *timer) |
| 1033 | { |
| 1034 | struct rq *rq = container_of(timer, struct rq, hrtick_timer); |
| 1035 | |
| 1036 | WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); |
| 1037 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1038 | raw_spin_lock(&rq->lock); |
Peter Zijlstra | 3e51f33 | 2008-05-03 18:29:28 +0200 | [diff] [blame] | 1039 | update_rq_clock(rq); |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1040 | rq->curr->sched_class->task_tick(rq, rq->curr, 1); |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1041 | raw_spin_unlock(&rq->lock); |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1042 | |
| 1043 | return HRTIMER_NORESTART; |
| 1044 | } |
| 1045 | |
Rabin Vincent | 95e904c | 2008-05-11 05:55:33 +0530 | [diff] [blame] | 1046 | #ifdef CONFIG_SMP |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1047 | /* |
| 1048 | * called from hardirq (IPI) context |
| 1049 | */ |
| 1050 | static void __hrtick_start(void *arg) |
Peter Zijlstra | b328ca1 | 2008-04-29 10:02:46 +0200 | [diff] [blame] | 1051 | { |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1052 | struct rq *rq = arg; |
Peter Zijlstra | b328ca1 | 2008-04-29 10:02:46 +0200 | [diff] [blame] | 1053 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1054 | raw_spin_lock(&rq->lock); |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1055 | hrtimer_restart(&rq->hrtick_timer); |
| 1056 | rq->hrtick_csd_pending = 0; |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1057 | raw_spin_unlock(&rq->lock); |
Peter Zijlstra | b328ca1 | 2008-04-29 10:02:46 +0200 | [diff] [blame] | 1058 | } |
| 1059 | |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1060 | /* |
| 1061 | * Called to set the hrtick timer state. |
| 1062 | * |
| 1063 | * called with rq->lock held and irqs disabled |
| 1064 | */ |
| 1065 | static void hrtick_start(struct rq *rq, u64 delay) |
Peter Zijlstra | b328ca1 | 2008-04-29 10:02:46 +0200 | [diff] [blame] | 1066 | { |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1067 | struct hrtimer *timer = &rq->hrtick_timer; |
| 1068 | ktime_t time = ktime_add_ns(timer->base->get_time(), delay); |
Peter Zijlstra | b328ca1 | 2008-04-29 10:02:46 +0200 | [diff] [blame] | 1069 | |
Arjan van de Ven | cc584b2 | 2008-09-01 15:02:30 -0700 | [diff] [blame] | 1070 | hrtimer_set_expires(timer, time); |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1071 | |
| 1072 | if (rq == this_rq()) { |
| 1073 | hrtimer_restart(timer); |
| 1074 | } else if (!rq->hrtick_csd_pending) { |
Peter Zijlstra | 6e27563 | 2009-02-25 13:59:48 +0100 | [diff] [blame] | 1075 | __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0); |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1076 | rq->hrtick_csd_pending = 1; |
| 1077 | } |
Peter Zijlstra | b328ca1 | 2008-04-29 10:02:46 +0200 | [diff] [blame] | 1078 | } |
| 1079 | |
| 1080 | static int |
| 1081 | hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu) |
| 1082 | { |
| 1083 | int cpu = (int)(long)hcpu; |
| 1084 | |
| 1085 | switch (action) { |
| 1086 | case CPU_UP_CANCELED: |
| 1087 | case CPU_UP_CANCELED_FROZEN: |
| 1088 | case CPU_DOWN_PREPARE: |
| 1089 | case CPU_DOWN_PREPARE_FROZEN: |
| 1090 | case CPU_DEAD: |
| 1091 | case CPU_DEAD_FROZEN: |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1092 | hrtick_clear(cpu_rq(cpu)); |
Peter Zijlstra | b328ca1 | 2008-04-29 10:02:46 +0200 | [diff] [blame] | 1093 | return NOTIFY_OK; |
| 1094 | } |
| 1095 | |
| 1096 | return NOTIFY_DONE; |
| 1097 | } |
| 1098 | |
Rakib Mullick | fa74820 | 2008-09-22 14:55:45 -0700 | [diff] [blame] | 1099 | static __init void init_hrtick(void) |
Peter Zijlstra | b328ca1 | 2008-04-29 10:02:46 +0200 | [diff] [blame] | 1100 | { |
| 1101 | hotcpu_notifier(hotplug_hrtick, 0); |
| 1102 | } |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1103 | #else |
| 1104 | /* |
| 1105 | * Called to set the hrtick timer state. |
| 1106 | * |
| 1107 | * called with rq->lock held and irqs disabled |
| 1108 | */ |
| 1109 | static void hrtick_start(struct rq *rq, u64 delay) |
| 1110 | { |
Peter Zijlstra | 7f1e2ca | 2009-03-13 12:21:27 +0100 | [diff] [blame] | 1111 | __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0, |
Arun R Bharadwaj | 5c33386 | 2009-04-16 12:14:37 +0530 | [diff] [blame] | 1112 | HRTIMER_MODE_REL_PINNED, 0); |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1113 | } |
| 1114 | |
Andrew Morton | 006c75f | 2008-09-22 14:55:46 -0700 | [diff] [blame] | 1115 | static inline void init_hrtick(void) |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1116 | { |
| 1117 | } |
Rabin Vincent | 95e904c | 2008-05-11 05:55:33 +0530 | [diff] [blame] | 1118 | #endif /* CONFIG_SMP */ |
Peter Zijlstra | b328ca1 | 2008-04-29 10:02:46 +0200 | [diff] [blame] | 1119 | |
| 1120 | static void init_rq_hrtick(struct rq *rq) |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1121 | { |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1122 | #ifdef CONFIG_SMP |
| 1123 | rq->hrtick_csd_pending = 0; |
| 1124 | |
| 1125 | rq->hrtick_csd.flags = 0; |
| 1126 | rq->hrtick_csd.func = __hrtick_start; |
| 1127 | rq->hrtick_csd.info = rq; |
| 1128 | #endif |
| 1129 | |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1130 | hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
| 1131 | rq->hrtick_timer.function = hrtick; |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1132 | } |
Andrew Morton | 006c75f | 2008-09-22 14:55:46 -0700 | [diff] [blame] | 1133 | #else /* CONFIG_SCHED_HRTICK */ |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1134 | static inline void hrtick_clear(struct rq *rq) |
| 1135 | { |
| 1136 | } |
| 1137 | |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1138 | static inline void init_rq_hrtick(struct rq *rq) |
| 1139 | { |
| 1140 | } |
| 1141 | |
Peter Zijlstra | b328ca1 | 2008-04-29 10:02:46 +0200 | [diff] [blame] | 1142 | static inline void init_hrtick(void) |
| 1143 | { |
| 1144 | } |
Andrew Morton | 006c75f | 2008-09-22 14:55:46 -0700 | [diff] [blame] | 1145 | #endif /* CONFIG_SCHED_HRTICK */ |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 1146 | |
Ingo Molnar | 1b9f19c | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1147 | /* |
Ingo Molnar | c24d20d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1148 | * resched_task - mark a task 'to be rescheduled now'. |
| 1149 | * |
| 1150 | * On UP this means setting the need_resched flag; on SMP it |
| 1151 | * might also involve a cross-CPU call to trigger the scheduler on |
| 1152 | * the target CPU. |
| 1153 | */ |
| 1154 | #ifdef CONFIG_SMP |
| 1155 | |
| 1156 | #ifndef tsk_is_polling |
| 1157 | #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) |
| 1158 | #endif |
| 1159 | |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1160 | static void resched_task(struct task_struct *p) |
Ingo Molnar | c24d20d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1161 | { |
| 1162 | int cpu; |
| 1163 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1164 | assert_raw_spin_locked(&task_rq(p)->lock); |
Ingo Molnar | c24d20d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1165 | |
Lai Jiangshan | 5ed0cec | 2009-03-06 19:40:20 +0800 | [diff] [blame] | 1166 | if (test_tsk_need_resched(p)) |
Ingo Molnar | c24d20d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1167 | return; |
| 1168 | |
Lai Jiangshan | 5ed0cec | 2009-03-06 19:40:20 +0800 | [diff] [blame] | 1169 | set_tsk_need_resched(p); |
Ingo Molnar | c24d20d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1170 | |
| 1171 | cpu = task_cpu(p); |
| 1172 | if (cpu == smp_processor_id()) |
| 1173 | return; |
| 1174 | |
| 1175 | /* NEED_RESCHED must be visible before we test polling */ |
| 1176 | smp_mb(); |
| 1177 | if (!tsk_is_polling(p)) |
| 1178 | smp_send_reschedule(cpu); |
| 1179 | } |
| 1180 | |
| 1181 | static void resched_cpu(int cpu) |
| 1182 | { |
| 1183 | struct rq *rq = cpu_rq(cpu); |
| 1184 | unsigned long flags; |
| 1185 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1186 | if (!raw_spin_trylock_irqsave(&rq->lock, flags)) |
Ingo Molnar | c24d20d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1187 | return; |
| 1188 | resched_task(cpu_curr(cpu)); |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1189 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
Ingo Molnar | c24d20d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1190 | } |
Thomas Gleixner | 06d8308 | 2008-03-22 09:20:24 +0100 | [diff] [blame] | 1191 | |
| 1192 | #ifdef CONFIG_NO_HZ |
| 1193 | /* |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 1194 | * In the semi-idle case, use the nearest busy cpu for migrating timers |
| 1195 | * from an idle cpu. This is good for power-savings. |
| 1196 | * |
| 1197 | * We don't do similar optimization for completely idle system, as |
| 1198 | * selecting an idle cpu will add more delays to the timers than intended |
| 1199 | * (as that cpu's timer base may not be up to date wrt jiffies etc). |
| 1200 | */ |
| 1201 | int get_nohz_timer_target(void) |
| 1202 | { |
| 1203 | int cpu = smp_processor_id(); |
| 1204 | int i; |
| 1205 | struct sched_domain *sd; |
| 1206 | |
| 1207 | for_each_domain(cpu, sd) { |
| 1208 | for_each_cpu(i, sched_domain_span(sd)) |
| 1209 | if (!idle_cpu(i)) |
| 1210 | return i; |
| 1211 | } |
| 1212 | return cpu; |
| 1213 | } |
| 1214 | /* |
Thomas Gleixner | 06d8308 | 2008-03-22 09:20:24 +0100 | [diff] [blame] | 1215 | * When add_timer_on() enqueues a timer into the timer wheel of an |
| 1216 | * idle CPU then this timer might expire before the next timer event |
| 1217 | * which is scheduled to wake up that CPU. In case of a completely |
| 1218 | * idle system the next event might even be infinite time into the |
| 1219 | * future. wake_up_idle_cpu() ensures that the CPU is woken up and |
| 1220 | * leaves the inner idle loop so the newly added timer is taken into |
| 1221 | * account when the CPU goes back to idle and evaluates the timer |
| 1222 | * wheel for the next timer event. |
| 1223 | */ |
| 1224 | void wake_up_idle_cpu(int cpu) |
| 1225 | { |
| 1226 | struct rq *rq = cpu_rq(cpu); |
| 1227 | |
| 1228 | if (cpu == smp_processor_id()) |
| 1229 | return; |
| 1230 | |
| 1231 | /* |
| 1232 | * This is safe, as this function is called with the timer |
| 1233 | * wheel base lock of (cpu) held. When the CPU is on the way |
| 1234 | * to idle and has not yet set rq->curr to idle then it will |
| 1235 | * be serialized on the timer wheel base lock and take the new |
| 1236 | * timer into account automatically. |
| 1237 | */ |
| 1238 | if (rq->curr != rq->idle) |
| 1239 | return; |
| 1240 | |
| 1241 | /* |
| 1242 | * We can set TIF_RESCHED on the idle task of the other CPU |
| 1243 | * lockless. The worst case is that the other CPU runs the |
| 1244 | * idle task through an additional NOOP schedule() |
| 1245 | */ |
Lai Jiangshan | 5ed0cec | 2009-03-06 19:40:20 +0800 | [diff] [blame] | 1246 | set_tsk_need_resched(rq->idle); |
Thomas Gleixner | 06d8308 | 2008-03-22 09:20:24 +0100 | [diff] [blame] | 1247 | |
| 1248 | /* NEED_RESCHED must be visible before we test polling */ |
| 1249 | smp_mb(); |
| 1250 | if (!tsk_is_polling(rq->idle)) |
| 1251 | smp_send_reschedule(cpu); |
| 1252 | } |
Mike Galbraith | 39c0cbe | 2010-03-11 17:17:13 +0100 | [diff] [blame] | 1253 | |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 1254 | #endif /* CONFIG_NO_HZ */ |
Thomas Gleixner | 06d8308 | 2008-03-22 09:20:24 +0100 | [diff] [blame] | 1255 | |
Peter Zijlstra | e9e9250 | 2009-09-01 10:34:37 +0200 | [diff] [blame] | 1256 | static u64 sched_avg_period(void) |
| 1257 | { |
| 1258 | return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2; |
| 1259 | } |
| 1260 | |
| 1261 | static void sched_avg_update(struct rq *rq) |
| 1262 | { |
| 1263 | s64 period = sched_avg_period(); |
| 1264 | |
| 1265 | while ((s64)(rq->clock - rq->age_stamp) > period) { |
Will Deacon | 0d98bb2 | 2010-05-24 12:11:43 -0700 | [diff] [blame] | 1266 | /* |
| 1267 | * Inline assembly required to prevent the compiler |
| 1268 | * optimising this loop into a divmod call. |
| 1269 | * See __iter_div_u64_rem() for another example of this. |
| 1270 | */ |
| 1271 | asm("" : "+rm" (rq->age_stamp)); |
Peter Zijlstra | e9e9250 | 2009-09-01 10:34:37 +0200 | [diff] [blame] | 1272 | rq->age_stamp += period; |
| 1273 | rq->rt_avg /= 2; |
| 1274 | } |
| 1275 | } |
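/*
 * rt_avg thus behaves as a geometrically decaying sum: every elapsed
 * sched_avg_period() (half the sysctl_sched_time_avg window) halves it.
 * As a worked example, if one period accumulates 100ms of RT runtime
 * and nothing further is added, three periods later rt_avg has decayed
 * to roughly 12.5ms.
 */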
| 1276 | |
| 1277 | static void sched_rt_avg_update(struct rq *rq, u64 rt_delta) |
| 1278 | { |
| 1279 | rq->rt_avg += rt_delta; |
| 1280 | sched_avg_update(rq); |
| 1281 | } |
| 1282 | |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 1283 | #else /* !CONFIG_SMP */ |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1284 | static void resched_task(struct task_struct *p) |
Ingo Molnar | c24d20d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1285 | { |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1286 | assert_raw_spin_locked(&task_rq(p)->lock); |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 1287 | set_tsk_need_resched(p); |
Ingo Molnar | c24d20d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1288 | } |
Peter Zijlstra | e9e9250 | 2009-09-01 10:34:37 +0200 | [diff] [blame] | 1289 | |
| 1290 | static void sched_rt_avg_update(struct rq *rq, u64 rt_delta) |
| 1291 | { |
| 1292 | } |
Suresh Siddha | da2b71e | 2010-08-23 13:42:51 -0700 | [diff] [blame] | 1293 | |
| 1294 | static void sched_avg_update(struct rq *rq) |
| 1295 | { |
| 1296 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 1297 | #endif /* CONFIG_SMP */ |
Ingo Molnar | c24d20d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1298 | |
Ingo Molnar | 45bf76d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1299 | #if BITS_PER_LONG == 32 |
| 1300 | # define WMULT_CONST (~0UL) |
| 1301 | #else |
| 1302 | # define WMULT_CONST (1UL << 32) |
| 1303 | #endif |
| 1304 | |
| 1305 | #define WMULT_SHIFT 32 |
| 1306 | |
Ingo Molnar | 194081e | 2007-08-09 11:16:51 +0200 | [diff] [blame] | 1307 | /* |
| 1308 | * Shift right and round: |
| 1309 | */ |
Ingo Molnar | cf2ab46 | 2007-09-05 14:32:49 +0200 | [diff] [blame] | 1310 | #define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y)) |
Ingo Molnar | 194081e | 2007-08-09 11:16:51 +0200 | [diff] [blame] | 1311 | |
Peter Zijlstra | a7be37a | 2008-06-27 13:41:11 +0200 | [diff] [blame] | 1312 | /* |
| 1313 | * delta *= weight / lw |
| 1314 | */ |
Ingo Molnar | cb1c4fc | 2007-08-02 17:41:40 +0200 | [diff] [blame] | 1315 | static unsigned long |
Ingo Molnar | 45bf76d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1316 | calc_delta_mine(unsigned long delta_exec, unsigned long weight, |
| 1317 | struct load_weight *lw) |
| 1318 | { |
| 1319 | u64 tmp; |
| 1320 | |
Lai Jiangshan | 7a232e0 | 2008-06-12 16:43:07 +0800 | [diff] [blame] | 1321 | if (!lw->inv_weight) { |
| 1322 | if (BITS_PER_LONG > 32 && unlikely(lw->weight >= WMULT_CONST)) |
| 1323 | lw->inv_weight = 1; |
| 1324 | else |
| 1325 | lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2) |
| 1326 | / (lw->weight+1); |
| 1327 | } |
Ingo Molnar | 45bf76d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1328 | |
| 1329 | tmp = (u64)delta_exec * weight; |
| 1330 | /* |
| 1331 | * Check whether we'd overflow the 64-bit multiplication: |
| 1332 | */ |
Ingo Molnar | 194081e | 2007-08-09 11:16:51 +0200 | [diff] [blame] | 1333 | if (unlikely(tmp > WMULT_CONST)) |
Ingo Molnar | cf2ab46 | 2007-09-05 14:32:49 +0200 | [diff] [blame] | 1334 | tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight, |
Ingo Molnar | 194081e | 2007-08-09 11:16:51 +0200 | [diff] [blame] | 1335 | WMULT_SHIFT/2); |
| 1336 | else |
Ingo Molnar | cf2ab46 | 2007-09-05 14:32:49 +0200 | [diff] [blame] | 1337 | tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT); |
Ingo Molnar | 45bf76d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1338 | |
Ingo Molnar | ecf691d | 2007-08-02 17:41:40 +0200 | [diff] [blame] | 1339 | return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX); |
Ingo Molnar | 45bf76d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1340 | } |
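/*
 * Worked example of the fixed-point math above (assuming 64-bit longs,
 * so WMULT_CONST == 2^32): for lw->weight == 2048, inv_weight comes out
 * at roughly 2^32 / 2048 == 2097152, and a delta_exec of 1000000ns at
 * weight == 1024 yields
 *
 *	SRR(1000000 * 1024 * 2097152, 32) ~= 500000ns
 *
 * i.e. approximately delta_exec * weight / lw->weight, computed with a
 * multiply and a shift instead of a division.
 */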
| 1341 | |
Ingo Molnar | 1091985 | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 1342 | static inline void update_load_add(struct load_weight *lw, unsigned long inc) |
Ingo Molnar | 45bf76d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1343 | { |
| 1344 | lw->weight += inc; |
Ingo Molnar | e89996a | 2008-03-14 23:48:28 +0100 | [diff] [blame] | 1345 | lw->inv_weight = 0; |
Ingo Molnar | 45bf76d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1346 | } |
| 1347 | |
Ingo Molnar | 1091985 | 2007-10-15 17:00:04 +0200 | [diff] [blame] | 1348 | static inline void update_load_sub(struct load_weight *lw, unsigned long dec) |
Ingo Molnar | 45bf76d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1349 | { |
| 1350 | lw->weight -= dec; |
Ingo Molnar | e89996a | 2008-03-14 23:48:28 +0100 | [diff] [blame] | 1351 | lw->inv_weight = 0; |
Ingo Molnar | 45bf76d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1352 | } |
| 1353 | |
Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 1354 | static inline void update_load_set(struct load_weight *lw, unsigned long w) |
| 1355 | { |
| 1356 | lw->weight = w; |
| 1357 | lw->inv_weight = 0; |
| 1358 | } |
| 1359 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1360 | /* |
Peter Williams | 2dd73a4 | 2006-06-27 02:54:34 -0700 | [diff] [blame] | 1361 | * To aid in avoiding the subversion of "niceness" due to uneven distribution |
| 1362 | * of tasks with abnormal "nice" values across CPUs, the contribution that |
| 1363 | * each task makes to its run queue's load is weighted according to its |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 1364 | * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a |
Peter Williams | 2dd73a4 | 2006-06-27 02:54:34 -0700 | [diff] [blame] | 1365 | * scaled version of the new time slice allocation that they receive on time |
| 1366 | * slice expiry etc. |
| 1367 | */ |
| 1368 | |
Peter Zijlstra | cce7ade | 2009-01-15 14:53:37 +0100 | [diff] [blame] | 1369 | #define WEIGHT_IDLEPRIO 3 |
| 1370 | #define WMULT_IDLEPRIO 1431655765 |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1371 | |
| 1372 | /* |
| 1373 | * Nice levels are multiplicative, with a gentle 10% change for every |
| 1374 | * nice level changed. I.e. when a CPU-bound task goes from nice 0 to |
| 1375 | * nice 1, it will get ~10% less CPU time than another CPU-bound task |
| 1376 | * that remained on nice 0. |
| 1377 | * |
| 1378 | * The "10% effect" is relative and cumulative: from _any_ nice level, |
| 1379 | * if you go up 1 level, it's -10% CPU usage, if you go down 1 level |
Ingo Molnar | f9153ee | 2007-07-16 09:46:30 +0200 | [diff] [blame] | 1380 | * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25. |
| 1381 | * If a task goes up by ~10% and another task goes down by ~10% then |
| 1382 | * the relative distance between them is ~25%.) |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1383 | */ |
| 1384 | static const int prio_to_weight[40] = { |
Ingo Molnar | 254753d | 2007-08-09 11:16:51 +0200 | [diff] [blame] | 1385 | /* -20 */ 88761, 71755, 56483, 46273, 36291, |
| 1386 | /* -15 */ 29154, 23254, 18705, 14949, 11916, |
| 1387 | /* -10 */ 9548, 7620, 6100, 4904, 3906, |
| 1388 | /* -5 */ 3121, 2501, 1991, 1586, 1277, |
| 1389 | /* 0 */ 1024, 820, 655, 526, 423, |
| 1390 | /* 5 */ 335, 272, 215, 172, 137, |
| 1391 | /* 10 */ 110, 87, 70, 56, 45, |
| 1392 | /* 15 */ 36, 29, 23, 18, 15, |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1393 | }; |
| 1394 | |
Ingo Molnar | 5714d2d | 2007-07-16 09:46:31 +0200 | [diff] [blame] | 1395 | /* |
| 1396 | * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated. |
| 1397 | * |
| 1398 | * In cases where the weight does not change often, we can use the |
| 1399 | * precalculated inverse to speed up arithmetics by turning divisions |
| 1400 | * into multiplications: |
| 1401 | */ |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1402 | static const u32 prio_to_wmult[40] = { |
Ingo Molnar | 254753d | 2007-08-09 11:16:51 +0200 | [diff] [blame] | 1403 | /* -20 */ 48388, 59856, 76040, 92818, 118348, |
| 1404 | /* -15 */ 147320, 184698, 229616, 287308, 360437, |
| 1405 | /* -10 */ 449829, 563644, 704093, 875809, 1099582, |
| 1406 | /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326, |
| 1407 | /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587, |
| 1408 | /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126, |
| 1409 | /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717, |
| 1410 | /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153, |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1411 | }; |
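/*
 * Worked example with the tables above: a nice-0 task has weight 1024
 * and a nice-1 task 820, so 1024/820 ~= 1.25, the ~25% relative step
 * described above; sharing one CPU they receive roughly 55% and 45% of
 * it respectively.  prio_to_wmult[] holds the matching 2^32/weight
 * values (e.g. 2^32/1024 == 4194304 for nice 0), which seed
 * load.inv_weight for the multiply-and-shift path in calc_delta_mine().
 */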
Peter Williams | 2dd73a4 | 2006-06-27 02:54:34 -0700 | [diff] [blame] | 1412 | |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 1413 | /* Time spent by the tasks of the cpu accounting group executing in ... */ |
| 1414 | enum cpuacct_stat_index { |
| 1415 | CPUACCT_STAT_USER, /* ... user mode */ |
| 1416 | CPUACCT_STAT_SYSTEM, /* ... kernel mode */ |
| 1417 | |
| 1418 | CPUACCT_STAT_NSTATS, |
| 1419 | }; |
| 1420 | |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 1421 | #ifdef CONFIG_CGROUP_CPUACCT |
| 1422 | static void cpuacct_charge(struct task_struct *tsk, u64 cputime); |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 1423 | static void cpuacct_update_stats(struct task_struct *tsk, |
| 1424 | enum cpuacct_stat_index idx, cputime_t val); |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 1425 | #else |
| 1426 | static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {} |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 1427 | static inline void cpuacct_update_stats(struct task_struct *tsk, |
| 1428 | enum cpuacct_stat_index idx, cputime_t val) {} |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 1429 | #endif |
| 1430 | |
Peter Zijlstra | 18d95a2 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 1431 | static inline void inc_cpu_load(struct rq *rq, unsigned long load) |
| 1432 | { |
| 1433 | update_load_add(&rq->load, load); |
| 1434 | } |
| 1435 | |
| 1436 | static inline void dec_cpu_load(struct rq *rq, unsigned long load) |
| 1437 | { |
| 1438 | update_load_sub(&rq->load, load); |
| 1439 | } |
| 1440 | |
Ingo Molnar | 7940ca3 | 2008-08-19 13:40:47 +0200 | [diff] [blame] | 1441 | #if (defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)) || defined(CONFIG_RT_GROUP_SCHED) |
Peter Zijlstra | eb75580 | 2008-08-19 12:33:05 +0200 | [diff] [blame] | 1442 | typedef int (*tg_visitor)(struct task_group *, void *); |
| 1443 | |
| 1444 | /* |
| 1445 | * Iterate the full tree, calling @down when first entering a node and @up when |
| 1446 | * leaving it for the final time. |
| 1447 | */ |
| 1448 | static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data) |
| 1449 | { |
| 1450 | struct task_group *parent, *child; |
| 1451 | int ret; |
| 1452 | |
| 1453 | rcu_read_lock(); |
| 1454 | parent = &root_task_group; |
| 1455 | down: |
| 1456 | ret = (*down)(parent, data); |
| 1457 | if (ret) |
| 1458 | goto out_unlock; |
| 1459 | list_for_each_entry_rcu(child, &parent->children, siblings) { |
| 1460 | parent = child; |
| 1461 | goto down; |
| 1462 | |
| 1463 | up: |
| 1464 | continue; |
| 1465 | } |
| 1466 | ret = (*up)(parent, data); |
| 1467 | if (ret) |
| 1468 | goto out_unlock; |
| 1469 | |
| 1470 | child = parent; |
| 1471 | parent = parent->parent; |
| 1472 | if (parent) |
| 1473 | goto up; |
| 1474 | out_unlock: |
| 1475 | rcu_read_unlock(); |
| 1476 | |
| 1477 | return ret; |
| 1478 | } |
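/*
 * Example call order for a root group with children A and B, where A
 * itself has a child A1:
 *
 *	down(root), down(A), down(A1), up(A1), up(A), down(B), up(B), up(root)
 *
 * i.e. @down is a pre-order visit and @up runs once all of a node's
 * children have been walked.
 */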
| 1479 | |
| 1480 | static int tg_nop(struct task_group *tg, void *data) |
| 1481 | { |
| 1482 | return 0; |
| 1483 | } |
| 1484 | #endif |
| 1485 | |
Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1486 | #ifdef CONFIG_SMP |
Peter Zijlstra | f5f08f3 | 2009-09-10 13:35:28 +0200 | [diff] [blame] | 1487 | /* Used instead of source_load when we know the type == 0 */ |
| 1488 | static unsigned long weighted_cpuload(const int cpu) |
| 1489 | { |
| 1490 | return cpu_rq(cpu)->load.weight; |
| 1491 | } |
| 1492 | |
| 1493 | /* |
| 1494 | * Return a low guess at the load of a migration-source cpu weighted |
| 1495 | * according to the scheduling class and "nice" value. |
| 1496 | * |
| 1497 | * We want to under-estimate the load of migration sources, to |
| 1498 | * balance conservatively. |
| 1499 | */ |
| 1500 | static unsigned long source_load(int cpu, int type) |
| 1501 | { |
| 1502 | struct rq *rq = cpu_rq(cpu); |
| 1503 | unsigned long total = weighted_cpuload(cpu); |
| 1504 | |
| 1505 | if (type == 0 || !sched_feat(LB_BIAS)) |
| 1506 | return total; |
| 1507 | |
| 1508 | return min(rq->cpu_load[type-1], total); |
| 1509 | } |
| 1510 | |
| 1511 | /* |
| 1512 | * Return a high guess at the load of a migration-target cpu weighted |
| 1513 | * according to the scheduling class and "nice" value. |
| 1514 | */ |
| 1515 | static unsigned long target_load(int cpu, int type) |
| 1516 | { |
| 1517 | struct rq *rq = cpu_rq(cpu); |
| 1518 | unsigned long total = weighted_cpuload(cpu); |
| 1519 | |
| 1520 | if (type == 0 || !sched_feat(LB_BIAS)) |
| 1521 | return total; |
| 1522 | |
| 1523 | return max(rq->cpu_load[type-1], total); |
| 1524 | } |
| 1525 | |
Peter Zijlstra | ae154be | 2009-09-10 14:40:57 +0200 | [diff] [blame] | 1526 | static unsigned long power_of(int cpu) |
| 1527 | { |
Peter Zijlstra | e51fd5e | 2010-05-31 12:37:30 +0200 | [diff] [blame] | 1528 | return cpu_rq(cpu)->cpu_power; |
Peter Zijlstra | ae154be | 2009-09-10 14:40:57 +0200 | [diff] [blame] | 1529 | } |
| 1530 | |
Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 1531 | static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd); |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1532 | |
Peter Zijlstra | a8a51d5 | 2008-06-27 13:41:26 +0200 | [diff] [blame] | 1533 | static unsigned long cpu_avg_load_per_task(int cpu) |
| 1534 | { |
| 1535 | struct rq *rq = cpu_rq(cpu); |
Ingo Molnar | af6d596 | 2008-11-29 20:45:15 +0100 | [diff] [blame] | 1536 | unsigned long nr_running = ACCESS_ONCE(rq->nr_running); |
Peter Zijlstra | a8a51d5 | 2008-06-27 13:41:26 +0200 | [diff] [blame] | 1537 | |
Steven Rostedt | 4cd4262 | 2008-11-26 21:04:24 -0500 | [diff] [blame] | 1538 | if (nr_running) |
| 1539 | rq->avg_load_per_task = rq->load.weight / nr_running; |
Balbir Singh | a2d4777 | 2008-11-12 16:19:00 +0530 | [diff] [blame] | 1540 | else |
| 1541 | rq->avg_load_per_task = 0; |
Peter Zijlstra | a8a51d5 | 2008-06-27 13:41:26 +0200 | [diff] [blame] | 1542 | |
| 1543 | return rq->avg_load_per_task; |
| 1544 | } |
| 1545 | |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1546 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 1547 | |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1548 | /* |
Peter Zijlstra | c8cba85 | 2008-06-27 13:41:23 +0200 | [diff] [blame] | 1549 | * Compute the cpu's hierarchical load factor for each task group. |
| 1550 | * This needs to be done in a top-down fashion because the load of a child |
| 1551 | * group is a fraction of its parent's load. |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1552 | */ |
Peter Zijlstra | eb75580 | 2008-08-19 12:33:05 +0200 | [diff] [blame] | 1553 | static int tg_load_down(struct task_group *tg, void *data) |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1554 | { |
Peter Zijlstra | c8cba85 | 2008-06-27 13:41:23 +0200 | [diff] [blame] | 1555 | unsigned long load; |
Peter Zijlstra | eb75580 | 2008-08-19 12:33:05 +0200 | [diff] [blame] | 1556 | long cpu = (long)data; |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1557 | |
Peter Zijlstra | c8cba85 | 2008-06-27 13:41:23 +0200 | [diff] [blame] | 1558 | if (!tg->parent) { |
| 1559 | load = cpu_rq(cpu)->load.weight; |
| 1560 | } else { |
| 1561 | load = tg->parent->cfs_rq[cpu]->h_load; |
Peter Zijlstra | 2069dd7 | 2010-11-15 15:47:00 -0800 | [diff] [blame] | 1562 | load *= tg->se[cpu]->load.weight; |
Peter Zijlstra | c8cba85 | 2008-06-27 13:41:23 +0200 | [diff] [blame] | 1563 | load /= tg->parent->cfs_rq[cpu]->load.weight + 1; |
| 1564 | } |
| 1565 | |
| 1566 | tg->cfs_rq[cpu]->h_load = load; |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1567 | |
Peter Zijlstra | eb75580 | 2008-08-19 12:33:05 +0200 | [diff] [blame] | 1568 | return 0; |
Peter Zijlstra | 4d8d595 | 2008-06-27 13:41:19 +0200 | [diff] [blame] | 1569 | } |
| 1570 | |
Peter Zijlstra | eb75580 | 2008-08-19 12:33:05 +0200 | [diff] [blame] | 1571 | static void update_h_load(long cpu) |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1572 | { |
Peter Zijlstra | eb75580 | 2008-08-19 12:33:05 +0200 | [diff] [blame] | 1573 | walk_tg_tree(tg_load_down, tg_nop, (void *)cpu); |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1574 | } |
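/*
 * Worked example: if the parent group's h_load on this cpu is 2048,
 * this group's se carries a weight of 512 on the parent's cfs_rq and
 * that cfs_rq's total weight is 1024, then
 *
 *	h_load = 2048 * 512 / (1024 + 1) ~= 1023
 *
 * i.e. roughly half of the parent's hierarchical load is attributed to
 * this group; the +1 merely guards against a zero parent weight.
 */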
| 1575 | |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1576 | #endif |
| 1577 | |
Gregory Haskins | 8f45e2b | 2008-12-29 09:39:51 -0500 | [diff] [blame] | 1578 | #ifdef CONFIG_PREEMPT |
| 1579 | |
Peter Zijlstra | b78bb86 | 2009-09-15 14:23:18 +0200 | [diff] [blame] | 1580 | static void double_rq_lock(struct rq *rq1, struct rq *rq2); |
| 1581 | |
Alexey Dobriyan | 70574a9 | 2008-11-28 22:08:00 +0300 | [diff] [blame] | 1582 | /* |
Gregory Haskins | 8f45e2b | 2008-12-29 09:39:51 -0500 | [diff] [blame] | 1583 | * fair double_lock_balance: Safely acquires both rq->locks in a fair |
| 1584 | * way at the expense of forcing extra atomic operations in all |
| 1585 | * invocations. This ensures that the double_lock is acquired using the |
| 1586 | * same underlying policy as the spinlock_t on this architecture, which |
| 1587 | * reduces latency compared to the unfair variant below. However, it |
| 1588 | * also adds more overhead and therefore may reduce throughput. |
Alexey Dobriyan | 70574a9 | 2008-11-28 22:08:00 +0300 | [diff] [blame] | 1589 | */ |
Gregory Haskins | 8f45e2b | 2008-12-29 09:39:51 -0500 | [diff] [blame] | 1590 | static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) |
| 1591 | __releases(this_rq->lock) |
| 1592 | __acquires(busiest->lock) |
| 1593 | __acquires(this_rq->lock) |
| 1594 | { |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1595 | raw_spin_unlock(&this_rq->lock); |
Gregory Haskins | 8f45e2b | 2008-12-29 09:39:51 -0500 | [diff] [blame] | 1596 | double_rq_lock(this_rq, busiest); |
| 1597 | |
| 1598 | return 1; |
| 1599 | } |
| 1600 | |
| 1601 | #else |
| 1602 | /* |
| 1603 | * Unfair double_lock_balance: Optimizes throughput at the expense of |
| 1604 | * latency by eliminating extra atomic operations when the locks are |
| 1605 | * already in proper order on entry. This favors lower cpu-ids and will |
| 1606 | * grant the double lock to lower cpus over higher ids under contention, |
| 1607 | * regardless of entry order into the function. |
| 1608 | */ |
| 1609 | static int _double_lock_balance(struct rq *this_rq, struct rq *busiest) |
Alexey Dobriyan | 70574a9 | 2008-11-28 22:08:00 +0300 | [diff] [blame] | 1610 | __releases(this_rq->lock) |
| 1611 | __acquires(busiest->lock) |
| 1612 | __acquires(this_rq->lock) |
| 1613 | { |
| 1614 | int ret = 0; |
| 1615 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1616 | if (unlikely(!raw_spin_trylock(&busiest->lock))) { |
Alexey Dobriyan | 70574a9 | 2008-11-28 22:08:00 +0300 | [diff] [blame] | 1617 | if (busiest < this_rq) { |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1618 | raw_spin_unlock(&this_rq->lock); |
| 1619 | raw_spin_lock(&busiest->lock); |
| 1620 | raw_spin_lock_nested(&this_rq->lock, |
| 1621 | SINGLE_DEPTH_NESTING); |
Alexey Dobriyan | 70574a9 | 2008-11-28 22:08:00 +0300 | [diff] [blame] | 1622 | ret = 1; |
| 1623 | } else |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1624 | raw_spin_lock_nested(&busiest->lock, |
| 1625 | SINGLE_DEPTH_NESTING); |
Alexey Dobriyan | 70574a9 | 2008-11-28 22:08:00 +0300 | [diff] [blame] | 1626 | } |
| 1627 | return ret; |
| 1628 | } |
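/*
 * Example: if CPU0 holds rq0->lock and wants rq1->lock while CPU1
 * holds rq1->lock and wants rq0->lock, the fallback always acquires
 * the lower-addressed runqueue first, so one side drops its own lock
 * and re-acquires in (rq0, rq1) order and the ABBA deadlock cannot
 * occur.  The return value tells the caller whether this_rq was
 * dropped and re-taken, i.e. whether its state may have changed
 * underneath it.
 */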
| 1629 | |
Gregory Haskins | 8f45e2b | 2008-12-29 09:39:51 -0500 | [diff] [blame] | 1630 | #endif /* CONFIG_PREEMPT */ |
| 1631 | |
| 1632 | /* |
| 1633 | * double_lock_balance - lock the busiest runqueue, this_rq is locked already. |
| 1634 | */ |
| 1635 | static int double_lock_balance(struct rq *this_rq, struct rq *busiest) |
| 1636 | { |
| 1637 | if (unlikely(!irqs_disabled())) { |
| 1638 | /* printk() doesn't work well under rq->lock */ |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1639 | raw_spin_unlock(&this_rq->lock); |
Gregory Haskins | 8f45e2b | 2008-12-29 09:39:51 -0500 | [diff] [blame] | 1640 | BUG_ON(1); |
| 1641 | } |
| 1642 | |
| 1643 | return _double_lock_balance(this_rq, busiest); |
| 1644 | } |
| 1645 | |
Alexey Dobriyan | 70574a9 | 2008-11-28 22:08:00 +0300 | [diff] [blame] | 1646 | static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) |
| 1647 | __releases(busiest->lock) |
| 1648 | { |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 1649 | raw_spin_unlock(&busiest->lock); |
Alexey Dobriyan | 70574a9 | 2008-11-28 22:08:00 +0300 | [diff] [blame] | 1650 | lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); |
| 1651 | } |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 1652 | |
| 1653 | /* |
| 1654 | * double_rq_lock - safely lock two runqueues |
| 1655 | * |
| 1656 | * Note this does not disable interrupts like task_rq_lock, |
| 1657 | * you need to do so manually before calling. |
| 1658 | */ |
| 1659 | static void double_rq_lock(struct rq *rq1, struct rq *rq2) |
| 1660 | __acquires(rq1->lock) |
| 1661 | __acquires(rq2->lock) |
| 1662 | { |
| 1663 | BUG_ON(!irqs_disabled()); |
| 1664 | if (rq1 == rq2) { |
| 1665 | raw_spin_lock(&rq1->lock); |
| 1666 | __acquire(rq2->lock); /* Fake it out ;) */ |
| 1667 | } else { |
| 1668 | if (rq1 < rq2) { |
| 1669 | raw_spin_lock(&rq1->lock); |
| 1670 | raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); |
| 1671 | } else { |
| 1672 | raw_spin_lock(&rq2->lock); |
| 1673 | raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); |
| 1674 | } |
| 1675 | } |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 1676 | } |
| 1677 | |
| 1678 | /* |
| 1679 | * double_rq_unlock - safely unlock two runqueues |
| 1680 | * |
| 1681 | * Note this does not restore interrupts like task_rq_unlock, |
| 1682 | * you need to do so manually after calling. |
| 1683 | */ |
| 1684 | static void double_rq_unlock(struct rq *rq1, struct rq *rq2) |
| 1685 | __releases(rq1->lock) |
| 1686 | __releases(rq2->lock) |
| 1687 | { |
| 1688 | raw_spin_unlock(&rq1->lock); |
| 1689 | if (rq1 != rq2) |
| 1690 | raw_spin_unlock(&rq2->lock); |
| 1691 | else |
| 1692 | __release(rq2->lock); |
| 1693 | } |
| 1694 | |
Peter Zijlstra | 18d95a2 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 1695 | #endif |
Peter Zijlstra | 18d95a2 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 1696 | |
Peter Zijlstra | 74f5187 | 2010-04-22 21:50:19 +0200 | [diff] [blame] | 1697 | static void calc_load_account_idle(struct rq *this_rq); |
Christian Ehrhardt | 0bcdcf2 | 2009-11-30 12:16:46 +0100 | [diff] [blame] | 1698 | static void update_sysctl(void); |
Christian Ehrhardt | acb4a84 | 2009-11-30 12:16:48 +0100 | [diff] [blame] | 1699 | static int get_update_sysctl_factor(void); |
Venkatesh Pallipadi | fdf3e95 | 2010-05-17 18:14:43 -0700 | [diff] [blame] | 1700 | static void update_cpu_load(struct rq *this_rq); |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 1701 | |
Peter Zijlstra | cd29fe6 | 2009-11-27 17:32:46 +0100 | [diff] [blame] | 1702 | static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) |
| 1703 | { |
| 1704 | set_task_rq(p, cpu); |
| 1705 | #ifdef CONFIG_SMP |
| 1706 | /* |
| 1707 | * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be |
| 1708 | * successfully executed on another CPU. We must ensure that updates of |
| 1709 | * per-task data have been completed by this moment. |
| 1710 | */ |
| 1711 | smp_wmb(); |
| 1712 | task_thread_info(p)->cpu = cpu; |
| 1713 | #endif |
| 1714 | } |
Peter Zijlstra | 18d95a2 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 1715 | |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 1716 | static const struct sched_class rt_sched_class; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1717 | |
Peter Zijlstra | 34f971f | 2010-09-22 13:53:15 +0200 | [diff] [blame] | 1718 | #define sched_class_highest (&stop_sched_class) |
Gregory Haskins | 1f11eb6 | 2008-06-04 15:04:05 -0400 | [diff] [blame] | 1719 | #define for_each_class(class) \ |
| 1720 | for (class = sched_class_highest; class; class = class->next) |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1721 | |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 1722 | #include "sched_stats.h" |
| 1723 | |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1724 | static void inc_nr_running(struct rq *rq) |
Ingo Molnar | 6363ca5 | 2008-05-29 11:28:57 +0200 | [diff] [blame] | 1725 | { |
| 1726 | rq->nr_running++; |
Ingo Molnar | 6363ca5 | 2008-05-29 11:28:57 +0200 | [diff] [blame] | 1727 | } |
| 1728 | |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 1729 | static void dec_nr_running(struct rq *rq) |
Ingo Molnar | 9c21724 | 2007-08-02 17:41:40 +0200 | [diff] [blame] | 1730 | { |
| 1731 | rq->nr_running--; |
Ingo Molnar | 9c21724 | 2007-08-02 17:41:40 +0200 | [diff] [blame] | 1732 | } |
| 1733 | |
Ingo Molnar | 45bf76d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1734 | static void set_load_weight(struct task_struct *p) |
| 1735 | { |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1736 | /* |
| 1737 | * SCHED_IDLE tasks get minimal weight: |
| 1738 | */ |
| 1739 | if (p->policy == SCHED_IDLE) { |
| 1740 | p->se.load.weight = WEIGHT_IDLEPRIO; |
| 1741 | p->se.load.inv_weight = WMULT_IDLEPRIO; |
| 1742 | return; |
| 1743 | } |
| 1744 | |
| 1745 | p->se.load.weight = prio_to_weight[p->static_prio - MAX_RT_PRIO]; |
| 1746 | p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO]; |
Ingo Molnar | 45bf76d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1747 | } |
| 1748 | |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1749 | static void enqueue_task(struct rq *rq, struct task_struct *p, int flags) |
Gregory Haskins | 2087a1a | 2008-06-27 14:30:00 -0600 | [diff] [blame] | 1750 | { |
Mike Galbraith | a64692a | 2010-03-11 17:16:20 +0100 | [diff] [blame] | 1751 | update_rq_clock(rq); |
Ingo Molnar | 71f8bd4 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1752 | sched_info_queued(p); |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1753 | p->sched_class->enqueue_task(rq, p, flags); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1754 | p->se.on_rq = 1; |
| 1755 | } |
| 1756 | |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1757 | static void dequeue_task(struct rq *rq, struct task_struct *p, int flags) |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1758 | { |
Mike Galbraith | a64692a | 2010-03-11 17:16:20 +0100 | [diff] [blame] | 1759 | update_rq_clock(rq); |
Ankita Garg | 46ac22b | 2008-07-01 14:30:06 +0530 | [diff] [blame] | 1760 | sched_info_dequeued(p); |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1761 | p->sched_class->dequeue_task(rq, p, flags); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1762 | p->se.on_rq = 0; |
Ingo Molnar | 71f8bd4 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1763 | } |
| 1764 | |
| 1765 | /* |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 1766 | * activate_task - move a task to the runqueue. |
| 1767 | */ |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1768 | static void activate_task(struct rq *rq, struct task_struct *p, int flags) |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 1769 | { |
| 1770 | if (task_contributes_to_load(p)) |
| 1771 | rq->nr_uninterruptible--; |
| 1772 | |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1773 | enqueue_task(rq, p, flags); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 1774 | inc_nr_running(rq); |
| 1775 | } |
| 1776 | |
| 1777 | /* |
| 1778 | * deactivate_task - remove a task from the runqueue. |
| 1779 | */ |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1780 | static void deactivate_task(struct rq *rq, struct task_struct *p, int flags) |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 1781 | { |
| 1782 | if (task_contributes_to_load(p)) |
| 1783 | rq->nr_uninterruptible++; |
| 1784 | |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 1785 | dequeue_task(rq, p, flags); |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 1786 | dec_nr_running(rq); |
| 1787 | } |
| 1788 | |
Venkatesh Pallipadi | b52bfee | 2010-10-04 17:03:19 -0700 | [diff] [blame] | 1789 | #ifdef CONFIG_IRQ_TIME_ACCOUNTING |
| 1790 | |
Venkatesh Pallipadi | 305e683 | 2010-10-04 17:03:21 -0700 | [diff] [blame] | 1791 | /* |
| 1792 | * There are no locks covering percpu hardirq/softirq time. |
| 1793 | * They are only modified in account_system_vtime, on the corresponding CPU |
| 1794 | * with interrupts disabled. So, writes are safe. |
| 1795 | * They are read and saved off onto struct rq in update_rq_clock(). |
| 1796 | * This may result in another CPU reading this CPU's irq time and racing |
| 1797 | * with irq/account_system_vtime on this CPU. We would either get the old |
| 1798 | * or the new value (or a semi-updated value on 32 bit) with a side effect of |
| 1799 | * accounting a slice of irq time to the wrong task when an irq is in progress |
| 1800 | * while we read rq->clock. That is a worthy compromise in place of having |
| 1801 | * locks on each irq in account_system_time. |
| 1802 | */ |
Venkatesh Pallipadi | b52bfee | 2010-10-04 17:03:19 -0700 | [diff] [blame] | 1803 | static DEFINE_PER_CPU(u64, cpu_hardirq_time); |
| 1804 | static DEFINE_PER_CPU(u64, cpu_softirq_time); |
| 1805 | |
| 1806 | static DEFINE_PER_CPU(u64, irq_start_time); |
| 1807 | static int sched_clock_irqtime; |
| 1808 | |
| 1809 | void enable_sched_clock_irqtime(void) |
| 1810 | { |
| 1811 | sched_clock_irqtime = 1; |
| 1812 | } |
| 1813 | |
| 1814 | void disable_sched_clock_irqtime(void) |
| 1815 | { |
| 1816 | sched_clock_irqtime = 0; |
| 1817 | } |
| 1818 | |
Venkatesh Pallipadi | 305e683 | 2010-10-04 17:03:21 -0700 | [diff] [blame] | 1819 | static u64 irq_time_cpu(int cpu) |
| 1820 | { |
| 1821 | if (!sched_clock_irqtime) |
| 1822 | return 0; |
| 1823 | |
| 1824 | return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu); |
| 1825 | } |
| 1826 | |
Venkatesh Pallipadi | b52bfee | 2010-10-04 17:03:19 -0700 | [diff] [blame] | 1827 | void account_system_vtime(struct task_struct *curr) |
| 1828 | { |
| 1829 | unsigned long flags; |
| 1830 | int cpu; |
| 1831 | u64 now, delta; |
| 1832 | |
| 1833 | if (!sched_clock_irqtime) |
| 1834 | return; |
| 1835 | |
| 1836 | local_irq_save(flags); |
| 1837 | |
Venkatesh Pallipadi | b52bfee | 2010-10-04 17:03:19 -0700 | [diff] [blame] | 1838 | cpu = smp_processor_id(); |
Venkatesh Pallipadi | d267f87 | 2010-10-04 17:03:23 -0700 | [diff] [blame] | 1839 | now = sched_clock_cpu(cpu); |
Venkatesh Pallipadi | b52bfee | 2010-10-04 17:03:19 -0700 | [diff] [blame] | 1840 | delta = now - per_cpu(irq_start_time, cpu); |
| 1841 | per_cpu(irq_start_time, cpu) = now; |
| 1842 | /* |
| 1843 | * We do not account for softirq time from ksoftirqd here. |
| 1844 | * We want to continue accounting softirq time to the ksoftirqd thread |
| 1845 | * in that case, so as not to confuse the scheduler with a special task |
| 1846 | * that does not consume any time but still wants to run. |
| 1847 | */ |
| 1848 | if (hardirq_count()) |
| 1849 | per_cpu(cpu_hardirq_time, cpu) += delta; |
| 1850 | else if (in_serving_softirq() && !(curr->flags & PF_KSOFTIRQD)) |
| 1851 | per_cpu(cpu_softirq_time, cpu) += delta; |
| 1852 | |
| 1853 | local_irq_restore(flags); |
| 1854 | } |
Ingo Molnar | b7dadc3 | 2010-10-18 20:00:37 +0200 | [diff] [blame] | 1855 | EXPORT_SYMBOL_GPL(account_system_vtime); |
Venkatesh Pallipadi | b52bfee | 2010-10-04 17:03:19 -0700 | [diff] [blame] | 1856 | |
Venkatesh Pallipadi | aa48380 | 2010-10-04 17:03:22 -0700 | [diff] [blame] | 1857 | static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time) |
| 1858 | { |
| 1859 | if (sched_clock_irqtime && sched_feat(NONIRQ_POWER)) { |
| 1860 | u64 delta_irq = curr_irq_time - rq->prev_irq_time; |
| 1861 | rq->prev_irq_time = curr_irq_time; |
| 1862 | sched_rt_avg_update(rq, delta_irq); |
| 1863 | } |
| 1864 | } |
| 1865 | |
Venkatesh Pallipadi | 305e683 | 2010-10-04 17:03:21 -0700 | [diff] [blame] | 1866 | #else |
| 1867 | |
| 1868 | static u64 irq_time_cpu(int cpu) |
| 1869 | { |
| 1870 | return 0; |
| 1871 | } |
| 1872 | |
Venkatesh Pallipadi | aa48380 | 2010-10-04 17:03:22 -0700 | [diff] [blame] | 1873 | static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time) { } |
| 1874 | |
Venkatesh Pallipadi | b52bfee | 2010-10-04 17:03:19 -0700 | [diff] [blame] | 1875 | #endif |
| 1876 | |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 1877 | #include "sched_idletask.c" |
| 1878 | #include "sched_fair.c" |
| 1879 | #include "sched_rt.c" |
Mike Galbraith | 5091faa | 2010-11-30 14:18:03 +0100 | [diff] [blame] | 1880 | #include "sched_autogroup.c" |
Peter Zijlstra | 34f971f | 2010-09-22 13:53:15 +0200 | [diff] [blame] | 1881 | #include "sched_stoptask.c" |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 1882 | #ifdef CONFIG_SCHED_DEBUG |
| 1883 | # include "sched_debug.c" |
| 1884 | #endif |
| 1885 | |
Peter Zijlstra | 34f971f | 2010-09-22 13:53:15 +0200 | [diff] [blame] | 1886 | void sched_set_stop_task(int cpu, struct task_struct *stop) |
| 1887 | { |
| 1888 | struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; |
| 1889 | struct task_struct *old_stop = cpu_rq(cpu)->stop; |
| 1890 | |
| 1891 | if (stop) { |
| 1892 | /* |
| 1893 | * Make it appear like a SCHED_FIFO task; it's something |
| 1894 | * userspace knows about and won't get confused by. |
| 1895 | * |
| 1896 | * Also, it will make PI more or less work without too |
| 1897 | * much confusion -- but then, stop work should not |
| 1898 | * rely on PI working anyway. |
| 1899 | */ |
| 1900 | sched_setscheduler_nocheck(stop, SCHED_FIFO, ¶m); |
| 1901 | |
| 1902 | stop->sched_class = &stop_sched_class; |
| 1903 | } |
| 1904 | |
| 1905 | cpu_rq(cpu)->stop = stop; |
| 1906 | |
| 1907 | if (old_stop) { |
| 1908 | /* |
| 1909 | * Reset it back to a normal scheduling class so that |
| 1910 | * it can die in pieces. |
| 1911 | */ |
| 1912 | old_stop->sched_class = &rt_sched_class; |
| 1913 | } |
| 1914 | } |
| 1915 | |
Peter Zijlstra | 1e3c88b | 2009-12-17 17:00:43 +0100 | [diff] [blame] | 1916 | /* |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1917 | * __normal_prio - return the priority that is based on the static prio |
Ingo Molnar | 71f8bd4 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1918 | */ |
Ingo Molnar | 1453118 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1919 | static inline int __normal_prio(struct task_struct *p) |
| 1920 | { |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1921 | return p->static_prio; |
Ingo Molnar | 1453118 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1922 | } |
| 1923 | |
| 1924 | /* |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 1925 | * Calculate the expected normal priority: i.e. priority |
| 1926 | * without taking RT-inheritance into account. Might be |
| 1927 | * boosted by interactivity modifiers. Changes upon fork, |
| 1928 | * setprio syscalls, and whenever the interactivity |
| 1929 | * estimator recalculates. |
| 1930 | */ |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 1931 | static inline int normal_prio(struct task_struct *p) |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 1932 | { |
| 1933 | int prio; |
| 1934 | |
Ingo Molnar | e05606d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 1935 | if (task_has_rt_policy(p)) |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 1936 | prio = MAX_RT_PRIO-1 - p->rt_priority; |
| 1937 | else |
| 1938 | prio = __normal_prio(p); |
| 1939 | return prio; |
| 1940 | } |
| 1941 | |
| 1942 | /* |
| 1943 | * Calculate the current priority, i.e. the priority |
| 1944 | * taken into account by the scheduler. This value might |
| 1945 | * be boosted by RT tasks, or might be boosted by |
| 1946 | * interactivity modifiers. Will be RT if the task got |
| 1947 | * RT-boosted. If not then it returns p->normal_prio. |
| 1948 | */ |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 1949 | static int effective_prio(struct task_struct *p) |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 1950 | { |
| 1951 | p->normal_prio = normal_prio(p); |
| 1952 | /* |
| 1953 | * If we are RT tasks or we were boosted to RT priority, |
| 1954 | * keep the priority unchanged. Otherwise, update priority |
| 1955 | * to the normal priority: |
| 1956 | */ |
| 1957 | if (!rt_prio(p->prio)) |
| 1958 | return p->normal_prio; |
| 1959 | return p->prio; |
| 1960 | } |
| 1961 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1962 | /** |
| 1963 | * task_curr - is this task currently executing on a CPU? |
| 1964 | * @p: the task in question. |
| 1965 | */ |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 1966 | inline int task_curr(const struct task_struct *p) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1967 | { |
| 1968 | return cpu_curr(task_cpu(p)) == p; |
| 1969 | } |
| 1970 | |
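| | /* |
| | * Let a task's scheduling class react to a change: if @p switched to a |
| | * new class, call the old class's switched_from() and the new class's |
| | * switched_to(); otherwise only the priority changed, so call the |
| | * class's prio_changed(). |
| | */ |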
Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 1971 | static inline void check_class_changed(struct rq *rq, struct task_struct *p, |
| 1972 | const struct sched_class *prev_class, |
| 1973 | int oldprio, int running) |
| 1974 | { |
| 1975 | if (prev_class != p->sched_class) { |
| 1976 | if (prev_class->switched_from) |
| 1977 | prev_class->switched_from(rq, p, running); |
| 1978 | p->sched_class->switched_to(rq, p, running); |
| 1979 | } else |
| 1980 | p->sched_class->prio_changed(rq, p, oldprio, running); |
| 1981 | } |
| 1982 | |
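| | /* |
| | * Decide whether the newly woken/queued task @p should preempt rq->curr: |
| | * within the same class the class's own check_preempt_curr() decides; |
| | * across classes, a task from a higher-priority class always preempts. |
| | */ |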
Peter Zijlstra | 1e5a740 | 2010-10-31 12:37:04 +0100 | [diff] [blame] | 1983 | static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) |
| 1984 | { |
| 1985 | const struct sched_class *class; |
| 1986 | |
| 1987 | if (p->sched_class == rq->curr->sched_class) { |
| 1988 | rq->curr->sched_class->check_preempt_curr(rq, p, flags); |
| 1989 | } else { |
| 1990 | for_each_class(class) { |
| 1991 | if (class == rq->curr->sched_class) |
| 1992 | break; |
| 1993 | if (class == p->sched_class) { |
| 1994 | resched_task(rq->curr); |
| 1995 | break; |
| 1996 | } |
| 1997 | } |
| 1998 | } |
| 1999 | |
| 2000 | /* |
| 2001 | * A queue event has occurred, and we're going to schedule. In |
| 2002 | * this case, we can save a useless back-to-back clock update. |
| 2003 | */ |
| 2004 | if (test_tsk_need_resched(rq->curr)) |
| 2005 | rq->skip_clock_update = 1; |
| 2006 | } |
| 2007 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2008 | #ifdef CONFIG_SMP |
Ingo Molnar | cc36773 | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 2009 | /* |
| 2010 | * Is this task likely cache-hot: |
| 2011 | */ |
Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 2012 | static int |
Ingo Molnar | cc36773 | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 2013 | task_hot(struct task_struct *p, u64 now, struct sched_domain *sd) |
| 2014 | { |
| 2015 | s64 delta; |
| 2016 | |
Peter Zijlstra | e6c8fba | 2009-12-16 18:04:33 +0100 | [diff] [blame] | 2017 | if (p->sched_class != &fair_sched_class) |
| 2018 | return 0; |
| 2019 | |
Nikhil Rao | ef8002f | 2010-10-13 12:09:35 -0700 | [diff] [blame] | 2020 | if (unlikely(p->policy == SCHED_IDLE)) |
| 2021 | return 0; |
| 2022 | |
Ingo Molnar | f540a60 | 2008-03-15 17:10:34 +0100 | [diff] [blame] | 2023 | /* |
| 2024 | * Buddy candidates are cache hot: |
| 2025 | */ |
Mike Galbraith | f685cea | 2009-10-23 23:09:22 +0200 | [diff] [blame] | 2026 | if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running && |
Peter Zijlstra | 4793241 | 2008-11-04 21:25:09 +0100 | [diff] [blame] | 2027 | (&p->se == cfs_rq_of(&p->se)->next || |
| 2028 | &p->se == cfs_rq_of(&p->se)->last)) |
Ingo Molnar | f540a60 | 2008-03-15 17:10:34 +0100 | [diff] [blame] | 2029 | return 1; |
| 2030 | |
Ingo Molnar | 6bc1665 | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 2031 | if (sysctl_sched_migration_cost == -1) |
| 2032 | return 1; |
| 2033 | if (sysctl_sched_migration_cost == 0) |
| 2034 | return 0; |
| 2035 | |
Ingo Molnar | cc36773 | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 2036 | delta = now - p->se.exec_start; |
| 2037 | |
| 2038 | return delta < (s64)sysctl_sched_migration_cost; |
| 2039 | } |
| 2040 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2041 | void set_task_cpu(struct task_struct *p, unsigned int new_cpu) |
Ingo Molnar | c65cc87 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2042 | { |
Peter Zijlstra | e291200 | 2009-12-16 18:04:36 +0100 | [diff] [blame] | 2043 | #ifdef CONFIG_SCHED_DEBUG |
| 2044 | /* |
| 2045 | * We should never call set_task_cpu() on a blocked task; |
| 2046 | * ttwu() will sort out the placement. |
| 2047 | */ |
Peter Zijlstra | 077614e | 2009-12-17 13:16:31 +0100 | [diff] [blame] | 2048 | WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING && |
| 2049 | !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)); |
Peter Zijlstra | e291200 | 2009-12-16 18:04:36 +0100 | [diff] [blame] | 2050 | #endif |
| 2051 | |
Mathieu Desnoyers | de1d728 | 2009-05-05 16:49:59 +0800 | [diff] [blame] | 2052 | trace_sched_migrate_task(p, new_cpu); |
Peter Zijlstra | cbc34ed | 2008-12-10 08:08:22 +0100 | [diff] [blame] | 2053 | |
Peter Zijlstra | 0c69774 | 2009-12-22 15:43:19 +0100 | [diff] [blame] | 2054 | if (task_cpu(p) != new_cpu) { |
| 2055 | p->se.nr_migrations++; |
| 2056 | perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0); |
| 2057 | } |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2058 | |
| 2059 | __set_task_cpu(p, new_cpu); |
Ingo Molnar | c65cc87 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 2060 | } |
| 2061 | |
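| | /* Arguments for migration_cpu_stop(): the task to move and its destination CPU. */ |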
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 2062 | struct migration_arg { |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 2063 | struct task_struct *task; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2064 | int dest_cpu; |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 2065 | }; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2066 | |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 2067 | static int migration_cpu_stop(void *data); |
| 2068 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2069 | /* |
| 2070 | * The task's runqueue lock must be held. |
| 2071 | * Returns true if you have to wait for the migration thread. |
| 2072 | */ |
Nikanth Karthikesan | b7a2b39 | 2010-11-26 12:37:09 +0530 | [diff] [blame] | 2073 | static bool migrate_task(struct task_struct *p, struct rq *rq) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2074 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2075 | /* |
| 2076 | * If the task is not on a runqueue (and not running), then |
Peter Zijlstra | e291200 | 2009-12-16 18:04:36 +0100 | [diff] [blame] | 2077 | * the next wake-up will properly place the task. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2078 | */ |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 2079 | return p->se.on_rq || task_running(rq, p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2080 | } |
| 2081 | |
| 2082 | /* |
| 2083 | * wait_task_inactive - wait for a thread to unschedule. |
| 2084 | * |
Roland McGrath | 85ba2d8 | 2008-07-25 19:45:58 -0700 | [diff] [blame] | 2085 | * If @match_state is nonzero, it's the @p->state value just checked and |
| 2086 | * not expected to change. If it changes, i.e. @p might have woken up, |
| 2087 | * then return zero. When we succeed in waiting for @p to be off its CPU, |
| 2088 | * we return a positive number (its total switch count). If a second call |
| 2089 | * a short while later returns the same number, the caller can be sure that |
| 2090 | * @p has remained unscheduled the whole time. |
| 2091 | * |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2092 | * The caller must ensure that the task *will* unschedule sometime soon, |
| 2093 | * else this function might spin for a *long* time. This function can't |
| 2094 | * be called with interrupts off, or it may introduce deadlock with |
| 2095 | * smp_call_function() if an IPI is sent by the same process we are |
| 2096 | * waiting to become inactive. |
| 2097 | */ |
Roland McGrath | 85ba2d8 | 2008-07-25 19:45:58 -0700 | [diff] [blame] | 2098 | unsigned long wait_task_inactive(struct task_struct *p, long match_state) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2099 | { |
| 2100 | unsigned long flags; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2101 | int running, on_rq; |
Roland McGrath | 85ba2d8 | 2008-07-25 19:45:58 -0700 | [diff] [blame] | 2102 | unsigned long ncsw; |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 2103 | struct rq *rq; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2104 | |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 2105 | for (;;) { |
| 2106 | /* |
| 2107 | * We do the initial early heuristics without holding |
| 2108 | * any task-queue locks at all. We'll only try to get |
| 2109 | * the runqueue lock when things look like they will |
| 2110 | * work out! |
| 2111 | */ |
| 2112 | rq = task_rq(p); |
Linus Torvalds | fa490cf | 2007-06-18 09:34:40 -0700 | [diff] [blame] | 2113 | |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 2114 | /* |
| 2115 | * If the task is actively running on another CPU |
| 2116 | * still, just relax and busy-wait without holding |
| 2117 | * any locks. |
| 2118 | * |
| 2119 | * NOTE! Since we don't hold any locks, it's not |
| 2120 | * even guaranteed that "rq" stays the right runqueue! |
| 2121 | * But we don't care, since "task_running()" will |
| 2122 | * return false if the runqueue has changed and p |
| 2123 | * is actually now running somewhere else! |
| 2124 | */ |
Roland McGrath | 85ba2d8 | 2008-07-25 19:45:58 -0700 | [diff] [blame] | 2125 | while (task_running(rq, p)) { |
| 2126 | if (match_state && unlikely(p->state != match_state)) |
| 2127 | return 0; |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 2128 | cpu_relax(); |
Roland McGrath | 85ba2d8 | 2008-07-25 19:45:58 -0700 | [diff] [blame] | 2129 | } |
Linus Torvalds | fa490cf | 2007-06-18 09:34:40 -0700 | [diff] [blame] | 2130 | |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 2131 | /* |
| 2132 | * Ok, time to look more closely! We need the rq |
| 2133 | * lock now, to be *sure*. If we're wrong, we'll |
| 2134 | * just go back and repeat. |
| 2135 | */ |
| 2136 | rq = task_rq_lock(p, &flags); |
Peter Zijlstra | 27a9da6 | 2010-05-04 20:36:56 +0200 | [diff] [blame] | 2137 | trace_sched_wait_task(p); |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 2138 | running = task_running(rq, p); |
| 2139 | on_rq = p->se.on_rq; |
Roland McGrath | 85ba2d8 | 2008-07-25 19:45:58 -0700 | [diff] [blame] | 2140 | ncsw = 0; |
Oleg Nesterov | f31e11d | 2008-08-20 16:54:44 -0700 | [diff] [blame] | 2141 | if (!match_state || p->state == match_state) |
Oleg Nesterov | 93dcf55 | 2008-08-20 16:54:44 -0700 | [diff] [blame] | 2142 | ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 2143 | task_rq_unlock(rq, &flags); |
Linus Torvalds | fa490cf | 2007-06-18 09:34:40 -0700 | [diff] [blame] | 2144 | |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 2145 | /* |
Roland McGrath | 85ba2d8 | 2008-07-25 19:45:58 -0700 | [diff] [blame] | 2146 | * If it changed from the expected state, bail out now. |
| 2147 | */ |
| 2148 | if (unlikely(!ncsw)) |
| 2149 | break; |
| 2150 | |
| 2151 | /* |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 2152 | * Was it really running after all now that we |
| 2153 | * checked with the proper locks actually held? |
| 2154 | * |
| 2155 | * Oops. Go back and try again.. |
| 2156 | */ |
| 2157 | if (unlikely(running)) { |
| 2158 | cpu_relax(); |
| 2159 | continue; |
| 2160 | } |
| 2161 | |
| 2162 | /* |
| 2163 | * It's not enough that it's not actively running, |
| 2164 | * it must be off the runqueue _entirely_, and not |
| 2165 | * preempted! |
| 2166 | * |
Luis Henriques | 80dd99b | 2009-03-16 19:58:09 +0000 | [diff] [blame] | 2167 | * So if it was still runnable (but just not actively |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 2168 | * running right now), it's preempted, and we should |
| 2169 | * yield - it could be a while. |
| 2170 | */ |
| 2171 | if (unlikely(on_rq)) { |
| 2172 | schedule_timeout_uninterruptible(1); |
| 2173 | continue; |
| 2174 | } |
| 2175 | |
| 2176 | /* |
| 2177 | * Ahh, all good. It wasn't running, and it wasn't |
| 2178 | * runnable, which means that it will never become |
| 2179 | * running in the future either. We're all done! |
| 2180 | */ |
| 2181 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2182 | } |
Roland McGrath | 85ba2d8 | 2008-07-25 19:45:58 -0700 | [diff] [blame] | 2183 | |
| 2184 | return ncsw; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2185 | } |
| 2186 | |
| 2187 | /*** |
| 2188 | * kick_process - kick a running thread to enter/exit the kernel |
| 2189 | * @p: the to-be-kicked thread |
| 2190 | * |
| 2191 | * Cause a process which is running on another CPU to enter |
| 2192 | * kernel mode, without any delay (e.g. to get signals handled). |
| 2193 | * |
| 2194 | * NOTE: this function doesn't have to take the runqueue lock, |
| 2195 | * because all it wants to ensure is that the remote task enters |
| 2196 | * the kernel. If the IPI races and the task has been migrated |
| 2197 | * to another CPU then no harm is done and the purpose has been |
| 2198 | * achieved as well. |
| 2199 | */ |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 2200 | void kick_process(struct task_struct *p) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2201 | { |
| 2202 | int cpu; |
| 2203 | |
| 2204 | preempt_disable(); |
| 2205 | cpu = task_cpu(p); |
| 2206 | if ((cpu != smp_processor_id()) && task_curr(p)) |
| 2207 | smp_send_reschedule(cpu); |
| 2208 | preempt_enable(); |
| 2209 | } |
Rusty Russell | b43e352 | 2009-06-12 22:27:00 -0600 | [diff] [blame] | 2210 | EXPORT_SYMBOL_GPL(kick_process); |
Nick Piggin | 476d139 | 2005-06-25 14:57:29 -0700 | [diff] [blame] | 2211 | #endif /* CONFIG_SMP */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2212 | |
Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 2213 | /** |
| 2214 | * task_oncpu_function_call - call a function on the cpu on which a task runs |
| 2215 | * @p: the task to evaluate |
| 2216 | * @func: the function to be called |
| 2217 | * @info: the function call argument |
| 2218 | * |
| 2219 | * Calls the function @func when the task is currently running. This might |
| 2220 | * be on the current CPU, in which case the function is called directly. |
| 2221 | */ |
| 2222 | void task_oncpu_function_call(struct task_struct *p, |
| 2223 | void (*func) (void *info), void *info) |
| 2224 | { |
| 2225 | int cpu; |
| 2226 | |
| 2227 | preempt_disable(); |
| 2228 | cpu = task_cpu(p); |
| 2229 | if (task_curr(p)) |
| 2230 | smp_call_function_single(cpu, func, info, 1); |
| 2231 | preempt_enable(); |
| 2232 | } |
| 2233 | |
Peter Zijlstra | 970b13b | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 2234 | #ifdef CONFIG_SMP |
Oleg Nesterov | 30da688 | 2010-03-15 10:10:19 +0100 | [diff] [blame] | 2235 | /* |
| 2236 | * ->cpus_allowed is protected by either TASK_WAKING or rq->lock held. |
| 2237 | */ |
Peter Zijlstra | 5da9a0f | 2009-12-16 18:04:38 +0100 | [diff] [blame] | 2238 | static int select_fallback_rq(int cpu, struct task_struct *p) |
| 2239 | { |
| 2240 | int dest_cpu; |
| 2241 | const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu)); |
| 2242 | |
| 2243 | /* Look for allowed, online CPU in same node. */ |
| 2244 | for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask) |
| 2245 | if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) |
| 2246 | return dest_cpu; |
| 2247 | |
| 2248 | /* Any allowed, online CPU? */ |
| 2249 | dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask); |
| 2250 | if (dest_cpu < nr_cpu_ids) |
| 2251 | return dest_cpu; |
| 2252 | |
| 2253 | /* No more Mr. Nice Guy. */ |
Peter Zijlstra | 48c5cca | 2010-11-13 19:32:29 +0100 | [diff] [blame] | 2254 | dest_cpu = cpuset_cpus_allowed_fallback(p); |
| 2255 | /* |
| 2256 | * Don't tell them about moving exiting tasks or |
| 2257 | * kernel threads (both mm NULL), since they never |
| 2258 | * leave the kernel. |
| 2259 | */ |
| 2260 | if (p->mm && printk_ratelimit()) { |
| 2261 | printk(KERN_INFO "process %d (%s) no longer affine to cpu%d\n", |
| 2262 | task_pid_nr(p), p->comm, cpu); |
Peter Zijlstra | 5da9a0f | 2009-12-16 18:04:38 +0100 | [diff] [blame] | 2263 | } |
| 2264 | |
| 2265 | return dest_cpu; |
| 2266 | } |
| 2267 | |
Peter Zijlstra | e291200 | 2009-12-16 18:04:36 +0100 | [diff] [blame] | 2268 | /* |
Oleg Nesterov | 30da688 | 2010-03-15 10:10:19 +0100 | [diff] [blame] | 2269 | * The caller (fork, wakeup) owns TASK_WAKING, ->cpus_allowed is stable. |
Peter Zijlstra | e291200 | 2009-12-16 18:04:36 +0100 | [diff] [blame] | 2270 | */ |
Peter Zijlstra | 970b13b | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 2271 | static inline |
Peter Zijlstra | 0017d73 | 2010-03-24 18:34:10 +0100 | [diff] [blame] | 2272 | int select_task_rq(struct rq *rq, struct task_struct *p, int sd_flags, int wake_flags) |
Peter Zijlstra | 970b13b | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 2273 | { |
Peter Zijlstra | 0017d73 | 2010-03-24 18:34:10 +0100 | [diff] [blame] | 2274 | int cpu = p->sched_class->select_task_rq(rq, p, sd_flags, wake_flags); |
Peter Zijlstra | e291200 | 2009-12-16 18:04:36 +0100 | [diff] [blame] | 2275 | |
| 2276 | /* |
| 2277 | * In order not to call set_task_cpu() on a blocking task we need |
| 2278 | * to rely on ttwu() to place the task on a valid ->cpus_allowed |
| 2279 | * cpu. |
| 2280 | * |
| 2281 | * Since this is common to all placement strategies, this lives here. |
| 2282 | * |
| 2283 | * [ this allows ->select_task_rq() to simply return task_cpu(p) and |
| 2284 | * not worry about this generic constraint ] |
| 2285 | */ |
| 2286 | if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) || |
Peter Zijlstra | 70f1120 | 2009-12-20 17:36:27 +0100 | [diff] [blame] | 2287 | !cpu_online(cpu))) |
Peter Zijlstra | 5da9a0f | 2009-12-16 18:04:38 +0100 | [diff] [blame] | 2288 | cpu = select_fallback_rq(task_cpu(p), p); |
Peter Zijlstra | e291200 | 2009-12-16 18:04:36 +0100 | [diff] [blame] | 2289 | |
| 2290 | return cpu; |
Peter Zijlstra | 970b13b | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 2291 | } |
Mike Galbraith | 09a40af | 2010-04-15 07:29:59 +0200 | [diff] [blame] | 2292 | |
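| | /* |
| | * Exponentially weighted moving average: fold each new sample in with a |
| | * weight of 1/8, i.e. avg += (sample - avg) / 8. |
| | */ |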
| 2293 | static void update_avg(u64 *avg, u64 sample) |
| 2294 | { |
| 2295 | s64 diff = sample - *avg; |
| 2296 | *avg += diff >> 3; |
| 2297 | } |
Peter Zijlstra | 970b13b | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 2298 | #endif |
| 2299 | |
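| | /* |
| | * try_to_wake_up() helper: account the wakeup in the schedstats and |
| | * enqueue @p on @rq with the given enqueue flags. |
| | */ |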
Tejun Heo | 9ed3811 | 2009-12-03 15:08:03 +0900 | [diff] [blame] | 2300 | static inline void ttwu_activate(struct task_struct *p, struct rq *rq, |
| 2301 | bool is_sync, bool is_migrate, bool is_local, |
| 2302 | unsigned long en_flags) |
| 2303 | { |
| 2304 | schedstat_inc(p, se.statistics.nr_wakeups); |
| 2305 | if (is_sync) |
| 2306 | schedstat_inc(p, se.statistics.nr_wakeups_sync); |
| 2307 | if (is_migrate) |
| 2308 | schedstat_inc(p, se.statistics.nr_wakeups_migrate); |
| 2309 | if (is_local) |
| 2310 | schedstat_inc(p, se.statistics.nr_wakeups_local); |
| 2311 | else |
| 2312 | schedstat_inc(p, se.statistics.nr_wakeups_remote); |
| 2313 | |
| 2314 | activate_task(rq, p, en_flags); |
| 2315 | } |
| 2316 | |
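| | /* |
| | * try_to_wake_up() helper for everything that follows the enqueue: the |
| | * preemption check, marking @p TASK_RUNNING, the class's task_woken() |
| | * callback, rq->avg_idle bookkeeping and the workqueue notification. |
| | */ |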
| 2317 | static inline void ttwu_post_activation(struct task_struct *p, struct rq *rq, |
| 2318 | int wake_flags, bool success) |
| 2319 | { |
| 2320 | trace_sched_wakeup(p, success); |
| 2321 | check_preempt_curr(rq, p, wake_flags); |
| 2322 | |
| 2323 | p->state = TASK_RUNNING; |
| 2324 | #ifdef CONFIG_SMP |
| 2325 | if (p->sched_class->task_woken) |
| 2326 | p->sched_class->task_woken(rq, p); |
| 2327 | |
| 2328 | if (unlikely(rq->idle_stamp)) { |
| 2329 | u64 delta = rq->clock - rq->idle_stamp; |
| 2330 | u64 max = 2*sysctl_sched_migration_cost; |
| 2331 | |
| 2332 | if (delta > max) |
| 2333 | rq->avg_idle = max; |
| 2334 | else |
| 2335 | update_avg(&rq->avg_idle, delta); |
| 2336 | rq->idle_stamp = 0; |
| 2337 | } |
| 2338 | #endif |
Tejun Heo | 21aa9af | 2010-06-08 21:40:37 +0200 | [diff] [blame] | 2339 | /* if a worker is waking up, notify workqueue */ |
| 2340 | if ((p->flags & PF_WQ_WORKER) && success) |
| 2341 | wq_worker_waking_up(p, cpu_of(rq)); |
Tejun Heo | 9ed3811 | 2009-12-03 15:08:03 +0900 | [diff] [blame] | 2342 | } |
| 2343 | |
| 2344 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2345 | * try_to_wake_up - wake up a thread |
Tejun Heo | 9ed3811 | 2009-12-03 15:08:03 +0900 | [diff] [blame] | 2346 | * @p: the thread to be awakened |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2347 | * @state: the mask of task states that can be woken |
Tejun Heo | 9ed3811 | 2009-12-03 15:08:03 +0900 | [diff] [blame] | 2348 | * @wake_flags: wake modifier flags (WF_*) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2349 | * |
| 2350 | * Put it on the run-queue if it's not already there. The "current" |
| 2351 | * thread is always on the run-queue (except when the actual |
| 2352 | * re-schedule is in progress), and as such you're allowed to do |
| 2353 | * the simpler "current->state = TASK_RUNNING" to mark yourself |
| 2354 | * runnable without the overhead of this. |
| 2355 | * |
Tejun Heo | 9ed3811 | 2009-12-03 15:08:03 +0900 | [diff] [blame] | 2356 | * Returns %true if @p was woken up, %false if it was already running |
| 2357 | * or @state didn't match @p's state. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2358 | */ |
Peter Zijlstra | 7d47872 | 2009-09-14 19:55:44 +0200 | [diff] [blame] | 2359 | static int try_to_wake_up(struct task_struct *p, unsigned int state, |
| 2360 | int wake_flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2361 | { |
Ingo Molnar | cc36773 | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 2362 | int cpu, orig_cpu, this_cpu, success = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2363 | unsigned long flags; |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 2364 | unsigned long en_flags = ENQUEUE_WAKEUP; |
Dan Carpenter | ab3b3aa | 2010-03-06 14:17:52 +0300 | [diff] [blame] | 2365 | struct rq *rq; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2366 | |
Peter Zijlstra | e9c8431 | 2009-09-15 14:43:03 +0200 | [diff] [blame] | 2367 | this_cpu = get_cpu(); |
Peter Zijlstra | 2398f2c | 2008-06-27 13:41:35 +0200 | [diff] [blame] | 2368 | |
Linus Torvalds | 04e2f17 | 2008-02-23 18:05:03 -0800 | [diff] [blame] | 2369 | smp_wmb(); |
Dan Carpenter | ab3b3aa | 2010-03-06 14:17:52 +0300 | [diff] [blame] | 2370 | rq = task_rq_lock(p, &flags); |
Peter Zijlstra | e9c8431 | 2009-09-15 14:43:03 +0200 | [diff] [blame] | 2371 | if (!(p->state & state)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2372 | goto out; |
| 2373 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2374 | if (p->se.on_rq) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2375 | goto out_running; |
| 2376 | |
| 2377 | cpu = task_cpu(p); |
Ingo Molnar | cc36773 | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 2378 | orig_cpu = cpu; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2379 | |
| 2380 | #ifdef CONFIG_SMP |
| 2381 | if (unlikely(task_running(rq, p))) |
| 2382 | goto out_activate; |
| 2383 | |
Peter Zijlstra | e9c8431 | 2009-09-15 14:43:03 +0200 | [diff] [blame] | 2384 | /* |
| 2385 | * In order to handle concurrent wakeups and release the rq->lock |
| 2386 | * we put the task in TASK_WAKING state. |
Ingo Molnar | eb24073 | 2009-09-16 21:09:13 +0200 | [diff] [blame] | 2387 | * |
| 2388 | * First fix up the nr_uninterruptible count: |
Peter Zijlstra | e9c8431 | 2009-09-15 14:43:03 +0200 | [diff] [blame] | 2389 | */ |
Peter Zijlstra | cc87f76 | 2010-03-26 12:22:14 +0100 | [diff] [blame] | 2390 | if (task_contributes_to_load(p)) { |
| 2391 | if (likely(cpu_online(orig_cpu))) |
| 2392 | rq->nr_uninterruptible--; |
| 2393 | else |
| 2394 | this_rq()->nr_uninterruptible--; |
| 2395 | } |
Peter Zijlstra | e9c8431 | 2009-09-15 14:43:03 +0200 | [diff] [blame] | 2396 | p->state = TASK_WAKING; |
Peter Zijlstra | efbbd05 | 2009-12-16 18:04:40 +0100 | [diff] [blame] | 2397 | |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 2398 | if (p->sched_class->task_waking) { |
Peter Zijlstra | efbbd05 | 2009-12-16 18:04:40 +0100 | [diff] [blame] | 2399 | p->sched_class->task_waking(rq, p); |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 2400 | en_flags |= ENQUEUE_WAKING; |
Peter Zijlstra | 0970d29 | 2010-02-15 14:45:54 +0100 | [diff] [blame] | 2401 | } |
Peter Zijlstra | ab19cb2 | 2009-11-27 15:44:43 +0100 | [diff] [blame] | 2402 | |
Peter Zijlstra | 0017d73 | 2010-03-24 18:34:10 +0100 | [diff] [blame] | 2403 | cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags); |
| 2404 | if (cpu != orig_cpu) |
Mike Galbraith | f5dc375 | 2009-10-09 08:35:03 +0200 | [diff] [blame] | 2405 | set_task_cpu(p, cpu); |
Peter Zijlstra | 0017d73 | 2010-03-24 18:34:10 +0100 | [diff] [blame] | 2406 | __task_rq_unlock(rq); |
Peter Zijlstra | ab19cb2 | 2009-11-27 15:44:43 +0100 | [diff] [blame] | 2407 | |
Peter Zijlstra | 0970d29 | 2010-02-15 14:45:54 +0100 | [diff] [blame] | 2408 | rq = cpu_rq(cpu); |
| 2409 | raw_spin_lock(&rq->lock); |
Mike Galbraith | f5dc375 | 2009-10-09 08:35:03 +0200 | [diff] [blame] | 2410 | |
Peter Zijlstra | 0970d29 | 2010-02-15 14:45:54 +0100 | [diff] [blame] | 2411 | /* |
| 2412 | * We migrated the task without holding either rq->lock; however, |
| 2413 | * since the task is not on the task list itself, nobody else |
| 2414 | * will try to migrate the task, hence the rq should match the |
| 2415 | * cpu we just moved it to. |
| 2416 | */ |
| 2417 | WARN_ON(task_cpu(p) != cpu); |
Peter Zijlstra | e9c8431 | 2009-09-15 14:43:03 +0200 | [diff] [blame] | 2418 | WARN_ON(p->state != TASK_WAKING); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2419 | |
Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 2420 | #ifdef CONFIG_SCHEDSTATS |
| 2421 | schedstat_inc(rq, ttwu_count); |
| 2422 | if (cpu == this_cpu) |
| 2423 | schedstat_inc(rq, ttwu_local); |
| 2424 | else { |
| 2425 | struct sched_domain *sd; |
| 2426 | for_each_domain(this_cpu, sd) { |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 2427 | if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { |
Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 2428 | schedstat_inc(sd, ttwu_wake_remote); |
| 2429 | break; |
| 2430 | } |
| 2431 | } |
| 2432 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 2433 | #endif /* CONFIG_SCHEDSTATS */ |
Gregory Haskins | e7693a3 | 2008-01-25 21:08:09 +0100 | [diff] [blame] | 2434 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2435 | out_activate: |
| 2436 | #endif /* CONFIG_SMP */ |
Tejun Heo | 9ed3811 | 2009-12-03 15:08:03 +0900 | [diff] [blame] | 2437 | ttwu_activate(p, rq, wake_flags & WF_SYNC, orig_cpu != cpu, |
| 2438 | cpu == this_cpu, en_flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2439 | success = 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2440 | out_running: |
Tejun Heo | 9ed3811 | 2009-12-03 15:08:03 +0900 | [diff] [blame] | 2441 | ttwu_post_activation(p, rq, wake_flags, success); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2442 | out: |
| 2443 | task_rq_unlock(rq, &flags); |
Peter Zijlstra | e9c8431 | 2009-09-15 14:43:03 +0200 | [diff] [blame] | 2444 | put_cpu(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2445 | |
| 2446 | return success; |
| 2447 | } |
| 2448 | |
David Howells | 50fa610 | 2009-04-28 15:01:38 +0100 | [diff] [blame] | 2449 | /** |
Tejun Heo | 21aa9af | 2010-06-08 21:40:37 +0200 | [diff] [blame] | 2450 | * try_to_wake_up_local - try to wake up a local task with rq lock held |
| 2451 | * @p: the thread to be awakened |
| 2452 | * |
| 2453 | * Put @p on the run-queue if it's not already there. The caller must |
| 2454 | * ensure that this_rq() is locked, @p is bound to this_rq() and not |
| 2455 | * the current task. this_rq() stays locked over invocation. |
| 2456 | */ |
| 2457 | static void try_to_wake_up_local(struct task_struct *p) |
| 2458 | { |
| 2459 | struct rq *rq = task_rq(p); |
| 2460 | bool success = false; |
| 2461 | |
| 2462 | BUG_ON(rq != this_rq()); |
| 2463 | BUG_ON(p == current); |
| 2464 | lockdep_assert_held(&rq->lock); |
| 2465 | |
| 2466 | if (!(p->state & TASK_NORMAL)) |
| 2467 | return; |
| 2468 | |
| 2469 | if (!p->se.on_rq) { |
| 2470 | if (likely(!task_running(rq, p))) { |
| 2471 | schedstat_inc(rq, ttwu_count); |
| 2472 | schedstat_inc(rq, ttwu_local); |
| 2473 | } |
| 2474 | ttwu_activate(p, rq, false, false, true, ENQUEUE_WAKEUP); |
| 2475 | success = true; |
| 2476 | } |
| 2477 | ttwu_post_activation(p, rq, 0, success); |
| 2478 | } |
| 2479 | |
| 2480 | /** |
David Howells | 50fa610 | 2009-04-28 15:01:38 +0100 | [diff] [blame] | 2481 | * wake_up_process - Wake up a specific process |
| 2482 | * @p: The process to be woken up. |
| 2483 | * |
| 2484 | * Attempt to wake up the nominated process and move it to the set of runnable |
| 2485 | * processes. Returns 1 if the process was woken up, 0 if it was already |
| 2486 | * running. |
| 2487 | * |
| 2488 | * It may be assumed that this function implies a write memory barrier before |
| 2489 | * changing the task state if and only if any tasks are woken up. |
| 2490 | */ |
Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 2491 | int wake_up_process(struct task_struct *p) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2492 | { |
Matthew Wilcox | d9514f6 | 2007-12-06 11:07:07 -0500 | [diff] [blame] | 2493 | return try_to_wake_up(p, TASK_ALL, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2494 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2495 | EXPORT_SYMBOL(wake_up_process); |
| 2496 | |
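| | /* Like wake_up_process(), but only wake @p if its current state is in @state. */ |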
Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 2497 | int wake_up_state(struct task_struct *p, unsigned int state) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2498 | { |
| 2499 | return try_to_wake_up(p, state, 0); |
| 2500 | } |
| 2501 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2502 | /* |
| 2503 | * Perform scheduler related setup for a newly forked process p. |
| 2504 | * p is forked by current. |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2505 | * |
| 2506 | * __sched_fork() is basic setup used by init_idle() too: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2507 | */ |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2508 | static void __sched_fork(struct task_struct *p) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2509 | { |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2510 | p->se.exec_start = 0; |
| 2511 | p->se.sum_exec_runtime = 0; |
Ingo Molnar | f6cf891 | 2007-08-28 12:53:24 +0200 | [diff] [blame] | 2512 | p->se.prev_sum_exec_runtime = 0; |
Ingo Molnar | 6c594c2 | 2008-12-14 12:34:15 +0100 | [diff] [blame] | 2513 | p->se.nr_migrations = 0; |
Ingo Molnar | 6cfb0d5 | 2007-08-02 17:41:40 +0200 | [diff] [blame] | 2514 | |
| 2515 | #ifdef CONFIG_SCHEDSTATS |
Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 2516 | memset(&p->se.statistics, 0, sizeof(p->se.statistics)); |
Ingo Molnar | 6cfb0d5 | 2007-08-02 17:41:40 +0200 | [diff] [blame] | 2517 | #endif |
Nick Piggin | 476d139 | 2005-06-25 14:57:29 -0700 | [diff] [blame] | 2518 | |
Peter Zijlstra | fa71706 | 2008-01-25 21:08:27 +0100 | [diff] [blame] | 2519 | INIT_LIST_HEAD(&p->rt.run_list); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2520 | p->se.on_rq = 0; |
Peter Zijlstra | 4a55bd5 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 2521 | INIT_LIST_HEAD(&p->se.group_node); |
Nick Piggin | 476d139 | 2005-06-25 14:57:29 -0700 | [diff] [blame] | 2522 | |
Avi Kivity | e107be3 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 2523 | #ifdef CONFIG_PREEMPT_NOTIFIERS |
| 2524 | INIT_HLIST_HEAD(&p->preempt_notifiers); |
| 2525 | #endif |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2526 | } |
| 2527 | |
| 2528 | /* |
| 2529 | * fork()/clone()-time setup: |
| 2530 | */ |
| 2531 | void sched_fork(struct task_struct *p, int clone_flags) |
| 2532 | { |
| 2533 | int cpu = get_cpu(); |
| 2534 | |
| 2535 | __sched_fork(p); |
Peter Zijlstra | 06b83b5 | 2009-12-16 18:04:35 +0100 | [diff] [blame] | 2536 | /* |
Peter Zijlstra | 0017d73 | 2010-03-24 18:34:10 +0100 | [diff] [blame] | 2537 | * We mark the process as running here. This guarantees that |
Peter Zijlstra | 06b83b5 | 2009-12-16 18:04:35 +0100 | [diff] [blame] | 2538 | * nobody will actually run it, and a signal or other external |
| 2539 | * event cannot wake it up and insert it on the runqueue either. |
| 2540 | */ |
Peter Zijlstra | 0017d73 | 2010-03-24 18:34:10 +0100 | [diff] [blame] | 2541 | p->state = TASK_RUNNING; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2542 | |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 2543 | /* |
Mike Galbraith | b9dc29e | 2009-06-17 10:46:01 +0200 | [diff] [blame] | 2544 | * Revert to default priority/policy on fork if requested. |
| 2545 | */ |
| 2546 | if (unlikely(p->sched_reset_on_fork)) { |
Peter Williams | f83f9ac | 2009-09-24 06:47:10 +0000 | [diff] [blame] | 2547 | if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) { |
Mike Galbraith | b9dc29e | 2009-06-17 10:46:01 +0200 | [diff] [blame] | 2548 | p->policy = SCHED_NORMAL; |
Peter Williams | f83f9ac | 2009-09-24 06:47:10 +0000 | [diff] [blame] | 2549 | p->normal_prio = p->static_prio; |
| 2550 | } |
Mike Galbraith | b9dc29e | 2009-06-17 10:46:01 +0200 | [diff] [blame] | 2551 | |
Mike Galbraith | 6c697bd | 2009-06-17 10:48:02 +0200 | [diff] [blame] | 2552 | if (PRIO_TO_NICE(p->static_prio) < 0) { |
| 2553 | p->static_prio = NICE_TO_PRIO(0); |
Peter Williams | f83f9ac | 2009-09-24 06:47:10 +0000 | [diff] [blame] | 2554 | p->normal_prio = p->static_prio; |
Mike Galbraith | 6c697bd | 2009-06-17 10:48:02 +0200 | [diff] [blame] | 2555 | set_load_weight(p); |
| 2556 | } |
| 2557 | |
Mike Galbraith | b9dc29e | 2009-06-17 10:46:01 +0200 | [diff] [blame] | 2558 | /* |
| 2559 | * We don't need the reset flag anymore after the fork. It has |
| 2560 | * fulfilled its duty: |
| 2561 | */ |
| 2562 | p->sched_reset_on_fork = 0; |
| 2563 | } |
Lennart Poettering | ca94c44 | 2009-06-15 17:17:47 +0200 | [diff] [blame] | 2564 | |
Peter Williams | f83f9ac | 2009-09-24 06:47:10 +0000 | [diff] [blame] | 2565 | /* |
| 2566 | * Make sure we do not leak PI boosting priority to the child. |
| 2567 | */ |
| 2568 | p->prio = current->normal_prio; |
| 2569 | |
Hiroshi Shimamoto | 2ddbf95 | 2007-10-15 17:00:11 +0200 | [diff] [blame] | 2570 | if (!rt_prio(p->prio)) |
| 2571 | p->sched_class = &fair_sched_class; |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 2572 | |
Peter Zijlstra | cd29fe6 | 2009-11-27 17:32:46 +0100 | [diff] [blame] | 2573 | if (p->sched_class->task_fork) |
| 2574 | p->sched_class->task_fork(p); |
| 2575 | |
Peter Zijlstra | 8695159 | 2010-06-22 11:44:53 +0200 | [diff] [blame] | 2576 | /* |
| 2577 | * The child is not yet in the pid-hash so no cgroup attach races, |
| 2578 | * and the cgroup is pinned to this child because cgroup_fork() |
| 2579 | * runs before sched_fork(). |
| 2580 | * |
| 2581 | * Silence PROVE_RCU. |
| 2582 | */ |
| 2583 | rcu_read_lock(); |
Peter Zijlstra | 5f3edc1 | 2009-09-10 13:42:00 +0200 | [diff] [blame] | 2584 | set_task_cpu(p, cpu); |
Peter Zijlstra | 8695159 | 2010-06-22 11:44:53 +0200 | [diff] [blame] | 2585 | rcu_read_unlock(); |
Peter Zijlstra | 5f3edc1 | 2009-09-10 13:42:00 +0200 | [diff] [blame] | 2586 | |
Chandra Seetharaman | 52f17b6 | 2006-07-14 00:24:38 -0700 | [diff] [blame] | 2587 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2588 | if (likely(sched_info_on())) |
Chandra Seetharaman | 52f17b6 | 2006-07-14 00:24:38 -0700 | [diff] [blame] | 2589 | memset(&p->sched_info, 0, sizeof(p->sched_info)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2590 | #endif |
Chen, Kenneth W | d6077cb | 2006-02-14 13:53:10 -0800 | [diff] [blame] | 2591 | #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 2592 | p->oncpu = 0; |
| 2593 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2594 | #ifdef CONFIG_PREEMPT |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 2595 | /* Want to start with kernel preemption disabled. */ |
Al Viro | a1261f5 | 2005-11-13 16:06:55 -0800 | [diff] [blame] | 2596 | task_thread_info(p)->preempt_count = 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2597 | #endif |
Dario Faggioli | 806c09a | 2010-11-30 19:51:33 +0100 | [diff] [blame^] | 2598 | #ifdef CONFIG_SMP |
Gregory Haskins | 917b627 | 2008-12-29 09:39:53 -0500 | [diff] [blame] | 2599 | plist_node_init(&p->pushable_tasks, MAX_PRIO); |
Dario Faggioli | 806c09a | 2010-11-30 19:51:33 +0100 | [diff] [blame^] | 2600 | #endif |
Gregory Haskins | 917b627 | 2008-12-29 09:39:53 -0500 | [diff] [blame] | 2601 | |
Nick Piggin | 476d139 | 2005-06-25 14:57:29 -0700 | [diff] [blame] | 2602 | put_cpu(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2603 | } |
| 2604 | |
| 2605 | /* |
| 2606 | * wake_up_new_task - wake up a newly created task for the first time. |
| 2607 | * |
| 2608 | * This function will do some initial scheduler statistics housekeeping |
| 2609 | * that must be done for every newly created context, then puts the task |
| 2610 | * on the runqueue and wakes it. |
| 2611 | */ |
Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 2612 | void wake_up_new_task(struct task_struct *p, unsigned long clone_flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2613 | { |
| 2614 | unsigned long flags; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2615 | struct rq *rq; |
Andrew Morton | c890692 | 2010-03-11 14:08:43 -0800 | [diff] [blame] | 2616 | int cpu __maybe_unused = get_cpu(); |
Peter Zijlstra | fabf318 | 2010-01-21 21:04:57 +0100 | [diff] [blame] | 2617 | |
| 2618 | #ifdef CONFIG_SMP |
Peter Zijlstra | 0017d73 | 2010-03-24 18:34:10 +0100 | [diff] [blame] | 2619 | rq = task_rq_lock(p, &flags); |
| 2620 | p->state = TASK_WAKING; |
| 2621 | |
Peter Zijlstra | fabf318 | 2010-01-21 21:04:57 +0100 | [diff] [blame] | 2622 | /* |
| 2623 | * Fork balancing, do it here and not earlier because: |
| 2624 | * - cpus_allowed can change in the fork path |
| 2625 | * - any previously selected cpu might disappear through hotplug |
| 2626 | * |
Peter Zijlstra | 0017d73 | 2010-03-24 18:34:10 +0100 | [diff] [blame] | 2627 | * We set TASK_WAKING so that select_task_rq() can drop rq->lock |
| 2628 | * without people poking at ->cpus_allowed. |
Peter Zijlstra | fabf318 | 2010-01-21 21:04:57 +0100 | [diff] [blame] | 2629 | */ |
Peter Zijlstra | 0017d73 | 2010-03-24 18:34:10 +0100 | [diff] [blame] | 2630 | cpu = select_task_rq(rq, p, SD_BALANCE_FORK, 0); |
Peter Zijlstra | fabf318 | 2010-01-21 21:04:57 +0100 | [diff] [blame] | 2631 | set_task_cpu(p, cpu); |
Peter Zijlstra | 0017d73 | 2010-03-24 18:34:10 +0100 | [diff] [blame] | 2632 | |
| 2633 | p->state = TASK_RUNNING; |
| 2634 | task_rq_unlock(rq, &flags); |
Peter Zijlstra | fabf318 | 2010-01-21 21:04:57 +0100 | [diff] [blame] | 2635 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2636 | |
Peter Zijlstra | 0017d73 | 2010-03-24 18:34:10 +0100 | [diff] [blame] | 2637 | rq = task_rq_lock(p, &flags); |
Peter Zijlstra | cd29fe6 | 2009-11-27 17:32:46 +0100 | [diff] [blame] | 2638 | activate_task(rq, p, 0); |
Peter Zijlstra | 27a9da6 | 2010-05-04 20:36:56 +0200 | [diff] [blame] | 2639 | trace_sched_wakeup_new(p, 1); |
Peter Zijlstra | a7558e0 | 2009-09-14 20:02:34 +0200 | [diff] [blame] | 2640 | check_preempt_curr(rq, p, WF_FORK); |
Steven Rostedt | 9a897c5 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 2641 | #ifdef CONFIG_SMP |
Peter Zijlstra | efbbd05 | 2009-12-16 18:04:40 +0100 | [diff] [blame] | 2642 | if (p->sched_class->task_woken) |
| 2643 | p->sched_class->task_woken(rq, p); |
Steven Rostedt | 9a897c5 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 2644 | #endif |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2645 | task_rq_unlock(rq, &flags); |
Peter Zijlstra | fabf318 | 2010-01-21 21:04:57 +0100 | [diff] [blame] | 2646 | put_cpu(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2647 | } |
| 2648 | |
Avi Kivity | e107be3 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 2649 | #ifdef CONFIG_PREEMPT_NOTIFIERS |
| 2650 | |
| 2651 | /** |
Luis Henriques | 80dd99b | 2009-03-16 19:58:09 +0000 | [diff] [blame] | 2652 | * preempt_notifier_register - tell me when current is being preempted & rescheduled |
Randy Dunlap | 421cee2 | 2007-07-31 00:37:50 -0700 | [diff] [blame] | 2653 | * @notifier: notifier struct to register |
Avi Kivity | e107be3 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 2654 | */ |
| 2655 | void preempt_notifier_register(struct preempt_notifier *notifier) |
| 2656 | { |
| 2657 | hlist_add_head(&notifier->link, &current->preempt_notifiers); |
| 2658 | } |
| 2659 | EXPORT_SYMBOL_GPL(preempt_notifier_register); |
| 2660 | |
| 2661 | /** |
| 2662 | * preempt_notifier_unregister - no longer interested in preemption notifications |
Randy Dunlap | 421cee2 | 2007-07-31 00:37:50 -0700 | [diff] [blame] | 2663 | * @notifier: notifier struct to unregister |
Avi Kivity | e107be3 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 2664 | * |
| 2665 | * This is safe to call from within a preemption notifier. |
| 2666 | */ |
| 2667 | void preempt_notifier_unregister(struct preempt_notifier *notifier) |
| 2668 | { |
| 2669 | hlist_del(&notifier->link); |
| 2670 | } |
| 2671 | EXPORT_SYMBOL_GPL(preempt_notifier_unregister); |
| 2672 | |
| 2673 | static void fire_sched_in_preempt_notifiers(struct task_struct *curr) |
| 2674 | { |
| 2675 | struct preempt_notifier *notifier; |
| 2676 | struct hlist_node *node; |
| 2677 | |
| 2678 | hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link) |
| 2679 | notifier->ops->sched_in(notifier, raw_smp_processor_id()); |
| 2680 | } |
| 2681 | |
| 2682 | static void |
| 2683 | fire_sched_out_preempt_notifiers(struct task_struct *curr, |
| 2684 | struct task_struct *next) |
| 2685 | { |
| 2686 | struct preempt_notifier *notifier; |
| 2687 | struct hlist_node *node; |
| 2688 | |
| 2689 | hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link) |
| 2690 | notifier->ops->sched_out(notifier, next); |
| 2691 | } |
| 2692 | |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 2693 | #else /* !CONFIG_PREEMPT_NOTIFIERS */ |
Avi Kivity | e107be3 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 2694 | |
| 2695 | static void fire_sched_in_preempt_notifiers(struct task_struct *curr) |
| 2696 | { |
| 2697 | } |
| 2698 | |
| 2699 | static void |
| 2700 | fire_sched_out_preempt_notifiers(struct task_struct *curr, |
| 2701 | struct task_struct *next) |
| 2702 | { |
| 2703 | } |
| 2704 | |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 2705 | #endif /* CONFIG_PREEMPT_NOTIFIERS */ |
Avi Kivity | e107be3 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 2706 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2707 | /** |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 2708 | * prepare_task_switch - prepare to switch tasks |
| 2709 | * @rq: the runqueue preparing to switch |
Randy Dunlap | 421cee2 | 2007-07-31 00:37:50 -0700 | [diff] [blame] | 2710 | * @prev: the current task that is being switched out |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 2711 | * @next: the task we are going to switch to. |
| 2712 | * |
| 2713 | * This is called with the rq lock held and interrupts off. It must |
| 2714 | * be paired with a subsequent finish_task_switch after the context |
| 2715 | * switch. |
| 2716 | * |
| 2717 | * prepare_task_switch sets up locking and calls architecture specific |
| 2718 | * hooks. |
| 2719 | */ |
Avi Kivity | e107be3 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 2720 | static inline void |
| 2721 | prepare_task_switch(struct rq *rq, struct task_struct *prev, |
| 2722 | struct task_struct *next) |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 2723 | { |
Avi Kivity | e107be3 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 2724 | fire_sched_out_preempt_notifiers(prev, next); |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 2725 | prepare_lock_switch(rq, next); |
| 2726 | prepare_arch_switch(next); |
| 2727 | } |
| 2728 | |
| 2729 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2730 | * finish_task_switch - clean up after a task-switch |
Jeff Garzik | 344baba | 2005-09-07 01:15:17 -0400 | [diff] [blame] | 2731 | * @rq: runqueue associated with task-switch |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2732 | * @prev: the thread we just switched away from. |
| 2733 | * |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 2734 | * finish_task_switch must be called after the context switch, paired |
| 2735 | * with a prepare_task_switch call before the context switch. |
| 2736 | * finish_task_switch will reconcile locking set up by prepare_task_switch, |
| 2737 | * and do any other architecture-specific cleanup actions. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2738 | * |
| 2739 | * Note that we may have delayed dropping an mm in context_switch(). If |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 2740 | * so, we finish that here outside of the runqueue lock. (Doing it |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2741 | * with the lock held can cause deadlocks; see schedule() for |
| 2742 | * details.) |
| 2743 | */ |
Alexey Dobriyan | a995744 | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 2744 | static void finish_task_switch(struct rq *rq, struct task_struct *prev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2745 | __releases(rq->lock) |
| 2746 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2747 | struct mm_struct *mm = rq->prev_mm; |
Oleg Nesterov | 55a101f | 2006-09-29 02:01:10 -0700 | [diff] [blame] | 2748 | long prev_state; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2749 | |
| 2750 | rq->prev_mm = NULL; |
| 2751 | |
| 2752 | /* |
| 2753 | * A task struct has one reference for its use as "current". |
Oleg Nesterov | c394cc9 | 2006-09-29 02:01:11 -0700 | [diff] [blame] | 2754 | * If a task dies, then it sets TASK_DEAD in tsk->state and calls |
Oleg Nesterov | 55a101f | 2006-09-29 02:01:10 -0700 | [diff] [blame] | 2755 | * schedule one last time. The schedule call will never return, and |
| 2756 | * the scheduled task must drop that reference. |
Oleg Nesterov | c394cc9 | 2006-09-29 02:01:11 -0700 | [diff] [blame] | 2757 | * The test for TASK_DEAD must occur while the runqueue locks are |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2758 | * still held, otherwise prev could be scheduled on another cpu, die |
| 2759 | * there before we look at prev->state, and then the reference would |
| 2760 | * be dropped twice. |
| 2761 | * Manfred Spraul <manfred@colorfullife.com> |
| 2762 | */ |
Oleg Nesterov | 55a101f | 2006-09-29 02:01:10 -0700 | [diff] [blame] | 2763 | prev_state = prev->state; |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 2764 | finish_arch_switch(prev); |
Jamie Iles | 8381f65 | 2010-01-08 15:27:33 +0000 | [diff] [blame] | 2765 | #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW |
| 2766 | local_irq_disable(); |
| 2767 | #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ |
Peter Zijlstra | 49f4743 | 2009-12-27 11:51:52 +0100 | [diff] [blame] | 2768 | perf_event_task_sched_in(current); |
Jamie Iles | 8381f65 | 2010-01-08 15:27:33 +0000 | [diff] [blame] | 2769 | #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW |
| 2770 | local_irq_enable(); |
| 2771 | #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 2772 | finish_lock_switch(rq, prev); |
Steven Rostedt | e8fa136 | 2008-01-25 21:08:05 +0100 | [diff] [blame] | 2773 | |
Avi Kivity | e107be3 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 2774 | fire_sched_in_preempt_notifiers(current); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2775 | if (mm) |
| 2776 | mmdrop(mm); |
Oleg Nesterov | c394cc9 | 2006-09-29 02:01:11 -0700 | [diff] [blame] | 2777 | if (unlikely(prev_state == TASK_DEAD)) { |
bibo mao | c6fd91f | 2006-03-26 01:38:20 -0800 | [diff] [blame] | 2778 | /* |
| 2779 | * Remove function-return probe instances associated with this |
| 2780 | * task and put them back on the free list. |
Ingo Molnar | 9761eea | 2007-07-09 18:52:00 +0200 | [diff] [blame] | 2781 | */ |
bibo mao | c6fd91f | 2006-03-26 01:38:20 -0800 | [diff] [blame] | 2782 | kprobe_flush_task(prev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2783 | put_task_struct(prev); |
bibo mao | c6fd91f | 2006-03-26 01:38:20 -0800 | [diff] [blame] | 2784 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2785 | } |
| 2786 | |
Gregory Haskins | 3f029d3 | 2009-07-29 11:08:47 -0400 | [diff] [blame] | 2787 | #ifdef CONFIG_SMP |
| 2788 | |
| 2789 | /* assumes rq->lock is held */ |
| 2790 | static inline void pre_schedule(struct rq *rq, struct task_struct *prev) |
| 2791 | { |
| 2792 | if (prev->sched_class->pre_schedule) |
| 2793 | prev->sched_class->pre_schedule(rq, prev); |
| 2794 | } |
| 2795 | |
| 2796 | /* rq->lock is NOT held, but preemption is disabled */ |
| 2797 | static inline void post_schedule(struct rq *rq) |
| 2798 | { |
| 2799 | if (rq->post_schedule) { |
| 2800 | unsigned long flags; |
| 2801 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 2802 | raw_spin_lock_irqsave(&rq->lock, flags); |
Gregory Haskins | 3f029d3 | 2009-07-29 11:08:47 -0400 | [diff] [blame] | 2803 | if (rq->curr->sched_class->post_schedule) |
| 2804 | rq->curr->sched_class->post_schedule(rq); |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 2805 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
Gregory Haskins | 3f029d3 | 2009-07-29 11:08:47 -0400 | [diff] [blame] | 2806 | |
| 2807 | rq->post_schedule = 0; |
| 2808 | } |
| 2809 | } |
| 2810 | |
| 2811 | #else |
| 2812 | |
| 2813 | static inline void pre_schedule(struct rq *rq, struct task_struct *p) |
| 2814 | { |
| 2815 | } |
| 2816 | |
| 2817 | static inline void post_schedule(struct rq *rq) |
| 2818 | { |
| 2819 | } |
| 2820 | |
| 2821 | #endif |
| 2822 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2823 | /** |
| 2824 | * schedule_tail - first thing a freshly forked thread must call. |
| 2825 | * @prev: the thread we just switched away from. |
| 2826 | */ |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 2827 | asmlinkage void schedule_tail(struct task_struct *prev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2828 | __releases(rq->lock) |
| 2829 | { |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 2830 | struct rq *rq = this_rq(); |
| 2831 | |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 2832 | finish_task_switch(rq, prev); |
Steven Rostedt | da19ab5 | 2009-07-29 00:21:22 -0400 | [diff] [blame] | 2833 | |
Gregory Haskins | 3f029d3 | 2009-07-29 11:08:47 -0400 | [diff] [blame] | 2834 | /* |
| 2835 | * FIXME: do we need to worry about rq being invalidated by the |
| 2836 | * task_switch? |
| 2837 | */ |
| 2838 | post_schedule(rq); |
Steven Rostedt | da19ab5 | 2009-07-29 00:21:22 -0400 | [diff] [blame] | 2839 | |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 2840 | #ifdef __ARCH_WANT_UNLOCKED_CTXSW |
| 2841 | /* In this case, finish_task_switch does not reenable preemption */ |
| 2842 | preempt_enable(); |
| 2843 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2844 | if (current->set_child_tid) |
Pavel Emelyanov | b488893 | 2007-10-18 23:40:14 -0700 | [diff] [blame] | 2845 | put_user(task_pid_vnr(current), current->set_child_tid); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2846 | } |
| 2847 | |
| 2848 | /* |
| 2849 | * context_switch - switch to the new MM and the new |
| 2850 | * thread's register state. |
| 2851 | */ |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2852 | static inline void |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 2853 | context_switch(struct rq *rq, struct task_struct *prev, |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 2854 | struct task_struct *next) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2855 | { |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2856 | struct mm_struct *mm, *oldmm; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2857 | |
Avi Kivity | e107be3 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 2858 | prepare_task_switch(rq, prev, next); |
Peter Zijlstra | 27a9da6 | 2010-05-04 20:36:56 +0200 | [diff] [blame] | 2859 | trace_sched_switch(prev, next); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2860 | mm = next->mm; |
| 2861 | oldmm = prev->active_mm; |
Zachary Amsden | 9226d12 | 2007-02-13 13:26:21 +0100 | [diff] [blame] | 2862 | /* |
| 2863 | * For paravirt, this is coupled with an exit in switch_to to |
| 2864 | * combine the page table reload and the switch backend into |
| 2865 | * one hypercall. |
| 2866 | */ |
Jeremy Fitzhardinge | 224101e | 2009-02-18 11:18:57 -0800 | [diff] [blame] | 2867 | arch_start_context_switch(prev); |
Zachary Amsden | 9226d12 | 2007-02-13 13:26:21 +0100 | [diff] [blame] | 2868 | |
Heiko Carstens | 31915ab | 2010-09-16 14:42:25 +0200 | [diff] [blame] | 2869 | if (!mm) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2870 | next->active_mm = oldmm; |
| 2871 | atomic_inc(&oldmm->mm_count); |
| 2872 | enter_lazy_tlb(oldmm, next); |
| 2873 | } else |
| 2874 | switch_mm(oldmm, mm, next); |
| 2875 | |
Heiko Carstens | 31915ab | 2010-09-16 14:42:25 +0200 | [diff] [blame] | 2876 | if (!prev->mm) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2877 | prev->active_mm = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2878 | rq->prev_mm = oldmm; |
| 2879 | } |
Ingo Molnar | 3a5f5e4 | 2006-07-14 00:24:27 -0700 | [diff] [blame] | 2880 | /* |
| 2881 | * The runqueue lock will be released by the next |
| 2882 | * task (which is an invalid locking op but in the case |
| 2883 | * of the scheduler it's an obvious special-case), so we |
| 2884 | * do an early lockdep release here: |
| 2885 | */ |
| 2886 | #ifndef __ARCH_WANT_UNLOCKED_CTXSW |
Ingo Molnar | 8a25d5d | 2006-07-03 00:24:54 -0700 | [diff] [blame] | 2887 | spin_release(&rq->lock.dep_map, 1, _THIS_IP_); |
Ingo Molnar | 3a5f5e4 | 2006-07-14 00:24:27 -0700 | [diff] [blame] | 2888 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2889 | |
| 2890 | /* Here we just switch the register state and the stack. */ |
| 2891 | switch_to(prev, next, prev); |
| 2892 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 2893 | barrier(); |
| 2894 | /* |
| 2895 | * this_rq must be evaluated again because prev may have moved |
| 2896 | * CPUs since it called schedule(), thus the 'rq' on its stack |
| 2897 | * frame will be invalid. |
| 2898 | */ |
| 2899 | finish_task_switch(this_rq(), prev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2900 | } |
| 2901 | |
| 2902 | /* |
| 2903 | * nr_running, nr_uninterruptible and nr_context_switches: |
| 2904 | * |
| 2905 | * externally visible scheduler statistics: current number of runnable |
| 2906 | * threads, current number of uninterruptible-sleeping threads, total |
| 2907 | * number of context switches performed since bootup. |
| 2908 | */ |
| 2909 | unsigned long nr_running(void) |
| 2910 | { |
| 2911 | unsigned long i, sum = 0; |
| 2912 | |
| 2913 | for_each_online_cpu(i) |
| 2914 | sum += cpu_rq(i)->nr_running; |
| 2915 | |
| 2916 | return sum; |
| 2917 | } |
| 2918 | |
| 2919 | unsigned long nr_uninterruptible(void) |
| 2920 | { |
| 2921 | unsigned long i, sum = 0; |
| 2922 | |
KAMEZAWA Hiroyuki | 0a94502 | 2006-03-28 01:56:37 -0800 | [diff] [blame] | 2923 | for_each_possible_cpu(i) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2924 | sum += cpu_rq(i)->nr_uninterruptible; |
| 2925 | |
| 2926 | /* |
| 2927 | * Since we read the counters locklessly, it might be slightly |
| 2928 | * inaccurate. Do not allow it to go below zero though: |
| 2929 | */ |
| 2930 | if (unlikely((long)sum < 0)) |
| 2931 | sum = 0; |
| 2932 | |
| 2933 | return sum; |
| 2934 | } |
| 2935 | |
| 2936 | unsigned long long nr_context_switches(void) |
| 2937 | { |
Steven Rostedt | cc94abf | 2006-06-27 02:54:31 -0700 | [diff] [blame] | 2938 | int i; |
| 2939 | unsigned long long sum = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2940 | |
KAMEZAWA Hiroyuki | 0a94502 | 2006-03-28 01:56:37 -0800 | [diff] [blame] | 2941 | for_each_possible_cpu(i) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2942 | sum += cpu_rq(i)->nr_switches; |
| 2943 | |
| 2944 | return sum; |
| 2945 | } |
| 2946 | |
| 2947 | unsigned long nr_iowait(void) |
| 2948 | { |
| 2949 | unsigned long i, sum = 0; |
| 2950 | |
KAMEZAWA Hiroyuki | 0a94502 | 2006-03-28 01:56:37 -0800 | [diff] [blame] | 2951 | for_each_possible_cpu(i) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2952 | sum += atomic_read(&cpu_rq(i)->nr_iowait); |
| 2953 | |
| 2954 | return sum; |
| 2955 | } |
| 2956 | |
Peter Zijlstra | 8c215bd | 2010-07-01 09:07:17 +0200 | [diff] [blame] | 2957 | unsigned long nr_iowait_cpu(int cpu) |
Arjan van de Ven | 69d2587 | 2009-09-21 17:04:08 -0700 | [diff] [blame] | 2958 | { |
Peter Zijlstra | 8c215bd | 2010-07-01 09:07:17 +0200 | [diff] [blame] | 2959 | struct rq *this = cpu_rq(cpu); |
Arjan van de Ven | 69d2587 | 2009-09-21 17:04:08 -0700 | [diff] [blame] | 2960 | return atomic_read(&this->nr_iowait); |
| 2961 | } |
| 2962 | |
| 2963 | unsigned long this_cpu_load(void) |
| 2964 | { |
| 2965 | struct rq *this = this_rq(); |
| 2966 | return this->cpu_load[0]; |
| 2967 | } |
| 2968 | |
| 2969 | |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 2970 | /* Variables and functions for calc_load */ |
| 2971 | static atomic_long_t calc_load_tasks; |
| 2972 | static unsigned long calc_load_update; |
| 2973 | unsigned long avenrun[3]; |
| 2974 | EXPORT_SYMBOL(avenrun); |
| 2975 | |
Peter Zijlstra | 74f5187 | 2010-04-22 21:50:19 +0200 | [diff] [blame] | 2976 | static long calc_load_fold_active(struct rq *this_rq) |
| 2977 | { |
| 2978 | long nr_active, delta = 0; |
| 2979 | |
| 2980 | nr_active = this_rq->nr_running; |
| 2981 | nr_active += (long) this_rq->nr_uninterruptible; |
| 2982 | |
| 2983 | if (nr_active != this_rq->calc_load_active) { |
| 2984 | delta = nr_active - this_rq->calc_load_active; |
| 2985 | this_rq->calc_load_active = nr_active; |
| 2986 | } |
| 2987 | |
| 2988 | return delta; |
| 2989 | } |
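| | /* |
| | * Illustrative example: if this runqueue had 3 runnable + uninterruptible |
| | * tasks at the previous fold and now has 5, calc_load_fold_active() returns |
| | * +2 and remembers 5 in ->calc_load_active; the caller then adds that delta |
| | * to the global calc_load_tasks counter. |
| | */ |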
| 2990 | |
| 2991 | #ifdef CONFIG_NO_HZ |
| 2992 | /* |
| 2993 | * For NO_HZ we delay the active fold to the next LOAD_FREQ update. |
| 2994 | * |
| 2995 | * When making the ILB scale, we should try to pull this in as well. |
| 2996 | */ |
| 2997 | static atomic_long_t calc_load_tasks_idle; |
| 2998 | |
| 2999 | static void calc_load_account_idle(struct rq *this_rq) |
| 3000 | { |
| 3001 | long delta; |
| 3002 | |
| 3003 | delta = calc_load_fold_active(this_rq); |
| 3004 | if (delta) |
| 3005 | atomic_long_add(delta, &calc_load_tasks_idle); |
| 3006 | } |
| 3007 | |
| 3008 | static long calc_load_fold_idle(void) |
| 3009 | { |
| 3010 | long delta = 0; |
| 3011 | |
| 3012 | /* |
| 3013 | * It's got a race; we don't care... |
| 3014 | */ |
| 3015 | if (atomic_long_read(&calc_load_tasks_idle)) |
| 3016 | delta = atomic_long_xchg(&calc_load_tasks_idle, 0); |
| 3017 | |
| 3018 | return delta; |
| 3019 | } |
| 3020 | #else |
| 3021 | static void calc_load_account_idle(struct rq *this_rq) |
| 3022 | { |
| 3023 | } |
| 3024 | |
| 3025 | static inline long calc_load_fold_idle(void) |
| 3026 | { |
| 3027 | return 0; |
| 3028 | } |
| 3029 | #endif |
| 3030 | |
Thomas Gleixner | 2d02494 | 2009-05-02 20:08:52 +0200 | [diff] [blame] | 3031 | /** |
| 3032 | * get_avenrun - get the load average array |
| 3033 | * @loads: pointer to dest load array |
| 3034 | * @offset: offset to add |
| 3035 | * @shift: shift count to shift the result left |
| 3036 | * |
| 3037 | * These values are estimates at best, so no need for locking. |
| 3038 | */ |
| 3039 | void get_avenrun(unsigned long *loads, unsigned long offset, int shift) |
| 3040 | { |
| 3041 | loads[0] = (avenrun[0] + offset) << shift; |
| 3042 | loads[1] = (avenrun[1] + offset) << shift; |
| 3043 | loads[2] = (avenrun[2] + offset) << shift; |
| 3044 | } |
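| | /* |
| | * Illustrative example (assuming the usual constants, FSHIFT = 11 and |
| | * FIXED_1 = 1 << FSHIFT): a consumer such as /proc/loadavg reads avenrun |
| | * via get_avenrun(loads, FIXED_1/200, 0) and then prints |
| | * loads[0] >> FSHIFT as the integer part and |
| | * ((loads[0] & (FIXED_1 - 1)) * 100) >> FSHIFT as the two-digit fraction, |
| | * so a fixed-point value of 164 is shown as "0.08". |
| | */ |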
| 3045 | |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 3046 | static unsigned long |
| 3047 | calc_load(unsigned long load, unsigned long exp, unsigned long active) |
Jack Steiner | db1b1fe | 2006-03-31 02:31:21 -0800 | [diff] [blame] | 3048 | { |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 3049 | load *= exp; |
| 3050 | load += active * (FIXED_1 - exp); |
| 3051 | return load >> FSHIFT; |
| 3052 | } |
Jack Steiner | db1b1fe | 2006-03-31 02:31:21 -0800 | [diff] [blame] | 3053 | |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 3054 | /* |
| 3055 | * calc_global_load - update the avenrun load estimates 10 ticks after the |
| 3056 | * CPUs have updated calc_load_tasks. |
| 3057 | */ |
| 3058 | void calc_global_load(void) |
| 3059 | { |
| 3060 | unsigned long upd = calc_load_update + 10; |
| 3061 | long active; |
| 3062 | |
| 3063 | if (time_before(jiffies, upd)) |
| 3064 | return; |
| 3065 | |
| 3066 | active = atomic_long_read(&calc_load_tasks); |
| 3067 | active = active > 0 ? active * FIXED_1 : 0; |
| 3068 | |
| 3069 | avenrun[0] = calc_load(avenrun[0], EXP_1, active); |
| 3070 | avenrun[1] = calc_load(avenrun[1], EXP_5, active); |
| 3071 | avenrun[2] = calc_load(avenrun[2], EXP_15, active); |
| 3072 | |
| 3073 | calc_load_update += LOAD_FREQ; |
| 3074 | } |
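| | /* |
| | * Worked example (illustrative, assuming FSHIFT = 11, FIXED_1 = 2048 and |
| | * EXP_1 = 1884, i.e. ~exp(-5s/1min) in fixed point): with avenrun[0] = 0 |
| | * and one runnable task, active = 1 * FIXED_1 = 2048, so the 1-minute |
| | * figure becomes (0 * 1884 + 2048 * (2048 - 1884)) >> 11 = 164, which is |
| | * 164/2048 ~= 0.08 -- one LOAD_FREQ step of the exponential average. |
| | */ |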
| 3075 | |
| 3076 | /* |
Peter Zijlstra | 74f5187 | 2010-04-22 21:50:19 +0200 | [diff] [blame] | 3077 | * Called from update_cpu_load() to periodically update this CPU's |
| 3078 | * active count. |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 3079 | */ |
| 3080 | static void calc_load_account_active(struct rq *this_rq) |
| 3081 | { |
Peter Zijlstra | 74f5187 | 2010-04-22 21:50:19 +0200 | [diff] [blame] | 3082 | long delta; |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 3083 | |
Peter Zijlstra | 74f5187 | 2010-04-22 21:50:19 +0200 | [diff] [blame] | 3084 | if (time_before(jiffies, this_rq->calc_load_update)) |
| 3085 | return; |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 3086 | |
Peter Zijlstra | 74f5187 | 2010-04-22 21:50:19 +0200 | [diff] [blame] | 3087 | delta = calc_load_fold_active(this_rq); |
| 3088 | delta += calc_load_fold_idle(); |
| 3089 | if (delta) |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 3090 | atomic_long_add(delta, &calc_load_tasks); |
Peter Zijlstra | 74f5187 | 2010-04-22 21:50:19 +0200 | [diff] [blame] | 3091 | |
| 3092 | this_rq->calc_load_update += LOAD_FREQ; |
Jack Steiner | db1b1fe | 2006-03-31 02:31:21 -0800 | [diff] [blame] | 3093 | } |
| 3094 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3095 | /* |
Venkatesh Pallipadi | fdf3e95 | 2010-05-17 18:14:43 -0700 | [diff] [blame] | 3096 | * The exact cpuload at various idx values, calculated at every tick would be |
| 3097 | * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load |
| 3098 | * |
| 3099 | * If a cpu misses updates for n-1 ticks (as it was idle) and update gets called |
| 3100 | * on nth tick when cpu may be busy, then we have: |
| 3101 | * load = ((2^idx - 1) / 2^idx)^(n-1) * load |
| 3102 | * load = ((2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load |
| 3103 | * |
| 3104 | * decay_load_missed() below does efficient calculation of |
| 3105 | * load = ((2^idx - 1) / 2^idx)^(n-1) * load |
| 3106 | * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load |
| 3107 | * |
| 3108 | * The calculation is approximated on a 128 point scale. |
| 3109 | * degrade_zero_ticks is the number of ticks after which load at any |
| 3110 | * particular idx is approximated to be zero. |
| 3111 | * degrade_factor is a precomputed table, a row for each load idx. |
| 3112 | * Each column corresponds to degradation factor for a power of two ticks, |
| 3113 | * based on 128 point scale. |
| 3114 | * Example: |
| 3115 | * row 2, col 3 (=12) says that the degradation at load idx 2 after |
| 3116 | * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8). |
| 3117 | * |
| 3118 | * With this power of 2 load factors, we can degrade the load n times |
| 3119 | * by looking at 1 bits in n and doing as many mult/shift instead of |
| 3120 | * n mult/shifts needed by the exact degradation. |
| 3121 | */ |
| 3122 | #define DEGRADE_SHIFT 7 |
| 3123 | static const unsigned char |
| 3124 | degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128}; |
| 3125 | static const unsigned char |
| 3126 | degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = { |
| 3127 | {0, 0, 0, 0, 0, 0, 0, 0}, |
| 3128 | {64, 32, 8, 0, 0, 0, 0, 0}, |
| 3129 | {96, 72, 40, 12, 1, 0, 0}, |
| 3130 | {112, 98, 75, 43, 15, 1, 0}, |
| 3131 | {120, 112, 98, 76, 45, 16, 2} }; |
| 3132 | |
| 3133 | /* |
| 3134 | * Update cpu_load for any missed ticks, due to tickless idle. The backlog |
| 3135 | * builds up only while the CPU is idle, so we just decay the old load without |
| 3136 | * adding any new load. |
| 3137 | */ |
| 3138 | static unsigned long |
| 3139 | decay_load_missed(unsigned long load, unsigned long missed_updates, int idx) |
| 3140 | { |
| 3141 | int j = 0; |
| 3142 | |
| 3143 | if (!missed_updates) |
| 3144 | return load; |
| 3145 | |
| 3146 | if (missed_updates >= degrade_zero_ticks[idx]) |
| 3147 | return 0; |
| 3148 | |
| 3149 | if (idx == 1) |
| 3150 | return load >> missed_updates; |
| 3151 | |
| 3152 | while (missed_updates) { |
| 3153 | if (missed_updates % 2) |
| 3154 | load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT; |
| 3155 | |
| 3156 | missed_updates >>= 1; |
| 3157 | j++; |
| 3158 | } |
| 3159 | return load; |
| 3160 | } |
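| | /* |
| | * Illustrative example: for idx = 2 and 8 missed ticks, the load is scaled |
| | * by degrade_factor[2][3] = 12/128, the precomputed approximation of |
| | * (3/4)^8. For a non-power-of-two count such as 5 (binary 101), the loop |
| | * above multiplies by degrade_factor[idx][0] and degrade_factor[idx][2], |
| | * i.e. the per-tick factor once and its 4-tick power once. |
| | */ |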
| 3161 | |
| 3162 | /* |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3163 | * Update rq->cpu_load[] statistics. This function is usually called every |
Venkatesh Pallipadi | fdf3e95 | 2010-05-17 18:14:43 -0700 | [diff] [blame] | 3164 | * scheduler tick (TICK_NSEC). With tickless idle this will not be called |
| 3165 | * every tick. We fix it up based on jiffies. |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 3166 | */ |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3167 | static void update_cpu_load(struct rq *this_rq) |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 3168 | { |
Dmitry Adamushko | 495eca4 | 2007-10-15 17:00:06 +0200 | [diff] [blame] | 3169 | unsigned long this_load = this_rq->load.weight; |
Venkatesh Pallipadi | fdf3e95 | 2010-05-17 18:14:43 -0700 | [diff] [blame] | 3170 | unsigned long curr_jiffies = jiffies; |
| 3171 | unsigned long pending_updates; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3172 | int i, scale; |
| 3173 | |
| 3174 | this_rq->nr_load_updates++; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3175 | |
Venkatesh Pallipadi | fdf3e95 | 2010-05-17 18:14:43 -0700 | [diff] [blame] | 3176 | /* Avoid repeated calls on same jiffy, when moving in and out of idle */ |
| 3177 | if (curr_jiffies == this_rq->last_load_update_tick) |
| 3178 | return; |
| 3179 | |
| 3180 | pending_updates = curr_jiffies - this_rq->last_load_update_tick; |
| 3181 | this_rq->last_load_update_tick = curr_jiffies; |
| 3182 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3183 | /* Update our load: */ |
Venkatesh Pallipadi | fdf3e95 | 2010-05-17 18:14:43 -0700 | [diff] [blame] | 3184 | this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */ |
| 3185 | for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) { |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3186 | unsigned long old_load, new_load; |
| 3187 | |
| 3188 | /* scale is effectively 1 << i now, and >> i divides by scale */ |
| 3189 | |
| 3190 | old_load = this_rq->cpu_load[i]; |
Venkatesh Pallipadi | fdf3e95 | 2010-05-17 18:14:43 -0700 | [diff] [blame] | 3191 | old_load = decay_load_missed(old_load, pending_updates - 1, i); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3192 | new_load = this_load; |
Ingo Molnar | a25707f | 2007-10-15 17:00:03 +0200 | [diff] [blame] | 3193 | /* |
| 3194 | * Round up the averaging division if load is increasing. This |
| 3195 | * prevents us from getting stuck on 9 if the load is 10, for |
| 3196 | * example. |
| 3197 | */ |
| 3198 | if (new_load > old_load) |
Venkatesh Pallipadi | fdf3e95 | 2010-05-17 18:14:43 -0700 | [diff] [blame] | 3199 | new_load += scale - 1; |
| 3200 | |
| 3201 | this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3202 | } |
Suresh Siddha | da2b71e | 2010-08-23 13:42:51 -0700 | [diff] [blame] | 3203 | |
| 3204 | sched_avg_update(this_rq); |
Venkatesh Pallipadi | fdf3e95 | 2010-05-17 18:14:43 -0700 | [diff] [blame] | 3205 | } |
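| | /* |
| | * Illustrative example of the round-up above: at idx 1 (scale = 2), with |
| | * cpu_load[1] = 9 and a current weight of 10, plain averaging would give |
| | * (9 * 1 + 10) >> 1 = 9 forever; bumping the new load to 10 + (scale - 1) |
| | * yields (9 + 11) >> 1 = 10, so the average can actually reach the load. |
| | */ |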
| 3206 | |
| 3207 | static void update_cpu_load_active(struct rq *this_rq) |
| 3208 | { |
| 3209 | update_cpu_load(this_rq); |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 3210 | |
Peter Zijlstra | 74f5187 | 2010-04-22 21:50:19 +0200 | [diff] [blame] | 3211 | calc_load_account_active(this_rq); |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 3212 | } |
| 3213 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3214 | #ifdef CONFIG_SMP |
| 3215 | |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 3216 | /* |
Peter Zijlstra | 3802290 | 2009-12-16 18:04:37 +0100 | [diff] [blame] | 3217 | * sched_exec - execve() is a valuable balancing opportunity, because at |
| 3218 | * this point the task has the smallest effective memory and cache footprint. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3219 | */ |
Peter Zijlstra | 3802290 | 2009-12-16 18:04:37 +0100 | [diff] [blame] | 3220 | void sched_exec(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3221 | { |
Peter Zijlstra | 3802290 | 2009-12-16 18:04:37 +0100 | [diff] [blame] | 3222 | struct task_struct *p = current; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3223 | unsigned long flags; |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 3224 | struct rq *rq; |
Peter Zijlstra | 0017d73 | 2010-03-24 18:34:10 +0100 | [diff] [blame] | 3225 | int dest_cpu; |
Peter Zijlstra | 3802290 | 2009-12-16 18:04:37 +0100 | [diff] [blame] | 3226 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3227 | rq = task_rq_lock(p, &flags); |
Peter Zijlstra | 0017d73 | 2010-03-24 18:34:10 +0100 | [diff] [blame] | 3228 | dest_cpu = p->sched_class->select_task_rq(rq, p, SD_BALANCE_EXEC, 0); |
| 3229 | if (dest_cpu == smp_processor_id()) |
| 3230 | goto unlock; |
Peter Zijlstra | 3802290 | 2009-12-16 18:04:37 +0100 | [diff] [blame] | 3231 | |
| 3232 | /* |
| 3233 | * select_task_rq() can race against ->cpus_allowed |
| 3234 | */ |
Oleg Nesterov | 30da688 | 2010-03-15 10:10:19 +0100 | [diff] [blame] | 3235 | if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) && |
Nikanth Karthikesan | b7a2b39 | 2010-11-26 12:37:09 +0530 | [diff] [blame] | 3236 | likely(cpu_active(dest_cpu)) && migrate_task(p, rq)) { |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 3237 | struct migration_arg arg = { p, dest_cpu }; |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 3238 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3239 | task_rq_unlock(rq, &flags); |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 3240 | stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3241 | return; |
| 3242 | } |
Peter Zijlstra | 0017d73 | 2010-03-24 18:34:10 +0100 | [diff] [blame] | 3243 | unlock: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3244 | task_rq_unlock(rq, &flags); |
| 3245 | } |
| 3246 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3247 | #endif |
| 3248 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3249 | DEFINE_PER_CPU(struct kernel_stat, kstat); |
| 3250 | |
| 3251 | EXPORT_PER_CPU_SYMBOL(kstat); |
| 3252 | |
| 3253 | /* |
Hidetoshi Seto | c5f8d99 | 2009-03-31 16:56:03 +0900 | [diff] [blame] | 3254 | * Return any ns on the sched_clock that have not yet been accounted in |
Frank Mayhar | f06febc | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 3255 | * @p in case that task is currently running. |
Hidetoshi Seto | c5f8d99 | 2009-03-31 16:56:03 +0900 | [diff] [blame] | 3256 | * |
| 3257 | * Called with task_rq_lock() held on @rq. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3258 | */ |
Hidetoshi Seto | c5f8d99 | 2009-03-31 16:56:03 +0900 | [diff] [blame] | 3259 | static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq) |
| 3260 | { |
| 3261 | u64 ns = 0; |
| 3262 | |
| 3263 | if (task_current(rq, p)) { |
| 3264 | update_rq_clock(rq); |
Venkatesh Pallipadi | 305e683 | 2010-10-04 17:03:21 -0700 | [diff] [blame] | 3265 | ns = rq->clock_task - p->se.exec_start; |
Hidetoshi Seto | c5f8d99 | 2009-03-31 16:56:03 +0900 | [diff] [blame] | 3266 | if ((s64)ns < 0) |
| 3267 | ns = 0; |
| 3268 | } |
| 3269 | |
| 3270 | return ns; |
| 3271 | } |
| 3272 | |
Frank Mayhar | bb34d92 | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 3273 | unsigned long long task_delta_exec(struct task_struct *p) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3274 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3275 | unsigned long flags; |
Ingo Molnar | 41b86e9 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 3276 | struct rq *rq; |
Frank Mayhar | bb34d92 | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 3277 | u64 ns = 0; |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 3278 | |
Ingo Molnar | 41b86e9 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 3279 | rq = task_rq_lock(p, &flags); |
Hidetoshi Seto | c5f8d99 | 2009-03-31 16:56:03 +0900 | [diff] [blame] | 3280 | ns = do_task_delta_exec(p, rq); |
| 3281 | task_rq_unlock(rq, &flags); |
Ingo Molnar | 1508487 | 2008-09-30 08:28:17 +0200 | [diff] [blame] | 3282 | |
Hidetoshi Seto | c5f8d99 | 2009-03-31 16:56:03 +0900 | [diff] [blame] | 3283 | return ns; |
| 3284 | } |
Frank Mayhar | f06febc | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 3285 | |
Hidetoshi Seto | c5f8d99 | 2009-03-31 16:56:03 +0900 | [diff] [blame] | 3286 | /* |
| 3287 | * Return accounted runtime for the task. |
| 3288 | * In case the task is currently running, return the runtime plus current's |
| 3289 | * pending runtime that has not been accounted yet. |
| 3290 | */ |
| 3291 | unsigned long long task_sched_runtime(struct task_struct *p) |
| 3292 | { |
| 3293 | unsigned long flags; |
| 3294 | struct rq *rq; |
| 3295 | u64 ns = 0; |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 3296 | |
Hidetoshi Seto | c5f8d99 | 2009-03-31 16:56:03 +0900 | [diff] [blame] | 3297 | rq = task_rq_lock(p, &flags); |
| 3298 | ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq); |
| 3299 | task_rq_unlock(rq, &flags); |
| 3300 | |
| 3301 | return ns; |
| 3302 | } |
| 3303 | |
| 3304 | /* |
| 3305 | * Return sum_exec_runtime for the thread group. |
| 3306 | * In case the task is currently running, return the sum plus current's |
| 3307 | * pending runtime that has not been accounted yet. |
| 3308 | * |
| 3309 | * Note that the thread group might have other running tasks as well, |
| 3310 | * so the return value does not include other pending runtime that other |
| 3311 | * running tasks might have. |
| 3312 | */ |
| 3313 | unsigned long long thread_group_sched_runtime(struct task_struct *p) |
| 3314 | { |
| 3315 | struct task_cputime totals; |
| 3316 | unsigned long flags; |
| 3317 | struct rq *rq; |
| 3318 | u64 ns; |
| 3319 | |
| 3320 | rq = task_rq_lock(p, &flags); |
| 3321 | thread_group_cputime(p, &totals); |
| 3322 | ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3323 | task_rq_unlock(rq, &flags); |
| 3324 | |
| 3325 | return ns; |
| 3326 | } |
| 3327 | |
| 3328 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3329 | * Account user cpu time to a process. |
| 3330 | * @p: the process that the cpu time gets accounted to |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3331 | * @cputime: the cpu time spent in user space since the last update |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3332 | * @cputime_scaled: cputime scaled by cpu frequency |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3333 | */ |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3334 | void account_user_time(struct task_struct *p, cputime_t cputime, |
| 3335 | cputime_t cputime_scaled) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3336 | { |
| 3337 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
| 3338 | cputime64_t tmp; |
| 3339 | |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3340 | /* Add user time to process. */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3341 | p->utime = cputime_add(p->utime, cputime); |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3342 | p->utimescaled = cputime_add(p->utimescaled, cputime_scaled); |
Frank Mayhar | f06febc | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 3343 | account_group_user_time(p, cputime); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3344 | |
| 3345 | /* Add user time to cpustat. */ |
| 3346 | tmp = cputime_to_cputime64(cputime); |
| 3347 | if (TASK_NICE(p) > 0) |
| 3348 | cpustat->nice = cputime64_add(cpustat->nice, tmp); |
| 3349 | else |
| 3350 | cpustat->user = cputime64_add(cpustat->user, tmp); |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 3351 | |
| 3352 | cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime); |
Jonathan Lim | 49b5cf3 | 2008-07-25 01:48:40 -0700 | [diff] [blame] | 3353 | /* Account for user time used */ |
| 3354 | acct_update_integrals(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3355 | } |
| 3356 | |
| 3357 | /* |
Laurent Vivier | 94886b8 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 3358 | * Account guest cpu time to a process. |
| 3359 | * @p: the process that the cpu time gets accounted to |
| 3360 | * @cputime: the cpu time spent in virtual machine since the last update |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3361 | * @cputime_scaled: cputime scaled by cpu frequency |
Laurent Vivier | 94886b8 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 3362 | */ |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3363 | static void account_guest_time(struct task_struct *p, cputime_t cputime, |
| 3364 | cputime_t cputime_scaled) |
Laurent Vivier | 94886b8 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 3365 | { |
| 3366 | cputime64_t tmp; |
| 3367 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
| 3368 | |
| 3369 | tmp = cputime_to_cputime64(cputime); |
| 3370 | |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3371 | /* Add guest time to process. */ |
Laurent Vivier | 94886b8 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 3372 | p->utime = cputime_add(p->utime, cputime); |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3373 | p->utimescaled = cputime_add(p->utimescaled, cputime_scaled); |
Frank Mayhar | f06febc | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 3374 | account_group_user_time(p, cputime); |
Laurent Vivier | 94886b8 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 3375 | p->gtime = cputime_add(p->gtime, cputime); |
| 3376 | |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3377 | /* Add guest time to cpustat. */ |
Ryota Ozaki | ce0e7b2 | 2009-10-24 01:20:10 +0900 | [diff] [blame] | 3378 | if (TASK_NICE(p) > 0) { |
| 3379 | cpustat->nice = cputime64_add(cpustat->nice, tmp); |
| 3380 | cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp); |
| 3381 | } else { |
| 3382 | cpustat->user = cputime64_add(cpustat->user, tmp); |
| 3383 | cpustat->guest = cputime64_add(cpustat->guest, tmp); |
| 3384 | } |
Laurent Vivier | 94886b8 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 3385 | } |
| 3386 | |
| 3387 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3388 | * Account system cpu time to a process. |
| 3389 | * @p: the process that the cpu time gets accounted to |
| 3390 | * @hardirq_offset: the offset to subtract from hardirq_count() |
| 3391 | * @cputime: the cpu time spent in kernel space since the last update |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3392 | * @cputime_scaled: cputime scaled by cpu frequency |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3393 | */ |
| 3394 | void account_system_time(struct task_struct *p, int hardirq_offset, |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3395 | cputime_t cputime, cputime_t cputime_scaled) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3396 | { |
| 3397 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3398 | cputime64_t tmp; |
| 3399 | |
Harvey Harrison | 983ed7a | 2008-04-24 18:17:55 -0700 | [diff] [blame] | 3400 | if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) { |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3401 | account_guest_time(p, cputime, cputime_scaled); |
Harvey Harrison | 983ed7a | 2008-04-24 18:17:55 -0700 | [diff] [blame] | 3402 | return; |
| 3403 | } |
Laurent Vivier | 94886b8 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 3404 | |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3405 | /* Add system time to process. */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3406 | p->stime = cputime_add(p->stime, cputime); |
Martin Schwidefsky | 457533a | 2008-12-31 15:11:37 +0100 | [diff] [blame] | 3407 | p->stimescaled = cputime_add(p->stimescaled, cputime_scaled); |
Frank Mayhar | f06febc | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 3408 | account_group_system_time(p, cputime); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3409 | |
| 3410 | /* Add system time to cpustat. */ |
| 3411 | tmp = cputime_to_cputime64(cputime); |
| 3412 | if (hardirq_count() - hardirq_offset) |
| 3413 | cpustat->irq = cputime64_add(cpustat->irq, tmp); |
Venkatesh Pallipadi | 75e1056 | 2010-10-04 17:03:16 -0700 | [diff] [blame] | 3414 | else if (in_serving_softirq()) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3415 | cpustat->softirq = cputime64_add(cpustat->softirq, tmp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3416 | else |
Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 3417 | cpustat->system = cputime64_add(cpustat->system, tmp); |
| 3418 | |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 3419 | cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime); |
| 3420 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3421 | /* Account for system time used */ |
| 3422 | acct_update_integrals(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3423 | } |
| 3424 | |
| 3425 | /* |
| 3426 | * Account for involuntary wait time. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3427 | * @cputime: the cpu time spent in involuntary wait |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3428 | */ |
Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 3429 | void account_steal_time(cputime_t cputime) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3430 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3431 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 3432 | cputime64_t cputime64 = cputime_to_cputime64(cputime); |
| 3433 | |
| 3434 | cpustat->steal = cputime64_add(cpustat->steal, cputime64); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3435 | } |
| 3436 | |
Christoph Lameter | 7835b98 | 2006-12-10 02:20:22 -0800 | [diff] [blame] | 3437 | /* |
Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 3438 | * Account for idle time. |
| 3439 | * @cputime: the cpu time spent in idle wait |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3440 | */ |
Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 3441 | void account_idle_time(cputime_t cputime) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3442 | { |
| 3443 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 3444 | cputime64_t cputime64 = cputime_to_cputime64(cputime); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3445 | struct rq *rq = this_rq(); |
| 3446 | |
Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 3447 | if (atomic_read(&rq->nr_iowait) > 0) |
| 3448 | cpustat->iowait = cputime64_add(cpustat->iowait, cputime64); |
| 3449 | else |
| 3450 | cpustat->idle = cputime64_add(cpustat->idle, cputime64); |
Christoph Lameter | 7835b98 | 2006-12-10 02:20:22 -0800 | [diff] [blame] | 3451 | } |
| 3452 | |
Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 3453 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING |
| 3454 | |
| 3455 | /* |
| 3456 | * Account a single tick of cpu time. |
| 3457 | * @p: the process that the cpu time gets accounted to |
| 3458 | * @user_tick: indicates if the tick is a user or a system tick |
| 3459 | */ |
| 3460 | void account_process_tick(struct task_struct *p, int user_tick) |
| 3461 | { |
Stanislaw Gruszka | a42548a | 2009-07-29 12:15:29 +0200 | [diff] [blame] | 3462 | cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy); |
Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 3463 | struct rq *rq = this_rq(); |
| 3464 | |
| 3465 | if (user_tick) |
Stanislaw Gruszka | a42548a | 2009-07-29 12:15:29 +0200 | [diff] [blame] | 3466 | account_user_time(p, cputime_one_jiffy, one_jiffy_scaled); |
Eric Dumazet | f5f293a | 2009-04-29 14:44:49 +0200 | [diff] [blame] | 3467 | else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET)) |
Stanislaw Gruszka | a42548a | 2009-07-29 12:15:29 +0200 | [diff] [blame] | 3468 | account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy, |
Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 3469 | one_jiffy_scaled); |
| 3470 | else |
Stanislaw Gruszka | a42548a | 2009-07-29 12:15:29 +0200 | [diff] [blame] | 3471 | account_idle_time(cputime_one_jiffy); |
Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 3472 | } |
| 3473 | |
| 3474 | /* |
| 3475 | * Account multiple ticks of steal time. |
| 3476 | * @p: the process from which the cpu time has been stolen |
| 3477 | * @ticks: number of stolen ticks |
| 3478 | */ |
| 3479 | void account_steal_ticks(unsigned long ticks) |
| 3480 | { |
| 3481 | account_steal_time(jiffies_to_cputime(ticks)); |
| 3482 | } |
| 3483 | |
| 3484 | /* |
| 3485 | * Account multiple ticks of idle time. |
| 3486 | * @ticks: number of stolen ticks |
| 3487 | * @ticks: number of idle ticks |
| 3488 | void account_idle_ticks(unsigned long ticks) |
| 3489 | { |
| 3490 | account_idle_time(jiffies_to_cputime(ticks)); |
| 3491 | } |
| 3492 | |
| 3493 | #endif |
| 3494 | |
Christoph Lameter | 7835b98 | 2006-12-10 02:20:22 -0800 | [diff] [blame] | 3495 | /* |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 3496 | * Use precise platform statistics if available: |
| 3497 | */ |
| 3498 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
Hidetoshi Seto | d180c5b | 2009-11-26 14:48:30 +0900 | [diff] [blame] | 3499 | void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 3500 | { |
Hidetoshi Seto | d99ca3b | 2009-12-02 17:26:47 +0900 | [diff] [blame] | 3501 | *ut = p->utime; |
| 3502 | *st = p->stime; |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 3503 | } |
| 3504 | |
Hidetoshi Seto | 0cf55e1 | 2009-12-02 17:28:07 +0900 | [diff] [blame] | 3505 | void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 3506 | { |
Hidetoshi Seto | 0cf55e1 | 2009-12-02 17:28:07 +0900 | [diff] [blame] | 3507 | struct task_cputime cputime; |
| 3508 | |
| 3509 | thread_group_cputime(p, &cputime); |
| 3510 | |
| 3511 | *ut = cputime.utime; |
| 3512 | *st = cputime.stime; |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 3513 | } |
| 3514 | #else |
Hidetoshi Seto | 761b1d2 | 2009-11-12 13:33:45 +0900 | [diff] [blame] | 3515 | |
| 3516 | #ifndef nsecs_to_cputime |
Hidetoshi Seto | b7b20df | 2009-11-26 14:49:27 +0900 | [diff] [blame] | 3517 | # define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs) |
Hidetoshi Seto | 761b1d2 | 2009-11-12 13:33:45 +0900 | [diff] [blame] | 3518 | #endif |
| 3519 | |
Hidetoshi Seto | d180c5b | 2009-11-26 14:48:30 +0900 | [diff] [blame] | 3520 | void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 3521 | { |
Hidetoshi Seto | d99ca3b | 2009-12-02 17:26:47 +0900 | [diff] [blame] | 3522 | cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime); |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 3523 | |
| 3524 | /* |
| 3525 | * Use CFS's precise accounting: |
| 3526 | */ |
Hidetoshi Seto | d180c5b | 2009-11-26 14:48:30 +0900 | [diff] [blame] | 3527 | rtime = nsecs_to_cputime(p->se.sum_exec_runtime); |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 3528 | |
| 3529 | if (total) { |
Stanislaw Gruszka | e75e863 | 2010-09-14 16:35:14 +0200 | [diff] [blame] | 3530 | u64 temp = rtime; |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 3531 | |
Stanislaw Gruszka | e75e863 | 2010-09-14 16:35:14 +0200 | [diff] [blame] | 3532 | temp *= utime; |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 3533 | do_div(temp, total); |
Hidetoshi Seto | d180c5b | 2009-11-26 14:48:30 +0900 | [diff] [blame] | 3534 | utime = (cputime_t)temp; |
| 3535 | } else |
| 3536 | utime = rtime; |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 3537 | |
| 3538 | /* |
Hidetoshi Seto | d180c5b | 2009-11-26 14:48:30 +0900 | [diff] [blame] | 3539 | * Compare with previous values, to keep monotonicity: |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 3540 | */ |
Hidetoshi Seto | 761b1d2 | 2009-11-12 13:33:45 +0900 | [diff] [blame] | 3541 | p->prev_utime = max(p->prev_utime, utime); |
Hidetoshi Seto | d99ca3b | 2009-12-02 17:26:47 +0900 | [diff] [blame] | 3542 | p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime)); |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 3543 | |
Hidetoshi Seto | d99ca3b | 2009-12-02 17:26:47 +0900 | [diff] [blame] | 3544 | *ut = p->prev_utime; |
| 3545 | *st = p->prev_stime; |
Hidetoshi Seto | d180c5b | 2009-11-26 14:48:30 +0900 | [diff] [blame] | 3546 | } |
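| | /* |
| | * Illustrative example: if tick sampling recorded utime = 2 and stime = 2 |
| | * jiffies but CFS says the task really ran rtime = 3 jiffies, the split |
| | * above gives utime = 3 * 2 / 4 = 1 and stime = rtime - prev_utime = 2, |
| | * with the max() against the previous snapshot keeping both monotonic. |
| | */ |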
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 3547 | |
Hidetoshi Seto | 0cf55e1 | 2009-12-02 17:28:07 +0900 | [diff] [blame] | 3548 | /* |
| 3549 | * Must be called with siglock held. |
| 3550 | */ |
| 3551 | void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) |
| 3552 | { |
| 3553 | struct signal_struct *sig = p->signal; |
| 3554 | struct task_cputime cputime; |
| 3555 | cputime_t rtime, utime, total; |
| 3556 | |
| 3557 | thread_group_cputime(p, &cputime); |
| 3558 | |
| 3559 | total = cputime_add(cputime.utime, cputime.stime); |
| 3560 | rtime = nsecs_to_cputime(cputime.sum_exec_runtime); |
| 3561 | |
| 3562 | if (total) { |
Stanislaw Gruszka | e75e863 | 2010-09-14 16:35:14 +0200 | [diff] [blame] | 3563 | u64 temp = rtime; |
Hidetoshi Seto | 0cf55e1 | 2009-12-02 17:28:07 +0900 | [diff] [blame] | 3564 | |
Stanislaw Gruszka | e75e863 | 2010-09-14 16:35:14 +0200 | [diff] [blame] | 3565 | temp *= cputime.utime; |
Hidetoshi Seto | 0cf55e1 | 2009-12-02 17:28:07 +0900 | [diff] [blame] | 3566 | do_div(temp, total); |
| 3567 | utime = (cputime_t)temp; |
| 3568 | } else |
| 3569 | utime = rtime; |
| 3570 | |
| 3571 | sig->prev_utime = max(sig->prev_utime, utime); |
| 3572 | sig->prev_stime = max(sig->prev_stime, |
| 3573 | cputime_sub(rtime, sig->prev_utime)); |
| 3574 | |
| 3575 | *ut = sig->prev_utime; |
| 3576 | *st = sig->prev_stime; |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 3577 | } |
| 3578 | #endif |
| 3579 | |
Balbir Singh | 4904862 | 2008-09-05 18:12:23 +0200 | [diff] [blame] | 3580 | /* |
Christoph Lameter | 7835b98 | 2006-12-10 02:20:22 -0800 | [diff] [blame] | 3581 | * This function gets called by the timer code, with HZ frequency. |
| 3582 | * We call it with interrupts disabled. |
| 3583 | * |
| 3584 | * It also gets called by the fork code, when changing the parent's |
| 3585 | * timeslices. |
| 3586 | */ |
| 3587 | void scheduler_tick(void) |
| 3588 | { |
Christoph Lameter | 7835b98 | 2006-12-10 02:20:22 -0800 | [diff] [blame] | 3589 | int cpu = smp_processor_id(); |
| 3590 | struct rq *rq = cpu_rq(cpu); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3591 | struct task_struct *curr = rq->curr; |
Peter Zijlstra | 3e51f33 | 2008-05-03 18:29:28 +0200 | [diff] [blame] | 3592 | |
| 3593 | sched_clock_tick(); |
Christoph Lameter | 7835b98 | 2006-12-10 02:20:22 -0800 | [diff] [blame] | 3594 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 3595 | raw_spin_lock(&rq->lock); |
Peter Zijlstra | 3e51f33 | 2008-05-03 18:29:28 +0200 | [diff] [blame] | 3596 | update_rq_clock(rq); |
Venkatesh Pallipadi | fdf3e95 | 2010-05-17 18:14:43 -0700 | [diff] [blame] | 3597 | update_cpu_load_active(rq); |
Peter Zijlstra | fa85ae2 | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 3598 | curr->sched_class->task_tick(rq, curr, 0); |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 3599 | raw_spin_unlock(&rq->lock); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3600 | |
Peter Zijlstra | e9d2b06 | 2010-09-17 11:28:50 +0200 | [diff] [blame] | 3601 | perf_event_task_tick(); |
Peter Zijlstra | e220d2d | 2009-05-23 18:28:55 +0200 | [diff] [blame] | 3602 | |
Christoph Lameter | e418e1c | 2006-12-10 02:20:23 -0800 | [diff] [blame] | 3603 | #ifdef CONFIG_SMP |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3604 | rq->idle_at_tick = idle_cpu(cpu); |
| 3605 | trigger_load_balance(rq, cpu); |
Christoph Lameter | e418e1c | 2006-12-10 02:20:23 -0800 | [diff] [blame] | 3606 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3607 | } |
| 3608 | |
Lai Jiangshan | 132380a | 2009-04-02 14:18:25 +0800 | [diff] [blame] | 3609 | notrace unsigned long get_parent_ip(unsigned long addr) |
Steven Rostedt | 6cd8a4b | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3610 | { |
| 3611 | if (in_lock_functions(addr)) { |
| 3612 | addr = CALLER_ADDR2; |
| 3613 | if (in_lock_functions(addr)) |
| 3614 | addr = CALLER_ADDR3; |
| 3615 | } |
| 3616 | return addr; |
| 3617 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3618 | |
Steven Rostedt | 7e49fcc | 2009-01-22 19:01:40 -0500 | [diff] [blame] | 3619 | #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ |
| 3620 | defined(CONFIG_PREEMPT_TRACER)) |
| 3621 | |
Srinivasa Ds | 4362758 | 2008-02-23 15:24:04 -0800 | [diff] [blame] | 3622 | void __kprobes add_preempt_count(int val) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3623 | { |
Steven Rostedt | 6cd8a4b | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3624 | #ifdef CONFIG_DEBUG_PREEMPT |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3625 | /* |
| 3626 | * Underflow? |
| 3627 | */ |
Ingo Molnar | 9a11b49 | 2006-07-03 00:24:33 -0700 | [diff] [blame] | 3628 | if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) |
| 3629 | return; |
Steven Rostedt | 6cd8a4b | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3630 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3631 | preempt_count() += val; |
Steven Rostedt | 6cd8a4b | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3632 | #ifdef CONFIG_DEBUG_PREEMPT |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3633 | /* |
| 3634 | * Spinlock count overflowing soon? |
| 3635 | */ |
Miguel Ojeda Sandonis | 33859f7 | 2006-12-10 02:20:38 -0800 | [diff] [blame] | 3636 | DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= |
| 3637 | PREEMPT_MASK - 10); |
Steven Rostedt | 6cd8a4b | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3638 | #endif |
| 3639 | if (preempt_count() == val) |
| 3640 | trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3641 | } |
| 3642 | EXPORT_SYMBOL(add_preempt_count); |
| 3643 | |
Srinivasa Ds | 4362758 | 2008-02-23 15:24:04 -0800 | [diff] [blame] | 3644 | void __kprobes sub_preempt_count(int val) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3645 | { |
Steven Rostedt | 6cd8a4b | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3646 | #ifdef CONFIG_DEBUG_PREEMPT |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3647 | /* |
| 3648 | * Underflow? |
| 3649 | */ |
Ingo Molnar | 01e3eb8 | 2009-01-12 13:00:50 +0100 | [diff] [blame] | 3650 | if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) |
Ingo Molnar | 9a11b49 | 2006-07-03 00:24:33 -0700 | [diff] [blame] | 3651 | return; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3652 | /* |
| 3653 | * Is the spinlock portion underflowing? |
| 3654 | */ |
Ingo Molnar | 9a11b49 | 2006-07-03 00:24:33 -0700 | [diff] [blame] | 3655 | if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && |
| 3656 | !(preempt_count() & PREEMPT_MASK))) |
| 3657 | return; |
Steven Rostedt | 6cd8a4b | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3658 | #endif |
Ingo Molnar | 9a11b49 | 2006-07-03 00:24:33 -0700 | [diff] [blame] | 3659 | |
Steven Rostedt | 6cd8a4b | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3660 | if (preempt_count() == val) |
| 3661 | trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3662 | preempt_count() -= val; |
| 3663 | } |
| 3664 | EXPORT_SYMBOL(sub_preempt_count); |
| 3665 | |
| 3666 | #endif |
| 3667 | |
| 3668 | /* |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3669 | * Print scheduling while atomic bug: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3670 | */ |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3671 | static noinline void __schedule_bug(struct task_struct *prev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3672 | { |
Satyam Sharma | 838225b | 2007-10-24 18:23:50 +0200 | [diff] [blame] | 3673 | struct pt_regs *regs = get_irq_regs(); |
| 3674 | |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 3675 | printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", |
| 3676 | prev->comm, prev->pid, preempt_count()); |
Satyam Sharma | 838225b | 2007-10-24 18:23:50 +0200 | [diff] [blame] | 3677 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3678 | debug_show_held_locks(prev); |
Arjan van de Ven | e21f5b1 | 2008-05-23 09:05:58 -0700 | [diff] [blame] | 3679 | print_modules(); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3680 | if (irqs_disabled()) |
| 3681 | print_irqtrace_events(prev); |
Satyam Sharma | 838225b | 2007-10-24 18:23:50 +0200 | [diff] [blame] | 3682 | |
| 3683 | if (regs) |
| 3684 | show_regs(regs); |
| 3685 | else |
| 3686 | dump_stack(); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3687 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3688 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3689 | /* |
| 3690 | * Various schedule()-time debugging checks and statistics: |
| 3691 | */ |
| 3692 | static inline void schedule_debug(struct task_struct *prev) |
| 3693 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3694 | /* |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 3695 | * Test if we are atomic. Since do_exit() needs to call into |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3696 | * schedule() atomically, we ignore that path for now. |
| 3697 | * Otherwise, whine if we are scheduling when we should not be. |
| 3698 | */ |
Roel Kluin | 3f33a7c | 2008-05-13 23:44:11 +0200 | [diff] [blame] | 3699 | if (unlikely(in_atomic_preempt_off() && !prev->exit_state)) |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3700 | __schedule_bug(prev); |
| 3701 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3702 | profile_hit(SCHED_PROFILING, __builtin_return_address(0)); |
| 3703 | |
Ingo Molnar | 2d72376 | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 3704 | schedstat_inc(this_rq(), sched_count); |
Ingo Molnar | b8efb56 | 2007-10-15 17:00:10 +0200 | [diff] [blame] | 3705 | #ifdef CONFIG_SCHEDSTATS |
| 3706 | if (unlikely(prev->lock_depth >= 0)) { |
Ingo Molnar | 2d72376 | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 3707 | schedstat_inc(this_rq(), bkl_count); |
| 3708 | schedstat_inc(prev, sched_info.bkl_count); |
Ingo Molnar | b8efb56 | 2007-10-15 17:00:10 +0200 | [diff] [blame] | 3709 | } |
| 3710 | #endif |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3711 | } |
| 3712 | |
Peter Zijlstra | 6cecd08 | 2009-11-30 13:00:37 +0100 | [diff] [blame] | 3713 | static void put_prev_task(struct rq *rq, struct task_struct *prev) |
Mike Galbraith | df1c99d | 2009-03-10 19:08:11 +0100 | [diff] [blame] | 3714 | { |
Mike Galbraith | a64692a | 2010-03-11 17:16:20 +0100 | [diff] [blame] | 3715 | if (prev->se.on_rq) |
| 3716 | update_rq_clock(rq); |
| 3717 | rq->skip_clock_update = 0; |
Peter Zijlstra | 6cecd08 | 2009-11-30 13:00:37 +0100 | [diff] [blame] | 3718 | prev->sched_class->put_prev_task(rq, prev); |
Mike Galbraith | df1c99d | 2009-03-10 19:08:11 +0100 | [diff] [blame] | 3719 | } |
| 3720 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3721 | /* |
| 3722 | * Pick up the highest-prio task: |
| 3723 | */ |
| 3724 | static inline struct task_struct * |
Wang Chen | b67802e | 2009-03-02 13:55:26 +0800 | [diff] [blame] | 3725 | pick_next_task(struct rq *rq) |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3726 | { |
Ingo Molnar | 5522d5d | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 3727 | const struct sched_class *class; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3728 | struct task_struct *p; |
| 3729 | |
| 3730 | /* |
| 3731 | * Optimization: we know that if all tasks are in |
| 3732 | * the fair class we can call that function directly: |
| 3733 | */ |
| 3734 | if (likely(rq->nr_running == rq->cfs.nr_running)) { |
Ingo Molnar | fb8d472 | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 3735 | p = fair_sched_class.pick_next_task(rq); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3736 | if (likely(p)) |
| 3737 | return p; |
| 3738 | } |
| 3739 | |
Peter Zijlstra | 34f971f | 2010-09-22 13:53:15 +0200 | [diff] [blame] | 3740 | for_each_class(class) { |
Ingo Molnar | fb8d472 | 2007-08-09 11:16:48 +0200 | [diff] [blame] | 3741 | p = class->pick_next_task(rq); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3742 | if (p) |
| 3743 | return p; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3744 | } |
Peter Zijlstra | 34f971f | 2010-09-22 13:53:15 +0200 | [diff] [blame] | 3745 | |
| 3746 | BUG(); /* the idle class will always have a runnable task */ |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3747 | } |
| 3748 | |
| 3749 | /* |
| 3750 | * schedule() is the main scheduler function. |
| 3751 | */ |
Peter Zijlstra | ff74334 | 2009-03-13 12:21:26 +0100 | [diff] [blame] | 3752 | asmlinkage void __sched schedule(void) |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3753 | { |
| 3754 | struct task_struct *prev, *next; |
Harvey Harrison | 67ca7bd | 2008-02-15 09:56:36 -0800 | [diff] [blame] | 3755 | unsigned long *switch_count; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3756 | struct rq *rq; |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 3757 | int cpu; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3758 | |
Peter Zijlstra | ff74334 | 2009-03-13 12:21:26 +0100 | [diff] [blame] | 3759 | need_resched: |
| 3760 | preempt_disable(); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3761 | cpu = smp_processor_id(); |
| 3762 | rq = cpu_rq(cpu); |
Paul E. McKenney | 25502a6 | 2010-04-01 17:37:01 -0700 | [diff] [blame] | 3763 | rcu_note_context_switch(cpu); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3764 | prev = rq->curr; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3765 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3766 | release_kernel_lock(prev); |
| 3767 | need_resched_nonpreemptible: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3768 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3769 | schedule_debug(prev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3770 | |
Peter Zijlstra | 3165651 | 2008-07-18 18:01:23 +0200 | [diff] [blame] | 3771 | if (sched_feat(HRTICK)) |
Mike Galbraith | f333fdc | 2008-05-12 21:20:55 +0200 | [diff] [blame] | 3772 | hrtick_clear(rq); |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 3773 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 3774 | raw_spin_lock_irq(&rq->lock); |
Ingo Molnar | 1e81995 | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 3775 | clear_tsk_need_resched(prev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3776 | |
Oleg Nesterov | 246d86b | 2010-05-19 14:57:11 +0200 | [diff] [blame] | 3777 | switch_count = &prev->nivcsw; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3778 | if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { |
Tejun Heo | 21aa9af | 2010-06-08 21:40:37 +0200 | [diff] [blame] | 3779 | if (unlikely(signal_pending_state(prev->state, prev))) { |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3780 | prev->state = TASK_RUNNING; |
Tejun Heo | 21aa9af | 2010-06-08 21:40:37 +0200 | [diff] [blame] | 3781 | } else { |
| 3782 | /* |
| 3783 | * If a worker is going to sleep, notify and |
| 3784 | * ask workqueue whether it wants to wake up a |
| 3785 | * task to maintain concurrency. If so, wake |
| 3786 | * up the task. |
| 3787 | */ |
| 3788 | if (prev->flags & PF_WQ_WORKER) { |
| 3789 | struct task_struct *to_wakeup; |
| 3790 | |
| 3791 | to_wakeup = wq_worker_sleeping(prev, cpu); |
| 3792 | if (to_wakeup) |
| 3793 | try_to_wake_up_local(to_wakeup); |
| 3794 | } |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 3795 | deactivate_task(rq, prev, DEQUEUE_SLEEP); |
Tejun Heo | 21aa9af | 2010-06-08 21:40:37 +0200 | [diff] [blame] | 3796 | } |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3797 | switch_count = &prev->nvcsw; |
| 3798 | } |
| 3799 | |
Gregory Haskins | 3f029d3 | 2009-07-29 11:08:47 -0400 | [diff] [blame] | 3800 | pre_schedule(rq, prev); |
Steven Rostedt | f65eda4 | 2008-01-25 21:08:07 +0100 | [diff] [blame] | 3801 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3802 | if (unlikely(!rq->nr_running)) |
| 3803 | idle_balance(cpu, rq); |
| 3804 | |
Mike Galbraith | df1c99d | 2009-03-10 19:08:11 +0100 | [diff] [blame] | 3805 | put_prev_task(rq, prev); |
Wang Chen | b67802e | 2009-03-02 13:55:26 +0800 | [diff] [blame] | 3806 | next = pick_next_task(rq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3807 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3808 | if (likely(prev != next)) { |
David Simner | 673a90a | 2008-04-29 10:08:59 +0100 | [diff] [blame] | 3809 | sched_info_switch(prev, next); |
Peter Zijlstra | 49f4743 | 2009-12-27 11:51:52 +0100 | [diff] [blame] | 3810 | perf_event_task_sched_out(prev, next); |
David Simner | 673a90a | 2008-04-29 10:08:59 +0100 | [diff] [blame] | 3811 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3812 | rq->nr_switches++; |
| 3813 | rq->curr = next; |
| 3814 | ++*switch_count; |
| 3815 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 3816 | context_switch(rq, prev, next); /* unlocks the rq */ |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 3817 | /* |
Oleg Nesterov | 246d86b | 2010-05-19 14:57:11 +0200 | [diff] [blame] | 3818 | * The context switch has flipped the stack from under us |
| 3819 | * and restored the local variables which were saved when |
| 3820 | * this task called schedule() in the past. prev == current |
| 3821 | * is still correct, but it can be moved to another cpu/rq. |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 3822 | */ |
| 3823 | cpu = smp_processor_id(); |
| 3824 | rq = cpu_rq(cpu); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3825 | } else |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 3826 | raw_spin_unlock_irq(&rq->lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3827 | |
Gregory Haskins | 3f029d3 | 2009-07-29 11:08:47 -0400 | [diff] [blame] | 3828 | post_schedule(rq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3829 | |
Oleg Nesterov | 246d86b | 2010-05-19 14:57:11 +0200 | [diff] [blame] | 3830 | if (unlikely(reacquire_kernel_lock(prev))) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3831 | goto need_resched_nonpreemptible; |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 3832 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3833 | preempt_enable_no_resched(); |
Peter Zijlstra | ff74334 | 2009-03-13 12:21:26 +0100 | [diff] [blame] | 3834 | if (need_resched()) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3835 | goto need_resched; |
| 3836 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3837 | EXPORT_SYMBOL(schedule); |
| 3838 | |
Frederic Weisbecker | c08f782 | 2009-12-02 20:49:17 +0100 | [diff] [blame] | 3839 | #ifdef CONFIG_MUTEX_SPIN_ON_OWNER |
Peter Zijlstra | 0d66bf6 | 2009-01-12 14:01:47 +0100 | [diff] [blame] | 3840 | /* |
| 3841 | * Look out! "owner" is an entirely speculative pointer |
| 3842 | * access and not reliable. |
| 3843 | */ |
| 3844 | int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner) |
| 3845 | { |
| 3846 | unsigned int cpu; |
| 3847 | struct rq *rq; |
| 3848 | |
| 3849 | if (!sched_feat(OWNER_SPIN)) |
| 3850 | return 0; |
| 3851 | |
| 3852 | #ifdef CONFIG_DEBUG_PAGEALLOC |
| 3853 | /* |
| 3854 | * Need to access the cpu field knowing that |
| 3855 | * DEBUG_PAGEALLOC could have unmapped it if |
| 3856 | * the mutex owner just released it and exited. |
| 3857 | */ |
| 3858 | if (probe_kernel_address(&owner->cpu, cpu)) |
Benjamin Herrenschmidt | 4b40221 | 2010-04-16 23:20:00 +0200 | [diff] [blame] | 3859 | return 0; |
Peter Zijlstra | 0d66bf6 | 2009-01-12 14:01:47 +0100 | [diff] [blame] | 3860 | #else |
| 3861 | cpu = owner->cpu; |
| 3862 | #endif |
| 3863 | |
| 3864 | /* |
| 3865 | * Even if the access succeeded (likely case), |
| 3866 | * the cpu field may no longer be valid. |
| 3867 | */ |
| 3868 | if (cpu >= nr_cpumask_bits) |
Benjamin Herrenschmidt | 4b40221 | 2010-04-16 23:20:00 +0200 | [diff] [blame] | 3869 | return 0; |
Peter Zijlstra | 0d66bf6 | 2009-01-12 14:01:47 +0100 | [diff] [blame] | 3870 | |
| 3871 | /* |
| 3872 | * We need to validate that we can do a |
| 3873 | * get_cpu() and that we have the percpu area. |
| 3874 | */ |
| 3875 | if (!cpu_online(cpu)) |
Benjamin Herrenschmidt | 4b40221 | 2010-04-16 23:20:00 +0200 | [diff] [blame] | 3876 | return 0; |
Peter Zijlstra | 0d66bf6 | 2009-01-12 14:01:47 +0100 | [diff] [blame] | 3877 | |
| 3878 | rq = cpu_rq(cpu); |
| 3879 | |
| 3880 | for (;;) { |
| 3881 | /* |
| 3882 | * Owner changed, break to re-assess state. |
| 3883 | */ |
Tim Chen | 9d0f4dc | 2010-08-18 15:00:27 -0700 | [diff] [blame] | 3884 | if (lock->owner != owner) { |
| 3885 | /* |
| 3886 | * If the lock has switched to a different owner, |
| 3887 | * we likely have heavy contention. Return 0 to quit |
| 3888 | * optimistic spinning and not contend further: |
| 3889 | */ |
| 3890 | if (lock->owner) |
| 3891 | return 0; |
Peter Zijlstra | 0d66bf6 | 2009-01-12 14:01:47 +0100 | [diff] [blame] | 3892 | break; |
Tim Chen | 9d0f4dc | 2010-08-18 15:00:27 -0700 | [diff] [blame] | 3893 | } |
Peter Zijlstra | 0d66bf6 | 2009-01-12 14:01:47 +0100 | [diff] [blame] | 3894 | |
| 3895 | /* |
| 3896 | * Is that owner really running on that cpu? |
| 3897 | */ |
| 3898 | if (task_thread_info(rq->curr) != owner || need_resched()) |
| 3899 | return 0; |
| 3900 | |
Gerald Schaefer | 335d7af | 2010-11-22 15:47:36 +0100 | [diff] [blame] | 3901 | arch_mutex_cpu_relax(); |
Peter Zijlstra | 0d66bf6 | 2009-01-12 14:01:47 +0100 | [diff] [blame] | 3902 | } |
Benjamin Herrenschmidt | 4b40221 | 2010-04-16 23:20:00 +0200 | [diff] [blame] | 3903 | |
Peter Zijlstra | 0d66bf6 | 2009-01-12 14:01:47 +0100 | [diff] [blame] | 3904 | return 1; |
| 3905 | } |
| 3906 | #endif |
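| | |
| | /* |
| |  * Illustrative sketch (not part of this file): roughly how the mutex |
| |  * slowpath in kernel/mutex.c is expected to use the helper above -- |
| |  * spin while the owner is still running on a CPU, give up and sleep |
| |  * once it is not. This is a simplification, not a verbatim copy: |
| |  * |
| |  *	for (;;) { |
| |  *		struct thread_info *owner = ACCESS_ONCE(lock->owner); |
| |  * |
| |  *		if (owner && !mutex_spin_on_owner(lock, owner)) |
| |  *			break;		(owner blocked: stop spinning, sleep) |
| |  *		if (mutex_trylock(lock)) |
| |  *			return;		(got the lock while spinning) |
| |  *		cpu_relax(); |
| |  *	} |
| |  */ |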
| 3907 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3908 | #ifdef CONFIG_PREEMPT |
| 3909 | /* |
Andreas Mohr | 2ed6e34 | 2006-07-10 04:43:52 -0700 | [diff] [blame] | 3910 | * this is the entry point to schedule() from in-kernel preemption |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 3911 | * off of preempt_enable. Kernel preemption off of return from interrupt |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3912 | * is handled separately by preempt_schedule_irq() below. |
| 3913 | */ |
Steven Rostedt | d1f74e2 | 2010-06-02 21:52:29 -0400 | [diff] [blame] | 3914 | asmlinkage void __sched notrace preempt_schedule(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3915 | { |
| 3916 | struct thread_info *ti = current_thread_info(); |
Ingo Molnar | 6478d88 | 2008-01-25 21:08:33 +0100 | [diff] [blame] | 3917 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3918 | /* |
| 3919 | * If there is a non-zero preempt_count or interrupts are disabled, |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 3920 | * we do not want to preempt the current task. Just return.. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3921 | */ |
Nick Piggin | beed33a | 2006-10-11 01:21:52 -0700 | [diff] [blame] | 3922 | if (likely(ti->preempt_count || irqs_disabled())) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3923 | return; |
| 3924 | |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 3925 | do { |
Steven Rostedt | d1f74e2 | 2010-06-02 21:52:29 -0400 | [diff] [blame] | 3926 | add_preempt_count_notrace(PREEMPT_ACTIVE); |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 3927 | schedule(); |
Steven Rostedt | d1f74e2 | 2010-06-02 21:52:29 -0400 | [diff] [blame] | 3928 | sub_preempt_count_notrace(PREEMPT_ACTIVE); |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 3929 | |
| 3930 | /* |
| 3931 | * Check again in case we missed a preemption opportunity |
| 3932 | * between schedule and now. |
| 3933 | */ |
| 3934 | barrier(); |
Lai Jiangshan | 5ed0cec | 2009-03-06 19:40:20 +0800 | [diff] [blame] | 3935 | } while (need_resched()); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3936 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3937 | EXPORT_SYMBOL(preempt_schedule); |
| 3938 | |
| 3939 | /* |
Andreas Mohr | 2ed6e34 | 2006-07-10 04:43:52 -0700 | [diff] [blame] | 3940 | * this is the entry point to schedule() from kernel preemption |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3941 | * off of irq context. |
| 3942 | * Note, that this is called and return with irqs disabled. This will |
| 3943 | * protect us against recursive calling from irq. |
| 3944 | */ |
| 3945 | asmlinkage void __sched preempt_schedule_irq(void) |
| 3946 | { |
| 3947 | struct thread_info *ti = current_thread_info(); |
Ingo Molnar | 6478d88 | 2008-01-25 21:08:33 +0100 | [diff] [blame] | 3948 | |
Andreas Mohr | 2ed6e34 | 2006-07-10 04:43:52 -0700 | [diff] [blame] | 3949 | /* Catch callers which need to be fixed */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3950 | BUG_ON(ti->preempt_count || !irqs_disabled()); |
| 3951 | |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 3952 | do { |
| 3953 | add_preempt_count(PREEMPT_ACTIVE); |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 3954 | local_irq_enable(); |
| 3955 | schedule(); |
| 3956 | local_irq_disable(); |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 3957 | sub_preempt_count(PREEMPT_ACTIVE); |
| 3958 | |
| 3959 | /* |
| 3960 | * Check again in case we missed a preemption opportunity |
| 3961 | * between schedule and now. |
| 3962 | */ |
| 3963 | barrier(); |
Lai Jiangshan | 5ed0cec | 2009-03-06 19:40:20 +0800 | [diff] [blame] | 3964 | } while (need_resched()); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3965 | } |
| 3966 | |
| 3967 | #endif /* CONFIG_PREEMPT */ |
| 3968 | |
Peter Zijlstra | 63859d4 | 2009-09-15 19:14:42 +0200 | [diff] [blame] | 3969 | int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags, |
Ingo Molnar | 95cdf3b | 2005-09-10 00:26:11 -0700 | [diff] [blame] | 3970 | void *key) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3971 | { |
Peter Zijlstra | 63859d4 | 2009-09-15 19:14:42 +0200 | [diff] [blame] | 3972 | return try_to_wake_up(curr->private, mode, wake_flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3973 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3974 | EXPORT_SYMBOL(default_wake_function); |
| 3975 | |
| 3976 | /* |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 3977 | * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just |
| 3978 | * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3979 | * number) then we wake all the non-exclusive tasks and one exclusive task. |
| 3980 | * |
| 3981 | * There are circumstances in which we can try to wake a task which has already |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 3982 | * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3983 | * zero in this (rare) case, and we handle it by continuing to scan the queue. |
| 3984 | */ |
Johannes Weiner | 78ddb08 | 2009-04-14 16:53:05 +0200 | [diff] [blame] | 3985 | static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, |
Peter Zijlstra | 63859d4 | 2009-09-15 19:14:42 +0200 | [diff] [blame] | 3986 | int nr_exclusive, int wake_flags, void *key) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3987 | { |
Matthias Kaehlcke | 2e45874 | 2007-10-15 17:00:02 +0200 | [diff] [blame] | 3988 | wait_queue_t *curr, *next; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3989 | |
Matthias Kaehlcke | 2e45874 | 2007-10-15 17:00:02 +0200 | [diff] [blame] | 3990 | list_for_each_entry_safe(curr, next, &q->task_list, task_list) { |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 3991 | unsigned flags = curr->flags; |
| 3992 | |
Peter Zijlstra | 63859d4 | 2009-09-15 19:14:42 +0200 | [diff] [blame] | 3993 | if (curr->func(curr, mode, wake_flags, key) && |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 3994 | (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3995 | break; |
| 3996 | } |
| 3997 | } |
| 3998 | |
| 3999 | /** |
| 4000 | * __wake_up - wake up threads blocked on a waitqueue. |
| 4001 | * @q: the waitqueue |
| 4002 | * @mode: which threads |
| 4003 | * @nr_exclusive: how many wake-one or wake-many threads to wake up |
Martin Waitz | 67be2dd | 2005-05-01 08:59:26 -0700 | [diff] [blame] | 4004 | * @key: is directly passed to the wakeup function |
David Howells | 50fa610 | 2009-04-28 15:01:38 +0100 | [diff] [blame] | 4005 | * |
| 4006 | * It may be assumed that this function implies a write memory barrier before |
| 4007 | * changing the task state if and only if any tasks are woken up. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4008 | */ |
Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 4009 | void __wake_up(wait_queue_head_t *q, unsigned int mode, |
Ingo Molnar | 95cdf3b | 2005-09-10 00:26:11 -0700 | [diff] [blame] | 4010 | int nr_exclusive, void *key) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4011 | { |
| 4012 | unsigned long flags; |
| 4013 | |
| 4014 | spin_lock_irqsave(&q->lock, flags); |
| 4015 | __wake_up_common(q, mode, nr_exclusive, 0, key); |
| 4016 | spin_unlock_irqrestore(&q->lock, flags); |
| 4017 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4018 | EXPORT_SYMBOL(__wake_up); |
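| | |
| | /* |
| |  * Illustrative sketch (not part of this file): a typical sleeper/waker |
| |  * pair on a waitqueue. wake_up() expands to __wake_up(q, TASK_NORMAL, |
| |  * 1, NULL); the names my_wq and data_ready are hypothetical. |
| |  * |
| |  *	static DECLARE_WAIT_QUEUE_HEAD(my_wq); |
| |  *	static int data_ready; |
| |  * |
| |  *	sleeper: |
| |  *		wait_event(my_wq, data_ready); |
| |  * |
| |  *	waker: |
| |  *		data_ready = 1; |
| |  *		wake_up(&my_wq); |
| |  */ |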
| 4019 | |
| 4020 | /* |
| 4021 | * Same as __wake_up but called with the spinlock in wait_queue_head_t held. |
| 4022 | */ |
Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 4023 | void __wake_up_locked(wait_queue_head_t *q, unsigned int mode) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4024 | { |
| 4025 | __wake_up_common(q, mode, 1, 0, NULL); |
| 4026 | } |
Michal Nazarewicz | 22c43c8 | 2010-05-05 12:53:11 +0200 | [diff] [blame] | 4027 | EXPORT_SYMBOL_GPL(__wake_up_locked); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4028 | |
Davide Libenzi | 4ede816 | 2009-03-31 15:24:20 -0700 | [diff] [blame] | 4029 | void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key) |
| 4030 | { |
| 4031 | __wake_up_common(q, mode, 1, 0, key); |
| 4032 | } |
| 4033 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4034 | /** |
Davide Libenzi | 4ede816 | 2009-03-31 15:24:20 -0700 | [diff] [blame] | 4035 | * __wake_up_sync_key - wake up threads blocked on a waitqueue. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4036 | * @q: the waitqueue |
| 4037 | * @mode: which threads |
| 4038 | * @nr_exclusive: how many wake-one or wake-many threads to wake up |
Davide Libenzi | 4ede816 | 2009-03-31 15:24:20 -0700 | [diff] [blame] | 4039 | * @key: opaque value to be passed to wakeup targets |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4040 | * |
| 4041 | * The sync wakeup differs in that the waker knows that it will schedule |
| 4042 | * away soon, so while the target thread will be woken up, it will not |
| 4043 | * be migrated to another CPU - ie. the two threads are 'synchronized' |
| 4044 | * with each other. This can prevent needless bouncing between CPUs. |
| 4045 | * |
| 4046 | * On UP it can prevent extra preemption. |
David Howells | 50fa610 | 2009-04-28 15:01:38 +0100 | [diff] [blame] | 4047 | * |
| 4048 | * It may be assumed that this function implies a write memory barrier before |
| 4049 | * changing the task state if and only if any tasks are woken up. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4050 | */ |
Davide Libenzi | 4ede816 | 2009-03-31 15:24:20 -0700 | [diff] [blame] | 4051 | void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, |
| 4052 | int nr_exclusive, void *key) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4053 | { |
| 4054 | unsigned long flags; |
Peter Zijlstra | 7d47872 | 2009-09-14 19:55:44 +0200 | [diff] [blame] | 4055 | int wake_flags = WF_SYNC; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4056 | |
| 4057 | if (unlikely(!q)) |
| 4058 | return; |
| 4059 | |
| 4060 | if (unlikely(!nr_exclusive)) |
Peter Zijlstra | 7d47872 | 2009-09-14 19:55:44 +0200 | [diff] [blame] | 4061 | wake_flags = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4062 | |
| 4063 | spin_lock_irqsave(&q->lock, flags); |
Peter Zijlstra | 7d47872 | 2009-09-14 19:55:44 +0200 | [diff] [blame] | 4064 | __wake_up_common(q, mode, nr_exclusive, wake_flags, key); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4065 | spin_unlock_irqrestore(&q->lock, flags); |
| 4066 | } |
Davide Libenzi | 4ede816 | 2009-03-31 15:24:20 -0700 | [diff] [blame] | 4067 | EXPORT_SYMBOL_GPL(__wake_up_sync_key); |
| 4068 | |
| 4069 | /* |
| 4070 | * __wake_up_sync - see __wake_up_sync_key() |
| 4071 | */ |
| 4072 | void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) |
| 4073 | { |
| 4074 | __wake_up_sync_key(q, mode, nr_exclusive, NULL); |
| 4075 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4076 | EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */ |
| 4077 | |
Kevin Diggs | 65eb3dc | 2008-08-26 10:26:54 +0200 | [diff] [blame] | 4078 | /** |
| 4079 | * complete: - signals a single thread waiting on this completion |
| 4080 | * @x: holds the state of this particular completion |
| 4081 | * |
| 4082 | * This will wake up a single thread waiting on this completion. Threads will be |
| 4083 | * awakened in the same order in which they were queued. |
| 4084 | * |
| 4085 | * See also complete_all(), wait_for_completion() and related routines. |
David Howells | 50fa610 | 2009-04-28 15:01:38 +0100 | [diff] [blame] | 4086 | * |
| 4087 | * It may be assumed that this function implies a write memory barrier before |
| 4088 | * changing the task state if and only if any tasks are woken up. |
Kevin Diggs | 65eb3dc | 2008-08-26 10:26:54 +0200 | [diff] [blame] | 4089 | */ |
Ingo Molnar | b15136e | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 4090 | void complete(struct completion *x) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4091 | { |
| 4092 | unsigned long flags; |
| 4093 | |
| 4094 | spin_lock_irqsave(&x->wait.lock, flags); |
| 4095 | x->done++; |
Matthew Wilcox | d9514f6 | 2007-12-06 11:07:07 -0500 | [diff] [blame] | 4096 | __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4097 | spin_unlock_irqrestore(&x->wait.lock, flags); |
| 4098 | } |
| 4099 | EXPORT_SYMBOL(complete); |
| 4100 | |
Kevin Diggs | 65eb3dc | 2008-08-26 10:26:54 +0200 | [diff] [blame] | 4101 | /** |
| 4102 | * complete_all: - signals all threads waiting on this completion |
| 4103 | * @x: holds the state of this particular completion |
| 4104 | * |
| 4105 | * This will wake up all threads waiting on this particular completion event. |
David Howells | 50fa610 | 2009-04-28 15:01:38 +0100 | [diff] [blame] | 4106 | * |
| 4107 | * It may be assumed that this function implies a write memory barrier before |
| 4108 | * changing the task state if and only if any tasks are woken up. |
Kevin Diggs | 65eb3dc | 2008-08-26 10:26:54 +0200 | [diff] [blame] | 4109 | */ |
Ingo Molnar | b15136e | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 4110 | void complete_all(struct completion *x) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4111 | { |
| 4112 | unsigned long flags; |
| 4113 | |
| 4114 | spin_lock_irqsave(&x->wait.lock, flags); |
| 4115 | x->done += UINT_MAX/2; |
Matthew Wilcox | d9514f6 | 2007-12-06 11:07:07 -0500 | [diff] [blame] | 4116 | __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4117 | spin_unlock_irqrestore(&x->wait.lock, flags); |
| 4118 | } |
| 4119 | EXPORT_SYMBOL(complete_all); |
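| | |
| | /* |
| |  * Illustrative sketch (not part of this file): releasing every waiter |
| |  * at once, e.g. after one-time initialization finishes. The name |
| |  * init_done is hypothetical. |
| |  * |
| |  *	static DECLARE_COMPLETION(init_done); |
| |  * |
| |  *	each waiter: |
| |  *		wait_for_completion(&init_done); |
| |  * |
| |  *	the initializer, exactly once: |
| |  *		complete_all(&init_done); |
| |  */ |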
| 4120 | |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4121 | static inline long __sched |
| 4122 | do_wait_for_common(struct completion *x, long timeout, int state) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4123 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4124 | if (!x->done) { |
| 4125 | DECLARE_WAITQUEUE(wait, current); |
| 4126 | |
Changli Gao | a93d2f1 | 2010-05-07 14:33:26 +0800 | [diff] [blame] | 4127 | __add_wait_queue_tail_exclusive(&x->wait, &wait); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4128 | do { |
Oleg Nesterov | 94d3d82 | 2008-08-20 16:54:41 -0700 | [diff] [blame] | 4129 | if (signal_pending_state(state, current)) { |
Oleg Nesterov | ea71a54 | 2008-06-20 18:32:20 +0400 | [diff] [blame] | 4130 | timeout = -ERESTARTSYS; |
| 4131 | break; |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4132 | } |
| 4133 | __set_current_state(state); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4134 | spin_unlock_irq(&x->wait.lock); |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4135 | timeout = schedule_timeout(timeout); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4136 | spin_lock_irq(&x->wait.lock); |
Oleg Nesterov | ea71a54 | 2008-06-20 18:32:20 +0400 | [diff] [blame] | 4137 | } while (!x->done && timeout); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4138 | __remove_wait_queue(&x->wait, &wait); |
Oleg Nesterov | ea71a54 | 2008-06-20 18:32:20 +0400 | [diff] [blame] | 4139 | if (!x->done) |
| 4140 | return timeout; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4141 | } |
| 4142 | x->done--; |
Oleg Nesterov | ea71a54 | 2008-06-20 18:32:20 +0400 | [diff] [blame] | 4143 | return timeout ?: 1; |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4144 | } |
| 4145 | |
| 4146 | static long __sched |
| 4147 | wait_for_common(struct completion *x, long timeout, int state) |
| 4148 | { |
| 4149 | might_sleep(); |
| 4150 | |
| 4151 | spin_lock_irq(&x->wait.lock); |
| 4152 | timeout = do_wait_for_common(x, timeout, state); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4153 | spin_unlock_irq(&x->wait.lock); |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4154 | return timeout; |
| 4155 | } |
| 4156 | |
Kevin Diggs | 65eb3dc | 2008-08-26 10:26:54 +0200 | [diff] [blame] | 4157 | /** |
| 4158 | * wait_for_completion: - waits for completion of a task |
| 4159 | * @x: holds the state of this particular completion |
| 4160 | * |
| 4161 | * This waits to be signaled for completion of a specific task. It is NOT |
| 4162 | * interruptible and there is no timeout. |
| 4163 | * |
| 4164 | * See also similar routines (i.e. wait_for_completion_timeout()) with timeout |
| 4165 | * and interrupt capability. Also see complete(). |
| 4166 | */ |
Ingo Molnar | b15136e | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 4167 | void __sched wait_for_completion(struct completion *x) |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4168 | { |
| 4169 | wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4170 | } |
| 4171 | EXPORT_SYMBOL(wait_for_completion); |
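| | |
| | /* |
| |  * Illustrative sketch (not part of this file): the usual handshake |
| |  * between a waiter and a worker. The names setup_done, my_thread_fn |
| |  * and do_the_setup_work are hypothetical. |
| |  * |
| |  *	static DECLARE_COMPLETION(setup_done); |
| |  * |
| |  *	static int my_thread_fn(void *data) |
| |  *	{ |
| |  *		do_the_setup_work(); |
| |  *		complete(&setup_done); |
| |  *		return 0; |
| |  *	} |
| |  * |
| |  *	caller: |
| |  *		kthread_run(my_thread_fn, NULL, "my_setup"); |
| |  *		wait_for_completion(&setup_done); |
| |  */ |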
| 4172 | |
Kevin Diggs | 65eb3dc | 2008-08-26 10:26:54 +0200 | [diff] [blame] | 4173 | /** |
| 4174 | * wait_for_completion_timeout: - waits for completion of a task (w/timeout) |
| 4175 | * @x: holds the state of this particular completion |
| 4176 | * @timeout: timeout value in jiffies |
| 4177 | * |
| 4178 | * This waits for either a completion of a specific task to be signaled or for a |
| 4179 | * specified timeout to expire. The timeout is in jiffies. It is not |
| 4180 | * interruptible. |
| 4181 | */ |
Ingo Molnar | b15136e | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 4182 | unsigned long __sched |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4183 | wait_for_completion_timeout(struct completion *x, unsigned long timeout) |
| 4184 | { |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4185 | return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4186 | } |
| 4187 | EXPORT_SYMBOL(wait_for_completion_timeout); |
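| | |
| | /* |
| |  * Illustrative sketch (not part of this file): bounding the wait to one |
| |  * second, reusing the hypothetical setup_done completion from above. |
| |  * A return value of 0 means the timeout expired first. |
| |  * |
| |  *	unsigned long left = wait_for_completion_timeout(&setup_done, HZ); |
| |  * |
| |  *	if (!left) |
| |  *		return -ETIMEDOUT; |
| |  */ |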
| 4188 | |
Kevin Diggs | 65eb3dc | 2008-08-26 10:26:54 +0200 | [diff] [blame] | 4189 | /** |
| 4190 | * wait_for_completion_interruptible: - waits for completion of a task (w/intr) |
| 4191 | * @x: holds the state of this particular completion |
| 4192 | * |
| 4193 | * This waits for completion of a specific task to be signaled. It is |
| 4194 | * interruptible. |
| 4195 | */ |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4196 | int __sched wait_for_completion_interruptible(struct completion *x) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4197 | { |
Andi Kleen | 51e9799 | 2007-10-18 21:32:55 +0200 | [diff] [blame] | 4198 | long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE); |
| 4199 | if (t == -ERESTARTSYS) |
| 4200 | return t; |
| 4201 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4202 | } |
| 4203 | EXPORT_SYMBOL(wait_for_completion_interruptible); |
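| | |
| | /* |
| |  * Illustrative sketch (not part of this file): the interruptible variant |
| |  * must have its return value checked, since a signal aborts the wait. |
| |  * Again reusing the hypothetical setup_done completion. |
| |  * |
| |  *	if (wait_for_completion_interruptible(&setup_done)) |
| |  *		return -ERESTARTSYS; |
| |  */ |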
| 4204 | |
Kevin Diggs | 65eb3dc | 2008-08-26 10:26:54 +0200 | [diff] [blame] | 4205 | /** |
| 4206 | * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr)) |
| 4207 | * @x: holds the state of this particular completion |
| 4208 | * @timeout: timeout value in jiffies |
| 4209 | * |
| 4210 | * This waits for either a completion of a specific task to be signaled or for a |
| 4211 | * specified timeout to expire. It is interruptible. The timeout is in jiffies. |
| 4212 | */ |
Ingo Molnar | b15136e | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 4213 | unsigned long __sched |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4214 | wait_for_completion_interruptible_timeout(struct completion *x, |
| 4215 | unsigned long timeout) |
| 4216 | { |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4217 | return wait_for_common(x, timeout, TASK_INTERRUPTIBLE); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4218 | } |
| 4219 | EXPORT_SYMBOL(wait_for_completion_interruptible_timeout); |
| 4220 | |
Kevin Diggs | 65eb3dc | 2008-08-26 10:26:54 +0200 | [diff] [blame] | 4221 | /** |
| 4222 | * wait_for_completion_killable: - waits for completion of a task (killable) |
| 4223 | * @x: holds the state of this particular completion |
| 4224 | * |
| 4225 | * This waits to be signaled for completion of a specific task. It can be |
| 4226 | * interrupted by a kill signal. |
| 4227 | */ |
Matthew Wilcox | 009e577 | 2007-12-06 12:29:54 -0500 | [diff] [blame] | 4228 | int __sched wait_for_completion_killable(struct completion *x) |
| 4229 | { |
| 4230 | long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE); |
| 4231 | if (t == -ERESTARTSYS) |
| 4232 | return t; |
| 4233 | return 0; |
| 4234 | } |
| 4235 | EXPORT_SYMBOL(wait_for_completion_killable); |
| 4236 | |
Dave Chinner | be4de35 | 2008-08-15 00:40:44 -0700 | [diff] [blame] | 4237 | /** |
Sage Weil | 0aa12fb | 2010-05-29 09:12:30 -0700 | [diff] [blame] | 4238 | * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable)) |
| 4239 | * @x: holds the state of this particular completion |
| 4240 | * @timeout: timeout value in jiffies |
| 4241 | * |
| 4242 | * This waits for either a completion of a specific task to be |
| 4243 | * signaled or for a specified timeout to expire. It can be |
| 4244 | * interrupted by a kill signal. The timeout is in jiffies. |
| 4245 | */ |
| 4246 | unsigned long __sched |
| 4247 | wait_for_completion_killable_timeout(struct completion *x, |
| 4248 | unsigned long timeout) |
| 4249 | { |
| 4250 | return wait_for_common(x, timeout, TASK_KILLABLE); |
| 4251 | } |
| 4252 | EXPORT_SYMBOL(wait_for_completion_killable_timeout); |
| 4253 | |
| 4254 | /** |
Dave Chinner | be4de35 | 2008-08-15 00:40:44 -0700 | [diff] [blame] | 4255 | * try_wait_for_completion - try to decrement a completion without blocking |
| 4256 | * @x: completion structure |
| 4257 | * |
| 4258 | * Returns: 0 if a decrement cannot be done without blocking |
| 4259 | * 1 if a decrement succeeded. |
| 4260 | * |
| 4261 | * If a completion is being used as a counting completion, |
| 4262 | * attempt to decrement the counter without blocking. This |
| 4263 | * enables us to avoid waiting if the resource the completion |
| 4264 | * is protecting is not available. |
| 4265 | */ |
| 4266 | bool try_wait_for_completion(struct completion *x) |
| 4267 | { |
Rafael J. Wysocki | 7539a3b | 2009-12-13 00:07:30 +0100 | [diff] [blame] | 4268 | unsigned long flags; |
Dave Chinner | be4de35 | 2008-08-15 00:40:44 -0700 | [diff] [blame] | 4269 | int ret = 1; |
| 4270 | |
Rafael J. Wysocki | 7539a3b | 2009-12-13 00:07:30 +0100 | [diff] [blame] | 4271 | spin_lock_irqsave(&x->wait.lock, flags); |
Dave Chinner | be4de35 | 2008-08-15 00:40:44 -0700 | [diff] [blame] | 4272 | if (!x->done) |
| 4273 | ret = 0; |
| 4274 | else |
| 4275 | x->done--; |
Rafael J. Wysocki | 7539a3b | 2009-12-13 00:07:30 +0100 | [diff] [blame] | 4276 | spin_unlock_irqrestore(&x->wait.lock, flags); |
Dave Chinner | be4de35 | 2008-08-15 00:40:44 -0700 | [diff] [blame] | 4277 | return ret; |
| 4278 | } |
| 4279 | EXPORT_SYMBOL(try_wait_for_completion); |
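| | |
| | /* |
| |  * Illustrative sketch (not part of this file): using a completion as a |
| |  * counter of available slots and taking one without blocking. The names |
| |  * slots and use_slot are hypothetical. |
| |  * |
| |  *	static DECLARE_COMPLETION(slots); |
| |  * |
| |  *	producer, once per freed slot: |
| |  *		complete(&slots); |
| |  * |
| |  *	consumer, never sleeps: |
| |  *		if (try_wait_for_completion(&slots)) |
| |  *			use_slot(); |
| |  */ |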
| 4280 | |
| 4281 | /** |
| 4282 | * completion_done - Test to see if a completion has any waiters |
| 4283 | * @x: completion structure |
| 4284 | * |
| 4285 | * Returns: 0 if there are waiters (wait_for_completion() in progress) |
| 4286 | * 1 if there are no waiters. |
| 4287 | * |
| 4288 | */ |
| 4289 | bool completion_done(struct completion *x) |
| 4290 | { |
Rafael J. Wysocki | 7539a3b | 2009-12-13 00:07:30 +0100 | [diff] [blame] | 4291 | unsigned long flags; |
Dave Chinner | be4de35 | 2008-08-15 00:40:44 -0700 | [diff] [blame] | 4292 | int ret = 1; |
| 4293 | |
Rafael J. Wysocki | 7539a3b | 2009-12-13 00:07:30 +0100 | [diff] [blame] | 4294 | spin_lock_irqsave(&x->wait.lock, flags); |
Dave Chinner | be4de35 | 2008-08-15 00:40:44 -0700 | [diff] [blame] | 4295 | if (!x->done) |
| 4296 | ret = 0; |
Rafael J. Wysocki | 7539a3b | 2009-12-13 00:07:30 +0100 | [diff] [blame] | 4297 | spin_unlock_irqrestore(&x->wait.lock, flags); |
Dave Chinner | be4de35 | 2008-08-15 00:40:44 -0700 | [diff] [blame] | 4298 | return ret; |
| 4299 | } |
| 4300 | EXPORT_SYMBOL(completion_done); |
| 4301 | |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4302 | static long __sched |
| 4303 | sleep_on_common(wait_queue_head_t *q, int state, long timeout) |
Ingo Molnar | 0fec171 | 2007-07-09 18:52:01 +0200 | [diff] [blame] | 4304 | { |
| 4305 | unsigned long flags; |
| 4306 | wait_queue_t wait; |
| 4307 | |
| 4308 | init_waitqueue_entry(&wait, current); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4309 | |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4310 | __set_current_state(state); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4311 | |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4312 | spin_lock_irqsave(&q->lock, flags); |
| 4313 | __add_wait_queue(q, &wait); |
| 4314 | spin_unlock(&q->lock); |
| 4315 | timeout = schedule_timeout(timeout); |
| 4316 | spin_lock_irq(&q->lock); |
| 4317 | __remove_wait_queue(q, &wait); |
| 4318 | spin_unlock_irqrestore(&q->lock, flags); |
| 4319 | |
| 4320 | return timeout; |
| 4321 | } |
| 4322 | |
| 4323 | void __sched interruptible_sleep_on(wait_queue_head_t *q) |
| 4324 | { |
| 4325 | sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4326 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4327 | EXPORT_SYMBOL(interruptible_sleep_on); |
| 4328 | |
Ingo Molnar | 0fec171 | 2007-07-09 18:52:01 +0200 | [diff] [blame] | 4329 | long __sched |
Ingo Molnar | 95cdf3b | 2005-09-10 00:26:11 -0700 | [diff] [blame] | 4330 | interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4331 | { |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4332 | return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4333 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4334 | EXPORT_SYMBOL(interruptible_sleep_on_timeout); |
| 4335 | |
Ingo Molnar | 0fec171 | 2007-07-09 18:52:01 +0200 | [diff] [blame] | 4336 | void __sched sleep_on(wait_queue_head_t *q) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4337 | { |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4338 | sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4339 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4340 | EXPORT_SYMBOL(sleep_on); |
| 4341 | |
Ingo Molnar | 0fec171 | 2007-07-09 18:52:01 +0200 | [diff] [blame] | 4342 | long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4343 | { |
Andi Kleen | 8cbbe86 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4344 | return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4345 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4346 | EXPORT_SYMBOL(sleep_on_timeout); |
| 4347 | |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 4348 | #ifdef CONFIG_RT_MUTEXES |
| 4349 | |
| 4350 | /* |
| 4351 | * rt_mutex_setprio - set the current priority of a task |
| 4352 | * @p: task |
| 4353 | * @prio: prio value (kernel-internal form) |
| 4354 | * |
| 4355 | * This function changes the 'effective' priority of a task. It does |
| 4356 | * not touch ->normal_prio like __setscheduler(). |
| 4357 | * |
| 4358 | * Used by the rt_mutex code to implement priority inheritance logic. |
| 4359 | */ |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 4360 | void rt_mutex_setprio(struct task_struct *p, int prio) |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 4361 | { |
| 4362 | unsigned long flags; |
Srivatsa Vaddagiri | 83b699e | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 4363 | int oldprio, on_rq, running; |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 4364 | struct rq *rq; |
Thomas Gleixner | 83ab0aa | 2010-02-17 09:05:48 +0100 | [diff] [blame] | 4365 | const struct sched_class *prev_class; |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 4366 | |
| 4367 | BUG_ON(prio < 0 || prio > MAX_PRIO); |
| 4368 | |
| 4369 | rq = task_rq_lock(p, &flags); |
| 4370 | |
Steven Rostedt | a802707 | 2010-09-20 15:13:34 -0400 | [diff] [blame] | 4371 | trace_sched_pi_setprio(p, prio); |
Andrew Morton | d5f9f94 | 2007-05-08 20:27:06 -0700 | [diff] [blame] | 4372 | oldprio = p->prio; |
Thomas Gleixner | 83ab0aa | 2010-02-17 09:05:48 +0100 | [diff] [blame] | 4373 | prev_class = p->sched_class; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4374 | on_rq = p->se.on_rq; |
Dmitry Adamushko | 051a1d1 | 2007-12-18 15:21:13 +0100 | [diff] [blame] | 4375 | running = task_current(rq, p); |
Hiroshi Shimamoto | 0e1f348 | 2008-03-10 11:01:20 -0700 | [diff] [blame] | 4376 | if (on_rq) |
Ingo Molnar | 69be72c | 2007-08-09 11:16:49 +0200 | [diff] [blame] | 4377 | dequeue_task(rq, p, 0); |
Hiroshi Shimamoto | 0e1f348 | 2008-03-10 11:01:20 -0700 | [diff] [blame] | 4378 | if (running) |
| 4379 | p->sched_class->put_prev_task(rq, p); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4380 | |
| 4381 | if (rt_prio(prio)) |
| 4382 | p->sched_class = &rt_sched_class; |
| 4383 | else |
| 4384 | p->sched_class = &fair_sched_class; |
| 4385 | |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 4386 | p->prio = prio; |
| 4387 | |
Hiroshi Shimamoto | 0e1f348 | 2008-03-10 11:01:20 -0700 | [diff] [blame] | 4388 | if (running) |
| 4389 | p->sched_class->set_curr_task(rq); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4390 | if (on_rq) { |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 4391 | enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0); |
Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 4392 | |
| 4393 | check_class_changed(rq, p, prev_class, oldprio, running); |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 4394 | } |
| 4395 | task_rq_unlock(rq, &flags); |
| 4396 | } |
| 4397 | |
| 4398 | #endif |
| 4399 | |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 4400 | void set_user_nice(struct task_struct *p, long nice) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4401 | { |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4402 | int old_prio, delta, on_rq; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4403 | unsigned long flags; |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 4404 | struct rq *rq; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4405 | |
| 4406 | if (TASK_NICE(p) == nice || nice < -20 || nice > 19) |
| 4407 | return; |
| 4408 | /* |
| 4409 | * We have to be careful, if called from sys_setpriority(), |
| 4410 | * the task might be in the middle of scheduling on another CPU. |
| 4411 | */ |
| 4412 | rq = task_rq_lock(p, &flags); |
| 4413 | /* |
| 4414 | * The RT priorities are set via sched_setscheduler(), but we still |
| 4415 | * allow the 'normal' nice value to be set - but as expected |
| 4416 | * it won't have any effect on scheduling until the task is |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4417 | * SCHED_FIFO/SCHED_RR: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4418 | */ |
Ingo Molnar | e05606d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4419 | if (task_has_rt_policy(p)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4420 | p->static_prio = NICE_TO_PRIO(nice); |
| 4421 | goto out_unlock; |
| 4422 | } |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4423 | on_rq = p->se.on_rq; |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 4424 | if (on_rq) |
Ingo Molnar | 69be72c | 2007-08-09 11:16:49 +0200 | [diff] [blame] | 4425 | dequeue_task(rq, p, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4426 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4427 | p->static_prio = NICE_TO_PRIO(nice); |
Peter Williams | 2dd73a4 | 2006-06-27 02:54:34 -0700 | [diff] [blame] | 4428 | set_load_weight(p); |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 4429 | old_prio = p->prio; |
| 4430 | p->prio = effective_prio(p); |
| 4431 | delta = p->prio - old_prio; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4432 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4433 | if (on_rq) { |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 4434 | enqueue_task(rq, p, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4435 | /* |
Andrew Morton | d5f9f94 | 2007-05-08 20:27:06 -0700 | [diff] [blame] | 4436 | * If the task increased its priority or is running and |
| 4437 | * lowered its priority, then reschedule its CPU: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4438 | */ |
Andrew Morton | d5f9f94 | 2007-05-08 20:27:06 -0700 | [diff] [blame] | 4439 | if (delta < 0 || (delta > 0 && task_running(rq, p))) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4440 | resched_task(rq->curr); |
| 4441 | } |
| 4442 | out_unlock: |
| 4443 | task_rq_unlock(rq, &flags); |
| 4444 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4445 | EXPORT_SYMBOL(set_user_nice); |
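| | |
| | /* |
| |  * Illustrative sketch (not part of this file): a kernel thread demoting |
| |  * itself to the weakest nice level for background work. |
| |  * |
| |  *	set_user_nice(current, 19); |
| |  */ |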
| 4446 | |
Matt Mackall | e43379f | 2005-05-01 08:59:00 -0700 | [diff] [blame] | 4447 | /* |
| 4448 | * can_nice - check if a task can reduce its nice value |
| 4449 | * @p: task |
| 4450 | * @nice: nice value |
| 4451 | */ |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 4452 | int can_nice(const struct task_struct *p, const int nice) |
Matt Mackall | e43379f | 2005-05-01 08:59:00 -0700 | [diff] [blame] | 4453 | { |
Matt Mackall | 024f474 | 2005-08-18 11:24:19 -0700 | [diff] [blame] | 4454 | /* convert nice value [19,-20] to rlimit style value [1,40] */ |
| 4455 | int nice_rlim = 20 - nice; |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 4456 | |
Jiri Slaby | 78d7d40 | 2010-03-05 13:42:54 -0800 | [diff] [blame] | 4457 | return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || |
Matt Mackall | e43379f | 2005-05-01 08:59:00 -0700 | [diff] [blame] | 4458 | capable(CAP_SYS_NICE)); |
| 4459 | } |
| 4460 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4461 | #ifdef __ARCH_WANT_SYS_NICE |
| 4462 | |
| 4463 | /* |
| 4464 | * sys_nice - change the priority of the current process. |
| 4465 | * @increment: priority increment |
| 4466 | * |
| 4467 | * sys_setpriority is a more generic, but much slower function that |
| 4468 | * does similar things. |
| 4469 | */ |
Heiko Carstens | 5add95d | 2009-01-14 14:14:08 +0100 | [diff] [blame] | 4470 | SYSCALL_DEFINE1(nice, int, increment) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4471 | { |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 4472 | long nice, retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4473 | |
| 4474 | /* |
| 4475 | * Setpriority might change our priority at the same moment. |
| 4476 | * We don't have to worry. Conceptually one call occurs first |
| 4477 | * and we have a single winner. |
| 4478 | */ |
Matt Mackall | e43379f | 2005-05-01 08:59:00 -0700 | [diff] [blame] | 4479 | if (increment < -40) |
| 4480 | increment = -40; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4481 | if (increment > 40) |
| 4482 | increment = 40; |
| 4483 | |
Américo Wang | 2b8f836 | 2009-02-16 18:54:21 +0800 | [diff] [blame] | 4484 | nice = TASK_NICE(current) + increment; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4485 | if (nice < -20) |
| 4486 | nice = -20; |
| 4487 | if (nice > 19) |
| 4488 | nice = 19; |
| 4489 | |
Matt Mackall | e43379f | 2005-05-01 08:59:00 -0700 | [diff] [blame] | 4490 | if (increment < 0 && !can_nice(current, nice)) |
| 4491 | return -EPERM; |
| 4492 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4493 | retval = security_task_setnice(current, nice); |
| 4494 | if (retval) |
| 4495 | return retval; |
| 4496 | |
| 4497 | set_user_nice(current, nice); |
| 4498 | return 0; |
| 4499 | } |
| 4500 | |
| 4501 | #endif |
| 4502 | |
| 4503 | /** |
| 4504 | * task_prio - return the priority value of a given task. |
| 4505 | * @p: the task in question. |
| 4506 | * |
| 4507 | * This is the priority value as seen by users in /proc. |
| 4508 | * RT tasks are offset by -200. Normal tasks are centered |
| 4509 | * around 0, value goes from -16 to +15. |
| 4510 | */ |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 4511 | int task_prio(const struct task_struct *p) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4512 | { |
| 4513 | return p->prio - MAX_RT_PRIO; |
| 4514 | } |
| 4515 | |
| 4516 | /** |
| 4517 | * task_nice - return the nice value of a given task. |
| 4518 | * @p: the task in question. |
| 4519 | */ |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 4520 | int task_nice(const struct task_struct *p) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4521 | { |
| 4522 | return TASK_NICE(p); |
| 4523 | } |
Pavel Roskin | 150d8be | 2008-03-05 16:56:37 -0500 | [diff] [blame] | 4524 | EXPORT_SYMBOL(task_nice); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4525 | |
| 4526 | /** |
| 4527 | * idle_cpu - is a given cpu idle currently? |
| 4528 | * @cpu: the processor in question. |
| 4529 | */ |
| 4530 | int idle_cpu(int cpu) |
| 4531 | { |
| 4532 | return cpu_curr(cpu) == cpu_rq(cpu)->idle; |
| 4533 | } |
| 4534 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4535 | /** |
| 4536 | * idle_task - return the idle task for a given cpu. |
| 4537 | * @cpu: the processor in question. |
| 4538 | */ |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 4539 | struct task_struct *idle_task(int cpu) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4540 | { |
| 4541 | return cpu_rq(cpu)->idle; |
| 4542 | } |
| 4543 | |
| 4544 | /** |
| 4545 | * find_process_by_pid - find a process with a matching PID value. |
| 4546 | * @pid: the pid in question. |
| 4547 | */ |
Alexey Dobriyan | a995744 | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 4548 | static struct task_struct *find_process_by_pid(pid_t pid) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4549 | { |
Pavel Emelyanov | 228ebcb | 2007-10-18 23:40:16 -0700 | [diff] [blame] | 4550 | return pid ? find_task_by_vpid(pid) : current; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4551 | } |
| 4552 | |
| 4553 | /* Actually do priority change: must hold rq lock. */ |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4554 | static void |
| 4555 | __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4556 | { |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4557 | BUG_ON(p->se.on_rq); |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 4558 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4559 | p->policy = policy; |
| 4560 | p->rt_priority = prio; |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 4561 | p->normal_prio = normal_prio(p); |
| 4562 | /* we are holding p->pi_lock already */ |
| 4563 | p->prio = rt_mutex_getprio(p); |
Peter Zijlstra | ffd44db | 2009-11-10 20:12:01 +0100 | [diff] [blame] | 4564 | if (rt_prio(p->prio)) |
| 4565 | p->sched_class = &rt_sched_class; |
| 4566 | else |
| 4567 | p->sched_class = &fair_sched_class; |
Peter Williams | 2dd73a4 | 2006-06-27 02:54:34 -0700 | [diff] [blame] | 4568 | set_load_weight(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4569 | } |
| 4570 | |
David Howells | c69e8d9 | 2008-11-14 10:39:19 +1100 | [diff] [blame] | 4571 | /* |
| 4572 | * check the target process has a UID that matches the current process's |
| 4573 | */ |
| 4574 | static bool check_same_owner(struct task_struct *p) |
| 4575 | { |
| 4576 | const struct cred *cred = current_cred(), *pcred; |
| 4577 | bool match; |
| 4578 | |
| 4579 | rcu_read_lock(); |
| 4580 | pcred = __task_cred(p); |
| 4581 | match = (cred->euid == pcred->euid || |
| 4582 | cred->euid == pcred->uid); |
| 4583 | rcu_read_unlock(); |
| 4584 | return match; |
| 4585 | } |
| 4586 | |
Rusty Russell | 961ccdd | 2008-06-23 13:55:38 +1000 | [diff] [blame] | 4587 | static int __sched_setscheduler(struct task_struct *p, int policy, |
KOSAKI Motohiro | fe7de49 | 2010-10-20 16:01:12 -0700 | [diff] [blame] | 4588 | const struct sched_param *param, bool user) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4589 | { |
Srivatsa Vaddagiri | 83b699e | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 4590 | int retval, oldprio, oldpolicy = -1, on_rq, running; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4591 | unsigned long flags; |
Thomas Gleixner | 83ab0aa | 2010-02-17 09:05:48 +0100 | [diff] [blame] | 4592 | const struct sched_class *prev_class; |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 4593 | struct rq *rq; |
Lennart Poettering | ca94c44 | 2009-06-15 17:17:47 +0200 | [diff] [blame] | 4594 | int reset_on_fork; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4595 | |
Steven Rostedt | 66e5393 | 2006-06-27 02:54:44 -0700 | [diff] [blame] | 4596 | /* may grab non-irq protected spin_locks */ |
| 4597 | BUG_ON(in_interrupt()); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4598 | recheck: |
| 4599 | /* double check policy once rq lock held */ |
Lennart Poettering | ca94c44 | 2009-06-15 17:17:47 +0200 | [diff] [blame] | 4600 | if (policy < 0) { |
| 4601 | reset_on_fork = p->sched_reset_on_fork; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4602 | policy = oldpolicy = p->policy; |
Lennart Poettering | ca94c44 | 2009-06-15 17:17:47 +0200 | [diff] [blame] | 4603 | } else { |
| 4604 | reset_on_fork = !!(policy & SCHED_RESET_ON_FORK); |
| 4605 | policy &= ~SCHED_RESET_ON_FORK; |
| 4606 | |
| 4607 | if (policy != SCHED_FIFO && policy != SCHED_RR && |
| 4608 | policy != SCHED_NORMAL && policy != SCHED_BATCH && |
| 4609 | policy != SCHED_IDLE) |
| 4610 | return -EINVAL; |
| 4611 | } |
| 4612 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4613 | /* |
| 4614 | * Valid priorities for SCHED_FIFO and SCHED_RR are |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4615 | * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL, |
| 4616 | * SCHED_BATCH and SCHED_IDLE is 0. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4617 | */ |
| 4618 | if (param->sched_priority < 0 || |
Ingo Molnar | 95cdf3b | 2005-09-10 00:26:11 -0700 | [diff] [blame] | 4619 | (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) || |
Steven Rostedt | d46523e | 2005-07-25 16:28:39 -0400 | [diff] [blame] | 4620 | (!p->mm && param->sched_priority > MAX_RT_PRIO-1)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4621 | return -EINVAL; |
Ingo Molnar | e05606d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4622 | if (rt_policy(policy) != (param->sched_priority != 0)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4623 | return -EINVAL; |
| 4624 | |
Olivier Croquette | 37e4ab3 | 2005-06-25 14:57:32 -0700 | [diff] [blame] | 4625 | /* |
| 4626 | * Allow unprivileged RT tasks to decrease priority: |
| 4627 | */ |
Rusty Russell | 961ccdd | 2008-06-23 13:55:38 +1000 | [diff] [blame] | 4628 | if (user && !capable(CAP_SYS_NICE)) { |
Ingo Molnar | e05606d | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4629 | if (rt_policy(policy)) { |
Oleg Nesterov | a44702e | 2010-06-11 01:09:44 +0200 | [diff] [blame] | 4630 | unsigned long rlim_rtprio = |
| 4631 | task_rlimit(p, RLIMIT_RTPRIO); |
Oleg Nesterov | 5fe1d75 | 2006-09-29 02:00:48 -0700 | [diff] [blame] | 4632 | |
Oleg Nesterov | 8dc3e90 | 2006-09-29 02:00:50 -0700 | [diff] [blame] | 4633 | /* can't set/change the rt policy */ |
| 4634 | if (policy != p->policy && !rlim_rtprio) |
| 4635 | return -EPERM; |
| 4636 | |
| 4637 | /* can't increase priority */ |
| 4638 | if (param->sched_priority > p->rt_priority && |
| 4639 | param->sched_priority > rlim_rtprio) |
| 4640 | return -EPERM; |
| 4641 | } |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4642 | /* |
| 4643 | * Like positive nice levels, dont allow tasks to |
| 4644 | * move out of SCHED_IDLE either: |
| 4645 | */ |
| 4646 | if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) |
| 4647 | return -EPERM; |
Oleg Nesterov | 8dc3e90 | 2006-09-29 02:00:50 -0700 | [diff] [blame] | 4648 | |
Olivier Croquette | 37e4ab3 | 2005-06-25 14:57:32 -0700 | [diff] [blame] | 4649 | /* can't change other user's priorities */ |
David Howells | c69e8d9 | 2008-11-14 10:39:19 +1100 | [diff] [blame] | 4650 | if (!check_same_owner(p)) |
Olivier Croquette | 37e4ab3 | 2005-06-25 14:57:32 -0700 | [diff] [blame] | 4651 | return -EPERM; |
Lennart Poettering | ca94c44 | 2009-06-15 17:17:47 +0200 | [diff] [blame] | 4652 | |
| 4653 | /* Normal users shall not reset the sched_reset_on_fork flag */ |
| 4654 | if (p->sched_reset_on_fork && !reset_on_fork) |
| 4655 | return -EPERM; |
Olivier Croquette | 37e4ab3 | 2005-06-25 14:57:32 -0700 | [diff] [blame] | 4656 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4657 | |
Jeremy Fitzhardinge | 725aad2 | 2008-08-03 09:33:03 -0700 | [diff] [blame] | 4658 | if (user) { |
KOSAKI Motohiro | b0ae198 | 2010-10-15 04:21:18 +0900 | [diff] [blame] | 4659 | retval = security_task_setscheduler(p); |
Jeremy Fitzhardinge | 725aad2 | 2008-08-03 09:33:03 -0700 | [diff] [blame] | 4660 | if (retval) |
| 4661 | return retval; |
| 4662 | } |
| 4663 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4664 | /* |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 4665 | * make sure no PI-waiters arrive (or leave) while we are |
| 4666 | * changing the priority of the task: |
| 4667 | */ |
Thomas Gleixner | 1d61548 | 2009-11-17 14:54:03 +0100 | [diff] [blame] | 4668 | raw_spin_lock_irqsave(&p->pi_lock, flags); |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 4669 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4670 | * To be able to change p->policy safely, the appropriate |
| 4671 | * runqueue lock must be held. |
| 4672 | */ |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 4673 | rq = __task_rq_lock(p); |
Peter Zijlstra | dc61b1d | 2010-06-08 11:40:42 +0200 | [diff] [blame] | 4674 | |
Peter Zijlstra | 34f971f | 2010-09-22 13:53:15 +0200 | [diff] [blame] | 4675 | /* |
| 4676 | * Changing the policy of the stop threads is a very bad idea |
| 4677 | */ |
| 4678 | if (p == rq->stop) { |
| 4679 | __task_rq_unlock(rq); |
| 4680 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
| 4681 | return -EINVAL; |
| 4682 | } |
| 4683 | |
Peter Zijlstra | dc61b1d | 2010-06-08 11:40:42 +0200 | [diff] [blame] | 4684 | #ifdef CONFIG_RT_GROUP_SCHED |
| 4685 | if (user) { |
| 4686 | /* |
| 4687 | * Do not allow realtime tasks into groups that have no runtime |
| 4688 | * assigned. |
| 4689 | */ |
| 4690 | if (rt_bandwidth_enabled() && rt_policy(policy) && |
| 4691 | task_group(p)->rt_bandwidth.rt_runtime == 0) { |
| 4692 | __task_rq_unlock(rq); |
| 4693 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
| 4694 | return -EPERM; |
| 4695 | } |
| 4696 | } |
| 4697 | #endif |
| 4698 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4699 | /* recheck policy now with rq lock held */ |
| 4700 | if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { |
| 4701 | policy = oldpolicy = -1; |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 4702 | __task_rq_unlock(rq); |
Thomas Gleixner | 1d61548 | 2009-11-17 14:54:03 +0100 | [diff] [blame] | 4703 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4704 | goto recheck; |
| 4705 | } |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4706 | on_rq = p->se.on_rq; |
Dmitry Adamushko | 051a1d1 | 2007-12-18 15:21:13 +0100 | [diff] [blame] | 4707 | running = task_current(rq, p); |
Hiroshi Shimamoto | 0e1f348 | 2008-03-10 11:01:20 -0700 | [diff] [blame] | 4708 | if (on_rq) |
Ingo Molnar | 2e1cb74 | 2007-08-09 11:16:49 +0200 | [diff] [blame] | 4709 | deactivate_task(rq, p, 0); |
Hiroshi Shimamoto | 0e1f348 | 2008-03-10 11:01:20 -0700 | [diff] [blame] | 4710 | if (running) |
| 4711 | p->sched_class->put_prev_task(rq, p); |
Dmitry Adamushko | f6b5320 | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 4712 | |
Lennart Poettering | ca94c44 | 2009-06-15 17:17:47 +0200 | [diff] [blame] | 4713 | p->sched_reset_on_fork = reset_on_fork; |
| 4714 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4715 | oldprio = p->prio; |
Thomas Gleixner | 83ab0aa | 2010-02-17 09:05:48 +0100 | [diff] [blame] | 4716 | prev_class = p->sched_class; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4717 | __setscheduler(rq, p, policy, param->sched_priority); |
Dmitry Adamushko | f6b5320 | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 4718 | |
Hiroshi Shimamoto | 0e1f348 | 2008-03-10 11:01:20 -0700 | [diff] [blame] | 4719 | if (running) |
| 4720 | p->sched_class->set_curr_task(rq); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 4721 | if (on_rq) { |
| 4722 | activate_task(rq, p, 0); |
Steven Rostedt | cb46984 | 2008-01-25 21:08:22 +0100 | [diff] [blame] | 4723 | |
| 4724 | check_class_changed(rq, p, prev_class, oldprio, running); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4725 | } |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 4726 | __task_rq_unlock(rq); |
Thomas Gleixner | 1d61548 | 2009-11-17 14:54:03 +0100 | [diff] [blame] | 4727 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 4728 | |
Thomas Gleixner | 95e02ca | 2006-06-27 02:55:02 -0700 | [diff] [blame] | 4729 | rt_mutex_adjust_pi(p); |
| 4730 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4731 | return 0; |
| 4732 | } |
Rusty Russell | 961ccdd | 2008-06-23 13:55:38 +1000 | [diff] [blame] | 4733 | |
| 4734 | /** |
| 4735 | * sched_setscheduler - change the scheduling policy and/or RT priority of a thread. |
| 4736 | * @p: the task in question. |
| 4737 | * @policy: new policy. |
| 4738 | * @param: structure containing the new RT priority. |
| 4739 | * |
| 4740 | * NOTE that the task may already be dead. |
| 4741 | */ |
| 4742 | int sched_setscheduler(struct task_struct *p, int policy, |
KOSAKI Motohiro | fe7de49 | 2010-10-20 16:01:12 -0700 | [diff] [blame] | 4743 | const struct sched_param *param) |
Rusty Russell | 961ccdd | 2008-06-23 13:55:38 +1000 | [diff] [blame] | 4744 | { |
| 4745 | return __sched_setscheduler(p, policy, param, true); |
| 4746 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4747 | EXPORT_SYMBOL_GPL(sched_setscheduler); |
| 4748 | |
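The kernel-doc above covers the checked entry point; as a hedged, illustrative sketch (not part of sched.c), an in-kernel caller such as a module could use it to promote a freshly created kthread to SCHED_FIFO. All "example_*" names and the priority value are invented; a caller that cannot rely on its own credentials would use sched_setscheduler_nocheck() instead.

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>

static struct task_struct *example_tsk;

static int example_worker(void *unused)
{
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);	/* idle ~1s per iteration */
	return 0;
}

static int __init example_rt_init(void)
{
	struct sched_param sp = { .sched_priority = 10 };	/* arbitrary RT priority */
	int ret;

	example_tsk = kthread_run(example_worker, NULL, "example_rt");
	if (IS_ERR(example_tsk))
		return PTR_ERR(example_tsk);

	/* Checked variant: may fail with -EPERM if the caller lacks the right. */
	ret = sched_setscheduler(example_tsk, SCHED_FIFO, &sp);
	if (ret)
		kthread_stop(example_tsk);
	return ret;
}

static void __exit example_rt_exit(void)
{
	kthread_stop(example_tsk);
}

module_init(example_rt_init);
module_exit(example_rt_exit);
MODULE_LICENSE("GPL");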
Rusty Russell | 961ccdd | 2008-06-23 13:55:38 +1000 | [diff] [blame] | 4749 | /** |
| 4750 | * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace. |
| 4751 | * @p: the task in question. |
| 4752 | * @policy: new policy. |
| 4753 | * @param: structure containing the new RT priority. |
| 4754 | * |
| 4755 | * Just like sched_setscheduler, only don't bother checking if the |
| 4756 | * current context has permission. For example, this is needed in |
| 4757 | * stop_machine(): we create temporary high priority worker threads, |
| 4758 | * but our caller might not have that capability. |
| 4759 | */ |
| 4760 | int sched_setscheduler_nocheck(struct task_struct *p, int policy, |
KOSAKI Motohiro | fe7de49 | 2010-10-20 16:01:12 -0700 | [diff] [blame] | 4761 | const struct sched_param *param) |
Rusty Russell | 961ccdd | 2008-06-23 13:55:38 +1000 | [diff] [blame] | 4762 | { |
| 4763 | return __sched_setscheduler(p, policy, param, false); |
| 4764 | } |
| 4765 | |
Ingo Molnar | 95cdf3b | 2005-09-10 00:26:11 -0700 | [diff] [blame] | 4766 | static int |
| 4767 | do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4768 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4769 | struct sched_param lparam; |
| 4770 | struct task_struct *p; |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 4771 | int retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4772 | |
| 4773 | if (!param || pid < 0) |
| 4774 | return -EINVAL; |
| 4775 | if (copy_from_user(&lparam, param, sizeof(struct sched_param))) |
| 4776 | return -EFAULT; |
Oleg Nesterov | 5fe1d75 | 2006-09-29 02:00:48 -0700 | [diff] [blame] | 4777 | |
| 4778 | rcu_read_lock(); |
| 4779 | retval = -ESRCH; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4780 | p = find_process_by_pid(pid); |
Oleg Nesterov | 5fe1d75 | 2006-09-29 02:00:48 -0700 | [diff] [blame] | 4781 | if (p != NULL) |
| 4782 | retval = sched_setscheduler(p, policy, &lparam); |
| 4783 | rcu_read_unlock(); |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 4784 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4785 | return retval; |
| 4786 | } |
| 4787 | |
| 4788 | /** |
| 4789 | * sys_sched_setscheduler - set/change the scheduler policy and RT priority |
| 4790 | * @pid: the pid in question. |
| 4791 | * @policy: new policy. |
| 4792 | * @param: structure containing the new RT priority. |
| 4793 | */ |
Heiko Carstens | 5add95d | 2009-01-14 14:14:08 +0100 | [diff] [blame] | 4794 | SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, |
| 4795 | struct sched_param __user *, param) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4796 | { |
Jason Baron | c21761f | 2006-01-18 17:43:03 -0800 | [diff] [blame] | 4797 | /* negative values for policy are not valid */ |
| 4798 | if (policy < 0) |
| 4799 | return -EINVAL; |
| 4800 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4801 | return do_sched_setscheduler(pid, policy, param); |
| 4802 | } |
| 4803 | |
| 4804 | /** |
| 4805 | * sys_sched_setparam - set/change the RT priority of a thread |
| 4806 | * @pid: the pid in question. |
| 4807 | * @param: structure containing the new RT priority. |
| 4808 | */ |
Heiko Carstens | 5add95d | 2009-01-14 14:14:08 +0100 | [diff] [blame] | 4809 | SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4810 | { |
| 4811 | return do_sched_setscheduler(pid, -1, param); |
| 4812 | } |
| 4813 | |
| 4814 | /** |
| 4815 | * sys_sched_getscheduler - get the policy (scheduling class) of a thread |
| 4816 | * @pid: the pid in question. |
| 4817 | */ |
Heiko Carstens | 5add95d | 2009-01-14 14:14:08 +0100 | [diff] [blame] | 4818 | SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4819 | { |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 4820 | struct task_struct *p; |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4821 | int retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4822 | |
| 4823 | if (pid < 0) |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4824 | return -EINVAL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4825 | |
| 4826 | retval = -ESRCH; |
Thomas Gleixner | 5fe85be | 2009-12-09 10:14:58 +0000 | [diff] [blame] | 4827 | rcu_read_lock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4828 | p = find_process_by_pid(pid); |
| 4829 | if (p) { |
| 4830 | retval = security_task_getscheduler(p); |
| 4831 | if (!retval) |
Lennart Poettering | ca94c44 | 2009-06-15 17:17:47 +0200 | [diff] [blame] | 4832 | retval = p->policy |
| 4833 | | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4834 | } |
Thomas Gleixner | 5fe85be | 2009-12-09 10:14:58 +0000 | [diff] [blame] | 4835 | rcu_read_unlock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4836 | return retval; |
| 4837 | } |
| 4838 | |
| 4839 | /** |
Lennart Poettering | ca94c44 | 2009-06-15 17:17:47 +0200 | [diff] [blame] | 4840 | * sys_sched_getparam - get the RT priority of a thread |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4841 | * @pid: the pid in question. |
| 4842 | * @param: structure containing the RT priority. |
| 4843 | */ |
Heiko Carstens | 5add95d | 2009-01-14 14:14:08 +0100 | [diff] [blame] | 4844 | SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4845 | { |
| 4846 | struct sched_param lp; |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 4847 | struct task_struct *p; |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4848 | int retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4849 | |
| 4850 | if (!param || pid < 0) |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 4851 | return -EINVAL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4852 | |
Thomas Gleixner | 5fe85be | 2009-12-09 10:14:58 +0000 | [diff] [blame] | 4853 | rcu_read_lock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4854 | p = find_process_by_pid(pid); |
| 4855 | retval = -ESRCH; |
| 4856 | if (!p) |
| 4857 | goto out_unlock; |
| 4858 | |
| 4859 | retval = security_task_getscheduler(p); |
| 4860 | if (retval) |
| 4861 | goto out_unlock; |
| 4862 | |
| 4863 | lp.sched_priority = p->rt_priority; |
Thomas Gleixner | 5fe85be | 2009-12-09 10:14:58 +0000 | [diff] [blame] | 4864 | rcu_read_unlock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4865 | |
| 4866 | /* |
| 4867 | * This one might sleep, so we cannot do it with a spinlock held ... |
| 4868 | */ |
| 4869 | retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0; |
| 4870 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4871 | return retval; |
| 4872 | |
| 4873 | out_unlock: |
Thomas Gleixner | 5fe85be | 2009-12-09 10:14:58 +0000 | [diff] [blame] | 4874 | rcu_read_unlock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4875 | return retval; |
| 4876 | } |
| 4877 | |
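For the two read-side calls above, a hedged userspace sketch (ordinary C, not kernel code); pid 0 denotes the calling thread:

#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param sp;
	int policy = sched_getscheduler(0);	/* 0: the calling thread */

	if (policy < 0 || sched_getparam(0, &sp) < 0) {
		perror("sched_get*");
		return 1;
	}
	printf("policy=%d rt_priority=%d\n", policy, sp.sched_priority);
	return 0;
}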
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 4878 | long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4879 | { |
Rusty Russell | 5a16f3d | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 4880 | cpumask_var_t cpus_allowed, new_mask; |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 4881 | struct task_struct *p; |
| 4882 | int retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4883 | |
Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 4884 | get_online_cpus(); |
Thomas Gleixner | 23f5d14 | 2009-12-09 10:15:01 +0000 | [diff] [blame] | 4885 | rcu_read_lock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4886 | |
| 4887 | p = find_process_by_pid(pid); |
| 4888 | if (!p) { |
Thomas Gleixner | 23f5d14 | 2009-12-09 10:15:01 +0000 | [diff] [blame] | 4889 | rcu_read_unlock(); |
Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 4890 | put_online_cpus(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4891 | return -ESRCH; |
| 4892 | } |
| 4893 | |
Thomas Gleixner | 23f5d14 | 2009-12-09 10:15:01 +0000 | [diff] [blame] | 4894 | /* Prevent p going away */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4895 | get_task_struct(p); |
Thomas Gleixner | 23f5d14 | 2009-12-09 10:15:01 +0000 | [diff] [blame] | 4896 | rcu_read_unlock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4897 | |
Rusty Russell | 5a16f3d | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 4898 | if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { |
| 4899 | retval = -ENOMEM; |
| 4900 | goto out_put_task; |
| 4901 | } |
| 4902 | if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { |
| 4903 | retval = -ENOMEM; |
| 4904 | goto out_free_cpus_allowed; |
| 4905 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4906 | retval = -EPERM; |
David Howells | c69e8d9 | 2008-11-14 10:39:19 +1100 | [diff] [blame] | 4907 | if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4908 | goto out_unlock; |
| 4909 | |
KOSAKI Motohiro | b0ae198 | 2010-10-15 04:21:18 +0900 | [diff] [blame] | 4910 | retval = security_task_setscheduler(p); |
David Quigley | e7834f8 | 2006-06-23 02:03:59 -0700 | [diff] [blame] | 4911 | if (retval) |
| 4912 | goto out_unlock; |
| 4913 | |
Rusty Russell | 5a16f3d | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 4914 | cpuset_cpus_allowed(p, cpus_allowed); |
| 4915 | cpumask_and(new_mask, in_mask, cpus_allowed); |
Peter Zijlstra | 4924627 | 2010-10-17 21:46:10 +0200 | [diff] [blame] | 4916 | again: |
Rusty Russell | 5a16f3d | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 4917 | retval = set_cpus_allowed_ptr(p, new_mask); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4918 | |
Paul Menage | 8707d8b | 2007-10-18 23:40:22 -0700 | [diff] [blame] | 4919 | if (!retval) { |
Rusty Russell | 5a16f3d | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 4920 | cpuset_cpus_allowed(p, cpus_allowed); |
| 4921 | if (!cpumask_subset(new_mask, cpus_allowed)) { |
Paul Menage | 8707d8b | 2007-10-18 23:40:22 -0700 | [diff] [blame] | 4922 | /* |
| 4923 | * We must have raced with a concurrent cpuset |
| 4924 | * update. Just reset the cpus_allowed to the |
| 4925 | * cpuset's cpus_allowed |
| 4926 | */ |
Rusty Russell | 5a16f3d | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 4927 | cpumask_copy(new_mask, cpus_allowed); |
Paul Menage | 8707d8b | 2007-10-18 23:40:22 -0700 | [diff] [blame] | 4928 | goto again; |
| 4929 | } |
| 4930 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4931 | out_unlock: |
Rusty Russell | 5a16f3d | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 4932 | free_cpumask_var(new_mask); |
| 4933 | out_free_cpus_allowed: |
| 4934 | free_cpumask_var(cpus_allowed); |
| 4935 | out_put_task: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4936 | put_task_struct(p); |
Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 4937 | put_online_cpus(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4938 | return retval; |
| 4939 | } |
| 4940 | |
| 4941 | static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 4942 | struct cpumask *new_mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4943 | { |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 4944 | if (len < cpumask_size()) |
| 4945 | cpumask_clear(new_mask); |
| 4946 | else if (len > cpumask_size()) |
| 4947 | len = cpumask_size(); |
| 4948 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4949 | return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; |
| 4950 | } |
| 4951 | |
| 4952 | /** |
| 4953 | * sys_sched_setaffinity - set the cpu affinity of a process |
| 4954 | * @pid: pid of the process |
| 4955 | * @len: length in bytes of the bitmask pointed to by user_mask_ptr |
| 4956 | * @user_mask_ptr: user-space pointer to the new cpu mask |
| 4957 | */ |
Heiko Carstens | 5add95d | 2009-01-14 14:14:08 +0100 | [diff] [blame] | 4958 | SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, |
| 4959 | unsigned long __user *, user_mask_ptr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4960 | { |
Rusty Russell | 5a16f3d | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 4961 | cpumask_var_t new_mask; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4962 | int retval; |
| 4963 | |
Rusty Russell | 5a16f3d | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 4964 | if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) |
| 4965 | return -ENOMEM; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4966 | |
Rusty Russell | 5a16f3d | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 4967 | retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); |
| 4968 | if (retval == 0) |
| 4969 | retval = sched_setaffinity(pid, new_mask); |
| 4970 | free_cpumask_var(new_mask); |
| 4971 | return retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4972 | } |
| 4973 | |
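A hedged userspace sketch of the write side: the glibc sched_setaffinity() wrapper funnels into the syscall above; here the calling thread is pinned to CPU 0.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);		/* allow CPU 0 only */

	if (sched_setaffinity(0, sizeof(set), &set)) {
		perror("sched_setaffinity");
		return 1;
	}
	return 0;
}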
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 4974 | long sched_getaffinity(pid_t pid, struct cpumask *mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4975 | { |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 4976 | struct task_struct *p; |
Thomas Gleixner | 3160568 | 2009-12-08 20:24:16 +0000 | [diff] [blame] | 4977 | unsigned long flags; |
| 4978 | struct rq *rq; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4979 | int retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4980 | |
Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 4981 | get_online_cpus(); |
Thomas Gleixner | 23f5d14 | 2009-12-09 10:15:01 +0000 | [diff] [blame] | 4982 | rcu_read_lock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4983 | |
| 4984 | retval = -ESRCH; |
| 4985 | p = find_process_by_pid(pid); |
| 4986 | if (!p) |
| 4987 | goto out_unlock; |
| 4988 | |
David Quigley | e7834f8 | 2006-06-23 02:03:59 -0700 | [diff] [blame] | 4989 | retval = security_task_getscheduler(p); |
| 4990 | if (retval) |
| 4991 | goto out_unlock; |
| 4992 | |
Thomas Gleixner | 3160568 | 2009-12-08 20:24:16 +0000 | [diff] [blame] | 4993 | rq = task_rq_lock(p, &flags); |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 4994 | cpumask_and(mask, &p->cpus_allowed, cpu_online_mask); |
Thomas Gleixner | 3160568 | 2009-12-08 20:24:16 +0000 | [diff] [blame] | 4995 | task_rq_unlock(rq, &flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4996 | |
| 4997 | out_unlock: |
Thomas Gleixner | 23f5d14 | 2009-12-09 10:15:01 +0000 | [diff] [blame] | 4998 | rcu_read_unlock(); |
Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 4999 | put_online_cpus(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5000 | |
Ulrich Drepper | 9531b62 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 5001 | return retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5002 | } |
| 5003 | |
| 5004 | /** |
| 5005 | * sys_sched_getaffinity - get the cpu affinity of a process |
| 5006 | * @pid: pid of the process |
| 5007 | * @len: length in bytes of the bitmask pointed to by user_mask_ptr |
| 5008 | * @user_mask_ptr: user-space pointer to hold the current cpu mask |
| 5009 | */ |
Heiko Carstens | 5add95d | 2009-01-14 14:14:08 +0100 | [diff] [blame] | 5010 | SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, |
| 5011 | unsigned long __user *, user_mask_ptr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5012 | { |
| 5013 | int ret; |
Rusty Russell | f17c860 | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5014 | cpumask_var_t mask; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5015 | |
Anton Blanchard | 84fba5e | 2010-04-06 17:02:19 +1000 | [diff] [blame] | 5016 | if ((len * BITS_PER_BYTE) < nr_cpu_ids) |
KOSAKI Motohiro | cd3d803 | 2010-03-12 16:15:36 +0900 | [diff] [blame] | 5017 | return -EINVAL; |
| 5018 | if (len & (sizeof(unsigned long)-1)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5019 | return -EINVAL; |
| 5020 | |
Rusty Russell | f17c860 | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5021 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) |
| 5022 | return -ENOMEM; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5023 | |
Rusty Russell | f17c860 | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5024 | ret = sched_getaffinity(pid, mask); |
| 5025 | if (ret == 0) { |
KOSAKI Motohiro | 8bc037f | 2010-03-17 09:36:58 +0900 | [diff] [blame] | 5026 | size_t retlen = min_t(size_t, len, cpumask_size()); |
KOSAKI Motohiro | cd3d803 | 2010-03-12 16:15:36 +0900 | [diff] [blame] | 5027 | |
| 5028 | if (copy_to_user(user_mask_ptr, mask, retlen)) |
Rusty Russell | f17c860 | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5029 | ret = -EFAULT; |
| 5030 | else |
KOSAKI Motohiro | cd3d803 | 2010-03-12 16:15:36 +0900 | [diff] [blame] | 5031 | ret = retlen; |
Rusty Russell | f17c860 | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5032 | } |
| 5033 | free_cpumask_var(mask); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5034 | |
Rusty Russell | f17c860 | 2008-11-25 02:35:11 +1030 | [diff] [blame] | 5035 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5036 | } |
| 5037 | |
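One detail worth an example: on success the raw syscall returns retlen, the number of bytes copied into the user buffer, whereas the glibc wrapper maps that to 0. A hedged userspace sketch using the raw syscall:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	cpu_set_t set;
	long ret = syscall(SYS_sched_getaffinity, 0, sizeof(set), &set);

	if (ret < 0) {
		perror("sched_getaffinity");
		return 1;
	}
	printf("kernel copied %ld bytes, %d CPUs allowed\n",
	       ret, CPU_COUNT(&set));
	return 0;
}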
| 5038 | /** |
| 5039 | * sys_sched_yield - yield the current processor to other threads. |
| 5040 | * |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5041 | * This function yields the current CPU to other tasks. If there are no |
| 5042 | * other threads running on this CPU then this function will return. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5043 | */ |
Heiko Carstens | 5add95d | 2009-01-14 14:14:08 +0100 | [diff] [blame] | 5044 | SYSCALL_DEFINE0(sched_yield) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5045 | { |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 5046 | struct rq *rq = this_rq_lock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5047 | |
Ingo Molnar | 2d72376 | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 5048 | schedstat_inc(rq, yld_count); |
Dmitry Adamushko | 4530d7a | 2007-10-15 17:00:08 +0200 | [diff] [blame] | 5049 | current->sched_class->yield_task(rq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5050 | |
| 5051 | /* |
| 5052 | * Since we are going to call schedule() anyway, there's |
| 5053 | * no need to preempt or enable interrupts: |
| 5054 | */ |
| 5055 | __release(rq->lock); |
Ingo Molnar | 8a25d5d | 2006-07-03 00:24:54 -0700 | [diff] [blame] | 5056 | spin_release(&rq->lock.dep_map, 1, _THIS_IP_); |
Thomas Gleixner | 9828ea9 | 2009-12-03 20:55:53 +0100 | [diff] [blame] | 5057 | do_raw_spin_unlock(&rq->lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5058 | preempt_enable_no_resched(); |
| 5059 | |
| 5060 | schedule(); |
| 5061 | |
| 5062 | return 0; |
| 5063 | } |
| 5064 | |
Peter Zijlstra | d86ee48 | 2009-07-10 14:57:57 +0200 | [diff] [blame] | 5065 | static inline int should_resched(void) |
| 5066 | { |
| 5067 | return need_resched() && !(preempt_count() & PREEMPT_ACTIVE); |
| 5068 | } |
| 5069 | |
Andrew Morton | e7b3840 | 2006-06-30 01:56:00 -0700 | [diff] [blame] | 5070 | static void __cond_resched(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5071 | { |
Frederic Weisbecker | e7aaaa6 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 5072 | add_preempt_count(PREEMPT_ACTIVE); |
| 5073 | schedule(); |
| 5074 | sub_preempt_count(PREEMPT_ACTIVE); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5075 | } |
| 5076 | |
Herbert Xu | 02b67cc | 2008-01-25 21:08:28 +0100 | [diff] [blame] | 5077 | int __sched _cond_resched(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5078 | { |
Peter Zijlstra | d86ee48 | 2009-07-10 14:57:57 +0200 | [diff] [blame] | 5079 | if (should_resched()) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5080 | __cond_resched(); |
| 5081 | return 1; |
| 5082 | } |
| 5083 | return 0; |
| 5084 | } |
Herbert Xu | 02b67cc | 2008-01-25 21:08:28 +0100 | [diff] [blame] | 5085 | EXPORT_SYMBOL(_cond_resched); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5086 | |
| 5087 | /* |
Frederic Weisbecker | 613afbf | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 5088 | * __cond_resched_lock() - if a reschedule is pending, drop the given lock, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5089 | * call schedule, and on return reacquire the lock. |
| 5090 | * |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 5091 | * This works OK both with and without CONFIG_PREEMPT. We do strange low-level |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5092 | * operations here to prevent schedule() from being called twice (once via |
| 5093 | * spin_unlock(), once by hand). |
| 5094 | */ |
Frederic Weisbecker | 613afbf | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 5095 | int __cond_resched_lock(spinlock_t *lock) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5096 | { |
Peter Zijlstra | d86ee48 | 2009-07-10 14:57:57 +0200 | [diff] [blame] | 5097 | int resched = should_resched(); |
Jan Kara | 6df3cec | 2005-06-13 15:52:32 -0700 | [diff] [blame] | 5098 | int ret = 0; |
| 5099 | |
Peter Zijlstra | f607c66 | 2009-07-20 19:16:29 +0200 | [diff] [blame] | 5100 | lockdep_assert_held(lock); |
| 5101 | |
Nick Piggin | 95c354f | 2008-01-30 13:31:20 +0100 | [diff] [blame] | 5102 | if (spin_needbreak(lock) || resched) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5103 | spin_unlock(lock); |
Peter Zijlstra | d86ee48 | 2009-07-10 14:57:57 +0200 | [diff] [blame] | 5104 | if (resched) |
Nick Piggin | 95c354f | 2008-01-30 13:31:20 +0100 | [diff] [blame] | 5105 | __cond_resched(); |
| 5106 | else |
| 5107 | cpu_relax(); |
Jan Kara | 6df3cec | 2005-06-13 15:52:32 -0700 | [diff] [blame] | 5108 | ret = 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5109 | spin_lock(lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5110 | } |
Jan Kara | 6df3cec | 2005-06-13 15:52:32 -0700 | [diff] [blame] | 5111 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5112 | } |
Frederic Weisbecker | 613afbf | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 5113 | EXPORT_SYMBOL(__cond_resched_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5114 | |
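Callers reach this through the cond_resched_lock() wrapper in sched.h. A hedged in-kernel sketch of the usual pattern, a long walk under a spinlock that periodically gives the lock and the CPU away (the list, lock and item type are invented):

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static LIST_HEAD(example_list);

struct example_item {
	struct list_head node;
};

static void example_walk(void)
{
	struct example_item *it;

	spin_lock(&example_lock);
	list_for_each_entry(it, &example_list, node) {
		/* ... per-item work ... */

		/* Drops example_lock, reschedules if needed, reacquires it. */
		if (cond_resched_lock(&example_lock))
			break;	/* the iterator is stale once the lock was dropped */
	}
	spin_unlock(&example_lock);
}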
Frederic Weisbecker | 613afbf | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 5115 | int __sched __cond_resched_softirq(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5116 | { |
| 5117 | BUG_ON(!in_softirq()); |
| 5118 | |
Peter Zijlstra | d86ee48 | 2009-07-10 14:57:57 +0200 | [diff] [blame] | 5119 | if (should_resched()) { |
Thomas Gleixner | 98d8256 | 2007-05-23 13:58:18 -0700 | [diff] [blame] | 5120 | local_bh_enable(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5121 | __cond_resched(); |
| 5122 | local_bh_disable(); |
| 5123 | return 1; |
| 5124 | } |
| 5125 | return 0; |
| 5126 | } |
Frederic Weisbecker | 613afbf | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 5127 | EXPORT_SYMBOL(__cond_resched_softirq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5128 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5129 | /** |
| 5130 | * yield - yield the current processor to other threads. |
| 5131 | * |
Robert P. J. Day | 72fd4a3 | 2007-02-10 01:45:59 -0800 | [diff] [blame] | 5132 | * This is a shortcut for kernel-space yielding - it marks the |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5133 | * thread runnable and calls sys_sched_yield(). |
| 5134 | */ |
| 5135 | void __sched yield(void) |
| 5136 | { |
| 5137 | set_current_state(TASK_RUNNING); |
| 5138 | sys_sched_yield(); |
| 5139 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5140 | EXPORT_SYMBOL(yield); |
| 5141 | |
| 5142 | /* |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 5143 | * This task is about to go to sleep on IO. Increment rq->nr_iowait so |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5144 | * that process accounting knows that this is a task in IO wait state. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5145 | */ |
| 5146 | void __sched io_schedule(void) |
| 5147 | { |
Hitoshi Mitake | 54d35f2 | 2009-06-29 14:44:57 +0900 | [diff] [blame] | 5148 | struct rq *rq = raw_rq(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5149 | |
Shailabh Nagar | 0ff9224 | 2006-07-14 00:24:37 -0700 | [diff] [blame] | 5150 | delayacct_blkio_start(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5151 | atomic_inc(&rq->nr_iowait); |
Arjan van de Ven | 8f0dfc3 | 2009-07-20 11:26:58 -0700 | [diff] [blame] | 5152 | current->in_iowait = 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5153 | schedule(); |
Arjan van de Ven | 8f0dfc3 | 2009-07-20 11:26:58 -0700 | [diff] [blame] | 5154 | current->in_iowait = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5155 | atomic_dec(&rq->nr_iowait); |
Shailabh Nagar | 0ff9224 | 2006-07-14 00:24:37 -0700 | [diff] [blame] | 5156 | delayacct_blkio_end(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5157 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5158 | EXPORT_SYMBOL(io_schedule); |
| 5159 | |
| 5160 | long __sched io_schedule_timeout(long timeout) |
| 5161 | { |
Hitoshi Mitake | 54d35f2 | 2009-06-29 14:44:57 +0900 | [diff] [blame] | 5162 | struct rq *rq = raw_rq(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5163 | long ret; |
| 5164 | |
Shailabh Nagar | 0ff9224 | 2006-07-14 00:24:37 -0700 | [diff] [blame] | 5165 | delayacct_blkio_start(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5166 | atomic_inc(&rq->nr_iowait); |
Arjan van de Ven | 8f0dfc3 | 2009-07-20 11:26:58 -0700 | [diff] [blame] | 5167 | current->in_iowait = 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5168 | ret = schedule_timeout(timeout); |
Arjan van de Ven | 8f0dfc3 | 2009-07-20 11:26:58 -0700 | [diff] [blame] | 5169 | current->in_iowait = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5170 | atomic_dec(&rq->nr_iowait); |
Shailabh Nagar | 0ff9224 | 2006-07-14 00:24:37 -0700 | [diff] [blame] | 5171 | delayacct_blkio_end(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5172 | return ret; |
| 5173 | } |
| 5174 | |
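A hedged in-kernel sketch of the intended use: sleeping for a device event with the delay charged to iowait rather than plain sleep (the waitqueue and flag are invented):

#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static int example_io_done;

static void example_wait_for_io(void)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&example_wq, &wait, TASK_UNINTERRUPTIBLE);
		if (example_io_done)
			break;
		io_schedule();	/* like schedule(), but accounted in rq->nr_iowait */
	}
	finish_wait(&example_wq, &wait);
}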
| 5175 | /** |
| 5176 | * sys_sched_get_priority_max - return maximum RT priority. |
| 5177 | * @policy: scheduling class. |
| 5178 | * |
| 5179 | * this syscall returns the maximum rt_priority that can be used |
| 5180 | * by a given scheduling class. |
| 5181 | */ |
Heiko Carstens | 5add95d | 2009-01-14 14:14:08 +0100 | [diff] [blame] | 5182 | SYSCALL_DEFINE1(sched_get_priority_max, int, policy) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5183 | { |
| 5184 | int ret = -EINVAL; |
| 5185 | |
| 5186 | switch (policy) { |
| 5187 | case SCHED_FIFO: |
| 5188 | case SCHED_RR: |
| 5189 | ret = MAX_USER_RT_PRIO-1; |
| 5190 | break; |
| 5191 | case SCHED_NORMAL: |
Ingo Molnar | b0a9499 | 2006-01-14 13:20:41 -0800 | [diff] [blame] | 5192 | case SCHED_BATCH: |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5193 | case SCHED_IDLE: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5194 | ret = 0; |
| 5195 | break; |
| 5196 | } |
| 5197 | return ret; |
| 5198 | } |
| 5199 | |
| 5200 | /** |
| 5201 | * sys_sched_get_priority_min - return minimum RT priority. |
| 5202 | * @policy: scheduling class. |
| 5203 | * |
| 5204 | * this syscall returns the minimum rt_priority that can be used |
| 5205 | * by a given scheduling class. |
| 5206 | */ |
Heiko Carstens | 5add95d | 2009-01-14 14:14:08 +0100 | [diff] [blame] | 5207 | SYSCALL_DEFINE1(sched_get_priority_min, int, policy) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5208 | { |
| 5209 | int ret = -EINVAL; |
| 5210 | |
| 5211 | switch (policy) { |
| 5212 | case SCHED_FIFO: |
| 5213 | case SCHED_RR: |
| 5214 | ret = 1; |
| 5215 | break; |
| 5216 | case SCHED_NORMAL: |
Ingo Molnar | b0a9499 | 2006-01-14 13:20:41 -0800 | [diff] [blame] | 5217 | case SCHED_BATCH: |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5218 | case SCHED_IDLE: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5219 | ret = 0; |
| 5220 | } |
| 5221 | return ret; |
| 5222 | } |
| 5223 | |
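A hedged userspace sketch querying the range these two syscalls report for SCHED_FIFO (1..MAX_USER_RT_PRIO-1, i.e. 1..99 on a stock build):

#include <sched.h>
#include <stdio.h>

int main(void)
{
	int lo = sched_get_priority_min(SCHED_FIFO);
	int hi = sched_get_priority_max(SCHED_FIFO);

	if (lo < 0 || hi < 0) {
		perror("sched_get_priority_*");
		return 1;
	}
	printf("SCHED_FIFO priorities: %d..%d\n", lo, hi);
	return 0;
}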
| 5224 | /** |
| 5225 | * sys_sched_rr_get_interval - return the default timeslice of a process. |
| 5226 | * @pid: pid of the process. |
| 5227 | * @interval: userspace pointer to the timeslice value. |
| 5228 | * |
| 5229 | * this syscall writes the default timeslice value of a given process |
| 5230 | * into the user-space timespec buffer. A value of '0' means infinity. |
| 5231 | */ |
Heiko Carstens | 17da2bd | 2009-01-14 14:14:10 +0100 | [diff] [blame] | 5232 | SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, |
Heiko Carstens | 754fe8d | 2009-01-14 14:14:09 +0100 | [diff] [blame] | 5233 | struct timespec __user *, interval) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5234 | { |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 5235 | struct task_struct *p; |
Dmitry Adamushko | a4ec24b | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 5236 | unsigned int time_slice; |
Thomas Gleixner | dba091b | 2009-12-09 09:32:03 +0100 | [diff] [blame] | 5237 | unsigned long flags; |
| 5238 | struct rq *rq; |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 5239 | int retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5240 | struct timespec t; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5241 | |
| 5242 | if (pid < 0) |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 5243 | return -EINVAL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5244 | |
| 5245 | retval = -ESRCH; |
Thomas Gleixner | 1a551ae | 2009-12-09 10:15:11 +0000 | [diff] [blame] | 5246 | rcu_read_lock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5247 | p = find_process_by_pid(pid); |
| 5248 | if (!p) |
| 5249 | goto out_unlock; |
| 5250 | |
| 5251 | retval = security_task_getscheduler(p); |
| 5252 | if (retval) |
| 5253 | goto out_unlock; |
| 5254 | |
Thomas Gleixner | dba091b | 2009-12-09 09:32:03 +0100 | [diff] [blame] | 5255 | rq = task_rq_lock(p, &flags); |
| 5256 | time_slice = p->sched_class->get_rr_interval(rq, p); |
| 5257 | task_rq_unlock(rq, &flags); |
Dmitry Adamushko | a4ec24b | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 5258 | |
Thomas Gleixner | 1a551ae | 2009-12-09 10:15:11 +0000 | [diff] [blame] | 5259 | rcu_read_unlock(); |
Dmitry Adamushko | a4ec24b | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 5260 | jiffies_to_timespec(time_slice, &t); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5261 | retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5262 | return retval; |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 5263 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5264 | out_unlock: |
Thomas Gleixner | 1a551ae | 2009-12-09 10:15:11 +0000 | [diff] [blame] | 5265 | rcu_read_unlock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5266 | return retval; |
| 5267 | } |
| 5268 | |
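A hedged userspace sketch of the call; for the calling thread (pid 0) the kernel reports whatever its scheduling class considers the current default slice:

#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	if (sched_rr_get_interval(0, &ts)) {
		perror("sched_rr_get_interval");
		return 1;
	}
	printf("timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}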
Steven Rostedt | 7c731e0 | 2008-05-12 21:20:41 +0200 | [diff] [blame] | 5269 | static const char stat_nam[] = TASK_STATE_TO_CHAR_STR; |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 5270 | |
Ingo Molnar | 82a1fcb | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 5271 | void sched_show_task(struct task_struct *p) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5272 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5273 | unsigned long free = 0; |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 5274 | unsigned state; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5275 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5276 | state = p->state ? __ffs(p->state) + 1 : 0; |
Erik Gilling | 28d0686 | 2010-11-19 18:08:51 -0800 | [diff] [blame] | 5277 | printk(KERN_INFO "%-15.15s %c", p->comm, |
Andreas Mohr | 2ed6e34 | 2006-07-10 04:43:52 -0700 | [diff] [blame] | 5278 | state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?'); |
Ingo Molnar | 4bd7732 | 2007-07-11 21:21:47 +0200 | [diff] [blame] | 5279 | #if BITS_PER_LONG == 32 |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5280 | if (state == TASK_RUNNING) |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 5281 | printk(KERN_CONT " running "); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5282 | else |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 5283 | printk(KERN_CONT " %08lx ", thread_saved_pc(p)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5284 | #else |
| 5285 | if (state == TASK_RUNNING) |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 5286 | printk(KERN_CONT " running task "); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5287 | else |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 5288 | printk(KERN_CONT " %016lx ", thread_saved_pc(p)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5289 | #endif |
| 5290 | #ifdef CONFIG_DEBUG_STACK_USAGE |
Eric Sandeen | 7c9f886 | 2008-04-22 16:38:23 -0500 | [diff] [blame] | 5291 | free = stack_not_used(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5292 | #endif |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 5293 | printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free, |
David Rientjes | aa47b7e | 2009-05-04 01:38:05 -0700 | [diff] [blame] | 5294 | task_pid_nr(p), task_pid_nr(p->real_parent), |
| 5295 | (unsigned long)task_thread_info(p)->flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5296 | |
Nick Piggin | 5fb5e6d | 2008-01-25 21:08:34 +0100 | [diff] [blame] | 5297 | show_stack(p, NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5298 | } |
| 5299 | |
Ingo Molnar | e59e2ae | 2006-12-06 20:35:59 -0800 | [diff] [blame] | 5300 | void show_state_filter(unsigned long state_filter) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5301 | { |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 5302 | struct task_struct *g, *p; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5303 | |
Ingo Molnar | 4bd7732 | 2007-07-11 21:21:47 +0200 | [diff] [blame] | 5304 | #if BITS_PER_LONG == 32 |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 5305 | printk(KERN_INFO |
| 5306 | " task PC stack pid father\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5307 | #else |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 5308 | printk(KERN_INFO |
| 5309 | " task PC stack pid father\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5310 | #endif |
| 5311 | read_lock(&tasklist_lock); |
| 5312 | do_each_thread(g, p) { |
| 5313 | /* |
| 5314 | * reset the NMI-timeout, listing all files on a slow |
| 5315 | * console might take a lot of time: |
| 5316 | */ |
| 5317 | touch_nmi_watchdog(); |
Ingo Molnar | 39bc89f | 2007-04-25 20:50:03 -0700 | [diff] [blame] | 5318 | if (!state_filter || (p->state & state_filter)) |
Ingo Molnar | 82a1fcb | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 5319 | sched_show_task(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5320 | } while_each_thread(g, p); |
| 5321 | |
Jeremy Fitzhardinge | 04c9167 | 2007-05-08 00:28:05 -0700 | [diff] [blame] | 5322 | touch_all_softlockup_watchdogs(); |
| 5323 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5324 | #ifdef CONFIG_SCHED_DEBUG |
| 5325 | sysrq_sched_debug_show(); |
| 5326 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5327 | read_unlock(&tasklist_lock); |
Ingo Molnar | e59e2ae | 2006-12-06 20:35:59 -0800 | [diff] [blame] | 5328 | /* |
| 5329 | * Only show locks if all tasks are dumped: |
| 5330 | */ |
Shmulik Ladkani | 93335a2 | 2009-11-25 15:23:41 +0200 | [diff] [blame] | 5331 | if (!state_filter) |
Ingo Molnar | e59e2ae | 2006-12-06 20:35:59 -0800 | [diff] [blame] | 5332 | debug_show_all_locks(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5333 | } |
| 5334 | |
Ingo Molnar | 1df2105 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5335 | void __cpuinit init_idle_bootup_task(struct task_struct *idle) |
| 5336 | { |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5337 | idle->sched_class = &idle_sched_class; |
Ingo Molnar | 1df2105 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 5338 | } |
| 5339 | |
Ingo Molnar | f340c0d | 2005-06-28 16:40:42 +0200 | [diff] [blame] | 5340 | /** |
| 5341 | * init_idle - set up an idle thread for a given CPU |
| 5342 | * @idle: task in question |
| 5343 | * @cpu: cpu the idle task belongs to |
| 5344 | * |
| 5345 | * NOTE: this function does not set the idle thread's NEED_RESCHED |
| 5346 | * flag, to make booting more robust. |
| 5347 | */ |
Nick Piggin | 5c1e176 | 2006-10-03 01:14:04 -0700 | [diff] [blame] | 5348 | void __cpuinit init_idle(struct task_struct *idle, int cpu) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5349 | { |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 5350 | struct rq *rq = cpu_rq(cpu); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5351 | unsigned long flags; |
| 5352 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 5353 | raw_spin_lock_irqsave(&rq->lock, flags); |
Ingo Molnar | 5cbd54e | 2008-11-12 20:05:50 +0100 | [diff] [blame] | 5354 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5355 | __sched_fork(idle); |
Peter Zijlstra | 06b83b5 | 2009-12-16 18:04:35 +0100 | [diff] [blame] | 5356 | idle->state = TASK_RUNNING; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5357 | idle->se.exec_start = sched_clock(); |
| 5358 | |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 5359 | cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu)); |
Peter Zijlstra | 6506cf6c | 2010-09-16 17:50:31 +0200 | [diff] [blame] | 5360 | /* |
| 5361 | * We're having a chicken and egg problem: even though we are |
| 5362 | * holding rq->lock, the cpu isn't yet set to this cpu so the |
| 5363 | * lockdep check in task_group() will fail. |
| 5364 | * |
| 5365 | * Similar case to sched_fork(). / Alternatively we could |
| 5366 | * use task_rq_lock() here and obtain the other rq->lock. |
| 5367 | * |
| 5368 | * Silence PROVE_RCU |
| 5369 | */ |
| 5370 | rcu_read_lock(); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5371 | __set_task_cpu(idle, cpu); |
Peter Zijlstra | 6506cf6c | 2010-09-16 17:50:31 +0200 | [diff] [blame] | 5372 | rcu_read_unlock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5373 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5374 | rq->curr = rq->idle = idle; |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 5375 | #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) |
| 5376 | idle->oncpu = 1; |
| 5377 | #endif |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 5378 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5379 | |
| 5380 | /* Set the preempt count _outside_ the spinlocks! */ |
Linus Torvalds | 8e3e076 | 2008-05-10 20:58:02 -0700 | [diff] [blame] | 5381 | #if defined(CONFIG_PREEMPT) |
| 5382 | task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0); |
| 5383 | #else |
Al Viro | a1261f5 | 2005-11-13 16:06:55 -0800 | [diff] [blame] | 5384 | task_thread_info(idle)->preempt_count = 0; |
Linus Torvalds | 8e3e076 | 2008-05-10 20:58:02 -0700 | [diff] [blame] | 5385 | #endif |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5386 | /* |
| 5387 | * The idle tasks have their own, simple scheduling class: |
| 5388 | */ |
| 5389 | idle->sched_class = &idle_sched_class; |
Frederic Weisbecker | fb52607 | 2008-11-25 21:07:04 +0100 | [diff] [blame] | 5390 | ftrace_graph_init_task(idle); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5391 | } |
| 5392 | |
| 5393 | /* |
| 5394 | * In a system that switches off the HZ timer nohz_cpu_mask |
| 5395 | * indicates which cpus entered this state. This is used |
| 5396 | * in the rcu update to wait only for active cpus. For systems |
| 5397 | * which do not switch off the HZ timer nohz_cpu_mask should |
Rusty Russell | 6a7b3dc | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 5398 | * always be CPU_BITS_NONE. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5399 | */ |
Rusty Russell | 6a7b3dc | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 5400 | cpumask_var_t nohz_cpu_mask; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5401 | |
Ingo Molnar | 19978ca | 2007-11-09 22:39:38 +0100 | [diff] [blame] | 5402 | /* |
| 5403 | * Increase the granularity value when there are more CPUs, |
| 5404 | * because with more CPUs the 'effective latency' as visible |
| 5405 | * to users decreases. But the relationship is not linear, |
| 5406 | * so pick a second-best guess by going with the log2 of the |
| 5407 | * number of CPUs. |
| 5408 | * |
| 5409 | * This idea comes from the SD scheduler of Con Kolivas: |
| 5410 | */ |
Christian Ehrhardt | acb4a84 | 2009-11-30 12:16:48 +0100 | [diff] [blame] | 5411 | static int get_update_sysctl_factor(void) |
Christian Ehrhardt | 0bcdcf2 | 2009-11-30 12:16:46 +0100 | [diff] [blame] | 5412 | { |
Mike Galbraith | 4ca3ef7 | 2009-12-10 09:25:53 +0100 | [diff] [blame] | 5413 | unsigned int cpus = min_t(int, num_online_cpus(), 8); |
Christian Ehrhardt | 1983a92 | 2009-11-30 12:16:47 +0100 | [diff] [blame] | 5414 | unsigned int factor; |
| 5415 | |
| 5416 | switch (sysctl_sched_tunable_scaling) { |
| 5417 | case SCHED_TUNABLESCALING_NONE: |
| 5418 | factor = 1; |
| 5419 | break; |
| 5420 | case SCHED_TUNABLESCALING_LINEAR: |
| 5421 | factor = cpus; |
| 5422 | break; |
| 5423 | case SCHED_TUNABLESCALING_LOG: |
| 5424 | default: |
| 5425 | factor = 1 + ilog2(cpus); |
| 5426 | break; |
| 5427 | } |
Christian Ehrhardt | 0bcdcf2 | 2009-11-30 12:16:46 +0100 | [diff] [blame] | 5428 | |
Christian Ehrhardt | acb4a84 | 2009-11-30 12:16:48 +0100 | [diff] [blame] | 5429 | return factor; |
| 5430 | } |
| 5431 | |
| 5432 | static void update_sysctl(void) |
| 5433 | { |
| 5434 | unsigned int factor = get_update_sysctl_factor(); |
| 5435 | |
Christian Ehrhardt | 0bcdcf2 | 2009-11-30 12:16:46 +0100 | [diff] [blame] | 5436 | #define SET_SYSCTL(name) \ |
| 5437 | (sysctl_##name = (factor) * normalized_sysctl_##name) |
| 5438 | SET_SYSCTL(sched_min_granularity); |
| 5439 | SET_SYSCTL(sched_latency); |
| 5440 | SET_SYSCTL(sched_wakeup_granularity); |
Christian Ehrhardt | 0bcdcf2 | 2009-11-30 12:16:46 +0100 | [diff] [blame] | 5441 | #undef SET_SYSCTL |
| 5442 | } |
| 5443 | |
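As a worked instance of the scaling above (illustrative numbers, assuming the SCHED_TUNABLESCALING_LOG default and a 6 ms normalized sched_latency):

/*
 *   cpus   = min(num_online_cpus(), 8) = 8      (any box with >= 8 CPUs)
 *   factor = 1 + ilog2(8)              = 4
 *   sysctl_sched_latency = 4 * 6 ms    = 24 ms
 *
 * A uniprocessor keeps factor = 1 + ilog2(1) = 1, so the normalized
 * values are used unchanged.
 */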
Ingo Molnar | 19978ca | 2007-11-09 22:39:38 +0100 | [diff] [blame] | 5444 | static inline void sched_init_granularity(void) |
| 5445 | { |
Christian Ehrhardt | 0bcdcf2 | 2009-11-30 12:16:46 +0100 | [diff] [blame] | 5446 | update_sysctl(); |
Ingo Molnar | 19978ca | 2007-11-09 22:39:38 +0100 | [diff] [blame] | 5447 | } |
| 5448 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5449 | #ifdef CONFIG_SMP |
| 5450 | /* |
| 5451 | * This is how migration works: |
| 5452 | * |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 5453 | * 1) we invoke migration_cpu_stop() on the target CPU using |
| 5454 | * stop_one_cpu(). |
| 5455 | * 2) stopper starts to run (implicitly forcing the migrated thread |
| 5456 | * off the CPU) |
| 5457 | * 3) it checks whether the migrated task is still in the wrong runqueue. |
| 5458 | * 4) if it's in the wrong runqueue then the migration thread removes |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5459 | * it and puts it into the right queue. |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 5460 | * 5) stopper completes and stop_one_cpu() returns and the migration |
| 5461 | * is done. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5462 | */ |
| 5463 | |
| 5464 | /* |
| 5465 | * Change a given task's CPU affinity. Migrate the thread to a |
| 5466 | * proper CPU and schedule it away if the CPU it's executing on |
| 5467 | * is removed from the allowed bitmask. |
| 5468 | * |
| 5469 | * NOTE: the caller must have a valid reference to the task, the |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 5470 | * task must not exit() & deallocate itself prematurely. The |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5471 | * call is not atomic; no spinlocks may be held. |
| 5472 | */ |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 5473 | int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5474 | { |
| 5475 | unsigned long flags; |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 5476 | struct rq *rq; |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 5477 | unsigned int dest_cpu; |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 5478 | int ret = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5479 | |
Peter Zijlstra | 65cc8e4 | 2010-03-25 21:05:16 +0100 | [diff] [blame] | 5480 | /* |
| 5481 | * Serialize against TASK_WAKING so that ttwu() and wake_up_new_task() can |
| 5482 | * drop the rq->lock and still rely on ->cpus_allowed. |
| 5483 | */ |
| 5484 | again: |
| 5485 | while (task_is_waking(p)) |
| 5486 | cpu_relax(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5487 | rq = task_rq_lock(p, &flags); |
Peter Zijlstra | 65cc8e4 | 2010-03-25 21:05:16 +0100 | [diff] [blame] | 5488 | if (task_is_waking(p)) { |
| 5489 | task_rq_unlock(rq, &flags); |
| 5490 | goto again; |
| 5491 | } |
Peter Zijlstra | e291200 | 2009-12-16 18:04:36 +0100 | [diff] [blame] | 5492 | |
Peter Zijlstra | 6ad4c18 | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 5493 | if (!cpumask_intersects(new_mask, cpu_active_mask)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5494 | ret = -EINVAL; |
| 5495 | goto out; |
| 5496 | } |
| 5497 | |
David Rientjes | 9985b0b | 2008-06-05 12:57:11 -0700 | [diff] [blame] | 5498 | if (unlikely((p->flags & PF_THREAD_BOUND) && p != current && |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 5499 | !cpumask_equal(&p->cpus_allowed, new_mask))) { |
David Rientjes | 9985b0b | 2008-06-05 12:57:11 -0700 | [diff] [blame] | 5500 | ret = -EINVAL; |
| 5501 | goto out; |
| 5502 | } |
| 5503 | |
Gregory Haskins | 73fe6aa | 2008-01-25 21:08:07 +0100 | [diff] [blame] | 5504 | if (p->sched_class->set_cpus_allowed) |
Mike Travis | cd8ba7c | 2008-03-26 14:23:49 -0700 | [diff] [blame] | 5505 | p->sched_class->set_cpus_allowed(p, new_mask); |
Gregory Haskins | 73fe6aa | 2008-01-25 21:08:07 +0100 | [diff] [blame] | 5506 | else { |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 5507 | cpumask_copy(&p->cpus_allowed, new_mask); |
| 5508 | p->rt.nr_cpus_allowed = cpumask_weight(new_mask); |
Gregory Haskins | 73fe6aa | 2008-01-25 21:08:07 +0100 | [diff] [blame] | 5509 | } |
| 5510 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5511 | /* Can the task run on the task's current CPU? If so, we're done */ |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 5512 | if (cpumask_test_cpu(task_cpu(p), new_mask)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5513 | goto out; |
| 5514 | |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 5515 | dest_cpu = cpumask_any_and(cpu_active_mask, new_mask); |
Nikanth Karthikesan | b7a2b39 | 2010-11-26 12:37:09 +0530 | [diff] [blame] | 5516 | if (migrate_task(p, rq)) { |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 5517 | struct migration_arg arg = { p, dest_cpu }; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5518 | /* Need help from migration thread: drop lock and wait. */ |
| 5519 | task_rq_unlock(rq, &flags); |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 5520 | stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5521 | tlb_migrate_finish(p->mm); |
| 5522 | return 0; |
| 5523 | } |
| 5524 | out: |
| 5525 | task_rq_unlock(rq, &flags); |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 5526 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5527 | return ret; |
| 5528 | } |
Mike Travis | cd8ba7c | 2008-03-26 14:23:49 -0700 | [diff] [blame] | 5529 | EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5530 | |
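A hedged in-kernel sketch of a caller restricting a task it holds a reference to onto one CPU; unlike kthread_bind(), this path also migrates a task that is already running somewhere now forbidden (the helper name is invented):

#include <linux/cpumask.h>
#include <linux/sched.h>

static int example_pin_to_cpu(struct task_struct *tsk, int cpu)
{
	if (!cpu_online(cpu))
		return -EINVAL;

	/* May sleep: waits for the stopper thread if a migration is needed. */
	return set_cpus_allowed_ptr(tsk, cpumask_of(cpu));
}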
| 5531 | /* |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 5532 | * Move (not current) task off this cpu, onto dest cpu. We're doing |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5533 | * this because either it can't run here any more (set_cpus_allowed() |
| 5534 | * away from this CPU, or CPU going down), or because we're |
| 5535 | * attempting to rebalance this task on exec (sched_exec). |
| 5536 | * |
| 5537 | * So we race with normal scheduler movements, but that's OK, as long |
| 5538 | * as the task is no longer on this CPU. |
Kirill Korotaev | efc3081 | 2006-06-27 02:54:32 -0700 | [diff] [blame] | 5539 | * |
| 5540 | * Returns non-zero if task was successfully migrated. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5541 | */ |
Kirill Korotaev | efc3081 | 2006-06-27 02:54:32 -0700 | [diff] [blame] | 5542 | static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5543 | { |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 5544 | struct rq *rq_dest, *rq_src; |
Peter Zijlstra | e291200 | 2009-12-16 18:04:36 +0100 | [diff] [blame] | 5545 | int ret = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5546 | |
Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 5547 | if (unlikely(!cpu_active(dest_cpu))) |
Kirill Korotaev | efc3081 | 2006-06-27 02:54:32 -0700 | [diff] [blame] | 5548 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5549 | |
| 5550 | rq_src = cpu_rq(src_cpu); |
| 5551 | rq_dest = cpu_rq(dest_cpu); |
| 5552 | |
| 5553 | double_rq_lock(rq_src, rq_dest); |
| 5554 | /* Already moved. */ |
| 5555 | if (task_cpu(p) != src_cpu) |
Linus Torvalds | b1e3873 | 2008-07-10 11:25:03 -0700 | [diff] [blame] | 5556 | goto done; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5557 | /* Affinity changed (again). */ |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 5558 | if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) |
Linus Torvalds | b1e3873 | 2008-07-10 11:25:03 -0700 | [diff] [blame] | 5559 | goto fail; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5560 | |
Peter Zijlstra | e291200 | 2009-12-16 18:04:36 +0100 | [diff] [blame] | 5561 | /* |
| 5562 | * If we're not on a rq, the next wake-up will ensure we're |
| 5563 | * placed properly. |
| 5564 | */ |
| 5565 | if (p->se.on_rq) { |
Ingo Molnar | 2e1cb74 | 2007-08-09 11:16:49 +0200 | [diff] [blame] | 5566 | deactivate_task(rq_src, p, 0); |
Peter Zijlstra | e291200 | 2009-12-16 18:04:36 +0100 | [diff] [blame] | 5567 | set_task_cpu(p, dest_cpu); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 5568 | activate_task(rq_dest, p, 0); |
Peter Zijlstra | 15afe09 | 2008-09-20 23:38:02 +0200 | [diff] [blame] | 5569 | check_preempt_curr(rq_dest, p, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5570 | } |
Linus Torvalds | b1e3873 | 2008-07-10 11:25:03 -0700 | [diff] [blame] | 5571 | done: |
Kirill Korotaev | efc3081 | 2006-06-27 02:54:32 -0700 | [diff] [blame] | 5572 | ret = 1; |
Linus Torvalds | b1e3873 | 2008-07-10 11:25:03 -0700 | [diff] [blame] | 5573 | fail: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5574 | double_rq_unlock(rq_src, rq_dest); |
Kirill Korotaev | efc3081 | 2006-06-27 02:54:32 -0700 | [diff] [blame] | 5575 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5576 | } |
| 5577 | |
| 5578 | /* |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 5579 | * migration_cpu_stop - this is executed by a high-priority stopper thread |
| 5580 | * and performs thread migration by bumping the thread off its CPU and |
| 5581 | * then 'pushing' it onto another runqueue. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5582 | */ |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 5583 | static int migration_cpu_stop(void *data) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5584 | { |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 5585 | struct migration_arg *arg = data; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5586 | |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 5587 | /* |
| 5588 | * The original target cpu might have gone down and we might |
| 5589 | * be on another cpu but it doesn't matter. |
| 5590 | */ |
| 5591 | local_irq_disable(); |
| 5592 | __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu); |
| 5593 | local_irq_enable(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5594 | return 0; |
| 5595 | } |
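/*
 * Illustrative usage sketch (not a new call site): callers elsewhere in
 * this file hand a struct migration_arg to the cpu_stop machinery, along
 * the lines of
 *
 *	struct migration_arg arg = { p, dest_cpu };
 *	stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
 *
 * which runs migration_cpu_stop() on that CPU's stopper thread and thus
 * ends up in __migrate_task() with interrupts disabled.
 */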
| 5596 | |
| 5597 | #ifdef CONFIG_HOTPLUG_CPU |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5598 | |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 5599 | /* |
| 5600 | * Ensures that the idle task is using init_mm right before its cpu goes |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5601 | * offline. |
| 5602 | */ |
| 5603 | void idle_task_exit(void) |
| 5604 | { |
| 5605 | struct mm_struct *mm = current->active_mm; |
| 5606 | |
| 5607 | BUG_ON(cpu_online(smp_processor_id())); |
| 5608 | |
| 5609 | if (mm != &init_mm) |
| 5610 | switch_mm(mm, &init_mm, current); |
| 5611 | mmdrop(mm); |
| 5612 | } |
| 5613 | |
Peter Zijlstra | 48c5cca | 2010-11-13 19:32:29 +0100 | [diff] [blame] | 5614 | /* |
| 5615 | * While a dead CPU has no uninterruptible tasks queued at this point, |
| 5616 | * it might still have a nonzero ->nr_uninterruptible counter, because |
| 5617 | * for performance reasons the counter is not strictly tracking tasks to |
| 5618 | * their home CPUs. So we just add the counter to another CPU's counter, |
| 5619 | * to keep the global sum constant after CPU-down: |
| 5620 | */ |
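/*
 * Worked example with made-up numbers: if the dead CPU's runqueue has
 * nr_uninterruptible == -3 while some other runqueue carries +3, moving
 * the -3 onto any active CPU keeps the sum over all runqueues at 0,
 * which is the property the load-average code relies on.
 */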
| 5621 | static void migrate_nr_uninterruptible(struct rq *rq_src) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5622 | { |
Peter Zijlstra | 48c5cca | 2010-11-13 19:32:29 +0100 | [diff] [blame] | 5623 | struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5624 | |
Peter Zijlstra | 48c5cca | 2010-11-13 19:32:29 +0100 | [diff] [blame] | 5625 | rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible; |
| 5626 | rq_src->nr_uninterruptible = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5627 | } |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 5628 | |
| 5629 | /* |
| 5630 | * remove the tasks which were accounted by rq from calc_load_tasks. |
| 5631 | */ |
| 5632 | static void calc_global_load_remove(struct rq *rq) |
| 5633 | { |
| 5634 | atomic_long_sub(rq->calc_load_active, &calc_load_tasks); |
Thomas Gleixner | a468d38 | 2009-07-17 14:15:46 +0200 | [diff] [blame] | 5635 | rq->calc_load_active = 0; |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 5636 | } |
Peter Zijlstra | 48c5cca | 2010-11-13 19:32:29 +0100 | [diff] [blame] | 5637 | |
| 5638 | /* |
| 5639 | * Migrate all tasks from the rq, sleeping tasks will be migrated by |
| 5640 | * try_to_wake_up()->select_task_rq(). |
| 5641 | * |
| 5642 | * Called with rq->lock held. Even though we're in stop_machine() and |
| 5643 | * there's no concurrency possible, we hold the required locks anyway |
| 5644 | * because of lock validation efforts. |
| 5645 | */ |
| 5646 | static void migrate_tasks(unsigned int dead_cpu) |
| 5647 | { |
| 5648 | struct rq *rq = cpu_rq(dead_cpu); |
| 5649 | struct task_struct *next, *stop = rq->stop; |
| 5650 | int dest_cpu; |
| 5651 | |
| 5652 | /* |
| 5653 | * Fudge the rq selection such that the below task selection loop |
| 5654 | * doesn't get stuck on the currently eligible stop task. |
| 5655 | * |
| 5656 | * We're currently inside stop_machine() and the rq is either stuck |
| 5657 | * in the stop_machine_cpu_stop() loop, or we're executing this code, |
| 5658 | * either way we should never end up calling schedule() until we're |
| 5659 | * done here. |
| 5660 | */ |
| 5661 | rq->stop = NULL; |
| 5662 | |
| 5663 | for ( ; ; ) { |
| 5664 | /* |
| 5665 | * This thread itself is still running; bail when it is the only |
| 5666 | * remaining thread. |
| 5667 | */ |
| 5668 | if (rq->nr_running == 1) |
| 5669 | break; |
| 5670 | |
| 5671 | next = pick_next_task(rq); |
| 5672 | BUG_ON(!next); |
| 5673 | next->sched_class->put_prev_task(rq, next); |
| 5674 | |
| 5675 | /* Find suitable destination for @next, with force if needed. */ |
| 5676 | dest_cpu = select_fallback_rq(dead_cpu, next); |
| 5677 | raw_spin_unlock(&rq->lock); |
| 5678 | |
| 5679 | __migrate_task(next, dead_cpu, dest_cpu); |
| 5680 | |
| 5681 | raw_spin_lock(&rq->lock); |
| 5682 | } |
| 5683 | |
| 5684 | rq->stop = stop; |
| 5685 | } |
| 5686 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5687 | #endif /* CONFIG_HOTPLUG_CPU */ |
| 5688 | |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 5689 | #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL) |
| 5690 | |
| 5691 | static struct ctl_table sd_ctl_dir[] = { |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 5692 | { |
| 5693 | .procname = "sched_domain", |
Eric W. Biederman | c57baf1 | 2007-08-23 15:18:02 +0200 | [diff] [blame] | 5694 | .mode = 0555, |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 5695 | }, |
Eric W. Biederman | 5699230 | 2009-11-05 15:38:40 -0800 | [diff] [blame] | 5696 | {} |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 5697 | }; |
| 5698 | |
| 5699 | static struct ctl_table sd_ctl_root[] = { |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 5700 | { |
| 5701 | .procname = "kernel", |
Eric W. Biederman | c57baf1 | 2007-08-23 15:18:02 +0200 | [diff] [blame] | 5702 | .mode = 0555, |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 5703 | .child = sd_ctl_dir, |
| 5704 | }, |
Eric W. Biederman | 5699230 | 2009-11-05 15:38:40 -0800 | [diff] [blame] | 5705 | {} |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 5706 | }; |
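/*
 * The tables above and the helpers below end up exposing, per CPU and per
 * sched domain, entries such as (illustrative path, assuming
 * CONFIG_SCHED_DEBUG and CONFIG_SYSCTL are enabled):
 *
 *	/proc/sys/kernel/sched_domain/cpu0/domain0/min_interval
 *
 * where the "cpu%d" and "domain%d" components are created dynamically by
 * register_sched_domain_sysctl() and sd_alloc_ctl_cpu_table().
 */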
| 5707 | |
| 5708 | static struct ctl_table *sd_alloc_ctl_entry(int n) |
| 5709 | { |
| 5710 | struct ctl_table *entry = |
Milton Miller | 5cf9f06 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 5711 | kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL); |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 5712 | |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 5713 | return entry; |
| 5714 | } |
| 5715 | |
Milton Miller | 6382bc9 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 5716 | static void sd_free_ctl_entry(struct ctl_table **tablep) |
| 5717 | { |
Milton Miller | cd79007 | 2007-10-17 16:55:11 +0200 | [diff] [blame] | 5718 | struct ctl_table *entry; |
Milton Miller | 6382bc9 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 5719 | |
Milton Miller | cd79007 | 2007-10-17 16:55:11 +0200 | [diff] [blame] | 5720 | /* |
| 5721 | * In the intermediate directories, both the child directory and |
| 5722 | * procname are dynamically allocated and their allocation may have |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 5723 | * failed, but the mode will always be set. In the lowest directory the |
Milton Miller | cd79007 | 2007-10-17 16:55:11 +0200 | [diff] [blame] | 5724 | * names are static strings and all have proc handlers. |
| 5725 | */ |
| 5726 | for (entry = *tablep; entry->mode; entry++) { |
Milton Miller | 6382bc9 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 5727 | if (entry->child) |
| 5728 | sd_free_ctl_entry(&entry->child); |
Milton Miller | cd79007 | 2007-10-17 16:55:11 +0200 | [diff] [blame] | 5729 | if (entry->proc_handler == NULL) |
| 5730 | kfree(entry->procname); |
| 5731 | } |
Milton Miller | 6382bc9 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 5732 | |
| 5733 | kfree(*tablep); |
| 5734 | *tablep = NULL; |
| 5735 | } |
| 5736 | |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 5737 | static void |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 5738 | set_table_entry(struct ctl_table *entry, |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 5739 | const char *procname, void *data, int maxlen, |
| 5740 | mode_t mode, proc_handler *proc_handler) |
| 5741 | { |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 5742 | entry->procname = procname; |
| 5743 | entry->data = data; |
| 5744 | entry->maxlen = maxlen; |
| 5745 | entry->mode = mode; |
| 5746 | entry->proc_handler = proc_handler; |
| 5747 | } |
| 5748 | |
| 5749 | static struct ctl_table * |
| 5750 | sd_alloc_ctl_domain_table(struct sched_domain *sd) |
| 5751 | { |
Ingo Molnar | a5d8c34 | 2008-10-09 11:35:51 +0200 | [diff] [blame] | 5752 | struct ctl_table *table = sd_alloc_ctl_entry(13); |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 5753 | |
Milton Miller | ad1cdc1 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 5754 | if (table == NULL) |
| 5755 | return NULL; |
| 5756 | |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 5757 | set_table_entry(&table[0], "min_interval", &sd->min_interval, |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 5758 | sizeof(long), 0644, proc_doulongvec_minmax); |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 5759 | set_table_entry(&table[1], "max_interval", &sd->max_interval, |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 5760 | sizeof(long), 0644, proc_doulongvec_minmax); |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 5761 | set_table_entry(&table[2], "busy_idx", &sd->busy_idx, |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 5762 | sizeof(int), 0644, proc_dointvec_minmax); |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 5763 | set_table_entry(&table[3], "idle_idx", &sd->idle_idx, |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 5764 | sizeof(int), 0644, proc_dointvec_minmax); |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 5765 | set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx, |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 5766 | sizeof(int), 0644, proc_dointvec_minmax); |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 5767 | set_table_entry(&table[5], "wake_idx", &sd->wake_idx, |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 5768 | sizeof(int), 0644, proc_dointvec_minmax); |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 5769 | set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx, |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 5770 | sizeof(int), 0644, proc_dointvec_minmax); |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 5771 | set_table_entry(&table[7], "busy_factor", &sd->busy_factor, |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 5772 | sizeof(int), 0644, proc_dointvec_minmax); |
Alexey Dobriyan | e036185 | 2007-08-09 11:16:46 +0200 | [diff] [blame] | 5773 | set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct, |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 5774 | sizeof(int), 0644, proc_dointvec_minmax); |
Zou Nan hai | ace8b3d | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 5775 | set_table_entry(&table[9], "cache_nice_tries", |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 5776 | &sd->cache_nice_tries, |
| 5777 | sizeof(int), 0644, proc_dointvec_minmax); |
Zou Nan hai | ace8b3d | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 5778 | set_table_entry(&table[10], "flags", &sd->flags, |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 5779 | sizeof(int), 0644, proc_dointvec_minmax); |
Ingo Molnar | a5d8c34 | 2008-10-09 11:35:51 +0200 | [diff] [blame] | 5780 | set_table_entry(&table[11], "name", sd->name, |
| 5781 | CORENAME_MAX_SIZE, 0444, proc_dostring); |
| 5782 | /* &table[12] is terminator */ |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 5783 | |
| 5784 | return table; |
| 5785 | } |
| 5786 | |
Ingo Molnar | 9a4e715 | 2007-11-28 15:52:56 +0100 | [diff] [blame] | 5787 | static ctl_table *sd_alloc_ctl_cpu_table(int cpu) |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 5788 | { |
| 5789 | struct ctl_table *entry, *table; |
| 5790 | struct sched_domain *sd; |
| 5791 | int domain_num = 0, i; |
| 5792 | char buf[32]; |
| 5793 | |
| 5794 | for_each_domain(cpu, sd) |
| 5795 | domain_num++; |
| 5796 | entry = table = sd_alloc_ctl_entry(domain_num + 1); |
Milton Miller | ad1cdc1 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 5797 | if (table == NULL) |
| 5798 | return NULL; |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 5799 | |
| 5800 | i = 0; |
| 5801 | for_each_domain(cpu, sd) { |
| 5802 | snprintf(buf, 32, "domain%d", i); |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 5803 | entry->procname = kstrdup(buf, GFP_KERNEL); |
Eric W. Biederman | c57baf1 | 2007-08-23 15:18:02 +0200 | [diff] [blame] | 5804 | entry->mode = 0555; |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 5805 | entry->child = sd_alloc_ctl_domain_table(sd); |
| 5806 | entry++; |
| 5807 | i++; |
| 5808 | } |
| 5809 | return table; |
| 5810 | } |
| 5811 | |
| 5812 | static struct ctl_table_header *sd_sysctl_header; |
Milton Miller | 6382bc9 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 5813 | static void register_sched_domain_sysctl(void) |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 5814 | { |
Peter Zijlstra | 6ad4c18 | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 5815 | int i, cpu_num = num_possible_cpus(); |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 5816 | struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1); |
| 5817 | char buf[32]; |
| 5818 | |
Milton Miller | 7378547 | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 5819 | WARN_ON(sd_ctl_dir[0].child); |
| 5820 | sd_ctl_dir[0].child = entry; |
| 5821 | |
Milton Miller | ad1cdc1 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 5822 | if (entry == NULL) |
| 5823 | return; |
| 5824 | |
Peter Zijlstra | 6ad4c18 | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 5825 | for_each_possible_cpu(i) { |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 5826 | snprintf(buf, 32, "cpu%d", i); |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 5827 | entry->procname = kstrdup(buf, GFP_KERNEL); |
Eric W. Biederman | c57baf1 | 2007-08-23 15:18:02 +0200 | [diff] [blame] | 5828 | entry->mode = 0555; |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 5829 | entry->child = sd_alloc_ctl_cpu_table(i); |
Milton Miller | 97b6ea7 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 5830 | entry++; |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 5831 | } |
Milton Miller | 7378547 | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 5832 | |
| 5833 | WARN_ON(sd_sysctl_header); |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 5834 | sd_sysctl_header = register_sysctl_table(sd_ctl_root); |
| 5835 | } |
Milton Miller | 6382bc9 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 5836 | |
Milton Miller | 7378547 | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 5837 | /* may be called multiple times per register */ |
Milton Miller | 6382bc9 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 5838 | static void unregister_sched_domain_sysctl(void) |
| 5839 | { |
Milton Miller | 7378547 | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 5840 | if (sd_sysctl_header) |
| 5841 | unregister_sysctl_table(sd_sysctl_header); |
Milton Miller | 6382bc9 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 5842 | sd_sysctl_header = NULL; |
Milton Miller | 7378547 | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 5843 | if (sd_ctl_dir[0].child) |
| 5844 | sd_free_ctl_entry(&sd_ctl_dir[0].child); |
Milton Miller | 6382bc9 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 5845 | } |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 5846 | #else |
Milton Miller | 6382bc9 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 5847 | static void register_sched_domain_sysctl(void) |
| 5848 | { |
| 5849 | } |
| 5850 | static void unregister_sched_domain_sysctl(void) |
Nick Piggin | e692ab5 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 5851 | { |
| 5852 | } |
| 5853 | #endif |
| 5854 | |
Gregory Haskins | 1f11eb6 | 2008-06-04 15:04:05 -0400 | [diff] [blame] | 5855 | static void set_rq_online(struct rq *rq) |
| 5856 | { |
| 5857 | if (!rq->online) { |
| 5858 | const struct sched_class *class; |
| 5859 | |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 5860 | cpumask_set_cpu(rq->cpu, rq->rd->online); |
Gregory Haskins | 1f11eb6 | 2008-06-04 15:04:05 -0400 | [diff] [blame] | 5861 | rq->online = 1; |
| 5862 | |
| 5863 | for_each_class(class) { |
| 5864 | if (class->rq_online) |
| 5865 | class->rq_online(rq); |
| 5866 | } |
| 5867 | } |
| 5868 | } |
| 5869 | |
| 5870 | static void set_rq_offline(struct rq *rq) |
| 5871 | { |
| 5872 | if (rq->online) { |
| 5873 | const struct sched_class *class; |
| 5874 | |
| 5875 | for_each_class(class) { |
| 5876 | if (class->rq_offline) |
| 5877 | class->rq_offline(rq); |
| 5878 | } |
| 5879 | |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 5880 | cpumask_clear_cpu(rq->cpu, rq->rd->online); |
Gregory Haskins | 1f11eb6 | 2008-06-04 15:04:05 -0400 | [diff] [blame] | 5881 | rq->online = 0; |
| 5882 | } |
| 5883 | } |
| 5884 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5885 | /* |
| 5886 | * migration_call - callback that gets triggered on CPU hotplug events: it |
| 5887 | * sets up scheduler state for a CPU coming up and migrates tasks off a dying CPU. |
| 5888 | */ |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 5889 | static int __cpuinit |
| 5890 | migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5891 | { |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 5892 | int cpu = (long)hcpu; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5893 | unsigned long flags; |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 5894 | struct rq *rq = cpu_rq(cpu); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5895 | |
Peter Zijlstra | 48c5cca | 2010-11-13 19:32:29 +0100 | [diff] [blame] | 5896 | switch (action & ~CPU_TASKS_FROZEN) { |
Gautham R Shenoy | 5be9361 | 2007-05-09 02:34:04 -0700 | [diff] [blame] | 5897 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5898 | case CPU_UP_PREPARE: |
Thomas Gleixner | a468d38 | 2009-07-17 14:15:46 +0200 | [diff] [blame] | 5899 | rq->calc_load_update = calc_load_update; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5900 | break; |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 5901 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5902 | case CPU_ONLINE: |
Gregory Haskins | 1f94ef5 | 2008-03-10 16:52:41 -0400 | [diff] [blame] | 5903 | /* Update our root-domain */ |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 5904 | raw_spin_lock_irqsave(&rq->lock, flags); |
Gregory Haskins | 1f94ef5 | 2008-03-10 16:52:41 -0400 | [diff] [blame] | 5905 | if (rq->rd) { |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 5906 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); |
Gregory Haskins | 1f11eb6 | 2008-06-04 15:04:05 -0400 | [diff] [blame] | 5907 | |
| 5908 | set_rq_online(rq); |
Gregory Haskins | 1f94ef5 | 2008-03-10 16:52:41 -0400 | [diff] [blame] | 5909 | } |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 5910 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5911 | break; |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 5912 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5913 | #ifdef CONFIG_HOTPLUG_CPU |
Gregory Haskins | 08f503b | 2008-03-10 17:59:11 -0400 | [diff] [blame] | 5914 | case CPU_DYING: |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 5915 | /* Update our root-domain */ |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 5916 | raw_spin_lock_irqsave(&rq->lock, flags); |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 5917 | if (rq->rd) { |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 5918 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); |
Gregory Haskins | 1f11eb6 | 2008-06-04 15:04:05 -0400 | [diff] [blame] | 5919 | set_rq_offline(rq); |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 5920 | } |
Peter Zijlstra | 48c5cca | 2010-11-13 19:32:29 +0100 | [diff] [blame] | 5921 | migrate_tasks(cpu); |
| 5922 | BUG_ON(rq->nr_running != 1); /* the migration thread */ |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 5923 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
Peter Zijlstra | 48c5cca | 2010-11-13 19:32:29 +0100 | [diff] [blame] | 5924 | |
| 5925 | migrate_nr_uninterruptible(rq); |
| 5926 | calc_global_load_remove(rq); |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 5927 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5928 | #endif |
| 5929 | } |
| 5930 | return NOTIFY_OK; |
| 5931 | } |
| 5932 | |
Paul Mackerras | f38b082 | 2009-06-02 21:05:16 +1000 | [diff] [blame] | 5933 | /* |
| 5934 | * Register at high priority so that task migration (migrate_tasks) |
| 5935 | * happens before everything else. This has to be lower priority than |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5936 | * the notifier in the perf_event subsystem, though. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5937 | */ |
Chandra Seetharaman | 26c2143 | 2006-06-27 02:54:10 -0700 | [diff] [blame] | 5938 | static struct notifier_block __cpuinitdata migration_notifier = { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5939 | .notifier_call = migration_call, |
Tejun Heo | 50a323b | 2010-06-08 21:40:36 +0200 | [diff] [blame] | 5940 | .priority = CPU_PRI_MIGRATION, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5941 | }; |
| 5942 | |
Tejun Heo | 3a101d0 | 2010-06-08 21:40:36 +0200 | [diff] [blame] | 5943 | static int __cpuinit sched_cpu_active(struct notifier_block *nfb, |
| 5944 | unsigned long action, void *hcpu) |
| 5945 | { |
| 5946 | switch (action & ~CPU_TASKS_FROZEN) { |
| 5947 | case CPU_ONLINE: |
| 5948 | case CPU_DOWN_FAILED: |
| 5949 | set_cpu_active((long)hcpu, true); |
| 5950 | return NOTIFY_OK; |
| 5951 | default: |
| 5952 | return NOTIFY_DONE; |
| 5953 | } |
| 5954 | } |
| 5955 | |
| 5956 | static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb, |
| 5957 | unsigned long action, void *hcpu) |
| 5958 | { |
| 5959 | switch (action & ~CPU_TASKS_FROZEN) { |
| 5960 | case CPU_DOWN_PREPARE: |
| 5961 | set_cpu_active((long)hcpu, false); |
| 5962 | return NOTIFY_OK; |
| 5963 | default: |
| 5964 | return NOTIFY_DONE; |
| 5965 | } |
| 5966 | } |
| 5967 | |
Eduard - Gabriel Munteanu | 7babe8d | 2008-07-25 19:45:11 -0700 | [diff] [blame] | 5968 | static int __init migration_init(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5969 | { |
| 5970 | void *cpu = (void *)(long)smp_processor_id(); |
Akinobu Mita | 07dccf3 | 2006-09-29 02:00:22 -0700 | [diff] [blame] | 5971 | int err; |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 5972 | |
Tejun Heo | 3a101d0 | 2010-06-08 21:40:36 +0200 | [diff] [blame] | 5973 | /* Initialize migration for the boot CPU */ |
Akinobu Mita | 07dccf3 | 2006-09-29 02:00:22 -0700 | [diff] [blame] | 5974 | err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu); |
| 5975 | BUG_ON(err == NOTIFY_BAD); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5976 | migration_call(&migration_notifier, CPU_ONLINE, cpu); |
| 5977 | register_cpu_notifier(&migration_notifier); |
Eduard - Gabriel Munteanu | 7babe8d | 2008-07-25 19:45:11 -0700 | [diff] [blame] | 5978 | |
Tejun Heo | 3a101d0 | 2010-06-08 21:40:36 +0200 | [diff] [blame] | 5979 | /* Register cpu active notifiers */ |
| 5980 | cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE); |
| 5981 | cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE); |
| 5982 | |
Thomas Gleixner | a004cd4 | 2009-07-21 09:54:05 +0200 | [diff] [blame] | 5983 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5984 | } |
Eduard - Gabriel Munteanu | 7babe8d | 2008-07-25 19:45:11 -0700 | [diff] [blame] | 5985 | early_initcall(migration_init); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5986 | #endif |
| 5987 | |
| 5988 | #ifdef CONFIG_SMP |
Christoph Lameter | 476f353 | 2007-05-06 14:48:58 -0700 | [diff] [blame] | 5989 | |
Ingo Molnar | 3e9830d | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 5990 | #ifdef CONFIG_SCHED_DEBUG |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 5991 | |
Mike Travis | f663011 | 2009-11-17 18:22:15 -0600 | [diff] [blame] | 5992 | static __read_mostly int sched_domain_debug_enabled; |
| 5993 | |
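/*
 * Set via the "sched_debug" kernel command line parameter (see the
 * early_param() registration below); when enabled, sched_domain_debug()
 * dumps the domain/group hierarchy as domains are attached.
 */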
| 5994 | static int __init sched_domain_debug_setup(char *str) |
| 5995 | { |
| 5996 | sched_domain_debug_enabled = 1; |
| 5997 | |
| 5998 | return 0; |
| 5999 | } |
| 6000 | early_param("sched_debug", sched_domain_debug_setup); |
| 6001 | |
Mike Travis | 7c16ec5 | 2008-04-04 18:11:11 -0700 | [diff] [blame] | 6002 | static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6003 | struct cpumask *groupmask) |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6004 | { |
| 6005 | struct sched_group *group = sd->groups; |
Mike Travis | 434d53b | 2008-04-04 18:11:04 -0700 | [diff] [blame] | 6006 | char str[256]; |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6007 | |
Rusty Russell | 968ea6d | 2008-12-13 21:55:51 +1030 | [diff] [blame] | 6008 | cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd)); |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6009 | cpumask_clear(groupmask); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6010 | |
| 6011 | printk(KERN_DEBUG "%*s domain %d: ", level, "", level); |
| 6012 | |
| 6013 | if (!(sd->flags & SD_LOAD_BALANCE)) { |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6014 | printk("does not load-balance\n"); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6015 | if (sd->parent) |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6016 | printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain" |
| 6017 | " has parent"); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6018 | return -1; |
| 6019 | } |
| 6020 | |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6021 | printk(KERN_CONT "span %s level %s\n", str, sd->name); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6022 | |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6023 | if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6024 | printk(KERN_ERR "ERROR: domain->span does not contain " |
| 6025 | "CPU%d\n", cpu); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6026 | } |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6027 | if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) { |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6028 | printk(KERN_ERR "ERROR: domain->groups does not contain" |
| 6029 | " CPU%d\n", cpu); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6030 | } |
| 6031 | |
| 6032 | printk(KERN_DEBUG "%*s groups:", level + 1, ""); |
| 6033 | do { |
| 6034 | if (!group) { |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6035 | printk("\n"); |
| 6036 | printk(KERN_ERR "ERROR: group is NULL\n"); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6037 | break; |
| 6038 | } |
| 6039 | |
Peter Zijlstra | 18a3885 | 2009-09-01 10:34:39 +0200 | [diff] [blame] | 6040 | if (!group->cpu_power) { |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6041 | printk(KERN_CONT "\n"); |
| 6042 | printk(KERN_ERR "ERROR: group->cpu_power not " |
| 6043 | "set\n"); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6044 | break; |
| 6045 | } |
| 6046 | |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6047 | if (!cpumask_weight(sched_group_cpus(group))) { |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6048 | printk(KERN_CONT "\n"); |
| 6049 | printk(KERN_ERR "ERROR: empty group\n"); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6050 | break; |
| 6051 | } |
| 6052 | |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6053 | if (cpumask_intersects(groupmask, sched_group_cpus(group))) { |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6054 | printk(KERN_CONT "\n"); |
| 6055 | printk(KERN_ERR "ERROR: repeated CPUs\n"); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6056 | break; |
| 6057 | } |
| 6058 | |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6059 | cpumask_or(groupmask, groupmask, sched_group_cpus(group)); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6060 | |
Rusty Russell | 968ea6d | 2008-12-13 21:55:51 +1030 | [diff] [blame] | 6061 | cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group)); |
Gautham R Shenoy | 381512c | 2009-04-14 09:09:36 +0530 | [diff] [blame] | 6062 | |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6063 | printk(KERN_CONT " %s", str); |
Peter Zijlstra | 18a3885 | 2009-09-01 10:34:39 +0200 | [diff] [blame] | 6064 | if (group->cpu_power != SCHED_LOAD_SCALE) { |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6065 | printk(KERN_CONT " (cpu_power = %d)", |
| 6066 | group->cpu_power); |
Gautham R Shenoy | 381512c | 2009-04-14 09:09:36 +0530 | [diff] [blame] | 6067 | } |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6068 | |
| 6069 | group = group->next; |
| 6070 | } while (group != sd->groups); |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6071 | printk(KERN_CONT "\n"); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6072 | |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6073 | if (!cpumask_equal(sched_domain_span(sd), groupmask)) |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6074 | printk(KERN_ERR "ERROR: groups don't span domain->span\n"); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6075 | |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6076 | if (sd->parent && |
| 6077 | !cpumask_subset(groupmask, sched_domain_span(sd->parent))) |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6078 | printk(KERN_ERR "ERROR: parent span is not a superset " |
| 6079 | "of domain->span\n"); |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6080 | return 0; |
| 6081 | } |
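/*
 * For reference, the output produced above looks roughly like this for a
 * two-thread SMT sibling domain (illustrative; exact names and spans
 * depend on the topology and on CONFIG_SCHED_DEBUG being enabled):
 *
 *	CPU0 attaching sched-domain:
 *	 domain 0: span 0-1 level SIBLING
 *	  groups: 0 1
 */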
| 6082 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6083 | static void sched_domain_debug(struct sched_domain *sd, int cpu) |
| 6084 | { |
Rusty Russell | d5dd3db | 2008-11-25 02:35:12 +1030 | [diff] [blame] | 6085 | cpumask_var_t groupmask; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6086 | int level = 0; |
| 6087 | |
Mike Travis | f663011 | 2009-11-17 18:22:15 -0600 | [diff] [blame] | 6088 | if (!sched_domain_debug_enabled) |
| 6089 | return; |
| 6090 | |
Nick Piggin | 41c7ce9 | 2005-06-25 14:57:24 -0700 | [diff] [blame] | 6091 | if (!sd) { |
| 6092 | printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); |
| 6093 | return; |
| 6094 | } |
| 6095 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6096 | printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); |
| 6097 | |
Rusty Russell | d5dd3db | 2008-11-25 02:35:12 +1030 | [diff] [blame] | 6098 | if (!alloc_cpumask_var(&groupmask, GFP_KERNEL)) { |
Mike Travis | 7c16ec5 | 2008-04-04 18:11:11 -0700 | [diff] [blame] | 6099 | printk(KERN_DEBUG "Cannot load-balance (out of memory)\n"); |
| 6100 | return; |
| 6101 | } |
| 6102 | |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6103 | for (;;) { |
Mike Travis | 7c16ec5 | 2008-04-04 18:11:11 -0700 | [diff] [blame] | 6104 | if (sched_domain_debug_one(sd, cpu, level, groupmask)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6105 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6106 | level++; |
| 6107 | sd = sd->parent; |
Miguel Ojeda Sandonis | 33859f7 | 2006-12-10 02:20:38 -0800 | [diff] [blame] | 6108 | if (!sd) |
Ingo Molnar | 4dcf6af | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 6109 | break; |
| 6110 | } |
Rusty Russell | d5dd3db | 2008-11-25 02:35:12 +1030 | [diff] [blame] | 6111 | free_cpumask_var(groupmask); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6112 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 6113 | #else /* !CONFIG_SCHED_DEBUG */ |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 6114 | # define sched_domain_debug(sd, cpu) do { } while (0) |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 6115 | #endif /* CONFIG_SCHED_DEBUG */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6116 | |
Dinakar Guniguntala | 1a20ff2 | 2005-06-25 14:57:33 -0700 | [diff] [blame] | 6117 | static int sd_degenerate(struct sched_domain *sd) |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 6118 | { |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6119 | if (cpumask_weight(sched_domain_span(sd)) == 1) |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 6120 | return 1; |
| 6121 | |
| 6122 | /* Following flags need at least 2 groups */ |
| 6123 | if (sd->flags & (SD_LOAD_BALANCE | |
| 6124 | SD_BALANCE_NEWIDLE | |
| 6125 | SD_BALANCE_FORK | |
Siddha, Suresh B | 89c4710 | 2006-10-03 01:14:09 -0700 | [diff] [blame] | 6126 | SD_BALANCE_EXEC | |
| 6127 | SD_SHARE_CPUPOWER | |
| 6128 | SD_SHARE_PKG_RESOURCES)) { |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 6129 | if (sd->groups != sd->groups->next) |
| 6130 | return 0; |
| 6131 | } |
| 6132 | |
| 6133 | /* Following flags don't use groups */ |
Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 6134 | if (sd->flags & (SD_WAKE_AFFINE)) |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 6135 | return 0; |
| 6136 | |
| 6137 | return 1; |
| 6138 | } |
| 6139 | |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 6140 | static int |
| 6141 | sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 6142 | { |
| 6143 | unsigned long cflags = sd->flags, pflags = parent->flags; |
| 6144 | |
| 6145 | if (sd_degenerate(parent)) |
| 6146 | return 1; |
| 6147 | |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6148 | if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent))) |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 6149 | return 0; |
| 6150 | |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 6151 | /* Flags needing groups don't count if only 1 group in parent */ |
| 6152 | if (parent->groups == parent->groups->next) { |
| 6153 | pflags &= ~(SD_LOAD_BALANCE | |
| 6154 | SD_BALANCE_NEWIDLE | |
| 6155 | SD_BALANCE_FORK | |
Siddha, Suresh B | 89c4710 | 2006-10-03 01:14:09 -0700 | [diff] [blame] | 6156 | SD_BALANCE_EXEC | |
| 6157 | SD_SHARE_CPUPOWER | |
| 6158 | SD_SHARE_PKG_RESOURCES); |
Ken Chen | 5436499 | 2008-12-07 18:47:37 -0800 | [diff] [blame] | 6159 | if (nr_node_ids == 1) |
| 6160 | pflags &= ~SD_SERIALIZE; |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 6161 | } |
| 6162 | if (~cflags & pflags) |
| 6163 | return 0; |
| 6164 | |
| 6165 | return 1; |
| 6166 | } |
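/*
 * Example of what the two checks above buy us: on a topology where a
 * higher-level domain spans exactly the same CPUs as its child and has a
 * single group, the parent contributes nothing to balancing, so
 * cpu_attach_domain() below uses sd_parent_degenerate() to splice it out
 * of the domain chain.
 */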
| 6167 | |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6168 | static void free_rootdomain(struct root_domain *rd) |
| 6169 | { |
Peter Zijlstra | 047106a | 2009-11-16 10:28:09 +0100 | [diff] [blame] | 6170 | synchronize_sched(); |
| 6171 | |
Rusty Russell | 68e7456 | 2008-11-25 02:35:13 +1030 | [diff] [blame] | 6172 | cpupri_cleanup(&rd->cpupri); |
| 6173 | |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6174 | free_cpumask_var(rd->rto_mask); |
| 6175 | free_cpumask_var(rd->online); |
| 6176 | free_cpumask_var(rd->span); |
| 6177 | kfree(rd); |
| 6178 | } |
| 6179 | |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6180 | static void rq_attach_root(struct rq *rq, struct root_domain *rd) |
| 6181 | { |
Ingo Molnar | a0490fa | 2009-02-12 11:35:40 +0100 | [diff] [blame] | 6182 | struct root_domain *old_rd = NULL; |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6183 | unsigned long flags; |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6184 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 6185 | raw_spin_lock_irqsave(&rq->lock, flags); |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6186 | |
| 6187 | if (rq->rd) { |
Ingo Molnar | a0490fa | 2009-02-12 11:35:40 +0100 | [diff] [blame] | 6188 | old_rd = rq->rd; |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6189 | |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6190 | if (cpumask_test_cpu(rq->cpu, old_rd->online)) |
Gregory Haskins | 1f11eb6 | 2008-06-04 15:04:05 -0400 | [diff] [blame] | 6191 | set_rq_offline(rq); |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6192 | |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6193 | cpumask_clear_cpu(rq->cpu, old_rd->span); |
Gregory Haskins | dc93852 | 2008-01-25 21:08:26 +0100 | [diff] [blame] | 6194 | |
Ingo Molnar | a0490fa | 2009-02-12 11:35:40 +0100 | [diff] [blame] | 6195 | /* |
| 6196 | * If we don't want to free the old_rd yet then |
| 6197 | * set old_rd to NULL to skip the freeing later |
| 6198 | * in this function: |
| 6199 | */ |
| 6200 | if (!atomic_dec_and_test(&old_rd->refcount)) |
| 6201 | old_rd = NULL; |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6202 | } |
| 6203 | |
| 6204 | atomic_inc(&rd->refcount); |
| 6205 | rq->rd = rd; |
| 6206 | |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6207 | cpumask_set_cpu(rq->cpu, rd->span); |
Gregory Haskins | 00aec93 | 2009-07-30 10:57:23 -0400 | [diff] [blame] | 6208 | if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) |
Gregory Haskins | 1f11eb6 | 2008-06-04 15:04:05 -0400 | [diff] [blame] | 6209 | set_rq_online(rq); |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6210 | |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 6211 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
Ingo Molnar | a0490fa | 2009-02-12 11:35:40 +0100 | [diff] [blame] | 6212 | |
| 6213 | if (old_rd) |
| 6214 | free_rootdomain(old_rd); |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6215 | } |
| 6216 | |
Pekka Enberg | 68c38fc | 2010-07-15 23:18:22 +0300 | [diff] [blame] | 6217 | static int init_rootdomain(struct root_domain *rd) |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6218 | { |
| 6219 | memset(rd, 0, sizeof(*rd)); |
| 6220 | |
Pekka Enberg | 68c38fc | 2010-07-15 23:18:22 +0300 | [diff] [blame] | 6221 | if (!alloc_cpumask_var(&rd->span, GFP_KERNEL)) |
Li Zefan | 0c910d2 | 2009-01-06 17:39:06 +0800 | [diff] [blame] | 6222 | goto out; |
Pekka Enberg | 68c38fc | 2010-07-15 23:18:22 +0300 | [diff] [blame] | 6223 | if (!alloc_cpumask_var(&rd->online, GFP_KERNEL)) |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6224 | goto free_span; |
Pekka Enberg | 68c38fc | 2010-07-15 23:18:22 +0300 | [diff] [blame] | 6225 | if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6226 | goto free_online; |
Gregory Haskins | 6e0534f | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 6227 | |
Pekka Enberg | 68c38fc | 2010-07-15 23:18:22 +0300 | [diff] [blame] | 6228 | if (cpupri_init(&rd->cpupri) != 0) |
Rusty Russell | 68e7456 | 2008-11-25 02:35:13 +1030 | [diff] [blame] | 6229 | goto free_rto_mask; |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6230 | return 0; |
| 6231 | |
Rusty Russell | 68e7456 | 2008-11-25 02:35:13 +1030 | [diff] [blame] | 6232 | free_rto_mask: |
| 6233 | free_cpumask_var(rd->rto_mask); |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6234 | free_online: |
| 6235 | free_cpumask_var(rd->online); |
| 6236 | free_span: |
| 6237 | free_cpumask_var(rd->span); |
Li Zefan | 0c910d2 | 2009-01-06 17:39:06 +0800 | [diff] [blame] | 6238 | out: |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6239 | return -ENOMEM; |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6240 | } |
| 6241 | |
| 6242 | static void init_defrootdomain(void) |
| 6243 | { |
Pekka Enberg | 68c38fc | 2010-07-15 23:18:22 +0300 | [diff] [blame] | 6244 | init_rootdomain(&def_root_domain); |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6245 | |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6246 | atomic_set(&def_root_domain.refcount, 1); |
| 6247 | } |
| 6248 | |
Gregory Haskins | dc93852 | 2008-01-25 21:08:26 +0100 | [diff] [blame] | 6249 | static struct root_domain *alloc_rootdomain(void) |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6250 | { |
| 6251 | struct root_domain *rd; |
| 6252 | |
| 6253 | rd = kmalloc(sizeof(*rd), GFP_KERNEL); |
| 6254 | if (!rd) |
| 6255 | return NULL; |
| 6256 | |
Pekka Enberg | 68c38fc | 2010-07-15 23:18:22 +0300 | [diff] [blame] | 6257 | if (init_rootdomain(rd) != 0) { |
Rusty Russell | c6c4927 | 2008-11-25 02:35:05 +1030 | [diff] [blame] | 6258 | kfree(rd); |
| 6259 | return NULL; |
| 6260 | } |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6261 | |
| 6262 | return rd; |
| 6263 | } |
| 6264 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6265 | /* |
Ingo Molnar | 0eab914 | 2008-01-25 21:08:19 +0100 | [diff] [blame] | 6266 | * Attach the domain 'sd' to 'cpu' as its base domain. Callers must |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6267 | * hold the hotplug lock. |
| 6268 | */ |
Ingo Molnar | 0eab914 | 2008-01-25 21:08:19 +0100 | [diff] [blame] | 6269 | static void |
| 6270 | cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6271 | { |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 6272 | struct rq *rq = cpu_rq(cpu); |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 6273 | struct sched_domain *tmp; |
| 6274 | |
Peter Zijlstra | 669c55e | 2010-04-16 14:59:29 +0200 | [diff] [blame] | 6275 | for (tmp = sd; tmp; tmp = tmp->parent) |
| 6276 | tmp->span_weight = cpumask_weight(sched_domain_span(tmp)); |
| 6277 | |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 6278 | /* Remove the sched domains which do not contribute to scheduling. */ |
Li Zefan | f29c9b1 | 2008-11-06 09:45:16 +0800 | [diff] [blame] | 6279 | for (tmp = sd; tmp; ) { |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 6280 | struct sched_domain *parent = tmp->parent; |
| 6281 | if (!parent) |
| 6282 | break; |
Li Zefan | f29c9b1 | 2008-11-06 09:45:16 +0800 | [diff] [blame] | 6283 | |
Siddha, Suresh B | 1a84887 | 2006-10-03 01:14:08 -0700 | [diff] [blame] | 6284 | if (sd_parent_degenerate(tmp, parent)) { |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 6285 | tmp->parent = parent->parent; |
Siddha, Suresh B | 1a84887 | 2006-10-03 01:14:08 -0700 | [diff] [blame] | 6286 | if (parent->parent) |
| 6287 | parent->parent->child = tmp; |
Li Zefan | f29c9b1 | 2008-11-06 09:45:16 +0800 | [diff] [blame] | 6288 | } else |
| 6289 | tmp = tmp->parent; |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 6290 | } |
| 6291 | |
Siddha, Suresh B | 1a84887 | 2006-10-03 01:14:08 -0700 | [diff] [blame] | 6292 | if (sd && sd_degenerate(sd)) { |
Suresh Siddha | 245af2c | 2005-06-25 14:57:25 -0700 | [diff] [blame] | 6293 | sd = sd->parent; |
Siddha, Suresh B | 1a84887 | 2006-10-03 01:14:08 -0700 | [diff] [blame] | 6294 | if (sd) |
| 6295 | sd->child = NULL; |
| 6296 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6297 | |
| 6298 | sched_domain_debug(sd, cpu); |
| 6299 | |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 6300 | rq_attach_root(rq, rd); |
Nick Piggin | 674311d | 2005-06-25 14:57:27 -0700 | [diff] [blame] | 6301 | rcu_assign_pointer(rq->sd, sd); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6302 | } |
| 6303 | |
| 6304 | /* cpus with isolated domains */ |
Rusty Russell | dcc30a3 | 2008-11-25 02:35:12 +1030 | [diff] [blame] | 6305 | static cpumask_var_t cpu_isolated_map; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6306 | |
| 6307 | /* Setup the mask of cpus configured for isolated domains */ |
| 6308 | static int __init isolated_cpu_setup(char *str) |
| 6309 | { |
Rusty Russell | bdddd29 | 2009-12-02 14:09:16 +1030 | [diff] [blame] | 6310 | alloc_bootmem_cpumask_var(&cpu_isolated_map); |
Rusty Russell | 968ea6d | 2008-12-13 21:55:51 +1030 | [diff] [blame] | 6311 | cpulist_parse(str, cpu_isolated_map); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6312 | return 1; |
| 6313 | } |
| 6314 | |
Ingo Molnar | 8927f49 | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 6315 | __setup("isolcpus=", isolated_cpu_setup); |
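/*
 * Usage sketch: booting with e.g. "isolcpus=2,3" parses that list into
 * cpu_isolated_map; those CPUs are then presumably left out when the
 * sched domains are built later in this file, so regular load balancing
 * leaves them alone.
 */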
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6316 | |
| 6317 | /* |
Siddha, Suresh B | 6711cab | 2006-12-10 02:20:07 -0800 | [diff] [blame] | 6318 | * init_sched_build_groups takes the cpumask we wish to span, and a pointer |
| 6319 | * to a function which identifies what group (along with the sched group) a CPU |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6320 | * belongs to. The return value of group_fn must be >= 0 and < nr_cpu_ids |
| 6321 | * (because we keep track of the groups covered with a struct cpumask). |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6322 | * |
| 6323 | * init_sched_build_groups will build a circular linked list of the groups |
| 6324 | * covered by the given span, and will set each group's ->cpumask correctly, |
| 6325 | * and ->cpu_power to 0. |
| 6326 | */ |
Siddha, Suresh B | a616058 | 2006-10-03 01:14:06 -0700 | [diff] [blame] | 6327 | static void |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6328 | init_sched_build_groups(const struct cpumask *span, |
| 6329 | const struct cpumask *cpu_map, |
| 6330 | int (*group_fn)(int cpu, const struct cpumask *cpu_map, |
Mike Travis | 7c16ec5 | 2008-04-04 18:11:11 -0700 | [diff] [blame] | 6331 | struct sched_group **sg, |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6332 | struct cpumask *tmpmask), |
| 6333 | struct cpumask *covered, struct cpumask *tmpmask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6334 | { |
| 6335 | struct sched_group *first = NULL, *last = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6336 | int i; |
| 6337 | |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6338 | cpumask_clear(covered); |
Mike Travis | 7c16ec5 | 2008-04-04 18:11:11 -0700 | [diff] [blame] | 6339 | |
Rusty Russell | abcd083 | 2008-11-25 02:35:02 +1030 | [diff] [blame] | 6340 | for_each_cpu(i, span) { |
Siddha, Suresh B | 6711cab | 2006-12-10 02:20:07 -0800 | [diff] [blame] | 6341 | struct sched_group *sg; |
Mike Travis | 7c16ec5 | 2008-04-04 18:11:11 -0700 | [diff] [blame] | 6342 | int group = group_fn(i, cpu_map, &sg, tmpmask); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6343 | int j; |
| 6344 | |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6345 | if (cpumask_test_cpu(i, covered)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6346 | continue; |
| 6347 | |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6348 | cpumask_clear(sched_group_cpus(sg)); |
Peter Zijlstra | 18a3885 | 2009-09-01 10:34:39 +0200 | [diff] [blame] | 6349 | sg->cpu_power = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6350 | |
Rusty Russell | abcd083 | 2008-11-25 02:35:02 +1030 | [diff] [blame] | 6351 | for_each_cpu(j, span) { |
Mike Travis | 7c16ec5 | 2008-04-04 18:11:11 -0700 | [diff] [blame] | 6352 | if (group_fn(j, cpu_map, NULL, tmpmask) != group) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6353 | continue; |
| 6354 | |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6355 | cpumask_set_cpu(j, covered); |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6356 | cpumask_set_cpu(j, sched_group_cpus(sg)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6357 | } |
| 6358 | if (!first) |
| 6359 | first = sg; |
| 6360 | if (last) |
| 6361 | last->next = sg; |
| 6362 | last = sg; |
| 6363 | } |
| 6364 | last->next = first; |
| 6365 | } |
| 6366 | |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6367 | #define SD_NODES_PER_DOMAIN 16 |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6368 | |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6369 | #ifdef CONFIG_NUMA |
akpm@osdl.org | 198e2f1 | 2006-01-12 01:05:30 -0800 | [diff] [blame] | 6370 | |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6371 | /** |
| 6372 | * find_next_best_node - find the next node to include in a sched_domain |
| 6373 | * @node: node whose sched_domain we're building |
| 6374 | * @used_nodes: nodes already in the sched_domain |
| 6375 | * |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 6376 | * Find the next node to include in a given scheduling domain. Simply |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6377 | * finds the closest node not already in the @used_nodes map. |
| 6378 | * |
| 6379 | * Should use nodemask_t. |
| 6380 | */ |
Mike Travis | c5f59f0 | 2008-04-04 18:11:10 -0700 | [diff] [blame] | 6381 | static int find_next_best_node(int node, nodemask_t *used_nodes) |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6382 | { |
| 6383 | int i, n, val, min_val, best_node = 0; |
| 6384 | |
| 6385 | min_val = INT_MAX; |
| 6386 | |
Mike Travis | 076ac2a | 2008-05-12 21:21:12 +0200 | [diff] [blame] | 6387 | for (i = 0; i < nr_node_ids; i++) { |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6388 | /* Start at @node */ |
Mike Travis | 076ac2a | 2008-05-12 21:21:12 +0200 | [diff] [blame] | 6389 | n = (node + i) % nr_node_ids; |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6390 | |
| 6391 | if (!nr_cpus_node(n)) |
| 6392 | continue; |
| 6393 | |
| 6394 | /* Skip already used nodes */ |
Mike Travis | c5f59f0 | 2008-04-04 18:11:10 -0700 | [diff] [blame] | 6395 | if (node_isset(n, *used_nodes)) |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6396 | continue; |
| 6397 | |
| 6398 | /* Simple min distance search */ |
| 6399 | val = node_distance(node, n); |
| 6400 | |
| 6401 | if (val < min_val) { |
| 6402 | min_val = val; |
| 6403 | best_node = n; |
| 6404 | } |
| 6405 | } |
| 6406 | |
Mike Travis | c5f59f0 | 2008-04-04 18:11:10 -0700 | [diff] [blame] | 6407 | node_set(best_node, *used_nodes); |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6408 | return best_node; |
| 6409 | } |
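/*
 * Worked example (hypothetical distances): starting from node 0 with
 * used_nodes = {0}, the first call returns whichever populated node has
 * the smallest node_distance(0, n), say node 1, and marks it used; the
 * next call then picks the closest of the remaining nodes, and so on
 * until sched_domain_node_span() has gathered SD_NODES_PER_DOMAIN nodes.
 */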
| 6410 | |
| 6411 | /** |
| 6412 | * sched_domain_node_span - get a cpumask for a node's sched_domain |
| 6413 | * @node: node whose cpumask we're constructing |
Randy Dunlap | 7348672 | 2008-04-22 10:07:22 -0700 | [diff] [blame] | 6414 | * @span: resulting cpumask |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6415 | * |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 6416 | * Given a node, construct a good cpumask for its sched_domain to span. It |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6417 | * should be one that prevents unnecessary balancing, but also spreads tasks |
| 6418 | * out optimally. |
| 6419 | */ |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6420 | static void sched_domain_node_span(int node, struct cpumask *span) |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6421 | { |
Mike Travis | c5f59f0 | 2008-04-04 18:11:10 -0700 | [diff] [blame] | 6422 | nodemask_t used_nodes; |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 6423 | int i; |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6424 | |
Mike Travis | 6ca09df | 2008-12-31 18:08:45 -0800 | [diff] [blame] | 6425 | cpumask_clear(span); |
Mike Travis | c5f59f0 | 2008-04-04 18:11:10 -0700 | [diff] [blame] | 6426 | nodes_clear(used_nodes); |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6427 | |
Mike Travis | 6ca09df | 2008-12-31 18:08:45 -0800 | [diff] [blame] | 6428 | cpumask_or(span, span, cpumask_of_node(node)); |
Mike Travis | c5f59f0 | 2008-04-04 18:11:10 -0700 | [diff] [blame] | 6429 | node_set(node, used_nodes); |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6430 | |
| 6431 | for (i = 1; i < SD_NODES_PER_DOMAIN; i++) { |
Mike Travis | c5f59f0 | 2008-04-04 18:11:10 -0700 | [diff] [blame] | 6432 | int next_node = find_next_best_node(node, &used_nodes); |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 6433 | |
Mike Travis | 6ca09df | 2008-12-31 18:08:45 -0800 | [diff] [blame] | 6434 | cpumask_or(span, span, cpumask_of_node(next_node)); |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6435 | } |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6436 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 6437 | #endif /* CONFIG_NUMA */ |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6438 | |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 6439 | int sched_smt_power_savings = 0, sched_mc_power_savings = 0; |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 6440 | |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6441 | /* |
Rusty Russell | 6c99e9a | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6442 | * The cpus mask in sched_group and sched_domain hangs off the end. |
Ingo Molnar | 4200efd | 2009-05-19 09:22:19 +0200 | [diff] [blame] | 6443 | * |
| 6444 | * ( See the comments in include/linux/sched.h:struct sched_group |
| 6445 | * and struct sched_domain. ) |
Rusty Russell | 6c99e9a | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6446 | */ |
| 6447 | struct static_sched_group { |
| 6448 | struct sched_group sg; |
| 6449 | DECLARE_BITMAP(cpus, CONFIG_NR_CPUS); |
| 6450 | }; |
| 6451 | |
| 6452 | struct static_sched_domain { |
| 6453 | struct sched_domain sd; |
| 6454 | DECLARE_BITMAP(span, CONFIG_NR_CPUS); |
| 6455 | }; |
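/*
 * Illustrative sketch (hypothetical helper, not part of this file): the
 * trailing bitmap above is the storage that sched_group_cpus() and
 * sched_domain_span() return, so a dynamically sized group only needs
 * sizeof(struct sched_group) + cpumask_size() bytes, exactly as
 * build_numa_sched_groups() allocates further down.
 */
#if 0
static struct sched_group *example_alloc_node_group(int node)
{
	struct sched_group *sg;

	/* struct sched_group plus the cpumask hanging off its end */
	sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
			  GFP_KERNEL, node);
	if (!sg)
		return NULL;
	cpumask_clear(sched_group_cpus(sg));	/* the trailing storage */
	sg->cpu_power = 0;
	sg->next = sg;				/* single-entry circular list */
	return sg;
}
#endif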
| 6456 | |
Andreas Herrmann | 49a02c5 | 2009-08-18 12:51:52 +0200 | [diff] [blame] | 6457 | struct s_data { |
| 6458 | #ifdef CONFIG_NUMA |
| 6459 | int sd_allnodes; |
| 6460 | cpumask_var_t domainspan; |
| 6461 | cpumask_var_t covered; |
| 6462 | cpumask_var_t notcovered; |
| 6463 | #endif |
| 6464 | cpumask_var_t nodemask; |
| 6465 | cpumask_var_t this_sibling_map; |
| 6466 | cpumask_var_t this_core_map; |
Heiko Carstens | 01a0854 | 2010-08-31 10:28:16 +0200 | [diff] [blame] | 6467 | cpumask_var_t this_book_map; |
Andreas Herrmann | 49a02c5 | 2009-08-18 12:51:52 +0200 | [diff] [blame] | 6468 | cpumask_var_t send_covered; |
| 6469 | cpumask_var_t tmpmask; |
| 6470 | struct sched_group **sched_group_nodes; |
| 6471 | struct root_domain *rd; |
| 6472 | }; |
| 6473 | |
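/*
 * Allocation progress for build_sched_domains(): the enumerators below are
 * ordered so that __visit_domain_allocation_hell() can return the last state
 * it reached and __free_domain_allocs() can switch on that state and fall
 * through, releasing exactly what had been allocated up to the failure point.
 */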
Andreas Herrmann | 2109b99 | 2009-08-18 12:53:00 +0200 | [diff] [blame] | 6474 | enum s_alloc { |
| 6475 | sa_sched_groups = 0, |
| 6476 | sa_rootdomain, |
| 6477 | sa_tmpmask, |
| 6478 | sa_send_covered, |
Heiko Carstens | 01a0854 | 2010-08-31 10:28:16 +0200 | [diff] [blame] | 6479 | sa_this_book_map, |
Andreas Herrmann | 2109b99 | 2009-08-18 12:53:00 +0200 | [diff] [blame] | 6480 | sa_this_core_map, |
| 6481 | sa_this_sibling_map, |
| 6482 | sa_nodemask, |
| 6483 | sa_sched_group_nodes, |
| 6484 | #ifdef CONFIG_NUMA |
| 6485 | sa_notcovered, |
| 6486 | sa_covered, |
| 6487 | sa_domainspan, |
| 6488 | #endif |
| 6489 | sa_none, |
| 6490 | }; |
| 6491 | |
Rusty Russell | 6c99e9a | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6492 | /* |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 6493 | * SMT sched-domains: |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6494 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6495 | #ifdef CONFIG_SCHED_SMT |
Rusty Russell | 6c99e9a | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6496 | static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains); |
Tejun Heo | 1871e52 | 2009-10-29 22:34:13 +0900 | [diff] [blame] | 6497 | static DEFINE_PER_CPU(struct static_sched_group, sched_groups); |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 6498 | |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 6499 | static int |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6500 | cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map, |
| 6501 | struct sched_group **sg, struct cpumask *unused) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6502 | { |
Siddha, Suresh B | 6711cab | 2006-12-10 02:20:07 -0800 | [diff] [blame] | 6503 | if (sg) |
Tejun Heo | 1871e52 | 2009-10-29 22:34:13 +0900 | [diff] [blame] | 6504 | *sg = &per_cpu(sched_groups, cpu).sg; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6505 | return cpu; |
| 6506 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 6507 | #endif /* CONFIG_SCHED_SMT */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6508 | |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 6509 | /* |
| 6510 | * multi-core sched-domains: |
| 6511 | */ |
Siddha, Suresh B | 1e9f28f | 2006-03-27 01:15:22 -0800 | [diff] [blame] | 6512 | #ifdef CONFIG_SCHED_MC |
Rusty Russell | 6c99e9a | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6513 | static DEFINE_PER_CPU(struct static_sched_domain, core_domains); |
| 6514 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_core); |
Siddha, Suresh B | 1e9f28f | 2006-03-27 01:15:22 -0800 | [diff] [blame] | 6515 | |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 6516 | static int |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6517 | cpu_to_core_group(int cpu, const struct cpumask *cpu_map, |
| 6518 | struct sched_group **sg, struct cpumask *mask) |
Siddha, Suresh B | 1e9f28f | 2006-03-27 01:15:22 -0800 | [diff] [blame] | 6519 | { |
Siddha, Suresh B | 6711cab | 2006-12-10 02:20:07 -0800 | [diff] [blame] | 6520 | int group; |
Heiko Carstens | f269893 | 2010-08-31 10:28:15 +0200 | [diff] [blame] | 6521 | #ifdef CONFIG_SCHED_SMT |
Rusty Russell | c69fc56 | 2009-03-13 14:49:46 +1030 | [diff] [blame] | 6522 | cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map); |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6523 | group = cpumask_first(mask); |
Heiko Carstens | f269893 | 2010-08-31 10:28:15 +0200 | [diff] [blame] | 6524 | #else |
| 6525 | group = cpu; |
| 6526 | #endif |
Siddha, Suresh B | 6711cab | 2006-12-10 02:20:07 -0800 | [diff] [blame] | 6527 | if (sg) |
Rusty Russell | 6c99e9a | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6528 | *sg = &per_cpu(sched_group_core, group).sg; |
Siddha, Suresh B | 6711cab | 2006-12-10 02:20:07 -0800 | [diff] [blame] | 6529 | return group; |
Siddha, Suresh B | 1e9f28f | 2006-03-27 01:15:22 -0800 | [diff] [blame] | 6530 | } |
Heiko Carstens | f269893 | 2010-08-31 10:28:15 +0200 | [diff] [blame] | 6531 | #endif /* CONFIG_SCHED_MC */ |
Siddha, Suresh B | 1e9f28f | 2006-03-27 01:15:22 -0800 | [diff] [blame] | 6532 | |
Heiko Carstens | 01a0854 | 2010-08-31 10:28:16 +0200 | [diff] [blame] | 6533 | /* |
| 6534 | * book sched-domains: |
| 6535 | */ |
| 6536 | #ifdef CONFIG_SCHED_BOOK |
| 6537 | static DEFINE_PER_CPU(struct static_sched_domain, book_domains); |
| 6538 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_book); |
| 6539 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6540 | static int |
Heiko Carstens | 01a0854 | 2010-08-31 10:28:16 +0200 | [diff] [blame] | 6541 | cpu_to_book_group(int cpu, const struct cpumask *cpu_map, |
| 6542 | struct sched_group **sg, struct cpumask *mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6543 | { |
Heiko Carstens | 01a0854 | 2010-08-31 10:28:16 +0200 | [diff] [blame] | 6544 | int group = cpu; |
| 6545 | #ifdef CONFIG_SCHED_MC |
| 6546 | cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map); |
| 6547 | group = cpumask_first(mask); |
| 6548 | #elif defined(CONFIG_SCHED_SMT) |
| 6549 | cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map); |
| 6550 | group = cpumask_first(mask); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6551 | #endif |
Heiko Carstens | 01a0854 | 2010-08-31 10:28:16 +0200 | [diff] [blame] | 6552 | if (sg) |
| 6553 | *sg = &per_cpu(sched_group_book, group).sg; |
| 6554 | return group; |
| 6555 | } |
| 6556 | #endif /* CONFIG_SCHED_BOOK */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6557 | |
Rusty Russell | 6c99e9a | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6558 | static DEFINE_PER_CPU(struct static_sched_domain, phys_domains); |
| 6559 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys); |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 6560 | |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 6561 | static int |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6562 | cpu_to_phys_group(int cpu, const struct cpumask *cpu_map, |
| 6563 | struct sched_group **sg, struct cpumask *mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6564 | { |
Siddha, Suresh B | 6711cab | 2006-12-10 02:20:07 -0800 | [diff] [blame] | 6565 | int group; |
Heiko Carstens | 01a0854 | 2010-08-31 10:28:16 +0200 | [diff] [blame] | 6566 | #ifdef CONFIG_SCHED_BOOK |
| 6567 | cpumask_and(mask, cpu_book_mask(cpu), cpu_map); |
| 6568 | group = cpumask_first(mask); |
| 6569 | #elif defined(CONFIG_SCHED_MC) |
Mike Travis | 6ca09df | 2008-12-31 18:08:45 -0800 | [diff] [blame] | 6570 | cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map); |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6571 | group = cpumask_first(mask); |
Siddha, Suresh B | 1e9f28f | 2006-03-27 01:15:22 -0800 | [diff] [blame] | 6572 | #elif defined(CONFIG_SCHED_SMT) |
Rusty Russell | c69fc56 | 2009-03-13 14:49:46 +1030 | [diff] [blame] | 6573 | cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map); |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6574 | group = cpumask_first(mask); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6575 | #else |
Siddha, Suresh B | 6711cab | 2006-12-10 02:20:07 -0800 | [diff] [blame] | 6576 | group = cpu; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6577 | #endif |
Siddha, Suresh B | 6711cab | 2006-12-10 02:20:07 -0800 | [diff] [blame] | 6578 | if (sg) |
Rusty Russell | 6c99e9a | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6579 | *sg = &per_cpu(sched_group_phys, group).sg; |
Siddha, Suresh B | 6711cab | 2006-12-10 02:20:07 -0800 | [diff] [blame] | 6580 | return group; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6581 | } |
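/*
 * Worked example (a sketch; the box, cpu numbers and mask contents are
 * hypothetical): with CONFIG_SCHED_MC, assuming cpu_coregroup_mask(5)
 * covers cpus 4-7 and all of them are in cpu_map,
 *
 *	cpu_to_phys_group(5, cpu_map, &sg, mask);
 *
 * leaves mask = cpus 4-7, returns group = 4 and sets
 * sg = &per_cpu(sched_group_phys, 4).sg, so every cpu of that package is
 * folded onto the same physical-level sched_group.
 */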
| 6582 | |
| 6583 | #ifdef CONFIG_NUMA |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6584 | /* |
| 6585 | * The init_sched_build_groups can't handle what we want to do with node |
| 6586 | * groups, so roll our own. Now each node has its own list of groups which |
| 6587 | * gets dynamically allocated. |
| 6588 | */ |
Rusty Russell | 62ea9ce | 2009-01-11 01:04:16 +0100 | [diff] [blame] | 6589 | static DEFINE_PER_CPU(struct static_sched_domain, node_domains); |
Mike Travis | 434d53b | 2008-04-04 18:11:04 -0700 | [diff] [blame] | 6590 | static struct sched_group ***sched_group_nodes_bycpu; |
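/*
 * sched_group_nodes_bycpu[cpu][node] is the head of node 'node's circular
 * group list.  Only the slot for the first cpu of a cpu_map is filled in
 * (see __visit_domain_allocation_hell()); free_sched_groups() later walks
 * it to tear the per-node lists down again.
 */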
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6591 | |
Rusty Russell | 62ea9ce | 2009-01-11 01:04:16 +0100 | [diff] [blame] | 6592 | static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains); |
Rusty Russell | 6c99e9a | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6593 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes); |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 6594 | |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6595 | static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map, |
| 6596 | struct sched_group **sg, |
| 6597 | struct cpumask *nodemask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6598 | { |
Siddha, Suresh B | 6711cab | 2006-12-10 02:20:07 -0800 | [diff] [blame] | 6599 | int group; |
| 6600 | |
Mike Travis | 6ca09df | 2008-12-31 18:08:45 -0800 | [diff] [blame] | 6601 | cpumask_and(nodemask, cpumask_of_node(cpu_to_node(cpu)), cpu_map); |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6602 | group = cpumask_first(nodemask); |
Siddha, Suresh B | 6711cab | 2006-12-10 02:20:07 -0800 | [diff] [blame] | 6603 | |
| 6604 | if (sg) |
Rusty Russell | 6c99e9a | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6605 | *sg = &per_cpu(sched_group_allnodes, group).sg; |
Siddha, Suresh B | 6711cab | 2006-12-10 02:20:07 -0800 | [diff] [blame] | 6606 | return group; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6607 | } |
Siddha, Suresh B | 6711cab | 2006-12-10 02:20:07 -0800 | [diff] [blame] | 6608 | |
Siddha, Suresh B | 0806903 | 2006-03-27 01:15:23 -0800 | [diff] [blame] | 6609 | static void init_numa_sched_groups_power(struct sched_group *group_head) |
| 6610 | { |
| 6611 | struct sched_group *sg = group_head; |
| 6612 | int j; |
| 6613 | |
| 6614 | if (!sg) |
| 6615 | return; |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 6616 | do { |
Rusty Russell | 758b2cd | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6617 | for_each_cpu(j, sched_group_cpus(sg)) { |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 6618 | struct sched_domain *sd; |
Siddha, Suresh B | 0806903 | 2006-03-27 01:15:23 -0800 | [diff] [blame] | 6619 | |
Rusty Russell | 6c99e9a | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 6620 | sd = &per_cpu(phys_domains, j).sd; |
Miao Xie | 13318a7 | 2009-04-15 09:59:10 +0800 | [diff] [blame] | 6621 | if (j != group_first_cpu(sd->groups)) { |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 6622 | /* |
| 6623 | * Only add "power" once for each |
| 6624 | * physical package. |
| 6625 | */ |
| 6626 | continue; |
| 6627 | } |
| 6628 | |
Peter Zijlstra | 18a3885 | 2009-09-01 10:34:39 +0200 | [diff] [blame] | 6629 | sg->cpu_power += sd->groups->cpu_power; |
Siddha, Suresh B | 0806903 | 2006-03-27 01:15:23 -0800 | [diff] [blame] | 6630 | } |
Andi Kleen | 3a5c359 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 6631 | sg = sg->next; |
| 6632 | } while (sg != group_head); |
Siddha, Suresh B | 0806903 | 2006-03-27 01:15:23 -0800 | [diff] [blame] | 6633 | } |
Andreas Herrmann | 0601a88 | 2009-08-18 13:01:11 +0200 | [diff] [blame] | 6634 | |
| 6635 | static int build_numa_sched_groups(struct s_data *d, |
| 6636 | const struct cpumask *cpu_map, int num) |
| 6637 | { |
| 6638 | struct sched_domain *sd; |
| 6639 | struct sched_group *sg, *prev; |
| 6640 | int n, j; |
| 6641 | |
| 6642 | cpumask_clear(d->covered); |
| 6643 | cpumask_and(d->nodemask, cpumask_of_node(num), cpu_map); |
| 6644 | if (cpumask_empty(d->nodemask)) { |
| 6645 | d->sched_group_nodes[num] = NULL; |
| 6646 | goto out; |
| 6647 | } |
| 6648 | |
| 6649 | sched_domain_node_span(num, d->domainspan); |
| 6650 | cpumask_and(d->domainspan, d->domainspan, cpu_map); |
| 6651 | |
| 6652 | sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(), |
| 6653 | GFP_KERNEL, num); |
| 6654 | if (!sg) { |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6655 | printk(KERN_WARNING "Can not alloc domain group for node %d\n", |
| 6656 | num); |
Andreas Herrmann | 0601a88 | 2009-08-18 13:01:11 +0200 | [diff] [blame] | 6657 | return -ENOMEM; |
| 6658 | } |
| 6659 | d->sched_group_nodes[num] = sg; |
| 6660 | |
| 6661 | for_each_cpu(j, d->nodemask) { |
| 6662 | sd = &per_cpu(node_domains, j).sd; |
| 6663 | sd->groups = sg; |
| 6664 | } |
| 6665 | |
Peter Zijlstra | 18a3885 | 2009-09-01 10:34:39 +0200 | [diff] [blame] | 6666 | sg->cpu_power = 0; |
Andreas Herrmann | 0601a88 | 2009-08-18 13:01:11 +0200 | [diff] [blame] | 6667 | cpumask_copy(sched_group_cpus(sg), d->nodemask); |
| 6668 | sg->next = sg; |
| 6669 | cpumask_or(d->covered, d->covered, d->nodemask); |
| 6670 | |
| 6671 | prev = sg; |
| 6672 | for (j = 0; j < nr_node_ids; j++) { |
| 6673 | n = (num + j) % nr_node_ids; |
| 6674 | cpumask_complement(d->notcovered, d->covered); |
| 6675 | cpumask_and(d->tmpmask, d->notcovered, cpu_map); |
| 6676 | cpumask_and(d->tmpmask, d->tmpmask, d->domainspan); |
| 6677 | if (cpumask_empty(d->tmpmask)) |
| 6678 | break; |
| 6679 | cpumask_and(d->tmpmask, d->tmpmask, cpumask_of_node(n)); |
| 6680 | if (cpumask_empty(d->tmpmask)) |
| 6681 | continue; |
| 6682 | sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(), |
| 6683 | GFP_KERNEL, num); |
| 6684 | if (!sg) { |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6685 | printk(KERN_WARNING |
| 6686 | "Can not alloc domain group for node %d\n", j); |
Andreas Herrmann | 0601a88 | 2009-08-18 13:01:11 +0200 | [diff] [blame] | 6687 | return -ENOMEM; |
| 6688 | } |
Peter Zijlstra | 18a3885 | 2009-09-01 10:34:39 +0200 | [diff] [blame] | 6689 | sg->cpu_power = 0; |
Andreas Herrmann | 0601a88 | 2009-08-18 13:01:11 +0200 | [diff] [blame] | 6690 | cpumask_copy(sched_group_cpus(sg), d->tmpmask); |
| 6691 | sg->next = prev->next; |
| 6692 | cpumask_or(d->covered, d->covered, d->tmpmask); |
| 6693 | prev->next = sg; |
| 6694 | prev = sg; |
| 6695 | } |
| 6696 | out: |
| 6697 | return 0; |
| 6698 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 6699 | #endif /* CONFIG_NUMA */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6700 | |
Siddha, Suresh B | a616058 | 2006-10-03 01:14:06 -0700 | [diff] [blame] | 6701 | #ifdef CONFIG_NUMA |
Srivatsa Vaddagiri | 51888ca | 2006-06-27 02:54:38 -0700 | [diff] [blame] | 6702 | /* Free memory allocated for various sched_group structures */ |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6703 | static void free_sched_groups(const struct cpumask *cpu_map, |
| 6704 | struct cpumask *nodemask) |
Srivatsa Vaddagiri | 51888ca | 2006-06-27 02:54:38 -0700 | [diff] [blame] | 6705 | { |
Siddha, Suresh B | a616058 | 2006-10-03 01:14:06 -0700 | [diff] [blame] | 6706 | int cpu, i; |
Srivatsa Vaddagiri | 51888ca | 2006-06-27 02:54:38 -0700 | [diff] [blame] | 6707 | |
Rusty Russell | abcd083 | 2008-11-25 02:35:02 +1030 | [diff] [blame] | 6708 | for_each_cpu(cpu, cpu_map) { |
Srivatsa Vaddagiri | 51888ca | 2006-06-27 02:54:38 -0700 | [diff] [blame] | 6709 | struct sched_group **sched_group_nodes |
| 6710 | = sched_group_nodes_bycpu[cpu]; |
| 6711 | |
Srivatsa Vaddagiri | 51888ca | 2006-06-27 02:54:38 -0700 | [diff] [blame] | 6712 | if (!sched_group_nodes) |
| 6713 | continue; |
| 6714 | |
Mike Travis | 076ac2a | 2008-05-12 21:21:12 +0200 | [diff] [blame] | 6715 | for (i = 0; i < nr_node_ids; i++) { |
Srivatsa Vaddagiri | 51888ca | 2006-06-27 02:54:38 -0700 | [diff] [blame] | 6716 | struct sched_group *oldsg, *sg = sched_group_nodes[i]; |
| 6717 | |
Mike Travis | 6ca09df | 2008-12-31 18:08:45 -0800 | [diff] [blame] | 6718 | cpumask_and(nodemask, cpumask_of_node(i), cpu_map); |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6719 | if (cpumask_empty(nodemask)) |
Srivatsa Vaddagiri | 51888ca | 2006-06-27 02:54:38 -0700 | [diff] [blame] | 6720 | continue; |
| 6721 | |
| 6722 | if (sg == NULL) |
| 6723 | continue; |
| 6724 | sg = sg->next; |
| 6725 | next_sg: |
| 6726 | oldsg = sg; |
| 6727 | sg = sg->next; |
| 6728 | kfree(oldsg); |
| 6729 | if (oldsg != sched_group_nodes[i]) |
| 6730 | goto next_sg; |
| 6731 | } |
| 6732 | kfree(sched_group_nodes); |
| 6733 | sched_group_nodes_bycpu[cpu] = NULL; |
| 6734 | } |
Srivatsa Vaddagiri | 51888ca | 2006-06-27 02:54:38 -0700 | [diff] [blame] | 6735 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 6736 | #else /* !CONFIG_NUMA */ |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 6737 | static void free_sched_groups(const struct cpumask *cpu_map, |
| 6738 | struct cpumask *nodemask) |
Siddha, Suresh B | a616058 | 2006-10-03 01:14:06 -0700 | [diff] [blame] | 6739 | { |
| 6740 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 6741 | #endif /* CONFIG_NUMA */ |
Srivatsa Vaddagiri | 51888ca | 2006-06-27 02:54:38 -0700 | [diff] [blame] | 6742 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6743 | /* |
Siddha, Suresh B | 89c4710 | 2006-10-03 01:14:09 -0700 | [diff] [blame] | 6744 | * Initialize sched groups cpu_power. |
| 6745 | * |
| 6746 | * cpu_power indicates the capacity of a sched group, which is used while |
| 6747 | * distributing the load between different sched groups in a sched domain. |
| 6748 | * Typically cpu_power for all the groups in a sched domain will be the same |
| 6749 | * unless there are asymmetries in the topology. If there are asymmetries, the |
| 6750 | * group having more cpu_power will pick up more load compared to the group |
| 6751 | * having less cpu_power. |
Siddha, Suresh B | 89c4710 | 2006-10-03 01:14:09 -0700 | [diff] [blame] | 6752 | */ |
| 6753 | static void init_sched_groups_power(int cpu, struct sched_domain *sd) |
| 6754 | { |
| 6755 | struct sched_domain *child; |
| 6756 | struct sched_group *group; |
Peter Zijlstra | f93e65c | 2009-09-01 10:34:32 +0200 | [diff] [blame] | 6757 | long power; |
| 6758 | int weight; |
Siddha, Suresh B | 89c4710 | 2006-10-03 01:14:09 -0700 | [diff] [blame] | 6759 | |
| 6760 | WARN_ON(!sd || !sd->groups); |
| 6761 | |
Miao Xie | 13318a7 | 2009-04-15 09:59:10 +0800 | [diff] [blame] | 6762 | if (cpu != group_first_cpu(sd->groups)) |
Siddha, Suresh B | 89c4710 | 2006-10-03 01:14:09 -0700 | [diff] [blame] | 6763 | return; |
| 6764 | |
Suresh Siddha | aae6d3d | 2010-09-17 15:02:32 -0700 | [diff] [blame] | 6765 | sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups)); |
| 6766 | |
Siddha, Suresh B | 89c4710 | 2006-10-03 01:14:09 -0700 | [diff] [blame] | 6767 | child = sd->child; |
| 6768 | |
Peter Zijlstra | 18a3885 | 2009-09-01 10:34:39 +0200 | [diff] [blame] | 6769 | sd->groups->cpu_power = 0; |
Eric Dumazet | 5517d86 | 2007-05-08 00:32:57 -0700 | [diff] [blame] | 6770 | |
Peter Zijlstra | f93e65c | 2009-09-01 10:34:32 +0200 | [diff] [blame] | 6771 | if (!child) { |
| 6772 | power = SCHED_LOAD_SCALE; |
| 6773 | weight = cpumask_weight(sched_domain_span(sd)); |
| 6774 | /* |
| 6775 | * SMT siblings share the power of a single core. |
Peter Zijlstra | a52bfd7 | 2009-09-01 10:34:35 +0200 | [diff] [blame] | 6776 | * Usually multiple threads get a better yield out of |
| 6777 | * that one core than a single thread would have; |
| 6778 | * reflect that in sd->smt_gain. |
Peter Zijlstra | f93e65c | 2009-09-01 10:34:32 +0200 | [diff] [blame] | 6779 | */ |
Peter Zijlstra | a52bfd7 | 2009-09-01 10:34:35 +0200 | [diff] [blame] | 6780 | if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) { |
| 6781 | power *= sd->smt_gain; |
Peter Zijlstra | f93e65c | 2009-09-01 10:34:32 +0200 | [diff] [blame] | 6782 | power /= weight; |
Peter Zijlstra | a52bfd7 | 2009-09-01 10:34:35 +0200 | [diff] [blame] | 6783 | power >>= SCHED_LOAD_SHIFT; |
| 6784 | } |
Peter Zijlstra | 18a3885 | 2009-09-01 10:34:39 +0200 | [diff] [blame] | 6785 | sd->groups->cpu_power += power; |
Siddha, Suresh B | 89c4710 | 2006-10-03 01:14:09 -0700 | [diff] [blame] | 6786 | return; |
| 6787 | } |
| 6788 | |
Siddha, Suresh B | 89c4710 | 2006-10-03 01:14:09 -0700 | [diff] [blame] | 6789 | /* |
Peter Zijlstra | f93e65c | 2009-09-01 10:34:32 +0200 | [diff] [blame] | 6790 | * Add cpu_power of each child group to this groups cpu_power. |
Siddha, Suresh B | 89c4710 | 2006-10-03 01:14:09 -0700 | [diff] [blame] | 6791 | */ |
| 6792 | group = child->groups; |
| 6793 | do { |
Peter Zijlstra | 18a3885 | 2009-09-01 10:34:39 +0200 | [diff] [blame] | 6794 | sd->groups->cpu_power += group->cpu_power; |
Siddha, Suresh B | 89c4710 | 2006-10-03 01:14:09 -0700 | [diff] [blame] | 6795 | group = group->next; |
| 6796 | } while (group != child->groups); |
| 6797 | } |
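/*
 * Worked example for the SMT branch above (a sketch, assuming the default
 * smt_gain of 1178 from SD_SIBLING_INIT and a core with two threads):
 *
 *	power  = SCHED_LOAD_SCALE               = 1024
 *	power *= sd->smt_gain                   = 1024 * 1178 = 1206272
 *	power /= weight (2 siblings)            = 603136
 *	power >>= SCHED_LOAD_SHIFT (10)         = 589
 *
 * so each thread's group ends up with cpu_power 589 and the pair totals
 * 1178, i.e. a two-thread core is rated at roughly 1.15 cpus rather than 2.
 */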
| 6798 | |
| 6799 | /* |
Mike Travis | 7c16ec5 | 2008-04-04 18:11:11 -0700 | [diff] [blame] | 6800 | * Initializers for schedule domains |
| 6801 | * Non-inlined to reduce accumulated stack pressure in build_sched_domains() |
| 6802 | */ |
| 6803 | |
Ingo Molnar | a5d8c34 | 2008-10-09 11:35:51 +0200 | [diff] [blame] | 6804 | #ifdef CONFIG_SCHED_DEBUG |
| 6805 | # define SD_INIT_NAME(sd, type) sd->name = #type |
| 6806 | #else |
| 6807 | # define SD_INIT_NAME(sd, type) do { } while (0) |
| 6808 | #endif |
| 6809 | |
Mike Travis | 7c16ec5 | 2008-04-04 18:11:11 -0700 | [diff] [blame] | 6810 | #define SD_INIT(sd, type) sd_init_##type(sd) |
Ingo Molnar | a5d8c34 | 2008-10-09 11:35:51 +0200 | [diff] [blame] | 6811 | |
Mike Travis | 7c16ec5 | 2008-04-04 18:11:11 -0700 | [diff] [blame] | 6812 | #define SD_INIT_FUNC(type) \ |
| 6813 | static noinline void sd_init_##type(struct sched_domain *sd) \ |
| 6814 | { \ |
| 6815 | memset(sd, 0, sizeof(*sd)); \ |
| 6816 | *sd = SD_##type##_INIT; \ |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 6817 | sd->level = SD_LV_##type; \ |
Ingo Molnar | a5d8c34 | 2008-10-09 11:35:51 +0200 | [diff] [blame] | 6818 | SD_INIT_NAME(sd, type); \ |
Mike Travis | 7c16ec5 | 2008-04-04 18:11:11 -0700 | [diff] [blame] | 6819 | } |
| 6820 | |
| 6821 | SD_INIT_FUNC(CPU) |
| 6822 | #ifdef CONFIG_NUMA |
| 6823 | SD_INIT_FUNC(ALLNODES) |
| 6824 | SD_INIT_FUNC(NODE) |
| 6825 | #endif |
| 6826 | #ifdef CONFIG_SCHED_SMT |
| 6827 | SD_INIT_FUNC(SIBLING) |
| 6828 | #endif |
| 6829 | #ifdef CONFIG_SCHED_MC |
| 6830 | SD_INIT_FUNC(MC) |
| 6831 | #endif |
Heiko Carstens | 01a0854 | 2010-08-31 10:28:16 +0200 | [diff] [blame] | 6832 | #ifdef CONFIG_SCHED_BOOK |
| 6833 | SD_INIT_FUNC(BOOK) |
| 6834 | #endif |
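/*
 * For reference, SD_INIT_FUNC(MC) above expands (with CONFIG_SCHED_DEBUG)
 * to roughly:
 *
 *	static noinline void sd_init_MC(struct sched_domain *sd)
 *	{
 *		memset(sd, 0, sizeof(*sd));
 *		*sd = SD_MC_INIT;
 *		sd->level = SD_LV_MC;
 *		sd->name = "MC";
 *	}
 *
 * and SD_INIT(sd, MC) then simply calls sd_init_MC(sd).
 */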
Mike Travis | 7c16ec5 | 2008-04-04 18:11:11 -0700 | [diff] [blame] | 6835 | |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 6836 | static int default_relax_domain_level = -1; |
| 6837 | |
| 6838 | static int __init setup_relax_domain_level(char *str) |
| 6839 | { |
Li Zefan | 30e0e17 | 2008-05-13 10:27:17 +0800 | [diff] [blame] | 6840 | unsigned long val; |
| 6841 | |
| 6842 | val = simple_strtoul(str, NULL, 0); |
| 6843 | if (val < SD_LV_MAX) |
| 6844 | default_relax_domain_level = val; |
| 6845 | |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 6846 | return 1; |
| 6847 | } |
| 6848 | __setup("relax_domain_level=", setup_relax_domain_level); |
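/*
 * Usage note (an illustrative reading of the code, not normative
 * documentation): booting with e.g.
 *
 *	relax_domain_level=2
 *
 * sets default_relax_domain_level, and set_domain_attribute() below then
 * clears SD_BALANCE_NEWIDLE and SD_BALANCE_WAKE on every domain whose level
 * is greater than the requested value while setting them on domains at or
 * below it, so newidle/wakeup balancing stays enabled only on the smaller
 * domains.
 */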
| 6849 | |
| 6850 | static void set_domain_attribute(struct sched_domain *sd, |
| 6851 | struct sched_domain_attr *attr) |
| 6852 | { |
| 6853 | int request; |
| 6854 | |
| 6855 | if (!attr || attr->relax_domain_level < 0) { |
| 6856 | if (default_relax_domain_level < 0) |
| 6857 | return; |
| 6858 | else |
| 6859 | request = default_relax_domain_level; |
| 6860 | } else |
| 6861 | request = attr->relax_domain_level; |
| 6862 | if (request < sd->level) { |
| 6863 | /* turn off idle balance on this domain */ |
Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 6864 | sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 6865 | } else { |
| 6866 | /* turn on idle balance on this domain */ |
Peter Zijlstra | c88d591 | 2009-09-10 13:50:02 +0200 | [diff] [blame] | 6867 | sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 6868 | } |
| 6869 | } |
| 6870 | |
Andreas Herrmann | 2109b99 | 2009-08-18 12:53:00 +0200 | [diff] [blame] | 6871 | static void __free_domain_allocs(struct s_data *d, enum s_alloc what, |
| 6872 | const struct cpumask *cpu_map) |
| 6873 | { |
| 6874 | switch (what) { |
| 6875 | case sa_sched_groups: |
| 6876 | free_sched_groups(cpu_map, d->tmpmask); /* fall through */ |
| 6877 | d->sched_group_nodes = NULL; |
| 6878 | case sa_rootdomain: |
| 6879 | free_rootdomain(d->rd); /* fall through */ |
| 6880 | case sa_tmpmask: |
| 6881 | free_cpumask_var(d->tmpmask); /* fall through */ |
| 6882 | case sa_send_covered: |
| 6883 | free_cpumask_var(d->send_covered); /* fall through */ |
Heiko Carstens | 01a0854 | 2010-08-31 10:28:16 +0200 | [diff] [blame] | 6884 | case sa_this_book_map: |
| 6885 | free_cpumask_var(d->this_book_map); /* fall through */ |
Andreas Herrmann | 2109b99 | 2009-08-18 12:53:00 +0200 | [diff] [blame] | 6886 | case sa_this_core_map: |
| 6887 | free_cpumask_var(d->this_core_map); /* fall through */ |
| 6888 | case sa_this_sibling_map: |
| 6889 | free_cpumask_var(d->this_sibling_map); /* fall through */ |
| 6890 | case sa_nodemask: |
| 6891 | free_cpumask_var(d->nodemask); /* fall through */ |
| 6892 | case sa_sched_group_nodes: |
| 6893 | #ifdef CONFIG_NUMA |
| 6894 | kfree(d->sched_group_nodes); /* fall through */ |
| 6895 | case sa_notcovered: |
| 6896 | free_cpumask_var(d->notcovered); /* fall through */ |
| 6897 | case sa_covered: |
| 6898 | free_cpumask_var(d->covered); /* fall through */ |
| 6899 | case sa_domainspan: |
| 6900 | free_cpumask_var(d->domainspan); /* fall through */ |
| 6901 | #endif |
| 6902 | case sa_none: |
| 6903 | break; |
| 6904 | } |
| 6905 | } |
| 6906 | |
| 6907 | static enum s_alloc __visit_domain_allocation_hell(struct s_data *d, |
| 6908 | const struct cpumask *cpu_map) |
| 6909 | { |
| 6910 | #ifdef CONFIG_NUMA |
| 6911 | if (!alloc_cpumask_var(&d->domainspan, GFP_KERNEL)) |
| 6912 | return sa_none; |
| 6913 | if (!alloc_cpumask_var(&d->covered, GFP_KERNEL)) |
| 6914 | return sa_domainspan; |
| 6915 | if (!alloc_cpumask_var(&d->notcovered, GFP_KERNEL)) |
| 6916 | return sa_covered; |
| 6917 | /* Allocate the per-node list of sched groups */ |
| 6918 | d->sched_group_nodes = kcalloc(nr_node_ids, |
| 6919 | sizeof(struct sched_group *), GFP_KERNEL); |
| 6920 | if (!d->sched_group_nodes) { |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6921 | printk(KERN_WARNING "Can not alloc sched group node list\n"); |
Andreas Herrmann | 2109b99 | 2009-08-18 12:53:00 +0200 | [diff] [blame] | 6922 | return sa_notcovered; |
| 6923 | } |
| 6924 | sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d->sched_group_nodes; |
| 6925 | #endif |
| 6926 | if (!alloc_cpumask_var(&d->nodemask, GFP_KERNEL)) |
| 6927 | return sa_sched_group_nodes; |
| 6928 | if (!alloc_cpumask_var(&d->this_sibling_map, GFP_KERNEL)) |
| 6929 | return sa_nodemask; |
| 6930 | if (!alloc_cpumask_var(&d->this_core_map, GFP_KERNEL)) |
| 6931 | return sa_this_sibling_map; |
Heiko Carstens | 01a0854 | 2010-08-31 10:28:16 +0200 | [diff] [blame] | 6932 | if (!alloc_cpumask_var(&d->this_book_map, GFP_KERNEL)) |
Andreas Herrmann | 2109b99 | 2009-08-18 12:53:00 +0200 | [diff] [blame] | 6933 | return sa_this_core_map; |
Heiko Carstens | 01a0854 | 2010-08-31 10:28:16 +0200 | [diff] [blame] | 6934 | if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL)) |
| 6935 | return sa_this_book_map; |
Andreas Herrmann | 2109b99 | 2009-08-18 12:53:00 +0200 | [diff] [blame] | 6936 | if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL)) |
| 6937 | return sa_send_covered; |
| 6938 | d->rd = alloc_rootdomain(); |
| 6939 | if (!d->rd) { |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 6940 | printk(KERN_WARNING "Cannot alloc root domain\n"); |
Andreas Herrmann | 2109b99 | 2009-08-18 12:53:00 +0200 | [diff] [blame] | 6941 | return sa_tmpmask; |
| 6942 | } |
| 6943 | return sa_rootdomain; |
| 6944 | } |
| 6945 | |
Andreas Herrmann | 7f4588f | 2009-08-18 12:54:06 +0200 | [diff] [blame] | 6946 | static struct sched_domain *__build_numa_sched_domains(struct s_data *d, |
| 6947 | const struct cpumask *cpu_map, struct sched_domain_attr *attr, int i) |
| 6948 | { |
| 6949 | struct sched_domain *sd = NULL; |
| 6950 | #ifdef CONFIG_NUMA |
| 6951 | struct sched_domain *parent; |
| 6952 | |
| 6953 | d->sd_allnodes = 0; |
| 6954 | if (cpumask_weight(cpu_map) > |
| 6955 | SD_NODES_PER_DOMAIN * cpumask_weight(d->nodemask)) { |
| 6956 | sd = &per_cpu(allnodes_domains, i).sd; |
| 6957 | SD_INIT(sd, ALLNODES); |
| 6958 | set_domain_attribute(sd, attr); |
| 6959 | cpumask_copy(sched_domain_span(sd), cpu_map); |
| 6960 | cpu_to_allnodes_group(i, cpu_map, &sd->groups, d->tmpmask); |
| 6961 | d->sd_allnodes = 1; |
| 6962 | } |
| 6963 | parent = sd; |
| 6964 | |
| 6965 | sd = &per_cpu(node_domains, i).sd; |
| 6966 | SD_INIT(sd, NODE); |
| 6967 | set_domain_attribute(sd, attr); |
| 6968 | sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd)); |
| 6969 | sd->parent = parent; |
| 6970 | if (parent) |
| 6971 | parent->child = sd; |
| 6972 | cpumask_and(sched_domain_span(sd), sched_domain_span(sd), cpu_map); |
| 6973 | #endif |
| 6974 | return sd; |
| 6975 | } |
| 6976 | |
Andreas Herrmann | 87cce66 | 2009-08-18 12:54:55 +0200 | [diff] [blame] | 6977 | static struct sched_domain *__build_cpu_sched_domain(struct s_data *d, |
| 6978 | const struct cpumask *cpu_map, struct sched_domain_attr *attr, |
| 6979 | struct sched_domain *parent, int i) |
| 6980 | { |
| 6981 | struct sched_domain *sd; |
| 6982 | sd = &per_cpu(phys_domains, i).sd; |
| 6983 | SD_INIT(sd, CPU); |
| 6984 | set_domain_attribute(sd, attr); |
| 6985 | cpumask_copy(sched_domain_span(sd), d->nodemask); |
| 6986 | sd->parent = parent; |
| 6987 | if (parent) |
| 6988 | parent->child = sd; |
| 6989 | cpu_to_phys_group(i, cpu_map, &sd->groups, d->tmpmask); |
| 6990 | return sd; |
| 6991 | } |
| 6992 | |
Heiko Carstens | 01a0854 | 2010-08-31 10:28:16 +0200 | [diff] [blame] | 6993 | static struct sched_domain *__build_book_sched_domain(struct s_data *d, |
| 6994 | const struct cpumask *cpu_map, struct sched_domain_attr *attr, |
| 6995 | struct sched_domain *parent, int i) |
| 6996 | { |
| 6997 | struct sched_domain *sd = parent; |
| 6998 | #ifdef CONFIG_SCHED_BOOK |
| 6999 | sd = &per_cpu(book_domains, i).sd; |
| 7000 | SD_INIT(sd, BOOK); |
| 7001 | set_domain_attribute(sd, attr); |
| 7002 | cpumask_and(sched_domain_span(sd), cpu_map, cpu_book_mask(i)); |
| 7003 | sd->parent = parent; |
| 7004 | parent->child = sd; |
| 7005 | cpu_to_book_group(i, cpu_map, &sd->groups, d->tmpmask); |
| 7006 | #endif |
| 7007 | return sd; |
| 7008 | } |
| 7009 | |
Andreas Herrmann | 410c408 | 2009-08-18 12:56:14 +0200 | [diff] [blame] | 7010 | static struct sched_domain *__build_mc_sched_domain(struct s_data *d, |
| 7011 | const struct cpumask *cpu_map, struct sched_domain_attr *attr, |
| 7012 | struct sched_domain *parent, int i) |
| 7013 | { |
| 7014 | struct sched_domain *sd = parent; |
| 7015 | #ifdef CONFIG_SCHED_MC |
| 7016 | sd = &per_cpu(core_domains, i).sd; |
| 7017 | SD_INIT(sd, MC); |
| 7018 | set_domain_attribute(sd, attr); |
| 7019 | cpumask_and(sched_domain_span(sd), cpu_map, cpu_coregroup_mask(i)); |
| 7020 | sd->parent = parent; |
| 7021 | parent->child = sd; |
| 7022 | cpu_to_core_group(i, cpu_map, &sd->groups, d->tmpmask); |
| 7023 | #endif |
| 7024 | return sd; |
| 7025 | } |
| 7026 | |
Andreas Herrmann | d817353 | 2009-08-18 12:57:03 +0200 | [diff] [blame] | 7027 | static struct sched_domain *__build_smt_sched_domain(struct s_data *d, |
| 7028 | const struct cpumask *cpu_map, struct sched_domain_attr *attr, |
| 7029 | struct sched_domain *parent, int i) |
| 7030 | { |
| 7031 | struct sched_domain *sd = parent; |
| 7032 | #ifdef CONFIG_SCHED_SMT |
| 7033 | sd = &per_cpu(cpu_domains, i).sd; |
| 7034 | SD_INIT(sd, SIBLING); |
| 7035 | set_domain_attribute(sd, attr); |
| 7036 | cpumask_and(sched_domain_span(sd), cpu_map, topology_thread_cpumask(i)); |
| 7037 | sd->parent = parent; |
| 7038 | parent->child = sd; |
| 7039 | cpu_to_cpu_group(i, cpu_map, &sd->groups, d->tmpmask); |
| 7040 | #endif |
| 7041 | return sd; |
| 7042 | } |
| 7043 | |
Andreas Herrmann | 0e8e85c | 2009-08-18 12:57:51 +0200 | [diff] [blame] | 7044 | static void build_sched_groups(struct s_data *d, enum sched_domain_level l, |
| 7045 | const struct cpumask *cpu_map, int cpu) |
| 7046 | { |
| 7047 | switch (l) { |
| 7048 | #ifdef CONFIG_SCHED_SMT |
| 7049 | case SD_LV_SIBLING: /* set up CPU (sibling) groups */ |
| 7050 | cpumask_and(d->this_sibling_map, cpu_map, |
| 7051 | topology_thread_cpumask(cpu)); |
| 7052 | if (cpu == cpumask_first(d->this_sibling_map)) |
| 7053 | init_sched_build_groups(d->this_sibling_map, cpu_map, |
| 7054 | &cpu_to_cpu_group, |
| 7055 | d->send_covered, d->tmpmask); |
| 7056 | break; |
| 7057 | #endif |
Andreas Herrmann | a2af04c | 2009-08-18 12:58:38 +0200 | [diff] [blame] | 7058 | #ifdef CONFIG_SCHED_MC |
| 7059 | case SD_LV_MC: /* set up multi-core groups */ |
| 7060 | cpumask_and(d->this_core_map, cpu_map, cpu_coregroup_mask(cpu)); |
| 7061 | if (cpu == cpumask_first(d->this_core_map)) |
| 7062 | init_sched_build_groups(d->this_core_map, cpu_map, |
| 7063 | &cpu_to_core_group, |
| 7064 | d->send_covered, d->tmpmask); |
| 7065 | break; |
| 7066 | #endif |
Heiko Carstens | 01a0854 | 2010-08-31 10:28:16 +0200 | [diff] [blame] | 7067 | #ifdef CONFIG_SCHED_BOOK |
| 7068 | case SD_LV_BOOK: /* set up book groups */ |
| 7069 | cpumask_and(d->this_book_map, cpu_map, cpu_book_mask(cpu)); |
| 7070 | if (cpu == cpumask_first(d->this_book_map)) |
| 7071 | init_sched_build_groups(d->this_book_map, cpu_map, |
| 7072 | &cpu_to_book_group, |
| 7073 | d->send_covered, d->tmpmask); |
| 7074 | break; |
| 7075 | #endif |
Andreas Herrmann | 8654809 | 2009-08-18 12:59:28 +0200 | [diff] [blame] | 7076 | case SD_LV_CPU: /* set up physical groups */ |
| 7077 | cpumask_and(d->nodemask, cpumask_of_node(cpu), cpu_map); |
| 7078 | if (!cpumask_empty(d->nodemask)) |
| 7079 | init_sched_build_groups(d->nodemask, cpu_map, |
| 7080 | &cpu_to_phys_group, |
| 7081 | d->send_covered, d->tmpmask); |
| 7082 | break; |
Andreas Herrmann | de616e3 | 2009-08-18 13:00:13 +0200 | [diff] [blame] | 7083 | #ifdef CONFIG_NUMA |
| 7084 | case SD_LV_ALLNODES: |
| 7085 | init_sched_build_groups(cpu_map, cpu_map, &cpu_to_allnodes_group, |
| 7086 | d->send_covered, d->tmpmask); |
| 7087 | break; |
| 7088 | #endif |
Andreas Herrmann | 0e8e85c | 2009-08-18 12:57:51 +0200 | [diff] [blame] | 7089 | default: |
| 7090 | break; |
| 7091 | } |
| 7092 | } |
| 7093 | |
Mike Travis | 7c16ec5 | 2008-04-04 18:11:11 -0700 | [diff] [blame] | 7094 | /* |
Dinakar Guniguntala | 1a20ff2 | 2005-06-25 14:57:33 -0700 | [diff] [blame] | 7095 | * Build sched domains for a given set of cpus and attach the sched domains |
| 7096 | * to the individual cpus |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7097 | */ |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 7098 | static int __build_sched_domains(const struct cpumask *cpu_map, |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 7099 | struct sched_domain_attr *attr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7100 | { |
Andreas Herrmann | 2109b99 | 2009-08-18 12:53:00 +0200 | [diff] [blame] | 7101 | enum s_alloc alloc_state = sa_none; |
Andreas Herrmann | 49a02c5 | 2009-08-18 12:51:52 +0200 | [diff] [blame] | 7102 | struct s_data d; |
Andreas Herrmann | 294b0c9 | 2009-08-18 13:02:29 +0200 | [diff] [blame] | 7103 | struct sched_domain *sd; |
Andreas Herrmann | 2109b99 | 2009-08-18 12:53:00 +0200 | [diff] [blame] | 7104 | int i; |
John Hawkes | d1b5513 | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 7105 | #ifdef CONFIG_NUMA |
Andreas Herrmann | 49a02c5 | 2009-08-18 12:51:52 +0200 | [diff] [blame] | 7106 | d.sd_allnodes = 0; |
Rusty Russell | 3404c8d | 2008-11-25 02:35:03 +1030 | [diff] [blame] | 7107 | #endif |
| 7108 | |
Andreas Herrmann | 2109b99 | 2009-08-18 12:53:00 +0200 | [diff] [blame] | 7109 | alloc_state = __visit_domain_allocation_hell(&d, cpu_map); |
| 7110 | if (alloc_state != sa_rootdomain) |
| 7111 | goto error; |
| 7112 | alloc_state = sa_sched_groups; |
Mike Travis | 7c16ec5 | 2008-04-04 18:11:11 -0700 | [diff] [blame] | 7113 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7114 | /* |
Dinakar Guniguntala | 1a20ff2 | 2005-06-25 14:57:33 -0700 | [diff] [blame] | 7115 | * Set up domains for cpus specified by the cpu_map. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7116 | */ |
Rusty Russell | abcd083 | 2008-11-25 02:35:02 +1030 | [diff] [blame] | 7117 | for_each_cpu(i, cpu_map) { |
Andreas Herrmann | 49a02c5 | 2009-08-18 12:51:52 +0200 | [diff] [blame] | 7118 | cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)), |
| 7119 | cpu_map); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7120 | |
Andreas Herrmann | 7f4588f | 2009-08-18 12:54:06 +0200 | [diff] [blame] | 7121 | sd = __build_numa_sched_domains(&d, cpu_map, attr, i); |
Andreas Herrmann | 87cce66 | 2009-08-18 12:54:55 +0200 | [diff] [blame] | 7122 | sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i); |
Heiko Carstens | 01a0854 | 2010-08-31 10:28:16 +0200 | [diff] [blame] | 7123 | sd = __build_book_sched_domain(&d, cpu_map, attr, sd, i); |
Andreas Herrmann | 410c408 | 2009-08-18 12:56:14 +0200 | [diff] [blame] | 7124 | sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i); |
Andreas Herrmann | d817353 | 2009-08-18 12:57:03 +0200 | [diff] [blame] | 7125 | sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7126 | } |
| 7127 | |
Rusty Russell | abcd083 | 2008-11-25 02:35:02 +1030 | [diff] [blame] | 7128 | for_each_cpu(i, cpu_map) { |
Andreas Herrmann | 0e8e85c | 2009-08-18 12:57:51 +0200 | [diff] [blame] | 7129 | build_sched_groups(&d, SD_LV_SIBLING, cpu_map, i); |
Heiko Carstens | 01a0854 | 2010-08-31 10:28:16 +0200 | [diff] [blame] | 7130 | build_sched_groups(&d, SD_LV_BOOK, cpu_map, i); |
Andreas Herrmann | a2af04c | 2009-08-18 12:58:38 +0200 | [diff] [blame] | 7131 | build_sched_groups(&d, SD_LV_MC, cpu_map, i); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7132 | } |
Siddha, Suresh B | 1e9f28f | 2006-03-27 01:15:22 -0800 | [diff] [blame] | 7133 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7134 | /* Set up physical groups */ |
Andreas Herrmann | 8654809 | 2009-08-18 12:59:28 +0200 | [diff] [blame] | 7135 | for (i = 0; i < nr_node_ids; i++) |
| 7136 | build_sched_groups(&d, SD_LV_CPU, cpu_map, i); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7137 | |
| 7138 | #ifdef CONFIG_NUMA |
| 7139 | /* Set up node groups */ |
Andreas Herrmann | de616e3 | 2009-08-18 13:00:13 +0200 | [diff] [blame] | 7140 | if (d.sd_allnodes) |
| 7141 | build_sched_groups(&d, SD_LV_ALLNODES, cpu_map, 0); |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 7142 | |
Andreas Herrmann | 0601a88 | 2009-08-18 13:01:11 +0200 | [diff] [blame] | 7143 | for (i = 0; i < nr_node_ids; i++) |
| 7144 | if (build_numa_sched_groups(&d, cpu_map, i)) |
Srivatsa Vaddagiri | 51888ca | 2006-06-27 02:54:38 -0700 | [diff] [blame] | 7145 | goto error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7146 | #endif |
| 7147 | |
| 7148 | /* Calculate CPU power for physical packages and nodes */ |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7149 | #ifdef CONFIG_SCHED_SMT |
Rusty Russell | abcd083 | 2008-11-25 02:35:02 +1030 | [diff] [blame] | 7150 | for_each_cpu(i, cpu_map) { |
Andreas Herrmann | 294b0c9 | 2009-08-18 13:02:29 +0200 | [diff] [blame] | 7151 | sd = &per_cpu(cpu_domains, i).sd; |
Siddha, Suresh B | 89c4710 | 2006-10-03 01:14:09 -0700 | [diff] [blame] | 7152 | init_sched_groups_power(i, sd); |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7153 | } |
| 7154 | #endif |
| 7155 | #ifdef CONFIG_SCHED_MC |
Rusty Russell | abcd083 | 2008-11-25 02:35:02 +1030 | [diff] [blame] | 7156 | for_each_cpu(i, cpu_map) { |
Andreas Herrmann | 294b0c9 | 2009-08-18 13:02:29 +0200 | [diff] [blame] | 7157 | sd = &per_cpu(core_domains, i).sd; |
Siddha, Suresh B | 89c4710 | 2006-10-03 01:14:09 -0700 | [diff] [blame] | 7158 | init_sched_groups_power(i, sd); |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7159 | } |
| 7160 | #endif |
Heiko Carstens | 01a0854 | 2010-08-31 10:28:16 +0200 | [diff] [blame] | 7161 | #ifdef CONFIG_SCHED_BOOK |
| 7162 | for_each_cpu(i, cpu_map) { |
| 7163 | sd = &per_cpu(book_domains, i).sd; |
| 7164 | init_sched_groups_power(i, sd); |
| 7165 | } |
| 7166 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7167 | |
Rusty Russell | abcd083 | 2008-11-25 02:35:02 +1030 | [diff] [blame] | 7168 | for_each_cpu(i, cpu_map) { |
Andreas Herrmann | 294b0c9 | 2009-08-18 13:02:29 +0200 | [diff] [blame] | 7169 | sd = &per_cpu(phys_domains, i).sd; |
Siddha, Suresh B | 89c4710 | 2006-10-03 01:14:09 -0700 | [diff] [blame] | 7170 | init_sched_groups_power(i, sd); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7171 | } |
| 7172 | |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 7173 | #ifdef CONFIG_NUMA |
Mike Travis | 076ac2a | 2008-05-12 21:21:12 +0200 | [diff] [blame] | 7174 | for (i = 0; i < nr_node_ids; i++) |
Andreas Herrmann | 49a02c5 | 2009-08-18 12:51:52 +0200 | [diff] [blame] | 7175 | init_numa_sched_groups_power(d.sched_group_nodes[i]); |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 7176 | |
Andreas Herrmann | 49a02c5 | 2009-08-18 12:51:52 +0200 | [diff] [blame] | 7177 | if (d.sd_allnodes) { |
Siddha, Suresh B | 6711cab | 2006-12-10 02:20:07 -0800 | [diff] [blame] | 7178 | struct sched_group *sg; |
Siddha, Suresh B | f712c0c | 2006-07-30 03:02:59 -0700 | [diff] [blame] | 7179 | |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 7180 | cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg, |
Andreas Herrmann | 49a02c5 | 2009-08-18 12:51:52 +0200 | [diff] [blame] | 7181 | d.tmpmask); |
Siddha, Suresh B | f712c0c | 2006-07-30 03:02:59 -0700 | [diff] [blame] | 7182 | init_numa_sched_groups_power(sg); |
| 7183 | } |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 7184 | #endif |
| 7185 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7186 | /* Attach the domains */ |
Rusty Russell | abcd083 | 2008-11-25 02:35:02 +1030 | [diff] [blame] | 7187 | for_each_cpu(i, cpu_map) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7188 | #ifdef CONFIG_SCHED_SMT |
Rusty Russell | 6c99e9a | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 7189 | sd = &per_cpu(cpu_domains, i).sd; |
Siddha, Suresh B | 1e9f28f | 2006-03-27 01:15:22 -0800 | [diff] [blame] | 7190 | #elif defined(CONFIG_SCHED_MC) |
Rusty Russell | 6c99e9a | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 7191 | sd = &per_cpu(core_domains, i).sd; |
Heiko Carstens | 01a0854 | 2010-08-31 10:28:16 +0200 | [diff] [blame] | 7192 | #elif defined(CONFIG_SCHED_BOOK) |
| 7193 | sd = &per_cpu(book_domains, i).sd; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7194 | #else |
Rusty Russell | 6c99e9a | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 7195 | sd = &per_cpu(phys_domains, i).sd; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7196 | #endif |
Andreas Herrmann | 49a02c5 | 2009-08-18 12:51:52 +0200 | [diff] [blame] | 7197 | cpu_attach_domain(sd, d.rd, i); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7198 | } |
Srivatsa Vaddagiri | 51888ca | 2006-06-27 02:54:38 -0700 | [diff] [blame] | 7199 | |
Andreas Herrmann | 2109b99 | 2009-08-18 12:53:00 +0200 | [diff] [blame] | 7200 | d.sched_group_nodes = NULL; /* don't free this we still need it */ |
| 7201 | __free_domain_allocs(&d, sa_tmpmask, cpu_map); |
| 7202 | return 0; |
Rusty Russell | 3404c8d | 2008-11-25 02:35:03 +1030 | [diff] [blame] | 7203 | |
Srivatsa Vaddagiri | 51888ca | 2006-06-27 02:54:38 -0700 | [diff] [blame] | 7204 | error: |
Andreas Herrmann | 2109b99 | 2009-08-18 12:53:00 +0200 | [diff] [blame] | 7205 | __free_domain_allocs(&d, alloc_state, cpu_map); |
| 7206 | return -ENOMEM; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7207 | } |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7208 | |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 7209 | static int build_sched_domains(const struct cpumask *cpu_map) |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 7210 | { |
| 7211 | return __build_sched_domains(cpu_map, NULL); |
| 7212 | } |
| 7213 | |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7214 | static cpumask_var_t *doms_cur; /* current sched domains */ |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7215 | static int ndoms_cur; /* number of sched domains in 'doms_cur' */ |
Ingo Molnar | 4285f594 | 2008-05-16 17:47:14 +0200 | [diff] [blame] | 7216 | static struct sched_domain_attr *dattr_cur; |
| 7217 | /* attributes of custom domains in 'doms_cur' */ |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7218 | |
| 7219 | /* |
| 7220 | * Special case: If a kmalloc of a doms_cur partition (array of |
Rusty Russell | 4212823 | 2008-11-25 02:35:12 +1030 | [diff] [blame] | 7221 | * cpumask) fails, then fall back to a single sched domain, |
| 7222 | * as determined by the single cpumask fallback_doms. |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7223 | */ |
Rusty Russell | 4212823 | 2008-11-25 02:35:12 +1030 | [diff] [blame] | 7224 | static cpumask_var_t fallback_doms; |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7225 | |
Heiko Carstens | ee79d1b | 2008-12-09 18:49:50 +0100 | [diff] [blame] | 7226 | /* |
| 7227 | * arch_update_cpu_topology lets virtualized architectures update the |
| 7228 | * cpu core maps. It is supposed to return 1 if the topology changed |
| 7229 | * or 0 if it stayed the same. |
| 7230 | */ |
| 7231 | int __attribute__((weak)) arch_update_cpu_topology(void) |
Heiko Carstens | 22e52b0 | 2008-03-12 18:31:59 +0100 | [diff] [blame] | 7232 | { |
Heiko Carstens | ee79d1b | 2008-12-09 18:49:50 +0100 | [diff] [blame] | 7233 | return 0; |
Heiko Carstens | 22e52b0 | 2008-03-12 18:31:59 +0100 | [diff] [blame] | 7234 | } |
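/*
 * Illustrative sketch (hypothetical architecture code, not part of this
 * file): an arch whose core maps can change at runtime overrides the weak
 * stub above and returns 1 when something changed, which makes
 * partition_sched_domains() rebuild all domains instead of reusing old ones
 * that compare equal.
 */
#if 0
int arch_update_cpu_topology(void)
{
	/* example_refresh_core_maps() is a hypothetical arch helper */
	return example_refresh_core_maps() ? 1 : 0;
}
#endif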
| 7235 | |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7236 | cpumask_var_t *alloc_sched_domains(unsigned int ndoms) |
| 7237 | { |
| 7238 | int i; |
| 7239 | cpumask_var_t *doms; |
| 7240 | |
| 7241 | doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL); |
| 7242 | if (!doms) |
| 7243 | return NULL; |
| 7244 | for (i = 0; i < ndoms; i++) { |
| 7245 | if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) { |
| 7246 | free_sched_domains(doms, i); |
| 7247 | return NULL; |
| 7248 | } |
| 7249 | } |
| 7250 | return doms; |
| 7251 | } |
| 7252 | |
| 7253 | void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms) |
| 7254 | { |
| 7255 | unsigned int i; |
| 7256 | for (i = 0; i < ndoms; i++) |
| 7257 | free_cpumask_var(doms[i]); |
| 7258 | kfree(doms); |
| 7259 | } |
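/*
 * Illustrative sketch (hypothetical caller in the style of the cpuset code,
 * not part of this file): build two non-overlapping partitions and hand
 * them to partition_sched_domains(), which takes ownership of the array and
 * frees it on a later repartition.
 */
#if 0
static void example_two_partitions(const struct cpumask *a,
				   const struct cpumask *b)
{
	cpumask_var_t *doms = alloc_sched_domains(2);

	if (!doms)
		return;
	cpumask_copy(doms[0], a);
	cpumask_copy(doms[1], b);

	get_online_cpus();
	partition_sched_domains(2, doms, NULL);
	put_online_cpus();
}
#endif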
| 7260 | |
Dinakar Guniguntala | 1a20ff2 | 2005-06-25 14:57:33 -0700 | [diff] [blame] | 7261 | /* |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 7262 | * Set up scheduler domains and groups. Callers must hold the hotplug lock. |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7263 | * For now this just excludes isolated cpus, but could be used to |
| 7264 | * exclude other special cases in the future. |
Dinakar Guniguntala | 1a20ff2 | 2005-06-25 14:57:33 -0700 | [diff] [blame] | 7265 | */ |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 7266 | static int arch_init_sched_domains(const struct cpumask *cpu_map) |
Dinakar Guniguntala | 1a20ff2 | 2005-06-25 14:57:33 -0700 | [diff] [blame] | 7267 | { |
Milton Miller | 7378547 | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 7268 | int err; |
| 7269 | |
Heiko Carstens | 22e52b0 | 2008-03-12 18:31:59 +0100 | [diff] [blame] | 7270 | arch_update_cpu_topology(); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7271 | ndoms_cur = 1; |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7272 | doms_cur = alloc_sched_domains(ndoms_cur); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7273 | if (!doms_cur) |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7274 | doms_cur = &fallback_doms; |
| 7275 | cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map); |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 7276 | dattr_cur = NULL; |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7277 | err = build_sched_domains(doms_cur[0]); |
Milton Miller | 6382bc9 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 7278 | register_sched_domain_sysctl(); |
Milton Miller | 7378547 | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 7279 | |
| 7280 | return err; |
Dinakar Guniguntala | 1a20ff2 | 2005-06-25 14:57:33 -0700 | [diff] [blame] | 7281 | } |
| 7282 | |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 7283 | static void arch_destroy_sched_domains(const struct cpumask *cpu_map, |
| 7284 | struct cpumask *tmpmask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7285 | { |
Mike Travis | 7c16ec5 | 2008-04-04 18:11:11 -0700 | [diff] [blame] | 7286 | free_sched_groups(cpu_map, tmpmask); |
John Hawkes | 9c1cfda | 2005-09-06 15:18:14 -0700 | [diff] [blame] | 7287 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7288 | |
Dinakar Guniguntala | 1a20ff2 | 2005-06-25 14:57:33 -0700 | [diff] [blame] | 7289 | /* |
| 7290 | * Detach sched domains from a group of cpus specified in cpu_map |
| 7291 | * These cpus will now be attached to the NULL domain |
| 7292 | */ |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 7293 | static void detach_destroy_domains(const struct cpumask *cpu_map) |
Dinakar Guniguntala | 1a20ff2 | 2005-06-25 14:57:33 -0700 | [diff] [blame] | 7294 | { |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 7295 | /* Safe because the hotplug lock is held. */ |
| 7296 | static DECLARE_BITMAP(tmpmask, CONFIG_NR_CPUS); |
Dinakar Guniguntala | 1a20ff2 | 2005-06-25 14:57:33 -0700 | [diff] [blame] | 7297 | int i; |
| 7298 | |
Rusty Russell | abcd083 | 2008-11-25 02:35:02 +1030 | [diff] [blame] | 7299 | for_each_cpu(i, cpu_map) |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 7300 | cpu_attach_domain(NULL, &def_root_domain, i); |
Dinakar Guniguntala | 1a20ff2 | 2005-06-25 14:57:33 -0700 | [diff] [blame] | 7301 | synchronize_sched(); |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 7302 | arch_destroy_sched_domains(cpu_map, to_cpumask(tmpmask)); |
Dinakar Guniguntala | 1a20ff2 | 2005-06-25 14:57:33 -0700 | [diff] [blame] | 7303 | } |
| 7304 | |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 7305 | /* handle null as "default" */ |
| 7306 | static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, |
| 7307 | struct sched_domain_attr *new, int idx_new) |
| 7308 | { |
| 7309 | struct sched_domain_attr tmp; |
| 7310 | |
| 7311 | /* fast path */ |
| 7312 | if (!new && !cur) |
| 7313 | return 1; |
| 7314 | |
| 7315 | tmp = SD_ATTR_INIT; |
| 7316 | return !memcmp(cur ? (cur + idx_cur) : &tmp, |
| 7317 | new ? (new + idx_new) : &tmp, |
| 7318 | sizeof(struct sched_domain_attr)); |
| 7319 | } |
| 7320 | |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7321 | /* |
| 7322 | * Partition sched domains as specified by the 'ndoms_new' |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 7323 | * cpumasks in the array doms_new[] of cpumasks. This compares |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7324 | * doms_new[] to the current sched domain partitioning, doms_cur[]. |
| 7325 | * It destroys each deleted domain and builds each new domain. |
| 7326 | * |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7327 | * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'. |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 7328 | * The masks don't intersect (don't overlap). We should set up one |
| 7329 | * sched domain for each mask. CPUs not in any of the cpumasks will |
| 7330 | * not be load balanced. If the same cpumask appears both in the |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7331 | * current 'doms_cur' domains and in the new 'doms_new', we can leave |
| 7332 | * it as it is. |
| 7333 | * |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7334 | * The passed-in 'doms_new' should be allocated using |
| 7335 | * alloc_sched_domains. This routine takes ownership of it and will |
| 7336 | * free it via free_sched_domains() when done with it. If the caller |
| 7337 | * failed the alloc call, then it can pass in doms_new == NULL && ndoms_new == 1, |
| 7338 | * and partition_sched_domains() will fall back to the single partition |
| 7339 | * 'fallback_doms'; this also forces the domains to be rebuilt. |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7340 | * |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 7341 | * If doms_new == NULL it will be replaced with cpu_active_mask (minus isolated cpus). |
Li Zefan | 700018e | 2008-11-18 14:02:03 +0800 | [diff] [blame] | 7342 | * ndoms_new == 0 is a special case for destroying existing domains, |
| 7343 | * and it will not create the default domain. |
Max Krasnyansky | dfb512e | 2008-08-29 13:11:41 -0700 | [diff] [blame] | 7344 | * |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7345 | * Call with hotplug lock held |
| 7346 | */ |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7347 | void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 7348 | struct sched_domain_attr *dattr_new) |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7349 | { |
Max Krasnyansky | dfb512e | 2008-08-29 13:11:41 -0700 | [diff] [blame] | 7350 | int i, j, n; |
Heiko Carstens | d65bd5e | 2008-12-09 18:49:51 +0100 | [diff] [blame] | 7351 | int new_topology; |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7352 | |
Heiko Carstens | 712555e | 2008-04-28 11:33:07 +0200 | [diff] [blame] | 7353 | mutex_lock(&sched_domains_mutex); |
Srivatsa Vaddagiri | a183561 | 2008-01-25 21:08:00 +0100 | [diff] [blame] | 7354 | |
Milton Miller | 7378547 | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 7355 | /* always unregister in case we don't destroy any domains */ |
| 7356 | unregister_sched_domain_sysctl(); |
| 7357 | |
Heiko Carstens | d65bd5e | 2008-12-09 18:49:51 +0100 | [diff] [blame] | 7358 | /* Let architecture update cpu core mappings. */ |
| 7359 | new_topology = arch_update_cpu_topology(); |
| 7360 | |
Max Krasnyansky | dfb512e | 2008-08-29 13:11:41 -0700 | [diff] [blame] | 7361 | n = doms_new ? ndoms_new : 0; |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7362 | |
| 7363 | /* Destroy deleted domains */ |
| 7364 | for (i = 0; i < ndoms_cur; i++) { |
Heiko Carstens | d65bd5e | 2008-12-09 18:49:51 +0100 | [diff] [blame] | 7365 | for (j = 0; j < n && !new_topology; j++) { |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7366 | if (cpumask_equal(doms_cur[i], doms_new[j]) |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 7367 | && dattrs_equal(dattr_cur, i, dattr_new, j)) |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7368 | goto match1; |
| 7369 | } |
| 7370 | /* no match - a current sched domain not in new doms_new[] */ |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7371 | detach_destroy_domains(doms_cur[i]); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7372 | match1: |
| 7373 | ; |
| 7374 | } |
| 7375 | |
Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 7376 | if (doms_new == NULL) { |
| 7377 | ndoms_cur = 0; |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7378 | doms_new = &fallback_doms; |
Peter Zijlstra | 6ad4c18 | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 7379 | cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map); |
Li Zefan | faa2f98 | 2008-11-04 16:20:23 +0800 | [diff] [blame] | 7380 | WARN_ON_ONCE(dattr_new); |
Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 7381 | } |
| 7382 | |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7383 | /* Build new domains */ |
| 7384 | for (i = 0; i < ndoms_new; i++) { |
Heiko Carstens | d65bd5e | 2008-12-09 18:49:51 +0100 | [diff] [blame] | 7385 | for (j = 0; j < ndoms_cur && !new_topology; j++) { |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7386 | if (cpumask_equal(doms_new[i], doms_cur[j]) |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 7387 | && dattrs_equal(dattr_new, i, dattr_cur, j)) |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7388 | goto match2; |
| 7389 | } |
| 7390 | /* no match - add a new doms_new */ |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7391 | __build_sched_domains(doms_new[i], |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 7392 | dattr_new ? dattr_new + i : NULL); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7393 | match2: |
| 7394 | ; |
| 7395 | } |
| 7396 | |
| 7397 | /* Remember the new sched domains */ |
Rusty Russell | acc3f5d | 2009-11-03 14:53:40 +1030 | [diff] [blame] | 7398 | if (doms_cur != &fallback_doms) |
| 7399 | free_sched_domains(doms_cur, ndoms_cur); |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 7400 | kfree(dattr_cur); /* kfree(NULL) is safe */ |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7401 | doms_cur = doms_new; |
Hidetoshi Seto | 1d3504f | 2008-04-15 14:04:23 +0900 | [diff] [blame] | 7402 | dattr_cur = dattr_new; |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7403 | ndoms_cur = ndoms_new; |
Milton Miller | 7378547 | 2007-10-24 18:23:48 +0200 | [diff] [blame] | 7404 | |
| 7405 | register_sched_domain_sysctl(); |
Srivatsa Vaddagiri | a183561 | 2008-01-25 21:08:00 +0100 | [diff] [blame] | 7406 | |
Heiko Carstens | 712555e | 2008-04-28 11:33:07 +0200 | [diff] [blame] | 7407 | mutex_unlock(&sched_domains_mutex); |
Paul Jackson | 029190c | 2007-10-18 23:40:20 -0700 | [diff] [blame] | 7408 | } |
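/*
 * Illustrative sketch (editor's addition, not from the original file): a
 * hypothetical caller honouring the ownership rules documented for
 * partition_sched_domains() above.  alloc_sched_domains(), free_sched_domains(),
 * partition_sched_domains() and cpumask_copy() are the real interfaces here;
 * nr_example_partitions and example_partition_mask() are placeholders.
 * Assumes the hotplug lock is held, as required above.
 *
 *	cpumask_var_t *doms;
 *	int i, ndoms = nr_example_partitions;
 *
 *	doms = alloc_sched_domains(ndoms);
 *	if (!doms) {
 *		// allocation failed: fall back to the single 'fallback_doms' partition
 *		partition_sched_domains(1, NULL, NULL);
 *	} else {
 *		for (i = 0; i < ndoms; i++)
 *			cpumask_copy(doms[i], example_partition_mask(i));
 *		// ownership of 'doms' passes to the scheduler here; it is later
 *		// released with free_sched_domains() on the next repartition
 *		partition_sched_domains(ndoms, doms, NULL);
 *	}
 */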
| 7409 | |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7410 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) |
Li Zefan | c70f22d | 2009-01-05 19:07:50 +0800 | [diff] [blame] | 7411 | static void arch_reinit_sched_domains(void) |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7412 | { |
Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 7413 | get_online_cpus(); |
Max Krasnyansky | dfb512e | 2008-08-29 13:11:41 -0700 | [diff] [blame] | 7414 | |
| 7415 | /* Destroy domains first to force the rebuild */ |
| 7416 | partition_sched_domains(0, NULL, NULL); |
| 7417 | |
Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 7418 | rebuild_sched_domains(); |
Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 7419 | put_online_cpus(); |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7420 | } |
| 7421 | |
| 7422 | static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt) |
| 7423 | { |
Gautham R Shenoy | afb8a9b | 2008-12-18 23:26:09 +0530 | [diff] [blame] | 7424 | unsigned int level = 0; |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7425 | |
Gautham R Shenoy | afb8a9b | 2008-12-18 23:26:09 +0530 | [diff] [blame] | 7426 | if (sscanf(buf, "%u", &level) != 1) |
| 7427 | return -EINVAL; |
| 7428 | |
| 7429 | /* |
| 7430 |  * level is always positive, so there is no need to check for |
| 7431 |  * level < POWERSAVINGS_BALANCE_NONE, which is 0. |
| 7432 |  * What happens on a 0 or 1 byte write? Do we |
| 7433 |  * need to check count as well? |
| 7434 | */ |
| 7435 | |
| 7436 | if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS) |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7437 | return -EINVAL; |
| 7438 | |
| 7439 | if (smt) |
Gautham R Shenoy | afb8a9b | 2008-12-18 23:26:09 +0530 | [diff] [blame] | 7440 | sched_smt_power_savings = level; |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7441 | else |
Gautham R Shenoy | afb8a9b | 2008-12-18 23:26:09 +0530 | [diff] [blame] | 7442 | sched_mc_power_savings = level; |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7443 | |
Li Zefan | c70f22d | 2009-01-05 19:07:50 +0800 | [diff] [blame] | 7444 | arch_reinit_sched_domains(); |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7445 | |
Li Zefan | c70f22d | 2009-01-05 19:07:50 +0800 | [diff] [blame] | 7446 | return count; |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7447 | } |
| 7448 | |
Adrian Bunk | 6707de00 | 2007-08-12 18:08:19 +0200 | [diff] [blame] | 7449 | #ifdef CONFIG_SCHED_MC |
Andi Kleen | f718cd4 | 2008-07-29 22:33:52 -0700 | [diff] [blame] | 7450 | static ssize_t sched_mc_power_savings_show(struct sysdev_class *class, |
Andi Kleen | c9be0a3 | 2010-01-05 12:47:58 +0100 | [diff] [blame] | 7451 | struct sysdev_class_attribute *attr, |
Andi Kleen | f718cd4 | 2008-07-29 22:33:52 -0700 | [diff] [blame] | 7452 | char *page) |
Adrian Bunk | 6707de00 | 2007-08-12 18:08:19 +0200 | [diff] [blame] | 7453 | { |
| 7454 | return sprintf(page, "%u\n", sched_mc_power_savings); |
| 7455 | } |
Andi Kleen | f718cd4 | 2008-07-29 22:33:52 -0700 | [diff] [blame] | 7456 | static ssize_t sched_mc_power_savings_store(struct sysdev_class *class, |
Andi Kleen | c9be0a3 | 2010-01-05 12:47:58 +0100 | [diff] [blame] | 7457 | struct sysdev_class_attribute *attr, |
Adrian Bunk | 6707de00 | 2007-08-12 18:08:19 +0200 | [diff] [blame] | 7458 | const char *buf, size_t count) |
| 7459 | { |
| 7460 | return sched_power_savings_store(buf, count, 0); |
| 7461 | } |
Andi Kleen | f718cd4 | 2008-07-29 22:33:52 -0700 | [diff] [blame] | 7462 | static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644, |
| 7463 | sched_mc_power_savings_show, |
| 7464 | sched_mc_power_savings_store); |
Adrian Bunk | 6707de00 | 2007-08-12 18:08:19 +0200 | [diff] [blame] | 7465 | #endif |
| 7466 | |
| 7467 | #ifdef CONFIG_SCHED_SMT |
Andi Kleen | f718cd4 | 2008-07-29 22:33:52 -0700 | [diff] [blame] | 7468 | static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev, |
Andi Kleen | c9be0a3 | 2010-01-05 12:47:58 +0100 | [diff] [blame] | 7469 | struct sysdev_class_attribute *attr, |
Andi Kleen | f718cd4 | 2008-07-29 22:33:52 -0700 | [diff] [blame] | 7470 | char *page) |
Adrian Bunk | 6707de00 | 2007-08-12 18:08:19 +0200 | [diff] [blame] | 7471 | { |
| 7472 | return sprintf(page, "%u\n", sched_smt_power_savings); |
| 7473 | } |
Andi Kleen | f718cd4 | 2008-07-29 22:33:52 -0700 | [diff] [blame] | 7474 | static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev, |
Andi Kleen | c9be0a3 | 2010-01-05 12:47:58 +0100 | [diff] [blame] | 7475 | struct sysdev_class_attribute *attr, |
Adrian Bunk | 6707de00 | 2007-08-12 18:08:19 +0200 | [diff] [blame] | 7476 | const char *buf, size_t count) |
| 7477 | { |
| 7478 | return sched_power_savings_store(buf, count, 1); |
| 7479 | } |
Andi Kleen | f718cd4 | 2008-07-29 22:33:52 -0700 | [diff] [blame] | 7480 | static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644, |
| 7481 | sched_smt_power_savings_show, |
Adrian Bunk | 6707de00 | 2007-08-12 18:08:19 +0200 | [diff] [blame] | 7482 | sched_smt_power_savings_store); |
| 7483 | #endif |
| 7484 | |
Li Zefan | 39aac64 | 2009-01-05 19:18:02 +0800 | [diff] [blame] | 7485 | int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls) |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7486 | { |
| 7487 | int err = 0; |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 7488 | |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7489 | #ifdef CONFIG_SCHED_SMT |
| 7490 | if (smt_capable()) |
| 7491 | err = sysfs_create_file(&cls->kset.kobj, |
| 7492 | &attr_sched_smt_power_savings.attr); |
| 7493 | #endif |
| 7494 | #ifdef CONFIG_SCHED_MC |
| 7495 | if (!err && mc_capable()) |
| 7496 | err = sysfs_create_file(&cls->kset.kobj, |
| 7497 | &attr_sched_mc_power_savings.attr); |
| 7498 | #endif |
| 7499 | return err; |
| 7500 | } |
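/*
 * Usage note (editor's addition, not from the original file): with
 * CONFIG_SCHED_MC and/or CONFIG_SCHED_SMT enabled,
 * sched_create_sysfs_power_savings_entries() above registers these attributes
 * under the cpu sysdev class, so a power savings level would typically be
 * selected from userspace roughly like:
 *
 *	# echo 1 > /sys/devices/system/cpu/sched_mc_power_savings
 *	# echo 1 > /sys/devices/system/cpu/sched_smt_power_savings
 *
 * sched_power_savings_store() rejects anything that does not parse as an
 * unsigned integer below MAX_POWERSAVINGS_BALANCE_LEVELS with -EINVAL; the
 * exact sysfs path depends on how the architecture registers the cpu
 * sysdev class.
 */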
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 7501 | #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */ |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 7502 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7503 | /* |
Tejun Heo | 3a101d0 | 2010-06-08 21:40:36 +0200 | [diff] [blame] | 7504 | * Update cpusets according to cpu_active mask. If cpusets are |
| 7505 | * disabled, cpuset_update_active_cpus() becomes a simple wrapper |
| 7506 | * around partition_sched_domains(). |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7507 | */ |
Tejun Heo | 0b2e918 | 2010-06-21 23:53:31 +0200 | [diff] [blame] | 7508 | static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action, |
| 7509 | void *hcpu) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7510 | { |
Tejun Heo | 3a101d0 | 2010-06-08 21:40:36 +0200 | [diff] [blame] | 7511 | switch (action & ~CPU_TASKS_FROZEN) { |
Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 7512 | case CPU_ONLINE: |
Peter Zijlstra | 6ad4c18 | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 7513 | case CPU_DOWN_FAILED: |
Tejun Heo | 3a101d0 | 2010-06-08 21:40:36 +0200 | [diff] [blame] | 7514 | cpuset_update_active_cpus(); |
Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 7515 | return NOTIFY_OK; |
Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 7516 | default: |
| 7517 | return NOTIFY_DONE; |
| 7518 | } |
| 7519 | } |
Tejun Heo | 3a101d0 | 2010-06-08 21:40:36 +0200 | [diff] [blame] | 7520 | |
Tejun Heo | 0b2e918 | 2010-06-21 23:53:31 +0200 | [diff] [blame] | 7521 | static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action, |
| 7522 | void *hcpu) |
Tejun Heo | 3a101d0 | 2010-06-08 21:40:36 +0200 | [diff] [blame] | 7523 | { |
| 7524 | switch (action & ~CPU_TASKS_FROZEN) { |
| 7525 | case CPU_DOWN_PREPARE: |
| 7526 | cpuset_update_active_cpus(); |
| 7527 | return NOTIFY_OK; |
| 7528 | default: |
| 7529 | return NOTIFY_DONE; |
| 7530 | } |
| 7531 | } |
Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 7532 | |
| 7533 | static int update_runtime(struct notifier_block *nfb, |
| 7534 | unsigned long action, void *hcpu) |
| 7535 | { |
Peter Zijlstra | 7def2be | 2008-06-05 14:49:58 +0200 | [diff] [blame] | 7536 | int cpu = (int)(long)hcpu; |
| 7537 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7538 | switch (action) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7539 | case CPU_DOWN_PREPARE: |
Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 7540 | case CPU_DOWN_PREPARE_FROZEN: |
Peter Zijlstra | 7def2be | 2008-06-05 14:49:58 +0200 | [diff] [blame] | 7541 | disable_runtime(cpu_rq(cpu)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7542 | return NOTIFY_OK; |
| 7543 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7544 | case CPU_DOWN_FAILED: |
Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 7545 | case CPU_DOWN_FAILED_FROZEN: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7546 | case CPU_ONLINE: |
Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 7547 | case CPU_ONLINE_FROZEN: |
Peter Zijlstra | 7def2be | 2008-06-05 14:49:58 +0200 | [diff] [blame] | 7548 | enable_runtime(cpu_rq(cpu)); |
Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 7549 | return NOTIFY_OK; |
| 7550 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7551 | default: |
| 7552 | return NOTIFY_DONE; |
| 7553 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7554 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7555 | |
| 7556 | void __init sched_init_smp(void) |
| 7557 | { |
Rusty Russell | dcc30a3 | 2008-11-25 02:35:12 +1030 | [diff] [blame] | 7558 | cpumask_var_t non_isolated_cpus; |
| 7559 | |
| 7560 | alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL); |
Yong Zhang | cb5fd13 | 2009-09-14 20:20:16 +0800 | [diff] [blame] | 7561 | alloc_cpumask_var(&fallback_doms, GFP_KERNEL); |
Nick Piggin | 5c1e176 | 2006-10-03 01:14:04 -0700 | [diff] [blame] | 7562 | |
Mike Travis | 434d53b | 2008-04-04 18:11:04 -0700 | [diff] [blame] | 7563 | #if defined(CONFIG_NUMA) |
| 7564 | sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **), |
| 7565 | GFP_KERNEL); |
| 7566 | BUG_ON(sched_group_nodes_bycpu == NULL); |
| 7567 | #endif |
Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 7568 | get_online_cpus(); |
Heiko Carstens | 712555e | 2008-04-28 11:33:07 +0200 | [diff] [blame] | 7569 | mutex_lock(&sched_domains_mutex); |
Peter Zijlstra | 6ad4c18 | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 7570 | arch_init_sched_domains(cpu_active_mask); |
Rusty Russell | dcc30a3 | 2008-11-25 02:35:12 +1030 | [diff] [blame] | 7571 | cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); |
| 7572 | if (cpumask_empty(non_isolated_cpus)) |
| 7573 | cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); |
Heiko Carstens | 712555e | 2008-04-28 11:33:07 +0200 | [diff] [blame] | 7574 | mutex_unlock(&sched_domains_mutex); |
Gautham R Shenoy | 95402b3 | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 7575 | put_online_cpus(); |
Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 7576 | |
Tejun Heo | 3a101d0 | 2010-06-08 21:40:36 +0200 | [diff] [blame] | 7577 | hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE); |
| 7578 | hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE); |
Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 7579 | |
| 7580 | /* RT runtime code needs to handle some hotplug events */ |
| 7581 | hotcpu_notifier(update_runtime, 0); |
| 7582 | |
Peter Zijlstra | b328ca1 | 2008-04-29 10:02:46 +0200 | [diff] [blame] | 7583 | init_hrtick(); |
Nick Piggin | 5c1e176 | 2006-10-03 01:14:04 -0700 | [diff] [blame] | 7584 | |
| 7585 | /* Move init over to a non-isolated CPU */ |
Rusty Russell | dcc30a3 | 2008-11-25 02:35:12 +1030 | [diff] [blame] | 7586 | if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0) |
Nick Piggin | 5c1e176 | 2006-10-03 01:14:04 -0700 | [diff] [blame] | 7587 | BUG(); |
Ingo Molnar | 19978ca | 2007-11-09 22:39:38 +0100 | [diff] [blame] | 7588 | sched_init_granularity(); |
Rusty Russell | dcc30a3 | 2008-11-25 02:35:12 +1030 | [diff] [blame] | 7589 | free_cpumask_var(non_isolated_cpus); |
Rusty Russell | 4212823 | 2008-11-25 02:35:12 +1030 | [diff] [blame] | 7590 | |
Rusty Russell | 0e3900e | 2008-11-25 02:35:13 +1030 | [diff] [blame] | 7591 | init_sched_rt_class(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7592 | } |
| 7593 | #else |
| 7594 | void __init sched_init_smp(void) |
| 7595 | { |
Ingo Molnar | 19978ca | 2007-11-09 22:39:38 +0100 | [diff] [blame] | 7596 | sched_init_granularity(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7597 | } |
| 7598 | #endif /* CONFIG_SMP */ |
| 7599 | |
Arun R Bharadwaj | cd1bb94 | 2009-04-16 12:15:34 +0530 | [diff] [blame] | 7600 | const_debug unsigned int sysctl_timer_migration = 1; |
| 7601 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7602 | int in_sched_functions(unsigned long addr) |
| 7603 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7604 | return in_lock_functions(addr) || |
| 7605 | (addr >= (unsigned long)__sched_text_start |
| 7606 | && addr < (unsigned long)__sched_text_end); |
| 7607 | } |
| 7608 | |
Alexey Dobriyan | a995744 | 2007-10-15 17:00:13 +0200 | [diff] [blame] | 7609 | static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq) |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 7610 | { |
| 7611 | cfs_rq->tasks_timeline = RB_ROOT; |
Peter Zijlstra | 4a55bd5 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 7612 | INIT_LIST_HEAD(&cfs_rq->tasks); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 7613 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 7614 | cfs_rq->rq = rq; |
| 7615 | #endif |
Peter Zijlstra | 67e9fb2 | 2007-10-15 17:00:10 +0200 | [diff] [blame] | 7616 | cfs_rq->min_vruntime = (u64)(-(1LL << 20)); |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 7617 | } |
| 7618 | |
Peter Zijlstra | fa85ae2 | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 7619 | static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq) |
| 7620 | { |
| 7621 | struct rt_prio_array *array; |
| 7622 | int i; |
| 7623 | |
| 7624 | array = &rt_rq->active; |
| 7625 | for (i = 0; i < MAX_RT_PRIO; i++) { |
| 7626 | INIT_LIST_HEAD(array->queue + i); |
| 7627 | __clear_bit(i, array->bitmap); |
| 7628 | } |
| 7629 | /* delimiter for bitsearch: */ |
| 7630 | __set_bit(MAX_RT_PRIO, array->bitmap); |
| 7631 | |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 7632 | #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED |
Gregory Haskins | e864c49 | 2008-12-29 09:39:49 -0500 | [diff] [blame] | 7633 | rt_rq->highest_prio.curr = MAX_RT_PRIO; |
Gregory Haskins | 398a153 | 2009-01-14 09:10:04 -0500 | [diff] [blame] | 7634 | #ifdef CONFIG_SMP |
Gregory Haskins | e864c49 | 2008-12-29 09:39:49 -0500 | [diff] [blame] | 7635 | rt_rq->highest_prio.next = MAX_RT_PRIO; |
Peter Zijlstra | 48d5e25 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 7636 | #endif |
Peter Zijlstra | fa85ae2 | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 7637 | #endif |
| 7638 | #ifdef CONFIG_SMP |
| 7639 | rt_rq->rt_nr_migratory = 0; |
Peter Zijlstra | fa85ae2 | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 7640 | rt_rq->overloaded = 0; |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 7641 | plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock); |
Peter Zijlstra | fa85ae2 | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 7642 | #endif |
| 7643 | |
| 7644 | rt_rq->rt_time = 0; |
| 7645 | rt_rq->rt_throttled = 0; |
Peter Zijlstra | ac086bc | 2008-04-19 19:44:58 +0200 | [diff] [blame] | 7646 | rt_rq->rt_runtime = 0; |
Thomas Gleixner | 0986b11 | 2009-11-17 15:32:06 +0100 | [diff] [blame] | 7647 | raw_spin_lock_init(&rt_rq->rt_runtime_lock); |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 7648 | |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 7649 | #ifdef CONFIG_RT_GROUP_SCHED |
Peter Zijlstra | 23b0fdf | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 7650 | rt_rq->rt_nr_boosted = 0; |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 7651 | rt_rq->rq = rq; |
| 7652 | #endif |
Peter Zijlstra | fa85ae2 | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 7653 | } |
| 7654 | |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 7655 | #ifdef CONFIG_FAIR_GROUP_SCHED |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 7656 | static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, |
Peter Zijlstra | 3d4b47b | 2010-11-15 15:47:01 -0800 | [diff] [blame] | 7657 | struct sched_entity *se, int cpu, |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 7658 | struct sched_entity *parent) |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 7659 | { |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 7660 | struct rq *rq = cpu_rq(cpu); |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 7661 | tg->cfs_rq[cpu] = cfs_rq; |
| 7662 | init_cfs_rq(cfs_rq, rq); |
| 7663 | cfs_rq->tg = tg; |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 7664 | |
| 7665 | tg->se[cpu] = se; |
Dhaval Giani | 354d60c | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 7666 | /* se could be NULL for init_task_group */ |
| 7667 | if (!se) |
| 7668 | return; |
| 7669 | |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 7670 | if (!parent) |
| 7671 | se->cfs_rq = &rq->cfs; |
| 7672 | else |
| 7673 | se->cfs_rq = parent->my_q; |
| 7674 | |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 7675 | se->my_q = cfs_rq; |
Paul Turner | 9437178 | 2010-11-15 15:47:10 -0800 | [diff] [blame] | 7676 | update_load_set(&se->load, 0); |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 7677 | se->parent = parent; |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 7678 | } |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 7679 | #endif |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 7680 | |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 7681 | #ifdef CONFIG_RT_GROUP_SCHED |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 7682 | static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, |
Peter Zijlstra | 3d4b47b | 2010-11-15 15:47:01 -0800 | [diff] [blame] | 7683 | struct sched_rt_entity *rt_se, int cpu, |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 7684 | struct sched_rt_entity *parent) |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 7685 | { |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 7686 | struct rq *rq = cpu_rq(cpu); |
| 7687 | |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 7688 | tg->rt_rq[cpu] = rt_rq; |
| 7689 | init_rt_rq(rt_rq, rq); |
| 7690 | rt_rq->tg = tg; |
Peter Zijlstra | ac086bc | 2008-04-19 19:44:58 +0200 | [diff] [blame] | 7691 | rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime; |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 7692 | |
| 7693 | tg->rt_se[cpu] = rt_se; |
Dhaval Giani | 354d60c | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 7694 | if (!rt_se) |
| 7695 | return; |
| 7696 | |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 7697 | if (!parent) |
| 7698 | rt_se->rt_rq = &rq->rt; |
| 7699 | else |
| 7700 | rt_se->rt_rq = parent->my_q; |
| 7701 | |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 7702 | rt_se->my_q = rt_rq; |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 7703 | rt_se->parent = parent; |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 7704 | INIT_LIST_HEAD(&rt_se->run_list); |
| 7705 | } |
| 7706 | #endif |
| 7707 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7708 | void __init sched_init(void) |
| 7709 | { |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 7710 | int i, j; |
Mike Travis | 434d53b | 2008-04-04 18:11:04 -0700 | [diff] [blame] | 7711 | unsigned long alloc_size = 0, ptr; |
| 7712 | |
| 7713 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 7714 | alloc_size += 2 * nr_cpu_ids * sizeof(void **); |
| 7715 | #endif |
| 7716 | #ifdef CONFIG_RT_GROUP_SCHED |
| 7717 | alloc_size += 2 * nr_cpu_ids * sizeof(void **); |
| 7718 | #endif |
Rusty Russell | df7c8e8 | 2009-03-19 15:22:20 +1030 | [diff] [blame] | 7719 | #ifdef CONFIG_CPUMASK_OFFSTACK |
Rusty Russell | 8c083f0 | 2009-03-19 15:22:20 +1030 | [diff] [blame] | 7720 | alloc_size += num_possible_cpus() * cpumask_size(); |
Rusty Russell | df7c8e8 | 2009-03-19 15:22:20 +1030 | [diff] [blame] | 7721 | #endif |
Mike Travis | 434d53b | 2008-04-04 18:11:04 -0700 | [diff] [blame] | 7722 | if (alloc_size) { |
Pekka Enberg | 36b7b6d | 2009-06-10 23:42:36 +0300 | [diff] [blame] | 7723 | ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT); |
Mike Travis | 434d53b | 2008-04-04 18:11:04 -0700 | [diff] [blame] | 7724 | |
| 7725 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 7726 | init_task_group.se = (struct sched_entity **)ptr; |
| 7727 | ptr += nr_cpu_ids * sizeof(void **); |
| 7728 | |
| 7729 | init_task_group.cfs_rq = (struct cfs_rq **)ptr; |
| 7730 | ptr += nr_cpu_ids * sizeof(void **); |
Peter Zijlstra | eff766a | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 7731 | |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 7732 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
Mike Travis | 434d53b | 2008-04-04 18:11:04 -0700 | [diff] [blame] | 7733 | #ifdef CONFIG_RT_GROUP_SCHED |
| 7734 | init_task_group.rt_se = (struct sched_rt_entity **)ptr; |
| 7735 | ptr += nr_cpu_ids * sizeof(void **); |
| 7736 | |
| 7737 | init_task_group.rt_rq = (struct rt_rq **)ptr; |
Peter Zijlstra | eff766a | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 7738 | ptr += nr_cpu_ids * sizeof(void **); |
| 7739 | |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 7740 | #endif /* CONFIG_RT_GROUP_SCHED */ |
Rusty Russell | df7c8e8 | 2009-03-19 15:22:20 +1030 | [diff] [blame] | 7741 | #ifdef CONFIG_CPUMASK_OFFSTACK |
| 7742 | for_each_possible_cpu(i) { |
| 7743 | per_cpu(load_balance_tmpmask, i) = (void *)ptr; |
| 7744 | ptr += cpumask_size(); |
| 7745 | } |
| 7746 | #endif /* CONFIG_CPUMASK_OFFSTACK */ |
Mike Travis | 434d53b | 2008-04-04 18:11:04 -0700 | [diff] [blame] | 7747 | } |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 7748 | |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 7749 | #ifdef CONFIG_SMP |
| 7750 | init_defrootdomain(); |
| 7751 | #endif |
| 7752 | |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 7753 | init_rt_bandwidth(&def_rt_bandwidth, |
| 7754 | global_rt_period(), global_rt_runtime()); |
| 7755 | |
| 7756 | #ifdef CONFIG_RT_GROUP_SCHED |
| 7757 | init_rt_bandwidth(&init_task_group.rt_bandwidth, |
| 7758 | global_rt_period(), global_rt_runtime()); |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 7759 | #endif /* CONFIG_RT_GROUP_SCHED */ |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 7760 | |
Dhaval Giani | 7c94143 | 2010-01-20 13:26:18 +0100 | [diff] [blame] | 7761 | #ifdef CONFIG_CGROUP_SCHED |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 7762 | list_add(&init_task_group.list, &task_groups); |
Peter Zijlstra | f473aa5 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 7763 | INIT_LIST_HEAD(&init_task_group.children); |
Mike Galbraith | 5091faa | 2010-11-30 14:18:03 +0100 | [diff] [blame] | 7764 | autogroup_init(&init_task); |
Dhaval Giani | 7c94143 | 2010-01-20 13:26:18 +0100 | [diff] [blame] | 7765 | #endif /* CONFIG_CGROUP_SCHED */ |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 7766 | |
KAMEZAWA Hiroyuki | 0a94502 | 2006-03-28 01:56:37 -0800 | [diff] [blame] | 7767 | for_each_possible_cpu(i) { |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 7768 | struct rq *rq; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7769 | |
| 7770 | rq = cpu_rq(i); |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 7771 | raw_spin_lock_init(&rq->lock); |
Nick Piggin | 7897986 | 2005-06-25 14:57:13 -0700 | [diff] [blame] | 7772 | rq->nr_running = 0; |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 7773 | rq->calc_load_active = 0; |
| 7774 | rq->calc_load_update = jiffies + LOAD_FREQ; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 7775 | init_cfs_rq(&rq->cfs, rq); |
Peter Zijlstra | fa85ae2 | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 7776 | init_rt_rq(&rq->rt, rq); |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 7777 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 7778 | init_task_group.shares = init_task_group_load; |
| 7779 | INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); |
Dhaval Giani | 354d60c | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 7780 | #ifdef CONFIG_CGROUP_SCHED |
| 7781 | /* |
| 7782 | * How much cpu bandwidth does init_task_group get? |
| 7783 | * |
| 7784 |  * In case of task-groups formed through the cgroup filesystem, it |
| 7785 | * gets 100% of the cpu resources in the system. This overall |
| 7786 | * system cpu resource is divided among the tasks of |
| 7787 | * init_task_group and its child task-groups in a fair manner, |
| 7788 | * based on each entity's (task or task-group's) weight |
| 7789 | * (se->load.weight). |
| 7790 | * |
| 7791 | * In other words, if init_task_group has 10 tasks of weight |
| 7792 |  * 1024 and two child groups A0 and A1 (of weight 1024 each), |
| 7793 | * then A0's share of the cpu resource is: |
| 7794 | * |
Ingo Molnar | 0d905bc | 2009-05-04 19:13:30 +0200 | [diff] [blame] | 7795 | * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33% |
Dhaval Giani | 354d60c | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 7796 | * |
| 7797 | * We achieve this by letting init_task_group's tasks sit |
| 7798 |  * directly in rq->cfs (i.e. init_task_group->se[] = NULL). |
| 7799 | */ |
Peter Zijlstra | 3d4b47b | 2010-11-15 15:47:01 -0800 | [diff] [blame] | 7800 | init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, NULL); |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 7801 | #endif |
Dhaval Giani | 354d60c | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 7802 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
| 7803 | |
| 7804 | rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 7805 | #ifdef CONFIG_RT_GROUP_SCHED |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 7806 | INIT_LIST_HEAD(&rq->leaf_rt_rq_list); |
Dhaval Giani | 354d60c | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 7807 | #ifdef CONFIG_CGROUP_SCHED |
Peter Zijlstra | 3d4b47b | 2010-11-15 15:47:01 -0800 | [diff] [blame] | 7808 | init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, NULL); |
Dhaval Giani | 354d60c | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 7809 | #endif |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 7810 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7811 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 7812 | for (j = 0; j < CPU_LOAD_IDX_MAX; j++) |
| 7813 | rq->cpu_load[j] = 0; |
Venkatesh Pallipadi | fdf3e95 | 2010-05-17 18:14:43 -0700 | [diff] [blame] | 7814 | |
| 7815 | rq->last_load_update_tick = jiffies; |
| 7816 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7817 | #ifdef CONFIG_SMP |
Nick Piggin | 41c7ce9 | 2005-06-25 14:57:24 -0700 | [diff] [blame] | 7818 | rq->sd = NULL; |
Gregory Haskins | 57d885f | 2008-01-25 21:08:18 +0100 | [diff] [blame] | 7819 | rq->rd = NULL; |
Peter Zijlstra | e51fd5e | 2010-05-31 12:37:30 +0200 | [diff] [blame] | 7820 | rq->cpu_power = SCHED_LOAD_SCALE; |
Gregory Haskins | 3f029d3 | 2009-07-29 11:08:47 -0400 | [diff] [blame] | 7821 | rq->post_schedule = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7822 | rq->active_balance = 0; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 7823 | rq->next_balance = jiffies; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7824 | rq->push_cpu = 0; |
Christoph Lameter | 0a2966b | 2006-09-25 23:30:51 -0700 | [diff] [blame] | 7825 | rq->cpu = i; |
Gregory Haskins | 1f11eb6 | 2008-06-04 15:04:05 -0400 | [diff] [blame] | 7826 | rq->online = 0; |
Mike Galbraith | eae0c9d | 2009-11-10 03:50:02 +0100 | [diff] [blame] | 7827 | rq->idle_stamp = 0; |
| 7828 | rq->avg_idle = 2*sysctl_sched_migration_cost; |
Gregory Haskins | dc93852 | 2008-01-25 21:08:26 +0100 | [diff] [blame] | 7829 | rq_attach_root(rq, &def_root_domain); |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 7830 | #ifdef CONFIG_NO_HZ |
| 7831 | rq->nohz_balance_kick = 0; |
| 7832 | init_sched_softirq_csd(&per_cpu(remote_sched_softirq_cb, i)); |
| 7833 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7834 | #endif |
Peter Zijlstra | 8f4d37e | 2008-01-25 21:08:29 +0100 | [diff] [blame] | 7835 | init_rq_hrtick(rq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7836 | atomic_set(&rq->nr_iowait, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7837 | } |
| 7838 | |
Peter Williams | 2dd73a4 | 2006-06-27 02:54:34 -0700 | [diff] [blame] | 7839 | set_load_weight(&init_task); |
Heiko Carstens | b50f60c | 2006-07-30 03:03:52 -0700 | [diff] [blame] | 7840 | |
Avi Kivity | e107be3 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 7841 | #ifdef CONFIG_PREEMPT_NOTIFIERS |
| 7842 | INIT_HLIST_HEAD(&init_task.preempt_notifiers); |
| 7843 | #endif |
| 7844 | |
Christoph Lameter | c9819f4 | 2006-12-10 02:20:25 -0800 | [diff] [blame] | 7845 | #ifdef CONFIG_SMP |
Carlos R. Mafra | 962cf36 | 2008-05-15 11:15:37 -0300 | [diff] [blame] | 7846 | open_softirq(SCHED_SOFTIRQ, run_rebalance_domains); |
Christoph Lameter | c9819f4 | 2006-12-10 02:20:25 -0800 | [diff] [blame] | 7847 | #endif |
| 7848 | |
Heiko Carstens | b50f60c | 2006-07-30 03:03:52 -0700 | [diff] [blame] | 7849 | #ifdef CONFIG_RT_MUTEXES |
Thomas Gleixner | 1d61548 | 2009-11-17 14:54:03 +0100 | [diff] [blame] | 7850 | plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock); |
Heiko Carstens | b50f60c | 2006-07-30 03:03:52 -0700 | [diff] [blame] | 7851 | #endif |
| 7852 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7853 | /* |
| 7854 | * The boot idle thread does lazy MMU switching as well: |
| 7855 | */ |
| 7856 | atomic_inc(&init_mm.mm_count); |
| 7857 | enter_lazy_tlb(&init_mm, current); |
| 7858 | |
| 7859 | /* |
| 7860 | * Make us the idle thread. Technically, schedule() should not be |
| 7861 | * called from this thread, however somewhere below it might be, |
| 7862 | * but because we are the idle thread, we just pick up running again |
| 7863 | * when this runqueue becomes "idle". |
| 7864 | */ |
| 7865 | init_idle(current, smp_processor_id()); |
Thomas Gleixner | dce48a8 | 2009-04-11 10:43:41 +0200 | [diff] [blame] | 7866 | |
| 7867 | calc_load_update = jiffies + LOAD_FREQ; |
| 7868 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 7869 | /* |
| 7870 | * During early bootup we pretend to be a normal task: |
| 7871 | */ |
| 7872 | current->sched_class = &fair_sched_class; |
Ingo Molnar | 6892b75 | 2008-02-13 14:02:36 +0100 | [diff] [blame] | 7873 | |
Rusty Russell | 6a7b3dc | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 7874 | /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */ |
Rusty Russell | 49557e6 | 2009-11-02 20:37:20 +1030 | [diff] [blame] | 7875 | zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT); |
Rusty Russell | bf4d83f | 2008-11-25 09:57:51 +1030 | [diff] [blame] | 7876 | #ifdef CONFIG_SMP |
Rusty Russell | 7d1e6a9 | 2008-11-25 02:35:09 +1030 | [diff] [blame] | 7877 | #ifdef CONFIG_NO_HZ |
Venkatesh Pallipadi | 83cd4fe | 2010-05-21 17:09:41 -0700 | [diff] [blame] | 7878 | zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT); |
| 7879 | alloc_cpumask_var(&nohz.grp_idle_mask, GFP_NOWAIT); |
| 7880 | atomic_set(&nohz.load_balancer, nr_cpu_ids); |
| 7881 | atomic_set(&nohz.first_pick_cpu, nr_cpu_ids); |
| 7882 | atomic_set(&nohz.second_pick_cpu, nr_cpu_ids); |
Rusty Russell | 7d1e6a9 | 2008-11-25 02:35:09 +1030 | [diff] [blame] | 7883 | #endif |
Rusty Russell | bdddd29 | 2009-12-02 14:09:16 +1030 | [diff] [blame] | 7884 | /* May be allocated at isolcpus cmdline parse time */ |
| 7885 | if (cpu_isolated_map == NULL) |
| 7886 | zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); |
Rusty Russell | bf4d83f | 2008-11-25 09:57:51 +1030 | [diff] [blame] | 7887 | #endif /* SMP */ |
Rusty Russell | 6a7b3dc | 2008-11-25 02:35:04 +1030 | [diff] [blame] | 7888 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7889 | perf_event_init(); |
Ingo Molnar | 0d905bc | 2009-05-04 19:13:30 +0200 | [diff] [blame] | 7890 | |
Ingo Molnar | 6892b75 | 2008-02-13 14:02:36 +0100 | [diff] [blame] | 7891 | scheduler_running = 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7892 | } |
| 7893 | |
| 7894 | #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP |
Frederic Weisbecker | e4aafea | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 7895 | static inline int preempt_count_equals(int preempt_offset) |
| 7896 | { |
Frederic Weisbecker | 234da7b | 2009-12-16 20:21:05 +0100 | [diff] [blame] | 7897 | int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth(); |
Frederic Weisbecker | e4aafea | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 7898 | |
| 7899 | return (nested == PREEMPT_INATOMIC_BASE + preempt_offset); |
| 7900 | } |
| 7901 | |
Simon Kagstrom | d894837 | 2009-12-23 11:08:18 +0100 | [diff] [blame] | 7902 | void __might_sleep(const char *file, int line, int preempt_offset) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7903 | { |
Ingo Molnar | 48f24c4 | 2006-07-03 00:25:40 -0700 | [diff] [blame] | 7904 | #ifdef in_atomic |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7905 | static unsigned long prev_jiffy; /* ratelimiting */ |
| 7906 | |
Frederic Weisbecker | e4aafea | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 7907 | if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) || |
| 7908 | system_state != SYSTEM_RUNNING || oops_in_progress) |
Ingo Molnar | aef745f | 2008-08-28 11:34:43 +0200 | [diff] [blame] | 7909 | return; |
| 7910 | if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) |
| 7911 | return; |
| 7912 | prev_jiffy = jiffies; |
| 7913 | |
Peter Zijlstra | 3df0fc5 | 2009-12-20 14:23:57 +0100 | [diff] [blame] | 7914 | printk(KERN_ERR |
| 7915 | "BUG: sleeping function called from invalid context at %s:%d\n", |
| 7916 | file, line); |
| 7917 | printk(KERN_ERR |
| 7918 | "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", |
| 7919 | in_atomic(), irqs_disabled(), |
| 7920 | current->pid, current->comm); |
Ingo Molnar | aef745f | 2008-08-28 11:34:43 +0200 | [diff] [blame] | 7921 | |
| 7922 | debug_show_held_locks(current); |
| 7923 | if (irqs_disabled()) |
| 7924 | print_irqtrace_events(current); |
| 7925 | dump_stack(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7926 | #endif |
| 7927 | } |
| 7928 | EXPORT_SYMBOL(__might_sleep); |
| 7929 | #endif |
| 7930 | |
| 7931 | #ifdef CONFIG_MAGIC_SYSRQ |
Andi Kleen | 3a5e4dc | 2007-10-15 17:00:15 +0200 | [diff] [blame] | 7932 | static void normalize_task(struct rq *rq, struct task_struct *p) |
| 7933 | { |
| 7934 | int on_rq; |
Peter Zijlstra | 3e51f33 | 2008-05-03 18:29:28 +0200 | [diff] [blame] | 7935 | |
Andi Kleen | 3a5e4dc | 2007-10-15 17:00:15 +0200 | [diff] [blame] | 7936 | on_rq = p->se.on_rq; |
| 7937 | if (on_rq) |
| 7938 | deactivate_task(rq, p, 0); |
| 7939 | __setscheduler(rq, p, SCHED_NORMAL, 0); |
| 7940 | if (on_rq) { |
| 7941 | activate_task(rq, p, 0); |
| 7942 | resched_task(rq->curr); |
| 7943 | } |
| 7944 | } |
| 7945 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7946 | void normalize_rt_tasks(void) |
| 7947 | { |
Ingo Molnar | a0f98a1 | 2007-06-17 18:37:45 +0200 | [diff] [blame] | 7948 | struct task_struct *g, *p; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7949 | unsigned long flags; |
Ingo Molnar | 70b97a7 | 2006-07-03 00:25:42 -0700 | [diff] [blame] | 7950 | struct rq *rq; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7951 | |
Peter Zijlstra | 4cf5d77 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 7952 | read_lock_irqsave(&tasklist_lock, flags); |
Ingo Molnar | a0f98a1 | 2007-06-17 18:37:45 +0200 | [diff] [blame] | 7953 | do_each_thread(g, p) { |
Ingo Molnar | 178be79 | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 7954 | /* |
| 7955 | * Only normalize user tasks: |
| 7956 | */ |
| 7957 | if (!p->mm) |
| 7958 | continue; |
| 7959 | |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 7960 | p->se.exec_start = 0; |
Ingo Molnar | 6cfb0d5 | 2007-08-02 17:41:40 +0200 | [diff] [blame] | 7961 | #ifdef CONFIG_SCHEDSTATS |
Lucas De Marchi | 41acab8 | 2010-03-10 23:37:45 -0300 | [diff] [blame] | 7962 | p->se.statistics.wait_start = 0; |
| 7963 | p->se.statistics.sleep_start = 0; |
| 7964 | p->se.statistics.block_start = 0; |
Ingo Molnar | 6cfb0d5 | 2007-08-02 17:41:40 +0200 | [diff] [blame] | 7965 | #endif |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 7966 | |
| 7967 | if (!rt_task(p)) { |
| 7968 | /* |
| 7969 | * Renice negative nice level userspace |
| 7970 | * tasks back to 0: |
| 7971 | */ |
| 7972 | if (TASK_NICE(p) < 0 && p->mm) |
| 7973 | set_user_nice(p, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7974 | continue; |
Ingo Molnar | dd41f59 | 2007-07-09 18:51:59 +0200 | [diff] [blame] | 7975 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7976 | |
Thomas Gleixner | 1d61548 | 2009-11-17 14:54:03 +0100 | [diff] [blame] | 7977 | raw_spin_lock(&p->pi_lock); |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 7978 | rq = __task_rq_lock(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7979 | |
Ingo Molnar | 178be79 | 2007-10-15 17:00:18 +0200 | [diff] [blame] | 7980 | normalize_task(rq, p); |
Andi Kleen | 3a5e4dc | 2007-10-15 17:00:15 +0200 | [diff] [blame] | 7981 | |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 7982 | __task_rq_unlock(rq); |
Thomas Gleixner | 1d61548 | 2009-11-17 14:54:03 +0100 | [diff] [blame] | 7983 | raw_spin_unlock(&p->pi_lock); |
Ingo Molnar | a0f98a1 | 2007-06-17 18:37:45 +0200 | [diff] [blame] | 7984 | } while_each_thread(g, p); |
| 7985 | |
Peter Zijlstra | 4cf5d77 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 7986 | read_unlock_irqrestore(&tasklist_lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7987 | } |
| 7988 | |
| 7989 | #endif /* CONFIG_MAGIC_SYSRQ */ |
Linus Torvalds | 1df5c10 | 2005-09-12 07:59:21 -0700 | [diff] [blame] | 7990 | |
Jason Wessel | 67fc4e0 | 2010-05-20 21:04:21 -0500 | [diff] [blame] | 7991 | #if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) |
Linus Torvalds | 1df5c10 | 2005-09-12 07:59:21 -0700 | [diff] [blame] | 7992 | /* |
Jason Wessel | 67fc4e0 | 2010-05-20 21:04:21 -0500 | [diff] [blame] | 7993 | * These functions are only useful for the IA64 MCA handling, or kdb. |
Linus Torvalds | 1df5c10 | 2005-09-12 07:59:21 -0700 | [diff] [blame] | 7994 | * |
| 7995 | * They can only be called when the whole system has been |
| 7996 | * stopped - every CPU needs to be quiescent, and no scheduling |
| 7997 | * activity can take place. Using them for anything else would |
| 7998 | * be a serious bug, and as a result, they aren't even visible |
| 7999 | * under any other configuration. |
| 8000 | */ |
| 8001 | |
| 8002 | /** |
| 8003 | * curr_task - return the current task for a given cpu. |
| 8004 | * @cpu: the processor in question. |
| 8005 | * |
| 8006 | * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! |
| 8007 | */ |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 8008 | struct task_struct *curr_task(int cpu) |
Linus Torvalds | 1df5c10 | 2005-09-12 07:59:21 -0700 | [diff] [blame] | 8009 | { |
| 8010 | return cpu_curr(cpu); |
| 8011 | } |
| 8012 | |
Jason Wessel | 67fc4e0 | 2010-05-20 21:04:21 -0500 | [diff] [blame] | 8013 | #endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */ |
| 8014 | |
| 8015 | #ifdef CONFIG_IA64 |
Linus Torvalds | 1df5c10 | 2005-09-12 07:59:21 -0700 | [diff] [blame] | 8016 | /** |
| 8017 | * set_curr_task - set the current task for a given cpu. |
| 8018 | * @cpu: the processor in question. |
| 8019 | * @p: the task pointer to set. |
| 8020 | * |
| 8021 | * Description: This function must only be used when non-maskable interrupts |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 8022 | * are serviced on a separate stack. It allows the architecture to switch the |
| 8023 | * notion of the current task on a cpu in a non-blocking manner. This function |
Linus Torvalds | 1df5c10 | 2005-09-12 07:59:21 -0700 | [diff] [blame] | 8024 |  * must be called with all CPUs synchronized and interrupts disabled; the |
| 8025 |  * caller must save the original value of the current task (see |
| 8026 | * curr_task() above) and restore that value before reenabling interrupts and |
| 8027 | * re-starting the system. |
| 8028 | * |
| 8029 | * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! |
| 8030 | */ |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 8031 | void set_curr_task(int cpu, struct task_struct *p) |
Linus Torvalds | 1df5c10 | 2005-09-12 07:59:21 -0700 | [diff] [blame] | 8032 | { |
| 8033 | cpu_curr(cpu) = p; |
| 8034 | } |
| 8035 | |
| 8036 | #endif |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8037 | |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8038 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 8039 | static void free_fair_sched_group(struct task_group *tg) |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8040 | { |
| 8041 | int i; |
| 8042 | |
| 8043 | for_each_possible_cpu(i) { |
| 8044 | if (tg->cfs_rq) |
| 8045 | kfree(tg->cfs_rq[i]); |
| 8046 | if (tg->se) |
| 8047 | kfree(tg->se[i]); |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8048 | } |
| 8049 | |
| 8050 | kfree(tg->cfs_rq); |
| 8051 | kfree(tg->se); |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8052 | } |
| 8053 | |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8054 | static |
| 8055 | int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8056 | { |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8057 | struct cfs_rq *cfs_rq; |
Li Zefan | eab1722 | 2008-10-29 17:03:22 +0800 | [diff] [blame] | 8058 | struct sched_entity *se; |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8059 | struct rq *rq; |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8060 | int i; |
| 8061 | |
Mike Travis | 434d53b | 2008-04-04 18:11:04 -0700 | [diff] [blame] | 8062 | tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8063 | if (!tg->cfs_rq) |
| 8064 | goto err; |
Mike Travis | 434d53b | 2008-04-04 18:11:04 -0700 | [diff] [blame] | 8065 | tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8066 | if (!tg->se) |
| 8067 | goto err; |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8068 | |
| 8069 | tg->shares = NICE_0_LOAD; |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8070 | |
| 8071 | for_each_possible_cpu(i) { |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8072 | rq = cpu_rq(i); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8073 | |
Li Zefan | eab1722 | 2008-10-29 17:03:22 +0800 | [diff] [blame] | 8074 | cfs_rq = kzalloc_node(sizeof(struct cfs_rq), |
| 8075 | GFP_KERNEL, cpu_to_node(i)); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8076 | if (!cfs_rq) |
| 8077 | goto err; |
| 8078 | |
Li Zefan | eab1722 | 2008-10-29 17:03:22 +0800 | [diff] [blame] | 8079 | se = kzalloc_node(sizeof(struct sched_entity), |
| 8080 | GFP_KERNEL, cpu_to_node(i)); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8081 | if (!se) |
Phil Carmody | dfc12eb | 2009-12-10 14:29:37 +0200 | [diff] [blame] | 8082 | goto err_free_rq; |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8083 | |
Peter Zijlstra | 3d4b47b | 2010-11-15 15:47:01 -0800 | [diff] [blame] | 8084 | init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8085 | } |
| 8086 | |
| 8087 | return 1; |
| 8088 | |
Peter Zijlstra | 4924627 | 2010-10-17 21:46:10 +0200 | [diff] [blame] | 8089 | err_free_rq: |
Phil Carmody | dfc12eb | 2009-12-10 14:29:37 +0200 | [diff] [blame] | 8090 | kfree(cfs_rq); |
Peter Zijlstra | 4924627 | 2010-10-17 21:46:10 +0200 | [diff] [blame] | 8091 | err: |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8092 | return 0; |
| 8093 | } |
| 8094 | |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8095 | static inline void unregister_fair_sched_group(struct task_group *tg, int cpu) |
| 8096 | { |
Peter Zijlstra | 3d4b47b | 2010-11-15 15:47:01 -0800 | [diff] [blame] | 8097 | struct rq *rq = cpu_rq(cpu); |
| 8098 | unsigned long flags; |
Peter Zijlstra | 3d4b47b | 2010-11-15 15:47:01 -0800 | [diff] [blame] | 8099 | |
| 8100 | /* |
| 8101 | * Only empty task groups can be destroyed; so we can speculatively |
| 8102 | * check on_list without danger of it being re-added. |
| 8103 | */ |
| 8104 | if (!tg->cfs_rq[cpu]->on_list) |
| 8105 | return; |
| 8106 | |
| 8107 | raw_spin_lock_irqsave(&rq->lock, flags); |
Paul Turner | 822bc18 | 2010-11-29 16:55:40 -0800 | [diff] [blame] | 8108 | list_del_leaf_cfs_rq(tg->cfs_rq[cpu]); |
Peter Zijlstra | 3d4b47b | 2010-11-15 15:47:01 -0800 | [diff] [blame] | 8109 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8110 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 8111 | #else /* !CONFIG_FAIR_GROUP_SCHED */ |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8112 | static inline void free_fair_sched_group(struct task_group *tg) |
| 8113 | { |
| 8114 | } |
| 8115 | |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8116 | static inline |
| 8117 | int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8118 | { |
| 8119 | return 1; |
| 8120 | } |
| 8121 | |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8122 | static inline void unregister_fair_sched_group(struct task_group *tg, int cpu) |
| 8123 | { |
| 8124 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 8125 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8126 | |
| 8127 | #ifdef CONFIG_RT_GROUP_SCHED |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8128 | static void free_rt_sched_group(struct task_group *tg) |
| 8129 | { |
| 8130 | int i; |
| 8131 | |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8132 | destroy_rt_bandwidth(&tg->rt_bandwidth); |
| 8133 | |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8134 | for_each_possible_cpu(i) { |
| 8135 | if (tg->rt_rq) |
| 8136 | kfree(tg->rt_rq[i]); |
| 8137 | if (tg->rt_se) |
| 8138 | kfree(tg->rt_se[i]); |
| 8139 | } |
| 8140 | |
| 8141 | kfree(tg->rt_rq); |
| 8142 | kfree(tg->rt_se); |
| 8143 | } |
| 8144 | |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8145 | static |
| 8146 | int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8147 | { |
| 8148 | struct rt_rq *rt_rq; |
Li Zefan | eab1722 | 2008-10-29 17:03:22 +0800 | [diff] [blame] | 8149 | struct sched_rt_entity *rt_se; |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8150 | struct rq *rq; |
| 8151 | int i; |
| 8152 | |
Mike Travis | 434d53b | 2008-04-04 18:11:04 -0700 | [diff] [blame] | 8153 | tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL); |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8154 | if (!tg->rt_rq) |
| 8155 | goto err; |
Mike Travis | 434d53b | 2008-04-04 18:11:04 -0700 | [diff] [blame] | 8156 | tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL); |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8157 | if (!tg->rt_se) |
| 8158 | goto err; |
| 8159 | |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8160 | init_rt_bandwidth(&tg->rt_bandwidth, |
| 8161 | ktime_to_ns(def_rt_bandwidth.rt_period), 0); |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8162 | |
| 8163 | for_each_possible_cpu(i) { |
| 8164 | rq = cpu_rq(i); |
| 8165 | |
Li Zefan | eab1722 | 2008-10-29 17:03:22 +0800 | [diff] [blame] | 8166 | rt_rq = kzalloc_node(sizeof(struct rt_rq), |
| 8167 | GFP_KERNEL, cpu_to_node(i)); |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8168 | if (!rt_rq) |
| 8169 | goto err; |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8170 | |
Li Zefan | eab1722 | 2008-10-29 17:03:22 +0800 | [diff] [blame] | 8171 | rt_se = kzalloc_node(sizeof(struct sched_rt_entity), |
| 8172 | GFP_KERNEL, cpu_to_node(i)); |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8173 | if (!rt_se) |
Phil Carmody | dfc12eb | 2009-12-10 14:29:37 +0200 | [diff] [blame] | 8174 | goto err_free_rq; |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8175 | |
Peter Zijlstra | 3d4b47b | 2010-11-15 15:47:01 -0800 | [diff] [blame] | 8176 | init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8177 | } |
| 8178 | |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8179 | return 1; |
| 8180 | |
Peter Zijlstra | 4924627 | 2010-10-17 21:46:10 +0200 | [diff] [blame] | 8181 | err_free_rq: |
Phil Carmody | dfc12eb | 2009-12-10 14:29:37 +0200 | [diff] [blame] | 8182 | kfree(rt_rq); |
Peter Zijlstra | 4924627 | 2010-10-17 21:46:10 +0200 | [diff] [blame] | 8183 | err: |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8184 | return 0; |
| 8185 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 8186 | #else /* !CONFIG_RT_GROUP_SCHED */ |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8187 | static inline void free_rt_sched_group(struct task_group *tg) |
| 8188 | { |
| 8189 | } |
| 8190 | |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8191 | static inline |
| 8192 | int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8193 | { |
| 8194 | return 1; |
| 8195 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 8196 | #endif /* CONFIG_RT_GROUP_SCHED */ |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8197 | |
Dhaval Giani | 7c94143 | 2010-01-20 13:26:18 +0100 | [diff] [blame] | 8198 | #ifdef CONFIG_CGROUP_SCHED |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8199 | static void free_sched_group(struct task_group *tg) |
| 8200 | { |
| 8201 | free_fair_sched_group(tg); |
| 8202 | free_rt_sched_group(tg); |
| 8203 | kfree(tg); |
| 8204 | } |
| 8205 | |
| 8206 | /* allocate runqueue etc for a new task group */ |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8207 | struct task_group *sched_create_group(struct task_group *parent) |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8208 | { |
| 8209 | struct task_group *tg; |
| 8210 | unsigned long flags; |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8211 | |
| 8212 | tg = kzalloc(sizeof(*tg), GFP_KERNEL); |
| 8213 | if (!tg) |
| 8214 | return ERR_PTR(-ENOMEM); |
| 8215 | |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8216 | if (!alloc_fair_sched_group(tg, parent)) |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8217 | goto err; |
| 8218 | |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8219 | if (!alloc_rt_sched_group(tg, parent)) |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8220 | goto err; |
| 8221 | |
Peter Zijlstra | 8ed3699 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8222 | spin_lock_irqsave(&task_group_lock, flags); |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8223 | list_add_rcu(&tg->list, &task_groups); |
Peter Zijlstra | f473aa5 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 8224 | |
| 8225 | WARN_ON(!parent); /* root should already exist */ |
| 8226 | |
| 8227 | tg->parent = parent; |
Peter Zijlstra | f473aa5 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 8228 | INIT_LIST_HEAD(&tg->children); |
Zhang, Yanmin | 09f2724 | 2008-08-14 15:56:40 +0800 | [diff] [blame] | 8229 | list_add_rcu(&tg->siblings, &parent->children);
Peter Zijlstra | 8ed3699 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8230 | spin_unlock_irqrestore(&task_group_lock, flags); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8231 | |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8232 | return tg; |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8233 | |
| 8234 | err: |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8235 | free_sched_group(tg); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8236 | return ERR_PTR(-ENOMEM); |
| 8237 | } |
| 8238 | |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8239 | /* rcu callback to free various structures associated with a task group */ |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8240 | static void free_sched_group_rcu(struct rcu_head *rhp) |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8241 | { |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8242 | /* now it should be safe to free those cfs_rqs */ |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8243 | free_sched_group(container_of(rhp, struct task_group, rcu)); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8244 | } |
| 8245 | |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8246 | /* Destroy runqueue etc associated with a task group */ |
Ingo Molnar | 4cf86d7 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 8247 | void sched_destroy_group(struct task_group *tg) |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8248 | { |
Peter Zijlstra | 8ed3699 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8249 | unsigned long flags; |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8250 | int i; |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8251 | |
Peter Zijlstra | 3d4b47b | 2010-11-15 15:47:01 -0800 | [diff] [blame] | 8252 | /* end participation in shares distribution */ |
| 8253 | for_each_possible_cpu(i) |
Peter Zijlstra | bccbe08 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8254 | unregister_fair_sched_group(tg, i); |
Peter Zijlstra | 3d4b47b | 2010-11-15 15:47:01 -0800 | [diff] [blame] | 8255 | |
| 8256 | spin_lock_irqsave(&task_group_lock, flags); |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8257 | list_del_rcu(&tg->list); |
Peter Zijlstra | f473aa5 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 8258 | list_del_rcu(&tg->siblings); |
Peter Zijlstra | 8ed3699 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8259 | spin_unlock_irqrestore(&task_group_lock, flags); |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8260 | |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8261 | /* wait for possible concurrent references to cfs_rqs to complete */
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8262 | call_rcu(&tg->rcu, free_sched_group_rcu); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8263 | } |
| 8264 | |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8265 | /* Change a task's runqueue when it moves between groups.
Ingo Molnar | 3a25201 | 2007-10-15 17:00:12 +0200 | [diff] [blame] | 8266 | * The caller of this function should have put the task in its new group |
| 8267 | * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to |
| 8268 | * reflect its new group. |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8269 | */ |
| 8270 | void sched_move_task(struct task_struct *tsk) |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8271 | { |
| 8272 | int on_rq, running; |
| 8273 | unsigned long flags; |
| 8274 | struct rq *rq; |
| 8275 | |
| 8276 | rq = task_rq_lock(tsk, &flags); |
| 8277 | |
Dmitry Adamushko | 051a1d1 | 2007-12-18 15:21:13 +0100 | [diff] [blame] | 8278 | running = task_current(rq, tsk); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8279 | on_rq = tsk->se.on_rq; |
| 8280 | |
Hiroshi Shimamoto | 0e1f348 | 2008-03-10 11:01:20 -0700 | [diff] [blame] | 8281 | if (on_rq) |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8282 | dequeue_task(rq, tsk, 0); |
Hiroshi Shimamoto | 0e1f348 | 2008-03-10 11:01:20 -0700 | [diff] [blame] | 8283 | if (unlikely(running)) |
| 8284 | tsk->sched_class->put_prev_task(rq, tsk); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8285 | |
Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 8286 | #ifdef CONFIG_FAIR_GROUP_SCHED |
Peter Zijlstra | b2b5ce0 | 2010-10-15 15:24:15 +0200 | [diff] [blame] | 8287 | if (tsk->sched_class->task_move_group) |
| 8288 | tsk->sched_class->task_move_group(tsk, on_rq); |
| 8289 | else |
Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 8290 | #endif |
Peter Zijlstra | b2b5ce0 | 2010-10-15 15:24:15 +0200 | [diff] [blame] | 8291 | set_task_rq(tsk, task_cpu(tsk)); |
Peter Zijlstra | 810b381 | 2008-02-29 15:21:01 -0500 | [diff] [blame] | 8292 | |
Hiroshi Shimamoto | 0e1f348 | 2008-03-10 11:01:20 -0700 | [diff] [blame] | 8293 | if (unlikely(running)) |
| 8294 | tsk->sched_class->set_curr_task(rq); |
| 8295 | if (on_rq) |
Peter Zijlstra | 371fd7e | 2010-03-24 16:38:48 +0100 | [diff] [blame] | 8296 | enqueue_task(rq, tsk, 0); |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8297 | |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8298 | task_rq_unlock(rq, &flags); |
| 8299 | } |
Dhaval Giani | 7c94143 | 2010-01-20 13:26:18 +0100 | [diff] [blame] | 8300 | #endif /* CONFIG_CGROUP_SCHED */ |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8301 | |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8302 | #ifdef CONFIG_FAIR_GROUP_SCHED |
Peter Zijlstra | 8ed3699 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8303 | static DEFINE_MUTEX(shares_mutex); |
| 8304 | |
Ingo Molnar | 4cf86d7 | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 8305 | int sched_group_set_shares(struct task_group *tg, unsigned long shares) |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8306 | { |
| 8307 | int i; |
Paul Turner | 9437178 | 2010-11-15 15:47:10 -0800 | [diff] [blame] | 8308 | unsigned long flags; |
Ingo Molnar | c61935f | 2008-01-22 11:24:58 +0100 | [diff] [blame] | 8309 | |
Peter Zijlstra | 62fb185 | 2008-02-25 17:34:02 +0100 | [diff] [blame] | 8310 | /* |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8311 | * We can't change the weight of the root cgroup. |
| 8312 | */ |
| 8313 | if (!tg->se[0]) |
| 8314 | return -EINVAL; |
| 8315 | |
Peter Zijlstra | 18d95a2 | 2008-04-19 19:45:00 +0200 | [diff] [blame] | 8316 | if (shares < MIN_SHARES) |
| 8317 | shares = MIN_SHARES; |
Miao Xie | cb4ad1f | 2008-04-28 12:54:56 +0800 | [diff] [blame] | 8318 | else if (shares > MAX_SHARES) |
| 8319 | shares = MAX_SHARES; |
Peter Zijlstra | 62fb185 | 2008-02-25 17:34:02 +0100 | [diff] [blame] | 8320 | |
Peter Zijlstra | 8ed3699 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8321 | mutex_lock(&shares_mutex); |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8322 | if (tg->shares == shares) |
Dhaval Giani | 5cb350b | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 8323 | goto done; |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8324 | |
Srivatsa Vaddagiri | 6b2d770 | 2008-01-25 21:08:00 +0100 | [diff] [blame] | 8325 | tg->shares = shares; |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 8326 | for_each_possible_cpu(i) { |
Paul Turner | 9437178 | 2010-11-15 15:47:10 -0800 | [diff] [blame] | 8327 | struct rq *rq = cpu_rq(i); |
| 8328 | struct sched_entity *se; |
| 8329 | |
| 8330 | se = tg->se[i]; |
| 8331 | /* Propagate contribution to hierarchy */ |
| 8332 | raw_spin_lock_irqsave(&rq->lock, flags); |
| 8333 | for_each_sched_entity(se) |
| 8334 | update_cfs_shares(group_cfs_rq(se), 0); |
| 8335 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
Peter Zijlstra | c09595f | 2008-06-27 13:41:14 +0200 | [diff] [blame] | 8336 | } |
Srivatsa Vaddagiri | 6b2d770 | 2008-01-25 21:08:00 +0100 | [diff] [blame] | 8337 | |
Dhaval Giani | 5cb350b | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 8338 | done: |
Peter Zijlstra | 8ed3699 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8339 | mutex_unlock(&shares_mutex); |
Srivatsa Vaddagiri | 9b5b775 | 2007-10-15 17:00:09 +0200 | [diff] [blame] | 8340 | return 0; |
Srivatsa Vaddagiri | 29f59db | 2007-10-15 17:00:07 +0200 | [diff] [blame] | 8341 | } |
| 8342 | |
Dhaval Giani | 5cb350b | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 8343 | unsigned long sched_group_shares(struct task_group *tg) |
| 8344 | { |
| 8345 | return tg->shares; |
| 8346 | } |
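/*
 * Illustrative example (not part of the original file): shares are relative
 * weights, not absolute limits.  A minimal userspace sketch of the long-run
 * CPU split between two always-runnable sibling groups with no other load:
 */
#if 0	/* example only -- never compiled */
#include <stdio.h>

int main(void)
{
	unsigned long shares_a = 2048, shares_b = 1024;
	double total = shares_a + shares_b;

	/* Each group's share of one fully contended CPU. */
	printf("group A: %.1f%%\n", 100.0 * shares_a / total);	/* 66.7% */
	printf("group B: %.1f%%\n", 100.0 * shares_b / total);	/* 33.3% */
	return 0;
}
#endif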
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8347 | #endif |
Dhaval Giani | 5cb350b | 2007-10-15 17:00:14 +0200 | [diff] [blame] | 8348 | |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8349 | #ifdef CONFIG_RT_GROUP_SCHED |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8350 | /* |
Peter Zijlstra | 9f0c1e5 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8351 | * Ensure that the real time constraints are schedulable. |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8352 | */ |
Peter Zijlstra | 9f0c1e5 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8353 | static DEFINE_MUTEX(rt_constraints_mutex); |
| 8354 | |
| 8355 | static unsigned long to_ratio(u64 period, u64 runtime) |
| 8356 | { |
| 8357 | if (runtime == RUNTIME_INF) |
Peter Zijlstra | 9a7e0b1 | 2008-08-19 12:33:06 +0200 | [diff] [blame] | 8358 | return 1ULL << 20; |
Peter Zijlstra | 9f0c1e5 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8359 | |
Peter Zijlstra | 9a7e0b1 | 2008-08-19 12:33:06 +0200 | [diff] [blame] | 8360 | return div64_u64(runtime << 20, period); |
Peter Zijlstra | 9f0c1e5 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8361 | } |
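/*
 * Illustrative example (not part of the original file): to_ratio() expresses
 * runtime/period as a 20-bit fixed-point fraction, so 1 << 20 (1048576)
 * means 100% of the period.  A userspace sketch of the arithmetic:
 */
#if 0	/* example only -- never compiled */
#include <stdio.h>

int main(void)
{
	unsigned long long period  = 1000000000ULL;	/* 1s in ns */
	unsigned long long runtime =  950000000ULL;	/* 950ms in ns */

	/* (950000000 << 20) / 1000000000 == 996147, i.e. ~0.95 * 2^20 */
	printf("ratio = %llu\n", (runtime << 20) / period);
	return 0;
}
#endif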
| 8362 | |
Dhaval Giani | 521f1a24 | 2008-02-28 15:21:56 +0530 | [diff] [blame] | 8363 | /* Must be called with tasklist_lock held */ |
| 8364 | static inline int tg_has_rt_tasks(struct task_group *tg) |
| 8365 | { |
| 8366 | struct task_struct *g, *p; |
Peter Zijlstra | 9a7e0b1 | 2008-08-19 12:33:06 +0200 | [diff] [blame] | 8367 | |
Dhaval Giani | 521f1a24 | 2008-02-28 15:21:56 +0530 | [diff] [blame] | 8368 | do_each_thread(g, p) { |
| 8369 | if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg) |
| 8370 | return 1; |
| 8371 | } while_each_thread(g, p); |
Peter Zijlstra | 9a7e0b1 | 2008-08-19 12:33:06 +0200 | [diff] [blame] | 8372 | |
Dhaval Giani | 521f1a24 | 2008-02-28 15:21:56 +0530 | [diff] [blame] | 8373 | return 0; |
| 8374 | } |
| 8375 | |
Peter Zijlstra | 9a7e0b1 | 2008-08-19 12:33:06 +0200 | [diff] [blame] | 8376 | struct rt_schedulable_data { |
| 8377 | struct task_group *tg; |
| 8378 | u64 rt_period; |
| 8379 | u64 rt_runtime; |
| 8380 | }; |
| 8381 | |
| 8382 | static int tg_schedulable(struct task_group *tg, void *data) |
| 8383 | { |
| 8384 | struct rt_schedulable_data *d = data; |
| 8385 | struct task_group *child; |
| 8386 | unsigned long total, sum = 0; |
| 8387 | u64 period, runtime; |
| 8388 | |
| 8389 | period = ktime_to_ns(tg->rt_bandwidth.rt_period); |
| 8390 | runtime = tg->rt_bandwidth.rt_runtime; |
| 8391 | |
| 8392 | if (tg == d->tg) { |
| 8393 | period = d->rt_period; |
| 8394 | runtime = d->rt_runtime; |
| 8395 | } |
| 8396 | |
Peter Zijlstra | 4653f80 | 2008-09-23 15:33:44 +0200 | [diff] [blame] | 8397 | /* |
| 8398 | * Cannot have more runtime than the period. |
| 8399 | */ |
| 8400 | if (runtime > period && runtime != RUNTIME_INF) |
| 8401 | return -EINVAL; |
| 8402 | |
| 8403 | /* |
| 8404 | * Ensure we don't starve existing RT tasks. |
| 8405 | */ |
Peter Zijlstra | 9a7e0b1 | 2008-08-19 12:33:06 +0200 | [diff] [blame] | 8406 | if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg)) |
| 8407 | return -EBUSY; |
| 8408 | |
| 8409 | total = to_ratio(period, runtime); |
| 8410 | |
Peter Zijlstra | 4653f80 | 2008-09-23 15:33:44 +0200 | [diff] [blame] | 8411 | /* |
| 8412 | * Nobody can have more than the global setting allows. |
| 8413 | */ |
| 8414 | if (total > to_ratio(global_rt_period(), global_rt_runtime())) |
| 8415 | return -EINVAL; |
| 8416 | |
| 8417 | /* |
| 8418 | * The sum of our children's runtime should not exceed our own. |
| 8419 | */ |
Peter Zijlstra | 9a7e0b1 | 2008-08-19 12:33:06 +0200 | [diff] [blame] | 8420 | list_for_each_entry_rcu(child, &tg->children, siblings) { |
| 8421 | period = ktime_to_ns(child->rt_bandwidth.rt_period); |
| 8422 | runtime = child->rt_bandwidth.rt_runtime; |
| 8423 | |
| 8424 | if (child == d->tg) { |
| 8425 | period = d->rt_period; |
| 8426 | runtime = d->rt_runtime; |
| 8427 | } |
| 8428 | |
| 8429 | sum += to_ratio(period, runtime); |
| 8430 | } |
| 8431 | |
| 8432 | if (sum > total) |
| 8433 | return -EINVAL; |
| 8434 | |
| 8435 | return 0; |
| 8436 | } |
| 8437 | |
| 8438 | static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) |
| 8439 | { |
| 8440 | struct rt_schedulable_data data = { |
| 8441 | .tg = tg, |
| 8442 | .rt_period = period, |
| 8443 | .rt_runtime = runtime, |
| 8444 | }; |
| 8445 | |
| 8446 | return walk_tg_tree(tg_schedulable, tg_nop, &data); |
| 8447 | } |
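/*
 * Illustrative example (not part of the original file): the tree walk above
 * rejects a configuration whose children together claim more bandwidth than
 * their parent.  A userspace sketch with made-up numbers, using the same
 * 20-bit ratios as to_ratio():
 */
#if 0	/* example only -- never compiled */
#include <stdio.h>

static unsigned long ratio(unsigned long long period, unsigned long long runtime)
{
	return (runtime << 20) / period;
}

int main(void)
{
	/* Parent allows 500ms per 1s; each of two children asks for 300ms. */
	unsigned long total = ratio(1000000000ULL, 500000000ULL);
	unsigned long sum   = 2 * ratio(1000000000ULL, 300000000ULL);

	/* sum (~0.60 * 2^20) > total (0.50 * 2^20), so the write gets -EINVAL */
	printf("%s\n", sum > total ? "rejected" : "accepted");
	return 0;
}
#endif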
| 8448 | |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8449 | static int tg_set_bandwidth(struct task_group *tg, |
| 8450 | u64 rt_period, u64 rt_runtime) |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8451 | { |
Peter Zijlstra | ac086bc | 2008-04-19 19:44:58 +0200 | [diff] [blame] | 8452 | int i, err = 0; |
Peter Zijlstra | 9f0c1e5 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8453 | |
Peter Zijlstra | 9f0c1e5 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8454 | mutex_lock(&rt_constraints_mutex); |
Dhaval Giani | 521f1a24 | 2008-02-28 15:21:56 +0530 | [diff] [blame] | 8455 | read_lock(&tasklist_lock); |
Peter Zijlstra | 9a7e0b1 | 2008-08-19 12:33:06 +0200 | [diff] [blame] | 8456 | err = __rt_schedulable(tg, rt_period, rt_runtime); |
| 8457 | if (err) |
Dhaval Giani | 521f1a24 | 2008-02-28 15:21:56 +0530 | [diff] [blame] | 8458 | goto unlock; |
Peter Zijlstra | ac086bc | 2008-04-19 19:44:58 +0200 | [diff] [blame] | 8459 | |
Thomas Gleixner | 0986b11 | 2009-11-17 15:32:06 +0100 | [diff] [blame] | 8460 | raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8461 | tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); |
| 8462 | tg->rt_bandwidth.rt_runtime = rt_runtime; |
Peter Zijlstra | ac086bc | 2008-04-19 19:44:58 +0200 | [diff] [blame] | 8463 | |
| 8464 | for_each_possible_cpu(i) { |
| 8465 | struct rt_rq *rt_rq = tg->rt_rq[i]; |
| 8466 | |
Thomas Gleixner | 0986b11 | 2009-11-17 15:32:06 +0100 | [diff] [blame] | 8467 | raw_spin_lock(&rt_rq->rt_runtime_lock); |
Peter Zijlstra | ac086bc | 2008-04-19 19:44:58 +0200 | [diff] [blame] | 8468 | rt_rq->rt_runtime = rt_runtime; |
Thomas Gleixner | 0986b11 | 2009-11-17 15:32:06 +0100 | [diff] [blame] | 8469 | raw_spin_unlock(&rt_rq->rt_runtime_lock); |
Peter Zijlstra | ac086bc | 2008-04-19 19:44:58 +0200 | [diff] [blame] | 8470 | } |
Thomas Gleixner | 0986b11 | 2009-11-17 15:32:06 +0100 | [diff] [blame] | 8471 | raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); |
Peter Zijlstra | 4924627 | 2010-10-17 21:46:10 +0200 | [diff] [blame] | 8472 | unlock: |
Dhaval Giani | 521f1a24 | 2008-02-28 15:21:56 +0530 | [diff] [blame] | 8473 | read_unlock(&tasklist_lock); |
Peter Zijlstra | 9f0c1e5 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8474 | mutex_unlock(&rt_constraints_mutex); |
| 8475 | |
| 8476 | return err; |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8477 | } |
| 8478 | |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8479 | int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us) |
| 8480 | { |
| 8481 | u64 rt_runtime, rt_period; |
| 8482 | |
| 8483 | rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period); |
| 8484 | rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC; |
| 8485 | if (rt_runtime_us < 0) |
| 8486 | rt_runtime = RUNTIME_INF; |
| 8487 | |
| 8488 | return tg_set_bandwidth(tg, rt_period, rt_runtime); |
| 8489 | } |
| 8490 | |
Peter Zijlstra | 9f0c1e5 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8491 | long sched_group_rt_runtime(struct task_group *tg) |
| 8492 | { |
| 8493 | u64 rt_runtime_us; |
| 8494 | |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8495 | if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF) |
Peter Zijlstra | 9f0c1e5 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8496 | return -1; |
| 8497 | |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8498 | rt_runtime_us = tg->rt_bandwidth.rt_runtime; |
Peter Zijlstra | 9f0c1e5 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8499 | do_div(rt_runtime_us, NSEC_PER_USEC); |
| 8500 | return rt_runtime_us; |
| 8501 | } |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8502 | |
| 8503 | int sched_group_set_rt_period(struct task_group *tg, long rt_period_us) |
| 8504 | { |
| 8505 | u64 rt_runtime, rt_period; |
| 8506 | |
| 8507 | rt_period = (u64)rt_period_us * NSEC_PER_USEC; |
| 8508 | rt_runtime = tg->rt_bandwidth.rt_runtime; |
| 8509 | |
Raistlin | 619b048 | 2008-06-26 18:54:09 +0200 | [diff] [blame] | 8510 | if (rt_period == 0) |
| 8511 | return -EINVAL; |
| 8512 | |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8513 | return tg_set_bandwidth(tg, rt_period, rt_runtime); |
| 8514 | } |
| 8515 | |
| 8516 | long sched_group_rt_period(struct task_group *tg) |
| 8517 | { |
| 8518 | u64 rt_period_us; |
| 8519 | |
| 8520 | rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period); |
| 8521 | do_div(rt_period_us, NSEC_PER_USEC); |
| 8522 | return rt_period_us; |
| 8523 | } |
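/*
 * Illustrative example (not part of the original file): the group interface
 * above is in microseconds while rt_bandwidth stores nanoseconds, and a
 * negative runtime means "unlimited" (RUNTIME_INF).  A userspace sketch of
 * the conversions, for a group whose RT tasks may use 95% of each period:
 */
#if 0	/* example only -- never compiled */
#include <stdio.h>

#define EX_NSEC_PER_USEC	1000ULL

int main(void)
{
	long long runtime_us = 950000;	/* 950ms of RT time ... */
	long long period_us = 1000000;	/* ... in every 1s period */

	if (runtime_us < 0)
		printf("runtime: unlimited\n");
	else
		printf("runtime: %llu ns\n", runtime_us * EX_NSEC_PER_USEC);
	printf("period:  %llu ns\n", period_us * EX_NSEC_PER_USEC);
	return 0;
}
#endif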
| 8524 | |
| 8525 | static int sched_rt_global_constraints(void) |
| 8526 | { |
Peter Zijlstra | 4653f80 | 2008-09-23 15:33:44 +0200 | [diff] [blame] | 8527 | u64 runtime, period; |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8528 | int ret = 0; |
| 8529 | |
Hiroshi Shimamoto | ec5d498 | 2008-09-10 17:00:19 -0700 | [diff] [blame] | 8530 | if (sysctl_sched_rt_period <= 0) |
| 8531 | return -EINVAL; |
| 8532 | |
Peter Zijlstra | 4653f80 | 2008-09-23 15:33:44 +0200 | [diff] [blame] | 8533 | runtime = global_rt_runtime(); |
| 8534 | period = global_rt_period(); |
| 8535 | |
| 8536 | /* |
| 8537 | * Sanity check on the sysctl variables. |
| 8538 | */ |
| 8539 | if (runtime > period && runtime != RUNTIME_INF) |
| 8540 | return -EINVAL; |
Peter Zijlstra | 10b612f | 2008-06-19 14:22:27 +0200 | [diff] [blame] | 8541 | |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8542 | mutex_lock(&rt_constraints_mutex); |
Peter Zijlstra | 9a7e0b1 | 2008-08-19 12:33:06 +0200 | [diff] [blame] | 8543 | read_lock(&tasklist_lock); |
Peter Zijlstra | 4653f80 | 2008-09-23 15:33:44 +0200 | [diff] [blame] | 8544 | ret = __rt_schedulable(NULL, 0, 0); |
Peter Zijlstra | 9a7e0b1 | 2008-08-19 12:33:06 +0200 | [diff] [blame] | 8545 | read_unlock(&tasklist_lock); |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8546 | mutex_unlock(&rt_constraints_mutex); |
| 8547 | |
| 8548 | return ret; |
| 8549 | } |
Dhaval Giani | 54e9912 | 2009-02-27 15:13:54 +0530 | [diff] [blame] | 8550 | |
| 8551 | int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk) |
| 8552 | { |
| 8553 | /* Don't accept realtime tasks when there is no way for them to run */ |
| 8554 | if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0) |
| 8555 | return 0; |
| 8556 | |
| 8557 | return 1; |
| 8558 | } |
| 8559 | |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 8560 | #else /* !CONFIG_RT_GROUP_SCHED */ |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8561 | static int sched_rt_global_constraints(void) |
| 8562 | { |
Peter Zijlstra | ac086bc | 2008-04-19 19:44:58 +0200 | [diff] [blame] | 8563 | unsigned long flags; |
| 8564 | int i; |
| 8565 | |
Hiroshi Shimamoto | ec5d498 | 2008-09-10 17:00:19 -0700 | [diff] [blame] | 8566 | if (sysctl_sched_rt_period <= 0) |
| 8567 | return -EINVAL; |
| 8568 | |
Peter Zijlstra | 60aa605 | 2009-05-05 17:50:21 +0200 | [diff] [blame] | 8569 | /* |
| 8570 | * There are always some RT tasks in the root group
| 8571 | * -- migration, kstopmachine etc.
| 8572 | */ |
| 8573 | if (sysctl_sched_rt_runtime == 0) |
| 8574 | return -EBUSY; |
| 8575 | |
Thomas Gleixner | 0986b11 | 2009-11-17 15:32:06 +0100 | [diff] [blame] | 8576 | raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); |
Peter Zijlstra | ac086bc | 2008-04-19 19:44:58 +0200 | [diff] [blame] | 8577 | for_each_possible_cpu(i) { |
| 8578 | struct rt_rq *rt_rq = &cpu_rq(i)->rt; |
| 8579 | |
Thomas Gleixner | 0986b11 | 2009-11-17 15:32:06 +0100 | [diff] [blame] | 8580 | raw_spin_lock(&rt_rq->rt_runtime_lock); |
Peter Zijlstra | ac086bc | 2008-04-19 19:44:58 +0200 | [diff] [blame] | 8581 | rt_rq->rt_runtime = global_rt_runtime(); |
Thomas Gleixner | 0986b11 | 2009-11-17 15:32:06 +0100 | [diff] [blame] | 8582 | raw_spin_unlock(&rt_rq->rt_runtime_lock); |
Peter Zijlstra | ac086bc | 2008-04-19 19:44:58 +0200 | [diff] [blame] | 8583 | } |
Thomas Gleixner | 0986b11 | 2009-11-17 15:32:06 +0100 | [diff] [blame] | 8584 | raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); |
Peter Zijlstra | ac086bc | 2008-04-19 19:44:58 +0200 | [diff] [blame] | 8585 | |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8586 | return 0; |
| 8587 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 8588 | #endif /* CONFIG_RT_GROUP_SCHED */ |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8589 | |
| 8590 | int sched_rt_handler(struct ctl_table *table, int write, |
Alexey Dobriyan | 8d65af7 | 2009-09-23 15:57:19 -0700 | [diff] [blame] | 8591 | void __user *buffer, size_t *lenp, |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8592 | loff_t *ppos) |
| 8593 | { |
| 8594 | int ret; |
| 8595 | int old_period, old_runtime; |
| 8596 | static DEFINE_MUTEX(mutex); |
| 8597 | |
| 8598 | mutex_lock(&mutex); |
| 8599 | old_period = sysctl_sched_rt_period; |
| 8600 | old_runtime = sysctl_sched_rt_runtime; |
| 8601 | |
Alexey Dobriyan | 8d65af7 | 2009-09-23 15:57:19 -0700 | [diff] [blame] | 8602 | ret = proc_dointvec(table, write, buffer, lenp, ppos); |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8603 | |
| 8604 | if (!ret && write) { |
| 8605 | ret = sched_rt_global_constraints(); |
| 8606 | if (ret) { |
| 8607 | sysctl_sched_rt_period = old_period; |
| 8608 | sysctl_sched_rt_runtime = old_runtime; |
| 8609 | } else { |
| 8610 | def_rt_bandwidth.rt_runtime = global_rt_runtime(); |
| 8611 | def_rt_bandwidth.rt_period = |
| 8612 | ns_to_ktime(global_rt_period()); |
| 8613 | } |
| 8614 | } |
| 8615 | mutex_unlock(&mutex); |
| 8616 | |
| 8617 | return ret; |
| 8618 | } |
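/*
 * Illustrative example (not part of the original file): this handler backs
 * the /proc/sys/kernel/sched_rt_period_us and sched_rt_runtime_us sysctls.
 * A userspace sketch that caps RT tasks at 800ms of every (default) 1s
 * period; sched_rt_global_constraints() rejects inconsistent values:
 */
#if 0	/* example only -- never compiled */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/sched_rt_runtime_us", "w");

	if (!f)
		return 1;
	fprintf(f, "800000\n");
	return fclose(f) ? 1 : 0;
}
#endif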
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 8619 | |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8620 | #ifdef CONFIG_CGROUP_SCHED |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 8621 | |
| 8622 | /* return corresponding task_group object of a cgroup */ |
Paul Menage | 2b01dfe | 2007-10-24 18:23:50 +0200 | [diff] [blame] | 8623 | static inline struct task_group *cgroup_tg(struct cgroup *cgrp) |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 8624 | { |
Paul Menage | 2b01dfe | 2007-10-24 18:23:50 +0200 | [diff] [blame] | 8625 | return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id), |
| 8626 | struct task_group, css); |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 8627 | } |
| 8628 | |
| 8629 | static struct cgroup_subsys_state * |
Paul Menage | 2b01dfe | 2007-10-24 18:23:50 +0200 | [diff] [blame] | 8630 | cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp) |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 8631 | { |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8632 | struct task_group *tg, *parent; |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 8633 | |
Paul Menage | 2b01dfe | 2007-10-24 18:23:50 +0200 | [diff] [blame] | 8634 | if (!cgrp->parent) { |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 8635 | /* This is early initialization for the top cgroup */ |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 8636 | return &init_task_group.css; |
| 8637 | } |
| 8638 | |
Dhaval Giani | ec7dc8a | 2008-04-19 19:44:59 +0200 | [diff] [blame] | 8639 | parent = cgroup_tg(cgrp->parent); |
| 8640 | tg = sched_create_group(parent); |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 8641 | if (IS_ERR(tg)) |
| 8642 | return ERR_PTR(-ENOMEM); |
| 8643 | |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 8644 | return &tg->css; |
| 8645 | } |
| 8646 | |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 8647 | static void |
| 8648 | cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp) |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 8649 | { |
Paul Menage | 2b01dfe | 2007-10-24 18:23:50 +0200 | [diff] [blame] | 8650 | struct task_group *tg = cgroup_tg(cgrp); |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 8651 | |
| 8652 | sched_destroy_group(tg); |
| 8653 | } |
| 8654 | |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 8655 | static int |
Ben Blum | be367d0 | 2009-09-23 15:56:31 -0700 | [diff] [blame] | 8656 | cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk) |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 8657 | { |
Peter Zijlstra | b68aa23 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8658 | #ifdef CONFIG_RT_GROUP_SCHED |
Dhaval Giani | 54e9912 | 2009-02-27 15:13:54 +0530 | [diff] [blame] | 8659 | if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk)) |
Peter Zijlstra | b68aa23 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8660 | return -EINVAL; |
| 8661 | #else |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 8662 | /* We don't support RT-tasks being in separate groups */ |
| 8663 | if (tsk->sched_class != &fair_sched_class) |
| 8664 | return -EINVAL; |
Peter Zijlstra | b68aa23 | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8665 | #endif |
Ben Blum | be367d0 | 2009-09-23 15:56:31 -0700 | [diff] [blame] | 8666 | return 0; |
| 8667 | } |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 8668 | |
Ben Blum | be367d0 | 2009-09-23 15:56:31 -0700 | [diff] [blame] | 8669 | static int |
| 8670 | cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, |
| 8671 | struct task_struct *tsk, bool threadgroup) |
| 8672 | { |
| 8673 | int retval = cpu_cgroup_can_attach_task(cgrp, tsk); |
| 8674 | if (retval) |
| 8675 | return retval; |
| 8676 | if (threadgroup) { |
| 8677 | struct task_struct *c; |
| 8678 | rcu_read_lock(); |
| 8679 | list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) { |
| 8680 | retval = cpu_cgroup_can_attach_task(cgrp, c); |
| 8681 | if (retval) { |
| 8682 | rcu_read_unlock(); |
| 8683 | return retval; |
| 8684 | } |
| 8685 | } |
| 8686 | rcu_read_unlock(); |
| 8687 | } |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 8688 | return 0; |
| 8689 | } |
| 8690 | |
| 8691 | static void |
Paul Menage | 2b01dfe | 2007-10-24 18:23:50 +0200 | [diff] [blame] | 8692 | cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, |
Ben Blum | be367d0 | 2009-09-23 15:56:31 -0700 | [diff] [blame] | 8693 | struct cgroup *old_cont, struct task_struct *tsk, |
| 8694 | bool threadgroup) |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 8695 | { |
| 8696 | sched_move_task(tsk); |
Ben Blum | be367d0 | 2009-09-23 15:56:31 -0700 | [diff] [blame] | 8697 | if (threadgroup) { |
| 8698 | struct task_struct *c; |
| 8699 | rcu_read_lock(); |
| 8700 | list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) { |
| 8701 | sched_move_task(c); |
| 8702 | } |
| 8703 | rcu_read_unlock(); |
| 8704 | } |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 8705 | } |
| 8706 | |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8707 | #ifdef CONFIG_FAIR_GROUP_SCHED |
Paul Menage | f4c753b | 2008-04-29 00:59:56 -0700 | [diff] [blame] | 8708 | static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype, |
Paul Menage | 2b01dfe | 2007-10-24 18:23:50 +0200 | [diff] [blame] | 8709 | u64 shareval) |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 8710 | { |
Paul Menage | 2b01dfe | 2007-10-24 18:23:50 +0200 | [diff] [blame] | 8711 | return sched_group_set_shares(cgroup_tg(cgrp), shareval); |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 8712 | } |
| 8713 | |
Paul Menage | f4c753b | 2008-04-29 00:59:56 -0700 | [diff] [blame] | 8714 | static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft) |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 8715 | { |
Paul Menage | 2b01dfe | 2007-10-24 18:23:50 +0200 | [diff] [blame] | 8716 | struct task_group *tg = cgroup_tg(cgrp); |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 8717 | |
| 8718 | return (u64) tg->shares; |
| 8719 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 8720 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 8721 | |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8722 | #ifdef CONFIG_RT_GROUP_SCHED |
Mirco Tischler | 0c70814 | 2008-05-14 16:05:46 -0700 | [diff] [blame] | 8723 | static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft, |
Paul Menage | 06ecb27 | 2008-04-29 01:00:06 -0700 | [diff] [blame] | 8724 | s64 val) |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8725 | { |
Paul Menage | 06ecb27 | 2008-04-29 01:00:06 -0700 | [diff] [blame] | 8726 | return sched_group_set_rt_runtime(cgroup_tg(cgrp), val); |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8727 | } |
| 8728 | |
Paul Menage | 06ecb27 | 2008-04-29 01:00:06 -0700 | [diff] [blame] | 8729 | static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft) |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8730 | { |
Paul Menage | 06ecb27 | 2008-04-29 01:00:06 -0700 | [diff] [blame] | 8731 | return sched_group_rt_runtime(cgroup_tg(cgrp)); |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8732 | } |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8733 | |
| 8734 | static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype, |
| 8735 | u64 rt_period_us) |
| 8736 | { |
| 8737 | return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us); |
| 8738 | } |
| 8739 | |
| 8740 | static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft) |
| 8741 | { |
| 8742 | return sched_group_rt_period(cgroup_tg(cgrp)); |
| 8743 | } |
Dhaval Giani | 6d6bc0a | 2008-05-30 14:23:45 +0200 | [diff] [blame] | 8744 | #endif /* CONFIG_RT_GROUP_SCHED */ |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8745 | |
Paul Menage | fe5c7cc | 2007-10-29 21:18:11 +0100 | [diff] [blame] | 8746 | static struct cftype cpu_files[] = { |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8747 | #ifdef CONFIG_FAIR_GROUP_SCHED |
Paul Menage | fe5c7cc | 2007-10-29 21:18:11 +0100 | [diff] [blame] | 8748 | { |
| 8749 | .name = "shares", |
Paul Menage | f4c753b | 2008-04-29 00:59:56 -0700 | [diff] [blame] | 8750 | .read_u64 = cpu_shares_read_u64, |
| 8751 | .write_u64 = cpu_shares_write_u64, |
Paul Menage | fe5c7cc | 2007-10-29 21:18:11 +0100 | [diff] [blame] | 8752 | }, |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8753 | #endif |
| 8754 | #ifdef CONFIG_RT_GROUP_SCHED |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8755 | { |
Peter Zijlstra | 9f0c1e5 | 2008-02-13 15:45:39 +0100 | [diff] [blame] | 8756 | .name = "rt_runtime_us", |
Paul Menage | 06ecb27 | 2008-04-29 01:00:06 -0700 | [diff] [blame] | 8757 | .read_s64 = cpu_rt_runtime_read, |
| 8758 | .write_s64 = cpu_rt_runtime_write, |
Peter Zijlstra | 6f505b1 | 2008-01-25 21:08:30 +0100 | [diff] [blame] | 8759 | }, |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8760 | { |
| 8761 | .name = "rt_period_us", |
Paul Menage | f4c753b | 2008-04-29 00:59:56 -0700 | [diff] [blame] | 8762 | .read_u64 = cpu_rt_period_read_uint, |
| 8763 | .write_u64 = cpu_rt_period_write_uint, |
Peter Zijlstra | d0b27fa | 2008-04-19 19:44:57 +0200 | [diff] [blame] | 8764 | }, |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8765 | #endif |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 8766 | }; |
| 8767 | |
| 8768 | static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont) |
| 8769 | { |
Paul Menage | fe5c7cc | 2007-10-29 21:18:11 +0100 | [diff] [blame] | 8770 | return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files)); |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 8771 | } |
| 8772 | |
| 8773 | struct cgroup_subsys cpu_cgroup_subsys = { |
Ingo Molnar | 38605ca | 2007-10-29 21:18:11 +0100 | [diff] [blame] | 8774 | .name = "cpu", |
| 8775 | .create = cpu_cgroup_create, |
| 8776 | .destroy = cpu_cgroup_destroy, |
| 8777 | .can_attach = cpu_cgroup_can_attach, |
| 8778 | .attach = cpu_cgroup_attach, |
| 8779 | .populate = cpu_cgroup_populate, |
| 8780 | .subsys_id = cpu_cgroup_subsys_id, |
Srivatsa Vaddagiri | 68318b8 | 2007-10-18 23:41:03 -0700 | [diff] [blame] | 8781 | .early_init = 1, |
| 8782 | }; |
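/*
 * Illustrative example (not part of the original file): once the "cpu"
 * controller is mounted, the cftype entries above appear as cpu.shares,
 * cpu.rt_runtime_us and cpu.rt_period_us inside each cgroup directory.
 * A userspace sketch; the /cgroup/cpu mount point is an assumption:
 */
#if 0	/* example only -- never compiled */
#include <stdio.h>
#include <errno.h>
#include <sys/stat.h>
#include <sys/types.h>

int main(void)
{
	FILE *f;

	/* Create a group and halve its weight relative to the default 1024. */
	if (mkdir("/cgroup/cpu/lowprio", 0755) && errno != EEXIST)
		return 1;

	f = fopen("/cgroup/cpu/lowprio/cpu.shares", "w");
	if (!f)
		return 1;
	fprintf(f, "512\n");
	return fclose(f) ? 1 : 0;
}
#endif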
| 8783 | |
Peter Zijlstra | 052f1dc | 2008-02-13 15:45:40 +0100 | [diff] [blame] | 8784 | #endif /* CONFIG_CGROUP_SCHED */ |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 8785 | |
| 8786 | #ifdef CONFIG_CGROUP_CPUACCT |
| 8787 | |
| 8788 | /* |
| 8789 | * CPU accounting code for task groups. |
| 8790 | * |
| 8791 | * Based on the work by Paul Menage (menage@google.com) and Balbir Singh |
| 8792 | * (balbir@in.ibm.com). |
| 8793 | */ |
| 8794 | |
Bharata B Rao | 934352f | 2008-11-10 20:41:13 +0530 | [diff] [blame] | 8795 | /* track cpu usage of a group of tasks and its child groups */ |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 8796 | struct cpuacct { |
| 8797 | struct cgroup_subsys_state css; |
| 8798 | /* cpuusage holds pointer to a u64-type object on every cpu */ |
Tejun Heo | 43cf38e | 2010-02-02 14:38:57 +0900 | [diff] [blame] | 8799 | u64 __percpu *cpuusage; |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 8800 | struct percpu_counter cpustat[CPUACCT_STAT_NSTATS]; |
Bharata B Rao | 934352f | 2008-11-10 20:41:13 +0530 | [diff] [blame] | 8801 | struct cpuacct *parent; |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 8802 | }; |
| 8803 | |
| 8804 | struct cgroup_subsys cpuacct_subsys; |
| 8805 | |
| 8806 | /* return cpu accounting group corresponding to this container */ |
Dhaval Giani | 32cd756 | 2008-02-29 10:02:43 +0530 | [diff] [blame] | 8807 | static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp) |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 8808 | { |
Dhaval Giani | 32cd756 | 2008-02-29 10:02:43 +0530 | [diff] [blame] | 8809 | return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id), |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 8810 | struct cpuacct, css); |
| 8811 | } |
| 8812 | |
| 8813 | /* return cpu accounting group to which this task belongs */ |
| 8814 | static inline struct cpuacct *task_ca(struct task_struct *tsk) |
| 8815 | { |
| 8816 | return container_of(task_subsys_state(tsk, cpuacct_subsys_id), |
| 8817 | struct cpuacct, css); |
| 8818 | } |
| 8819 | |
| 8820 | /* create a new cpu accounting group */ |
| 8821 | static struct cgroup_subsys_state *cpuacct_create( |
Dhaval Giani | 32cd756 | 2008-02-29 10:02:43 +0530 | [diff] [blame] | 8822 | struct cgroup_subsys *ss, struct cgroup *cgrp) |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 8823 | { |
| 8824 | struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL); |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 8825 | int i; |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 8826 | |
| 8827 | if (!ca) |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 8828 | goto out; |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 8829 | |
| 8830 | ca->cpuusage = alloc_percpu(u64); |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 8831 | if (!ca->cpuusage) |
| 8832 | goto out_free_ca; |
| 8833 | |
| 8834 | for (i = 0; i < CPUACCT_STAT_NSTATS; i++) |
| 8835 | if (percpu_counter_init(&ca->cpustat[i], 0)) |
| 8836 | goto out_free_counters; |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 8837 | |
Bharata B Rao | 934352f | 2008-11-10 20:41:13 +0530 | [diff] [blame] | 8838 | if (cgrp->parent) |
| 8839 | ca->parent = cgroup_ca(cgrp->parent); |
| 8840 | |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 8841 | return &ca->css; |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 8842 | |
| 8843 | out_free_counters: |
| 8844 | while (--i >= 0) |
| 8845 | percpu_counter_destroy(&ca->cpustat[i]); |
| 8846 | free_percpu(ca->cpuusage); |
| 8847 | out_free_ca: |
| 8848 | kfree(ca); |
| 8849 | out: |
| 8850 | return ERR_PTR(-ENOMEM); |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 8851 | } |
| 8852 | |
| 8853 | /* destroy an existing cpu accounting group */ |
Ingo Molnar | 41a2d6c | 2007-12-05 15:46:09 +0100 | [diff] [blame] | 8854 | static void |
Dhaval Giani | 32cd756 | 2008-02-29 10:02:43 +0530 | [diff] [blame] | 8855 | cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp) |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 8856 | { |
Dhaval Giani | 32cd756 | 2008-02-29 10:02:43 +0530 | [diff] [blame] | 8857 | struct cpuacct *ca = cgroup_ca(cgrp); |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 8858 | int i; |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 8859 | |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 8860 | for (i = 0; i < CPUACCT_STAT_NSTATS; i++) |
| 8861 | percpu_counter_destroy(&ca->cpustat[i]); |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 8862 | free_percpu(ca->cpuusage); |
| 8863 | kfree(ca); |
| 8864 | } |
| 8865 | |
Ken Chen | 720f549 | 2008-12-15 22:02:01 -0800 | [diff] [blame] | 8866 | static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu) |
| 8867 | { |
Rusty Russell | b36128c | 2009-02-20 16:29:08 +0900 | [diff] [blame] | 8868 | u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); |
Ken Chen | 720f549 | 2008-12-15 22:02:01 -0800 | [diff] [blame] | 8869 | u64 data; |
| 8870 | |
| 8871 | #ifndef CONFIG_64BIT |
| 8872 | /* |
| 8873 | * Take rq->lock to make 64-bit read safe on 32-bit platforms. |
| 8874 | */ |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 8875 | raw_spin_lock_irq(&cpu_rq(cpu)->lock); |
Ken Chen | 720f549 | 2008-12-15 22:02:01 -0800 | [diff] [blame] | 8876 | data = *cpuusage; |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 8877 | raw_spin_unlock_irq(&cpu_rq(cpu)->lock); |
Ken Chen | 720f549 | 2008-12-15 22:02:01 -0800 | [diff] [blame] | 8878 | #else |
| 8879 | data = *cpuusage; |
| 8880 | #endif |
| 8881 | |
| 8882 | return data; |
| 8883 | } |
| 8884 | |
| 8885 | static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val) |
| 8886 | { |
Rusty Russell | b36128c | 2009-02-20 16:29:08 +0900 | [diff] [blame] | 8887 | u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); |
Ken Chen | 720f549 | 2008-12-15 22:02:01 -0800 | [diff] [blame] | 8888 | |
| 8889 | #ifndef CONFIG_64BIT |
| 8890 | /* |
| 8891 | * Take rq->lock to make 64-bit write safe on 32-bit platforms. |
| 8892 | */ |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 8893 | raw_spin_lock_irq(&cpu_rq(cpu)->lock); |
Ken Chen | 720f549 | 2008-12-15 22:02:01 -0800 | [diff] [blame] | 8894 | *cpuusage = val; |
Thomas Gleixner | 05fa785 | 2009-11-17 14:28:38 +0100 | [diff] [blame] | 8895 | raw_spin_unlock_irq(&cpu_rq(cpu)->lock); |
Ken Chen | 720f549 | 2008-12-15 22:02:01 -0800 | [diff] [blame] | 8896 | #else |
| 8897 | *cpuusage = val; |
| 8898 | #endif |
| 8899 | } |
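/*
 * Illustrative note (not part of the original file): on 32-bit a 64-bit
 * load or store is two 32-bit accesses.  If the counter moves from
 * 0x00000000ffffffff to 0x0000000100000000 between the two halves, an
 * unlocked reader could observe 0x00000001ffffffff or 0, values that
 * never existed.  Taking rq->lock in the helpers above rules that out.
 */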
| 8900 | |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 8901 | /* return total cpu usage (in nanoseconds) of a group */ |
Dhaval Giani | 32cd756 | 2008-02-29 10:02:43 +0530 | [diff] [blame] | 8902 | static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft) |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 8903 | { |
Dhaval Giani | 32cd756 | 2008-02-29 10:02:43 +0530 | [diff] [blame] | 8904 | struct cpuacct *ca = cgroup_ca(cgrp); |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 8905 | u64 totalcpuusage = 0; |
| 8906 | int i; |
| 8907 | |
Ken Chen | 720f549 | 2008-12-15 22:02:01 -0800 | [diff] [blame] | 8908 | for_each_present_cpu(i) |
| 8909 | totalcpuusage += cpuacct_cpuusage_read(ca, i); |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 8910 | |
| 8911 | return totalcpuusage; |
| 8912 | } |
| 8913 | |
Dhaval Giani | 0297b80 | 2008-02-29 10:02:44 +0530 | [diff] [blame] | 8914 | static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype, |
| 8915 | u64 reset) |
| 8916 | { |
| 8917 | struct cpuacct *ca = cgroup_ca(cgrp); |
| 8918 | int err = 0; |
| 8919 | int i; |
| 8920 | |
| 8921 | if (reset) { |
| 8922 | err = -EINVAL; |
| 8923 | goto out; |
| 8924 | } |
| 8925 | |
Ken Chen | 720f549 | 2008-12-15 22:02:01 -0800 | [diff] [blame] | 8926 | for_each_present_cpu(i) |
| 8927 | cpuacct_cpuusage_write(ca, i, 0); |
Dhaval Giani | 0297b80 | 2008-02-29 10:02:44 +0530 | [diff] [blame] | 8928 | |
Dhaval Giani | 0297b80 | 2008-02-29 10:02:44 +0530 | [diff] [blame] | 8929 | out: |
| 8930 | return err; |
| 8931 | } |
| 8932 | |
Ken Chen | e9515c3 | 2008-12-15 22:04:15 -0800 | [diff] [blame] | 8933 | static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft, |
| 8934 | struct seq_file *m) |
| 8935 | { |
| 8936 | struct cpuacct *ca = cgroup_ca(cgroup); |
| 8937 | u64 percpu; |
| 8938 | int i; |
| 8939 | |
| 8940 | for_each_present_cpu(i) { |
| 8941 | percpu = cpuacct_cpuusage_read(ca, i); |
| 8942 | seq_printf(m, "%llu ", (unsigned long long) percpu); |
| 8943 | } |
| 8944 | seq_printf(m, "\n"); |
| 8945 | return 0; |
| 8946 | } |
| 8947 | |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 8948 | static const char *cpuacct_stat_desc[] = { |
| 8949 | [CPUACCT_STAT_USER] = "user", |
| 8950 | [CPUACCT_STAT_SYSTEM] = "system", |
| 8951 | }; |
| 8952 | |
| 8953 | static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft, |
| 8954 | struct cgroup_map_cb *cb) |
| 8955 | { |
| 8956 | struct cpuacct *ca = cgroup_ca(cgrp); |
| 8957 | int i; |
| 8958 | |
| 8959 | for (i = 0; i < CPUACCT_STAT_NSTATS; i++) { |
| 8960 | s64 val = percpu_counter_read(&ca->cpustat[i]); |
| 8961 | val = cputime64_to_clock_t(val); |
| 8962 | cb->fill(cb, cpuacct_stat_desc[i], val); |
| 8963 | } |
| 8964 | return 0; |
| 8965 | } |
| 8966 | |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 8967 | static struct cftype files[] = { |
| 8968 | { |
| 8969 | .name = "usage", |
Paul Menage | f4c753b | 2008-04-29 00:59:56 -0700 | [diff] [blame] | 8970 | .read_u64 = cpuusage_read, |
| 8971 | .write_u64 = cpuusage_write, |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 8972 | }, |
Ken Chen | e9515c3 | 2008-12-15 22:04:15 -0800 | [diff] [blame] | 8973 | { |
| 8974 | .name = "usage_percpu", |
| 8975 | .read_seq_string = cpuacct_percpu_seq_read, |
| 8976 | }, |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 8977 | { |
| 8978 | .name = "stat", |
| 8979 | .read_map = cpuacct_stats_show, |
| 8980 | }, |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 8981 | }; |
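/*
 * Illustrative example (not part of the original file): the entries above
 * appear as cpuacct.usage, cpuacct.usage_percpu and cpuacct.stat in each
 * cpuacct cgroup.  A userspace sketch reading the stat file; the
 * /cgroup/cpuacct mount point is an assumption:
 */
#if 0	/* example only -- never compiled */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/cgroup/cpuacct/cpuacct.stat", "r");

	if (!f)
		return 1;
	/* Prints "user <ticks>" and "system <ticks>" in clock_t units. */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
#endif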
| 8982 | |
Dhaval Giani | 32cd756 | 2008-02-29 10:02:43 +0530 | [diff] [blame] | 8983 | static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 8984 | { |
Dhaval Giani | 32cd756 | 2008-02-29 10:02:43 +0530 | [diff] [blame] | 8985 | return cgroup_add_files(cgrp, ss, files, ARRAY_SIZE(files)); |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 8986 | } |
| 8987 | |
| 8988 | /* |
| 8989 | * charge this task's execution time to its accounting group. |
| 8990 | * |
| 8991 | * called with rq->lock held. |
| 8992 | */ |
| 8993 | static void cpuacct_charge(struct task_struct *tsk, u64 cputime) |
| 8994 | { |
| 8995 | struct cpuacct *ca; |
Bharata B Rao | 934352f | 2008-11-10 20:41:13 +0530 | [diff] [blame] | 8996 | int cpu; |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 8997 | |
Li Zefan | c40c6f8 | 2009-02-26 15:40:15 +0800 | [diff] [blame] | 8998 | if (unlikely(!cpuacct_subsys.active)) |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 8999 | return; |
| 9000 | |
Bharata B Rao | 934352f | 2008-11-10 20:41:13 +0530 | [diff] [blame] | 9001 | cpu = task_cpu(tsk); |
Bharata B Rao | a18b83b | 2009-03-23 10:02:53 +0530 | [diff] [blame] | 9002 | |
| 9003 | rcu_read_lock(); |
| 9004 | |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9005 | ca = task_ca(tsk); |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9006 | |
Bharata B Rao | 934352f | 2008-11-10 20:41:13 +0530 | [diff] [blame] | 9007 | for (; ca; ca = ca->parent) { |
Rusty Russell | b36128c | 2009-02-20 16:29:08 +0900 | [diff] [blame] | 9008 | u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9009 | *cpuusage += cputime; |
| 9010 | } |
Bharata B Rao | a18b83b | 2009-03-23 10:02:53 +0530 | [diff] [blame] | 9011 | |
| 9012 | rcu_read_unlock(); |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9013 | } |
| 9014 | |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 9015 | /* |
Anton Blanchard | fa535a7 | 2010-02-02 14:46:13 -0800 | [diff] [blame] | 9016 | * When CONFIG_VIRT_CPU_ACCOUNTING is enabled one jiffy can be very large |
| 9017 | * in cputime_t units. As a result, cpuacct_update_stats calls |
| 9018 | * percpu_counter_add with values large enough to always overflow the |
| 9019 | * per-CPU batch limit, causing bad SMP scalability.
| 9020 | * |
| 9021 | * To fix this we scale percpu_counter_batch by cputime_one_jiffy so we |
| 9022 | * batch the same amount of time with CONFIG_VIRT_CPU_ACCOUNTING disabled |
| 9023 | * and enabled. We cap it at INT_MAX which is the largest allowed batch value. |
| 9024 | */ |
| 9025 | #ifdef CONFIG_SMP |
| 9026 | #define CPUACCT_BATCH \ |
| 9027 | min_t(long, percpu_counter_batch * cputime_one_jiffy, INT_MAX) |
| 9028 | #else |
| 9029 | #define CPUACCT_BATCH 0 |
| 9030 | #endif |
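/*
 * Illustrative example (not part of the original file): assuming a
 * percpu_counter_batch of 32, HZ=100 and nanosecond-granularity cputime,
 * one jiffy is 10,000,000 units and the effective batch becomes
 * min(32 * 10000000, INT_MAX) = 320,000,000, i.e. still roughly 32
 * jiffies worth of time per CPU, as in the !VIRT_CPU_ACCOUNTING case.
 */
#if 0	/* example only -- never compiled */
#include <stdio.h>
#include <limits.h>

int main(void)
{
	long pc_batch = 32;		/* assumed percpu_counter_batch */
	long one_jiffy = 10000000;	/* 10ms jiffy in ns-granularity cputime */
	long batch = pc_batch * one_jiffy;

	if (batch > INT_MAX)
		batch = INT_MAX;
	printf("effective batch = %ld\n", batch);	/* 320000000 */
	return 0;
}
#endif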
| 9031 | |
| 9032 | /* |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 9033 | * Charge the system/user time to the task's accounting group. |
| 9034 | */ |
| 9035 | static void cpuacct_update_stats(struct task_struct *tsk, |
| 9036 | enum cpuacct_stat_index idx, cputime_t val) |
| 9037 | { |
| 9038 | struct cpuacct *ca; |
Anton Blanchard | fa535a7 | 2010-02-02 14:46:13 -0800 | [diff] [blame] | 9039 | int batch = CPUACCT_BATCH; |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 9040 | |
| 9041 | if (unlikely(!cpuacct_subsys.active)) |
| 9042 | return; |
| 9043 | |
| 9044 | rcu_read_lock(); |
| 9045 | ca = task_ca(tsk); |
| 9046 | |
| 9047 | do { |
Anton Blanchard | fa535a7 | 2010-02-02 14:46:13 -0800 | [diff] [blame] | 9048 | __percpu_counter_add(&ca->cpustat[idx], val, batch); |
Bharata B Rao | ef12fef | 2009-03-31 10:02:22 +0530 | [diff] [blame] | 9049 | ca = ca->parent; |
| 9050 | } while (ca); |
| 9051 | rcu_read_unlock(); |
| 9052 | } |
| 9053 | |
Srivatsa Vaddagiri | d842de8 | 2007-12-02 20:04:49 +0100 | [diff] [blame] | 9054 | struct cgroup_subsys cpuacct_subsys = { |
| 9055 | .name = "cpuacct", |
| 9056 | .create = cpuacct_create, |
| 9057 | .destroy = cpuacct_destroy, |
| 9058 | .populate = cpuacct_populate, |
| 9059 | .subsys_id = cpuacct_subsys_id, |
| 9060 | }; |
| 9061 | #endif /* CONFIG_CGROUP_CPUACCT */ |
Paul E. McKenney | 03b042b | 2009-06-25 09:08:16 -0700 | [diff] [blame] | 9062 | |
| 9063 | #ifndef CONFIG_SMP |
| 9064 | |
Paul E. McKenney | 03b042b | 2009-06-25 09:08:16 -0700 | [diff] [blame] | 9065 | void synchronize_sched_expedited(void) |
| 9066 | { |
Paul E. McKenney | fc390cd | 2010-05-06 11:42:52 -0700 | [diff] [blame] | 9067 | barrier(); |
Paul E. McKenney | 03b042b | 2009-06-25 09:08:16 -0700 | [diff] [blame] | 9068 | } |
| 9069 | EXPORT_SYMBOL_GPL(synchronize_sched_expedited); |
| 9070 | |
| 9071 | #else /* #ifndef CONFIG_SMP */ |
| 9072 | |
Paul E. McKenney | cc631fb | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 9073 | static atomic_t synchronize_sched_expedited_count = ATOMIC_INIT(0); |
Paul E. McKenney | 03b042b | 2009-06-25 09:08:16 -0700 | [diff] [blame] | 9074 | |
Paul E. McKenney | cc631fb | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 9075 | static int synchronize_sched_expedited_cpu_stop(void *data) |
Paul E. McKenney | 03b042b | 2009-06-25 09:08:16 -0700 | [diff] [blame] | 9076 | { |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 9077 | /* |
| 9078 | * There must be a full memory barrier on each affected CPU |
| 9079 | * between the time that try_stop_cpus() is called and the |
| 9080 | * time that it returns. |
| 9081 | * |
| 9082 | * In the current initial implementation of cpu_stop, the |
| 9083 | * above condition is already met when the control reaches |
| 9084 | * this point and the following smp_mb() is not strictly |
| 9085 | * necessary. Do smp_mb() anyway for documentation and |
| 9086 | * robustness against future implementation changes. |
| 9087 | */ |
Paul E. McKenney | cc631fb | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 9088 | smp_mb(); /* See above comment block. */ |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 9089 | return 0; |
Paul E. McKenney | 03b042b | 2009-06-25 09:08:16 -0700 | [diff] [blame] | 9090 | } |
Paul E. McKenney | 03b042b | 2009-06-25 09:08:16 -0700 | [diff] [blame] | 9091 | |
| 9092 | /* |
| 9093 | * Wait for an rcu-sched grace period to elapse, but use a "big hammer"
| 9094 | * approach to force the grace period to end quickly. This consumes
| 9095 | * significant time on all CPUs, and is thus not recommended for |
| 9096 | * any sort of common-case code. |
| 9097 | * |
| 9098 | * Note that it is illegal to call this function while holding any |
| 9099 | * lock that is acquired by a CPU-hotplug notifier. Failing to |
| 9100 | * observe this restriction will result in deadlock. |
| 9101 | */ |
| 9102 | void synchronize_sched_expedited(void) |
| 9103 | { |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 9104 | int snap, trycount = 0; |
Paul E. McKenney | 03b042b | 2009-06-25 09:08:16 -0700 | [diff] [blame] | 9105 | |
| 9106 | smp_mb(); /* ensure prior mod happens before capturing snap. */ |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 9107 | snap = atomic_read(&synchronize_sched_expedited_count) + 1; |
Paul E. McKenney | 03b042b | 2009-06-25 09:08:16 -0700 | [diff] [blame] | 9108 | get_online_cpus(); |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 9109 | while (try_stop_cpus(cpu_online_mask, |
| 9110 | synchronize_sched_expedited_cpu_stop, |
Tejun Heo | 94458d5 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 9111 | NULL) == -EAGAIN) { |
Paul E. McKenney | 03b042b | 2009-06-25 09:08:16 -0700 | [diff] [blame] | 9112 | put_online_cpus(); |
| 9113 | if (trycount++ < 10) |
| 9114 | udelay(trycount * num_online_cpus()); |
| 9115 | else { |
| 9116 | synchronize_sched(); |
| 9117 | return; |
| 9118 | } |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 9119 | if (atomic_read(&synchronize_sched_expedited_count) - snap > 0) { |
Paul E. McKenney | 03b042b | 2009-06-25 09:08:16 -0700 | [diff] [blame] | 9120 | smp_mb(); /* ensure test happens before caller kfree */ |
| 9121 | return; |
| 9122 | } |
| 9123 | get_online_cpus(); |
| 9124 | } |
Tejun Heo | 969c792 | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 9125 | atomic_inc(&synchronize_sched_expedited_count); |
Paul E. McKenney | cc631fb | 2010-05-06 18:49:21 +0200 | [diff] [blame] | 9126 | smp_mb__after_atomic_inc(); /* ensure post-GP actions seen after GP. */ |
Paul E. McKenney | 03b042b | 2009-06-25 09:08:16 -0700 | [diff] [blame] | 9127 | put_online_cpus(); |
Paul E. McKenney | 03b042b | 2009-06-25 09:08:16 -0700 | [diff] [blame] | 9128 | } |
| 9129 | EXPORT_SYMBOL_GPL(synchronize_sched_expedited); |
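/*
 * Illustrative example (not part of the original file): the usual caller
 * pattern for an expedited RCU-sched grace period is "unpublish, wait,
 * free".  The names below (my_table_remove, my_obj) are hypothetical:
 */
#if 0	/* example only -- never compiled */
static void my_table_remove(struct my_obj **slot)
{
	struct my_obj *old = *slot;

	rcu_assign_pointer(*slot, NULL);	/* unpublish the object */
	synchronize_sched_expedited();		/* wait out rcu_read_lock_sched() readers */
	kfree(old);				/* no reader can still see it */
}
#endif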
| 9130 | |
| 9131 | #endif /* #else #ifndef CONFIG_SMP */ |