#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */

#include <linux/linkage.h>
#include <linux/list.h>

/*
 * We use the MSB mostly because it's available; see <linux/preempt_mask.h>
 * for the other bits -- we can't include that header due to inclusion hell.
 */
#define PREEMPT_NEED_RESCHED	0x80000000

#include <asm/preempt.h>

#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
#define preempt_count_dec_and_test() \
	({ preempt_count_sub(1); should_resched(0); })
#else
#define preempt_count_add(val)	__preempt_count_add(val)
#define preempt_count_sub(val)	__preempt_count_sub(val)
#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
#endif

#define __preempt_count_inc() __preempt_count_add(1)
#define __preempt_count_dec() __preempt_count_sub(1)

#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)

#ifdef CONFIG_PREEMPT_COUNT

#define preempt_disable() \
do { \
	preempt_count_inc(); \
	barrier(); \
} while (0)

#define sched_preempt_enable_no_resched() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_no_resched() sched_preempt_enable_no_resched()

#ifdef CONFIG_PREEMPT
#define preempt_enable() \
do { \
	barrier(); \
	if (unlikely(preempt_count_dec_and_test())) \
		__preempt_schedule(); \
} while (0)
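
/*
 * Illustrative sketch (not part of this header; my_percpu_counter is a
 * hypothetical variable): a typical preempt-protected critical section.
 * Note the ordering above: preempt_disable() increments the count and
 * then acts as a compiler barrier, while preempt_enable() is the barrier
 * first, then the decrement, rescheduling if the count drops to zero
 * with a resched pending.
 *
 *	preempt_disable();
 *	my_percpu_counter++;	// safe from migration and preemption
 *				// while the count is elevated
 *	preempt_enable();	// may call __preempt_schedule() here
 */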

#define preempt_check_resched() \
do { \
	if (should_resched(0)) \
		__preempt_schedule(); \
} while (0)

#else
#define preempt_enable() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)
#define preempt_check_resched() do { } while (0)
#endif

#define preempt_disable_notrace() \
do { \
	__preempt_count_inc(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#ifdef CONFIG_PREEMPT

#ifndef CONFIG_CONTEXT_TRACKING
#define __preempt_schedule_context() __preempt_schedule()
#endif

#define preempt_enable_notrace() \
do { \
	barrier(); \
	if (unlikely(__preempt_count_dec_and_test())) \
		__preempt_schedule_context(); \
} while (0)
#else
#define preempt_enable_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)
#endif
Steven Rostedt50282522008-05-12 21:20:41 +0200107
Frederic Weisbeckerbdd4e852011-06-08 01:13:27 +0200108#else /* !CONFIG_PREEMPT_COUNT */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109
Linus Torvalds386afc92013-04-09 10:48:33 -0700110/*
111 * Even if we don't have any preemption, we need preempt disable/enable
112 * to be barriers, so that we don't have things like get_user/put_user
113 * that can cause faults and scheduling migrate into our preempt-protected
114 * region.
115 */
Peter Zijlstrabdb43802013-09-10 12:15:23 +0200116#define preempt_disable() barrier()
Linus Torvalds386afc92013-04-09 10:48:33 -0700117#define sched_preempt_enable_no_resched() barrier()
Peter Zijlstrabdb43802013-09-10 12:15:23 +0200118#define preempt_enable_no_resched() barrier()
119#define preempt_enable() barrier()
120#define preempt_check_resched() do { } while (0)
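
/*
 * Illustrative sketch (my_percpu_ptr is hypothetical): without the
 * barrier() above, the compiler would be free to move memory accesses
 * across the preempt_disable()/preempt_enable() pair, e.g.:
 *
 *	preempt_disable();
 *	val = *my_percpu_ptr;	// must not be hoisted above
 *				// preempt_disable() or sunk below
 *				// preempt_enable() by the compiler
 *	preempt_enable();
 */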
Linus Torvalds1da177e2005-04-16 15:20:36 -0700121
Linus Torvalds386afc92013-04-09 10:48:33 -0700122#define preempt_disable_notrace() barrier()
123#define preempt_enable_no_resched_notrace() barrier()
124#define preempt_enable_notrace() barrier()
Steven Rostedt50282522008-05-12 21:20:41 +0200125
Frederic Weisbeckerbdd4e852011-06-08 01:13:27 +0200126#endif /* CONFIG_PREEMPT_COUNT */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700127
#ifdef MODULE
/*
 * Modules have no business playing preemption tricks.
 */
#undef sched_preempt_enable_no_resched
#undef preempt_enable_no_resched
#undef preempt_enable_no_resched_notrace
#undef preempt_check_resched
#endif

#define preempt_set_need_resched() \
do { \
	set_preempt_need_resched(); \
} while (0)
#define preempt_fold_need_resched() \
do { \
	if (tif_need_resched()) \
		set_preempt_need_resched(); \
} while (0)
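
/*
 * Illustrative note (a sketch, not a usage mandate): a path that knows
 * TIF_NEED_RESCHED may have been set remotely can fold that flag into
 * preempt_count, so the next preempt_enable() notices it:
 *
 *	preempt_fold_need_resched();	// mirrors TIF_NEED_RESCHED into
 *					// PREEMPT_NEED_RESCHED if set
 */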

#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *    notifier: struct preempt_notifier for the task being scheduled
 *    cpu:  cpu we're scheduled on
 * @sched_out: we've just been preempted
 *    notifier: struct preempt_notifier for the task being preempted
 *    next: the task that's kicking us out
 *
 * Please note that sched_in and sched_out are called under different
 * contexts: sched_out is called with the rq lock held and irqs disabled,
 * while sched_in is called without the rq lock and with irqs enabled.
 * This difference is intentional and depended upon by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};

/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;
	struct preempt_ops *ops;
};

void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);

static inline void preempt_notifier_init(struct preempt_notifier *notifier,
				     struct preempt_ops *ops)
{
	INIT_HLIST_NODE(&notifier->link);
	notifier->ops = ops;
}
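
/*
 * Illustrative sketch (the my_vcpu/my_ops names are hypothetical): embed
 * a preempt_notifier in a containing object and recover it in the
 * callbacks with container_of(), as the kerneldoc above suggests.
 *
 *	struct my_vcpu {
 *		struct preempt_notifier pn;
 *		int last_cpu;
 *	};
 *
 *	static void my_sched_in(struct preempt_notifier *pn, int cpu)
 *	{
 *		struct my_vcpu *v = container_of(pn, struct my_vcpu, pn);
 *		v->last_cpu = cpu;	// we're being scheduled back in
 *	}
 *
 *	static void my_sched_out(struct preempt_notifier *pn,
 *				 struct task_struct *next)
 *	{
 *		// rq lock held, irqs off: keep this path short
 *	}
 *
 *	static struct preempt_ops my_ops = {
 *		.sched_in	= my_sched_in,
 *		.sched_out	= my_sched_out,
 *	};
 *
 *	preempt_notifier_init(&v->pn, &my_ops);
 *	preempt_notifier_register(&v->pn);	// registers for current task
 */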

#endif

#endif /* __LINUX_PREEMPT_H */