author     Jens Axboe <jaxboe@fusionio.com>   2011-04-16 13:27:55 +0200
committer  Jens Axboe <jaxboe@fusionio.com>   2011-04-16 13:27:55 +0200
commit     a237c1c5bc5dc5c76a21be922dca4826f3eca8ca (patch)
tree       a216c9a6d9e870b84424938e9e0b4722dc8634cd
parent     5853b4f06f7b9b56f37f457d7923f7b96496074e (diff)
block: let io_schedule() flush the plug inline
Linus correctly observes that the most important dispatch cases are now done from kblockd, and this isn't ideal for latency reasons. The original reason for switching dispatches out-of-line was to avoid too deep a stack, so by _only_ letting the "accidental" flush directly in schedule() be guarded by offload to kblockd, we should be able to get the best of both worlds.

So add a blk_schedule_flush_plug() that offloads to kblockd, and only use that from the schedule() path.

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
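For context on what the boolean argument selects: blk_flush_plug_list(plug, from_schedule) uses the flag to pick the dispatch context, running the queues inline on the caller's stack when it is false and punting to the kblockd workqueue when it is true. A minimal sketch of that idea, paraphrased rather than copied from the block/blk-core.c of this kernel (queue_unplugged() is heavily simplified here):

static void queue_unplugged(struct request_queue *q, bool from_schedule)
{
	if (from_schedule)
		/* schedule() path: punt to kblockd so we do not
		 * dispatch on an arbitrarily deep stack */
		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
	else
		/* explicit flush: run the queue inline for
		 * lower latency */
		q->request_fn(q);
}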
-rw-r--r--  include/linux/blkdev.h  13
-rw-r--r--  kernel/sched.c           2
2 files changed, 14 insertions(+), 1 deletion(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1c76506fcf1..ec0357d8c4a 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -872,6 +872,14 @@ static inline void blk_flush_plug(struct task_struct *tsk)
 {
 	struct blk_plug *plug = tsk->plug;
 
 	if (plug)
+		blk_flush_plug_list(plug, false);
+}
+
+static inline void blk_schedule_flush_plug(struct task_struct *tsk)
+{
+	struct blk_plug *plug = tsk->plug;
+
+	if (plug)
 		blk_flush_plug_list(plug, true);
 }
@@ -1317,6 +1325,11 @@ static inline void blk_flush_plug(struct task_struct *task)
 {
 }
 
+static inline void blk_schedule_flush_plug(struct task_struct *task)
+{
+}
+
+
 static inline bool blk_needs_flush_plug(struct task_struct *tsk)
 {
 	return false;
diff --git a/kernel/sched.c b/kernel/sched.c
index a187c3fe027..312f8b95c2d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4118,7 +4118,7 @@ need_resched:
 			 */
 			if (blk_needs_flush_plug(prev)) {
 				raw_spin_unlock(&rq->lock);
-				blk_flush_plug(prev);
+				blk_schedule_flush_plug(prev);
 				raw_spin_lock(&rq->lock);
 			}
 		}
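Why the title says io_schedule() flushes the plug inline: io_schedule() flushes the plug explicitly before sleeping, and that call keeps the direct (non-kblockd) dispatch via blk_flush_plug(); only the implicit flush inside schedule() is offloaded. Roughly, from the kernel/sched.c of this era (simplified and from memory, not verbatim):

void __sched io_schedule(void)
{
	struct rq *rq = raw_rq();

	delayacct_blkio_start();
	atomic_inc(&rq->nr_iowait);
	blk_flush_plug(current);	/* inline dispatch, low latency */
	current->in_iowait = 1;
	schedule();			/* a still-loaded plug goes via kblockd */
	current->in_iowait = 0;
	atomic_dec(&rq->nr_iowait);
	delayacct_blkio_end();
}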