author		Chris Mason <clm@fb.com>	2015-09-18 13:35:08 -0400
committer	Alex Shi <alex.shi@linaro.org>	2016-06-08 11:08:25 +0800
commit		023b4bcf542d8bafa387d66ce65b453bd4ce9a87 (patch)
tree		fa78ba7e46387793dae3fc52b47a7fa803df48af /fs/fs-writeback.c
parent		2a22459177bd0a13d1cb633502e4ab98264f72dc (diff)
fs-writeback: unplug before cond_resched in writeback_sb_inodes
Commit 505a666ee3fc ("writeback: plug writeback in wb_writeback() and writeback_inodes_wb()") has us holding a plug during writeback_sb_inodes, which increases the merge rate when relatively contiguous small files are written by the filesystem. It helps both on flash and spindles.

For an fs_mark workload creating 4K files in parallel across 8 drives, this commit improves performance ~9% more by unplugging before calling cond_resched(). cond_resched() doesn't trigger an implicit unplug, so explicitly getting the IO down to the device before scheduling reduces latencies for anyone waiting on clean pages.

It also cuts down on how often we use kblockd to unplug, which means less work bouncing from one workqueue to another.

Many more details about how we got here: https://lkml.org/lkml/2015/9/11/570

Signed-off-by: Chris Mason <clm@fb.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
(cherry picked from commit 590dca3a71875461e8fea3013af74386945191b2)
Signed-off-by: Alex Shi <alex.shi@linaro.org>
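For context, here is a minimal sketch of the plugging pattern this patch tunes, using only block-layer calls the patch itself relies on (blk_start_plug()/blk_flush_plug()/blk_finish_plug(), need_resched()/cond_resched()); the loop and the helpers more_dirty_inodes() and write_one_inode() are hypothetical placeholders, not the real writeback_sb_inodes() internals:

#include <linux/blkdev.h>
#include <linux/sched.h>

extern bool more_dirty_inodes(void);	/* hypothetical placeholder */
extern void write_one_inode(void);	/* hypothetical: submits bios under the plug */

static void writeback_batch_sketch(void)
{
	struct blk_plug plug;

	/* Per-task plug: submitted bios queue up here so contiguous ones can merge. */
	blk_start_plug(&plug);

	while (more_dirty_inodes()) {
		write_one_inode();

		if (need_resched()) {
			/*
			 * cond_resched() does not trigger an unplug, so the
			 * queued bios would sit across the reschedule; flush
			 * explicitly to get the IO moving before yielding.
			 */
			blk_flush_plug(current);
			cond_resched();
		}
	}

	/* Finishing the plug flushes anything still queued. */
	blk_finish_plug(&plug);
}

In the real code the plug is started higher up, in wb_writeback() and writeback_inodes_wb() (commit 505a666ee3fc); this patch adds only the flush-before-resched inside writeback_sb_inodes()'s inode loop, as the diff below shows.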
Diffstat (limited to 'fs/fs-writeback.c')
-rw-r--r--	fs/fs-writeback.c	17
1 file changed, 16 insertions, 1 deletion
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 88712ef9f494..9f847af78d00 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1520,6 +1520,21 @@ static long writeback_sb_inodes(struct super_block *sb,
 		wbc_detach_inode(&wbc);
 		work->nr_pages -= write_chunk - wbc.nr_to_write;
 		wrote += write_chunk - wbc.nr_to_write;
+
+		if (need_resched()) {
+			/*
+			 * We're trying to balance between building up a nice
+			 * long list of IOs to improve our merge rate, and
+			 * getting those IOs out quickly for anyone throttling
+			 * in balance_dirty_pages(). cond_resched() doesn't
+			 * unplug, so get our IOs out the door before we
+			 * give up the CPU.
+			 */
+			blk_flush_plug(current);
+			cond_resched();
+		}
+
+
 		spin_lock(&wb->list_lock);
 		spin_lock(&inode->i_lock);
 		if (!(inode->i_state & I_DIRTY_ALL))
@@ -1527,7 +1542,7 @@ static long writeback_sb_inodes(struct super_block *sb,
 		requeue_inode(inode, wb, &wbc);
 		inode_sync_complete(inode);
 		spin_unlock(&inode->i_lock);
-		cond_resched_lock(&wb->list_lock);
+
 		/*
 		 * bail out to wb_writeback() often enough to check
 		 * background threshold and other termination conditions.
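On the second hunk: the cond_resched_lock(&wb->list_lock) call is dropped because the explicit cond_resched() added above now runs before wb->list_lock is re-taken, so a second reschedule point under the lock buys nothing. For reference, a rough sketch of the dropped helper's semantics, assuming the usual drop/yield/retake behaviour (the real implementation is __cond_resched_lock() in the scheduler core):

#include <linux/sched.h>
#include <linux/spinlock.h>

static int cond_resched_lock_sketch(spinlock_t *lock)
{
	/* Drop the lock only if someone is waiting for the CPU or the lock. */
	if (need_resched() || spin_needbreak(lock)) {
		spin_unlock(lock);
		cond_resched();		/* may actually schedule here */
		spin_lock(lock);
		return 1;		/* caller may need to revalidate state */
	}
	return 0;
}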