From c8e28ce049faa53a470c132893abbc9f2bde9420 Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Sun, 23 Jan 2011 10:07:47 -0600 Subject: writeback: account per-bdi accumulated dirtied pages Introduce the BDI_DIRTIED counter. It will be used for estimating the bdi's dirty bandwidth. CC: Jan Kara CC: Michael Rubin CC: Peter Zijlstra Signed-off-by: Wu Fengguang --- include/linux/backing-dev.h | 1 + mm/backing-dev.c | 2 ++ mm/page-writeback.c | 1 + 3 files changed, 4 insertions(+) diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 3b2f9cb82986..9ca241a70c49 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -40,6 +40,7 @@ typedef int (congested_fn)(void *, int); enum bdi_stat_item { BDI_RECLAIMABLE, BDI_WRITEBACK, + BDI_DIRTIED, BDI_WRITTEN, NR_BDI_STAT_ITEMS }; diff --git a/mm/backing-dev.c b/mm/backing-dev.c index a87da524a4a0..fea7e6efd1d7 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -97,6 +97,7 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v) "BdiDirtyThresh: %10lu kB\n" "DirtyThresh: %10lu kB\n" "BackgroundThresh: %10lu kB\n" + "BdiDirtied: %10lu kB\n" "BdiWritten: %10lu kB\n" "BdiWriteBandwidth: %10lu kBps\n" "b_dirty: %10lu\n" @@ -109,6 +110,7 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v) K(bdi_thresh), K(dirty_thresh), K(background_thresh), + (unsigned long) K(bdi_stat(bdi, BDI_DIRTIED)), (unsigned long) K(bdi_stat(bdi, BDI_WRITTEN)), (unsigned long) K(bdi->write_bandwidth), nr_dirty, diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 0e309cd1b5b9..0e6dd5c2ed31 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -1322,6 +1322,7 @@ void account_page_dirtied(struct page *page, struct address_space *mapping) __inc_zone_page_state(page, NR_FILE_DIRTY); __inc_zone_page_state(page, NR_DIRTIED); __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE); + __inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED); task_dirty_inc(current); task_io_account_write(PAGE_CACHE_SIZE); } -- cgit v1.2.3 From 6c14ae1e92c77eabd3e7527cf2e7836cde8b8487 Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Wed, 2 Mar 2011 16:04:18 -0600 Subject: writeback: dirty position control bdi_position_ratio() provides a scale factor to bdi->dirty_ratelimit, so that the resulted task rate limit can drive the dirty pages back to the global/bdi setpoints. Old scheme is, | free run area | throttle area ----------------------------------------+----------------------------> thresh^ dirty pages New scheme is, ^ task rate limit | | * | * | * |[free run] * [smooth throttled] | * | * | * ..bdi->dirty_ratelimit..........* | . * | . * | . * | . * | . * +-------------------------------.-----------------------*------------> setpoint^ limit^ dirty pages The slope of the bdi control line should be 1) large enough to pull the dirty pages to setpoint reasonably fast 2) small enough to avoid big fluctuations in the resulted pos_ratio and hence task ratelimit Since the fluctuation range of the bdi dirty pages is typically observed to be within 1-second worth of data, the bdi control line's slope is selected to be a linear function of bdi write bandwidth, so that it can adapt to slow/fast storage devices well. Assume the bdi control line pos_ratio = 1.0 + k * (dirty - bdi_setpoint) where k is the negative slope. 
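For a concrete feel of the line (illustrative numbers, not taken from the patch): with write_bw = 25600 pages/s, i.e. 100 MB/s of 4 KB pages, the derivation below gives k = -1/(8 * 25600), so pos_ratio falls linearly from 1.0 at bdi_setpoint to 0 once bdi_dirty exceeds the setpoint by 8 * write_bw = 204800 pages (about 800 MB).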
If targeting for 12.5% fluctuation range in pos_ratio when dirty pages are fluctuating in range [bdi_setpoint - write_bw/2, bdi_setpoint + write_bw/2], we get slope k = - 1 / (8 * write_bw) Let pos_ratio(x_intercept) = 0, we get the parameter used in code: x_intercept = bdi_setpoint + 8 * write_bw The global/bdi slopes are nicely complementing each other when the system has only one major bdi (indicated by bdi_thresh ~= thresh): 1) slope of global control line => scaling to the control scope size 2) slope of main bdi control line => scaling to the writeout bandwidth so that - in memory tight systems, (1) becomes strong enough to squeeze dirty pages inside the control scope - in large memory systems where the "gravity" of (1) for pulling the dirty pages to setpoint is too weak, (2) can back (1) up and drive dirty pages to bdi_setpoint ~= setpoint reasonably fast. Unfortunately in JBOD setups, the fluctuation range of bdi threshold is related to memory size due to the interferences between disks. In this case, the bdi slope will be weighted sum of write_bw and bdi_thresh. Given equations span = x_intercept - bdi_setpoint k = df/dx = - 1 / span and the extremum values span = bdi_thresh dx = bdi_thresh we get df = - dx / span = - 1.0 That means, when bdi_dirty deviates bdi_thresh up, pos_ratio and hence task ratelimit will fluctuate by -100%. peter: use 3rd order polynomial for the global control line CC: Peter Zijlstra Acked-by: Jan Kara Signed-off-by: Wu Fengguang --- mm/page-writeback.c | 191 +++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 190 insertions(+), 1 deletion(-) diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 0e6dd5c2ed31..c16ddd8f5cb6 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -46,6 +46,8 @@ */ #define BANDWIDTH_INTERVAL max(HZ/5, 1) +#define RATELIMIT_CALC_SHIFT 10 + /* * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited * will look to see if it needs to force writeback or throttling. @@ -411,6 +413,12 @@ unsigned long determine_dirtyable_memory(void) return x + 1; /* Ensure that we never return 0 */ } +static unsigned long dirty_freerun_ceiling(unsigned long thresh, + unsigned long bg_thresh) +{ + return (thresh + bg_thresh) / 2; +} + static unsigned long hard_dirty_limit(unsigned long thresh) { return max(thresh, global_dirty_limit); @@ -495,6 +503,184 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty) return bdi_dirty; } +/* + * Dirty position control. + * + * (o) global/bdi setpoints + * + * We want the dirty pages be balanced around the global/bdi setpoints. + * When the number of dirty pages is higher/lower than the setpoint, the + * dirty position control ratio (and hence task dirty ratelimit) will be + * decreased/increased to bring the dirty pages back to the setpoint. + * + * pos_ratio = 1 << RATELIMIT_CALC_SHIFT + * + * if (dirty < setpoint) scale up pos_ratio + * if (dirty > setpoint) scale down pos_ratio + * + * if (bdi_dirty < bdi_setpoint) scale up pos_ratio + * if (bdi_dirty > bdi_setpoint) scale down pos_ratio + * + * task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT + * + * (o) global control line + * + * ^ pos_ratio + * | + * | |<===== global dirty control scope ======>| + * 2.0 .............* + * | .* + * | . * + * | . * + * | . * + * | . * + * | . * + * 1.0 ................................* + * | . . * + * | . . * + * | . . * + * | . . * + * | . . 
* + * 0 +------------.------------------.----------------------*-------------> + * freerun^ setpoint^ limit^ dirty pages + * + * (o) bdi control line + * + * ^ pos_ratio + * | + * | * + * | * + * | * + * | * + * | * |<=========== span ============>| + * 1.0 .......................* + * | . * + * | . * + * | . * + * | . * + * | . * + * | . * + * | . * + * | . * + * | . * + * | . * + * | . * + * 1/4 ...............................................* * * * * * * * * * * * + * | . . + * | . . + * | . . + * 0 +----------------------.-------------------------------.-------------> + * bdi_setpoint^ x_intercept^ + * + * The bdi control line won't drop below pos_ratio=1/4, so that bdi_dirty can + * be smoothly throttled down to normal if it starts high in situations like + * - start writing to a slow SD card and a fast disk at the same time. The SD + * card's bdi_dirty may rush to many times higher than bdi_setpoint. + * - the bdi dirty thresh drops quickly due to change of JBOD workload + */ +static unsigned long bdi_position_ratio(struct backing_dev_info *bdi, + unsigned long thresh, + unsigned long bg_thresh, + unsigned long dirty, + unsigned long bdi_thresh, + unsigned long bdi_dirty) +{ + unsigned long write_bw = bdi->avg_write_bandwidth; + unsigned long freerun = dirty_freerun_ceiling(thresh, bg_thresh); + unsigned long limit = hard_dirty_limit(thresh); + unsigned long x_intercept; + unsigned long setpoint; /* dirty pages' target balance point */ + unsigned long bdi_setpoint; + unsigned long span; + long long pos_ratio; /* for scaling up/down the rate limit */ + long x; + + if (unlikely(dirty >= limit)) + return 0; + + /* + * global setpoint + * + * setpoint - dirty 3 + * f(dirty) := 1.0 + (----------------) + * limit - setpoint + * + * it's a 3rd order polynomial that subjects to + * + * (1) f(freerun) = 2.0 => rampup dirty_ratelimit reasonably fast + * (2) f(setpoint) = 1.0 => the balance point + * (3) f(limit) = 0 => the hard limit + * (4) df/dx <= 0 => negative feedback control + * (5) the closer to setpoint, the smaller |df/dx| (and the reverse) + * => fast response on large errors; small oscillation near setpoint + */ + setpoint = (freerun + limit) / 2; + x = div_s64((setpoint - dirty) << RATELIMIT_CALC_SHIFT, + limit - setpoint + 1); + pos_ratio = x; + pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT; + pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT; + pos_ratio += 1 << RATELIMIT_CALC_SHIFT; + + /* + * We have computed basic pos_ratio above based on global situation. If + * the bdi is over/under its share of dirty pages, we want to scale + * pos_ratio further down/up. That is done by the following mechanism. + */ + + /* + * bdi setpoint + * + * f(bdi_dirty) := 1.0 + k * (bdi_dirty - bdi_setpoint) + * + * x_intercept - bdi_dirty + * := -------------------------- + * x_intercept - bdi_setpoint + * + * The main bdi control line is a linear function that subjects to + * + * (1) f(bdi_setpoint) = 1.0 + * (2) k = - 1 / (8 * write_bw) (in single bdi case) + * or equally: x_intercept = bdi_setpoint + 8 * write_bw + * + * For single bdi case, the dirty pages are observed to fluctuate + * regularly within range + * [bdi_setpoint - write_bw/2, bdi_setpoint + write_bw/2] + * for various filesystems, where (2) can yield in a reasonable 12.5% + * fluctuation range for pos_ratio. + * + * For JBOD case, bdi_thresh (not bdi_dirty!) could fluctuate up to its + * own size, so move the slope over accordingly and choose a slope that + * yields 100% pos_ratio fluctuation on suddenly doubled bdi_thresh. 
+ */ + if (unlikely(bdi_thresh > thresh)) + bdi_thresh = thresh; + /* + * scale global setpoint to bdi's: + * bdi_setpoint = setpoint * bdi_thresh / thresh + */ + x = div_u64((u64)bdi_thresh << 16, thresh + 1); + bdi_setpoint = setpoint * (u64)x >> 16; + /* + * Use span=(8*write_bw) in single bdi case as indicated by + * (thresh - bdi_thresh ~= 0) and transit to bdi_thresh in JBOD case. + * + * bdi_thresh thresh - bdi_thresh + * span = ---------- * (8 * write_bw) + ------------------- * bdi_thresh + * thresh thresh + */ + span = (thresh - bdi_thresh + 8 * write_bw) * (u64)x >> 16; + x_intercept = bdi_setpoint + span; + + if (bdi_dirty < x_intercept - span / 4) { + pos_ratio *= x_intercept - bdi_dirty; + do_div(pos_ratio, x_intercept - bdi_setpoint + 1); + } else + pos_ratio /= 4; + + return pos_ratio; +} + static void bdi_update_write_bandwidth(struct backing_dev_info *bdi, unsigned long elapsed, unsigned long written) @@ -655,6 +841,7 @@ static void balance_dirty_pages(struct address_space *mapping, unsigned long nr_reclaimable, bdi_nr_reclaimable; unsigned long nr_dirty; /* = file_dirty + writeback + unstable_nfs */ unsigned long bdi_dirty; + unsigned long freerun; unsigned long background_thresh; unsigned long dirty_thresh; unsigned long bdi_thresh; @@ -679,7 +866,9 @@ static void balance_dirty_pages(struct address_space *mapping, * catch-up. This avoids (excessively) small writeouts * when the bdi limits are ramping up. */ - if (nr_dirty <= (background_thresh + dirty_thresh) / 2) + freerun = dirty_freerun_ceiling(dirty_thresh, + background_thresh); + if (nr_dirty <= freerun) break; bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh); -- cgit v1.2.3 From af6a311384bce6c88e15c80ab22ab051a918b4eb Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Mon, 3 Oct 2011 20:46:17 -0600 Subject: writeback: add bg_threshold parameter to __bdi_update_bandwidth() No behavior change. 
Signed-off-by: Wu Fengguang --- fs/fs-writeback.c | 2 +- include/linux/writeback.h | 1 + mm/page-writeback.c | 11 +++++++---- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 04cf3b91e501..28076562ada0 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -675,7 +675,7 @@ static inline bool over_bground_thresh(void) static void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time) { - __bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, start_time); + __bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, 0, start_time); } /* diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 2b8963ff0f35..ddb4652cb337 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -143,6 +143,7 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, void __bdi_update_bandwidth(struct backing_dev_info *bdi, unsigned long thresh, + unsigned long bg_thresh, unsigned long dirty, unsigned long bdi_thresh, unsigned long bdi_dirty, diff --git a/mm/page-writeback.c b/mm/page-writeback.c index c16ddd8f5cb6..4b954c9fe846 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -779,6 +779,7 @@ static void global_update_bandwidth(unsigned long thresh, void __bdi_update_bandwidth(struct backing_dev_info *bdi, unsigned long thresh, + unsigned long bg_thresh, unsigned long dirty, unsigned long bdi_thresh, unsigned long bdi_dirty, @@ -815,6 +816,7 @@ snapshot: static void bdi_update_bandwidth(struct backing_dev_info *bdi, unsigned long thresh, + unsigned long bg_thresh, unsigned long dirty, unsigned long bdi_thresh, unsigned long bdi_dirty, @@ -823,8 +825,8 @@ static void bdi_update_bandwidth(struct backing_dev_info *bdi, if (time_is_after_eq_jiffies(bdi->bw_time_stamp + BANDWIDTH_INTERVAL)) return; spin_lock(&bdi->wb.list_lock); - __bdi_update_bandwidth(bdi, thresh, dirty, bdi_thresh, bdi_dirty, - start_time); + __bdi_update_bandwidth(bdi, thresh, bg_thresh, dirty, + bdi_thresh, bdi_dirty, start_time); spin_unlock(&bdi->wb.list_lock); } @@ -912,8 +914,9 @@ static void balance_dirty_pages(struct address_space *mapping, if (!bdi->dirty_exceeded) bdi->dirty_exceeded = 1; - bdi_update_bandwidth(bdi, dirty_thresh, nr_dirty, - bdi_thresh, bdi_dirty, start_time); + bdi_update_bandwidth(bdi, dirty_thresh, background_thresh, + nr_dirty, bdi_thresh, bdi_dirty, + start_time); /* Note: nr_reclaimable denotes nr_dirty + nr_unstable. * Unstable writes are a feature of certain networked -- cgit v1.2.3 From be3ffa276446e1b691a2bf84e7621e5a6fb49db9 Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Sun, 12 Jun 2011 10:51:31 -0600 Subject: writeback: dirty rate control It's all about bdi->dirty_ratelimit, which aims to be (write_bw / N) when there are N dd tasks. 
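For example (illustrative numbers only): a bdi whose device writes back at 80 MB/s, shared by 4 dd tasks, should see dirty_ratelimit settle around 20 MB/s, so that the aggregate dirtying rate matches the writeout rate.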
On write() syscall, use bdi->dirty_ratelimit
============================================

    balance_dirty_pages(pages_dirtied)
    {
        task_ratelimit = bdi->dirty_ratelimit * bdi_position_ratio();
        pause = pages_dirtied / task_ratelimit;
        sleep(pause);
    }

On every 200ms, update bdi->dirty_ratelimit
===========================================

    bdi_update_dirty_ratelimit()
    {
        task_ratelimit = bdi->dirty_ratelimit * bdi_position_ratio();
        balanced_dirty_ratelimit = task_ratelimit * write_bw / dirty_rate;
        bdi->dirty_ratelimit = balanced_dirty_ratelimit;
    }

Estimation of balanced bdi->dirty_ratelimit
===========================================

balanced task_ratelimit
-----------------------

balance_dirty_pages() needs to throttle tasks dirtying pages such that
the total amount of dirty pages stays below the specified dirty limit in
order to avoid memory deadlocks. Furthermore we desire fairness in that
tasks get throttled proportionally to the amount of pages they dirty.

IOW we want to throttle tasks such that we match the dirty rate to the
writeout bandwidth; this yields a stable amount of dirty pages:

        dirty_rate == write_bw                                      (1)

The fairness requirement gives us:

        task_ratelimit = balanced_dirty_ratelimit == write_bw / N   (2)

where N is the number of dd tasks. We don't know N beforehand, but can
still estimate balanced_dirty_ratelimit within 200ms.

Start by throttling each dd task at rate

        task_ratelimit = task_ratelimit_0                           (3)
                         (any non-zero initial value is OK)

After 200ms, we measure

        dirty_rate = # of pages dirtied by all dd's / 200ms
        write_bw   = # of pages written to the disk / 200ms

For the aggressive dd dirtiers, the equality holds

        dirty_rate == N * task_rate == N * task_ratelimit_0         (4)
Or
        task_ratelimit_0 == dirty_rate / N                          (5)

Now we conclude that the balanced task ratelimit can be estimated by

                                                      write_bw
        balanced_dirty_ratelimit = task_ratelimit_0 * ----------    (6)
                                                      dirty_rate

Because with (4) and (5) we can get the desired equality (1):

                                                       write_bw
        balanced_dirty_ratelimit == (dirty_rate / N) * ----------
                                                       dirty_rate
                                 == write_bw / N

Then using the balanced task ratelimit we can compute task pause times like:

        task_pause = task->nr_dirtied / task_ratelimit

task_ratelimit with position control
------------------------------------

However, while the above gives us a means of matching the dirty rate to
the writeout bandwidth, it at best provides us with a stable dirty page
count (assuming a static system). In order to control the dirty page
count such that it is high enough to provide performance, but does not
exceed the specified limit, we need another control.

The dirty position control works by extending (2) to

        task_ratelimit = balanced_dirty_ratelimit * pos_ratio       (7)

where pos_ratio is a negative feedback function that is subject to

        1) f(setpoint) = 1.0
        2) df/dx < 0

That is, if the dirty pages are ABOVE the setpoint, we throttle each
task a bit HARDER than balanced_dirty_ratelimit, so that dirty pages are
created more slowly than they are cleaned and thus DROP back to the
setpoint (and the reverse).
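To see the estimation steps (3)-(6) in isolation, here is a small user-space
toy model (purely illustrative sketch, not kernel code; the program, its
numbers and its names are all invented for this example): it throttles N
writers at the current estimate, "measures" the resulting dirty rate over a
200ms period, and applies equation (6).

    /* toy model of the balanced ratelimit estimation; units are pages/second */
    #include <stdio.h>

    int main(void)
    {
            const double write_bw = 25600.0;  /* writeout rate: 100MB/s in 4k pages */
            const int N = 8;                  /* number of dd-style dirtier tasks */
            double ratelimit = 1000.0;        /* task_ratelimit_0: any non-zero start */
            int i;

            for (i = 0; i < 4; i++) {
                    /* all N tasks dirty pages at the throttled rate for 200ms */
                    double dirty_rate = N * ratelimit;              /* equation (4) */
                    /* equation (6): scale the old rate by write_bw / dirty_rate */
                    ratelimit = ratelimit * write_bw / dirty_rate;
                    printf("period %d: task ratelimit = %.0f pages/s (target %.0f)\n",
                           i, ratelimit, write_bw / N);
            }
            return 0;
    }

Whatever non-zero task_ratelimit_0 is chosen, the very first update already
lands on write_bw / N, because the unknown N cancels out via (4); later
periods merely track changes in write_bw and N.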
Based on (7) and the assumption that both dirty_ratelimit and pos_ratio remains CONSTANT for the past 200ms, we get task_ratelimit_0 = balanced_dirty_ratelimit * pos_ratio (8) Putting (8) into (6), we get the formula used in bdi_update_dirty_ratelimit(): write_bw balanced_dirty_ratelimit *= pos_ratio * ---------- (9) dirty_rate Signed-off-by: Wu Fengguang --- include/linux/backing-dev.h | 7 ++++ mm/backing-dev.c | 1 + mm/page-writeback.c | 83 +++++++++++++++++++++++++++++++++++++++++++-- 3 files changed, 89 insertions(+), 2 deletions(-) diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 9ca241a70c49..dff0ff78e878 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -75,10 +75,17 @@ struct backing_dev_info { struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS]; unsigned long bw_time_stamp; /* last time write bw is updated */ + unsigned long dirtied_stamp; unsigned long written_stamp; /* pages written at bw_time_stamp */ unsigned long write_bandwidth; /* the estimated write bandwidth */ unsigned long avg_write_bandwidth; /* further smoothed write bw */ + /* + * The base dirty throttle rate, re-calculated on every 200ms. + * All the bdi tasks' dirty rate will be curbed under it. + */ + unsigned long dirty_ratelimit; + struct prop_local_percpu completions; int dirty_exceeded; diff --git a/mm/backing-dev.c b/mm/backing-dev.c index fea7e6efd1d7..ba20f94cde93 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -686,6 +686,7 @@ int bdi_init(struct backing_dev_info *bdi) bdi->bw_time_stamp = jiffies; bdi->written_stamp = 0; + bdi->dirty_ratelimit = INIT_BW; bdi->write_bandwidth = INIT_BW; bdi->avg_write_bandwidth = INIT_BW; diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 4b954c9fe846..1721b6523c04 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -777,6 +777,79 @@ static void global_update_bandwidth(unsigned long thresh, spin_unlock(&dirty_lock); } +/* + * Maintain bdi->dirty_ratelimit, the base dirty throttle rate. + * + * Normal bdi tasks will be curbed at or below it in long term. + * Obviously it should be around (write_bw / N) when there are N dd tasks. + */ +static void bdi_update_dirty_ratelimit(struct backing_dev_info *bdi, + unsigned long thresh, + unsigned long bg_thresh, + unsigned long dirty, + unsigned long bdi_thresh, + unsigned long bdi_dirty, + unsigned long dirtied, + unsigned long elapsed) +{ + unsigned long write_bw = bdi->avg_write_bandwidth; + unsigned long dirty_ratelimit = bdi->dirty_ratelimit; + unsigned long dirty_rate; + unsigned long task_ratelimit; + unsigned long balanced_dirty_ratelimit; + unsigned long pos_ratio; + + /* + * The dirty rate will match the writeout rate in long term, except + * when dirty pages are truncated by userspace or re-dirtied by FS. + */ + dirty_rate = (dirtied - bdi->dirtied_stamp) * HZ / elapsed; + + pos_ratio = bdi_position_ratio(bdi, thresh, bg_thresh, dirty, + bdi_thresh, bdi_dirty); + /* + * task_ratelimit reflects each dd's dirty rate for the past 200ms. + */ + task_ratelimit = (u64)dirty_ratelimit * + pos_ratio >> RATELIMIT_CALC_SHIFT; + task_ratelimit++; /* it helps rampup dirty_ratelimit from tiny values */ + + /* + * A linear estimation of the "balanced" throttle rate. The theory is, + * if there are N dd tasks, each throttled at task_ratelimit, the bdi's + * dirty_rate will be measured to be (N * task_ratelimit). So the below + * formula will yield the balanced rate limit (write_bw / N). 
+ * + * Note that the expanded form is not a pure rate feedback: + * rate_(i+1) = rate_(i) * (write_bw / dirty_rate) (1) + * but also takes pos_ratio into account: + * rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio (2) + * + * (1) is not realistic because pos_ratio also takes part in balancing + * the dirty rate. Consider the state + * pos_ratio = 0.5 (3) + * rate = 2 * (write_bw / N) (4) + * If (1) is used, it will stuck in that state! Because each dd will + * be throttled at + * task_ratelimit = pos_ratio * rate = (write_bw / N) (5) + * yielding + * dirty_rate = N * task_ratelimit = write_bw (6) + * put (6) into (1) we get + * rate_(i+1) = rate_(i) (7) + * + * So we end up using (2) to always keep + * rate_(i+1) ~= (write_bw / N) (8) + * regardless of the value of pos_ratio. As long as (8) is satisfied, + * pos_ratio is able to drive itself to 1.0, which is not only where + * the dirty count meet the setpoint, but also where the slope of + * pos_ratio is most flat and hence task_ratelimit is least fluctuated. + */ + balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw, + dirty_rate | 1); + + bdi->dirty_ratelimit = max(balanced_dirty_ratelimit, 1UL); +} + void __bdi_update_bandwidth(struct backing_dev_info *bdi, unsigned long thresh, unsigned long bg_thresh, @@ -787,6 +860,7 @@ void __bdi_update_bandwidth(struct backing_dev_info *bdi, { unsigned long now = jiffies; unsigned long elapsed = now - bdi->bw_time_stamp; + unsigned long dirtied; unsigned long written; /* @@ -795,6 +869,7 @@ void __bdi_update_bandwidth(struct backing_dev_info *bdi, if (elapsed < BANDWIDTH_INTERVAL) return; + dirtied = percpu_counter_read(&bdi->bdi_stat[BDI_DIRTIED]); written = percpu_counter_read(&bdi->bdi_stat[BDI_WRITTEN]); /* @@ -804,12 +879,16 @@ void __bdi_update_bandwidth(struct backing_dev_info *bdi, if (elapsed > HZ && time_before(bdi->bw_time_stamp, start_time)) goto snapshot; - if (thresh) + if (thresh) { global_update_bandwidth(thresh, dirty, now); - + bdi_update_dirty_ratelimit(bdi, thresh, bg_thresh, dirty, + bdi_thresh, bdi_dirty, + dirtied, elapsed); + } bdi_update_write_bandwidth(bdi, elapsed, written); snapshot: + bdi->dirtied_stamp = dirtied; bdi->written_stamp = written; bdi->bw_time_stamp = now; } -- cgit v1.2.3 From 7381131cbcf7e15d201a0ffd782a4698efe4e740 Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Fri, 26 Aug 2011 15:53:24 -0600 Subject: writeback: stabilize bdi->dirty_ratelimit There are some imperfections in balanced_dirty_ratelimit. 1) large fluctuations The dirty_rate used for computing balanced_dirty_ratelimit is merely averaged in the past 200ms (very small comparing to the 3s estimation period for write_bw), which makes rather dispersed distribution of balanced_dirty_ratelimit. It's pretty hard to average out the singular points by increasing the estimation period. Considering that the averaging technique will introduce very undesirable time lags, I give it up totally. (btw, the 3s write_bw averaging time lag is much more acceptable because its impact is one-way and therefore won't lead to oscillations.) The more practical way is filtering -- most singular balanced_dirty_ratelimit points can be filtered out by remembering some prev_balanced_rate and prev_prev_balanced_rate. However the more reliable way is to guard balanced_dirty_ratelimit with task_ratelimit. 2) due to truncates and fs redirties, the (write_bw <=> dirty_rate) match could become unbalanced, which may lead to large systematical errors in balanced_dirty_ratelimit. 
The truncates, due to its possibly bumpy nature, can hardly be compensated smoothly. So let's face it. When some over-estimated balanced_dirty_ratelimit brings dirty_ratelimit high, dirty pages will go higher than the setpoint. task_ratelimit will in turn become lower than dirty_ratelimit. So if we consider both balanced_dirty_ratelimit and task_ratelimit and update dirty_ratelimit only when they are on the same side of dirty_ratelimit, the systematical errors in balanced_dirty_ratelimit won't be able to bring dirty_ratelimit far away. The balanced_dirty_ratelimit estimation may also be inaccurate near @limit or @freerun, however is less an issue. 3) since we ultimately want to - keep the fluctuations of task ratelimit as small as possible - keep the dirty pages around the setpoint as long time as possible the update policy used for (2) also serves the above goals nicely: if for some reason the dirty pages are high (task_ratelimit < dirty_ratelimit), and dirty_ratelimit is low (dirty_ratelimit < balanced_dirty_ratelimit), there is no point to bring up dirty_ratelimit in a hurry only to hurt both the above two goals. So, we make use of task_ratelimit to limit the update of dirty_ratelimit in two ways: 1) avoid changing dirty rate when it's against the position control target (the adjusted rate will slow down the progress of dirty pages going back to setpoint). 2) limit the step size. task_ratelimit is changing values step by step, leaving a consistent trace comparing to the randomly jumping balanced_dirty_ratelimit. task_ratelimit also has the nice smaller errors in stable state and typically larger errors when there are big errors in rate. So it's a pretty good limiting factor for the step size of dirty_ratelimit. Note that bdi->dirty_ratelimit is always tracking balanced_dirty_ratelimit. task_ratelimit is merely used as a limiting factor. Signed-off-by: Wu Fengguang --- include/linux/backing-dev.h | 3 ++ mm/backing-dev.c | 1 + mm/page-writeback.c | 71 ++++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 74 insertions(+), 1 deletion(-) diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index dff0ff78e878..c3b92010d894 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -83,8 +83,11 @@ struct backing_dev_info { /* * The base dirty throttle rate, re-calculated on every 200ms. * All the bdi tasks' dirty rate will be curbed under it. + * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit + * in small steps and is much more smooth/stable than the latter. 
*/ unsigned long dirty_ratelimit; + unsigned long balanced_dirty_ratelimit; struct prop_local_percpu completions; int dirty_exceeded; diff --git a/mm/backing-dev.c b/mm/backing-dev.c index ba20f94cde93..5dcaa3c756d1 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -686,6 +686,7 @@ int bdi_init(struct backing_dev_info *bdi) bdi->bw_time_stamp = jiffies; bdi->written_stamp = 0; + bdi->balanced_dirty_ratelimit = INIT_BW; bdi->dirty_ratelimit = INIT_BW; bdi->write_bandwidth = INIT_BW; bdi->avg_write_bandwidth = INIT_BW; diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 1721b6523c04..d4a6e91bd9e5 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -792,12 +792,17 @@ static void bdi_update_dirty_ratelimit(struct backing_dev_info *bdi, unsigned long dirtied, unsigned long elapsed) { + unsigned long freerun = dirty_freerun_ceiling(thresh, bg_thresh); + unsigned long limit = hard_dirty_limit(thresh); + unsigned long setpoint = (freerun + limit) / 2; unsigned long write_bw = bdi->avg_write_bandwidth; unsigned long dirty_ratelimit = bdi->dirty_ratelimit; unsigned long dirty_rate; unsigned long task_ratelimit; unsigned long balanced_dirty_ratelimit; unsigned long pos_ratio; + unsigned long step; + unsigned long x; /* * The dirty rate will match the writeout rate in long term, except @@ -847,7 +852,71 @@ static void bdi_update_dirty_ratelimit(struct backing_dev_info *bdi, balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw, dirty_rate | 1); - bdi->dirty_ratelimit = max(balanced_dirty_ratelimit, 1UL); + /* + * We could safely do this and return immediately: + * + * bdi->dirty_ratelimit = balanced_dirty_ratelimit; + * + * However to get a more stable dirty_ratelimit, the below elaborated + * code makes use of task_ratelimit to filter out sigular points and + * limit the step size. + * + * The below code essentially only uses the relative value of + * + * task_ratelimit - dirty_ratelimit + * = (pos_ratio - 1) * dirty_ratelimit + * + * which reflects the direction and size of dirty position error. + */ + + /* + * dirty_ratelimit will follow balanced_dirty_ratelimit iff + * task_ratelimit is on the same side of dirty_ratelimit, too. + * For example, when + * - dirty_ratelimit > balanced_dirty_ratelimit + * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint) + * lowering dirty_ratelimit will help meet both the position and rate + * control targets. Otherwise, don't update dirty_ratelimit if it will + * only help meet the rate target. After all, what the users ultimately + * feel and care are stable dirty rate and small position error. + * + * |task_ratelimit - dirty_ratelimit| is used to limit the step size + * and filter out the sigular points of balanced_dirty_ratelimit. Which + * keeps jumping around randomly and can even leap far away at times + * due to the small 200ms estimation period of dirty_rate (we want to + * keep that period small to reduce time lags). + */ + step = 0; + if (dirty < setpoint) { + x = min(bdi->balanced_dirty_ratelimit, + min(balanced_dirty_ratelimit, task_ratelimit)); + if (dirty_ratelimit < x) + step = x - dirty_ratelimit; + } else { + x = max(bdi->balanced_dirty_ratelimit, + max(balanced_dirty_ratelimit, task_ratelimit)); + if (dirty_ratelimit > x) + step = dirty_ratelimit - x; + } + + /* + * Don't pursue 100% rate matching. It's impossible since the balanced + * rate itself is constantly fluctuating. So decrease the track speed + * when it gets close to the target. Helps eliminate pointless tremors. 
+ */ + step >>= dirty_ratelimit / (2 * step + 1); + /* + * Limit the tracking speed to avoid overshooting. + */ + step = (step + 7) / 8; + + if (dirty_ratelimit < balanced_dirty_ratelimit) + dirty_ratelimit += step; + else + dirty_ratelimit -= step; + + bdi->dirty_ratelimit = max(dirty_ratelimit, 1UL); + bdi->balanced_dirty_ratelimit = balanced_dirty_ratelimit; } void __bdi_update_bandwidth(struct backing_dev_info *bdi, -- cgit v1.2.3 From 9d823e8f6b1b7b39f952d7d1795f29162143a433 Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Sat, 11 Jun 2011 18:10:12 -0600 Subject: writeback: per task dirty rate limit Add two fields to task_struct. 1) account dirtied pages in the individual tasks, for accuracy 2) per-task balance_dirty_pages() call intervals, for flexibility The balance_dirty_pages() call interval (ie. nr_dirtied_pause) will scale near-sqrt to the safety gap between dirty pages and threshold. The main problem of per-task nr_dirtied is, if 1k+ tasks start dirtying pages at exactly the same time, each task will be assigned a large initial nr_dirtied_pause, so that the dirty threshold will be exceeded long before each task reached its nr_dirtied_pause and hence call balance_dirty_pages(). The solution is to watch for the number of pages dirtied on each CPU in between the calls into balance_dirty_pages(). If it exceeds ratelimit_pages (3% dirty threshold), force call balance_dirty_pages() for a chance to set bdi->dirty_exceeded. In normal situations, this safeguarding condition is not expected to trigger at all. On the sqrt in dirty_poll_interval(): It will serve as an initial guess when dirty pages are still in the freerun area. When dirty pages are floating inside the dirty control scope [freerun, limit], a followup patch will use some refined dirty poll interval to get the desired pause time. thresh-dirty (MB) sqrt 1 16 2 22 4 32 8 45 16 64 32 90 64 128 128 181 256 256 512 362 1024 512 The above table means, given 1MB (or 1GB) gap and the dd tasks polling balance_dirty_pages() on every 16 (or 512) pages, the dirty limit won't be exceeded as long as there are less than 16 (or 512) concurrent dd's. So sqrt naturally leads to less overheads and more safe concurrent tasks for large memory servers, which have large (thresh-freerun) gaps. peter: keep the per-CPU ratelimit for safeguarding the 1k+ tasks case CC: Peter Zijlstra Reviewed-by: Andrea Righi Signed-off-by: Wu Fengguang --- include/linux/sched.h | 7 ++++ kernel/fork.c | 3 ++ mm/page-writeback.c | 89 +++++++++++++++++++++++++++++---------------------- 3 files changed, 60 insertions(+), 39 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index 41d0237fd449..a4a5582dc618 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1525,6 +1525,13 @@ struct task_struct { int make_it_fail; #endif struct prop_local_single dirties; + /* + * when (nr_dirtied >= nr_dirtied_pause), it's time to call + * balance_dirty_pages() for some dirty throttling pause + */ + int nr_dirtied; + int nr_dirtied_pause; + #ifdef CONFIG_LATENCYTOP int latency_record_count; struct latency_record latency_record[LT_SAVECOUNT]; diff --git a/kernel/fork.c b/kernel/fork.c index 8e6b6f4fb272..cc0815df99f2 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1302,6 +1302,9 @@ static struct task_struct *copy_process(unsigned long clone_flags, p->pdeath_signal = 0; p->exit_state = 0; + p->nr_dirtied = 0; + p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10); + /* * Ok, make it visible to the rest of the system. * We dont wake it up yet. 
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index d4a6e91bd9e5..daff320d263f 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -54,20 +54,6 @@ */ static long ratelimit_pages = 32; -/* - * When balance_dirty_pages decides that the caller needs to perform some - * non-background writeback, this is how many pages it will attempt to write. - * It should be somewhat larger than dirtied pages to ensure that reasonably - * large amounts of I/O are submitted. - */ -static inline long sync_writeback_pages(unsigned long dirtied) -{ - if (dirtied < ratelimit_pages) - dirtied = ratelimit_pages; - - return dirtied + dirtied / 2; -} - /* The following parameters are exported via /proc/sys/vm */ /* @@ -169,6 +155,8 @@ static void update_completion_period(void) int shift = calc_period_shift(); prop_change_shift(&vm_completions, shift); prop_change_shift(&vm_dirties, shift); + + writeback_set_ratelimit(); } int dirty_background_ratio_handler(struct ctl_table *table, int write, @@ -978,6 +966,23 @@ static void bdi_update_bandwidth(struct backing_dev_info *bdi, spin_unlock(&bdi->wb.list_lock); } +/* + * After a task dirtied this many pages, balance_dirty_pages_ratelimited_nr() + * will look to see if it needs to start dirty throttling. + * + * If dirty_poll_interval is too low, big NUMA machines will call the expensive + * global_page_state() too often. So scale it near-sqrt to the safety margin + * (the number of pages we may dirty without exceeding the dirty limits). + */ +static unsigned long dirty_poll_interval(unsigned long dirty, + unsigned long thresh) +{ + if (thresh > dirty) + return 1UL << (ilog2(thresh - dirty) >> 1); + + return 1; +} + /* * balance_dirty_pages() must be called by processes which are generating dirty * data. It looks at the number of dirty pages in the machine and will force @@ -1112,6 +1117,9 @@ static void balance_dirty_pages(struct address_space *mapping, if (clear_dirty_exceeded && bdi->dirty_exceeded) bdi->dirty_exceeded = 0; + current->nr_dirtied = 0; + current->nr_dirtied_pause = dirty_poll_interval(nr_dirty, dirty_thresh); + if (writeback_in_progress(bdi)) return; @@ -1138,7 +1146,7 @@ void set_page_dirty_balance(struct page *page, int page_mkwrite) } } -static DEFINE_PER_CPU(unsigned long, bdp_ratelimits) = 0; +static DEFINE_PER_CPU(int, bdp_ratelimits); /** * balance_dirty_pages_ratelimited_nr - balance dirty memory state @@ -1158,31 +1166,39 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping, unsigned long nr_pages_dirtied) { struct backing_dev_info *bdi = mapping->backing_dev_info; - unsigned long ratelimit; - unsigned long *p; + int ratelimit; + int *p; if (!bdi_cap_account_dirty(bdi)) return; - ratelimit = ratelimit_pages; - if (mapping->backing_dev_info->dirty_exceeded) - ratelimit = 8; + ratelimit = current->nr_dirtied_pause; + if (bdi->dirty_exceeded) + ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10)); + + current->nr_dirtied += nr_pages_dirtied; + preempt_disable(); /* - * Check the rate limiting. Also, we do not want to throttle real-time - * tasks in balance_dirty_pages(). Period. + * This prevents one CPU to accumulate too many dirtied pages without + * calling into balance_dirty_pages(), which can happen when there are + * 1000+ tasks, all of them start dirtying pages at exactly the same + * time, hence all honoured too large initial task->nr_dirtied_pause. 
*/ - preempt_disable(); p = &__get_cpu_var(bdp_ratelimits); - *p += nr_pages_dirtied; - if (unlikely(*p >= ratelimit)) { - ratelimit = sync_writeback_pages(*p); + if (unlikely(current->nr_dirtied >= ratelimit)) *p = 0; - preempt_enable(); - balance_dirty_pages(mapping, ratelimit); - return; + else { + *p += nr_pages_dirtied; + if (unlikely(*p >= ratelimit_pages)) { + *p = 0; + ratelimit = 0; + } } preempt_enable(); + + if (unlikely(current->nr_dirtied >= ratelimit)) + balance_dirty_pages(mapping, current->nr_dirtied); } EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr); @@ -1277,22 +1293,17 @@ void laptop_sync_completion(void) * * Here we set ratelimit_pages to a level which ensures that when all CPUs are * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory - * thresholds before writeback cuts in. - * - * But the limit should not be set too high. Because it also controls the - * amount of memory which the balance_dirty_pages() caller has to write back. - * If this is too large then the caller will block on the IO queue all the - * time. So limit it to four megabytes - the balance_dirty_pages() caller - * will write six megabyte chunks, max. + * thresholds. */ void writeback_set_ratelimit(void) { - ratelimit_pages = vm_total_pages / (num_online_cpus() * 32); + unsigned long background_thresh; + unsigned long dirty_thresh; + global_dirty_limits(&background_thresh, &dirty_thresh); + ratelimit_pages = dirty_thresh / (num_online_cpus() * 32); if (ratelimit_pages < 16) ratelimit_pages = 16; - if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024) - ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE; } static int __cpuinit -- cgit v1.2.3 From 143dfe8611a63030ce0c79419dc362f7838be557 Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Fri, 27 Aug 2010 18:45:12 -0600 Subject: writeback: IO-less balance_dirty_pages() As proposed by Chris, Dave and Jan, don't start foreground writeback IO inside balance_dirty_pages(). Instead, simply let it idle sleep for some time to throttle the dirtying task. In the mean while, kick off the per-bdi flusher thread to do background writeback IO. RATIONALS ========= - disk seeks on concurrent writeback of multiple inodes (Dave Chinner) If every thread doing writes and being throttled start foreground writeback, it leads to N IO submitters from at least N different inodes at the same time, end up with N different sets of IO being issued with potentially zero locality to each other, resulting in much lower elevator sort/merge efficiency and hence we seek the disk all over the place to service the different sets of IO. OTOH, if there is only one submission thread, it doesn't jump between inodes in the same way when congestion clears - it keeps writing to the same inode, resulting in large related chunks of sequential IOs being issued to the disk. This is more efficient than the above foreground writeback because the elevator works better and the disk seeks less. - lock contention and cache bouncing on concurrent IO submitters (Dave Chinner) With this patchset, the fs_mark benchmark on a 12-drive software RAID0 goes from CPU bound to IO bound, freeing "3-4 CPUs worth of spinlock contention". 
* "CPU usage has dropped by ~55%", "it certainly appears that most of the CPU time saving comes from the removal of contention on the inode_wb_list_lock" (IMHO at least 10% comes from the reduction of cacheline bouncing, because the new code is able to call much less frequently into balance_dirty_pages() and hence access the global page states) * the user space "App overhead" is reduced by 20%, by avoiding the cacheline pollution by the complex writeback code path * "for a ~5% throughput reduction", "the number of write IOs have dropped by ~25%", and the elapsed time reduced from 41:42.17 to 40:53.23. * On a simple test of 100 dd, it reduces the CPU %system time from 30% to 3%, and improves IO throughput from 38MB/s to 42MB/s. - IO size too small for fast arrays and too large for slow USB sticks The write_chunk used by current balance_dirty_pages() cannot be directly set to some large value (eg. 128MB) for better IO efficiency. Because it could lead to more than 1 second user perceivable stalls. Even the current 4MB write size may be too large for slow USB sticks. The fact that balance_dirty_pages() starts IO on itself couples the IO size to wait time, which makes it hard to do suitable IO size while keeping the wait time under control. Now it's possible to increase writeback chunk size proportional to the disk bandwidth. In a simple test of 50 dd's on XFS, 1-HDD, 3GB ram, the larger writeback size dramatically reduces the seek count to 1/10 (far beyond my expectation) and improves the write throughput by 24%. - long block time in balance_dirty_pages() hurts desktop responsiveness Many of us may have the experience: it often takes a couple of seconds or even long time to stop a heavy writing dd/cp/tar command with Ctrl-C or "kill -9". - IO pipeline broken by bumpy write() progress There are a broad class of "loop {read(buf); write(buf);}" applications whose read() pipeline will be under-utilized or even come to a stop if the write()s have long latencies _or_ don't progress in a constant rate. The current threshold based throttling inherently transfers the large low level IO completion fluctuations to bumpy application write()s, and further deteriorates with increasing number of dirtiers and/or bdi's. For example, when doing 50 dd's + 1 remote rsync to an XFS partition, the rsync progresses very bumpy in legacy kernel, and throughput is improved by 67% by this patchset. (plus the larger write chunk size, it will be 93% speedup). The new rate based throttling can support 1000+ dd's with excellent smoothness, low latency and low overheads. For the above reasons, it's much better to do IO-less and low latency pauses in balance_dirty_pages(). Jan Kara, Dave Chinner and me explored the scheme to let balance_dirty_pages() wait for enough writeback IO completions to safeguard the dirty limit. However it's found to have two problems: - in large NUMA systems, the per-cpu counters may have big accounting errors, leading to big throttle wait time and jitters. - NFS may kill large amount of unstable pages with one single COMMIT. Because NFS server serves COMMIT with expensive fsync() IOs, it is desirable to delay and reduce the number of COMMITs. So it's not likely to optimize away such kind of bursty IO completions, and the resulted large (and tiny) stall times in IO completion based throttling. 
So here is a pause time oriented approach, which tries to control the pause time in each balance_dirty_pages() invocations, by controlling the number of pages dirtied before calling balance_dirty_pages(), for smooth and efficient dirty throttling: - avoid useless (eg. zero pause time) balance_dirty_pages() calls - avoid too small pause time (less than 4ms, which burns CPU power) - avoid too large pause time (more than 200ms, which hurts responsiveness) - avoid big fluctuations of pause times It can control pause times at will. The default policy (in a followup patch) will be to do ~10ms pauses in 1-dd case, and increase to ~100ms in 1000-dd case. BEHAVIOR CHANGE =============== (1) dirty threshold Users will notice that the applications will get throttled once crossing the global (background + dirty)/2=15% threshold, and then balanced around 17.5%. Before patch, the behavior is to just throttle it at 20% dirtyable memory in 1-dd case. Since the task will be soft throttled earlier than before, it may be perceived by end users as performance "slow down" if his application happens to dirty more than 15% dirtyable memory. (2) smoothness/responsiveness Users will notice a more responsive system during heavy writeback. "killall dd" will take effect instantly. Signed-off-by: Wu Fengguang --- include/trace/events/writeback.h | 24 ------ mm/page-writeback.c | 161 ++++++++++++++------------------------- 2 files changed, 56 insertions(+), 129 deletions(-) diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 5f172703eb4f..178c23508d3d 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -104,30 +104,6 @@ DEFINE_WRITEBACK_EVENT(writeback_bdi_register); DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister); DEFINE_WRITEBACK_EVENT(writeback_thread_start); DEFINE_WRITEBACK_EVENT(writeback_thread_stop); -DEFINE_WRITEBACK_EVENT(balance_dirty_start); -DEFINE_WRITEBACK_EVENT(balance_dirty_wait); - -TRACE_EVENT(balance_dirty_written, - - TP_PROTO(struct backing_dev_info *bdi, int written), - - TP_ARGS(bdi, written), - - TP_STRUCT__entry( - __array(char, name, 32) - __field(int, written) - ), - - TP_fast_assign( - strncpy(__entry->name, dev_name(bdi->dev), 32); - __entry->written = written; - ), - - TP_printk("bdi %s written %d", - __entry->name, - __entry->written - ) -); DECLARE_EVENT_CLASS(wbc_class, TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), diff --git a/mm/page-writeback.c b/mm/page-writeback.c index daff320d263f..f32f25092c66 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -250,50 +250,6 @@ static void bdi_writeout_fraction(struct backing_dev_info *bdi, numerator, denominator); } -static inline void task_dirties_fraction(struct task_struct *tsk, - long *numerator, long *denominator) -{ - prop_fraction_single(&vm_dirties, &tsk->dirties, - numerator, denominator); -} - -/* - * task_dirty_limit - scale down dirty throttling threshold for one task - * - * task specific dirty limit: - * - * dirty -= (dirty/8) * p_{t} - * - * To protect light/slow dirtying tasks from heavier/fast ones, we start - * throttling individual tasks before reaching the bdi dirty limit. - * Relatively low thresholds will be allocated to heavy dirtiers. So when - * dirty pages grow large, heavy dirtiers will be throttled first, which will - * effectively curb the growth of dirty pages. Light dirtiers with high enough - * dirty threshold may never get throttled. 
- */ -#define TASK_LIMIT_FRACTION 8 -static unsigned long task_dirty_limit(struct task_struct *tsk, - unsigned long bdi_dirty) -{ - long numerator, denominator; - unsigned long dirty = bdi_dirty; - u64 inv = dirty / TASK_LIMIT_FRACTION; - - task_dirties_fraction(tsk, &numerator, &denominator); - inv *= numerator; - do_div(inv, denominator); - - dirty -= inv; - - return max(dirty, bdi_dirty/2); -} - -/* Minimum limit for any task */ -static unsigned long task_min_dirty_limit(unsigned long bdi_dirty) -{ - return bdi_dirty - bdi_dirty / TASK_LIMIT_FRACTION; -} - /* * */ @@ -986,30 +942,36 @@ static unsigned long dirty_poll_interval(unsigned long dirty, /* * balance_dirty_pages() must be called by processes which are generating dirty * data. It looks at the number of dirty pages in the machine and will force - * the caller to perform writeback if the system is over `vm_dirty_ratio'. + * the caller to wait once crossing the (background_thresh + dirty_thresh) / 2. * If we're over `background_thresh' then the writeback threads are woken to * perform some writeout. */ static void balance_dirty_pages(struct address_space *mapping, - unsigned long write_chunk) + unsigned long pages_dirtied) { - unsigned long nr_reclaimable, bdi_nr_reclaimable; + unsigned long nr_reclaimable; /* = file_dirty + unstable_nfs */ + unsigned long bdi_reclaimable; unsigned long nr_dirty; /* = file_dirty + writeback + unstable_nfs */ unsigned long bdi_dirty; unsigned long freerun; unsigned long background_thresh; unsigned long dirty_thresh; unsigned long bdi_thresh; - unsigned long task_bdi_thresh; - unsigned long min_task_bdi_thresh; - unsigned long pages_written = 0; - unsigned long pause = 1; + long pause = 0; bool dirty_exceeded = false; - bool clear_dirty_exceeded = true; + unsigned long task_ratelimit; + unsigned long dirty_ratelimit; + unsigned long pos_ratio; struct backing_dev_info *bdi = mapping->backing_dev_info; unsigned long start_time = jiffies; for (;;) { + /* + * Unstable writes are a feature of certain networked + * filesystems (i.e. NFS) in which data may have been + * written to the server's write cache, but has not yet + * been flushed to permanent storage. + */ nr_reclaimable = global_page_state(NR_FILE_DIRTY) + global_page_state(NR_UNSTABLE_NFS); nr_dirty = nr_reclaimable + global_page_state(NR_WRITEBACK); @@ -1026,9 +988,23 @@ static void balance_dirty_pages(struct address_space *mapping, if (nr_dirty <= freerun) break; + if (unlikely(!writeback_in_progress(bdi))) + bdi_start_background_writeback(bdi); + + /* + * bdi_thresh is not treated as some limiting factor as + * dirty_thresh, due to reasons + * - in JBOD setup, bdi_thresh can fluctuate a lot + * - in a system with HDD and USB key, the USB key may somehow + * go into state (bdi_dirty >> bdi_thresh) either because + * bdi_dirty starts high, or because bdi_thresh drops low. + * In this case we don't want to hard throttle the USB key + * dirtiers for 100 seconds until bdi_dirty drops under + * bdi_thresh. Instead the auxiliary bdi control line in + * bdi_position_ratio() will let the dirtier task progress + * at some rate <= (write_bw / 2) for bringing down bdi_dirty. + */ bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh); - min_task_bdi_thresh = task_min_dirty_limit(bdi_thresh); - task_bdi_thresh = task_dirty_limit(current, bdi_thresh); /* * In order to avoid the stacked BDI deadlock we need @@ -1040,57 +1016,41 @@ static void balance_dirty_pages(struct address_space *mapping, * actually dirty; with m+n sitting in the percpu * deltas. 
*/ - if (task_bdi_thresh < 2 * bdi_stat_error(bdi)) { - bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE); - bdi_dirty = bdi_nr_reclaimable + + if (bdi_thresh < 2 * bdi_stat_error(bdi)) { + bdi_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE); + bdi_dirty = bdi_reclaimable + bdi_stat_sum(bdi, BDI_WRITEBACK); } else { - bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE); - bdi_dirty = bdi_nr_reclaimable + + bdi_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE); + bdi_dirty = bdi_reclaimable + bdi_stat(bdi, BDI_WRITEBACK); } - /* - * The bdi thresh is somehow "soft" limit derived from the - * global "hard" limit. The former helps to prevent heavy IO - * bdi or process from holding back light ones; The latter is - * the last resort safeguard. - */ - dirty_exceeded = (bdi_dirty > task_bdi_thresh) || + dirty_exceeded = (bdi_dirty > bdi_thresh) || (nr_dirty > dirty_thresh); - clear_dirty_exceeded = (bdi_dirty <= min_task_bdi_thresh) && - (nr_dirty <= dirty_thresh); - - if (!dirty_exceeded) - break; - - if (!bdi->dirty_exceeded) + if (dirty_exceeded && !bdi->dirty_exceeded) bdi->dirty_exceeded = 1; bdi_update_bandwidth(bdi, dirty_thresh, background_thresh, nr_dirty, bdi_thresh, bdi_dirty, start_time); - /* Note: nr_reclaimable denotes nr_dirty + nr_unstable. - * Unstable writes are a feature of certain networked - * filesystems (i.e. NFS) in which data may have been - * written to the server's write cache, but has not yet - * been flushed to permanent storage. - * Only move pages to writeback if this bdi is over its - * threshold otherwise wait until the disk writes catch - * up. - */ - trace_balance_dirty_start(bdi); - if (bdi_nr_reclaimable > task_bdi_thresh) { - pages_written += writeback_inodes_wb(&bdi->wb, - write_chunk); - trace_balance_dirty_written(bdi, pages_written); - if (pages_written >= write_chunk) - break; /* We've done our duty */ + dirty_ratelimit = bdi->dirty_ratelimit; + pos_ratio = bdi_position_ratio(bdi, dirty_thresh, + background_thresh, nr_dirty, + bdi_thresh, bdi_dirty); + if (unlikely(pos_ratio == 0)) { + pause = MAX_PAUSE; + goto pause; } + task_ratelimit = (u64)dirty_ratelimit * + pos_ratio >> RATELIMIT_CALC_SHIFT; + pause = (HZ * pages_dirtied) / (task_ratelimit | 1); + pause = min_t(long, pause, MAX_PAUSE); + +pause: __set_current_state(TASK_UNINTERRUPTIBLE); io_schedule_timeout(pause); - trace_balance_dirty_wait(bdi); dirty_thresh = hard_dirty_limit(dirty_thresh); /* @@ -1099,22 +1059,11 @@ static void balance_dirty_pages(struct address_space *mapping, * 200ms is typically more than enough to curb heavy dirtiers; * (b) the pause time limit makes the dirtiers more responsive. */ - if (nr_dirty < dirty_thresh && - bdi_dirty < (task_bdi_thresh + bdi_thresh) / 2 && - time_after(jiffies, start_time + MAX_PAUSE)) + if (nr_dirty < dirty_thresh) break; - - /* - * Increase the delay for each loop, up to our previous - * default of taking a 100ms nap. - */ - pause <<= 1; - if (pause > HZ / 10) - pause = HZ / 10; } - /* Clear dirty_exceeded flag only when no task can exceed the limit */ - if (clear_dirty_exceeded && bdi->dirty_exceeded) + if (!dirty_exceeded && bdi->dirty_exceeded) bdi->dirty_exceeded = 0; current->nr_dirtied = 0; @@ -1131,8 +1080,10 @@ static void balance_dirty_pages(struct address_space *mapping, * In normal mode, we start background writeout at the lower * background_thresh, to keep the amount of dirty memory low. 
*/ - if ((laptop_mode && pages_written) || - (!laptop_mode && (nr_reclaimable > background_thresh))) + if (laptop_mode) + return; + + if (nr_reclaimable > background_thresh) bdi_start_background_writeback(bdi); } -- cgit v1.2.3 From c8462cc9de9e92264ec647903772f6036a99b286 Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Sat, 11 Jun 2011 19:21:43 -0600 Subject: writeback: limit max dirty pause time Apply two policies to scale down the max pause time for 1) small number of concurrent dirtiers 2) small memory system (comparing to storage bandwidth) MAX_PAUSE=200ms may only be suitable for high end servers with lots of concurrent dirtiers, where the large pause time can reduce much overheads. Otherwise, smaller pause time is desirable whenever possible, so as to get good responsiveness and smooth user experiences. It's actually required for good disk utilization in the case when all the dirty pages can be synced to disk within MAX_PAUSE=200ms. Signed-off-by: Wu Fengguang --- mm/page-writeback.c | 44 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 42 insertions(+), 2 deletions(-) diff --git a/mm/page-writeback.c b/mm/page-writeback.c index f32f25092c66..cc351e6f9ed9 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -939,6 +939,43 @@ static unsigned long dirty_poll_interval(unsigned long dirty, return 1; } +static unsigned long bdi_max_pause(struct backing_dev_info *bdi, + unsigned long bdi_dirty) +{ + unsigned long bw = bdi->avg_write_bandwidth; + unsigned long hi = ilog2(bw); + unsigned long lo = ilog2(bdi->dirty_ratelimit); + unsigned long t; + + /* target for 20ms max pause on 1-dd case */ + t = HZ / 50; + + /* + * Scale up pause time for concurrent dirtiers in order to reduce CPU + * overheads. + * + * (N * 20ms) on 2^N concurrent tasks. + */ + if (hi > lo) + t += (hi - lo) * (20 * HZ) / 1024; + + /* + * Limit pause time for small memory systems. If sleeping for too long + * time, a small pool of dirty/writeback pages may go empty and disk go + * idle. + * + * 8 serves as the safety ratio. + */ + if (bdi_dirty) + t = min(t, bdi_dirty * HZ / (8 * bw + 1)); + + /* + * The pause time will be settled within range (max_pause/4, max_pause). + * Apply a minimal value of 4 to get a non-zero max_pause/4. + */ + return clamp_val(t, 4, MAX_PAUSE); +} + /* * balance_dirty_pages() must be called by processes which are generating dirty * data. 
It looks at the number of dirty pages in the machine and will force @@ -958,6 +995,7 @@ static void balance_dirty_pages(struct address_space *mapping, unsigned long dirty_thresh; unsigned long bdi_thresh; long pause = 0; + long max_pause; bool dirty_exceeded = false; unsigned long task_ratelimit; unsigned long dirty_ratelimit; @@ -1035,18 +1073,20 @@ static void balance_dirty_pages(struct address_space *mapping, nr_dirty, bdi_thresh, bdi_dirty, start_time); + max_pause = bdi_max_pause(bdi, bdi_dirty); + dirty_ratelimit = bdi->dirty_ratelimit; pos_ratio = bdi_position_ratio(bdi, dirty_thresh, background_thresh, nr_dirty, bdi_thresh, bdi_dirty); if (unlikely(pos_ratio == 0)) { - pause = MAX_PAUSE; + pause = max_pause; goto pause; } task_ratelimit = (u64)dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT; pause = (HZ * pages_dirtied) / (task_ratelimit | 1); - pause = min_t(long, pause, MAX_PAUSE); + pause = min(pause, max_pause); pause: __set_current_state(TASK_UNINTERRUPTIBLE); -- cgit v1.2.3 From 57fc978cfb61ed40a7bbfe5a569359159ba31abd Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Sat, 11 Jun 2011 19:32:32 -0600 Subject: writeback: control dirty pause time The dirty pause time shall ultimately be controlled by adjusting nr_dirtied_pause, since there is the relationship pause = pages_dirtied / task_ratelimit Assuming pages_dirtied ~= nr_dirtied_pause task_ratelimit ~= dirty_ratelimit We get nr_dirtied_pause ~= dirty_ratelimit * desired_pause Here dirty_ratelimit is preferred over task_ratelimit because it's more stable. It's also important to limit possible large transitional errors: - bw is changing quickly - pages_dirtied << nr_dirtied_pause on entering the dirty-exceeded area - pages_dirtied >> nr_dirtied_pause on btrfs (to be improved by a separate fix, but still expect non-trivial errors) So we end up using the above formula inside clamp_val(). The best test case for this code is to run 100 "dd bs=4M" tasks on btrfs and check the pause time distribution.
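As a quick sanity check of the nr_dirtied_pause ~= dirty_ratelimit * desired_pause relation above, here is a standalone userspace sketch with made-up numbers (the tick rate and ratelimit are illustrative assumptions, not values taken from this patch): with 4KB pages, a dirty_ratelimit of 2560 pages/s (about 10 MB/s) and a desired pause of 100ms give a budget of roughly 256 pages, i.e. about 1MB dirtied between throttle points.

/*
 * Standalone sketch of nr_dirtied_pause ~= dirty_ratelimit * desired_pause.
 * The tick rate and ratelimit below are illustrative assumptions only.
 */
#include <stdio.h>

int main(void)
{
	unsigned long hz = 1000;		/* assumed tick rate */
	unsigned long dirty_ratelimit = 2560;	/* pages/s, ~10 MB/s with 4KB pages */
	unsigned long desired_pause = hz / 10;	/* 100ms expressed in ticks */
	unsigned long nr_dirtied_pause;

	/* pause = pages_dirtied / task_ratelimit, solved for the page budget */
	nr_dirtied_pause = dirty_ratelimit * desired_pause / hz;

	printf("nr_dirtied_pause ~= %lu pages (~%lu KB)\n",
	       nr_dirtied_pause, nr_dirtied_pause * 4);
	return 0;
}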
Signed-off-by: Wu Fengguang --- mm/page-writeback.c | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/mm/page-writeback.c b/mm/page-writeback.c index cc351e6f9ed9..6a8bb693b429 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -1086,6 +1086,10 @@ static void balance_dirty_pages(struct address_space *mapping, task_ratelimit = (u64)dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT; pause = (HZ * pages_dirtied) / (task_ratelimit | 1); + if (unlikely(pause <= 0)) { + pause = 1; /* avoid resetting nr_dirtied_pause below */ + break; + } pause = min(pause, max_pause); pause: @@ -1107,7 +1111,21 @@ pause: bdi->dirty_exceeded = 0; current->nr_dirtied = 0; - current->nr_dirtied_pause = dirty_poll_interval(nr_dirty, dirty_thresh); + if (pause == 0) { /* in freerun area */ + current->nr_dirtied_pause = + dirty_poll_interval(nr_dirty, dirty_thresh); + } else if (pause <= max_pause / 4 && + pages_dirtied >= current->nr_dirtied_pause) { + current->nr_dirtied_pause = clamp_val( + dirty_ratelimit * (max_pause / 2) / HZ, + pages_dirtied + pages_dirtied / 8, + pages_dirtied * 4); + } else if (pause >= max_pause) { + current->nr_dirtied_pause = 1 | clamp_val( + dirty_ratelimit * (max_pause / 2) / HZ, + pages_dirtied / 4, + pages_dirtied - pages_dirtied / 8); + } if (writeback_in_progress(bdi)) return; -- cgit v1.2.3 From 8927f66c4ede9a18b4b58f7e6f9debca67065f6b Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Thu, 4 Aug 2011 22:16:46 -0600 Subject: writeback: dirty position control - bdi reserve area Keep a minimal pool of dirty pages for each bdi, so that the disk IO queues won't underrun. Also gently increase a small bdi_thresh so that it does not get stuck at 0 for a lightly dirtied bdi. It's particularly useful for JBOD and small memory systems. It may result in (pos_ratio > 1) at the setpoint and push the dirty pages high. This is more or less intended because the bdi is in danger of IO queue underflow. Signed-off-by: Wu Fengguang --- mm/page-writeback.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 6a8bb693b429..325f753c80ed 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -599,6 +599,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi, */ if (unlikely(bdi_thresh > thresh)) bdi_thresh = thresh; + bdi_thresh = max(bdi_thresh, (limit - dirty) / 8); /* * scale global setpoint to bdi's: * bdi_setpoint = setpoint * bdi_thresh / thresh @@ -622,6 +623,20 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi, } else pos_ratio /= 4; + /* + * bdi reserve area, safeguard against dirty pool underrun and disk idle + * It may push the desired control point of global dirty pages higher + * than setpoint. + */ + x_intercept = bdi_thresh / 2; + if (bdi_dirty < x_intercept) { + if (bdi_dirty > x_intercept / 8) { + pos_ratio *= x_intercept; + do_div(pos_ratio, bdi_dirty); + } else + pos_ratio *= 8; + } + return pos_ratio; } -- cgit v1.2.3 From b00949aa2df9970a912bf060bc95e99da356881c Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Thu, 18 Nov 2010 14:38:33 -0600 Subject: writeback: per-bdi background threshold One thing that puzzled me is that in the JBOD case, the per-disk writeout performance is smaller than in the corresponding single-disk case, even when they have comparable bdi_thresh.
Tracing shows find that in single disk case, bdi_writeback is always kept high while in JBOD case, it could drop low from time to time and correspondingly bdi_reclaimable could sometimes rush high. The fix is to watch bdi_reclaimable and kick background writeback as soon as it goes high. This resembles the global background threshold but in per-bdi manner. The trick is, as long as bdi_reclaimable does not go high, bdi_writeback naturally won't go low because bdi_reclaimable+bdi_writeback ~= bdi_thresh. With less fluctuated writeback pages, JBOD performance is observed to increase noticeably in various cases. vmstat:nr_written values before/after patch: 3.1.0-rc4-wo-underrun+ 3.1.0-rc4-bgthresh3+ ------------------------ ------------------------ 125596480 +25.9% 158179363 JBOD-10HDD-16G/ext4-100dd-1M-24p-16384M-20:10-X 61790815 +110.4% 130032231 JBOD-10HDD-16G/ext4-10dd-1M-24p-16384M-20:10-X 58853546 -0.1% 58823828 JBOD-10HDD-16G/ext4-1dd-1M-24p-16384M-20:10-X 110159811 +24.7% 137355377 JBOD-10HDD-16G/xfs-100dd-1M-24p-16384M-20:10-X 69544762 +10.8% 77080047 JBOD-10HDD-16G/xfs-10dd-1M-24p-16384M-20:10-X 50644862 +0.5% 50890006 JBOD-10HDD-16G/xfs-1dd-1M-24p-16384M-20:10-X 42677090 +28.0% 54643527 JBOD-10HDD-thresh=100M/ext4-100dd-1M-24p-16384M-100M:10-X 47491324 +13.3% 53785605 JBOD-10HDD-thresh=100M/ext4-10dd-1M-24p-16384M-100M:10-X 52548986 +0.9% 53001031 JBOD-10HDD-thresh=100M/ext4-1dd-1M-24p-16384M-100M:10-X 26783091 +36.8% 36650248 JBOD-10HDD-thresh=100M/xfs-100dd-1M-24p-16384M-100M:10-X 35526347 +14.0% 40492312 JBOD-10HDD-thresh=100M/xfs-10dd-1M-24p-16384M-100M:10-X 44670723 -1.1% 44177606 JBOD-10HDD-thresh=100M/xfs-1dd-1M-24p-16384M-100M:10-X 127996037 +22.4% 156719990 JBOD-10HDD-thresh=2G/ext4-100dd-1M-24p-16384M-2048M:10-X 57518856 +3.8% 59677625 JBOD-10HDD-thresh=2G/ext4-10dd-1M-24p-16384M-2048M:10-X 51919909 +12.2% 58269894 JBOD-10HDD-thresh=2G/ext4-1dd-1M-24p-16384M-2048M:10-X 86410514 +79.0% 154660433 JBOD-10HDD-thresh=2G/xfs-100dd-1M-24p-16384M-2048M:10-X 40132519 +38.6% 55617893 JBOD-10HDD-thresh=2G/xfs-10dd-1M-24p-16384M-2048M:10-X 48423248 +7.5% 52042927 JBOD-10HDD-thresh=2G/xfs-1dd-1M-24p-16384M-2048M:10-X 206041046 +44.1% 296846536 JBOD-10HDD-thresh=4G/xfs-100dd-1M-24p-16384M-4096M:10-X 72312903 -19.4% 58272885 JBOD-10HDD-thresh=4G/xfs-10dd-1M-24p-16384M-4096M:10-X 50635672 -0.5% 50384787 JBOD-10HDD-thresh=4G/xfs-1dd-1M-24p-16384M-4096M:10-X 68308534 +115.7% 147324758 JBOD-10HDD-thresh=800M/ext4-100dd-1M-24p-16384M-800M:10-X 57882933 +14.5% 66269621 JBOD-10HDD-thresh=800M/ext4-10dd-1M-24p-16384M-800M:10-X 52183472 +12.8% 58855181 JBOD-10HDD-thresh=800M/ext4-1dd-1M-24p-16384M-800M:10-X 53788956 +94.2% 104460352 JBOD-10HDD-thresh=800M/xfs-100dd-1M-24p-16384M-800M:10-X 44493342 +35.5% 60298210 JBOD-10HDD-thresh=800M/xfs-10dd-1M-24p-16384M-800M:10-X 42641209 +18.9% 50681038 JBOD-10HDD-thresh=800M/xfs-1dd-1M-24p-16384M-800M:10-X Signed-off-by: Wu Fengguang --- fs/fs-writeback.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 28076562ada0..6401cd76f109 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -658,14 +658,21 @@ long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages) return nr_pages - work.nr_pages; } -static inline bool over_bground_thresh(void) +static bool over_bground_thresh(struct backing_dev_info *bdi) { unsigned long background_thresh, dirty_thresh; global_dirty_limits(&background_thresh, &dirty_thresh); - return (global_page_state(NR_FILE_DIRTY) + - 
global_page_state(NR_UNSTABLE_NFS) > background_thresh); + if (global_page_state(NR_FILE_DIRTY) + + global_page_state(NR_UNSTABLE_NFS) > background_thresh) + return true; + + if (bdi_stat(bdi, BDI_RECLAIMABLE) > + bdi_dirty_limit(bdi, background_thresh)) + return true; + + return false; } /* @@ -727,7 +734,7 @@ static long wb_writeback(struct bdi_writeback *wb, * For background writeout, stop when we are below the * background dirty threshold */ - if (work->for_background && !over_bground_thresh()) + if (work->for_background && !over_bground_thresh(wb->bdi)) break; if (work->for_kupdate) { @@ -811,7 +818,7 @@ static unsigned long get_nr_dirty_pages(void) static long wb_check_background_flush(struct bdi_writeback *wb) { - if (over_bground_thresh()) { + if (over_bground_thresh(wb->bdi)) { struct wb_writeback_work work = { .nr_pages = LONG_MAX, -- cgit v1.2.3 From 50657fc4dfa7e345a1008f7c1de0bf930bbecca9 Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Tue, 11 Oct 2011 17:06:33 -0600 Subject: writeback: fix ppc compile warnings on do_div(long long, unsigned long) Fix powerpc compile warnings mm/page-writeback.c: In function 'bdi_position_ratio': mm/page-writeback.c:622:3: warning: comparison of distinct pointer types lacks a cast [enabled by default] page-writeback.c:635:4: warning: comparison of distinct pointer types lacks a cast [enabled by default] Also fix gcc "uninitialized var" warnings. Reported-by: Stephen Rothwell Signed-off-by: Wu Fengguang --- mm/page-writeback.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 325f753c80ed..0802d5177997 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -618,8 +618,8 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi, x_intercept = bdi_setpoint + span; if (bdi_dirty < x_intercept - span / 4) { - pos_ratio *= x_intercept - bdi_dirty; - do_div(pos_ratio, x_intercept - bdi_setpoint + 1); + pos_ratio = div_u64(pos_ratio * (x_intercept - bdi_dirty), + x_intercept - bdi_setpoint + 1); } else pos_ratio /= 4; @@ -630,10 +630,9 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi, */ x_intercept = bdi_thresh / 2; if (bdi_dirty < x_intercept) { - if (bdi_dirty > x_intercept / 8) { - pos_ratio *= x_intercept; - do_div(pos_ratio, bdi_dirty); - } else + if (bdi_dirty > x_intercept / 8) + pos_ratio = div_u64(pos_ratio * x_intercept, bdi_dirty); + else pos_ratio *= 8; } @@ -1010,10 +1009,10 @@ static void balance_dirty_pages(struct address_space *mapping, unsigned long dirty_thresh; unsigned long bdi_thresh; long pause = 0; - long max_pause; + long uninitialized_var(max_pause); bool dirty_exceeded = false; unsigned long task_ratelimit; - unsigned long dirty_ratelimit; + unsigned long uninitialized_var(dirty_ratelimit); unsigned long pos_ratio; struct backing_dev_info *bdi = mapping->backing_dev_info; unsigned long start_time = jiffies; -- cgit v1.2.3 From b48c104d2211b0ac881a71f5f76a3816225f8111 Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Wed, 2 Mar 2011 17:22:49 -0600 Subject: writeback: trace event bdi_dirty_ratelimit It helps understand how various throttle bandwidths are updated. 
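The rates in this trace event are printed in KB/s: the TP_fast_assign below converts the page-based counters with the KBps() helper added by the same patch. The event can be read through the standard tracing interface (assuming debugfs is mounted at /sys/kernel/debug), by enabling events/writeback/bdi_dirty_ratelimit and reading trace_pipe. A minimal standalone sketch of that unit conversion, with an assumed page size and bandwidth:

/*
 * Standalone sketch of the KBps() conversion used by the trace event:
 * a pages-per-second rate shifted left by (PAGE_SHIFT - 10) becomes KB/s.
 * PAGE_SHIFT and the bandwidth below are assumptions for illustration.
 */
#include <stdio.h>

#define PAGE_SHIFT 12			/* assumed 4KB pages */
#define KBps(x) ((x) << (PAGE_SHIFT - 10))

int main(void)
{
	unsigned long write_bw = 25600;	/* pages/s, i.e. 100 MB/s with 4KB pages */

	printf("write_bw=%lu KBps\n", KBps(write_bw));
	return 0;
}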
Signed-off-by: Wu Fengguang --- include/trace/events/writeback.h | 45 ++++++++++++++++++++++++++++++++++++++++ mm/page-writeback.c | 2 ++ 2 files changed, 47 insertions(+) diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 178c23508d3d..ffb5deb77ca9 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -226,6 +226,51 @@ TRACE_EVENT(global_dirty_state, ) ); +#define KBps(x) ((x) << (PAGE_SHIFT - 10)) + +TRACE_EVENT(bdi_dirty_ratelimit, + + TP_PROTO(struct backing_dev_info *bdi, + unsigned long dirty_rate, + unsigned long task_ratelimit), + + TP_ARGS(bdi, dirty_rate, task_ratelimit), + + TP_STRUCT__entry( + __array(char, bdi, 32) + __field(unsigned long, write_bw) + __field(unsigned long, avg_write_bw) + __field(unsigned long, dirty_rate) + __field(unsigned long, dirty_ratelimit) + __field(unsigned long, task_ratelimit) + __field(unsigned long, balanced_dirty_ratelimit) + ), + + TP_fast_assign( + strlcpy(__entry->bdi, dev_name(bdi->dev), 32); + __entry->write_bw = KBps(bdi->write_bandwidth); + __entry->avg_write_bw = KBps(bdi->avg_write_bandwidth); + __entry->dirty_rate = KBps(dirty_rate); + __entry->dirty_ratelimit = KBps(bdi->dirty_ratelimit); + __entry->task_ratelimit = KBps(task_ratelimit); + __entry->balanced_dirty_ratelimit = + KBps(bdi->balanced_dirty_ratelimit); + ), + + TP_printk("bdi %s: " + "write_bw=%lu awrite_bw=%lu dirty_rate=%lu " + "dirty_ratelimit=%lu task_ratelimit=%lu " + "balanced_dirty_ratelimit=%lu", + __entry->bdi, + __entry->write_bw, /* write bandwidth */ + __entry->avg_write_bw, /* avg write bandwidth */ + __entry->dirty_rate, /* bdi dirty rate */ + __entry->dirty_ratelimit, /* base ratelimit */ + __entry->task_ratelimit, /* ratelimit with position control */ + __entry->balanced_dirty_ratelimit /* the balanced ratelimit */ + ) +); + DECLARE_EVENT_CLASS(writeback_congest_waited_template, TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed), diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 0802d5177997..e3c2d8bf87bb 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -875,6 +875,8 @@ static void bdi_update_dirty_ratelimit(struct backing_dev_info *bdi, bdi->dirty_ratelimit = max(dirty_ratelimit, 1UL); bdi->balanced_dirty_ratelimit = balanced_dirty_ratelimit; + + trace_bdi_dirty_ratelimit(bdi, dirty_rate, task_ratelimit); } void __bdi_update_bandwidth(struct backing_dev_info *bdi, -- cgit v1.2.3 From ece13ac31bbe492d940ba0bc4ade2ae1521f46a5 Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Sun, 29 Aug 2010 23:33:20 -0600 Subject: writeback: trace event balance_dirty_pages Useful for analyzing the dynamics of the throttling algorithms and debugging user reported problems. 
Signed-off-by: Wu Fengguang --- include/trace/events/writeback.h | 73 ++++++++++++++++++++++++++++++++++++++++ mm/page-writeback.c | 22 ++++++++++++ 2 files changed, 95 insertions(+) diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index ffb5deb77ca9..0ce9f06f58c2 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -271,6 +271,79 @@ TRACE_EVENT(bdi_dirty_ratelimit, ) ); +TRACE_EVENT(balance_dirty_pages, + + TP_PROTO(struct backing_dev_info *bdi, + unsigned long thresh, + unsigned long bg_thresh, + unsigned long dirty, + unsigned long bdi_thresh, + unsigned long bdi_dirty, + unsigned long dirty_ratelimit, + unsigned long task_ratelimit, + unsigned long dirtied, + long pause, + unsigned long start_time), + + TP_ARGS(bdi, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty, + dirty_ratelimit, task_ratelimit, + dirtied, pause, start_time), + + TP_STRUCT__entry( + __array( char, bdi, 32) + __field(unsigned long, limit) + __field(unsigned long, setpoint) + __field(unsigned long, dirty) + __field(unsigned long, bdi_setpoint) + __field(unsigned long, bdi_dirty) + __field(unsigned long, dirty_ratelimit) + __field(unsigned long, task_ratelimit) + __field(unsigned int, dirtied) + __field(unsigned int, dirtied_pause) + __field(unsigned long, paused) + __field( long, pause) + ), + + TP_fast_assign( + unsigned long freerun = (thresh + bg_thresh) / 2; + strlcpy(__entry->bdi, dev_name(bdi->dev), 32); + + __entry->limit = global_dirty_limit; + __entry->setpoint = (global_dirty_limit + freerun) / 2; + __entry->dirty = dirty; + __entry->bdi_setpoint = __entry->setpoint * + bdi_thresh / (thresh + 1); + __entry->bdi_dirty = bdi_dirty; + __entry->dirty_ratelimit = KBps(dirty_ratelimit); + __entry->task_ratelimit = KBps(task_ratelimit); + __entry->dirtied = dirtied; + __entry->dirtied_pause = current->nr_dirtied_pause; + __entry->pause = pause * 1000 / HZ; + __entry->paused = (jiffies - start_time) * 1000 / HZ; + ), + + + TP_printk("bdi %s: " + "limit=%lu setpoint=%lu dirty=%lu " + "bdi_setpoint=%lu bdi_dirty=%lu " + "dirty_ratelimit=%lu task_ratelimit=%lu " + "dirtied=%u dirtied_pause=%u " + "paused=%lu pause=%ld", + __entry->bdi, + __entry->limit, + __entry->setpoint, + __entry->dirty, + __entry->bdi_setpoint, + __entry->bdi_dirty, + __entry->dirty_ratelimit, + __entry->task_ratelimit, + __entry->dirtied, + __entry->dirtied_pause, + __entry->paused, /* ms */ + __entry->pause /* ms */ + ) +); + DECLARE_EVENT_CLASS(writeback_congest_waited_template, TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed), diff --git a/mm/page-writeback.c b/mm/page-writeback.c index e3c2d8bf87bb..45d36f7dc169 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -1103,12 +1103,34 @@ static void balance_dirty_pages(struct address_space *mapping, pos_ratio >> RATELIMIT_CALC_SHIFT; pause = (HZ * pages_dirtied) / (task_ratelimit | 1); if (unlikely(pause <= 0)) { + trace_balance_dirty_pages(bdi, + dirty_thresh, + background_thresh, + nr_dirty, + bdi_thresh, + bdi_dirty, + dirty_ratelimit, + task_ratelimit, + pages_dirtied, + pause, + start_time); pause = 1; /* avoid resetting nr_dirtied_pause below */ break; } pause = min(pause, max_pause); pause: + trace_balance_dirty_pages(bdi, + dirty_thresh, + background_thresh, + nr_dirty, + bdi_thresh, + bdi_dirty, + dirty_ratelimit, + task_ratelimit, + pages_dirtied, + pause, + start_time); __set_current_state(TASK_UNINTERRUPTIBLE); io_schedule_timeout(pause); -- cgit v1.2.3 From 
ad4e38dd6a33bb3a4882c487d7abe621e583b982 Mon Sep 17 00:00:00 2001 From: Curt Wohlgemuth Date: Fri, 7 Oct 2011 21:51:56 -0600 Subject: writeback: send work item to queue_io, move_expired_inodes Instead of sending ->older_than_this to queue_io() and move_expired_inodes(), send the entire wb_writeback_work structure. There are other fields of a work item that are useful in these routines and in tracepoints. Acked-by: Jan Kara Signed-off-by: Curt Wohlgemuth Signed-off-by: Wu Fengguang --- fs/fs-writeback.c | 16 ++++++++-------- include/trace/events/writeback.h | 5 +++-- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 6401cd76f109..c51029693600 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -251,7 +251,7 @@ static bool inode_dirtied_after(struct inode *inode, unsigned long t) */ static int move_expired_inodes(struct list_head *delaying_queue, struct list_head *dispatch_queue, - unsigned long *older_than_this) + struct wb_writeback_work *work) { LIST_HEAD(tmp); struct list_head *pos, *node; @@ -262,8 +262,8 @@ static int move_expired_inodes(struct list_head *delaying_queue, while (!list_empty(delaying_queue)) { inode = wb_inode(delaying_queue->prev); - if (older_than_this && - inode_dirtied_after(inode, *older_than_this)) + if (work->older_than_this && + inode_dirtied_after(inode, *work->older_than_this)) break; if (sb && sb != inode->i_sb) do_sb_sort = 1; @@ -302,13 +302,13 @@ out: * | * +--> dequeue for IO */ -static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this) +static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work) { int moved; assert_spin_locked(&wb->list_lock); list_splice_init(&wb->b_more_io, &wb->b_io); - moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this); - trace_writeback_queue_io(wb, older_than_this, moved); + moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, work); + trace_writeback_queue_io(wb, work, moved); } static int write_inode(struct inode *inode, struct writeback_control *wbc) @@ -651,7 +651,7 @@ long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages) spin_lock(&wb->list_lock); if (list_empty(&wb->b_io)) - queue_io(wb, NULL); + queue_io(wb, &work); __writeback_inodes_wb(wb, &work); spin_unlock(&wb->list_lock); @@ -745,7 +745,7 @@ static long wb_writeback(struct bdi_writeback *wb, trace_writeback_start(wb->bdi, work); if (list_empty(&wb->b_io)) - queue_io(wb, work->older_than_this); + queue_io(wb, work); if (work->sb) progress = writeback_sb_inodes(work->sb, wb, work); else diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 0ce9f06f58c2..1261db3916cc 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -157,9 +157,9 @@ DEFINE_WBC_EVENT(wbc_writepage); TRACE_EVENT(writeback_queue_io, TP_PROTO(struct bdi_writeback *wb, - unsigned long *older_than_this, + struct wb_writeback_work *work, int moved), - TP_ARGS(wb, older_than_this, moved), + TP_ARGS(wb, work, moved), TP_STRUCT__entry( __array(char, name, 32) __field(unsigned long, older) @@ -167,6 +167,7 @@ TRACE_EVENT(writeback_queue_io, __field(int, moved) ), TP_fast_assign( + unsigned long *older_than_this = work->older_than_this; strncpy(__entry->name, dev_name(wb->bdi->dev), 32); __entry->older = older_than_this ? *older_than_this : 0; __entry->age = older_than_this ? 
-- cgit v1.2.3 From 0e175a1835ffc979e55787774e58ec79e41957d7 Mon Sep 17 00:00:00 2001 From: Curt Wohlgemuth Date: Fri, 7 Oct 2011 21:54:10 -0600 Subject: writeback: Add a 'reason' to wb_writeback_work This creates a new 'reason' field in a wb_writeback_work structure, which unambiguously identifies who initiates writeback activity. A 'wb_reason' enumeration has been added to writeback.h, to enumerate the possible reasons. The 'writeback_work_class' and tracepoint event class and 'writeback_queue_io' tracepoints are updated to include the symbolic 'reason' in all trace events. And the 'writeback_inodes_sbXXX' family of routines has had a wb_stats parameter added to them, so callers can specify why writeback is being started. Acked-by: Jan Kara Signed-off-by: Curt Wohlgemuth Signed-off-by: Wu Fengguang --- fs/btrfs/extent-tree.c | 3 ++- fs/buffer.c | 2 +- fs/ext4/inode.c | 2 +- fs/fs-writeback.c | 49 +++++++++++++++++++++++++++++----------- fs/quota/quota.c | 2 +- fs/sync.c | 4 ++-- fs/ubifs/budget.c | 2 +- include/linux/backing-dev.h | 3 ++- include/linux/writeback.h | 32 +++++++++++++++++++++----- include/trace/events/writeback.h | 14 ++++++++---- mm/backing-dev.c | 3 ++- mm/page-writeback.c | 3 ++- mm/vmscan.c | 3 ++- 13 files changed, 88 insertions(+), 34 deletions(-) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index f5be06a2462f..c9ee0e18bbdc 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3340,7 +3340,8 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, smp_mb(); nr_pages = min_t(unsigned long, nr_pages, root->fs_info->delalloc_bytes >> PAGE_CACHE_SHIFT); - writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages); + writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages, + WB_REASON_FS_FREE_SPACE); spin_lock(&space_info->lock); if (reserved > space_info->bytes_reserved) diff --git a/fs/buffer.c b/fs/buffer.c index 1a80b048ade8..f5dcee6c4cfb 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -285,7 +285,7 @@ static void free_more_memory(void) struct zone *zone; int nid; - wakeup_flusher_threads(1024); + wakeup_flusher_threads(1024, WB_REASON_FREE_MORE_MEM); yield(); for_each_online_node(nid) { diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 986e2388f031..7fa73a3b2120 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -2241,7 +2241,7 @@ static int ext4_nonda_switch(struct super_block *sb) * start pushing delalloc when 1/2 of free blocks are dirty. */ if (free_blocks < 2 * dirty_blocks) - writeback_inodes_sb_if_idle(sb); + writeback_inodes_sb_if_idle(sb, WB_REASON_FS_FREE_SPACE); return 0; } diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index c51029693600..73c3992b2bb4 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -41,11 +41,23 @@ struct wb_writeback_work { unsigned int for_kupdate:1; unsigned int range_cyclic:1; unsigned int for_background:1; + enum wb_reason reason; /* why was writeback initiated? 
*/ struct list_head list; /* pending work list */ struct completion *done; /* set if the caller waits */ }; +const char *wb_reason_name[] = { + [WB_REASON_BACKGROUND] = "background", + [WB_REASON_TRY_TO_FREE_PAGES] = "try_to_free_pages", + [WB_REASON_SYNC] = "sync", + [WB_REASON_PERIODIC] = "periodic", + [WB_REASON_LAPTOP_TIMER] = "laptop_timer", + [WB_REASON_FREE_MORE_MEM] = "free_more_memory", + [WB_REASON_FS_FREE_SPACE] = "fs_free_space", + [WB_REASON_FORKER_THREAD] = "forker_thread" +}; + /* * Include the creation of the trace points after defining the * wb_writeback_work structure so that the definition remains local to this @@ -115,7 +127,7 @@ static void bdi_queue_work(struct backing_dev_info *bdi, static void __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages, - bool range_cyclic) + bool range_cyclic, enum wb_reason reason) { struct wb_writeback_work *work; @@ -135,6 +147,7 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages, work->sync_mode = WB_SYNC_NONE; work->nr_pages = nr_pages; work->range_cyclic = range_cyclic; + work->reason = reason; bdi_queue_work(bdi, work); } @@ -150,9 +163,10 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages, * completion. Caller need not hold sb s_umount semaphore. * */ -void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages) +void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages, + enum wb_reason reason) { - __bdi_start_writeback(bdi, nr_pages, true); + __bdi_start_writeback(bdi, nr_pages, true, reason); } /** @@ -641,12 +655,14 @@ static long __writeback_inodes_wb(struct bdi_writeback *wb, return wrote; } -long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages) +long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages, + enum wb_reason reason) { struct wb_writeback_work work = { .nr_pages = nr_pages, .sync_mode = WB_SYNC_NONE, .range_cyclic = 1, + .reason = reason, }; spin_lock(&wb->list_lock); @@ -825,6 +841,7 @@ static long wb_check_background_flush(struct bdi_writeback *wb) .sync_mode = WB_SYNC_NONE, .for_background = 1, .range_cyclic = 1, + .reason = WB_REASON_BACKGROUND, }; return wb_writeback(wb, &work); @@ -858,6 +875,7 @@ static long wb_check_old_data_flush(struct bdi_writeback *wb) .sync_mode = WB_SYNC_NONE, .for_kupdate = 1, .range_cyclic = 1, + .reason = WB_REASON_PERIODIC, }; return wb_writeback(wb, &work); @@ -976,7 +994,7 @@ int bdi_writeback_thread(void *data) * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back * the whole world. */ -void wakeup_flusher_threads(long nr_pages) +void wakeup_flusher_threads(long nr_pages, enum wb_reason reason) { struct backing_dev_info *bdi; @@ -989,7 +1007,7 @@ void wakeup_flusher_threads(long nr_pages) list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) { if (!bdi_has_dirty_io(bdi)) continue; - __bdi_start_writeback(bdi, nr_pages, false); + __bdi_start_writeback(bdi, nr_pages, false, reason); } rcu_read_unlock(); } @@ -1210,7 +1228,9 @@ static void wait_sb_inodes(struct super_block *sb) * on how many (if any) will be written, and this function does not wait * for IO completion of submitted IO. 
*/ -void writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr) +void writeback_inodes_sb_nr(struct super_block *sb, + unsigned long nr, + enum wb_reason reason) { DECLARE_COMPLETION_ONSTACK(done); struct wb_writeback_work work = { @@ -1219,6 +1239,7 @@ void writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr) .tagged_writepages = 1, .done = &done, .nr_pages = nr, + .reason = reason, }; WARN_ON(!rwsem_is_locked(&sb->s_umount)); @@ -1235,9 +1256,9 @@ EXPORT_SYMBOL(writeback_inodes_sb_nr); * on how many (if any) will be written, and this function does not wait * for IO completion of submitted IO. */ -void writeback_inodes_sb(struct super_block *sb) +void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason) { - return writeback_inodes_sb_nr(sb, get_nr_dirty_pages()); + return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason); } EXPORT_SYMBOL(writeback_inodes_sb); @@ -1248,11 +1269,11 @@ EXPORT_SYMBOL(writeback_inodes_sb); * Invoke writeback_inodes_sb if no writeback is currently underway. * Returns 1 if writeback was started, 0 if not. */ -int writeback_inodes_sb_if_idle(struct super_block *sb) +int writeback_inodes_sb_if_idle(struct super_block *sb, enum wb_reason reason) { if (!writeback_in_progress(sb->s_bdi)) { down_read(&sb->s_umount); - writeback_inodes_sb(sb); + writeback_inodes_sb(sb, reason); up_read(&sb->s_umount); return 1; } else @@ -1269,11 +1290,12 @@ EXPORT_SYMBOL(writeback_inodes_sb_if_idle); * Returns 1 if writeback was started, 0 if not. */ int writeback_inodes_sb_nr_if_idle(struct super_block *sb, - unsigned long nr) + unsigned long nr, + enum wb_reason reason) { if (!writeback_in_progress(sb->s_bdi)) { down_read(&sb->s_umount); - writeback_inodes_sb_nr(sb, nr); + writeback_inodes_sb_nr(sb, nr, reason); up_read(&sb->s_umount); return 1; } else @@ -1297,6 +1319,7 @@ void sync_inodes_sb(struct super_block *sb) .nr_pages = LONG_MAX, .range_cyclic = 0, .done = &done, + .reason = WB_REASON_SYNC, }; WARN_ON(!rwsem_is_locked(&sb->s_umount)); diff --git a/fs/quota/quota.c b/fs/quota/quota.c index 10b6be3ca280..4bae57fc603b 100644 --- a/fs/quota/quota.c +++ b/fs/quota/quota.c @@ -286,7 +286,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, /* caller already holds s_umount */ if (sb->s_flags & MS_RDONLY) return -EROFS; - writeback_inodes_sb(sb); + writeback_inodes_sb(sb, WB_REASON_SYNC); return 0; default: return -EINVAL; diff --git a/fs/sync.c b/fs/sync.c index c98a7477edfd..101b8ef901d7 100644 --- a/fs/sync.c +++ b/fs/sync.c @@ -43,7 +43,7 @@ static int __sync_filesystem(struct super_block *sb, int wait) if (wait) sync_inodes_sb(sb); else - writeback_inodes_sb(sb); + writeback_inodes_sb(sb, WB_REASON_SYNC); if (sb->s_op->sync_fs) sb->s_op->sync_fs(sb, wait); @@ -98,7 +98,7 @@ static void sync_filesystems(int wait) */ SYSCALL_DEFINE0(sync) { - wakeup_flusher_threads(0); + wakeup_flusher_threads(0, WB_REASON_SYNC); sync_filesystems(0); sync_filesystems(1); if (unlikely(laptop_mode)) diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c index 315de66e52b2..bc4f94b28706 100644 --- a/fs/ubifs/budget.c +++ b/fs/ubifs/budget.c @@ -63,7 +63,7 @@ static void shrink_liability(struct ubifs_info *c, int nr_to_write) { down_read(&c->vfs_sb->s_umount); - writeback_inodes_sb(c->vfs_sb); + writeback_inodes_sb(c->vfs_sb, WB_REASON_FS_FREE_SPACE); up_read(&c->vfs_sb->s_umount); } diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index c3b92010d894..b1038bd686ac 100644 --- a/include/linux/backing-dev.h +++ 
b/include/linux/backing-dev.h @@ -118,7 +118,8 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent, int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); void bdi_unregister(struct backing_dev_info *bdi); int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int); -void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages); +void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages, + enum wb_reason reason); void bdi_start_background_writeback(struct backing_dev_info *bdi); int bdi_writeback_thread(void *data); int bdi_has_dirty_io(struct backing_dev_info *bdi); diff --git a/include/linux/writeback.h b/include/linux/writeback.h index ddb4652cb337..a378c295851f 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -38,6 +38,23 @@ enum writeback_sync_modes { WB_SYNC_ALL, /* Wait on every mapping */ }; +/* + * why some writeback work was initiated + */ +enum wb_reason { + WB_REASON_BACKGROUND, + WB_REASON_TRY_TO_FREE_PAGES, + WB_REASON_SYNC, + WB_REASON_PERIODIC, + WB_REASON_LAPTOP_TIMER, + WB_REASON_FREE_MORE_MEM, + WB_REASON_FS_FREE_SPACE, + WB_REASON_FORKER_THREAD, + + WB_REASON_MAX, +}; +extern const char *wb_reason_name[]; + /* * A control structure which tells the writeback code what to do. These are * always on the stack, and hence need no locking. They are always initialised @@ -69,14 +86,17 @@ struct writeback_control { */ struct bdi_writeback; int inode_wait(void *); -void writeback_inodes_sb(struct super_block *); -void writeback_inodes_sb_nr(struct super_block *, unsigned long nr); -int writeback_inodes_sb_if_idle(struct super_block *); -int writeback_inodes_sb_nr_if_idle(struct super_block *, unsigned long nr); +void writeback_inodes_sb(struct super_block *, enum wb_reason reason); +void writeback_inodes_sb_nr(struct super_block *, unsigned long nr, + enum wb_reason reason); +int writeback_inodes_sb_if_idle(struct super_block *, enum wb_reason reason); +int writeback_inodes_sb_nr_if_idle(struct super_block *, unsigned long nr, + enum wb_reason reason); void sync_inodes_sb(struct super_block *); -long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages); +long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages, + enum wb_reason reason); long wb_do_writeback(struct bdi_writeback *wb, int force_wait); -void wakeup_flusher_threads(long nr_pages); +void wakeup_flusher_threads(long nr_pages, enum wb_reason reason); /* writeback.h requires fs.h; it, too, is not included from here. 
*/ static inline void wait_on_inode(struct inode *inode) diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 1261db3916cc..b99caa8b780c 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -34,6 +34,7 @@ DECLARE_EVENT_CLASS(writeback_work_class, __field(int, for_kupdate) __field(int, range_cyclic) __field(int, for_background) + __field(int, reason) ), TP_fast_assign( strncpy(__entry->name, dev_name(bdi->dev), 32); @@ -43,16 +44,18 @@ DECLARE_EVENT_CLASS(writeback_work_class, __entry->for_kupdate = work->for_kupdate; __entry->range_cyclic = work->range_cyclic; __entry->for_background = work->for_background; + __entry->reason = work->reason; ), TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d " - "kupdate=%d range_cyclic=%d background=%d", + "kupdate=%d range_cyclic=%d background=%d reason=%s", __entry->name, MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev), __entry->nr_pages, __entry->sync_mode, __entry->for_kupdate, __entry->range_cyclic, - __entry->for_background + __entry->for_background, + wb_reason_name[__entry->reason] ) ); #define DEFINE_WRITEBACK_WORK_EVENT(name) \ @@ -165,6 +168,7 @@ TRACE_EVENT(writeback_queue_io, __field(unsigned long, older) __field(long, age) __field(int, moved) + __field(int, reason) ), TP_fast_assign( unsigned long *older_than_this = work->older_than_this; @@ -173,12 +177,14 @@ TRACE_EVENT(writeback_queue_io, __entry->age = older_than_this ? (jiffies - *older_than_this) * 1000 / HZ : -1; __entry->moved = moved; + __entry->reason = work->reason; ), - TP_printk("bdi %s: older=%lu age=%ld enqueue=%d", + TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s", __entry->name, __entry->older, /* older_than_this in jiffies */ __entry->age, /* older_than_this in relative milliseconds */ - __entry->moved) + __entry->moved, + wb_reason_name[__entry->reason]) ); TRACE_EVENT(global_dirty_state, diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 5dcaa3c756d1..dd8916feb05e 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -476,7 +476,8 @@ static int bdi_forker_thread(void *ptr) * the bdi from the thread. Hopefully 1024 is * large enough for efficient IO. */ - writeback_inodes_wb(&bdi->wb, 1024); + writeback_inodes_wb(&bdi->wb, 1024, + WB_REASON_FORKER_THREAD); } else { /* * The spinlock makes sure we do not lose diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 45d36f7dc169..650846b61584 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -1301,7 +1301,8 @@ void laptop_mode_timer_fn(unsigned long data) * threshold */ if (bdi_has_dirty_io(&q->backing_dev_info)) - bdi_start_writeback(&q->backing_dev_info, nr_pages); + bdi_start_writeback(&q->backing_dev_info, nr_pages, + WB_REASON_LAPTOP_TIMER); } /* diff --git a/mm/vmscan.c b/mm/vmscan.c index b55699cd9067..c735bd770d3d 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2181,7 +2181,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, */ writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2; if (total_scanned > writeback_threshold) { - wakeup_flusher_threads(laptop_mode ? 0 : total_scanned); + wakeup_flusher_threads(laptop_mode ? 0 : total_scanned, + WB_REASON_TRY_TO_FREE_PAGES); sc->may_writepage = 1; } -- cgit v1.2.3
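To make the shape of the reworked API concrete, here is a hypothetical caller sketch built on top of this last patch; my_fs_reclaim_space() is a made-up name and not part of the series:

#include <linux/fs.h>
#include <linux/writeback.h>

/*
 * Hypothetical caller: a filesystem that wants to push dirty data when free
 * space runs low now has to say why, so the writeback tracepoints can
 * attribute the resulting IO to a reason string.
 */
static void my_fs_reclaim_space(struct super_block *sb, unsigned long nr_pages)
{
	/* Only starts writeback if the flusher is not already busy. */
	writeback_inodes_sb_nr_if_idle(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
}

Note that introducing an additional reason would require both a new enumerator ahead of WB_REASON_MAX and a matching string in wb_reason_name[], since the tracepoints index that array directly with the enum value.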