blob: 12a245fac8b49f4134c2896ca2c6cc515850dd8b [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * include/linux/backing-dev.h
3 *
4 * low-level device information and state which is propagated up through
5 * to high-level code.
6 */
7
8#ifndef _LINUX_BACKING_DEV_H
9#define _LINUX_BACKING_DEV_H
10
Peter Zijlstracf0ca9f2008-04-30 00:54:32 -070011#include <linux/kernel.h>
Miklos Szeredie4ad08fe2008-04-30 00:54:37 -070012#include <linux/fs.h>
Jens Axboe03ba3782009-09-09 09:08:54 +020013#include <linux/sched.h>
Tejun Heo0b6d7572015-05-22 17:13:33 -040014#include <linux/blkdev.h>
Jens Axboe03ba3782009-09-09 09:08:54 +020015#include <linux/writeback.h>
Tejun Heo9d6e9852015-05-22 17:13:32 -040016#include <linux/backing-dev-defs.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070017
Mikulas Patocka8077c0d2013-10-14 12:14:13 -040018int __must_check bdi_init(struct backing_dev_info *bdi);
Peter Zijlstrab2e8fb62007-10-16 23:25:47 -070019void bdi_destroy(struct backing_dev_info *bdi);
20
Joe Perchesd2cc4dd2012-11-29 08:37:03 -060021__printf(3, 4)
Peter Zijlstracf0ca9f2008-04-30 00:54:32 -070022int bdi_register(struct backing_dev_info *bdi, struct device *parent,
23 const char *fmt, ...);
24int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
Christoph Hellwigb4caecd2015-01-14 10:42:32 +010025int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
Curt Wohlgemuth0e175a12011-10-07 21:54:10 -060026void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
27 enum wb_reason reason);
Christoph Hellwigc5444192010-06-08 18:15:15 +020028void bdi_start_background_writeback(struct backing_dev_info *bdi);
Tejun Heoc9b0f222015-05-22 17:13:30 -040029void wb_workfn(struct work_struct *work);
Jens Axboe03ba3782009-09-09 09:08:54 +020030int bdi_has_dirty_io(struct backing_dev_info *bdi);
Tejun Heoc9b0f222015-05-22 17:13:30 -040031void wb_wakeup_delayed(struct bdi_writeback *wb);
Peter Zijlstracf0ca9f2008-04-30 00:54:32 -070032
Jens Axboe03ba3782009-09-09 09:08:54 +020033extern spinlock_t bdi_lock;
Jens Axboe66f3b8e2009-09-02 09:19:46 +020034extern struct list_head bdi_list;
35
Tejun Heo839a8e82013-04-01 19:08:06 -070036extern struct workqueue_struct *bdi_wq;
37
Jens Axboe03ba3782009-09-09 09:08:54 +020038static inline int wb_has_dirty_io(struct bdi_writeback *wb)
39{
40 return !list_empty(&wb->b_dirty) ||
41 !list_empty(&wb->b_io) ||
42 !list_empty(&wb->b_more_io);
43}
44
Tejun Heo9ce34202015-05-22 17:13:27 -040045static inline void __add_wb_stat(struct bdi_writeback *wb,
46 enum wb_stat_item item, s64 amount)
Peter Zijlstrae0bf68d2007-10-16 23:25:46 -070047{
Tejun Heo9ce34202015-05-22 17:13:27 -040048 __percpu_counter_add(&wb->stat[item], amount, WB_STAT_BATCH);
Peter Zijlstrae0bf68d2007-10-16 23:25:46 -070049}
50
Tejun Heo9ce34202015-05-22 17:13:27 -040051static inline void __inc_wb_stat(struct bdi_writeback *wb,
52 enum wb_stat_item item)
Peter Zijlstrae0bf68d2007-10-16 23:25:46 -070053{
Tejun Heo9ce34202015-05-22 17:13:27 -040054 __add_wb_stat(wb, item, 1);
Peter Zijlstrab2e8fb62007-10-16 23:25:47 -070055}
56
Tejun Heo9ce34202015-05-22 17:13:27 -040057static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
Peter Zijlstrab2e8fb62007-10-16 23:25:47 -070058{
59 unsigned long flags;
60
61 local_irq_save(flags);
Tejun Heo9ce34202015-05-22 17:13:27 -040062 __inc_wb_stat(wb, item);
Peter Zijlstrab2e8fb62007-10-16 23:25:47 -070063 local_irq_restore(flags);
64}
65
Tejun Heo9ce34202015-05-22 17:13:27 -040066static inline void __dec_wb_stat(struct bdi_writeback *wb,
67 enum wb_stat_item item)
Peter Zijlstrab2e8fb62007-10-16 23:25:47 -070068{
Tejun Heo9ce34202015-05-22 17:13:27 -040069 __add_wb_stat(wb, item, -1);
Peter Zijlstrab2e8fb62007-10-16 23:25:47 -070070}
71
Tejun Heo9ce34202015-05-22 17:13:27 -040072static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
Peter Zijlstrab2e8fb62007-10-16 23:25:47 -070073{
74 unsigned long flags;
75
76 local_irq_save(flags);
Tejun Heo9ce34202015-05-22 17:13:27 -040077 __dec_wb_stat(wb, item);
Peter Zijlstrab2e8fb62007-10-16 23:25:47 -070078 local_irq_restore(flags);
79}
80
Tejun Heo9ce34202015-05-22 17:13:27 -040081static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
Peter Zijlstrab2e8fb62007-10-16 23:25:47 -070082{
Tejun Heo9ce34202015-05-22 17:13:27 -040083 return percpu_counter_read_positive(&wb->stat[item]);
Peter Zijlstrab2e8fb62007-10-16 23:25:47 -070084}
85
Tejun Heo9ce34202015-05-22 17:13:27 -040086static inline s64 __wb_stat_sum(struct bdi_writeback *wb,
87 enum wb_stat_item item)
Peter Zijlstrab2e8fb62007-10-16 23:25:47 -070088{
Tejun Heo9ce34202015-05-22 17:13:27 -040089 return percpu_counter_sum_positive(&wb->stat[item]);
Peter Zijlstrab2e8fb62007-10-16 23:25:47 -070090}
91
Tejun Heo9ce34202015-05-22 17:13:27 -040092static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
Peter Zijlstrab2e8fb62007-10-16 23:25:47 -070093{
94 s64 sum;
95 unsigned long flags;
96
97 local_irq_save(flags);
Tejun Heo9ce34202015-05-22 17:13:27 -040098 sum = __wb_stat_sum(wb, item);
Peter Zijlstrab2e8fb62007-10-16 23:25:47 -070099 local_irq_restore(flags);
100
101 return sum;
102}
103
Tejun Heo9ce34202015-05-22 17:13:27 -0400104extern void wb_writeout_inc(struct bdi_writeback *wb);
Miklos Szeredidd5656e2008-04-30 00:54:37 -0700105
Peter Zijlstrab2e8fb62007-10-16 23:25:47 -0700106/*
107 * maximal error of a stat counter.
108 */
static inline unsigned long wb_stat_error(struct bdi_writeback *wb)
{
#ifdef CONFIG_SMP
	/* each CPU may hold up to WB_STAT_BATCH not yet folded into the total */
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	/* no per-cpu batching on UP; error is at most one count */
	return 1;
#endif
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700117
Peter Zijlstra189d3c42008-04-30 00:54:35 -0700118int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
Peter Zijlstraa42dde02008-04-30 00:54:36 -0700119int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
Peter Zijlstra189d3c42008-04-30 00:54:35 -0700120
Linus Torvalds1da177e2005-04-16 15:20:36 -0700121/*
122 * Flags in backing_dev_info::capability
Miklos Szeredie4ad08fe2008-04-30 00:54:37 -0700123 *
124 * The first three flags control whether dirty pages will contribute to the
125 * VM's accounting and whether writepages() should be called for dirty pages
126 * (something that would not, for example, be appropriate for ramfs)
127 *
128 * WARNING: these flags are closely related and should not normally be
129 * used separately. The BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
130 * three flags into a single convenience macro.
131 *
132 * BDI_CAP_NO_ACCT_DIRTY: Dirty pages shouldn't contribute to accounting
133 * BDI_CAP_NO_WRITEBACK: Don't write pages back
134 * BDI_CAP_NO_ACCT_WB: Don't automatically account writeback pages
Maxim Patlasov5a537482013-09-11 14:22:46 -0700135 * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700136 */
Miklos Szeredie4ad08fe2008-04-30 00:54:37 -0700137#define BDI_CAP_NO_ACCT_DIRTY 0x00000001
138#define BDI_CAP_NO_WRITEBACK 0x00000002
Christoph Hellwigb4caecd2015-01-14 10:42:32 +0100139#define BDI_CAP_NO_ACCT_WB 0x00000004
140#define BDI_CAP_STABLE_WRITES 0x00000008
141#define BDI_CAP_STRICTLIMIT 0x00000010
Linus Torvalds1da177e2005-04-16 15:20:36 -0700142
Miklos Szeredie4ad08fe2008-04-30 00:54:37 -0700143#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
144 (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
145
Jörn Engel5129a462010-04-25 08:54:42 +0200146extern struct backing_dev_info noop_backing_dev_info;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700147
Linus Torvalds1da177e2005-04-16 15:20:36 -0700148int writeback_in_progress(struct backing_dev_info *bdi);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700149
Tejun Heo0b6d7572015-05-22 17:13:33 -0400150static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
151{
152 struct super_block *sb;
153
154 if (!inode)
155 return &noop_backing_dev_info;
156
157 sb = inode->i_sb;
158#ifdef CONFIG_BLOCK
159 if (sb_is_blkdev_sb(sb))
160 return blk_get_backing_dev_info(I_BDEV(inode));
161#endif
162 return sb->s_bdi;
163}
164
Linus Torvalds1da177e2005-04-16 15:20:36 -0700165static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
166{
167 if (bdi->congested_fn)
168 return bdi->congested_fn(bdi->congested_data, bdi_bits);
Tejun Heoff3b6c52015-05-22 17:13:26 -0400169 return (bdi->wb.state & bdi_bits);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700170}
171
172static inline int bdi_read_congested(struct backing_dev_info *bdi)
173{
Tejun Heoff3b6c52015-05-22 17:13:26 -0400174 return bdi_congested(bdi, 1 << WB_sync_congested);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700175}
176
177static inline int bdi_write_congested(struct backing_dev_info *bdi)
178{
Tejun Heoff3b6c52015-05-22 17:13:26 -0400179 return bdi_congested(bdi, 1 << WB_async_congested);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700180}
181
182static inline int bdi_rw_congested(struct backing_dev_info *bdi)
183{
Tejun Heoff3b6c52015-05-22 17:13:26 -0400184 return bdi_congested(bdi, (1 << WB_sync_congested) |
185 (1 << WB_async_congested));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700186}
187
Jens Axboe8aa7e842009-07-09 14:52:32 +0200188long congestion_wait(int sync, long timeout);
Mel Gorman0e093d992010-10-26 14:21:45 -0700189long wait_iff_congested(struct zone *zone, int sync, long timeout);
Wanpeng Li3965c9a2012-07-31 16:41:52 -0700190int pdflush_proc_obsolete(struct ctl_table *table, int write,
191 void __user *buffer, size_t *lenp, loff_t *ppos);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700192
Darrick J. Wong7d311cd2013-02-21 16:42:48 -0800193static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
194{
195 return bdi->capabilities & BDI_CAP_STABLE_WRITES;
196}
197
Miklos Szeredie4ad08fe2008-04-30 00:54:37 -0700198static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
199{
200 return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
201}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700202
Miklos Szeredie4ad08fe2008-04-30 00:54:37 -0700203static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
204{
205 return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
206}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700207
Miklos Szeredie4ad08fe2008-04-30 00:54:37 -0700208static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
209{
210 /* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
211 return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
212 BDI_CAP_NO_WRITEBACK));
213}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700214
Miklos Szeredie4ad08fe2008-04-30 00:54:37 -0700215static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
216{
Christoph Hellwigde1414a2015-01-14 10:42:36 +0100217 return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
Miklos Szeredie4ad08fe2008-04-30 00:54:37 -0700218}
219
220static inline bool mapping_cap_account_dirty(struct address_space *mapping)
221{
Christoph Hellwigde1414a2015-01-14 10:42:36 +0100222 return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
Miklos Szeredie4ad08fe2008-04-30 00:54:37 -0700223}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700224
/*
 * bdi_sched_wait - reschedule and report an uninterrupted wait
 * @word: opaque word, unused here
 *
 * NOTE(review): signature looks like a wait_on_bit-style action
 * callback — confirm against callers.  Returning 0 signals the wait
 * was not interrupted.
 */
static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}
230
Linus Torvalds1da177e2005-04-16 15:20:36 -0700231#endif /* _LINUX_BACKING_DEV_H */