/*
 * linux/fs/buffer.c
 *
 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required on older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
	bh->b_end_io = handler;
	bh->b_private = private;
}
EXPORT_SYMBOL(init_buffer);

static int sync_buffer(void *word)
{
	struct block_device *bd;
	struct buffer_head *bh
		= container_of(word, struct buffer_head, b_state);

	smp_mb();
	bd = bh->b_bdev;
	if (bd)
		blk_run_address_space(bd->bd_inode->i_mapping);
	io_schedule();
	return 0;
}

void __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
	clear_bit_unlock(BH_Lock, &bh->b_state);
	smp_mb__after_clear_bit();
	wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);
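
/*
 * Illustrative sketch (not part of the original file): because
 * __wait_on_buffer() does not stop the buffer from being locked again,
 * callers that need a stable view take the lock themselves:
 *
 *	lock_buffer(bh);		// sleep until we own BH_Lock
 *	if (buffer_uptodate(bh))	// state cannot change under us
 *		use_contents(bh);	// hypothetical helper
 *	unlock_buffer(bh);
 */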

static void
__clear_page_buffers(struct page *page)
{
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_cache_release(page);
}


static int quiet_error(struct buffer_head *bh)
{
	if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
		return 0;
	return 1;
}


static void buffer_io_error(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
			bdevname(bh->b_bdev, b),
			(unsigned long long)bh->b_blocknr);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens, due to failed READA attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler..  Just mark it up-to-date and
 * unlock the buffer.  This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_read_sync);

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];

	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
				       bdevname(bh->b_bdev, b));
		}
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_write_sync);

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock. (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct page *page;
	int all_mapped = 1;

	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
	page = find_get_page(bd_mapping, index);
	if (!page)
		goto out;

	spin_lock(&bd_mapping->private_lock);
	if (!page_has_buffers(page))
		goto out_unlock;
	head = page_buffers(page);
	bh = head;
	do {
		if (!buffer_mapped(bh))
			all_mapped = 0;
		else if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		bh = bh->b_this_page;
	} while (bh != head);

	/* we might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between
	 * file io on the block device and getblk.  It gets dealt with
	 * elsewhere, don't buffer_error if we had some unmapped buffers
	 */
	if (all_mapped) {
		printk("__find_get_block_slow() failed. "
			"block=%llu, b_blocknr=%llu\n",
			(unsigned long long)block,
			(unsigned long long)bh->b_blocknr);
		printk("b_state=0x%08lx, b_size=%zu\n",
			bh->b_state, bh->b_size);
		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->private_lock);
	page_cache_release(page);
out:
	return ret;
}

/* If invalidate_buffers() will trash dirty buffers, it means some kind
   of fs corruption is going on.  Trashing dirty data always implies losing
   information that was supposed to be just stored on the physical layer
   by the user.

   Thus invalidate_buffers in general usage is not allowed to trash
   dirty buffers.  For example ioctl(BLKFLSBUF) expects dirty data to
   be preserved.  These buffers are simply skipped.

   We also skip buffers which are still in use.  For example this can
   happen if a userspace program is reading the block device.

   NOTE: In the case where the user removed a removable-media disk even if
   there's still dirty data not synced on disk (due to a bug in the device
   driver or to a user error), by not destroying the dirty buffers we could
   generate corruption also on the next media inserted.  Thus a parameter is
   necessary to handle this case in the safest way possible (trying not to
   corrupt the newly inserted disk with data belonging to the old, now
   corrupted, disk).  Also for the ramdisk the natural thing to do in order
   to release the ramdisk memory is to destroy dirty buffers.

   These are two special cases.  Normal usage implies that the device driver
   issues a sync on the device (without waiting for I/O completion) and
   then an invalidate_buffers call that doesn't trash dirty buffers.

   For handling cache coherency with the blkdev pagecache the 'update' case
   has been introduced.  It is needed to re-read from disk any pinned
   buffer.  NOTE: re-reading from disk is destructive so we can do it only
   when we assume nobody is changing the buffercache under our I/O and when
   we think the disk contains more recent information than the buffercache.
   The update == 1 pass marks the buffers we need to update, the update == 2
   pass does the actual I/O. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages == 0)
		return;

	invalidate_bh_lrus();
	invalidate_mapping_pages(mapping, 0, -1);
}
EXPORT_SYMBOL(invalidate_bdev);

/*
 * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
	struct zone *zone;
	int nid;

	wakeup_flusher_threads(1024);
	yield();

	for_each_online_node(nid) {
		(void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
						gfp_zone(GFP_NOFS), NULL,
						&zone);
		if (zone)
			try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
						GFP_NOFS, NULL);
	}
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;
	int page_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		if (!quiet_error(bh))
			buffer_io_error(bh);
		SetPageError(page);
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			page_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);

	/*
	 * If none of the buffers had errors and they are all
	 * uptodate then we can set the page uptodate.
	 */
	if (page_uptodate && !PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;

	BUG_ON(!buffer_async_write(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!quiet_error(bh)) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
			       bdevname(bh->b_bdev, b));
		}
		set_bit(AS_EIO, &page->mapping->flags);
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
		SetPageError(page);
	}

	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	end_page_writeback(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}
EXPORT_SYMBOL(end_buffer_async_write);

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O reads against
 * any of the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read;
	set_buffer_async_read(bh);
}

static void mark_buffer_async_write_endio(struct buffer_head *bh,
					  bh_end_io_t *handler)
{
	bh->b_end_io = handler;
	set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);


/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

512 * The buffer's backing address_space's private_lock must be held
513 */
Thomas Petazzonidbacefc2008-07-29 22:33:47 -0700514static void __remove_assoc_queue(struct buffer_head *bh)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700515{
516 list_del_init(&bh->b_assoc_buffers);
Jan Kara58ff4072006-10-17 00:10:19 -0700517 WARN_ON(!bh->b_assoc_map);
518 if (buffer_write_io_error(bh))
519 set_bit(AS_EIO, &bh->b_assoc_map->flags);
520 bh->b_assoc_map = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700521}
522
523int inode_has_buffers(struct inode *inode)
524{
525 return !list_empty(&inode->i_data.private_list);
526}
527
528/*
529 * osync is designed to support O_SYNC io. It waits synchronously for
530 * all already-submitted IO to complete, but does not queue any new
531 * writes to the disk.
532 *
533 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
534 * you dirty the buffers, and then use osync_inode_buffers to wait for
535 * completion. Any other dirty buffers which are not yet queued for
536 * write will not be flushed to disk by the osync.
537 */
538static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
539{
540 struct buffer_head *bh;
541 struct list_head *p;
542 int err = 0;
543
544 spin_lock(lock);
545repeat:
546 list_for_each_prev(p, list) {
547 bh = BH_ENTRY(p);
548 if (buffer_locked(bh)) {
549 get_bh(bh);
550 spin_unlock(lock);
551 wait_on_buffer(bh);
552 if (!buffer_uptodate(bh))
553 err = -EIO;
554 brelse(bh);
555 spin_lock(lock);
556 goto repeat;
557 }
558 }
559 spin_unlock(lock);
560 return err;
561}
562
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -0700563static void do_thaw_all(struct work_struct *work)
Eric Sandeenc2d75432009-03-31 15:23:46 -0700564{
565 struct super_block *sb;
566 char b[BDEVNAME_SIZE];
567
568 spin_lock(&sb_lock);
569restart:
570 list_for_each_entry(sb, &super_blocks, s_list) {
Al Viro551de6f2010-03-22 19:36:35 -0400571 if (list_empty(&sb->s_instances))
572 continue;
Eric Sandeenc2d75432009-03-31 15:23:46 -0700573 sb->s_count++;
574 spin_unlock(&sb_lock);
575 down_read(&sb->s_umount);
576 while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
577 printk(KERN_WARNING "Emergency Thaw on %s\n",
578 bdevname(sb->s_bdev, b));
579 up_read(&sb->s_umount);
580 spin_lock(&sb_lock);
581 if (__put_super_and_need_restart(sb))
582 goto restart;
583 }
584 spin_unlock(&sb_lock);
Jens Axboe053c5252009-04-08 13:44:08 +0200585 kfree(work);
Eric Sandeenc2d75432009-03-31 15:23:46 -0700586 printk(KERN_WARNING "Emergency Thaw complete\n");
587}
588
589/**
590 * emergency_thaw_all -- forcibly thaw every frozen filesystem
591 *
592 * Used for emergency unfreeze of all filesystems via SysRq
593 */
594void emergency_thaw_all(void)
595{
Jens Axboe053c5252009-04-08 13:44:08 +0200596 struct work_struct *work;
597
598 work = kmalloc(sizeof(*work), GFP_ATOMIC);
599 if (work) {
600 INIT_WORK(work, do_thaw_all);
601 schedule_work(work);
602 }
Eric Sandeenc2d75432009-03-31 15:23:46 -0700603}
604
Linus Torvalds1da177e2005-04-16 15:20:36 -0700605/**
Randy Dunlap78a4a502008-02-29 22:02:31 -0800606 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
Martin Waitz67be2dd2005-05-01 08:59:26 -0700607 * @mapping: the mapping which wants those buffers written
Linus Torvalds1da177e2005-04-16 15:20:36 -0700608 *
609 * Starts I/O against the buffers at mapping->private_list, and waits upon
610 * that I/O.
611 *
Martin Waitz67be2dd2005-05-01 08:59:26 -0700612 * Basically, this is a convenience function for fsync().
613 * @mapping is a file or directory which needs those buffers to be written for
614 * a successful fsync().
Linus Torvalds1da177e2005-04-16 15:20:36 -0700615 */
616int sync_mapping_buffers(struct address_space *mapping)
617{
618 struct address_space *buffer_mapping = mapping->assoc_mapping;
619
620 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
621 return 0;
622
623 return fsync_buffers_list(&buffer_mapping->private_lock,
624 &mapping->private_list);
625}
626EXPORT_SYMBOL(sync_mapping_buffers);
627
628/*
629 * Called when we've recently written block `bblock', and it is known that
630 * `bblock' was for a buffer_boundary() buffer. This means that the block at
631 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
632 * dirty, schedule it for IO. So that indirects merge nicely with their data.
633 */
634void write_boundary_block(struct block_device *bdev,
635 sector_t bblock, unsigned blocksize)
636{
637 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
638 if (bh) {
639 if (buffer_dirty(bh))
640 ll_rw_block(WRITE, 1, &bh);
641 put_bh(bh);
642 }
643}
644
645void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
646{
647 struct address_space *mapping = inode->i_mapping;
648 struct address_space *buffer_mapping = bh->b_page->mapping;
649
650 mark_buffer_dirty(bh);
651 if (!mapping->assoc_mapping) {
652 mapping->assoc_mapping = buffer_mapping;
653 } else {
Eric Sesterhenne827f922006-03-26 18:24:46 +0200654 BUG_ON(mapping->assoc_mapping != buffer_mapping);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700655 }
Jan Kara535ee2f2008-02-08 04:21:59 -0800656 if (!bh->b_assoc_map) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700657 spin_lock(&buffer_mapping->private_lock);
658 list_move_tail(&bh->b_assoc_buffers,
659 &mapping->private_list);
Jan Kara58ff4072006-10-17 00:10:19 -0700660 bh->b_assoc_map = mapping;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700661 spin_unlock(&buffer_mapping->private_lock);
662 }
663}
664EXPORT_SYMBOL(mark_buffer_dirty_inode);
665
666/*
Nick Piggin787d2212007-07-17 04:03:34 -0700667 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
668 * dirty.
669 *
670 * If warn is true, then emit a warning if the page is not uptodate and has
671 * not been truncated.
672 */
Linus Torvaldsa8e7d492009-03-19 11:32:05 -0700673static void __set_page_dirty(struct page *page,
Nick Piggin787d2212007-07-17 04:03:34 -0700674 struct address_space *mapping, int warn)
675{
Nick Piggin19fd6232008-07-25 19:45:32 -0700676 spin_lock_irq(&mapping->tree_lock);
Nick Piggin787d2212007-07-17 04:03:34 -0700677 if (page->mapping) { /* Race with truncate? */
678 WARN_ON_ONCE(warn && !PageUptodate(page));
Edward Shishkine3a7cca2009-03-31 15:19:39 -0700679 account_page_dirtied(page, mapping);
Nick Piggin787d2212007-07-17 04:03:34 -0700680 radix_tree_tag_set(&mapping->page_tree,
681 page_index(page), PAGECACHE_TAG_DIRTY);
682 }
Nick Piggin19fd6232008-07-25 19:45:32 -0700683 spin_unlock_irq(&mapping->tree_lock);
Nick Piggin787d2212007-07-17 04:03:34 -0700684 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
Nick Piggin787d2212007-07-17 04:03:34 -0700685}
686
687/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700688 * Add a page to the dirty page list.
689 *
690 * It is a sad fact of life that this function is called from several places
691 * deeply under spinlocking. It may not sleep.
692 *
693 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
{
	int newly_dirty;
	struct address_space *mapping = page_mapping(page);

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	newly_dirty = !TestSetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty)
		__set_page_dirty(page, mapping, 1);
	return newly_dirty;
}
EXPORT_SYMBOL(__set_page_dirty_buffers);

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head tmp;
	struct address_space *mapping, *prev_mapping = NULL;
	int err = 0, err2;

	INIT_LIST_HEAD(&tmp);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			bh->b_assoc_map = mapping;
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * ll_rw_block() actually writes the current
				 * contents - it is a noop if I/O is still in
				 * flight on potentially older contents.
				 */
				ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);

				/*
				 * Kick off IO for the previous mapping. Note
				 * that we will not run the very last mapping,
				 * wait_on_buffer() will do that for us
				 * through sync_buffer().
				 */
				if (prev_mapping && prev_mapping != mapping)
					blk_run_address_space(prev_mapping);
				prev_mapping = mapping;

				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		get_bh(bh);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh)) {
			list_add(&bh->b_assoc_buffers,
				 &mapping->private_list);
			bh->b_assoc_map = mapping;
		}
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(invalidate_inode_buffers);

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->private_lock);
	}
	return ret;
}

/*
 * Create the appropriate buffers when given a page for data area and
 * the size of each buffer..  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		int retry)
{
	struct buffer_head *bh, *head;
	long offset;

try_again:
	head = NULL;
	offset = PAGE_SIZE;
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(GFP_NOFS);
		if (!bh)
			goto no_grow;

		bh->b_bdev = NULL;
		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_state = 0;
		atomic_set(&bh->b_count, 0);
		bh->b_private = NULL;
		bh->b_size = size;

		/* Link the buffer to its page */
		set_bh_page(bh, page, offset);

		init_buffer(bh, NULL, NULL);
	}
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	/*
	 * Return failure for non-async IO requests.  Async IO requests
	 * are not allowed to fail, so we have to wait until buffer heads
	 * become available.  But we don't want tasks sleeping with
	 * partially complete buffers, so all were released above.
	 */
	if (!retry)
		return NULL;

	/* We're _really_ low on memory. Now we just
	 * wait for old buffer heads to become free due to
	 * finishing IO.  Since this is an async request and
	 * the reserve list is empty, we're sure there are
	 * async buffer heads in use.
	 */
	free_more_memory();
	goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
	struct buffer_head *bh, *tail;

	bh = head;
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	attach_page_buffers(page, head);
}

/*
 * Initialise the state of a blockdev page's buffers.
 */
static void
init_page_buffers(struct page *page, struct block_device *bdev,
			sector_t block, int size)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh = head;
	int uptodate = PageUptodate(page);

	do {
		if (!buffer_mapped(bh)) {
			init_buffer(bh, NULL, NULL);
			bh->b_bdev = bdev;
			bh->b_blocknr = block;
			if (uptodate)
				set_buffer_uptodate(bh);
			set_buffer_mapped(bh);
		}
		block++;
		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static struct page *
grow_dev_page(struct block_device *bdev, sector_t block,
		pgoff_t index, int size)
{
	struct inode *inode = bdev->bd_inode;
	struct page *page;
	struct buffer_head *bh;

	page = find_or_create_page(inode->i_mapping, index,
		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
	if (!page)
		return NULL;

	BUG_ON(!PageLocked(page));

	if (page_has_buffers(page)) {
		bh = page_buffers(page);
		if (bh->b_size == size) {
			init_page_buffers(page, bdev, block, size);
			return page;
		}
		if (!try_to_free_buffers(page))
			goto failed;
	}

	/*
	 * Allocate some buffers for this page
	 */
	bh = alloc_page_buffers(page, size, 0);
	if (!bh)
		goto failed;

	/*
	 * Link the page to the buffers and initialise them.  Take the
	 * lock to be atomic wrt __find_get_block(), which does not
	 * run under the page lock.
	 */
	spin_lock(&inode->i_mapping->private_lock);
	link_dev_buffers(page, bh);
	init_page_buffers(page, bdev, block, size);
	spin_unlock(&inode->i_mapping->private_lock);
	return page;

failed:
	BUG();
	unlock_page(page);
	page_cache_release(page);
	return NULL;
}

/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size)
{
	struct page *page;
	pgoff_t index;
	int sizebits;

	sizebits = -1;
	do {
		sizebits++;
	} while ((size << sizebits) < PAGE_SIZE);

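	/*
	 * Worked example (illustrative comment, not in the original
	 * source): with PAGE_SIZE == 4096 and size == 1024, the loop
	 * above yields sizebits == 2, i.e. four blocks per page, so the
	 * page index computed below is simply block / 4.
	 */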
	index = block >> sizebits;

	/*
	 * Check for a block which wants to lie outside our maximum possible
	 * pagecache index.  (this comparison is done using sector_t types).
	 */
	if (unlikely(index != block >> sizebits)) {
		char b[BDEVNAME_SIZE];

		printk(KERN_ERR "%s: requested out-of-range block %llu for "
			"device %s\n",
			__func__, (unsigned long long)block,
			bdevname(bdev, b));
		return -EIO;
	}
	block = index << sizebits;
	/* Create a page with the proper size buffers.. */
	page = grow_dev_page(bdev, block, index, size);
	if (!page)
		return 0;
	unlock_page(page);
	page_cache_release(page);
	return 1;
}

static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
{
	/* Size must be multiple of hard sectorsize */
	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
			(size < 512 || size > PAGE_SIZE))) {
		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
					size);
		printk(KERN_ERR "logical block size: %d\n",
					bdev_logical_block_size(bdev));

		dump_stack();
		return NULL;
	}

	for (;;) {
		struct buffer_head * bh;
		int ret;

		bh = __find_get_block(bdev, block, size);
		if (bh)
			return bh;

		ret = grow_buffers(bdev, block, size);
		if (ret < 0)
			return NULL;
		if (ret == 0)
			free_more_memory();
	}
}

/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in its radix tree.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_page() against that page will discover all the uptodate
 * buffers, will set the page uptodate and will perform no I/O.
 */

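/*
 * Illustrative sketch of the last rule above (not part of the original
 * file): after
 *
 *	bh = __bread(bdev, block, 512);
 *
 * the buffer is uptodate but PageUptodate(bh->b_page) may still be
 * false; a later block_read_full_page() against that page finds every
 * buffer uptodate, sets the page uptodate and issues no I/O.
 */
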
/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
 * backing page dirty, then tag the page as dirty in its address_space's radix
 * tree and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
 * mapping->tree_lock and the global inode_lock.
 */
void mark_buffer_dirty(struct buffer_head *bh)
{
	WARN_ON_ONCE(!buffer_uptodate(bh));

	/*
	 * Very *carefully* optimize the it-is-already-dirty case.
	 *
	 * Don't let the final "is it dirty" escape to before we
	 * perhaps modified the buffer.
	 */
	if (buffer_dirty(bh)) {
		smp_mb();
		if (buffer_dirty(bh))
			return;
	}

	if (!test_set_buffer_dirty(bh)) {
		struct page *page = bh->b_page;
		if (!TestSetPageDirty(page)) {
			struct address_space *mapping = page_mapping(page);
			if (mapping)
				__set_page_dirty(page, mapping, 0);
		}
	}
}
EXPORT_SYMBOL(mark_buffer_dirty);

/*
 * Decrement a buffer_head's reference count.  If all buffers against a page
 * have zero reference count, are clean and unlocked, and if the page is clean
 * and unlocked then try_to_free_buffers() may strip the buffers from the page
 * in preparation for freeing it (sometimes, rarely, buffers are removed from
 * a page but it ends up not being freed, and buffers may later be reattached).
 */
void __brelse(struct buffer_head * buf)
{
	if (atomic_read(&buf->b_count)) {
		put_bh(buf);
		return;
	}
	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}
EXPORT_SYMBOL(__brelse);

/*
 * bforget() is like brelse(), except it discards any
 * potentially dirty data.
 */
void __bforget(struct buffer_head *bh)
{
	clear_buffer_dirty(bh);
	if (bh->b_assoc_map) {
		struct address_space *buffer_mapping = bh->b_page->mapping;

		spin_lock(&buffer_mapping->private_lock);
		list_del_init(&bh->b_assoc_buffers);
		bh->b_assoc_map = NULL;
		spin_unlock(&buffer_mapping->private_lock);
	}
	__brelse(bh);
}
EXPORT_SYMBOL(__bforget);

static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	} else {
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ, bh);
		wait_on_buffer(bh);
		if (buffer_uptodate(bh))
			return bh;
	}
	brelse(bh);
	return NULL;
}

/*
 * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE	8

struct bh_lru {
	struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()	local_irq_disable()
#define bh_lru_unlock()	local_irq_enable()
#else
#define bh_lru_lock()	preempt_disable()
#define bh_lru_unlock()	preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
	BUG_ON(irqs_disabled());
#endif
}

/*
 * The LRU management algorithm is dopey-but-simple.  Sorry.
 */
static void bh_lru_install(struct buffer_head *bh)
{
	struct buffer_head *evictee = NULL;
	struct bh_lru *lru;

	check_irqs_on();
	bh_lru_lock();
	lru = &__get_cpu_var(bh_lrus);
	if (lru->bhs[0] != bh) {
		struct buffer_head *bhs[BH_LRU_SIZE];
		int in;
		int out = 0;

		get_bh(bh);
		bhs[out++] = bh;
		for (in = 0; in < BH_LRU_SIZE; in++) {
			struct buffer_head *bh2 = lru->bhs[in];

			if (bh2 == bh) {
				__brelse(bh2);
			} else {
				if (out >= BH_LRU_SIZE) {
					BUG_ON(evictee != NULL);
					evictee = bh2;
				} else {
					bhs[out++] = bh2;
				}
			}
		}
		while (out < BH_LRU_SIZE)
			bhs[out++] = NULL;
		memcpy(lru->bhs, bhs, sizeof(bhs));
	}
	bh_lru_unlock();

	if (evictee)
		__brelse(evictee);
}

/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *ret = NULL;
	struct bh_lru *lru;
	unsigned int i;

	check_irqs_on();
	bh_lru_lock();
	lru = &__get_cpu_var(bh_lrus);
	for (i = 0; i < BH_LRU_SIZE; i++) {
		struct buffer_head *bh = lru->bhs[i];

		if (bh && bh->b_bdev == bdev &&
				bh->b_blocknr == block && bh->b_size == size) {
			if (i) {
				while (i) {
					lru->bhs[i] = lru->bhs[i - 1];
					i--;
				}
				lru->bhs[0] = bh;
			}
			get_bh(bh);
			ret = bh;
			break;
		}
	}
	bh_lru_unlock();
	return ret;
}

/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

	if (bh == NULL) {
		bh = __find_get_block_slow(bdev, block);
		if (bh)
			bh_lru_install(bh);
	}
	if (bh)
		touch_buffer(bh);
	return bh;
}
EXPORT_SYMBOL(__find_get_block);

/*
 * __getblk will locate (and, if necessary, create) the buffer_head
 * which corresponds to the passed block_device, block and size.  The
 * returned buffer has its reference count incremented.
 *
 * __getblk() cannot fail - it just keeps trying.  If you pass it an
 * illegal block number, __getblk() will happily return a buffer_head
 * which represents the non-existent block.  Very weird.
 *
 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
 * attempt is failing.  FIXME, perhaps?
 */
struct buffer_head *
__getblk(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __find_get_block(bdev, block, size);

	might_sleep();
	if (bh == NULL)
		bh = __getblk_slow(bdev, block, size);
	return bh;
}
EXPORT_SYMBOL(__getblk);

/*
 * Do async read-ahead on a buffer..
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);
	if (likely(bh)) {
		ll_rw_block(READA, 1, &bh);
		brelse(bh);
	}
}
EXPORT_SYMBOL(__breadahead);

/**
 * __bread() - reads a specified block and returns the bh
 * @bdev: the block_device to read from
 * @block: number of block
 * @size: size (in bytes) to read
 *
 * Reads a specified block, and returns buffer head that contains it.
 * It returns NULL if the block was unreadable.
 */
struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	if (likely(bh) && !buffer_uptodate(bh))
		bh = __bread_slow(bh);
	return bh;
}
EXPORT_SYMBOL(__bread);

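/*
 * Illustrative sketch (not part of the original file): the usual
 * read-then-release pattern built on __bread()/brelse().  The block
 * number and size here are made up:
 *
 *	struct buffer_head *bh = __bread(bdev, 42, 512);
 *
 *	if (bh) {
 *		memcpy(data, bh->b_data, 512);	// contents are uptodate
 *		brelse(bh);			// drop our reference
 *	}
 */
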
/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
{
	struct bh_lru *b = &get_cpu_var(bh_lrus);
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
	put_cpu_var(bh_lrus);
}

void invalidate_bh_lrus(void)
{
	on_each_cpu(invalidate_bh_lru, NULL, 1);
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);

void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset)
{
	bh->b_page = page;
	BUG_ON(offset >= PAGE_SIZE);
	if (PageHighMem(page))
		/*
		 * This catches illegal uses and preserves the offset:
		 */
		bh->b_data = (char *)(0 + offset);
	else
		bh->b_data = page_address(page) + offset;
}
EXPORT_SYMBOL(set_bh_page);

/*
 * Called when truncating a buffer on a page completely.
 */
static void discard_buffer(struct buffer_head * bh)
{
	lock_buffer(bh);
	clear_buffer_dirty(bh);
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
	unlock_buffer(bh);
}

/**
 * block_invalidatepage - invalidate part or all of a buffer-backed page
 *
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * block_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
1493 *
1494 * block_invalidatepage() does not have to release all buffers, but it must
1495 * ensure that no dirty buffer is left outside @offset and that no I/O
1496 * is underway against any of the blocks which are outside the truncation
1497 * point. Because the caller is about to free (and possibly reuse) those
1498 * blocks on-disk.
1499 */
NeilBrown2ff28e22006-03-26 01:37:18 -08001500void block_invalidatepage(struct page *page, unsigned long offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001501{
1502 struct buffer_head *head, *bh, *next;
1503 unsigned int curr_off = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001504
1505 BUG_ON(!PageLocked(page));
1506 if (!page_has_buffers(page))
1507 goto out;
1508
1509 head = page_buffers(page);
1510 bh = head;
1511 do {
1512 unsigned int next_off = curr_off + bh->b_size;
1513 next = bh->b_this_page;
1514
1515 /*
1516 * is this block fully invalidated?
1517 */
1518 if (offset <= curr_off)
1519 discard_buffer(bh);
1520 curr_off = next_off;
1521 bh = next;
1522 } while (bh != head);
1523
1524 /*
1525 * We release buffers only if the entire page is being invalidated.
1526 * The get_block cached value has been unconditionally invalidated,
1527 * so real IO is not possible anymore.
1528 */
1529 if (offset == 0)
NeilBrown2ff28e22006-03-26 01:37:18 -08001530 try_to_release_page(page, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001531out:
NeilBrown2ff28e22006-03-26 01:37:18 -08001532 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533}
1534EXPORT_SYMBOL(block_invalidatepage);
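/*
 * Sketch (an assumption, not taken from this file): a buffer-backed
 * filesystem may point ->invalidatepage straight at
 * block_invalidatepage(); if the hook is left NULL, do_invalidatepage()
 * falls back to block_invalidatepage() on CONFIG_BLOCK kernels.
 */
#if 0	/* example only */
static const struct address_space_operations example_aops = {
	.invalidatepage	= block_invalidatepage,
	/* ... the filesystem's remaining methods ... */
};
#endif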
1535
1536/*
1537 * We attach and possibly dirty the buffers atomically wrt
1538 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1539 * is already excluded via the page lock.
1540 */
1541void create_empty_buffers(struct page *page,
1542 unsigned long blocksize, unsigned long b_state)
1543{
1544 struct buffer_head *bh, *head, *tail;
1545
1546 head = alloc_page_buffers(page, blocksize, 1);
1547 bh = head;
1548 do {
1549 bh->b_state |= b_state;
1550 tail = bh;
1551 bh = bh->b_this_page;
1552 } while (bh);
1553 tail->b_this_page = head;
1554
1555 spin_lock(&page->mapping->private_lock);
1556 if (PageUptodate(page) || PageDirty(page)) {
1557 bh = head;
1558 do {
1559 if (PageDirty(page))
1560 set_buffer_dirty(bh);
1561 if (PageUptodate(page))
1562 set_buffer_uptodate(bh);
1563 bh = bh->b_this_page;
1564 } while (bh != head);
1565 }
1566 attach_page_buffers(page, head);
1567 spin_unlock(&page->mapping->private_lock);
1568}
1569EXPORT_SYMBOL(create_empty_buffers);
1570
1571/*
1572 * We are taking a block for data and we don't want any output from any
1573 * buffer-cache aliases from the moment this function returns
1574 * until the moment when something explicitly marks the buffer
1575 * dirty (hopefully that will not happen until we free that block ;-)
1576 * We don't even need to mark it not-uptodate - nobody can expect
1577 * anything from a newly allocated buffer anyway. We used to use
1578 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1579 * don't want to mark the alias unmapped, for example - it would confuse
1580 * anyone who might pick it up with bread() afterwards...
1581 *
1582 * Also.. Note that bforget() doesn't lock the buffer. So there can
1583 * be writeout I/O going on against recently-freed buffers. We don't
1584 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1585 * only if we really need to. That happens here.
1586 */
1587void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1588{
1589 struct buffer_head *old_bh;
1590
1591 might_sleep();
1592
Coywolf Qi Hunt385fd4c2005-11-07 00:59:39 -08001593 old_bh = __find_get_block_slow(bdev, block);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001594 if (old_bh) {
1595 clear_buffer_dirty(old_bh);
1596 wait_on_buffer(old_bh);
1597 clear_buffer_req(old_bh);
1598 __brelse(old_bh);
1599 }
1600}
1601EXPORT_SYMBOL(unmap_underlying_metadata);
1602
1603/*
1604 * NOTE! All mapped/uptodate combinations are valid:
1605 *
1606 *	Mapped	Uptodate	Meaning
1607 *
1608 *	No	No		"unknown" - must do get_block()
1609 *	No	Yes		"hole" - zero-filled
1610 *	Yes	No		"allocated" - allocated on disk, not read in
1611 *	Yes	Yes		"valid" - allocated and up-to-date in memory.
1612 *
1613 * "Dirty" is valid only with the last case (mapped+uptodate).
1614 */
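/*
 * Illustrative helper, not kernel code: the same four states from the
 * table above, restated as a classification function.
 */
#if 0	/* example only */
static const char *example_bh_state(struct buffer_head *bh)
{
	if (!buffer_mapped(bh))
		return buffer_uptodate(bh) ? "hole" : "unknown";
	return buffer_uptodate(bh) ? "valid" : "allocated";
}
#endif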
1615
1616/*
1617 * While block_write_full_page is writing back the dirty buffers under
1618 * the page lock, whoever dirtied the buffers may decide to clean them
1619 * again at any time. We handle that by only looking at the buffer
1620 * state inside lock_buffer().
1621 *
1622 * If block_write_full_page() is called for regular writeback
1623 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1624 * locked buffer. This only can happen if someone has written the buffer
1625 * directly, with submit_bh(). At the address_space level PageWriteback
1626 * prevents this contention from occurring.
Theodore Ts'o6e34eed2009-04-07 18:12:43 -04001627 *
1628 * If block_write_full_page() is called with wbc->sync_mode ==
1629 * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
1630 * causes the writes to be flagged as synchronous writes, but the
1631 * block device queue will NOT be unplugged, since usually many pages
1632 * will be pushed out before the higher-level caller actually
1633 * waits for the writes to be completed. The various wait functions,
1634 * such as wait_on_writeback_range() will ultimately call sync_page()
1635 * which will ultimately call blk_run_backing_dev(), which will end up
1636 * unplugging the device queue.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001637 */
1638static int __block_write_full_page(struct inode *inode, struct page *page,
Chris Mason35c80d52009-04-15 13:22:38 -04001639 get_block_t *get_block, struct writeback_control *wbc,
1640 bh_end_io_t *handler)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641{
1642 int err;
1643 sector_t block;
1644 sector_t last_block;
Andrew Mortonf0fbd5f2005-05-05 16:15:48 -07001645 struct buffer_head *bh, *head;
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08001646 const unsigned blocksize = 1 << inode->i_blkbits;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001647 int nr_underway = 0;
Theodore Ts'o6e34eed2009-04-07 18:12:43 -04001648 int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
1649 WRITE_SYNC_PLUG : WRITE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650
1651 BUG_ON(!PageLocked(page));
1652
1653 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1654
1655 if (!page_has_buffers(page)) {
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08001656 create_empty_buffers(page, blocksize,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657 (1 << BH_Dirty)|(1 << BH_Uptodate));
1658 }
1659
1660 /*
1661 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1662 * here, and the (potentially unmapped) buffers may become dirty at
1663 * any time. If a buffer becomes dirty here after we've inspected it
1664 * then we just miss that fact, and the page stays dirty.
1665 *
1666 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1667 * handle that here by just cleaning them.
1668 */
1669
Andrew Morton54b21a72006-01-08 01:03:05 -08001670 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671 head = page_buffers(page);
1672 bh = head;
1673
1674 /*
1675 * Get all the dirty buffers mapped to disk addresses and
1676 * handle any aliases from the underlying blockdev's mapping.
1677 */
1678 do {
1679 if (block > last_block) {
1680 /*
1681 * mapped buffers outside i_size will occur, because
1682 * this page can be outside i_size when there is a
1683 * truncate in progress.
1684 */
1685 /*
1686 * The buffer was zeroed by block_write_full_page()
1687 */
1688 clear_buffer_dirty(bh);
1689 set_buffer_uptodate(bh);
Alex Tomas29a814d2008-07-11 19:27:31 -04001690 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1691 buffer_dirty(bh)) {
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08001692 WARN_ON(bh->b_size != blocksize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693 err = get_block(inode, block, bh, 1);
1694 if (err)
1695 goto recover;
Alex Tomas29a814d2008-07-11 19:27:31 -04001696 clear_buffer_delay(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697 if (buffer_new(bh)) {
1698 /* blockdev mappings never come here */
1699 clear_buffer_new(bh);
1700 unmap_underlying_metadata(bh->b_bdev,
1701 bh->b_blocknr);
1702 }
1703 }
1704 bh = bh->b_this_page;
1705 block++;
1706 } while (bh != head);
1707
1708 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709 if (!buffer_mapped(bh))
1710 continue;
1711 /*
1712 * If it's a fully non-blocking write attempt and we cannot
1713 * lock the buffer then redirty the page. Note that this can
Jens Axboe5b0830c2009-09-23 19:37:09 +02001714 * potentially cause a busy-wait loop from writeback threads
1715 * and kswapd activity, but those code paths have their own
1716 * higher-level throttling.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717 */
1718 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1719 lock_buffer(bh);
Nick Pigginca5de402008-08-02 12:02:13 +02001720 } else if (!trylock_buffer(bh)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721 redirty_page_for_writepage(wbc, page);
1722 continue;
1723 }
1724 if (test_clear_buffer_dirty(bh)) {
Chris Mason35c80d52009-04-15 13:22:38 -04001725 mark_buffer_async_write_endio(bh, handler);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001726 } else {
1727 unlock_buffer(bh);
1728 }
1729 } while ((bh = bh->b_this_page) != head);
1730
1731 /*
1732 * The page and its buffers are protected by PageWriteback(), so we can
1733 * drop the bh refcounts early.
1734 */
1735 BUG_ON(PageWriteback(page));
1736 set_page_writeback(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001737
1738 do {
1739 struct buffer_head *next = bh->b_this_page;
1740 if (buffer_async_write(bh)) {
Theodore Ts'oa64c8612009-03-27 22:14:10 -04001741 submit_bh(write_op, bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742 nr_underway++;
1743 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744 bh = next;
1745 } while (bh != head);
Andrew Morton05937ba2005-05-05 16:15:47 -07001746 unlock_page(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747
1748 err = 0;
1749done:
1750 if (nr_underway == 0) {
1751 /*
1752 * The page was marked dirty, but the buffers were
1753 * clean. Someone wrote them back by hand with
1754 * ll_rw_block/submit_bh. A rare case.
1755 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001756 end_page_writeback(page);
Nick Piggin3d67f2d2007-05-06 14:49:05 -07001757
Linus Torvalds1da177e2005-04-16 15:20:36 -07001758 /*
1759 * The page and buffer_heads can be released at any time from
1760 * here on.
1761 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001762 }
1763 return err;
1764
1765recover:
1766 /*
1767 * ENOSPC, or some other error. We may already have added some
1768 * blocks to the file, so we need to write these out to avoid
1769 * exposing stale data.
1770 * The page is currently locked and not marked for writeback
1771 */
1772 bh = head;
1773 /* Recovery: lock and submit the mapped buffers */
1774 do {
Alex Tomas29a814d2008-07-11 19:27:31 -04001775 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1776 !buffer_delay(bh)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001777 lock_buffer(bh);
Chris Mason35c80d52009-04-15 13:22:38 -04001778 mark_buffer_async_write_endio(bh, handler);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779 } else {
1780 /*
1781 * The buffer may have been set dirty during
1782 * attachment to a dirty page.
1783 */
1784 clear_buffer_dirty(bh);
1785 }
1786 } while ((bh = bh->b_this_page) != head);
1787 SetPageError(page);
1788 BUG_ON(PageWriteback(page));
Andrew Morton7e4c3692007-05-08 00:23:27 -07001789 mapping_set_error(page->mapping, err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790 set_page_writeback(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001791 do {
1792 struct buffer_head *next = bh->b_this_page;
1793 if (buffer_async_write(bh)) {
1794 clear_buffer_dirty(bh);
Theodore Ts'oa64c8612009-03-27 22:14:10 -04001795 submit_bh(write_op, bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796 nr_underway++;
1797 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798 bh = next;
1799 } while (bh != head);
Nick Pigginffda9d32007-02-20 13:57:54 -08001800 unlock_page(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801 goto done;
1802}
1803
Nick Pigginafddba42007-10-16 01:25:01 -07001804/*
1805 * If a page has any new buffers, zero them out here, and mark them uptodate
1806 * and dirty so they'll be written out (in order to prevent uninitialised
1807 * block data from leaking). And clear the new bit.
1808 */
1809void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1810{
1811 unsigned int block_start, block_end;
1812 struct buffer_head *head, *bh;
1813
1814 BUG_ON(!PageLocked(page));
1815 if (!page_has_buffers(page))
1816 return;
1817
1818 bh = head = page_buffers(page);
1819 block_start = 0;
1820 do {
1821 block_end = block_start + bh->b_size;
1822
1823 if (buffer_new(bh)) {
1824 if (block_end > from && block_start < to) {
1825 if (!PageUptodate(page)) {
1826 unsigned start, size;
1827
1828 start = max(from, block_start);
1829 size = min(to, block_end) - start;
1830
Christoph Lametereebd2aa2008-02-04 22:28:29 -08001831 zero_user(page, start, size);
Nick Pigginafddba42007-10-16 01:25:01 -07001832 set_buffer_uptodate(bh);
1833 }
1834
1835 clear_buffer_new(bh);
1836 mark_buffer_dirty(bh);
1837 }
1838 }
1839
1840 block_start = block_end;
1841 bh = bh->b_this_page;
1842 } while (bh != head);
1843}
1844EXPORT_SYMBOL(page_zero_new_buffers);
1845
Linus Torvalds1da177e2005-04-16 15:20:36 -07001846static int __block_prepare_write(struct inode *inode, struct page *page,
1847 unsigned from, unsigned to, get_block_t *get_block)
1848{
1849 unsigned block_start, block_end;
1850 sector_t block;
1851 int err = 0;
1852 unsigned blocksize, bbits;
1853 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1854
1855 BUG_ON(!PageLocked(page));
1856 BUG_ON(from > PAGE_CACHE_SIZE);
1857 BUG_ON(to > PAGE_CACHE_SIZE);
1858 BUG_ON(from > to);
1859
1860 blocksize = 1 << inode->i_blkbits;
1861 if (!page_has_buffers(page))
1862 create_empty_buffers(page, blocksize, 0);
1863 head = page_buffers(page);
1864
1865 bbits = inode->i_blkbits;
1866 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1867
1868 for(bh = head, block_start = 0; bh != head || !block_start;
1869 block++, block_start=block_end, bh = bh->b_this_page) {
1870 block_end = block_start + blocksize;
1871 if (block_end <= from || block_start >= to) {
1872 if (PageUptodate(page)) {
1873 if (!buffer_uptodate(bh))
1874 set_buffer_uptodate(bh);
1875 }
1876 continue;
1877 }
1878 if (buffer_new(bh))
1879 clear_buffer_new(bh);
1880 if (!buffer_mapped(bh)) {
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08001881 WARN_ON(bh->b_size != blocksize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882 err = get_block(inode, block, bh, 1);
1883 if (err)
Nick Pigginf3ddbdc2005-05-05 16:15:45 -07001884 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001885 if (buffer_new(bh)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001886 unmap_underlying_metadata(bh->b_bdev,
1887 bh->b_blocknr);
1888 if (PageUptodate(page)) {
Nick Piggin637aff42007-10-16 01:25:00 -07001889 clear_buffer_new(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001890 set_buffer_uptodate(bh);
Nick Piggin637aff42007-10-16 01:25:00 -07001891 mark_buffer_dirty(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892 continue;
1893 }
Christoph Lametereebd2aa2008-02-04 22:28:29 -08001894 if (block_end > to || block_start < from)
1895 zero_user_segments(page,
1896 to, block_end,
1897 block_start, from);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001898 continue;
1899 }
1900 }
1901 if (PageUptodate(page)) {
1902 if (!buffer_uptodate(bh))
1903 set_buffer_uptodate(bh);
1904 continue;
1905 }
1906 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
David Chinner33a266d2007-02-12 00:51:41 -08001907 !buffer_unwritten(bh) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908 (block_start < from || block_end > to)) {
1909 ll_rw_block(READ, 1, &bh);
1910 *wait_bh++=bh;
1911 }
1912 }
1913 /*
1914 * If we issued read requests - let them complete.
1915 */
1916 while(wait_bh > wait) {
1917 wait_on_buffer(*--wait_bh);
1918 if (!buffer_uptodate(*wait_bh))
Nick Pigginf3ddbdc2005-05-05 16:15:45 -07001919 err = -EIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001920 }
Nick Pigginafddba42007-10-16 01:25:01 -07001921 if (unlikely(err))
1922 page_zero_new_buffers(page, from, to);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001923 return err;
1924}
1925
1926static int __block_commit_write(struct inode *inode, struct page *page,
1927 unsigned from, unsigned to)
1928{
1929 unsigned block_start, block_end;
1930 int partial = 0;
1931 unsigned blocksize;
1932 struct buffer_head *bh, *head;
1933
1934 blocksize = 1 << inode->i_blkbits;
1935
1936 for(bh = head = page_buffers(page), block_start = 0;
1937 bh != head || !block_start;
1938 block_start=block_end, bh = bh->b_this_page) {
1939 block_end = block_start + blocksize;
1940 if (block_end <= from || block_start >= to) {
1941 if (!buffer_uptodate(bh))
1942 partial = 1;
1943 } else {
1944 set_buffer_uptodate(bh);
1945 mark_buffer_dirty(bh);
1946 }
Nick Pigginafddba42007-10-16 01:25:01 -07001947 clear_buffer_new(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948 }
1949
1950 /*
1951 * If this is a partial write which happened to make all buffers
1952 * uptodate then we can optimize away a bogus readpage() for
1953 * the next read(). Here we 'discover' whether the page went
1954 * uptodate as a result of this (potentially partial) write.
1955 */
1956 if (!partial)
1957 SetPageUptodate(page);
1958 return 0;
1959}
1960
1961/*
Nick Pigginafddba42007-10-16 01:25:01 -07001962 * block_write_begin takes care of the basic task of block allocation and
1963 * bringing partial write blocks uptodate first.
1964 *
1965 * If *pagep is not NULL, then block_write_begin uses the locked page
1966 * at *pagep rather than allocating its own. In this case, the page will
1967 * not be unlocked or deallocated on failure.
1968 */
1969int block_write_begin(struct file *file, struct address_space *mapping,
1970 loff_t pos, unsigned len, unsigned flags,
1971 struct page **pagep, void **fsdata,
1972 get_block_t *get_block)
1973{
1974 struct inode *inode = mapping->host;
1975 int status = 0;
1976 struct page *page;
1977 pgoff_t index;
1978 unsigned start, end;
1979 int ownpage = 0;
1980
1981 index = pos >> PAGE_CACHE_SHIFT;
1982 start = pos & (PAGE_CACHE_SIZE - 1);
1983 end = start + len;
1984
1985 page = *pagep;
1986 if (page == NULL) {
1987 ownpage = 1;
Nick Piggin54566b22009-01-04 12:00:53 -08001988 page = grab_cache_page_write_begin(mapping, index, flags);
Nick Pigginafddba42007-10-16 01:25:01 -07001989 if (!page) {
1990 status = -ENOMEM;
1991 goto out;
1992 }
1993 *pagep = page;
1994 } else
1995 BUG_ON(!PageLocked(page));
1996
1997 status = __block_prepare_write(inode, page, start, end, get_block);
1998 if (unlikely(status)) {
1999 ClearPageUptodate(page);
2000
2001 if (ownpage) {
2002 unlock_page(page);
2003 page_cache_release(page);
2004 *pagep = NULL;
2005
2006 /*
2007 * prepare_write() may have instantiated a few blocks
2008 * outside i_size. Trim these off again. Don't need
2009 * i_size_read because we hold i_mutex.
2010 */
2011 if (pos + len > inode->i_size)
2012 vmtruncate(inode, inode->i_size);
2013 }
Nick Pigginafddba42007-10-16 01:25:01 -07002014 }
2015
2016out:
2017 return status;
2018}
2019EXPORT_SYMBOL(block_write_begin);
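/*
 * Sketch of a typical caller (assumed filesystem code, not from this
 * file): ->write_begin usually just forwards to block_write_begin()
 * with the filesystem's own get_block callback; example_get_block is
 * a hypothetical get_block_t used throughout these sketches.
 */
#if 0	/* example only */
static int example_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	*pagep = NULL;	/* let block_write_begin() allocate the page */
	return block_write_begin(file, mapping, pos, len, flags,
				pagep, fsdata, example_get_block);
}
#endif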
2020
2021int block_write_end(struct file *file, struct address_space *mapping,
2022 loff_t pos, unsigned len, unsigned copied,
2023 struct page *page, void *fsdata)
2024{
2025 struct inode *inode = mapping->host;
2026 unsigned start;
2027
2028 start = pos & (PAGE_CACHE_SIZE - 1);
2029
2030 if (unlikely(copied < len)) {
2031 /*
2032 * The buffers that were written will now be uptodate, so we
2033 * don't have to worry about a readpage reading them and
2034 * overwriting a partial write. However if we have encountered
2035 * a short write and only partially written into a buffer, it
2036 * will not be marked uptodate, so a readpage might come in and
2037 * destroy our partial write.
2038 *
2039 * Do the simplest thing, and just treat any short write to a
2040 * non uptodate page as a zero-length write, and force the
2041 * caller to redo the whole thing.
2042 */
2043 if (!PageUptodate(page))
2044 copied = 0;
2045
2046 page_zero_new_buffers(page, start+copied, start+len);
2047 }
2048 flush_dcache_page(page);
2049
2050 /* This could be a short (even 0-length) commit */
2051 __block_commit_write(inode, page, start, start+copied);
2052
2053 return copied;
2054}
2055EXPORT_SYMBOL(block_write_end);
2056
2057int generic_write_end(struct file *file, struct address_space *mapping,
2058 loff_t pos, unsigned len, unsigned copied,
2059 struct page *page, void *fsdata)
2060{
2061 struct inode *inode = mapping->host;
Jan Karac7d206b2008-07-11 19:27:31 -04002062 int i_size_changed = 0;
Nick Pigginafddba42007-10-16 01:25:01 -07002063
2064 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2065
2066 /*
2067 * No need to use i_size_read() here, the i_size
2068 * cannot change under us because we hold i_mutex.
2069 *
2070 * But it's important to update i_size while still holding page lock:
2071 * page writeout could otherwise come in and zero beyond i_size.
2072 */
2073 if (pos+copied > inode->i_size) {
2074 i_size_write(inode, pos+copied);
Jan Karac7d206b2008-07-11 19:27:31 -04002075 i_size_changed = 1;
Nick Pigginafddba42007-10-16 01:25:01 -07002076 }
2077
2078 unlock_page(page);
2079 page_cache_release(page);
2080
Jan Karac7d206b2008-07-11 19:27:31 -04002081 /*
2082 * Don't mark the inode dirty under page lock. First, it unnecessarily
2083 * makes the holding time of page lock longer. Second, it forces lock
2084 * ordering of page lock and transaction start for journaling
2085 * filesystems.
2086 */
2087 if (i_size_changed)
2088 mark_inode_dirty(inode);
2089
Nick Pigginafddba42007-10-16 01:25:01 -07002090 return copied;
2091}
2092EXPORT_SYMBOL(generic_write_end);
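/*
 * Sketch (hypothetical names as above): pairing the write_begin
 * wrapper with generic_write_end() in an address_space_operations
 * table; the other example_* methods are sketched next to the helpers
 * they wrap, further down in this file.
 */
#if 0	/* example only */
static const struct address_space_operations example_aops = {
	.readpage		= example_readpage,
	.writepage		= example_writepage,
	.write_begin		= example_write_begin,
	.write_end		= generic_write_end,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.bmap			= example_bmap,
};
#endif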
2093
2094/*
Hisashi Hifumi8ab22b92008-07-28 15:46:36 -07002095 * block_is_partially_uptodate checks whether buffers within a page are
2096 * uptodate or not.
2097 *
2098 * Returns true if all buffers which correspond to a file portion
2099 * we want to read are uptodate.
2100 */
2101int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2102 unsigned long from)
2103{
2104 struct inode *inode = page->mapping->host;
2105 unsigned block_start, block_end, blocksize;
2106 unsigned to;
2107 struct buffer_head *bh, *head;
2108 int ret = 1;
2109
2110 if (!page_has_buffers(page))
2111 return 0;
2112
2113 blocksize = 1 << inode->i_blkbits;
2114 to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2115 to = from + to;
2116 if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2117 return 0;
2118
2119 head = page_buffers(page);
2120 bh = head;
2121 block_start = 0;
2122 do {
2123 block_end = block_start + blocksize;
2124 if (block_end > from && block_start < to) {
2125 if (!buffer_uptodate(bh)) {
2126 ret = 0;
2127 break;
2128 }
2129 if (block_end >= to)
2130 break;
2131 }
2132 block_start = block_end;
2133 bh = bh->b_this_page;
2134 } while (bh != head);
2135
2136 return ret;
2137}
2138EXPORT_SYMBOL(block_is_partially_uptodate);
2139
2140/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002141 * Generic "read page" function for block devices that have the normal
2142 * get_block functionality. This covers most block-device-backed filesystems.
2143 * Reads the page asynchronously --- the unlock_buffer() and
2144 * set/clear_buffer_uptodate() functions propagate buffer state into the
2145 * page struct once IO has completed.
2146 */
2147int block_read_full_page(struct page *page, get_block_t *get_block)
2148{
2149 struct inode *inode = page->mapping->host;
2150 sector_t iblock, lblock;
2151 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2152 unsigned int blocksize;
2153 int nr, i;
2154 int fully_mapped = 1;
2155
Matt Mackallcd7619d2005-05-01 08:59:01 -07002156 BUG_ON(!PageLocked(page));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002157 blocksize = 1 << inode->i_blkbits;
2158 if (!page_has_buffers(page))
2159 create_empty_buffers(page, blocksize, 0);
2160 head = page_buffers(page);
2161
2162 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2163 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2164 bh = head;
2165 nr = 0;
2166 i = 0;
2167
2168 do {
2169 if (buffer_uptodate(bh))
2170 continue;
2171
2172 if (!buffer_mapped(bh)) {
Andrew Mortonc64610b2005-05-16 21:53:49 -07002173 int err = 0;
2174
Linus Torvalds1da177e2005-04-16 15:20:36 -07002175 fully_mapped = 0;
2176 if (iblock < lblock) {
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08002177 WARN_ON(bh->b_size != blocksize);
Andrew Mortonc64610b2005-05-16 21:53:49 -07002178 err = get_block(inode, iblock, bh, 0);
2179 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180 SetPageError(page);
2181 }
2182 if (!buffer_mapped(bh)) {
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002183 zero_user(page, i * blocksize, blocksize);
Andrew Mortonc64610b2005-05-16 21:53:49 -07002184 if (!err)
2185 set_buffer_uptodate(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002186 continue;
2187 }
2188 /*
2189 * get_block() might have updated the buffer
2190 * synchronously
2191 */
2192 if (buffer_uptodate(bh))
2193 continue;
2194 }
2195 arr[nr++] = bh;
2196 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2197
2198 if (fully_mapped)
2199 SetPageMappedToDisk(page);
2200
2201 if (!nr) {
2202 /*
2203 * All buffers are uptodate - we can set the page uptodate
2204 * as well. But not if get_block() returned an error.
2205 */
2206 if (!PageError(page))
2207 SetPageUptodate(page);
2208 unlock_page(page);
2209 return 0;
2210 }
2211
2212 /* Stage two: lock the buffers */
2213 for (i = 0; i < nr; i++) {
2214 bh = arr[i];
2215 lock_buffer(bh);
2216 mark_buffer_async_read(bh);
2217 }
2218
2219 /*
2220 * Stage 3: start the IO. Check for uptodateness
2221 * inside the buffer lock in case another process reading
2222 * the underlying blockdev brought it uptodate (the sct fix).
2223 */
2224 for (i = 0; i < nr; i++) {
2225 bh = arr[i];
2226 if (buffer_uptodate(bh))
2227 end_buffer_async_read(bh, 1);
2228 else
2229 submit_bh(READ, bh);
2230 }
2231 return 0;
2232}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002233EXPORT_SYMBOL(block_read_full_page);
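/*
 * Sketch (assumed, not from this file): the canonical ->readpage
 * wrapper around block_read_full_page().
 */
#if 0	/* example only */
static int example_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, example_get_block);
}
#endif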
Linus Torvalds1da177e2005-04-16 15:20:36 -07002234
2235/* utility function for filesystems that need to do work on expanding
Nick Piggin89e10782007-10-16 01:25:07 -07002236 * truncates. Uses filesystem pagecache writes to allow the filesystem to
Linus Torvalds1da177e2005-04-16 15:20:36 -07002237 * deal with the hole.
2238 */
Nick Piggin89e10782007-10-16 01:25:07 -07002239int generic_cont_expand_simple(struct inode *inode, loff_t size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240{
2241 struct address_space *mapping = inode->i_mapping;
2242 struct page *page;
Nick Piggin89e10782007-10-16 01:25:07 -07002243 void *fsdata;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002244 int err;
2245
npiggin@suse.dec08d3b02009-08-21 02:35:06 +10002246 err = inode_newsize_ok(inode, size);
2247 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002248 goto out;
2249
Nick Piggin89e10782007-10-16 01:25:07 -07002250 err = pagecache_write_begin(NULL, mapping, size, 0,
2251 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2252 &page, &fsdata);
2253 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002254 goto out;
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002255
Nick Piggin89e10782007-10-16 01:25:07 -07002256 err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2257 BUG_ON(err > 0);
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002258
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259out:
2260 return err;
2261}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002262EXPORT_SYMBOL(generic_cont_expand_simple);
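/*
 * Sketch of an expanding truncate (assumed caller): a filesystem's
 * ->setattr path can grow the file through the pagecache with
 * generic_cont_expand_simple() before it updates any size-dependent
 * metadata of its own.
 */
#if 0	/* example only */
static int example_expand(struct inode *inode, loff_t newsize)
{
	if (newsize <= inode->i_size)
		return 0;	/* not an expansion - nothing to zero */
	return generic_cont_expand_simple(inode, newsize);
}
#endif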
Linus Torvalds1da177e2005-04-16 15:20:36 -07002263
Adrian Bunkf1e3af72008-04-29 00:59:01 -07002264static int cont_expand_zero(struct file *file, struct address_space *mapping,
2265 loff_t pos, loff_t *bytes)
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002266{
Nick Piggin89e10782007-10-16 01:25:07 -07002267 struct inode *inode = mapping->host;
2268 unsigned blocksize = 1 << inode->i_blkbits;
2269 struct page *page;
2270 void *fsdata;
2271 pgoff_t index, curidx;
2272 loff_t curpos;
2273 unsigned zerofrom, offset, len;
2274 int err = 0;
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002275
Nick Piggin89e10782007-10-16 01:25:07 -07002276 index = pos >> PAGE_CACHE_SHIFT;
2277 offset = pos & ~PAGE_CACHE_MASK;
2278
2279 while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2280 zerofrom = curpos & ~PAGE_CACHE_MASK;
2281 if (zerofrom & (blocksize-1)) {
2282 *bytes |= (blocksize-1);
2283 (*bytes)++;
2284 }
2285 len = PAGE_CACHE_SIZE - zerofrom;
2286
2287 err = pagecache_write_begin(file, mapping, curpos, len,
2288 AOP_FLAG_UNINTERRUPTIBLE,
2289 &page, &fsdata);
2290 if (err)
2291 goto out;
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002292 zero_user(page, zerofrom, len);
Nick Piggin89e10782007-10-16 01:25:07 -07002293 err = pagecache_write_end(file, mapping, curpos, len, len,
2294 page, fsdata);
2295 if (err < 0)
2296 goto out;
2297 BUG_ON(err != len);
2298 err = 0;
OGAWA Hirofumi061e9742008-04-28 02:16:28 -07002299
2300 balance_dirty_pages_ratelimited(mapping);
Nick Piggin89e10782007-10-16 01:25:07 -07002301 }
2302
2303 /* page covers the boundary, find the boundary offset */
2304 if (index == curidx) {
2305 zerofrom = curpos & ~PAGE_CACHE_MASK;
2306 /* if we will expand the thing, the last block will be filled */
2307 if (offset <= zerofrom) {
2308 goto out;
2309 }
2310 if (zerofrom & (blocksize-1)) {
2311 *bytes |= (blocksize-1);
2312 (*bytes)++;
2313 }
2314 len = offset - zerofrom;
2315
2316 err = pagecache_write_begin(file, mapping, curpos, len,
2317 AOP_FLAG_UNINTERRUPTIBLE,
2318 &page, &fsdata);
2319 if (err)
2320 goto out;
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002321 zero_user(page, zerofrom, len);
Nick Piggin89e10782007-10-16 01:25:07 -07002322 err = pagecache_write_end(file, mapping, curpos, len, len,
2323 page, fsdata);
2324 if (err < 0)
2325 goto out;
2326 BUG_ON(err != len);
2327 err = 0;
2328 }
2329out:
2330 return err;
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002331}
2332
Linus Torvalds1da177e2005-04-16 15:20:36 -07002333/*
2334 * For moronic filesystems that do not allow holes in files.
2335 * We may have to extend the file.
2336 */
Nick Piggin89e10782007-10-16 01:25:07 -07002337int cont_write_begin(struct file *file, struct address_space *mapping,
2338 loff_t pos, unsigned len, unsigned flags,
2339 struct page **pagep, void **fsdata,
2340 get_block_t *get_block, loff_t *bytes)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002341{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002342 struct inode *inode = mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002343 unsigned blocksize = 1 << inode->i_blkbits;
Nick Piggin89e10782007-10-16 01:25:07 -07002344 unsigned zerofrom;
2345 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002346
Nick Piggin89e10782007-10-16 01:25:07 -07002347 err = cont_expand_zero(file, mapping, pos, bytes);
2348 if (err)
2349 goto out;
2350
2351 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2352 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2353 *bytes |= (blocksize-1);
2354 (*bytes)++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002355 }
2356
Nick Piggin89e10782007-10-16 01:25:07 -07002357 *pagep = NULL;
2358 err = block_write_begin(file, mapping, pos, len,
2359 flags, pagep, fsdata, get_block);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002360out:
Nick Piggin89e10782007-10-16 01:25:07 -07002361 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002362}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002363EXPORT_SYMBOL(cont_write_begin);
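/*
 * Sketch (assumed): a filesystem that cannot represent holes forwards
 * ->write_begin to cont_write_begin(), handing over a per-inode
 * counter of how far the file has been zero-filled so far.  Both
 * EXAMPLE_I() and the i_filled field are hypothetical names.
 */
#if 0	/* example only */
static int example_cont_write_begin(struct file *file,
		struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;

	*pagep = NULL;
	return cont_write_begin(file, mapping, pos, len, flags,
				pagep, fsdata, example_get_block,
				&EXAMPLE_I(inode)->i_filled);
}
#endif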
Linus Torvalds1da177e2005-04-16 15:20:36 -07002364
2365int block_prepare_write(struct page *page, unsigned from, unsigned to,
2366 get_block_t *get_block)
2367{
2368 struct inode *inode = page->mapping->host;
2369 int err = __block_prepare_write(inode, page, from, to, get_block);
2370 if (err)
2371 ClearPageUptodate(page);
2372 return err;
2373}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002374EXPORT_SYMBOL(block_prepare_write);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002375
2376int block_commit_write(struct page *page, unsigned from, unsigned to)
2377{
2378 struct inode *inode = page->mapping->host;
2379 __block_commit_write(inode,page,from,to);
2380 return 0;
2381}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002382EXPORT_SYMBOL(block_commit_write);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002383
David Chinner54171692007-07-19 17:39:55 +10002384/*
2385 * block_page_mkwrite() is not allowed to change the file size as it gets
2386 * called from a page fault handler when a page is first dirtied. Hence we must
2387 * be careful to check for EOF conditions here. We set the page up correctly
2388 * for a written page which means we get ENOSPC checking when writing into
2389 * holes and correct delalloc and unwritten extent mapping on filesystems that
2390 * support these features.
2391 *
2392 * We are not allowed to take the i_mutex here so we have to play games to
2393 * protect against truncate races as the page could now be beyond EOF. Because
2394 * vmtruncate() writes the inode size before removing pages, once we have the
2395 * page lock we can determine safely if the page is beyond EOF. If it is not
2396 * beyond EOF, then the page is guaranteed safe against truncation until we
2397 * unlock the page.
2398 */
2399int
Nick Pigginc2ec1752009-03-31 15:23:21 -07002400block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
David Chinner54171692007-07-19 17:39:55 +10002401 get_block_t get_block)
2402{
Nick Pigginc2ec1752009-03-31 15:23:21 -07002403 struct page *page = vmf->page;
David Chinner54171692007-07-19 17:39:55 +10002404 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2405 unsigned long end;
2406 loff_t size;
Nick Piggin56a76f82009-03-31 15:23:23 -07002407 int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
David Chinner54171692007-07-19 17:39:55 +10002408
2409 lock_page(page);
2410 size = i_size_read(inode);
2411 if ((page->mapping != inode->i_mapping) ||
Nick Piggin18336332007-07-20 00:31:45 -07002412 (page_offset(page) > size)) {
David Chinner54171692007-07-19 17:39:55 +10002413 /* page got truncated out from underneath us */
Nick Pigginb827e492009-04-30 15:08:16 -07002414 unlock_page(page);
2415 goto out;
David Chinner54171692007-07-19 17:39:55 +10002416 }
2417
2418 /* page is wholly or partially inside EOF */
2419 if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2420 end = size & ~PAGE_CACHE_MASK;
2421 else
2422 end = PAGE_CACHE_SIZE;
2423
2424 ret = block_prepare_write(page, 0, end, get_block);
2425 if (!ret)
2426 ret = block_commit_write(page, 0, end);
2427
Nick Piggin56a76f82009-03-31 15:23:23 -07002428 if (unlikely(ret)) {
Nick Pigginb827e492009-04-30 15:08:16 -07002429 unlock_page(page);
Nick Piggin56a76f82009-03-31 15:23:23 -07002430 if (ret == -ENOMEM)
2431 ret = VM_FAULT_OOM;
2432 else /* -ENOSPC, -EIO, etc */
2433 ret = VM_FAULT_SIGBUS;
Nick Pigginb827e492009-04-30 15:08:16 -07002434 } else
2435 ret = VM_FAULT_LOCKED;
Nick Pigginc2ec1752009-03-31 15:23:21 -07002436
Nick Pigginb827e492009-04-30 15:08:16 -07002437out:
David Chinner54171692007-07-19 17:39:55 +10002438 return ret;
2439}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002440EXPORT_SYMBOL(block_page_mkwrite);
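/*
 * Sketch (assumed): wiring block_page_mkwrite() into a file's
 * vm_operations so that writes through mmap get block allocation and
 * ENOSPC checking just like write(2) does.
 */
#if 0	/* example only */
static int example_page_mkwrite(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	return block_page_mkwrite(vma, vmf, example_get_block);
}

static const struct vm_operations_struct example_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= example_page_mkwrite,
};
#endif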
Linus Torvalds1da177e2005-04-16 15:20:36 -07002441
2442/*
Nick Piggin03158cd2007-10-16 01:25:25 -07002443 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
Linus Torvalds1da177e2005-04-16 15:20:36 -07002444 * immediately, while under the page lock. So it needs a special end_io
2445 * handler which does not touch the bh after unlocking it.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002446 */
2447static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2448{
Dmitry Monakhov68671f32007-10-16 01:24:47 -07002449 __end_buffer_read_notouch(bh, uptodate);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002450}
2451
2452/*
Nick Piggin03158cd2007-10-16 01:25:25 -07002453 * Attach the singly-linked list of buffers created by nobh_write_begin, to
2454 * the page (converting it to circular linked list and taking care of page
2455 * dirty races).
2456 */
2457static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2458{
2459 struct buffer_head *bh;
2460
2461 BUG_ON(!PageLocked(page));
2462
2463 spin_lock(&page->mapping->private_lock);
2464 bh = head;
2465 do {
2466 if (PageDirty(page))
2467 set_buffer_dirty(bh);
2468 if (!bh->b_this_page)
2469 bh->b_this_page = head;
2470 bh = bh->b_this_page;
2471 } while (bh != head);
2472 attach_page_buffers(page, head);
2473 spin_unlock(&page->mapping->private_lock);
2474}
2475
2476/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002477 * On entry, the page is fully not uptodate.
2478 * On exit the page is fully uptodate in the areas outside (from,to)
2479 */
Nick Piggin03158cd2007-10-16 01:25:25 -07002480int nobh_write_begin(struct file *file, struct address_space *mapping,
2481 loff_t pos, unsigned len, unsigned flags,
2482 struct page **pagep, void **fsdata,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002483 get_block_t *get_block)
2484{
Nick Piggin03158cd2007-10-16 01:25:25 -07002485 struct inode *inode = mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002486 const unsigned blkbits = inode->i_blkbits;
2487 const unsigned blocksize = 1 << blkbits;
Nick Piggina4b06722007-10-16 01:24:48 -07002488 struct buffer_head *head, *bh;
Nick Piggin03158cd2007-10-16 01:25:25 -07002489 struct page *page;
2490 pgoff_t index;
2491 unsigned from, to;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002492 unsigned block_in_page;
Nick Piggina4b06722007-10-16 01:24:48 -07002493 unsigned block_start, block_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002494 sector_t block_in_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002495 int nr_reads = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002496 int ret = 0;
2497 int is_mapped_to_disk = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002498
Nick Piggin03158cd2007-10-16 01:25:25 -07002499 index = pos >> PAGE_CACHE_SHIFT;
2500 from = pos & (PAGE_CACHE_SIZE - 1);
2501 to = from + len;
2502
Nick Piggin54566b22009-01-04 12:00:53 -08002503 page = grab_cache_page_write_begin(mapping, index, flags);
Nick Piggin03158cd2007-10-16 01:25:25 -07002504 if (!page)
2505 return -ENOMEM;
2506 *pagep = page;
2507 *fsdata = NULL;
2508
2509 if (page_has_buffers(page)) {
2510 unlock_page(page);
2511 page_cache_release(page);
2512 *pagep = NULL;
2513 return block_write_begin(file, mapping, pos, len, flags, pagep,
2514 fsdata, get_block);
2515 }
Nick Piggina4b06722007-10-16 01:24:48 -07002516
Linus Torvalds1da177e2005-04-16 15:20:36 -07002517 if (PageMappedToDisk(page))
2518 return 0;
2519
Nick Piggina4b06722007-10-16 01:24:48 -07002520 /*
2521 * Allocate buffers so that we can keep track of state, and potentially
2522 * attach them to the page if an error occurs. In the common case of
2523 * no error, they will just be freed again without ever being attached
2524 * to the page (which is all OK, because we're under the page lock).
2525 *
2526 * Be careful: the buffer linked list is a NULL terminated one, rather
2527 * than the circular one we're used to.
2528 */
2529 head = alloc_page_buffers(page, blocksize, 0);
Nick Piggin03158cd2007-10-16 01:25:25 -07002530 if (!head) {
2531 ret = -ENOMEM;
2532 goto out_release;
2533 }
Nick Piggina4b06722007-10-16 01:24:48 -07002534
Linus Torvalds1da177e2005-04-16 15:20:36 -07002535 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002536
2537 /*
2538 * We loop across all blocks in the page, whether or not they are
2539 * part of the affected region. This is so we can discover if the
2540 * page is fully mapped-to-disk.
2541 */
Nick Piggina4b06722007-10-16 01:24:48 -07002542 for (block_start = 0, block_in_page = 0, bh = head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002543 block_start < PAGE_CACHE_SIZE;
Nick Piggina4b06722007-10-16 01:24:48 -07002544 block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002545 int create;
2546
Nick Piggina4b06722007-10-16 01:24:48 -07002547 block_end = block_start + blocksize;
2548 bh->b_state = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002549 create = 1;
2550 if (block_start >= to)
2551 create = 0;
2552 ret = get_block(inode, block_in_file + block_in_page,
Nick Piggina4b06722007-10-16 01:24:48 -07002553 bh, create);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002554 if (ret)
2555 goto failed;
Nick Piggina4b06722007-10-16 01:24:48 -07002556 if (!buffer_mapped(bh))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002557 is_mapped_to_disk = 0;
Nick Piggina4b06722007-10-16 01:24:48 -07002558 if (buffer_new(bh))
2559 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2560 if (PageUptodate(page)) {
2561 set_buffer_uptodate(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002562 continue;
Nick Piggina4b06722007-10-16 01:24:48 -07002563 }
2564 if (buffer_new(bh) || !buffer_mapped(bh)) {
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002565 zero_user_segments(page, block_start, from,
2566 to, block_end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002567 continue;
2568 }
Nick Piggina4b06722007-10-16 01:24:48 -07002569 if (buffer_uptodate(bh))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002570 continue; /* reiserfs does this */
2571 if (block_start < from || block_end > to) {
Nick Piggina4b06722007-10-16 01:24:48 -07002572 lock_buffer(bh);
2573 bh->b_end_io = end_buffer_read_nobh;
2574 submit_bh(READ, bh);
2575 nr_reads++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002576 }
2577 }
2578
2579 if (nr_reads) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002580 /*
2581 * The page is locked, so these buffers are protected from
2582 * any VM or truncate activity. Hence we don't need to care
2583 * for the buffer_head refcounts.
2584 */
Nick Piggina4b06722007-10-16 01:24:48 -07002585 for (bh = head; bh; bh = bh->b_this_page) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002586 wait_on_buffer(bh);
2587 if (!buffer_uptodate(bh))
2588 ret = -EIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002589 }
2590 if (ret)
2591 goto failed;
2592 }
2593
2594 if (is_mapped_to_disk)
2595 SetPageMappedToDisk(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002596
Nick Piggin03158cd2007-10-16 01:25:25 -07002597 *fsdata = head; /* to be released by nobh_write_end */
Nick Piggina4b06722007-10-16 01:24:48 -07002598
Linus Torvalds1da177e2005-04-16 15:20:36 -07002599 return 0;
2600
2601failed:
Nick Piggin03158cd2007-10-16 01:25:25 -07002602 BUG_ON(!ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002603 /*
Nick Piggina4b06722007-10-16 01:24:48 -07002604 * Error recovery is a bit difficult. We need to zero out blocks that
2605 * were newly allocated, and dirty them to ensure they get written out.
2606 * Buffers need to be attached to the page at this point, otherwise
2607 * the handling of potential IO errors during writeout would be hard
2608 * (could try doing synchronous writeout, but what if that fails too?)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002609 */
Nick Piggin03158cd2007-10-16 01:25:25 -07002610 attach_nobh_buffers(page, head);
2611 page_zero_new_buffers(page, from, to);
Nick Piggina4b06722007-10-16 01:24:48 -07002612
Nick Piggin03158cd2007-10-16 01:25:25 -07002613out_release:
2614 unlock_page(page);
2615 page_cache_release(page);
2616 *pagep = NULL;
Nick Piggina4b06722007-10-16 01:24:48 -07002617
Nick Piggin03158cd2007-10-16 01:25:25 -07002618 if (pos + len > inode->i_size)
2619 vmtruncate(inode, inode->i_size);
Nick Piggina4b06722007-10-16 01:24:48 -07002620
Linus Torvalds1da177e2005-04-16 15:20:36 -07002621 return ret;
2622}
Nick Piggin03158cd2007-10-16 01:25:25 -07002623EXPORT_SYMBOL(nobh_write_begin);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002624
Nick Piggin03158cd2007-10-16 01:25:25 -07002625int nobh_write_end(struct file *file, struct address_space *mapping,
2626 loff_t pos, unsigned len, unsigned copied,
2627 struct page *page, void *fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002628{
2629 struct inode *inode = page->mapping->host;
Nick Pigginefdc3132007-10-21 06:57:41 +02002630 struct buffer_head *head = fsdata;
Nick Piggin03158cd2007-10-16 01:25:25 -07002631 struct buffer_head *bh;
Dmitri Monakhov5b41e742008-03-28 14:15:52 -07002632 BUG_ON(fsdata != NULL && page_has_buffers(page));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002633
Dave Kleikampd4cf1092009-02-06 14:59:26 -06002634 if (unlikely(copied < len) && head)
Dmitri Monakhov5b41e742008-03-28 14:15:52 -07002635 attach_nobh_buffers(page, head);
2636 if (page_has_buffers(page))
2637 return generic_write_end(file, mapping, pos, len,
2638 copied, page, fsdata);
Nick Piggina4b06722007-10-16 01:24:48 -07002639
Nick Piggin22c8ca72007-02-20 13:58:09 -08002640 SetPageUptodate(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002641 set_page_dirty(page);
Nick Piggin03158cd2007-10-16 01:25:25 -07002642 if (pos+copied > inode->i_size) {
2643 i_size_write(inode, pos+copied);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002644 mark_inode_dirty(inode);
2645 }
Nick Piggin03158cd2007-10-16 01:25:25 -07002646
2647 unlock_page(page);
2648 page_cache_release(page);
2649
Nick Piggin03158cd2007-10-16 01:25:25 -07002650 while (head) {
2651 bh = head;
2652 head = head->b_this_page;
2653 free_buffer_head(bh);
2654 }
2655
2656 return copied;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002657}
Nick Piggin03158cd2007-10-16 01:25:25 -07002658EXPORT_SYMBOL(nobh_write_end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002659
2660/*
2661 * nobh_writepage() - based on block_write_full_page() except
2662 * that it tries to operate without attaching bufferheads to
2663 * the page.
2664 */
2665int nobh_writepage(struct page *page, get_block_t *get_block,
2666 struct writeback_control *wbc)
2667{
2668 struct inode * const inode = page->mapping->host;
2669 loff_t i_size = i_size_read(inode);
2670 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2671 unsigned offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002672 int ret;
2673
2674 /* Is the page fully inside i_size? */
2675 if (page->index < end_index)
2676 goto out;
2677
2678 /* Is the page fully outside i_size? (truncate in progress) */
2679 offset = i_size & (PAGE_CACHE_SIZE-1);
2680 if (page->index >= end_index+1 || !offset) {
2681 /*
2682 * The page may have dirty, unmapped buffers. For example,
2683 * they may have been added in ext3_writepage(). Make them
2684 * freeable here, so the page does not leak.
2685 */
2686#if 0
2687 /* Not really sure about this - do we need this ? */
2688 if (page->mapping->a_ops->invalidatepage)
2689 page->mapping->a_ops->invalidatepage(page, offset);
2690#endif
2691 unlock_page(page);
2692 return 0; /* don't care */
2693 }
2694
2695 /*
2696 * The page straddles i_size. It must be zeroed out on each and every
2697 * writepage invocation because it may be mmapped. "A file is mapped
2698 * in multiples of the page size. For a file that is not a multiple of
2699 * the page size, the remaining memory is zeroed when mapped, and
2700 * writes to that region are not written out to the file."
2701 */
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002702 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002703out:
2704 ret = mpage_writepage(page, get_block, wbc);
2705 if (ret == -EAGAIN)
Chris Mason35c80d52009-04-15 13:22:38 -04002706 ret = __block_write_full_page(inode, page, get_block, wbc,
2707 end_buffer_async_write);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002708 return ret;
2709}
2710EXPORT_SYMBOL(nobh_writepage);
2711
Nick Piggin03158cd2007-10-16 01:25:25 -07002712int nobh_truncate_page(struct address_space *mapping,
2713 loff_t from, get_block_t *get_block)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002714{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002715 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2716 unsigned offset = from & (PAGE_CACHE_SIZE-1);
Nick Piggin03158cd2007-10-16 01:25:25 -07002717 unsigned blocksize;
2718 sector_t iblock;
2719 unsigned length, pos;
2720 struct inode *inode = mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002721 struct page *page;
Nick Piggin03158cd2007-10-16 01:25:25 -07002722 struct buffer_head map_bh;
2723 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002724
Nick Piggin03158cd2007-10-16 01:25:25 -07002725 blocksize = 1 << inode->i_blkbits;
2726 length = offset & (blocksize - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002727
Nick Piggin03158cd2007-10-16 01:25:25 -07002728 /* Block boundary? Nothing to do */
2729 if (!length)
2730 return 0;
2731
2732 length = blocksize - length;
2733 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2734
Linus Torvalds1da177e2005-04-16 15:20:36 -07002735 page = grab_cache_page(mapping, index);
Nick Piggin03158cd2007-10-16 01:25:25 -07002736 err = -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002737 if (!page)
2738 goto out;
2739
Nick Piggin03158cd2007-10-16 01:25:25 -07002740 if (page_has_buffers(page)) {
2741has_buffers:
2742 unlock_page(page);
2743 page_cache_release(page);
2744 return block_truncate_page(mapping, from, get_block);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002745 }
Nick Piggin03158cd2007-10-16 01:25:25 -07002746
2747 /* Find the buffer that contains "offset" */
2748 pos = blocksize;
2749 while (offset >= pos) {
2750 iblock++;
2751 pos += blocksize;
2752 }
2753
Theodore Ts'o460bcf52009-05-12 07:37:56 -04002754 map_bh.b_size = blocksize;
2755 map_bh.b_state = 0;
Nick Piggin03158cd2007-10-16 01:25:25 -07002756 err = get_block(inode, iblock, &map_bh, 0);
2757 if (err)
2758 goto unlock;
2759 /* unmapped? It's a hole - nothing to do */
2760 if (!buffer_mapped(&map_bh))
2761 goto unlock;
2762
2763 /* Ok, it's mapped. Make sure it's up-to-date */
2764 if (!PageUptodate(page)) {
2765 err = mapping->a_ops->readpage(NULL, page);
2766 if (err) {
2767 page_cache_release(page);
2768 goto out;
2769 }
2770 lock_page(page);
2771 if (!PageUptodate(page)) {
2772 err = -EIO;
2773 goto unlock;
2774 }
2775 if (page_has_buffers(page))
2776 goto has_buffers;
2777 }
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002778 zero_user(page, offset, length);
Nick Piggin03158cd2007-10-16 01:25:25 -07002779 set_page_dirty(page);
2780 err = 0;
2781
2782unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002783 unlock_page(page);
2784 page_cache_release(page);
2785out:
Nick Piggin03158cd2007-10-16 01:25:25 -07002786 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002787}
2788EXPORT_SYMBOL(nobh_truncate_page);
2789
2790int block_truncate_page(struct address_space *mapping,
2791 loff_t from, get_block_t *get_block)
2792{
2793 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2794 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2795 unsigned blocksize;
Andrew Morton54b21a72006-01-08 01:03:05 -08002796 sector_t iblock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002797 unsigned length, pos;
2798 struct inode *inode = mapping->host;
2799 struct page *page;
2800 struct buffer_head *bh;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002801 int err;
2802
2803 blocksize = 1 << inode->i_blkbits;
2804 length = offset & (blocksize - 1);
2805
2806 /* Block boundary? Nothing to do */
2807 if (!length)
2808 return 0;
2809
2810 length = blocksize - length;
Andrew Morton54b21a72006-01-08 01:03:05 -08002811 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002812
2813 page = grab_cache_page(mapping, index);
2814 err = -ENOMEM;
2815 if (!page)
2816 goto out;
2817
2818 if (!page_has_buffers(page))
2819 create_empty_buffers(page, blocksize, 0);
2820
2821 /* Find the buffer that contains "offset" */
2822 bh = page_buffers(page);
2823 pos = blocksize;
2824 while (offset >= pos) {
2825 bh = bh->b_this_page;
2826 iblock++;
2827 pos += blocksize;
2828 }
2829
2830 err = 0;
2831 if (!buffer_mapped(bh)) {
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08002832 WARN_ON(bh->b_size != blocksize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002833 err = get_block(inode, iblock, bh, 0);
2834 if (err)
2835 goto unlock;
2836 /* unmapped? It's a hole - nothing to do */
2837 if (!buffer_mapped(bh))
2838 goto unlock;
2839 }
2840
2841 /* Ok, it's mapped. Make sure it's up-to-date */
2842 if (PageUptodate(page))
2843 set_buffer_uptodate(bh);
2844
David Chinner33a266d2007-02-12 00:51:41 -08002845 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002846 err = -EIO;
2847 ll_rw_block(READ, 1, &bh);
2848 wait_on_buffer(bh);
2849 /* Uhhuh. Read error. Complain and punt. */
2850 if (!buffer_uptodate(bh))
2851 goto unlock;
2852 }
2853
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002854 zero_user(page, offset, length);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002855 mark_buffer_dirty(bh);
2856 err = 0;
2857
2858unlock:
2859 unlock_page(page);
2860 page_cache_release(page);
2861out:
2862 return err;
2863}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002864EXPORT_SYMBOL(block_truncate_page);
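/*
 * Sketch (assumed): a truncate path zeroes the tail of the final
 * partial block with block_truncate_page() before the filesystem
 * frees the on-disk blocks beyond the new EOF.
 */
#if 0	/* example only */
static void example_truncate(struct inode *inode)
{
	block_truncate_page(inode->i_mapping, inode->i_size,
			example_get_block);
	/* ... now release blocks past i_size on disk ... */
}
#endif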
Linus Torvalds1da177e2005-04-16 15:20:36 -07002865
2866/*
2867 * The generic ->writepage function for buffer-backed address_spaces
Chris Mason35c80d52009-04-15 13:22:38 -04002868 * this form passes in the end_io handler used to finish the IO.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002869 */
Chris Mason35c80d52009-04-15 13:22:38 -04002870int block_write_full_page_endio(struct page *page, get_block_t *get_block,
2871 struct writeback_control *wbc, bh_end_io_t *handler)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002872{
2873 struct inode * const inode = page->mapping->host;
2874 loff_t i_size = i_size_read(inode);
2875 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2876 unsigned offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002877
2878 /* Is the page fully inside i_size? */
2879 if (page->index < end_index)
Chris Mason35c80d52009-04-15 13:22:38 -04002880 return __block_write_full_page(inode, page, get_block, wbc,
2881 handler);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002882
2883 /* Is the page fully outside i_size? (truncate in progress) */
2884 offset = i_size & (PAGE_CACHE_SIZE-1);
2885 if (page->index >= end_index+1 || !offset) {
2886 /*
2887 * The page may have dirty, unmapped buffers. For example,
2888 * they may have been added in ext3_writepage(). Make them
2889 * freeable here, so the page does not leak.
2890 */
Jan Karaaaa40592005-10-30 15:00:16 -08002891 do_invalidatepage(page, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002892 unlock_page(page);
2893 return 0; /* don't care */
2894 }
2895
2896 /*
2897 * The page straddles i_size. It must be zeroed out on each and every
Adam Buchbinder2a61aa42009-12-11 16:35:40 -05002898 * writepage invocation because it may be mmapped. "A file is mapped
Linus Torvalds1da177e2005-04-16 15:20:36 -07002899 * in multiples of the page size. For a file that is not a multiple of
2900 * the page size, the remaining memory is zeroed when mapped, and
2901 * writes to that region are not written out to the file."
2902 */
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002903 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
Chris Mason35c80d52009-04-15 13:22:38 -04002904 return __block_write_full_page(inode, page, get_block, wbc, handler);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002905}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002906EXPORT_SYMBOL(block_write_full_page_endio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002907
Chris Mason35c80d52009-04-15 13:22:38 -04002908/*
2909 * The generic ->writepage function for buffer-backed address_spaces
2910 */
2911int block_write_full_page(struct page *page, get_block_t *get_block,
2912 struct writeback_control *wbc)
2913{
2914 return block_write_full_page_endio(page, get_block, wbc,
2915 end_buffer_async_write);
2916}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002917EXPORT_SYMBOL(block_write_full_page);
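/*
 * Sketch (assumed): the matching ->writepage wrapper for the aops
 * table sketched earlier.
 */
#if 0	/* example only */
static int example_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, example_get_block, wbc);
}
#endif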
Chris Mason35c80d52009-04-15 13:22:38 -04002918
Linus Torvalds1da177e2005-04-16 15:20:36 -07002919sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2920 get_block_t *get_block)
2921{
2922 struct buffer_head tmp;
2923 struct inode *inode = mapping->host;
2924 tmp.b_state = 0;
2925 tmp.b_blocknr = 0;
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08002926 tmp.b_size = 1 << inode->i_blkbits;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002927 get_block(inode, block, &tmp, 0);
2928 return tmp.b_blocknr;
2929}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002930EXPORT_SYMBOL(generic_block_bmap);
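/*
 * Sketch (assumed): ->bmap, as used by the FIBMAP ioctl and swap
 * files, is usually a one-line wrapper around generic_block_bmap().
 */
#if 0	/* example only */
static sector_t example_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, example_get_block);
}
#endif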

static void end_bio_bh_io_sync(struct bio *bio, int err)
{
	struct buffer_head *bh = bio->bi_private;

	if (err == -EOPNOTSUPP) {
		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		set_bit(BH_Eopnotsupp, &bh->b_state);
	}

	if (unlikely(test_bit(BIO_QUIET, &bio->bi_flags)))
		set_bit(BH_Quiet, &bh->b_state);

	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
	bio_put(bio);
}

int submit_bh(int rw, struct buffer_head *bh)
{
	struct bio *bio;
	int ret = 0;

	BUG_ON(!buffer_locked(bh));
	BUG_ON(!buffer_mapped(bh));
	BUG_ON(!bh->b_end_io);
	BUG_ON(buffer_delay(bh));
	BUG_ON(buffer_unwritten(bh));

	/*
	 * Mask in the barrier bit for a write (could be either a WRITE or a
	 * WRITE_SYNC).
	 */
	if (buffer_ordered(bh) && (rw & WRITE))
		rw |= WRITE_BARRIER;

	/*
	 * Only clear out a write error when rewriting.
	 */
	if (test_set_buffer_req(bh) && (rw & WRITE))
		clear_buffer_write_io_error(bh);

	/*
	 * From here on down, it's all bio -- do the initial mapping;
	 * submit_bio -> generic_make_request may further map this bio around.
	 */
	bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio->bi_io_vec[0].bv_page = bh->b_page;
	bio->bi_io_vec[0].bv_len = bh->b_size;
	bio->bi_io_vec[0].bv_offset = bh_offset(bh);

	bio->bi_vcnt = 1;
	bio->bi_idx = 0;
	bio->bi_size = bh->b_size;

	bio->bi_end_io = end_bio_bh_io_sync;
	bio->bi_private = bh;

	bio_get(bio);
	submit_bio(rw, bio);

	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(submit_bh);
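
/*
 * Example (illustrative sketch, not used by this file): a synchronous
 * one-block read open-coded on top of submit_bh().  This is essentially
 * what bh_uptodate_or_lock()/bh_submit_read() below package up; the
 * bdev/block/size arguments are assumptions for the example.
 */
static int example_read_block(struct block_device *bdev, sector_t block,
			      unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);
	int ret;

	if (!bh)
		return -ENOMEM;
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		brelse(bh);
		return 0;
	}
	get_bh(bh);				/* ref dropped by end_io */
	bh->b_end_io = end_buffer_read_sync;	/* unlocks bh on completion */
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	ret = buffer_uptodate(bh) ? 0 : -EIO;
	brelse(bh);
	return ret;
}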

/**
 * ll_rw_block: low-level access to block devices (DEPRECATED)
 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
 * @nr: number of &struct buffer_heads in the array
 * @bhs: array of pointers to &struct buffer_head
 *
 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
 * requests an I/O operation on them, either a %READ or a %WRITE.  The third
 * option, %SWRITE, is like %WRITE except that it guarantees that the
 * *current* data in the buffers is sent to disk.  The fourth option, %READA,
 * is described in the documentation for generic_make_request(), which
 * ll_rw_block() calls.
 *
 * This function drops any buffer that it cannot get a lock on (with the
 * BH_Lock state bit) unless %SWRITE is required, any buffer that appears to
 * be clean when doing a write request, and any buffer that appears to be
 * up-to-date when doing a read request.  Further, it marks as clean the
 * buffers that are processed for writing (the buffer cache won't assume
 * that they are actually clean until the buffer gets unlocked).
 *
 * ll_rw_block() sets b_end_io to a simple completion handler that marks
 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
 * any waiters.
 *
 * All of the buffers must be for the same device, and must also be a
 * multiple of the current approved size for the device.
 */
void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
{
	int i;

	for (i = 0; i < nr; i++) {
		struct buffer_head *bh = bhs[i];

		if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
			lock_buffer(bh);
		else if (!trylock_buffer(bh))
			continue;

		if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
		    rw == SWRITE_SYNC_PLUG) {
			if (test_clear_buffer_dirty(bh)) {
				bh->b_end_io = end_buffer_write_sync;
				get_bh(bh);
				if (rw == SWRITE_SYNC)
					submit_bh(WRITE_SYNC, bh);
				else
					submit_bh(WRITE, bh);
				continue;
			}
		} else {
			if (!buffer_uptodate(bh)) {
				bh->b_end_io = end_buffer_read_sync;
				get_bh(bh);
				submit_bh(rw, bh);
				continue;
			}
		}
		unlock_buffer(bh);
	}
}
EXPORT_SYMBOL(ll_rw_block);
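
/*
 * Example (illustrative sketch): using ll_rw_block() to kick off reads on
 * a whole batch of buffers and then waiting for them, much as callers of
 * __breadahead()/__bread() end up doing.  The bhs[] array is assumed to
 * contain mapped buffer_heads for the same device.
 */
static int example_read_batch(struct buffer_head *bhs[], int nr)
{
	int i;

	ll_rw_block(READ, nr, bhs);	/* locked/uptodate buffers are skipped */
	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			return -EIO;
	}
	return 0;
}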

/*
 * For a data-integrity writeout, we need to wait upon any in-progress I/O,
 * then start new I/O and wait upon that in turn.  The caller must hold a
 * reference on the buffer_head.
 */
int sync_dirty_buffer(struct buffer_head *bh)
{
	int ret = 0;

	WARN_ON(atomic_read(&bh->b_count) < 1);
	lock_buffer(bh);
	if (test_clear_buffer_dirty(bh)) {
		get_bh(bh);
		bh->b_end_io = end_buffer_write_sync;
		ret = submit_bh(WRITE_SYNC, bh);
		wait_on_buffer(bh);
		if (buffer_eopnotsupp(bh)) {
			clear_buffer_eopnotsupp(bh);
			ret = -EOPNOTSUPP;
		}
		if (!ret && !buffer_uptodate(bh))
			ret = -EIO;
	} else {
		unlock_buffer(bh);
	}
	return ret;
}
EXPORT_SYMBOL(sync_dirty_buffer);
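
/*
 * Example (illustrative sketch): a typical sync_dirty_buffer() call site.
 * After modifying a metadata buffer, mark it dirty and force it out to
 * disk before continuing; the caller already holds a reference on bh.
 */
static int example_commit_block(struct buffer_head *bh)
{
	/* ... modify bh->b_data here ... */
	mark_buffer_dirty(bh);
	return sync_dirty_buffer(bh);	/* 0, -EIO or -EOPNOTSUPP */
}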

/*
 * try_to_free_buffers() checks if all the buffers on this particular page
 * are unused, and releases them if so.
 *
 * Exclusion against try_to_free_buffers may be obtained by either
 * locking the page or by holding its mapping's private_lock.
 *
 * If the page is dirty but all the buffers are clean then we need to
 * be sure to mark the page clean as well.  This is because the page
 * may be against a block device, and a later reattachment of buffers
 * to a dirty page will set *all* buffers dirty, which would corrupt
 * filesystem data on the same device.
 *
 * The same applies to regular filesystem pages: if all the buffers are
 * clean then we set the page clean and proceed.  To do that, we require
 * total exclusion from __set_page_dirty_buffers().  That is obtained with
 * private_lock.
 *
 * try_to_free_buffers() is non-blocking.
 */
static inline int buffer_busy(struct buffer_head *bh)
{
	return atomic_read(&bh->b_count) |
		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
}

static int
drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh;

	bh = head;
	do {
		if (buffer_write_io_error(bh) && page->mapping)
			set_bit(AS_EIO, &page->mapping->flags);
		if (buffer_busy(bh))
			goto failed;
		bh = bh->b_this_page;
	} while (bh != head);

	do {
		struct buffer_head *next = bh->b_this_page;

		if (bh->b_assoc_map)
			__remove_assoc_queue(bh);
		bh = next;
	} while (bh != head);
	*buffers_to_free = head;
	__clear_page_buffers(page);
	return 1;
failed:
	return 0;
}

int try_to_free_buffers(struct page *page)
{
	struct address_space * const mapping = page->mapping;
	struct buffer_head *buffers_to_free = NULL;
	int ret = 0;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;

	if (mapping == NULL) {		/* can this still happen? */
		ret = drop_buffers(page, &buffers_to_free);
		goto out;
	}

	spin_lock(&mapping->private_lock);
	ret = drop_buffers(page, &buffers_to_free);

	/*
	 * If the filesystem writes its buffers by hand (eg ext3)
	 * then we can have clean buffers against a dirty page.  We
	 * clean the page here; otherwise the VM will never notice
	 * that the filesystem did any IO at all.
	 *
	 * Also, during truncate, discard_buffer will have marked all
	 * the page's buffers clean.  We discover that here and clean
	 * the page also.
	 *
	 * private_lock must be held over this entire operation in order
	 * to synchronise against __set_page_dirty_buffers and prevent the
	 * dirty bit from being lost.
	 */
	if (ret)
		cancel_dirty_page(page, PAGE_CACHE_SIZE);
	spin_unlock(&mapping->private_lock);
out:
	if (buffers_to_free) {
		struct buffer_head *bh = buffers_to_free;

		do {
			struct buffer_head *next = bh->b_this_page;
			free_buffer_head(bh);
			bh = next;
		} while (bh != buffers_to_free);
	}
	return ret;
}
EXPORT_SYMBOL(try_to_free_buffers);
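
/*
 * Example (illustrative sketch): for a simple filesystem, the
 * ->releasepage address_space operation can be a trivial wrapper around
 * try_to_free_buffers(); journalling filesystems (e.g. ext3) must first
 * ask the journal whether the buffers may go.
 */
static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
{
	return try_to_free_buffers(page);
}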

void block_sync_page(struct page *page)
{
	struct address_space *mapping;

	smp_mb();
	mapping = page_mapping(page);
	if (mapping)
		blk_run_backing_dev(mapping->backing_dev_info, page);
}
EXPORT_SYMBOL(block_sync_page);

/*
 * There are no bdflush tunables left.  But distributions are
 * still running obsolete flush daemons, so we terminate them here.
 *
 * Use of bdflush() is deprecated and will be removed in a future kernel.
 * The `flush-X' kernel threads fully replace bdflush daemons and this call.
 */
SYSCALL_DEFINE2(bdflush, int, func, long, data)
{
	static int msg_count;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (msg_count < 5) {
		msg_count++;
		printk(KERN_INFO
			"warning: process `%s' used the obsolete bdflush"
			" system call\n", current->comm);
		printk(KERN_INFO "Fix your initscripts?\n");
	}

	if (func == 1)
		do_exit(0);
	return 0;
}

/*
 * Buffer-head allocation
 */
static struct kmem_cache *bh_cachep;

/*
 * Once the number of bh's in the machine exceeds this level, we start
 * stripping them in writeback.
 */
static int max_buffer_heads;

int buffer_heads_over_limit;

struct bh_accounting {
	int nr;			/* Number of live bh's */
	int ratelimit;		/* Limit cacheline bouncing */
};

static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};

static void recalc_bh_state(void)
{
	int i;
	int tot = 0;

	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
		return;
	__get_cpu_var(bh_accounting).ratelimit = 0;
	for_each_online_cpu(i)
		tot += per_cpu(bh_accounting, i).nr;
	buffer_heads_over_limit = (tot > max_buffer_heads);
}

struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
{
	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
	if (ret) {
		INIT_LIST_HEAD(&ret->b_assoc_buffers);
		get_cpu_var(bh_accounting).nr++;
		recalc_bh_state();
		put_cpu_var(bh_accounting);
	}
	return ret;
}
EXPORT_SYMBOL(alloc_buffer_head);

void free_buffer_head(struct buffer_head *bh)
{
	BUG_ON(!list_empty(&bh->b_assoc_buffers));
	kmem_cache_free(bh_cachep, bh);
	get_cpu_var(bh_accounting).nr--;
	recalc_bh_state();
	put_cpu_var(bh_accounting);
}
EXPORT_SYMBOL(free_buffer_head);

static void buffer_exit_cpu(int cpu)
{
	int i;
	struct bh_lru *b = &per_cpu(bh_lrus, cpu);

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
	get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
	per_cpu(bh_accounting, cpu).nr = 0;
	put_cpu_var(bh_accounting);
}

static int buffer_cpu_notify(struct notifier_block *self,
			     unsigned long action, void *hcpu)
{
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
		buffer_exit_cpu((unsigned long)hcpu);
	return NOTIFY_OK;
}

/**
 * bh_uptodate_or_lock - Test whether the buffer is uptodate
 * @bh: struct buffer_head
 *
 * Return true if the buffer is up-to-date; otherwise return false
 * with the buffer locked.
 */
int bh_uptodate_or_lock(struct buffer_head *bh)
{
	if (!buffer_uptodate(bh)) {
		lock_buffer(bh);
		if (!buffer_uptodate(bh))
			return 0;
		unlock_buffer(bh);
	}
	return 1;
}
EXPORT_SYMBOL(bh_uptodate_or_lock);

/**
 * bh_submit_read - Submit a locked buffer for reading
 * @bh: struct buffer_head
 *
 * Returns zero on success and -EIO on error.
 */
int bh_submit_read(struct buffer_head *bh)
{
	BUG_ON(!buffer_locked(bh));

	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}

	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return 0;
	return -EIO;
}
EXPORT_SYMBOL(bh_submit_read);
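
/*
 * Example (illustrative sketch): the intended pairing of the two helpers
 * above -- skip the buffer lock in the common already-uptodate case and
 * fall back to a synchronous read otherwise.
 */
static int example_read_if_needed(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;		/* uptodate, buffer not locked */
	return bh_submit_read(bh);	/* consumes the lock; 0 or -EIO */
}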

void __init buffer_init(void)
{
	int nrpages;

	bh_cachep = kmem_cache_create("buffer_head",
			sizeof(struct buffer_head), 0,
				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
				SLAB_MEM_SPREAD),
				NULL);

	/*
	 * Limit the bh occupancy to 10% of ZONE_NORMAL
	 */
	nrpages = (nr_free_buffer_pages() * 10) / 100;
	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
	hotcpu_notifier(buffer_cpu_notify, 0);
}