#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/cleancache.h>
#include "extent_io.h"
#include "extent_map.h"
#include "ctree.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "check-integrity.h"
#include "locking.h"
#include "rcu-string.h"
#include "backref.h"

static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;
static struct bio_set *btrfs_bioset;

static inline bool extent_state_in_tree(const struct extent_state *state)
{
	return !RB_EMPTY_NODE(&state->rb_node);
}

#ifdef CONFIG_BTRFS_DEBUG
static LIST_HEAD(buffers);
static LIST_HEAD(states);

static DEFINE_SPINLOCK(leak_lock);

static inline
void btrfs_leak_debug_add(struct list_head *new, struct list_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(&leak_lock, flags);
	list_add(new, head);
	spin_unlock_irqrestore(&leak_lock, flags);
}

static inline
void btrfs_leak_debug_del(struct list_head *entry)
{
	unsigned long flags;

	spin_lock_irqsave(&leak_lock, flags);
	list_del(entry);
	spin_unlock_irqrestore(&leak_lock, flags);
}

static inline
void btrfs_leak_debug_check(void)
{
	struct extent_state *state;
	struct extent_buffer *eb;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, leak_list);
		pr_err("BTRFS: state leak: start %llu end %llu state %lu in tree %d refs %d\n",
		       state->start, state->end, state->state,
		       extent_state_in_tree(state),
		       atomic_read(&state->refs));
		list_del(&state->leak_list);
		kmem_cache_free(extent_state_cache, state);
	}

	while (!list_empty(&buffers)) {
		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
		printk(KERN_ERR "BTRFS: buffer leak start %llu len %lu "
		       "refs %d\n",
		       eb->start, eb->len, atomic_read(&eb->refs));
		list_del(&eb->leak_list);
		kmem_cache_free(extent_buffer_cache, eb);
	}
}

#define btrfs_debug_check_extent_io_range(tree, start, end)		\
	__btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
static inline void __btrfs_debug_check_extent_io_range(const char *caller,
		struct extent_io_tree *tree, u64 start, u64 end)
{
	struct inode *inode;
	u64 isize;

	if (!tree->mapping)
		return;

	inode = tree->mapping->host;
	isize = i_size_read(inode);
	if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
		printk_ratelimited(KERN_DEBUG
		    "BTRFS: %s: ino %llu isize %llu odd range [%llu,%llu]\n",
				caller, btrfs_ino(inode), isize, start, end);
	}
}
#else
#define btrfs_leak_debug_add(new, head)	do {} while (0)
#define btrfs_leak_debug_del(entry)	do {} while (0)
#define btrfs_leak_debug_check()	do {} while (0)
#define btrfs_debug_check_extent_io_range(c, s, e)	do {} while (0)
#endif

#define BUFFER_LRU_MAX 64

struct tree_entry {
	u64 start;
	u64 end;
	struct rb_node rb_node;
};

struct extent_page_data {
	struct bio *bio;
	struct extent_io_tree *tree;
	get_extent_t *get_extent;
	unsigned long bio_flags;

	/* tells writepage not to lock the state bits for this range;
	 * it still does the unlocking
	 */
	unsigned int extent_locked:1;

	/* tells the submit_bio code to use a WRITE_SYNC */
	unsigned int sync_io:1;
};

static noinline void flush_write_bio(void *data);
static inline struct btrfs_fs_info *
tree_fs_info(struct extent_io_tree *tree)
{
	if (!tree->mapping)
		return NULL;
	return btrfs_sb(tree->mapping->host->i_sb);
}

int __init extent_io_init(void)
{
	extent_state_cache = kmem_cache_create("btrfs_extent_state",
			sizeof(struct extent_state), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_state_cache)
		return -ENOMEM;

	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
			sizeof(struct extent_buffer), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_buffer_cache)
		goto free_state_cache;

	btrfs_bioset = bioset_create(BIO_POOL_SIZE,
				     offsetof(struct btrfs_io_bio, bio));
	if (!btrfs_bioset)
		goto free_buffer_cache;

	if (bioset_integrity_create(btrfs_bioset, BIO_POOL_SIZE))
		goto free_bioset;

	return 0;

free_bioset:
	bioset_free(btrfs_bioset);
	btrfs_bioset = NULL;

free_buffer_cache:
	kmem_cache_destroy(extent_buffer_cache);
	extent_buffer_cache = NULL;

free_state_cache:
	kmem_cache_destroy(extent_state_cache);
	extent_state_cache = NULL;
	return -ENOMEM;
}

void extent_io_exit(void)
{
	btrfs_leak_debug_check();

	/*
	 * Make sure all delayed rcu free are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
	if (extent_buffer_cache)
		kmem_cache_destroy(extent_buffer_cache);
	if (btrfs_bioset)
		bioset_free(btrfs_bioset);
}

void extent_io_tree_init(struct extent_io_tree *tree,
			 struct address_space *mapping)
{
	tree->state = RB_ROOT;
	tree->ops = NULL;
	tree->dirty_bytes = 0;
	spin_lock_init(&tree->lock);
	tree->mapping = mapping;
}

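/*
 * Example (illustrative): an extent_io_tree is normally embedded in a
 * larger structure and initialized once against the address_space whose
 * pages it tracks, e.g. for an inode's io_tree:
 *
 *	extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode->i_mapping);
 *
 * A NULL mapping is tolerated: tree_fs_info() and the debug range check
 * above simply bail out in that case.
 */
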
static struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state)
		return state;
	state->state = 0;
	state->private = 0;
	RB_CLEAR_NODE(&state->rb_node);
	btrfs_leak_debug_add(&state->leak_list, &states);
	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	trace_alloc_extent_state(state, mask, _RET_IP_);
	return state;
}

void free_extent_state(struct extent_state *state)
{
	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
		WARN_ON(extent_state_in_tree(state));
		btrfs_leak_debug_del(&state->leak_list);
		trace_free_extent_state(state, _RET_IP_);
		kmem_cache_free(extent_state_cache, state);
	}
}

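/*
 * Example (illustrative): extent_state objects are reference counted.
 * alloc_extent_state() hands back a state with refs == 1, and each
 * free_extent_state() drops one reference, freeing the object only when
 * the count reaches zero:
 *
 *	struct extent_state *state = alloc_extent_state(GFP_NOFS);
 *
 *	atomic_inc(&state->refs);	(a second holder, e.g. a cache)
 *	free_extent_state(state);	(refs 2 -> 1, still alive)
 *	free_extent_state(state);	(refs 1 -> 0, freed)
 */
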
static struct rb_node *tree_insert(struct rb_root *root,
				   struct rb_node *search_start,
				   u64 offset,
				   struct rb_node *node,
				   struct rb_node ***p_in,
				   struct rb_node **parent_in)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	if (p_in && parent_in) {
		p = *p_in;
		parent = *parent_in;
		goto do_insert;
	}

	p = search_start ? &search_start : &root->rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

do_insert:
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
				      struct rb_node **prev_ret,
				      struct rb_node **next_ret,
				      struct rb_node ***p_ret,
				      struct rb_node **parent_ret)
{
	struct rb_root *root = &tree->state;
	struct rb_node **n = &root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (*n) {
		prev = *n;
		entry = rb_entry(prev, struct tree_entry, rb_node);
		prev_entry = entry;

		if (offset < entry->start)
			n = &(*n)->rb_left;
		else if (offset > entry->end)
			n = &(*n)->rb_right;
		else
			return *n;
	}

	if (p_ret)
		*p_ret = n;
	if (parent_ret)
		*parent_ret = prev;

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset > prev_entry->end) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

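/*
 * Example (illustrative): with [0,9] and [20,29] in the tree,
 * __etree_search(tree, 15, &prev, &next, NULL, NULL) has no exact hit
 * and returns NULL, leaving *prev pointing at [20,29] (the first node
 * ending after offset 15) and *next at [0,9] (the nearest node before
 * it).  Despite the parameter names, prev_ret is what tree_search()
 * below returns as "the first extent that ends after our search offset".
 */
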
static inline struct rb_node *
tree_search_for_insert(struct extent_io_tree *tree,
		       u64 offset,
		       struct rb_node ***p_ret,
		       struct rb_node **parent_ret)
{
	struct rb_node *prev = NULL;
	struct rb_node *ret;

	ret = __etree_search(tree, offset, &prev, NULL, p_ret, parent_ret);
	if (!ret)
		return prev;
	return ret;
}

static inline struct rb_node *tree_search(struct extent_io_tree *tree,
					  u64 offset)
{
	return tree_search_for_insert(tree, offset, NULL, NULL);
}

static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
		     struct extent_state *other)
{
	if (tree->ops && tree->ops->merge_extent_hook)
		tree->ops->merge_extent_hook(tree->mapping->host, new,
					     other);
}

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IO in their state field
 * are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static void merge_state(struct extent_io_tree *tree,
			struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
		return;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			merge_cb(tree, state, other);
			state->start = other->start;
			rb_erase(&other->rb_node, &tree->state);
			RB_CLEAR_NODE(&other->rb_node);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			merge_cb(tree, state, other);
			state->end = other->end;
			rb_erase(&other->rb_node, &tree->state);
			RB_CLEAR_NODE(&other->rb_node);
			free_extent_state(other);
		}
	}
}

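/*
 * Example (illustrative): if the tree holds [0,4095] and [4096,8191]
 * and both records carry exactly the same state bits, merge_state() on
 * either one collapses them into a single [0,8191] record and drops the
 * absorbed extent_state.  Adjacency is byte exact
 * (other->end == state->start - 1); a one-byte gap or any differing bit
 * prevents the merge.
 */
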
static void set_state_cb(struct extent_io_tree *tree,
			 struct extent_state *state, unsigned long *bits)
{
	if (tree->ops && tree->ops->set_bit_hook)
		tree->ops->set_bit_hook(tree->mapping->host, state, bits);
}

static void clear_state_cb(struct extent_io_tree *tree,
			   struct extent_state *state, unsigned long *bits)
{
	if (tree->ops && tree->ops->clear_bit_hook)
		tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
}

static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state, unsigned long *bits);

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			struct rb_node ***p,
			struct rb_node **parent,
			unsigned long *bits)
{
	struct rb_node *node;

	if (end < start)
		WARN(1, KERN_ERR "BTRFS: end < start %llu %llu\n",
		       end, start);
	state->start = start;
	state->end = end;

	set_state_bits(tree, state, bits);

	node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk(KERN_ERR "BTRFS: found node %llu %llu on insert of "
		       "%llu %llu\n",
		       found->start, found->end, start, end);
		return -EEXIST;
	}
	merge_state(tree, state);
	return 0;
}

static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
		     u64 split)
{
	if (tree->ops && tree->ops->split_extent_hook)
		tree->ops->split_extent_hook(tree->mapping->host, orig, split);
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling, the tree has 'orig' at [orig->start, orig->end].
 * After calling, there are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [ split, orig->end ]
 *
 * The tree locks are not taken by this function.  They need to be held
 * by the caller.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;

	split_cb(tree, orig, split);

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, &orig->rb_node, prealloc->end,
			   &prealloc->rb_node, NULL, NULL);
	if (node) {
		free_extent_state(prealloc);
		return -EEXIST;
	}
	return 0;
}

static struct extent_state *next_state(struct extent_state *state)
{
	struct rb_node *next = rb_next(&state->rb_node);
	if (next)
		return rb_entry(next, struct extent_state, rb_node);
	else
		return NULL;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up anyone waiting on this state (wake == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree.
 */
static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
					    struct extent_state *state,
					    unsigned long *bits, int wake)
{
	struct extent_state *next;
	unsigned long bits_to_clear = *bits & ~EXTENT_CTLBITS;

	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		WARN_ON(range > tree->dirty_bytes);
		tree->dirty_bytes -= range;
	}
	clear_state_cb(tree, state, bits);
	state->state &= ~bits_to_clear;
	if (wake)
		wake_up(&state->wq);
	if (state->state == 0) {
		next = next_state(state);
		if (extent_state_in_tree(state)) {
			rb_erase(&state->rb_node, &tree->state);
			RB_CLEAR_NODE(&state->rb_node);
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
		next = next_state(state);
	}
	return next;
}

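/*
 * Example (illustrative): clear_state_bit() may free the state passed
 * in, so callers walk a range by always continuing from the returned
 * next state, never the old pointer, as clear_extent_bit() below does:
 *
 *	state = clear_state_bit(tree, state, &bits, wake);
 *	(only the returned state may be dereferenced from here on)
 */
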
static struct extent_state *
alloc_extent_state_atomic(struct extent_state *prealloc)
{
	if (!prealloc)
		prealloc = alloc_extent_state(GFP_ATOMIC);

	return prealloc;
}

static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
{
	btrfs_panic(tree_fs_info(tree), err, "Locking error: "
		    "Extent tree was modified by another "
		    "thread while locked.");
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (i.e. for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns 0 on success and < 0 on error.
 */
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     unsigned long bits, int wake, int delete,
		     struct extent_state **cached_state,
		     gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *cached;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	u64 last_end;
	int err;
	int clear = 0;

	btrfs_debug_check_extent_io_range(tree, start, end);

	if (bits & EXTENT_DELALLOC)
		bits |= EXTENT_NORESERVE;

	if (delete)
		bits |= ~EXTENT_CTLBITS;
	bits |= EXTENT_FIRST_DELALLOC;

	if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
		clear = 1;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock(&tree->lock);
	if (cached_state) {
		cached = *cached_state;

		if (clear) {
			*cached_state = NULL;
			cached_state = NULL;
		}

		if (cached && extent_state_in_tree(cached) &&
		    cached->start <= start && cached->end > start) {
			if (clear)
				atomic_dec(&cached->refs);
			state = cached;
			goto hit_next;
		}
		if (clear)
			free_extent_state(cached);
	}
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);
	last_end = state->end;

	/* the state doesn't have the wanted bits, go ahead */
	if (!(state->state & bits)) {
		state = next_state(state);
		goto next;
	}

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			state = clear_state_bit(tree, state, &bits, wake);
			goto next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		if (wake)
			wake_up(&state->wq);

		clear_state_bit(tree, prealloc, &bits, wake);

		prealloc = NULL;
		goto out;
	}

	state = clear_state_bit(tree, state, &bits, wake);
next:
	if (last_end == (u64)-1)
		goto out;
	start = last_end + 1;
	if (start <= end && state && !need_resched())
		goto hit_next;
	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return 0;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}

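/*
 * Example (illustrative): truncate-style teardown clears a range in one
 * call, waking any waiters and removing the states entirely:
 *
 *	clear_extent_bit(tree, start, end, EXTENT_LOCKED | EXTENT_DIRTY,
 *			 1, 1, NULL, GFP_NOFS);
 *
 * wake == 1 kicks sleepers in wait_extent_bit(), and delete == 1 widens
 * the mask (bits |= ~EXTENT_CTLBITS above) so the states are removed no
 * matter what else was set on them.
 */
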
static void wait_on_state(struct extent_io_tree *tree,
			  struct extent_state *state)
		__releases(tree->lock)
		__acquires(tree->lock)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&tree->lock);
	schedule();
	spin_lock(&tree->lock);
	finish_wait(&state->wq, &wait);
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
			    unsigned long bits)
{
	struct extent_state *state;
	struct rb_node *node;

	btrfs_debug_check_extent_io_range(tree, start, end);

	spin_lock(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(tree, start);
process_node:
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (!cond_resched_lock(&tree->lock)) {
			node = rb_next(node);
			goto process_node;
		}
	}
out:
	spin_unlock(&tree->lock);
}

static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state,
			   unsigned long *bits)
{
	unsigned long bits_to_set = *bits & ~EXTENT_CTLBITS;

	set_state_cb(tree, state, bits);
	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		tree->dirty_bytes += range;
	}
	state->state |= bits_to_set;
}

static void cache_state(struct extent_state *state,
			struct extent_state **cached_ptr)
{
	if (cached_ptr && !(*cached_ptr)) {
		if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
			*cached_ptr = state;
			atomic_inc(&state->refs);
		}
	}
}

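/*
 * Example (illustrative): a cached state lets back-to-back operations on
 * the same range skip the rbtree walk.  The usual pattern threads one
 * extent_state pointer through lock, work, and unlock:
 *
 *	struct extent_state *cached = NULL;
 *
 *	lock_extent_bits(tree, start, end, 0, &cached);
 *	(do work on [start, end])
 *	unlock_extent_cached(tree, start, end, &cached, GFP_NOFS);
 *
 * Both __set_extent_bit() and clear_extent_bit() try the cached node
 * first (see their hit_next shortcuts) before searching the tree.
 */
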
/*
 * set some bits on a range in the tree.  This may require allocations or
 * sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If any of the exclusive bits are set, this will fail with -EEXIST if some
 * part of the range already has the desired bits set.  The start of the
 * existing range is returned in failed_start in this case.
 *
 * [start, end] is inclusive.  This takes the tree lock.
 */

static int __must_check
__set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		 unsigned long bits, unsigned long exclusive_bits,
		 u64 *failed_start, struct extent_state **cached_state,
		 gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	struct rb_node **p;
	struct rb_node *parent;
	int err = 0;
	u64 last_start;
	u64 last_end;

	btrfs_debug_check_extent_io_range(tree, start, end);

	bits |= EXTENT_FIRST_DELALLOC;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		BUG_ON(!prealloc);
	}

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->start <= start && state->end > start &&
		    extent_state_in_tree(state)) {
			node = &state->rb_node;
			goto hit_next;
		}
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search_for_insert(tree, start, &p, &parent);
	if (!node) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = insert_state(tree, prealloc, start, end,
				   &p, &parent, &bits);
		if (err)
			extent_io_tree_panic(tree, err);

		cache_state(prealloc, cached_state);
		prealloc = NULL;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		if (state->state & exclusive_bits) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}

		set_state_bits(tree, state, &bits);
		cache_state(state, cached_state);
		merge_state(tree, state);
		if (last_end == (u64)-1)
			goto out;
		start = last_end + 1;
		state = next_state(state);
		if (start < end && state && state->start == start &&
		    !need_resched())
			goto hit_next;
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, &bits);
			cache_state(state, cached_state);
			merge_state(tree, state);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
			state = next_state(state);
			if (start < end && state && state->start == start &&
			    !need_resched())
				goto hit_next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);

		/*
		 * Avoid to free 'prealloc' if it can be merged with
		 * the later extent.
		 */
		err = insert_state(tree, prealloc, start, this_end,
				   NULL, NULL, &bits);
		if (err)
			extent_io_tree_panic(tree, err);

		cache_state(prealloc, cached_state);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		set_state_bits(tree, prealloc, &bits);
		cache_state(prealloc, cached_state);
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}

int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned long bits, u64 *failed_start,
		   struct extent_state **cached_state, gfp_t mask)
{
	return __set_extent_bit(tree, start, end, bits, 0, failed_start,
				cached_state, mask);
}

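/*
 * Example (illustrative): marking a range delalloc is a single call;
 * failed_start only matters when exclusive bits are requested, so most
 * callers pass NULL:
 *
 *	struct extent_state *cached = NULL;
 *	int ret;
 *
 *	ret = set_extent_bit(tree, start, end, EXTENT_DELALLOC,
 *			     NULL, &cached, GFP_NOFS);
 *
 * With exclusive_bits == 0, __set_extent_bit() cannot fail with -EEXIST.
 */
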
/**
 * convert_extent_bit - convert all bits in a given range from one bit to
 * 			another
 * @tree:	the io tree to search
 * @start:	the start offset in bytes
 * @end:	the end offset in bytes (inclusive)
 * @bits:	the bits to set in this range
 * @clear_bits:	the bits to clear in this range
 * @cached_state:	state that we're going to cache
 * @mask:	the allocation mask
 *
 * This will go through and set bits for the given range.  If any states exist
 * already in this range they are set with the given bit and cleared of the
 * clear_bits.  This is only meant to be used by things that are mergeable, ie
 * converting from say DELALLOC to DIRTY.  This is not meant to be used with
 * boundary bits like LOCK.
 */
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned long bits, unsigned long clear_bits,
		       struct extent_state **cached_state, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	struct rb_node **p;
	struct rb_node *parent;
	int err = 0;
	u64 last_start;
	u64 last_end;

	btrfs_debug_check_extent_io_range(tree, start, end);

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->start <= start && state->end > start &&
		    extent_state_in_tree(state)) {
			node = &state->rb_node;
			goto hit_next;
		}
	}

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search_for_insert(tree, start, &p, &parent);
	if (!node) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}
		err = insert_state(tree, prealloc, start, end,
				   &p, &parent, &bits);
		if (err)
			extent_io_tree_panic(tree, err);
		cache_state(prealloc, cached_state);
		prealloc = NULL;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		set_state_bits(tree, state, &bits);
		cache_state(state, cached_state);
		state = clear_state_bit(tree, state, &clear_bits, 0);
		if (last_end == (u64)-1)
			goto out;
		start = last_end + 1;
		if (start < end && state && state->start == start &&
		    !need_resched())
			goto hit_next;
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, &bits);
			cache_state(state, cached_state);
			state = clear_state_bit(tree, state, &clear_bits, 0);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
			if (start < end && state && state->start == start &&
			    !need_resched())
				goto hit_next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;

		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}

		/*
		 * Avoid to free 'prealloc' if it can be merged with
		 * the later extent.
		 */
		err = insert_state(tree, prealloc, start, this_end,
				   NULL, NULL, &bits);
		if (err)
			extent_io_tree_panic(tree, err);
		cache_state(prealloc, cached_state);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}

		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		set_state_bits(tree, prealloc, &bits);
		cache_state(prealloc, cached_state);
		clear_state_bit(tree, prealloc, &clear_bits, 0);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}

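/*
 * Example (illustrative): the DELALLOC-to-DIRTY conversion mentioned in
 * the docblock is one atomic pass under a single tree lock:
 *
 *	struct extent_state *cached = NULL;
 *
 *	convert_extent_bit(tree, start, end, EXTENT_DIRTY,
 *			   EXTENT_DELALLOC, &cached, GFP_NOFS);
 *	free_extent_state(cached);
 *
 * which sets EXTENT_DIRTY and clears EXTENT_DELALLOC on every state in
 * [start, end], instead of a set_extent_bit() + clear_extent_bit() pair
 * that could be observed half done.
 */
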
Chris Masond1310b22008-01-24 16:13:08 -05001240/* wrappers around set/clear extent bit */
1241int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
1242 gfp_t mask)
1243{
Jeff Mahoney3fbe5c02012-03-01 14:57:19 +01001244 return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
Chris Mason2c64c532009-09-02 15:04:12 -04001245 NULL, mask);
Chris Masond1310b22008-01-24 16:13:08 -05001246}
Chris Masond1310b22008-01-24 16:13:08 -05001247
1248int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
David Sterba41074882013-04-29 13:38:46 +00001249 unsigned long bits, gfp_t mask)
Chris Masond1310b22008-01-24 16:13:08 -05001250{
Jeff Mahoney3fbe5c02012-03-01 14:57:19 +01001251 return set_extent_bit(tree, start, end, bits, NULL,
Chris Mason2c64c532009-09-02 15:04:12 -04001252 NULL, mask);
Chris Masond1310b22008-01-24 16:13:08 -05001253}
Chris Masond1310b22008-01-24 16:13:08 -05001254
1255int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
David Sterba41074882013-04-29 13:38:46 +00001256 unsigned long bits, gfp_t mask)
Chris Masond1310b22008-01-24 16:13:08 -05001257{
Chris Mason2c64c532009-09-02 15:04:12 -04001258 return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
Chris Masond1310b22008-01-24 16:13:08 -05001259}
Chris Masond1310b22008-01-24 16:13:08 -05001260
1261int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
Josef Bacik2ac55d42010-02-03 19:33:23 +00001262 struct extent_state **cached_state, gfp_t mask)
Chris Masond1310b22008-01-24 16:13:08 -05001263{
1264 return set_extent_bit(tree, start, end,
Liu Bofee187d2011-09-29 15:55:28 +08001265 EXTENT_DELALLOC | EXTENT_UPTODATE,
Jeff Mahoney3fbe5c02012-03-01 14:57:19 +01001266 NULL, cached_state, mask);
Chris Masond1310b22008-01-24 16:13:08 -05001267}
Chris Masond1310b22008-01-24 16:13:08 -05001268
Liu Bo9e8a4a82012-09-05 19:10:51 -06001269int set_extent_defrag(struct extent_io_tree *tree, u64 start, u64 end,
1270 struct extent_state **cached_state, gfp_t mask)
1271{
1272 return set_extent_bit(tree, start, end,
1273 EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
1274 NULL, cached_state, mask);
1275}
1276
Chris Masond1310b22008-01-24 16:13:08 -05001277int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
1278 gfp_t mask)
1279{
1280 return clear_extent_bit(tree, start, end,
Josef Bacik32c00af2009-10-08 13:34:05 -04001281 EXTENT_DIRTY | EXTENT_DELALLOC |
Yan, Zheng0ca1f7c2010-05-16 10:48:47 -04001282 EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
Chris Masond1310b22008-01-24 16:13:08 -05001283}
Chris Masond1310b22008-01-24 16:13:08 -05001284
1285int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
1286 gfp_t mask)
1287{
Jeff Mahoney3fbe5c02012-03-01 14:57:19 +01001288 return set_extent_bit(tree, start, end, EXTENT_NEW, NULL,
Chris Mason2c64c532009-09-02 15:04:12 -04001289 NULL, mask);
Chris Masond1310b22008-01-24 16:13:08 -05001290}
Chris Masond1310b22008-01-24 16:13:08 -05001291
Chris Masond1310b22008-01-24 16:13:08 -05001292int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
Arne Jansen507903b2011-04-06 10:02:20 +00001293 struct extent_state **cached_state, gfp_t mask)
Chris Masond1310b22008-01-24 16:13:08 -05001294{
Liu Bo6b67a322013-03-28 08:30:28 +00001295 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
Jeff Mahoney3fbe5c02012-03-01 14:57:19 +01001296 cached_state, mask);
Chris Masond1310b22008-01-24 16:13:08 -05001297}
Chris Masond1310b22008-01-24 16:13:08 -05001298
Josef Bacik5fd02042012-05-02 14:00:54 -04001299int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
1300 struct extent_state **cached_state, gfp_t mask)
Chris Masond1310b22008-01-24 16:13:08 -05001301{
Chris Mason2c64c532009-09-02 15:04:12 -04001302 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
Josef Bacik2ac55d42010-02-03 19:33:23 +00001303 cached_state, mask);
Chris Masond1310b22008-01-24 16:13:08 -05001304}
Chris Masond1310b22008-01-24 16:13:08 -05001305
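/*
 * Illustrative note (not from the original source): each wrapper above is a
 * thin shim over set_extent_bit()/clear_extent_bit() with a fixed bit mask.
 * For example, a hypothetical caller marking a range dirty could write either
 * of these equivalent calls:
 *
 *	set_extent_dirty(tree, start, end, GFP_NOFS);
 *	set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL, NULL, GFP_NOFS);
 */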
Chris Masond352ac62008-09-29 15:18:18 -04001306/*
 1307 * either insert or lock the state struct between start and end; loops
 1308 * until the whole range is locked, waiting for conflicting locks to clear.
1309 */
Chris Mason1edbb732009-09-02 13:24:36 -04001310int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
David Sterba41074882013-04-29 13:38:46 +00001311 unsigned long bits, struct extent_state **cached_state)
Chris Masond1310b22008-01-24 16:13:08 -05001312{
1313 int err;
1314 u64 failed_start;
1315 while (1) {
Jeff Mahoney3fbe5c02012-03-01 14:57:19 +01001316 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
1317 EXTENT_LOCKED, &failed_start,
1318 cached_state, GFP_NOFS);
Jeff Mahoneyd0082372012-03-01 14:57:19 +01001319 if (err == -EEXIST) {
Chris Masond1310b22008-01-24 16:13:08 -05001320 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1321 start = failed_start;
Jeff Mahoneyd0082372012-03-01 14:57:19 +01001322 } else
Chris Masond1310b22008-01-24 16:13:08 -05001323 break;
Chris Masond1310b22008-01-24 16:13:08 -05001324 WARN_ON(start > end);
1325 }
1326 return err;
1327}
Chris Masond1310b22008-01-24 16:13:08 -05001328
Jeff Mahoneyd0082372012-03-01 14:57:19 +01001329int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
Chris Mason1edbb732009-09-02 13:24:36 -04001330{
Jeff Mahoneyd0082372012-03-01 14:57:19 +01001331 return lock_extent_bits(tree, start, end, 0, NULL);
Chris Mason1edbb732009-09-02 13:24:36 -04001332}
1333
Jeff Mahoneyd0082372012-03-01 14:57:19 +01001334int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
Josef Bacik25179202008-10-29 14:49:05 -04001335{
1336 int err;
1337 u64 failed_start;
1338
Jeff Mahoney3fbe5c02012-03-01 14:57:19 +01001339 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1340 &failed_start, NULL, GFP_NOFS);
Yan Zheng66435582008-10-30 14:19:50 -04001341 if (err == -EEXIST) {
1342 if (failed_start > start)
1343 clear_extent_bit(tree, start, failed_start - 1,
Jeff Mahoneyd0082372012-03-01 14:57:19 +01001344 EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
Josef Bacik25179202008-10-29 14:49:05 -04001345 return 0;
Yan Zheng66435582008-10-30 14:19:50 -04001346 }
Josef Bacik25179202008-10-29 14:49:05 -04001347 return 1;
1348}
Josef Bacik25179202008-10-29 14:49:05 -04001349
Chris Mason2c64c532009-09-02 15:04:12 -04001350int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
1351 struct extent_state **cached, gfp_t mask)
1352{
1353 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
1354 mask);
1355}
1356
Jeff Mahoneyd0082372012-03-01 14:57:19 +01001357int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
Chris Masond1310b22008-01-24 16:13:08 -05001358{
Chris Mason2c64c532009-09-02 15:04:12 -04001359 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
Jeff Mahoneyd0082372012-03-01 14:57:19 +01001360 GFP_NOFS);
Chris Masond1310b22008-01-24 16:13:08 -05001361}
Chris Masond1310b22008-01-24 16:13:08 -05001362
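/*
 * Usage sketch (illustrative, not part of the original file): callers pair
 * the lock/unlock helpers around a byte range, and may use try_lock_extent()
 * when sleeping on a conflicting lock is not acceptable:
 *
 *	lock_extent(tree, start, end);
 *	... operate on [start, end] while it is EXTENT_LOCKED ...
 *	unlock_extent(tree, start, end);
 *
 *	if (try_lock_extent(tree, start, end)) {
 *		... got the lock without waiting ...
 *		unlock_extent(tree, start, end);
 *	}
 */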
Chris Mason4adaa612013-03-26 13:07:00 -04001363int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
1364{
1365 unsigned long index = start >> PAGE_CACHE_SHIFT;
1366 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1367 struct page *page;
1368
1369 while (index <= end_index) {
1370 page = find_get_page(inode->i_mapping, index);
1371 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1372 clear_page_dirty_for_io(page);
1373 page_cache_release(page);
1374 index++;
1375 }
1376 return 0;
1377}
1378
1379int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
1380{
1381 unsigned long index = start >> PAGE_CACHE_SHIFT;
1382 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1383 struct page *page;
1384
1385 while (index <= end_index) {
1386 page = find_get_page(inode->i_mapping, index);
1387 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1388 account_page_redirty(page);
1389 __set_page_dirty_nobuffers(page);
1390 page_cache_release(page);
1391 index++;
1392 }
1393 return 0;
1394}
1395
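/*
 * Worked example (illustrative, assuming 4K pages so PAGE_CACHE_SHIFT is 12):
 * a byte range [8192, 20479] maps to page indexes 2 through 4 inclusive,
 * which is exactly the loop bound used by the two helpers above:
 *
 *	index     = 8192  >> PAGE_CACHE_SHIFT;	// 2
 *	end_index = 20479 >> PAGE_CACHE_SHIFT;	// 4
 */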
Chris Masond1310b22008-01-24 16:13:08 -05001396/*
Chris Masond1310b22008-01-24 16:13:08 -05001397 * helper function to set both pages and extents in the tree writeback
1398 */
Christoph Hellwigb2950862008-12-02 09:54:17 -05001399static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
Chris Masond1310b22008-01-24 16:13:08 -05001400{
1401 unsigned long index = start >> PAGE_CACHE_SHIFT;
1402 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1403 struct page *page;
1404
1405 while (index <= end_index) {
1406 page = find_get_page(tree->mapping, index);
Jeff Mahoney79787ea2012-03-12 16:03:00 +01001407 BUG_ON(!page); /* Pages should be in the extent_io_tree */
Chris Masond1310b22008-01-24 16:13:08 -05001408 set_page_writeback(page);
1409 page_cache_release(page);
1410 index++;
1411 }
Chris Masond1310b22008-01-24 16:13:08 -05001412 return 0;
1413}
Chris Masond1310b22008-01-24 16:13:08 -05001414
Chris Masond352ac62008-09-29 15:18:18 -04001415/* find the first state struct with 'bits' set after 'start', and
 1416 * return it. tree->lock must be held. NULL will be returned if
1417 * nothing was found after 'start'
1418 */
Eric Sandeen48a3b632013-04-25 20:41:01 +00001419static struct extent_state *
1420find_first_extent_bit_state(struct extent_io_tree *tree,
David Sterba41074882013-04-29 13:38:46 +00001421 u64 start, unsigned long bits)
Chris Masond7fc6402008-02-18 12:12:38 -05001422{
1423 struct rb_node *node;
1424 struct extent_state *state;
1425
1426 /*
1427 * this search will find all the extents that end after
1428 * our range starts.
1429 */
1430 node = tree_search(tree, start);
Chris Masond3977122009-01-05 21:25:51 -05001431 if (!node)
Chris Masond7fc6402008-02-18 12:12:38 -05001432 goto out;
Chris Masond7fc6402008-02-18 12:12:38 -05001433
Chris Masond3977122009-01-05 21:25:51 -05001434 while (1) {
Chris Masond7fc6402008-02-18 12:12:38 -05001435 state = rb_entry(node, struct extent_state, rb_node);
Chris Masond3977122009-01-05 21:25:51 -05001436 if (state->end >= start && (state->state & bits))
Chris Masond7fc6402008-02-18 12:12:38 -05001437 return state;
Chris Masond3977122009-01-05 21:25:51 -05001438
Chris Masond7fc6402008-02-18 12:12:38 -05001439 node = rb_next(node);
1440 if (!node)
1441 break;
1442 }
1443out:
1444 return NULL;
1445}
Chris Masond7fc6402008-02-18 12:12:38 -05001446
Chris Masond352ac62008-09-29 15:18:18 -04001447/*
Xiao Guangrong69261c42011-07-14 03:19:45 +00001448 * find the first offset in the io tree with 'bits' set. zero is
1449 * returned if we find something, and *start_ret and *end_ret are
1450 * set to reflect the state struct that was found.
1451 *
Wang Sheng-Hui477d7ea2012-04-06 14:35:47 +08001452 * If nothing was found, 1 is returned. If something was found, 0 is returned.
Xiao Guangrong69261c42011-07-14 03:19:45 +00001453 */
1454int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
David Sterba41074882013-04-29 13:38:46 +00001455 u64 *start_ret, u64 *end_ret, unsigned long bits,
Josef Bacike6138872012-09-27 17:07:30 -04001456 struct extent_state **cached_state)
Xiao Guangrong69261c42011-07-14 03:19:45 +00001457{
1458 struct extent_state *state;
Josef Bacike6138872012-09-27 17:07:30 -04001459 struct rb_node *n;
Xiao Guangrong69261c42011-07-14 03:19:45 +00001460 int ret = 1;
1461
1462 spin_lock(&tree->lock);
Josef Bacike6138872012-09-27 17:07:30 -04001463 if (cached_state && *cached_state) {
1464 state = *cached_state;
Filipe Manana27a35072014-07-06 20:09:59 +01001465 if (state->end == start - 1 && extent_state_in_tree(state)) {
Josef Bacike6138872012-09-27 17:07:30 -04001466 n = rb_next(&state->rb_node);
1467 while (n) {
1468 state = rb_entry(n, struct extent_state,
1469 rb_node);
1470 if (state->state & bits)
1471 goto got_it;
1472 n = rb_next(n);
1473 }
1474 free_extent_state(*cached_state);
1475 *cached_state = NULL;
1476 goto out;
1477 }
1478 free_extent_state(*cached_state);
1479 *cached_state = NULL;
1480 }
1481
Xiao Guangrong69261c42011-07-14 03:19:45 +00001482 state = find_first_extent_bit_state(tree, start, bits);
Josef Bacike6138872012-09-27 17:07:30 -04001483got_it:
Xiao Guangrong69261c42011-07-14 03:19:45 +00001484 if (state) {
Josef Bacike6138872012-09-27 17:07:30 -04001485 cache_state(state, cached_state);
Xiao Guangrong69261c42011-07-14 03:19:45 +00001486 *start_ret = state->start;
1487 *end_ret = state->end;
1488 ret = 0;
1489 }
Josef Bacike6138872012-09-27 17:07:30 -04001490out:
Xiao Guangrong69261c42011-07-14 03:19:45 +00001491 spin_unlock(&tree->lock);
1492 return ret;
1493}
1494
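/*
 * Usage sketch (illustrative, not from the original source): scanning all
 * ranges with a bit set, restarting each search just past the previous hit:
 *
 *	u64 start = 0, found_start, found_end;
 *
 *	while (!find_first_extent_bit(tree, start, &found_start, &found_end,
 *				      EXTENT_DIRTY, NULL)) {
 *		... process [found_start, found_end] ...
 *		start = found_end + 1;
 *	}
 */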
1495/*
Chris Masond352ac62008-09-29 15:18:18 -04001496 * find a contiguous range of bytes in the file marked as delalloc, not
 1497 * more than 'max_bytes'. start and end are used to return the range.
1498 *
1499 * 1 is returned if we find something, 0 if nothing was in the tree
1500 */
Chris Masonc8b97812008-10-29 14:49:59 -04001501static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
Josef Bacikc2a128d2010-02-02 21:19:11 +00001502 u64 *start, u64 *end, u64 max_bytes,
1503 struct extent_state **cached_state)
Chris Masond1310b22008-01-24 16:13:08 -05001504{
1505 struct rb_node *node;
1506 struct extent_state *state;
1507 u64 cur_start = *start;
1508 u64 found = 0;
1509 u64 total_bytes = 0;
1510
Chris Masoncad321a2008-12-17 14:51:42 -05001511 spin_lock(&tree->lock);
Chris Masonc8b97812008-10-29 14:49:59 -04001512
Chris Masond1310b22008-01-24 16:13:08 -05001513 /*
1514 * this search will find all the extents that end after
1515 * our range starts.
1516 */
Chris Mason80ea96b2008-02-01 14:51:59 -05001517 node = tree_search(tree, cur_start);
Peter2b114d12008-04-01 11:21:40 -04001518 if (!node) {
Chris Mason3b951512008-04-17 11:29:12 -04001519 if (!found)
1520 *end = (u64)-1;
Chris Masond1310b22008-01-24 16:13:08 -05001521 goto out;
1522 }
1523
Chris Masond3977122009-01-05 21:25:51 -05001524 while (1) {
Chris Masond1310b22008-01-24 16:13:08 -05001525 state = rb_entry(node, struct extent_state, rb_node);
Zheng Yan5b21f2e2008-09-26 10:05:38 -04001526 if (found && (state->start != cur_start ||
1527 (state->state & EXTENT_BOUNDARY))) {
Chris Masond1310b22008-01-24 16:13:08 -05001528 goto out;
1529 }
1530 if (!(state->state & EXTENT_DELALLOC)) {
1531 if (!found)
1532 *end = state->end;
1533 goto out;
1534 }
Josef Bacikc2a128d2010-02-02 21:19:11 +00001535 if (!found) {
Chris Masond1310b22008-01-24 16:13:08 -05001536 *start = state->start;
Josef Bacikc2a128d2010-02-02 21:19:11 +00001537 *cached_state = state;
1538 atomic_inc(&state->refs);
1539 }
Chris Masond1310b22008-01-24 16:13:08 -05001540 found++;
1541 *end = state->end;
1542 cur_start = state->end + 1;
1543 node = rb_next(node);
Chris Masond1310b22008-01-24 16:13:08 -05001544 total_bytes += state->end - state->start + 1;
Josef Bacik7bf811a52013-10-07 22:11:09 -04001545 if (total_bytes >= max_bytes)
Josef Bacik573aeca2013-08-30 14:38:49 -04001546 break;
Josef Bacik573aeca2013-08-30 14:38:49 -04001547 if (!node)
Chris Masond1310b22008-01-24 16:13:08 -05001548 break;
1549 }
1550out:
Chris Masoncad321a2008-12-17 14:51:42 -05001551 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001552 return found;
1553}
1554
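/*
 * Illustrative note (not part of the original file): the search above stops
 * at the first gap, the first non-delalloc state, or an EXTENT_BOUNDARY, and
 * breaks once total_bytes reaches max_bytes, so *end may overshoot max_bytes
 * by up to one state. A hypothetical caller pattern:
 *
 *	u64 start = offset, end = 0;
 *	struct extent_state *cached = NULL;
 *
 *	if (find_delalloc_range(tree, &start, &end, max_bytes, &cached)) {
 *		... [start, end] is a delalloc run; cached holds a reference
 *		    that must be dropped with free_extent_state() ...
 *	}
 */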
Jeff Mahoney143bede2012-03-01 14:56:26 +01001555static noinline void __unlock_for_delalloc(struct inode *inode,
1556 struct page *locked_page,
1557 u64 start, u64 end)
Chris Masonc8b97812008-10-29 14:49:59 -04001558{
1559 int ret;
1560 struct page *pages[16];
1561 unsigned long index = start >> PAGE_CACHE_SHIFT;
1562 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1563 unsigned long nr_pages = end_index - index + 1;
1564 int i;
1565
1566 if (index == locked_page->index && end_index == index)
Jeff Mahoney143bede2012-03-01 14:56:26 +01001567 return;
Chris Masonc8b97812008-10-29 14:49:59 -04001568
Chris Masond3977122009-01-05 21:25:51 -05001569 while (nr_pages > 0) {
Chris Masonc8b97812008-10-29 14:49:59 -04001570 ret = find_get_pages_contig(inode->i_mapping, index,
Chris Mason5b050f02008-11-11 09:34:41 -05001571 min_t(unsigned long, nr_pages,
1572 ARRAY_SIZE(pages)), pages);
Chris Masonc8b97812008-10-29 14:49:59 -04001573 for (i = 0; i < ret; i++) {
1574 if (pages[i] != locked_page)
1575 unlock_page(pages[i]);
1576 page_cache_release(pages[i]);
1577 }
1578 nr_pages -= ret;
1579 index += ret;
1580 cond_resched();
1581 }
Chris Masonc8b97812008-10-29 14:49:59 -04001582}
1583
1584static noinline int lock_delalloc_pages(struct inode *inode,
1585 struct page *locked_page,
1586 u64 delalloc_start,
1587 u64 delalloc_end)
1588{
1589 unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
1590 unsigned long start_index = index;
1591 unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
1592 unsigned long pages_locked = 0;
1593 struct page *pages[16];
1594 unsigned long nrpages;
1595 int ret;
1596 int i;
1597
1598 /* the caller is responsible for locking the start index */
1599 if (index == locked_page->index && index == end_index)
1600 return 0;
1601
1602 /* skip the page at the start index */
1603 nrpages = end_index - index + 1;
Chris Masond3977122009-01-05 21:25:51 -05001604 while (nrpages > 0) {
Chris Masonc8b97812008-10-29 14:49:59 -04001605 ret = find_get_pages_contig(inode->i_mapping, index,
Chris Mason5b050f02008-11-11 09:34:41 -05001606 min_t(unsigned long,
1607 nrpages, ARRAY_SIZE(pages)), pages);
Chris Masonc8b97812008-10-29 14:49:59 -04001608 if (ret == 0) {
1609 ret = -EAGAIN;
1610 goto done;
1611 }
1612 /* now we have an array of pages, lock them all */
1613 for (i = 0; i < ret; i++) {
1614 /*
1615 * the caller is taking responsibility for
1616 * locked_page
1617 */
Chris Mason771ed682008-11-06 22:02:51 -05001618 if (pages[i] != locked_page) {
Chris Masonc8b97812008-10-29 14:49:59 -04001619 lock_page(pages[i]);
Chris Masonf2b1c412008-11-10 07:31:30 -05001620 if (!PageDirty(pages[i]) ||
1621 pages[i]->mapping != inode->i_mapping) {
Chris Mason771ed682008-11-06 22:02:51 -05001622 ret = -EAGAIN;
1623 unlock_page(pages[i]);
1624 page_cache_release(pages[i]);
1625 goto done;
1626 }
1627 }
Chris Masonc8b97812008-10-29 14:49:59 -04001628 page_cache_release(pages[i]);
Chris Mason771ed682008-11-06 22:02:51 -05001629 pages_locked++;
Chris Masonc8b97812008-10-29 14:49:59 -04001630 }
Chris Masonc8b97812008-10-29 14:49:59 -04001631 nrpages -= ret;
1632 index += ret;
1633 cond_resched();
1634 }
1635 ret = 0;
1636done:
1637 if (ret && pages_locked) {
1638 __unlock_for_delalloc(inode, locked_page,
1639 delalloc_start,
1640 ((u64)(start_index + pages_locked - 1)) <<
1641 PAGE_CACHE_SHIFT);
1642 }
1643 return ret;
1644}
1645
1646/*
1647 * find a contiguous range of bytes in the file marked as delalloc, not
 1648 * more than 'max_bytes'. start and end are used to return the range.
1649 *
1650 * 1 is returned if we find something, 0 if nothing was in the tree
1651 */
Josef Bacik294e30f2013-10-09 12:00:56 -04001652STATIC u64 find_lock_delalloc_range(struct inode *inode,
1653 struct extent_io_tree *tree,
1654 struct page *locked_page, u64 *start,
1655 u64 *end, u64 max_bytes)
Chris Masonc8b97812008-10-29 14:49:59 -04001656{
1657 u64 delalloc_start;
1658 u64 delalloc_end;
1659 u64 found;
Chris Mason9655d292009-09-02 15:22:30 -04001660 struct extent_state *cached_state = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04001661 int ret;
1662 int loops = 0;
1663
1664again:
1665 /* step one, find a bunch of delalloc bytes starting at start */
1666 delalloc_start = *start;
1667 delalloc_end = 0;
1668 found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
Josef Bacikc2a128d2010-02-02 21:19:11 +00001669 max_bytes, &cached_state);
Chris Mason70b99e62008-10-31 12:46:39 -04001670 if (!found || delalloc_end <= *start) {
Chris Masonc8b97812008-10-29 14:49:59 -04001671 *start = delalloc_start;
1672 *end = delalloc_end;
Josef Bacikc2a128d2010-02-02 21:19:11 +00001673 free_extent_state(cached_state);
Liu Bo385fe0b2013-10-01 23:49:49 +08001674 return 0;
Chris Masonc8b97812008-10-29 14:49:59 -04001675 }
1676
1677 /*
Chris Mason70b99e62008-10-31 12:46:39 -04001678 * start comes from the offset of locked_page. We have to lock
1679 * pages in order, so we can't process delalloc bytes before
1680 * locked_page
1681 */
Chris Masond3977122009-01-05 21:25:51 -05001682 if (delalloc_start < *start)
Chris Mason70b99e62008-10-31 12:46:39 -04001683 delalloc_start = *start;
Chris Mason70b99e62008-10-31 12:46:39 -04001684
1685 /*
Chris Masonc8b97812008-10-29 14:49:59 -04001686 * make sure to limit the number of pages we try to lock down
Chris Masonc8b97812008-10-29 14:49:59 -04001687 */
Josef Bacik7bf811a52013-10-07 22:11:09 -04001688 if (delalloc_end + 1 - delalloc_start > max_bytes)
1689 delalloc_end = delalloc_start + max_bytes - 1;
Chris Masond3977122009-01-05 21:25:51 -05001690
Chris Masonc8b97812008-10-29 14:49:59 -04001691 /* step two, lock all the pages after the page that has start */
1692 ret = lock_delalloc_pages(inode, locked_page,
1693 delalloc_start, delalloc_end);
1694 if (ret == -EAGAIN) {
1695 /* some of the pages are gone, lets avoid looping by
1696 * shortening the size of the delalloc range we're searching
1697 */
Chris Mason9655d292009-09-02 15:22:30 -04001698 free_extent_state(cached_state);
Chris Mason7d788742014-05-21 05:49:54 -07001699 cached_state = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04001700 if (!loops) {
Josef Bacik7bf811a52013-10-07 22:11:09 -04001701 max_bytes = PAGE_CACHE_SIZE;
Chris Masonc8b97812008-10-29 14:49:59 -04001702 loops = 1;
1703 goto again;
1704 } else {
1705 found = 0;
1706 goto out_failed;
1707 }
1708 }
Jeff Mahoney79787ea2012-03-12 16:03:00 +01001709 BUG_ON(ret); /* Only valid values are 0 and -EAGAIN */
Chris Masonc8b97812008-10-29 14:49:59 -04001710
1711 /* step three, lock the state bits for the whole range */
Jeff Mahoneyd0082372012-03-01 14:57:19 +01001712 lock_extent_bits(tree, delalloc_start, delalloc_end, 0, &cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001713
1714 /* then test to make sure it is all still delalloc */
1715 ret = test_range_bit(tree, delalloc_start, delalloc_end,
Chris Mason9655d292009-09-02 15:22:30 -04001716 EXTENT_DELALLOC, 1, cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001717 if (!ret) {
Chris Mason9655d292009-09-02 15:22:30 -04001718 unlock_extent_cached(tree, delalloc_start, delalloc_end,
1719 &cached_state, GFP_NOFS);
Chris Masonc8b97812008-10-29 14:49:59 -04001720 __unlock_for_delalloc(inode, locked_page,
1721 delalloc_start, delalloc_end);
1722 cond_resched();
1723 goto again;
1724 }
Chris Mason9655d292009-09-02 15:22:30 -04001725 free_extent_state(cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001726 *start = delalloc_start;
1727 *end = delalloc_end;
1728out_failed:
1729 return found;
1730}
1731
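/*
 * Illustrative summary (not part of the original file) of the locking dance
 * above: the candidate range is found, its pages are locked, the extent
 * state is locked, and only then is the range re-verified, restarting if it
 * changed underneath us:
 *
 *	found = find_delalloc_range(...);	// step one: candidate range
 *	lock_delalloc_pages(...);		// step two: page locks
 *	lock_extent_bits(...);			// step three: state lock
 *	if (!test_range_bit(..., EXTENT_DELALLOC, 1, cached))
 *		goto again;			// raced; unlock and retry
 */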
Josef Bacikc2790a22013-07-29 11:20:47 -04001732int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
1733 struct page *locked_page,
1734 unsigned long clear_bits,
1735 unsigned long page_ops)
Chris Masonc8b97812008-10-29 14:49:59 -04001736{
Josef Bacikc2790a22013-07-29 11:20:47 -04001737 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
Chris Masonc8b97812008-10-29 14:49:59 -04001738 int ret;
1739 struct page *pages[16];
1740 unsigned long index = start >> PAGE_CACHE_SHIFT;
1741 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1742 unsigned long nr_pages = end_index - index + 1;
1743 int i;
Chris Mason771ed682008-11-06 22:02:51 -05001744
Chris Mason2c64c532009-09-02 15:04:12 -04001745 clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
Josef Bacikc2790a22013-07-29 11:20:47 -04001746 if (page_ops == 0)
Chris Mason771ed682008-11-06 22:02:51 -05001747 return 0;
Chris Masonc8b97812008-10-29 14:49:59 -04001748
Chris Masond3977122009-01-05 21:25:51 -05001749 while (nr_pages > 0) {
Chris Masonc8b97812008-10-29 14:49:59 -04001750 ret = find_get_pages_contig(inode->i_mapping, index,
Chris Mason5b050f02008-11-11 09:34:41 -05001751 min_t(unsigned long,
1752 nr_pages, ARRAY_SIZE(pages)), pages);
Chris Masonc8b97812008-10-29 14:49:59 -04001753 for (i = 0; i < ret; i++) {
Chris Mason8b62b722009-09-02 16:53:46 -04001754
Josef Bacikc2790a22013-07-29 11:20:47 -04001755 if (page_ops & PAGE_SET_PRIVATE2)
Chris Mason8b62b722009-09-02 16:53:46 -04001756 SetPagePrivate2(pages[i]);
1757
Chris Masonc8b97812008-10-29 14:49:59 -04001758 if (pages[i] == locked_page) {
1759 page_cache_release(pages[i]);
1760 continue;
1761 }
Josef Bacikc2790a22013-07-29 11:20:47 -04001762 if (page_ops & PAGE_CLEAR_DIRTY)
Chris Masonc8b97812008-10-29 14:49:59 -04001763 clear_page_dirty_for_io(pages[i]);
Josef Bacikc2790a22013-07-29 11:20:47 -04001764 if (page_ops & PAGE_SET_WRITEBACK)
Chris Masonc8b97812008-10-29 14:49:59 -04001765 set_page_writeback(pages[i]);
Josef Bacikc2790a22013-07-29 11:20:47 -04001766 if (page_ops & PAGE_END_WRITEBACK)
Chris Masonc8b97812008-10-29 14:49:59 -04001767 end_page_writeback(pages[i]);
Josef Bacikc2790a22013-07-29 11:20:47 -04001768 if (page_ops & PAGE_UNLOCK)
Chris Mason771ed682008-11-06 22:02:51 -05001769 unlock_page(pages[i]);
Chris Masonc8b97812008-10-29 14:49:59 -04001770 page_cache_release(pages[i]);
1771 }
1772 nr_pages -= ret;
1773 index += ret;
1774 cond_resched();
1775 }
1776 return 0;
1777}
Chris Masonc8b97812008-10-29 14:49:59 -04001778
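/*
 * Usage sketch (illustrative, not from the original source): clear_bits
 * selects extent-state bits to drop while page_ops selects per-page actions;
 * a hypothetical error path might clear state and release the pages in one
 * call:
 *
 *	extent_clear_unlock_delalloc(inode, start, end, locked_page,
 *				     EXTENT_LOCKED | EXTENT_DELALLOC,
 *				     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
 *				     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK);
 */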
Chris Masond352ac62008-09-29 15:18:18 -04001779/*
 1780 * count the number of bytes in the tree that have the given bit(s)
1781 * set. This can be fairly slow, except for EXTENT_DIRTY which is
1782 * cached. The total number found is returned.
1783 */
Chris Masond1310b22008-01-24 16:13:08 -05001784u64 count_range_bits(struct extent_io_tree *tree,
1785 u64 *start, u64 search_end, u64 max_bytes,
Chris Masonec29ed52011-02-23 16:23:20 -05001786 unsigned long bits, int contig)
Chris Masond1310b22008-01-24 16:13:08 -05001787{
1788 struct rb_node *node;
1789 struct extent_state *state;
1790 u64 cur_start = *start;
1791 u64 total_bytes = 0;
Chris Masonec29ed52011-02-23 16:23:20 -05001792 u64 last = 0;
Chris Masond1310b22008-01-24 16:13:08 -05001793 int found = 0;
1794
Dulshani Gunawardhanafae7f212013-10-31 10:30:08 +05301795 if (WARN_ON(search_end <= cur_start))
Chris Masond1310b22008-01-24 16:13:08 -05001796 return 0;
Chris Masond1310b22008-01-24 16:13:08 -05001797
Chris Masoncad321a2008-12-17 14:51:42 -05001798 spin_lock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001799 if (cur_start == 0 && bits == EXTENT_DIRTY) {
1800 total_bytes = tree->dirty_bytes;
1801 goto out;
1802 }
1803 /*
1804 * this search will find all the extents that end after
1805 * our range starts.
1806 */
Chris Mason80ea96b2008-02-01 14:51:59 -05001807 node = tree_search(tree, cur_start);
Chris Masond3977122009-01-05 21:25:51 -05001808 if (!node)
Chris Masond1310b22008-01-24 16:13:08 -05001809 goto out;
Chris Masond1310b22008-01-24 16:13:08 -05001810
Chris Masond3977122009-01-05 21:25:51 -05001811 while (1) {
Chris Masond1310b22008-01-24 16:13:08 -05001812 state = rb_entry(node, struct extent_state, rb_node);
1813 if (state->start > search_end)
1814 break;
Chris Masonec29ed52011-02-23 16:23:20 -05001815 if (contig && found && state->start > last + 1)
1816 break;
1817 if (state->end >= cur_start && (state->state & bits) == bits) {
Chris Masond1310b22008-01-24 16:13:08 -05001818 total_bytes += min(search_end, state->end) + 1 -
1819 max(cur_start, state->start);
1820 if (total_bytes >= max_bytes)
1821 break;
1822 if (!found) {
Josef Bacikaf60bed2011-05-04 11:11:17 -04001823 *start = max(cur_start, state->start);
Chris Masond1310b22008-01-24 16:13:08 -05001824 found = 1;
1825 }
Chris Masonec29ed52011-02-23 16:23:20 -05001826 last = state->end;
1827 } else if (contig && found) {
1828 break;
Chris Masond1310b22008-01-24 16:13:08 -05001829 }
1830 node = rb_next(node);
1831 if (!node)
1832 break;
1833 }
1834out:
Chris Masoncad321a2008-12-17 14:51:42 -05001835 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001836 return total_bytes;
1837}
Christoph Hellwigb2950862008-12-02 09:54:17 -05001838
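/*
 * Usage sketch (illustrative, not part of the original file): counting dirty
 * bytes from offset zero hits the cached fast path above, since
 * cur_start == 0 && bits == EXTENT_DIRTY short-circuits to tree->dirty_bytes:
 *
 *	u64 start = 0;
 *	u64 dirty = count_range_bits(tree, &start, (u64)-1, (u64)-1,
 *				     EXTENT_DIRTY, 0);
 */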
Chris Masond352ac62008-09-29 15:18:18 -04001839/*
1840 * set the private field for a given byte offset in the tree. If there isn't
 1841 * an extent_state starting at that offset, -ENOENT is returned.
1842 */
Sergei Trofimovich171170c2013-08-14 23:27:46 +03001843static int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
Chris Masond1310b22008-01-24 16:13:08 -05001844{
1845 struct rb_node *node;
1846 struct extent_state *state;
1847 int ret = 0;
1848
Chris Masoncad321a2008-12-17 14:51:42 -05001849 spin_lock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001850 /*
1851 * this search will find all the extents that end after
1852 * our range starts.
1853 */
Chris Mason80ea96b2008-02-01 14:51:59 -05001854 node = tree_search(tree, start);
Peter2b114d12008-04-01 11:21:40 -04001855 if (!node) {
Chris Masond1310b22008-01-24 16:13:08 -05001856 ret = -ENOENT;
1857 goto out;
1858 }
1859 state = rb_entry(node, struct extent_state, rb_node);
1860 if (state->start != start) {
1861 ret = -ENOENT;
1862 goto out;
1863 }
1864 state->private = private;
1865out:
Chris Masoncad321a2008-12-17 14:51:42 -05001866 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001867 return ret;
1868}
1869
1870int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1871{
1872 struct rb_node *node;
1873 struct extent_state *state;
1874 int ret = 0;
1875
Chris Masoncad321a2008-12-17 14:51:42 -05001876 spin_lock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001877 /*
1878 * this search will find all the extents that end after
1879 * our range starts.
1880 */
Chris Mason80ea96b2008-02-01 14:51:59 -05001881 node = tree_search(tree, start);
Peter2b114d12008-04-01 11:21:40 -04001882 if (!node) {
Chris Masond1310b22008-01-24 16:13:08 -05001883 ret = -ENOENT;
1884 goto out;
1885 }
1886 state = rb_entry(node, struct extent_state, rb_node);
1887 if (state->start != start) {
1888 ret = -ENOENT;
1889 goto out;
1890 }
1891 *private = state->private;
1892out:
Chris Masoncad321a2008-12-17 14:51:42 -05001893 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001894 return ret;
1895}
1896
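/*
 * Illustrative pairing (not part of the original source): the private field
 * round-trips a caller-defined value keyed by the state's start offset; the
 * failure-tree code below stores a pointer-sized record this way:
 *
 *	set_state_private(tree, start, (u64)(unsigned long)rec);
 *	...
 *	u64 private;
 *	if (!get_state_private(tree, start, &private))
 *		rec = (struct io_failure_record *)(unsigned long)private;
 */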
1897/*
1898 * searches a range in the state tree for a given mask.
Chris Mason70dec802008-01-29 09:59:12 -05001899 * If 'filled' == 1, this returns 1 only if every extent in the range
Chris Masond1310b22008-01-24 16:13:08 -05001900 * has the bits set. Otherwise, 1 is returned if any bit in the
1901 * range is found set.
1902 */
1903int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
David Sterba41074882013-04-29 13:38:46 +00001904 unsigned long bits, int filled, struct extent_state *cached)
Chris Masond1310b22008-01-24 16:13:08 -05001905{
1906 struct extent_state *state = NULL;
1907 struct rb_node *node;
1908 int bitset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05001909
Chris Masoncad321a2008-12-17 14:51:42 -05001910 spin_lock(&tree->lock);
Filipe Manana27a35072014-07-06 20:09:59 +01001911 if (cached && extent_state_in_tree(cached) && cached->start <= start &&
Josef Bacikdf98b6e2011-06-20 14:53:48 -04001912 cached->end > start)
Chris Mason9655d292009-09-02 15:22:30 -04001913 node = &cached->rb_node;
1914 else
1915 node = tree_search(tree, start);
Chris Masond1310b22008-01-24 16:13:08 -05001916 while (node && start <= end) {
1917 state = rb_entry(node, struct extent_state, rb_node);
1918
1919 if (filled && state->start > start) {
1920 bitset = 0;
1921 break;
1922 }
1923
1924 if (state->start > end)
1925 break;
1926
1927 if (state->state & bits) {
1928 bitset = 1;
1929 if (!filled)
1930 break;
1931 } else if (filled) {
1932 bitset = 0;
1933 break;
1934 }
Chris Mason46562cec2009-09-23 20:23:16 -04001935
1936 if (state->end == (u64)-1)
1937 break;
1938
Chris Masond1310b22008-01-24 16:13:08 -05001939 start = state->end + 1;
1940 if (start > end)
1941 break;
1942 node = rb_next(node);
1943 if (!node) {
1944 if (filled)
1945 bitset = 0;
1946 break;
1947 }
1948 }
Chris Masoncad321a2008-12-17 14:51:42 -05001949 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001950 return bitset;
1951}
Chris Masond1310b22008-01-24 16:13:08 -05001952
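/*
 * Usage sketch (illustrative, not from the original file): 'filled'
 * distinguishes "the entire range has the bits" from "any part of the range
 * has the bits":
 *
 *	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
 *		SetPageUptodate(page);	// all of [start, end] uptodate
 *
 *	if (test_range_bit(tree, start, end, EXTENT_DELALLOC, 0, NULL))
 *		... some delalloc exists somewhere in [start, end] ...
 */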
1953/*
1954 * helper function to set a given page up to date if all the
1955 * extents in the tree for that page are up to date
1956 */
Jeff Mahoney143bede2012-03-01 14:56:26 +01001957static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
Chris Masond1310b22008-01-24 16:13:08 -05001958{
Miao Xie4eee4fa2012-12-21 09:17:45 +00001959 u64 start = page_offset(page);
Chris Masond1310b22008-01-24 16:13:08 -05001960 u64 end = start + PAGE_CACHE_SIZE - 1;
Chris Mason9655d292009-09-02 15:22:30 -04001961 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
Chris Masond1310b22008-01-24 16:13:08 -05001962 SetPageUptodate(page);
Chris Masond1310b22008-01-24 16:13:08 -05001963}
1964
Miao Xie454ff3d2014-09-12 18:43:58 +08001965static int free_io_failure(struct inode *inode, struct io_failure_record *rec)
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02001966{
1967 int ret;
1968 int err = 0;
1969 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1970
1971 set_state_private(failure_tree, rec->start, 0);
1972 ret = clear_extent_bits(failure_tree, rec->start,
1973 rec->start + rec->len - 1,
1974 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1975 if (ret)
1976 err = ret;
1977
David Woodhouse53b381b2013-01-29 18:40:14 -05001978 ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
1979 rec->start + rec->len - 1,
1980 EXTENT_DAMAGED, GFP_NOFS);
1981 if (ret && !err)
1982 err = ret;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02001983
1984 kfree(rec);
1985 return err;
1986}
1987
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02001988/*
1989 * this bypasses the standard btrfs submit functions deliberately, as
1990 * the standard behavior is to write all copies in a raid setup. here we only
1991 * want to write the one bad copy. so we do the mapping for ourselves and issue
1992 * submit_bio directly.
Stefan Behrens3ec706c2012-11-05 15:46:42 +01001993 * to avoid any synchronization issues, wait for the data after writing, which
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02001994 * actually prevents the read that triggered the error from finishing.
1995 * currently, there can be no more than two copies of every data bit. thus,
1996 * exactly one rewrite is required.
1997 */
Stefan Behrens3ec706c2012-11-05 15:46:42 +01001998int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02001999 u64 length, u64 logical, struct page *page,
2000 int mirror_num)
2001{
2002 struct bio *bio;
2003 struct btrfs_device *dev;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002004 u64 map_length = 0;
2005 u64 sector;
2006 struct btrfs_bio *bbio = NULL;
David Woodhouse53b381b2013-01-29 18:40:14 -05002007 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002008 int ret;
2009
Ilya Dryomov908960c2013-11-03 19:06:39 +02002010 ASSERT(!(fs_info->sb->s_flags & MS_RDONLY));
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002011 BUG_ON(!mirror_num);
2012
David Woodhouse53b381b2013-01-29 18:40:14 -05002013 /* we can't repair anything in raid56 yet */
2014 if (btrfs_is_parity_mirror(map_tree, logical, length, mirror_num))
2015 return 0;
2016
Chris Mason9be33952013-05-17 18:30:14 -04002017 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002018 if (!bio)
2019 return -EIO;
Kent Overstreet4f024f32013-10-11 15:44:27 -07002020 bio->bi_iter.bi_size = 0;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002021 map_length = length;
2022
Stefan Behrens3ec706c2012-11-05 15:46:42 +01002023 ret = btrfs_map_block(fs_info, WRITE, logical,
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002024 &map_length, &bbio, mirror_num);
2025 if (ret) {
2026 bio_put(bio);
2027 return -EIO;
2028 }
2029 BUG_ON(mirror_num != bbio->mirror_num);
2030 sector = bbio->stripes[mirror_num-1].physical >> 9;
Kent Overstreet4f024f32013-10-11 15:44:27 -07002031 bio->bi_iter.bi_sector = sector;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002032 dev = bbio->stripes[mirror_num-1].dev;
2033 kfree(bbio);
2034 if (!dev || !dev->bdev || !dev->writeable) {
2035 bio_put(bio);
2036 return -EIO;
2037 }
2038 bio->bi_bdev = dev->bdev;
Miao Xie4eee4fa2012-12-21 09:17:45 +00002039 bio_add_page(bio, page, length, start - page_offset(page));
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002040
Kent Overstreet33879d42013-11-23 22:33:32 -08002041 if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) {
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002042 /* try to remap that extent elsewhere? */
2043 bio_put(bio);
Stefan Behrens442a4f62012-05-25 16:06:08 +02002044 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002045 return -EIO;
2046 }
2047
Frank Holtonefe120a2013-12-20 11:37:06 -05002048 printk_ratelimited_in_rcu(KERN_INFO
2049 "BTRFS: read error corrected: ino %lu off %llu "
2050 "(dev %s sector %llu)\n", page->mapping->host->i_ino,
2051 start, rcu_str_deref(dev->name), sector);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002052
2053 bio_put(bio);
2054 return 0;
2055}
2056
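/*
 * Worked detail (illustrative, not part of the original source): the >> 9
 * above converts a byte address into a 512-byte sector number, so a stripe
 * physical offset of 1048576 bytes becomes bi_sector 2048, and the page is
 * added at the offset of 'start' within its page:
 *
 *	sector = 1048576 >> 9;			// 2048
 *	bio_add_page(bio, page, length, start - page_offset(page));
 */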
Josef Bacikea466792012-03-26 21:57:36 -04002057int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
2058 int mirror_num)
2059{
Josef Bacikea466792012-03-26 21:57:36 -04002060 u64 start = eb->start;
2061 unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
Chris Masond95603b2012-04-12 15:55:15 -04002062 int ret = 0;
Josef Bacikea466792012-03-26 21:57:36 -04002063
Ilya Dryomov908960c2013-11-03 19:06:39 +02002064 if (root->fs_info->sb->s_flags & MS_RDONLY)
2065 return -EROFS;
2066
Josef Bacikea466792012-03-26 21:57:36 -04002067 for (i = 0; i < num_pages; i++) {
2068 struct page *p = extent_buffer_page(eb, i);
Stefan Behrens3ec706c2012-11-05 15:46:42 +01002069 ret = repair_io_failure(root->fs_info, start, PAGE_CACHE_SIZE,
Josef Bacikea466792012-03-26 21:57:36 -04002070 start, p, mirror_num);
2071 if (ret)
2072 break;
2073 start += PAGE_CACHE_SIZE;
2074 }
2075
2076 return ret;
2077}
2078
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002079/*
2080 * each time an IO finishes, we do a fast check in the IO failure tree
2081 * to see if we need to process or clean up an io_failure_record
2082 */
2083static int clean_io_failure(u64 start, struct page *page)
2084{
2085 u64 private;
2086 u64 private_failure;
2087 struct io_failure_record *failrec;
Ilya Dryomov908960c2013-11-03 19:06:39 +02002088 struct inode *inode = page->mapping->host;
2089 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002090 struct extent_state *state;
2091 int num_copies;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002092 int ret;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002093
2094 private = 0;
2095 ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
2096 (u64)-1, 1, EXTENT_DIRTY, 0);
2097 if (!ret)
2098 return 0;
2099
2100 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, start,
2101 &private_failure);
2102 if (ret)
2103 return 0;
2104
2105 failrec = (struct io_failure_record *)(unsigned long) private_failure;
2106 BUG_ON(!failrec->this_mirror);
2107
2108 if (failrec->in_validation) {
2109 /* there was no real error, just free the record */
2110 pr_debug("clean_io_failure: freeing dummy error at %llu\n",
2111 failrec->start);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002112 goto out;
2113 }
Ilya Dryomov908960c2013-11-03 19:06:39 +02002114 if (fs_info->sb->s_flags & MS_RDONLY)
2115 goto out;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002116
2117 spin_lock(&BTRFS_I(inode)->io_tree.lock);
2118 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
2119 failrec->start,
2120 EXTENT_LOCKED);
2121 spin_unlock(&BTRFS_I(inode)->io_tree.lock);
2122
Miao Xie883d0de2013-07-25 19:22:35 +08002123 if (state && state->start <= failrec->start &&
2124 state->end >= failrec->start + failrec->len - 1) {
Stefan Behrens3ec706c2012-11-05 15:46:42 +01002125 num_copies = btrfs_num_copies(fs_info, failrec->logical,
2126 failrec->len);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002127 if (num_copies > 1) {
Miao Xie454ff3d2014-09-12 18:43:58 +08002128 repair_io_failure(fs_info, start, failrec->len,
2129 failrec->logical, page,
2130 failrec->failed_mirror);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002131 }
2132 }
2133
2134out:
Miao Xie454ff3d2014-09-12 18:43:58 +08002135 free_io_failure(inode, failrec);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002136
Miao Xie454ff3d2014-09-12 18:43:58 +08002137 return 0;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002138}
2139
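/*
 * Illustrative note (not part of the original file): the fast check above
 * piggybacks on count_range_bits() to ask whether any EXTENT_DIRTY bytes
 * exist in the failure tree at all before paying for a record lookup:
 *
 *	private = 0;
 *	if (!count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
 *			      (u64)-1, 1, EXTENT_DIRTY, 0))
 *		return 0;	// no failure records pending
 */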
Miao Xie2fe63032014-09-12 18:43:59 +08002140int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
2141 struct io_failure_record **failrec_ret)
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002142{
Miao Xie2fe63032014-09-12 18:43:59 +08002143 struct io_failure_record *failrec;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002144 u64 private;
2145 struct extent_map *em;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002146 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2147 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2148 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002149 int ret;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002150 u64 logical;
2151
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002152 ret = get_state_private(failure_tree, start, &private);
2153 if (ret) {
2154 failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2155 if (!failrec)
2156 return -ENOMEM;
Miao Xie2fe63032014-09-12 18:43:59 +08002157
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002158 failrec->start = start;
2159 failrec->len = end - start + 1;
2160 failrec->this_mirror = 0;
2161 failrec->bio_flags = 0;
2162 failrec->in_validation = 0;
2163
2164 read_lock(&em_tree->lock);
2165 em = lookup_extent_mapping(em_tree, start, failrec->len);
2166 if (!em) {
2167 read_unlock(&em_tree->lock);
2168 kfree(failrec);
2169 return -EIO;
2170 }
2171
Filipe David Borba Manana68ba9902013-11-25 03:22:07 +00002172 if (em->start > start || em->start + em->len <= start) {
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002173 free_extent_map(em);
2174 em = NULL;
2175 }
2176 read_unlock(&em_tree->lock);
Tsutomu Itoh7a2d6a62012-10-01 03:07:15 -06002177 if (!em) {
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002178 kfree(failrec);
2179 return -EIO;
2180 }
Miao Xie2fe63032014-09-12 18:43:59 +08002181
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002182 logical = start - em->start;
2183 logical = em->block_start + logical;
2184 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2185 logical = em->block_start;
2186 failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2187 extent_set_compress_type(&failrec->bio_flags,
2188 em->compress_type);
2189 }
Miao Xie2fe63032014-09-12 18:43:59 +08002190
2191 pr_debug("Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu\n",
2192 logical, start, failrec->len);
2193
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002194 failrec->logical = logical;
2195 free_extent_map(em);
2196
2197 /* set the bits in the private failure tree */
2198 ret = set_extent_bits(failure_tree, start, end,
2199 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
2200 if (ret >= 0)
2201 ret = set_state_private(failure_tree, start,
2202 (u64)(unsigned long)failrec);
2203 /* set the bits in the inode's tree */
2204 if (ret >= 0)
2205 ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED,
2206 GFP_NOFS);
2207 if (ret < 0) {
2208 kfree(failrec);
2209 return ret;
2210 }
2211 } else {
2212 failrec = (struct io_failure_record *)(unsigned long)private;
Miao Xie2fe63032014-09-12 18:43:59 +08002213 pr_debug("Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu, validation=%d\n",
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002214 failrec->logical, failrec->start, failrec->len,
2215 failrec->in_validation);
2216 /*
2217 * when data can be on disk more than twice, add to failrec here
2218 * (e.g. with a list for failed_mirror) to make
2219 * clean_io_failure() clean all those errors at once.
2220 */
2221 }
Miao Xie2fe63032014-09-12 18:43:59 +08002222
2223 *failrec_ret = failrec;
2224
2225 return 0;
2226}
2227
2228int btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
2229 struct io_failure_record *failrec, int failed_mirror)
2230{
2231 int num_copies;
2232
Stefan Behrens5d964052012-11-05 14:59:07 +01002233 num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info,
2234 failrec->logical, failrec->len);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002235 if (num_copies == 1) {
2236 /*
2237 * we only have a single copy of the data, so don't bother with
2238 * all the retry and error correction code that follows. no
2239 * matter what the error is, it is very likely to persist.
2240 */
Miao Xie2fe63032014-09-12 18:43:59 +08002241 pr_debug("Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n",
Miao Xie09a7f7a2013-07-25 19:22:32 +08002242 num_copies, failrec->this_mirror, failed_mirror);
Miao Xie2fe63032014-09-12 18:43:59 +08002243 return 0;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002244 }
2245
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002246 /*
2247 * there are two premises:
2248 * a) deliver good data to the caller
2249 * b) correct the bad sectors on disk
2250 */
2251 if (failed_bio->bi_vcnt > 1) {
2252 /*
2253 * to fulfill b), we need to know the exact failing sectors, as
2254 * we don't want to rewrite any more than the failed ones. thus,
2255 * we need separate read requests for the failed bio
2256 *
2257 * if the following BUG_ON triggers, our validation request got
2258 * merged. we need separate requests for our algorithm to work.
2259 */
2260 BUG_ON(failrec->in_validation);
2261 failrec->in_validation = 1;
2262 failrec->this_mirror = failed_mirror;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002263 } else {
2264 /*
2265 * we're ready to fulfill a) and b) alongside. get a good copy
2266 * of the failed sector and if we succeed, we have setup
2267 * everything for repair_io_failure to do the rest for us.
2268 */
2269 if (failrec->in_validation) {
2270 BUG_ON(failrec->this_mirror != failed_mirror);
2271 failrec->in_validation = 0;
2272 failrec->this_mirror = 0;
2273 }
2274 failrec->failed_mirror = failed_mirror;
2275 failrec->this_mirror++;
2276 if (failrec->this_mirror == failed_mirror)
2277 failrec->this_mirror++;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002278 }
2279
Miao Xiefacc8a222013-07-25 19:22:34 +08002280 if (failrec->this_mirror > num_copies) {
Miao Xie2fe63032014-09-12 18:43:59 +08002281 pr_debug("Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n",
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002282 num_copies, failrec->this_mirror, failed_mirror);
Miao Xie2fe63032014-09-12 18:43:59 +08002283 return 0;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002284 }
2285
Miao Xie2fe63032014-09-12 18:43:59 +08002286 return 1;
2287}
2288
2289
2290struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
2291 struct io_failure_record *failrec,
2292 struct page *page, int pg_offset, int icsum,
2293 bio_end_io_t *endio_func)
2294{
2295 struct bio *bio;
2296 struct btrfs_io_bio *btrfs_failed_bio;
2297 struct btrfs_io_bio *btrfs_bio;
2298
Chris Mason9be33952013-05-17 18:30:14 -04002299 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
Miao Xie2fe63032014-09-12 18:43:59 +08002300 if (!bio)
2301 return NULL;
2302
2303 bio->bi_end_io = endio_func;
Kent Overstreet4f024f32013-10-11 15:44:27 -07002304 bio->bi_iter.bi_sector = failrec->logical >> 9;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002305 bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
Kent Overstreet4f024f32013-10-11 15:44:27 -07002306 bio->bi_iter.bi_size = 0;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002307
Miao Xiefacc8a222013-07-25 19:22:34 +08002308 btrfs_failed_bio = btrfs_io_bio(failed_bio);
2309 if (btrfs_failed_bio->csum) {
2310 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2311 u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
2312
2313 btrfs_bio = btrfs_io_bio(bio);
2314 btrfs_bio->csum = btrfs_bio->csum_inline;
Miao Xie2fe63032014-09-12 18:43:59 +08002315 icsum *= csum_size;
2316 memcpy(btrfs_bio->csum, btrfs_failed_bio->csum + icsum,
Miao Xiefacc8a222013-07-25 19:22:34 +08002317 csum_size);
2318 }
2319
Miao Xie2fe63032014-09-12 18:43:59 +08002320 bio_add_page(bio, page, failrec->len, pg_offset);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002321
Miao Xie2fe63032014-09-12 18:43:59 +08002322 return bio;
2323}
2324
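/*
 * Illustrative flow (not from the original source): bio_readpage_error()
 * below strings the three helpers together roughly like this:
 *
 *	btrfs_get_io_failure_record(inode, start, end, &failrec);
 *	if (!btrfs_check_repairable(inode, failed_bio, failrec,
 *				    failed_mirror)) {
 *		free_io_failure(inode, failrec);
 *		return -EIO;	// no other copy worth trying
 *	}
 *	bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
 *				      pg_offset, icsum, endio_func);
 *	... submit bio against failrec->this_mirror ...
 */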
2325/*
2326 * this is a generic handler for readpage errors (default
2327 * readpage_io_failed_hook). if other copies exist, read those and write back
 2328 * good data to the failed position. does not attempt to remap the
 2329 * failed extent elsewhere, hoping the device will be smart enough to do this as
 2330 * needed
2331 */
2332
2333static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
2334 struct page *page, u64 start, u64 end,
2335 int failed_mirror)
2336{
2337 struct io_failure_record *failrec;
2338 struct inode *inode = page->mapping->host;
2339 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2340 struct bio *bio;
2341 int read_mode;
2342 int ret;
2343
2344 BUG_ON(failed_bio->bi_rw & REQ_WRITE);
2345
2346 ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
2347 if (ret)
2348 return ret;
2349
2350 ret = btrfs_check_repairable(inode, failed_bio, failrec, failed_mirror);
2351 if (!ret) {
2352 free_io_failure(inode, failrec);
2353 return -EIO;
2354 }
2355
2356 if (failed_bio->bi_vcnt > 1)
2357 read_mode = READ_SYNC | REQ_FAILFAST_DEV;
2358 else
2359 read_mode = READ_SYNC;
2360
2361 phy_offset >>= inode->i_sb->s_blocksize_bits;
2362 bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
2363 start - page_offset(page),
2364 (int)phy_offset, failed_bio->bi_end_io);
2365 if (!bio) {
2366 free_io_failure(inode, failrec);
2367 return -EIO;
2368 }
2369
2370 pr_debug("Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d\n",
2371 read_mode, failrec->this_mirror, failrec->in_validation);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002372
Tsutomu Itoh013bd4c2012-02-16 10:11:40 +09002373 ret = tree->ops->submit_bio_hook(inode, read_mode, bio,
2374 failrec->this_mirror,
2375 failrec->bio_flags, 0);
Miao Xie6c387ab2014-09-12 18:43:57 +08002376 if (ret) {
Miao Xie454ff3d2014-09-12 18:43:58 +08002377 free_io_failure(inode, failrec);
Miao Xie6c387ab2014-09-12 18:43:57 +08002378 bio_put(bio);
2379 }
2380
Tsutomu Itoh013bd4c2012-02-16 10:11:40 +09002381 return ret;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002382}
2383
Chris Masond1310b22008-01-24 16:13:08 -05002384/* lots and lots of room for performance fixes in the end_bio funcs */
2385
Jeff Mahoney87826df2012-02-15 16:23:57 +01002386int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2387{
2388 int uptodate = (err == 0);
2389 struct extent_io_tree *tree;
Eric Sandeen3e2426b2014-06-12 00:39:58 -05002390 int ret = 0;
Jeff Mahoney87826df2012-02-15 16:23:57 +01002391
2392 tree = &BTRFS_I(page->mapping->host)->io_tree;
2393
2394 if (tree->ops && tree->ops->writepage_end_io_hook) {
2395 ret = tree->ops->writepage_end_io_hook(page, start,
2396 end, NULL, uptodate);
2397 if (ret)
2398 uptodate = 0;
2399 }
2400
Jeff Mahoney87826df2012-02-15 16:23:57 +01002401 if (!uptodate) {
Jeff Mahoney87826df2012-02-15 16:23:57 +01002402 ClearPageUptodate(page);
2403 SetPageError(page);
Liu Bo5dca6ee2014-05-12 12:47:36 +08002404 ret = ret < 0 ? ret : -EIO;
2405 mapping_set_error(page->mapping, ret);
Jeff Mahoney87826df2012-02-15 16:23:57 +01002406 }
2407 return 0;
2408}
2409
Chris Masond1310b22008-01-24 16:13:08 -05002410/*
2411 * after a writepage IO is done, we need to:
2412 * clear the uptodate bits on error
2413 * clear the writeback bits in the extent tree for this IO
2414 * end_page_writeback if the page has no more pending IO
2415 *
2416 * Scheduling is not allowed, so the extent state tree is expected
2417 * to have one and only one object corresponding to this IO.
2418 */
Chris Masond1310b22008-01-24 16:13:08 -05002419static void end_bio_extent_writepage(struct bio *bio, int err)
Chris Masond1310b22008-01-24 16:13:08 -05002420{
Kent Overstreet2c30c712013-11-07 12:20:26 -08002421 struct bio_vec *bvec;
Chris Masond1310b22008-01-24 16:13:08 -05002422 u64 start;
2423 u64 end;
Kent Overstreet2c30c712013-11-07 12:20:26 -08002424 int i;
Chris Masond1310b22008-01-24 16:13:08 -05002425
Kent Overstreet2c30c712013-11-07 12:20:26 -08002426 bio_for_each_segment_all(bvec, bio, i) {
Chris Masond1310b22008-01-24 16:13:08 -05002427 struct page *page = bvec->bv_page;
David Woodhouse902b22f2008-08-20 08:51:49 -04002428
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002429		/* We always issue full-page writes, but if some block
 2430		 * in a page fails to write, blk_update_request() will
2431 * advance bv_offset and adjust bv_len to compensate.
2432 * Print a warning for nonzero offsets, and an error
2433 * if they don't add up to a full page. */
Frank Holtonefe120a2013-12-20 11:37:06 -05002434 if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) {
2435 if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE)
2436 btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
2437 "partial page write in btrfs with offset %u and length %u",
2438 bvec->bv_offset, bvec->bv_len);
2439 else
2440 btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
2441 "incomplete page write in btrfs with offset %u and "
2442 "length %u",
2443 bvec->bv_offset, bvec->bv_len);
2444 }
Chris Masond1310b22008-01-24 16:13:08 -05002445
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002446 start = page_offset(page);
2447 end = start + bvec->bv_offset + bvec->bv_len - 1;
Chris Masond1310b22008-01-24 16:13:08 -05002448
Jeff Mahoney87826df2012-02-15 16:23:57 +01002449 if (end_extent_writepage(page, err, start, end))
2450 continue;
Chris Mason70dec802008-01-29 09:59:12 -05002451
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002452 end_page_writeback(page);
Kent Overstreet2c30c712013-11-07 12:20:26 -08002453 }
Chris Mason2b1f55b2008-09-24 11:48:04 -04002454
Chris Masond1310b22008-01-24 16:13:08 -05002455 bio_put(bio);
Chris Masond1310b22008-01-24 16:13:08 -05002456}
2457
Miao Xie883d0de2013-07-25 19:22:35 +08002458static void
2459endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
2460 int uptodate)
2461{
2462 struct extent_state *cached = NULL;
2463 u64 end = start + len - 1;
2464
2465 if (uptodate && tree->track_uptodate)
2466 set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC);
2467 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
2468}
2469
Chris Masond1310b22008-01-24 16:13:08 -05002470/*
2471 * after a readpage IO is done, we need to:
2472 * clear the uptodate bits on error
2473 * set the uptodate bits if things worked
2474 * set the page up to date if all extents in the tree are uptodate
2475 * clear the lock bit in the extent tree
2476 * unlock the page if there are no other extents locked for it
2477 *
2478 * Scheduling is not allowed, so the extent state tree is expected
2479 * to have one and only one object corresponding to this IO.
2480 */
Chris Masond1310b22008-01-24 16:13:08 -05002481static void end_bio_extent_readpage(struct bio *bio, int err)
Chris Masond1310b22008-01-24 16:13:08 -05002482{
Kent Overstreet2c30c712013-11-07 12:20:26 -08002483 struct bio_vec *bvec;
Chris Masond1310b22008-01-24 16:13:08 -05002484 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
Miao Xiefacc8a222013-07-25 19:22:34 +08002485 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
David Woodhouse902b22f2008-08-20 08:51:49 -04002486 struct extent_io_tree *tree;
Miao Xiefacc8a222013-07-25 19:22:34 +08002487 u64 offset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002488 u64 start;
2489 u64 end;
Miao Xiefacc8a222013-07-25 19:22:34 +08002490 u64 len;
Miao Xie883d0de2013-07-25 19:22:35 +08002491 u64 extent_start = 0;
2492 u64 extent_len = 0;
Josef Bacik5cf1ab52012-04-16 09:42:26 -04002493 int mirror;
Chris Masond1310b22008-01-24 16:13:08 -05002494 int ret;
Kent Overstreet2c30c712013-11-07 12:20:26 -08002495 int i;
Chris Masond1310b22008-01-24 16:13:08 -05002496
Chris Masond20f7042008-12-08 16:58:54 -05002497 if (err)
2498 uptodate = 0;
2499
Kent Overstreet2c30c712013-11-07 12:20:26 -08002500 bio_for_each_segment_all(bvec, bio, i) {
Chris Masond1310b22008-01-24 16:13:08 -05002501 struct page *page = bvec->bv_page;
Josef Bacika71754f2013-06-17 17:14:39 -04002502 struct inode *inode = page->mapping->host;
Arne Jansen507903b2011-04-06 10:02:20 +00002503
Kent Overstreetbe3940c2012-09-11 14:23:05 -06002504 pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, "
Miao Xiec1dc0892014-09-12 18:43:56 +08002505 "mirror=%u\n", (u64)bio->bi_iter.bi_sector, err,
Chris Mason9be33952013-05-17 18:30:14 -04002506 io_bio->mirror_num);
Josef Bacika71754f2013-06-17 17:14:39 -04002507 tree = &BTRFS_I(inode)->io_tree;
David Woodhouse902b22f2008-08-20 08:51:49 -04002508
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002509 /* We always issue full-page reads, but if some block
2510 * in a page fails to read, blk_update_request() will
2511 * advance bv_offset and adjust bv_len to compensate.
2512 * Print a warning for nonzero offsets, and an error
2513 * if they don't add up to a full page. */
Frank Holtonefe120a2013-12-20 11:37:06 -05002514 if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) {
2515 if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE)
2516 btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
2517 "partial page read in btrfs with offset %u and length %u",
2518 bvec->bv_offset, bvec->bv_len);
2519 else
2520 btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
2521 "incomplete page read in btrfs with offset %u and "
2522 "length %u",
2523 bvec->bv_offset, bvec->bv_len);
2524 }
Chris Masond1310b22008-01-24 16:13:08 -05002525
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002526 start = page_offset(page);
2527 end = start + bvec->bv_offset + bvec->bv_len - 1;
Miao Xiefacc8a222013-07-25 19:22:34 +08002528 len = bvec->bv_len;
Chris Masond1310b22008-01-24 16:13:08 -05002529
Chris Mason9be33952013-05-17 18:30:14 -04002530 mirror = io_bio->mirror_num;
Miao Xief2a09da2013-07-25 19:22:33 +08002531 if (likely(uptodate && tree->ops &&
2532 tree->ops->readpage_end_io_hook)) {
Miao Xiefacc8a222013-07-25 19:22:34 +08002533 ret = tree->ops->readpage_end_io_hook(io_bio, offset,
2534 page, start, end,
2535 mirror);
Stefan Behrens5ee08442012-08-27 08:30:03 -06002536 if (ret)
Chris Masond1310b22008-01-24 16:13:08 -05002537 uptodate = 0;
Stefan Behrens5ee08442012-08-27 08:30:03 -06002538 else
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002539 clean_io_failure(start, page);
Chris Masond1310b22008-01-24 16:13:08 -05002540 }
Josef Bacikea466792012-03-26 21:57:36 -04002541
Miao Xief2a09da2013-07-25 19:22:33 +08002542 if (likely(uptodate))
2543 goto readpage_ok;
2544
2545 if (tree->ops && tree->ops->readpage_io_failed_hook) {
Josef Bacik5cf1ab52012-04-16 09:42:26 -04002546 ret = tree->ops->readpage_io_failed_hook(page, mirror);
Josef Bacikea466792012-03-26 21:57:36 -04002547 if (!ret && !err &&
2548 test_bit(BIO_UPTODATE, &bio->bi_flags))
2549 uptodate = 1;
Miao Xief2a09da2013-07-25 19:22:33 +08002550 } else {
Jan Schmidtf4a8e652011-12-01 09:30:36 -05002551 /*
2552 * The generic bio_readpage_error handles errors the
2553 * following way: If possible, new read requests are
2554 * created and submitted and will end up in
2555 * end_bio_extent_readpage as well (if we're lucky, not
2556 * in the !uptodate case). In that case it returns 0 and
2557 * we just go on with the next page in our bio. If it
2558 * can't handle the error it will return -EIO and we
2559 * remain responsible for that page.
2560 */
Miao Xiefacc8a222013-07-25 19:22:34 +08002561 ret = bio_readpage_error(bio, offset, page, start, end,
2562 mirror);
Chris Mason7e383262008-04-09 16:28:12 -04002563 if (ret == 0) {
Chris Mason3b951512008-04-17 11:29:12 -04002564 uptodate =
2565 test_bit(BIO_UPTODATE, &bio->bi_flags);
Chris Masond20f7042008-12-08 16:58:54 -05002566 if (err)
2567 uptodate = 0;
Liu Bo38c1c2e2014-08-19 23:33:13 +08002568 offset += len;
Chris Mason7e383262008-04-09 16:28:12 -04002569 continue;
2570 }
2571 }
Miao Xief2a09da2013-07-25 19:22:33 +08002572readpage_ok:
Miao Xie883d0de2013-07-25 19:22:35 +08002573 if (likely(uptodate)) {
Josef Bacika71754f2013-06-17 17:14:39 -04002574 loff_t i_size = i_size_read(inode);
2575 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
Liu Boa583c022014-08-19 23:32:22 +08002576 unsigned off;
Josef Bacika71754f2013-06-17 17:14:39 -04002577
2578 /* Zero out the end if this page straddles i_size */
Liu Boa583c022014-08-19 23:32:22 +08002579 off = i_size & (PAGE_CACHE_SIZE-1);
2580 if (page->index == end_index && off)
2581 zero_user_segment(page, off, PAGE_CACHE_SIZE);
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002582 SetPageUptodate(page);
Chris Mason70dec802008-01-29 09:59:12 -05002583 } else {
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002584 ClearPageUptodate(page);
2585 SetPageError(page);
Chris Mason70dec802008-01-29 09:59:12 -05002586 }
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002587 unlock_page(page);
Miao Xiefacc8a222013-07-25 19:22:34 +08002588 offset += len;
Miao Xie883d0de2013-07-25 19:22:35 +08002589
2590 if (unlikely(!uptodate)) {
2591 if (extent_len) {
2592 endio_readpage_release_extent(tree,
2593 extent_start,
2594 extent_len, 1);
2595 extent_start = 0;
2596 extent_len = 0;
2597 }
2598 endio_readpage_release_extent(tree, start,
2599 end - start + 1, 0);
2600 } else if (!extent_len) {
2601 extent_start = start;
2602 extent_len = end + 1 - start;
2603 } else if (extent_start + extent_len == start) {
2604 extent_len += end + 1 - start;
2605 } else {
2606 endio_readpage_release_extent(tree, extent_start,
2607 extent_len, uptodate);
2608 extent_start = start;
2609 extent_len = end + 1 - start;
2610 }
Kent Overstreet2c30c712013-11-07 12:20:26 -08002611 }
Chris Masond1310b22008-01-24 16:13:08 -05002612
Miao Xie883d0de2013-07-25 19:22:35 +08002613 if (extent_len)
2614 endio_readpage_release_extent(tree, extent_start, extent_len,
2615 uptodate);
Miao Xiefacc8a222013-07-25 19:22:34 +08002616 if (io_bio->end_io)
2617 io_bio->end_io(io_bio, err);
Chris Masond1310b22008-01-24 16:13:08 -05002618 bio_put(bio);
Chris Masond1310b22008-01-24 16:13:08 -05002619}
2620
Chris Mason9be33952013-05-17 18:30:14 -04002621/*
2622 * this allocates from the btrfs_bioset.  We return a plain bio here;
2623 * callers can use btrfs_io_bio() for the appropriate container_of magic
2624 */
Miao Xie88f794e2010-11-22 03:02:55 +00002625struct bio *
2626btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
2627 gfp_t gfp_flags)
Chris Masond1310b22008-01-24 16:13:08 -05002628{
Miao Xiefacc8a222013-07-25 19:22:34 +08002629 struct btrfs_io_bio *btrfs_bio;
Chris Masond1310b22008-01-24 16:13:08 -05002630 struct bio *bio;
2631
Chris Mason9be33952013-05-17 18:30:14 -04002632 bio = bio_alloc_bioset(gfp_flags, nr_vecs, btrfs_bioset);
Chris Masond1310b22008-01-24 16:13:08 -05002633
2634 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
Chris Mason9be33952013-05-17 18:30:14 -04002635 while (!bio && (nr_vecs /= 2)) {
2636 bio = bio_alloc_bioset(gfp_flags,
2637 nr_vecs, btrfs_bioset);
2638 }
Chris Masond1310b22008-01-24 16:13:08 -05002639 }
2640
2641 if (bio) {
2642 bio->bi_bdev = bdev;
Kent Overstreet4f024f32013-10-11 15:44:27 -07002643 bio->bi_iter.bi_sector = first_sector;
Miao Xiefacc8a222013-07-25 19:22:34 +08002644 btrfs_bio = btrfs_io_bio(bio);
2645 btrfs_bio->csum = NULL;
2646 btrfs_bio->csum_allocated = NULL;
2647 btrfs_bio->end_io = NULL;
Chris Masond1310b22008-01-24 16:13:08 -05002648 }
2649 return bio;
2650}
2651
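/*
 * Usage sketch for the container_of magic mentioned above (illustrative
 * only; 'my_end_io' is a hypothetical callback, not from this file):
 *
 *	bio = btrfs_bio_alloc(bdev, sector, nr_vecs, GFP_NOFS);
 *	if (bio) {
 *		struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
 *
 *		io_bio->end_io = my_end_io;
 *	}
 */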
Chris Mason9be33952013-05-17 18:30:14 -04002652struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask)
2653{
Miao Xie23ea8e52014-09-12 18:43:54 +08002654 struct btrfs_io_bio *btrfs_bio;
2655 struct bio *new;
Chris Mason9be33952013-05-17 18:30:14 -04002656
Miao Xie23ea8e52014-09-12 18:43:54 +08002657 new = bio_clone_bioset(bio, gfp_mask, btrfs_bioset);
2658 if (new) {
2659 btrfs_bio = btrfs_io_bio(new);
2660 btrfs_bio->csum = NULL;
2661 btrfs_bio->csum_allocated = NULL;
2662 btrfs_bio->end_io = NULL;
2663 }
2664 return new;
2665}
Chris Mason9be33952013-05-17 18:30:14 -04002666
2667/* this also allocates from the btrfs_bioset */
2668struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
2669{
Miao Xiefacc8a222013-07-25 19:22:34 +08002670 struct btrfs_io_bio *btrfs_bio;
2671 struct bio *bio;
2672
2673 bio = bio_alloc_bioset(gfp_mask, nr_iovecs, btrfs_bioset);
2674 if (bio) {
2675 btrfs_bio = btrfs_io_bio(bio);
2676 btrfs_bio->csum = NULL;
2677 btrfs_bio->csum_allocated = NULL;
2678 btrfs_bio->end_io = NULL;
2679 }
2680 return bio;
Chris Mason9be33952013-05-17 18:30:14 -04002681}
2682
2683
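/*
 * Send a bio down through the tree's submit_bio_hook when one is
 * registered, so the owner can do its own setup (csums, mirror choice)
 * first; otherwise submit it directly.
 */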
Jeff Mahoney355808c2011-10-03 23:23:14 -04002684static int __must_check submit_one_bio(int rw, struct bio *bio,
2685 int mirror_num, unsigned long bio_flags)
Chris Masond1310b22008-01-24 16:13:08 -05002686{
Chris Masond1310b22008-01-24 16:13:08 -05002687 int ret = 0;
Chris Mason70dec802008-01-29 09:59:12 -05002688 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2689 struct page *page = bvec->bv_page;
2690 struct extent_io_tree *tree = bio->bi_private;
Chris Mason70dec802008-01-29 09:59:12 -05002691 u64 start;
Chris Mason70dec802008-01-29 09:59:12 -05002692
Miao Xie4eee4fa2012-12-21 09:17:45 +00002693 start = page_offset(page) + bvec->bv_offset;
Chris Mason70dec802008-01-29 09:59:12 -05002694
David Woodhouse902b22f2008-08-20 08:51:49 -04002695 bio->bi_private = NULL;
Chris Masond1310b22008-01-24 16:13:08 -05002696
2697 bio_get(bio);
2698
Chris Mason065631f2008-02-20 12:07:25 -05002699 if (tree->ops && tree->ops->submit_bio_hook)
liubo6b82ce82011-01-26 06:21:39 +00002700 ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
Chris Masoneaf25d92010-05-25 09:48:28 -04002701 mirror_num, bio_flags, start);
Chris Mason0b86a832008-03-24 15:01:56 -04002702 else
Stefan Behrens21adbd52011-11-09 13:44:05 +01002703 btrfsic_submit_bio(rw, bio);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002704
Chris Masond1310b22008-01-24 16:13:08 -05002705 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2706 ret = -EOPNOTSUPP;
2707 bio_put(bio);
2708 return ret;
2709}
2710
David Woodhouse64a16702009-07-15 23:29:37 +01002711static int merge_bio(int rw, struct extent_io_tree *tree, struct page *page,
Jeff Mahoney3444a972011-10-03 23:23:13 -04002712 unsigned long offset, size_t size, struct bio *bio,
2713 unsigned long bio_flags)
2714{
2715 int ret = 0;
2716 if (tree->ops && tree->ops->merge_bio_hook)
David Woodhouse64a16702009-07-15 23:29:37 +01002717 ret = tree->ops->merge_bio_hook(rw, page, offset, size, bio,
Jeff Mahoney3444a972011-10-03 23:23:13 -04002718 bio_flags);
2719 BUG_ON(ret < 0);
2720 return ret;
2721
2722}
2723
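/*
 * Add a page range to the bio cached in *bio_ret when it is physically
 * contiguous with it, the bio flags match and the merge hook agrees;
 * otherwise submit the old bio and open a fresh one.  The open bio is
 * handed back through *bio_ret so the next call can keep merging.
 */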
Chris Masond1310b22008-01-24 16:13:08 -05002724static int submit_extent_page(int rw, struct extent_io_tree *tree,
2725 struct page *page, sector_t sector,
2726 size_t size, unsigned long offset,
2727 struct block_device *bdev,
2728 struct bio **bio_ret,
2729 unsigned long max_pages,
Chris Masonf1885912008-04-09 16:28:12 -04002730 bio_end_io_t end_io_func,
Chris Masonc8b97812008-10-29 14:49:59 -04002731 int mirror_num,
2732 unsigned long prev_bio_flags,
2733 unsigned long bio_flags)
Chris Masond1310b22008-01-24 16:13:08 -05002734{
2735 int ret = 0;
2736 struct bio *bio;
2737 int nr;
Chris Masonc8b97812008-10-29 14:49:59 -04002738 int contig = 0;
2739 int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
2740 int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
Chris Mason5b050f02008-11-11 09:34:41 -05002741 size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
Chris Masond1310b22008-01-24 16:13:08 -05002742
2743 if (bio_ret && *bio_ret) {
2744 bio = *bio_ret;
Chris Masonc8b97812008-10-29 14:49:59 -04002745 if (old_compressed)
Kent Overstreet4f024f32013-10-11 15:44:27 -07002746 contig = bio->bi_iter.bi_sector == sector;
Chris Masonc8b97812008-10-29 14:49:59 -04002747 else
Kent Overstreetf73a1c72012-09-25 15:05:12 -07002748 contig = bio_end_sector(bio) == sector;
Chris Masonc8b97812008-10-29 14:49:59 -04002749
2750 if (prev_bio_flags != bio_flags || !contig ||
David Woodhouse64a16702009-07-15 23:29:37 +01002751 merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
Chris Masonc8b97812008-10-29 14:49:59 -04002752 bio_add_page(bio, page, page_size, offset) < page_size) {
2753 ret = submit_one_bio(rw, bio, mirror_num,
2754 prev_bio_flags);
Jeff Mahoney79787ea2012-03-12 16:03:00 +01002755 if (ret < 0)
2756 return ret;
Chris Masond1310b22008-01-24 16:13:08 -05002757 bio = NULL;
2758 } else {
2759 return 0;
2760 }
2761 }
Chris Masonc8b97812008-10-29 14:49:59 -04002762 if (this_compressed)
2763 nr = BIO_MAX_PAGES;
2764 else
2765 nr = bio_get_nr_vecs(bdev);
2766
Miao Xie88f794e2010-11-22 03:02:55 +00002767 bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
Tsutomu Itoh5df67082011-02-01 09:17:35 +00002768 if (!bio)
2769 return -ENOMEM;
Chris Mason70dec802008-01-29 09:59:12 -05002770
Chris Masonc8b97812008-10-29 14:49:59 -04002771 bio_add_page(bio, page, page_size, offset);
Chris Masond1310b22008-01-24 16:13:08 -05002772 bio->bi_end_io = end_io_func;
2773 bio->bi_private = tree;
Chris Mason70dec802008-01-29 09:59:12 -05002774
Chris Masond3977122009-01-05 21:25:51 -05002775 if (bio_ret)
Chris Masond1310b22008-01-24 16:13:08 -05002776 *bio_ret = bio;
Chris Masond3977122009-01-05 21:25:51 -05002777 else
Chris Masonc8b97812008-10-29 14:49:59 -04002778 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05002779
2780 return ret;
2781}
2782
Eric Sandeen48a3b632013-04-25 20:41:01 +00002783static void attach_extent_buffer_page(struct extent_buffer *eb,
2784 struct page *page)
Josef Bacik4f2de97a2012-03-07 16:20:05 -05002785{
2786 if (!PagePrivate(page)) {
2787 SetPagePrivate(page);
2788 page_cache_get(page);
2789 set_page_private(page, (unsigned long)eb);
2790 } else {
2791 WARN_ON(page->private != (unsigned long)eb);
2792 }
2793}
2794
Chris Masond1310b22008-01-24 16:13:08 -05002795void set_page_extent_mapped(struct page *page)
2796{
2797 if (!PagePrivate(page)) {
2798 SetPagePrivate(page);
Chris Masond1310b22008-01-24 16:13:08 -05002799 page_cache_get(page);
Chris Mason6af118ce2008-07-22 11:18:07 -04002800 set_page_private(page, EXTENT_PAGE_PRIVATE);
Chris Masond1310b22008-01-24 16:13:08 -05002801 }
2802}
2803
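/*
 * Look up the extent map covering 'start', reusing *em_cached when it
 * still applies; a stale cached map is dropped and replaced with the
 * fresh result of get_extent().
 */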
Miao Xie125bac012013-07-25 19:22:37 +08002804static struct extent_map *
2805__get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
2806 u64 start, u64 len, get_extent_t *get_extent,
2807 struct extent_map **em_cached)
2808{
2809 struct extent_map *em;
2810
2811 if (em_cached && *em_cached) {
2812 em = *em_cached;
Filipe Mananacbc0e922014-02-25 14:15:12 +00002813 if (extent_map_in_tree(em) && start >= em->start &&
Miao Xie125bac012013-07-25 19:22:37 +08002814 start < extent_map_end(em)) {
2815 atomic_inc(&em->refs);
2816 return em;
2817 }
2818
2819 free_extent_map(em);
2820 *em_cached = NULL;
2821 }
2822
2823 em = get_extent(inode, page, pg_offset, start, len, 0);
2824 if (em_cached && !IS_ERR_OR_NULL(em)) {
2825 BUG_ON(*em_cached);
2826 atomic_inc(&em->refs);
2827 *em_cached = em;
2828 }
2829 return em;
2830}
Chris Masond1310b22008-01-24 16:13:08 -05002831/*
2832 * basic readpage implementation.  Locked extent state structs are
2833 * inserted into the tree; they are removed when the IO is done (by the
2834 * end_io handlers)
Jeff Mahoney79787ea2012-03-12 16:03:00 +01002835 * XXX JDM: This needs looking at to ensure proper page locking
Chris Masond1310b22008-01-24 16:13:08 -05002836 */
Miao Xie99740902013-07-25 19:22:36 +08002837static int __do_readpage(struct extent_io_tree *tree,
2838 struct page *page,
2839 get_extent_t *get_extent,
Miao Xie125bac012013-07-25 19:22:37 +08002840 struct extent_map **em_cached,
Miao Xie99740902013-07-25 19:22:36 +08002841 struct bio **bio, int mirror_num,
2842 unsigned long *bio_flags, int rw)
Chris Masond1310b22008-01-24 16:13:08 -05002843{
2844 struct inode *inode = page->mapping->host;
Miao Xie4eee4fa2012-12-21 09:17:45 +00002845 u64 start = page_offset(page);
Chris Masond1310b22008-01-24 16:13:08 -05002846 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2847 u64 end;
2848 u64 cur = start;
2849 u64 extent_offset;
2850 u64 last_byte = i_size_read(inode);
2851 u64 block_start;
2852 u64 cur_end;
2853 sector_t sector;
2854 struct extent_map *em;
2855 struct block_device *bdev;
2856 int ret;
2857 int nr = 0;
Mark Fasheh4b384312013-08-06 11:42:50 -07002858 int parent_locked = *bio_flags & EXTENT_BIO_PARENT_LOCKED;
David Sterba306e16c2011-04-19 14:29:38 +02002859 size_t pg_offset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002860 size_t iosize;
Chris Masonc8b97812008-10-29 14:49:59 -04002861 size_t disk_io_size;
Chris Masond1310b22008-01-24 16:13:08 -05002862 size_t blocksize = inode->i_sb->s_blocksize;
Mark Fasheh4b384312013-08-06 11:42:50 -07002863 unsigned long this_bio_flag = *bio_flags & EXTENT_BIO_PARENT_LOCKED;
Chris Masond1310b22008-01-24 16:13:08 -05002864
2865 set_page_extent_mapped(page);
2866
Miao Xie99740902013-07-25 19:22:36 +08002867 end = page_end;
Dan Magenheimer90a887c2011-05-26 10:01:56 -06002868 if (!PageUptodate(page)) {
2869 if (cleancache_get_page(page) == 0) {
2870 BUG_ON(blocksize != PAGE_SIZE);
Miao Xie99740902013-07-25 19:22:36 +08002871 unlock_extent(tree, start, end);
Dan Magenheimer90a887c2011-05-26 10:01:56 -06002872 goto out;
2873 }
2874 }
2875
Chris Masonc8b97812008-10-29 14:49:59 -04002876 if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
2877 char *userpage;
2878 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
2879
2880 if (zero_offset) {
2881 iosize = PAGE_CACHE_SIZE - zero_offset;
Cong Wang7ac687d2011-11-25 23:14:28 +08002882 userpage = kmap_atomic(page);
Chris Masonc8b97812008-10-29 14:49:59 -04002883 memset(userpage + zero_offset, 0, iosize);
2884 flush_dcache_page(page);
Cong Wang7ac687d2011-11-25 23:14:28 +08002885 kunmap_atomic(userpage);
Chris Masonc8b97812008-10-29 14:49:59 -04002886 }
2887 }
Chris Masond1310b22008-01-24 16:13:08 -05002888 while (cur <= end) {
Josef Bacikc8f2f242013-02-11 11:33:00 -05002889 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2890
Chris Masond1310b22008-01-24 16:13:08 -05002891 if (cur >= last_byte) {
2892 char *userpage;
Arne Jansen507903b2011-04-06 10:02:20 +00002893 struct extent_state *cached = NULL;
2894
David Sterba306e16c2011-04-19 14:29:38 +02002895 iosize = PAGE_CACHE_SIZE - pg_offset;
Cong Wang7ac687d2011-11-25 23:14:28 +08002896 userpage = kmap_atomic(page);
David Sterba306e16c2011-04-19 14:29:38 +02002897 memset(userpage + pg_offset, 0, iosize);
Chris Masond1310b22008-01-24 16:13:08 -05002898 flush_dcache_page(page);
Cong Wang7ac687d2011-11-25 23:14:28 +08002899 kunmap_atomic(userpage);
Chris Masond1310b22008-01-24 16:13:08 -05002900 set_extent_uptodate(tree, cur, cur + iosize - 1,
Arne Jansen507903b2011-04-06 10:02:20 +00002901 &cached, GFP_NOFS);
Mark Fasheh4b384312013-08-06 11:42:50 -07002902 if (!parent_locked)
2903 unlock_extent_cached(tree, cur,
2904 cur + iosize - 1,
2905 &cached, GFP_NOFS);
Chris Masond1310b22008-01-24 16:13:08 -05002906 break;
2907 }
Miao Xie125bac012013-07-25 19:22:37 +08002908 em = __get_extent_map(inode, page, pg_offset, cur,
2909 end - cur + 1, get_extent, em_cached);
David Sterbac7040052011-04-19 18:00:01 +02002910 if (IS_ERR_OR_NULL(em)) {
Chris Masond1310b22008-01-24 16:13:08 -05002911 SetPageError(page);
Mark Fasheh4b384312013-08-06 11:42:50 -07002912 if (!parent_locked)
2913 unlock_extent(tree, cur, end);
Chris Masond1310b22008-01-24 16:13:08 -05002914 break;
2915 }
Chris Masond1310b22008-01-24 16:13:08 -05002916 extent_offset = cur - em->start;
2917 BUG_ON(extent_map_end(em) <= cur);
2918 BUG_ON(end < cur);
2919
Li Zefan261507a02010-12-17 14:21:50 +08002920 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
Mark Fasheh4b384312013-08-06 11:42:50 -07002921 this_bio_flag |= EXTENT_BIO_COMPRESSED;
Li Zefan261507a02010-12-17 14:21:50 +08002922 extent_set_compress_type(&this_bio_flag,
2923 em->compress_type);
2924 }
Chris Masonc8b97812008-10-29 14:49:59 -04002925
Chris Masond1310b22008-01-24 16:13:08 -05002926 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2927 cur_end = min(extent_map_end(em) - 1, end);
Qu Wenruofda28322013-02-26 08:10:22 +00002928 iosize = ALIGN(iosize, blocksize);
Chris Masonc8b97812008-10-29 14:49:59 -04002929 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2930 disk_io_size = em->block_len;
2931 sector = em->block_start >> 9;
2932 } else {
2933 sector = (em->block_start + extent_offset) >> 9;
2934 disk_io_size = iosize;
2935 }
Chris Masond1310b22008-01-24 16:13:08 -05002936 bdev = em->bdev;
2937 block_start = em->block_start;
Yan Zhengd899e052008-10-30 14:25:28 -04002938 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2939 block_start = EXTENT_MAP_HOLE;
Chris Masond1310b22008-01-24 16:13:08 -05002940 free_extent_map(em);
2941 em = NULL;
2942
2943 /* we've found a hole, just zero and go on */
2944 if (block_start == EXTENT_MAP_HOLE) {
2945 char *userpage;
Arne Jansen507903b2011-04-06 10:02:20 +00002946 struct extent_state *cached = NULL;
2947
Cong Wang7ac687d2011-11-25 23:14:28 +08002948 userpage = kmap_atomic(page);
David Sterba306e16c2011-04-19 14:29:38 +02002949 memset(userpage + pg_offset, 0, iosize);
Chris Masond1310b22008-01-24 16:13:08 -05002950 flush_dcache_page(page);
Cong Wang7ac687d2011-11-25 23:14:28 +08002951 kunmap_atomic(userpage);
Chris Masond1310b22008-01-24 16:13:08 -05002952
2953 set_extent_uptodate(tree, cur, cur + iosize - 1,
Arne Jansen507903b2011-04-06 10:02:20 +00002954 &cached, GFP_NOFS);
2955 unlock_extent_cached(tree, cur, cur + iosize - 1,
2956 &cached, GFP_NOFS);
Chris Masond1310b22008-01-24 16:13:08 -05002957 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02002958 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05002959 continue;
2960 }
2961 /* the get_extent function already copied into the page */
Chris Mason9655d292009-09-02 15:22:30 -04002962 if (test_range_bit(tree, cur, cur_end,
2963 EXTENT_UPTODATE, 1, NULL)) {
Chris Masona1b32a52008-09-05 16:09:51 -04002964 check_page_uptodate(tree, page);
Mark Fasheh4b384312013-08-06 11:42:50 -07002965 if (!parent_locked)
2966 unlock_extent(tree, cur, cur + iosize - 1);
Chris Masond1310b22008-01-24 16:13:08 -05002967 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02002968 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05002969 continue;
2970 }
Chris Mason70dec802008-01-29 09:59:12 -05002971 /* we have an inline extent but it didn't get marked up
2972 * to date. Error out
2973 */
2974 if (block_start == EXTENT_MAP_INLINE) {
2975 SetPageError(page);
Mark Fasheh4b384312013-08-06 11:42:50 -07002976 if (!parent_locked)
2977 unlock_extent(tree, cur, cur + iosize - 1);
Chris Mason70dec802008-01-29 09:59:12 -05002978 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02002979 pg_offset += iosize;
Chris Mason70dec802008-01-29 09:59:12 -05002980 continue;
2981 }
Chris Masond1310b22008-01-24 16:13:08 -05002982
Josef Bacikc8f2f242013-02-11 11:33:00 -05002983 pnr -= page->index;
Josef Bacikd4c7ca82013-04-19 19:49:09 -04002984 ret = submit_extent_page(rw, tree, page,
David Sterba306e16c2011-04-19 14:29:38 +02002985 sector, disk_io_size, pg_offset,
Chris Mason89642222008-07-24 09:41:53 -04002986 bdev, bio, pnr,
Chris Masonc8b97812008-10-29 14:49:59 -04002987 end_bio_extent_readpage, mirror_num,
2988 *bio_flags,
2989 this_bio_flag);
Josef Bacikc8f2f242013-02-11 11:33:00 -05002990 if (!ret) {
2991 nr++;
2992 *bio_flags = this_bio_flag;
2993 } else {
Chris Masond1310b22008-01-24 16:13:08 -05002994 SetPageError(page);
Mark Fasheh4b384312013-08-06 11:42:50 -07002995 if (!parent_locked)
2996 unlock_extent(tree, cur, cur + iosize - 1);
Josef Bacikedd33c92012-10-05 16:40:32 -04002997 }
Chris Masond1310b22008-01-24 16:13:08 -05002998 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02002999 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003000 }
Dan Magenheimer90a887c2011-05-26 10:01:56 -06003001out:
Chris Masond1310b22008-01-24 16:13:08 -05003002 if (!nr) {
3003 if (!PageError(page))
3004 SetPageUptodate(page);
3005 unlock_page(page);
3006 }
3007 return 0;
3008}
3009
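/*
 * Read a run of pages that are contiguous in the file.  The whole range
 * is locked in the io tree first, waiting out any ordered extents that
 * overlap it, and then each page is read with __do_readpage().
 */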
Miao Xie99740902013-07-25 19:22:36 +08003010static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
3011 struct page *pages[], int nr_pages,
3012 u64 start, u64 end,
3013 get_extent_t *get_extent,
Miao Xie125bac012013-07-25 19:22:37 +08003014 struct extent_map **em_cached,
Miao Xie99740902013-07-25 19:22:36 +08003015 struct bio **bio, int mirror_num,
3016 unsigned long *bio_flags, int rw)
3017{
3018 struct inode *inode;
3019 struct btrfs_ordered_extent *ordered;
3020 int index;
3021
3022 inode = pages[0]->mapping->host;
3023 while (1) {
3024 lock_extent(tree, start, end);
3025 ordered = btrfs_lookup_ordered_range(inode, start,
3026 end - start + 1);
3027 if (!ordered)
3028 break;
3029 unlock_extent(tree, start, end);
3030 btrfs_start_ordered_extent(inode, ordered, 1);
3031 btrfs_put_ordered_extent(ordered);
3032 }
3033
3034 for (index = 0; index < nr_pages; index++) {
Miao Xie125bac012013-07-25 19:22:37 +08003035 __do_readpage(tree, pages[index], get_extent, em_cached, bio,
3036 mirror_num, bio_flags, rw);
Miao Xie99740902013-07-25 19:22:36 +08003037 page_cache_release(pages[index]);
3038 }
3039}
3040
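/*
 * Walk the readahead pages and batch them into runs that are contiguous
 * in file offset, so the locking and ordered-extent waiting done by
 * __do_contiguous_readpages() happens once per run instead of per page.
 */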
3041static void __extent_readpages(struct extent_io_tree *tree,
3042 struct page *pages[],
3043 int nr_pages, get_extent_t *get_extent,
Miao Xie125bac012013-07-25 19:22:37 +08003044 struct extent_map **em_cached,
Miao Xie99740902013-07-25 19:22:36 +08003045 struct bio **bio, int mirror_num,
3046 unsigned long *bio_flags, int rw)
3047{
Stefan Behrens35a36212013-08-14 18:12:25 +02003048 u64 start = 0;
Miao Xie99740902013-07-25 19:22:36 +08003049 u64 end = 0;
3050 u64 page_start;
3051 int index;
Stefan Behrens35a36212013-08-14 18:12:25 +02003052 int first_index = 0;
Miao Xie99740902013-07-25 19:22:36 +08003053
3054 for (index = 0; index < nr_pages; index++) {
3055 page_start = page_offset(pages[index]);
3056 if (!end) {
3057 start = page_start;
3058 end = start + PAGE_CACHE_SIZE - 1;
3059 first_index = index;
3060 } else if (end + 1 == page_start) {
3061 end += PAGE_CACHE_SIZE;
3062 } else {
3063 __do_contiguous_readpages(tree, &pages[first_index],
3064 index - first_index, start,
Miao Xie125bac012013-07-25 19:22:37 +08003065 end, get_extent, em_cached,
3066 bio, mirror_num, bio_flags,
3067 rw);
Miao Xie99740902013-07-25 19:22:36 +08003068 start = page_start;
3069 end = start + PAGE_CACHE_SIZE - 1;
3070 first_index = index;
3071 }
3072 }
3073
3074 if (end)
3075 __do_contiguous_readpages(tree, &pages[first_index],
3076 index - first_index, start,
Miao Xie125bac012013-07-25 19:22:37 +08003077 end, get_extent, em_cached, bio,
Miao Xie99740902013-07-25 19:22:36 +08003078 mirror_num, bio_flags, rw);
3079}
3080
3081static int __extent_read_full_page(struct extent_io_tree *tree,
3082 struct page *page,
3083 get_extent_t *get_extent,
3084 struct bio **bio, int mirror_num,
3085 unsigned long *bio_flags, int rw)
3086{
3087 struct inode *inode = page->mapping->host;
3088 struct btrfs_ordered_extent *ordered;
3089 u64 start = page_offset(page);
3090 u64 end = start + PAGE_CACHE_SIZE - 1;
3091 int ret;
3092
3093 while (1) {
3094 lock_extent(tree, start, end);
3095 ordered = btrfs_lookup_ordered_extent(inode, start);
3096 if (!ordered)
3097 break;
3098 unlock_extent(tree, start, end);
3099 btrfs_start_ordered_extent(inode, ordered, 1);
3100 btrfs_put_ordered_extent(ordered);
3101 }
3102
Miao Xie125bac012013-07-25 19:22:37 +08003103 ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
3104 bio_flags, rw);
Miao Xie99740902013-07-25 19:22:36 +08003105 return ret;
3106}
3107
Chris Masond1310b22008-01-24 16:13:08 -05003108int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
Jan Schmidt8ddc7d92011-06-13 20:02:58 +02003109 get_extent_t *get_extent, int mirror_num)
Chris Masond1310b22008-01-24 16:13:08 -05003110{
3111 struct bio *bio = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04003112 unsigned long bio_flags = 0;
Chris Masond1310b22008-01-24 16:13:08 -05003113 int ret;
3114
Jan Schmidt8ddc7d92011-06-13 20:02:58 +02003115 ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
Josef Bacikd4c7ca82013-04-19 19:49:09 -04003116 &bio_flags, READ);
Chris Masond1310b22008-01-24 16:13:08 -05003117 if (bio)
Jan Schmidt8ddc7d92011-06-13 20:02:58 +02003118 ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05003119 return ret;
3120}
Chris Masond1310b22008-01-24 16:13:08 -05003121
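/*
 * A filesystem typically exposes this as its ->readpage; hypothetical
 * sketch (the names below are assumptions, not taken from this file):
 *
 *	static int foo_readpage(struct file *file, struct page *page)
 *	{
 *		struct extent_io_tree *tree =
 *			&BTRFS_I(page->mapping->host)->io_tree;
 *
 *		return extent_read_full_page(tree, page, btrfs_get_extent, 0);
 *	}
 */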
Mark Fasheh4b384312013-08-06 11:42:50 -07003122int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page,
3123 get_extent_t *get_extent, int mirror_num)
3124{
3125 struct bio *bio = NULL;
3126 unsigned long bio_flags = EXTENT_BIO_PARENT_LOCKED;
3127 int ret;
3128
3129 ret = __do_readpage(tree, page, get_extent, NULL, &bio, mirror_num,
3130 &bio_flags, READ);
3131 if (bio)
3132 ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
3133 return ret;
3134}
3135
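/*
 * Charge freshly written pages against wbc->nr_to_write and, for cyclic
 * or whole-file writeback, advance writeback_index so the next pass
 * resumes right after the pages we just wrote.
 */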
Chris Mason11c83492009-04-20 15:50:09 -04003136static noinline void update_nr_written(struct page *page,
3137 struct writeback_control *wbc,
3138 unsigned long nr_written)
3139{
3140 wbc->nr_to_write -= nr_written;
3141 if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
3142 wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
3143 page->mapping->writeback_index = page->index + nr_written;
3144}
3145
Chris Masond1310b22008-01-24 16:13:08 -05003146/*
Chris Mason40f76582014-05-21 13:35:51 -07003147 * helper for __extent_writepage, doing all of the delayed allocation setup.
3148 *
3149 * This returns 1 if our fill_delalloc function did all the work required
3150 * to write the page (copy into inline extent). In this case the IO has
3151 * been started and the page is already unlocked.
3152 *
3153 * This returns 0 if all went well (page still locked)
3154 * This returns < 0 if there were errors (page still locked)
Chris Masond1310b22008-01-24 16:13:08 -05003155 */
Chris Mason40f76582014-05-21 13:35:51 -07003156static noinline_for_stack int writepage_delalloc(struct inode *inode,
3157 struct page *page, struct writeback_control *wbc,
3158 struct extent_page_data *epd,
3159 u64 delalloc_start,
3160 unsigned long *nr_written)
Chris Masond1310b22008-01-24 16:13:08 -05003161{
Chris Mason40f76582014-05-21 13:35:51 -07003162 struct extent_io_tree *tree = epd->tree;
3163 u64 page_end = delalloc_start + PAGE_CACHE_SIZE - 1;
3164 u64 nr_delalloc;
3165 u64 delalloc_to_write = 0;
3166 u64 delalloc_end = 0;
3167 int ret;
3168 int page_started = 0;
3169
3170 if (epd->extent_locked || !tree->ops || !tree->ops->fill_delalloc)
3171 return 0;
3172
3173 while (delalloc_end < page_end) {
3174 nr_delalloc = find_lock_delalloc_range(inode, tree,
3175 page,
3176 &delalloc_start,
3177 &delalloc_end,
3178 128 * 1024 * 1024);
3179 if (nr_delalloc == 0) {
3180 delalloc_start = delalloc_end + 1;
3181 continue;
3182 }
3183 ret = tree->ops->fill_delalloc(inode, page,
3184 delalloc_start,
3185 delalloc_end,
3186 &page_started,
3187 nr_written);
3188 /* File system has been set read-only */
3189 if (ret) {
3190 SetPageError(page);
3191			/* fill_delalloc should return < 0 for an error, but
3192			 * just in case it doesn't: here > 0 means the IO was
3193			 * started, so don't return > 0 unless things really
3194			 * went well.
3195 */
3196 ret = ret < 0 ? ret : -EIO;
3197 goto done;
3198 }
3199 /*
3200 * delalloc_end is already one less than the total
3201 * length, so we don't subtract one from
3202 * PAGE_CACHE_SIZE
3203 */
3204 delalloc_to_write += (delalloc_end - delalloc_start +
3205 PAGE_CACHE_SIZE) >>
3206 PAGE_CACHE_SHIFT;
3207 delalloc_start = delalloc_end + 1;
3208 }
3209 if (wbc->nr_to_write < delalloc_to_write) {
3210 int thresh = 8192;
3211
3212 if (delalloc_to_write < thresh * 2)
3213 thresh = delalloc_to_write;
3214 wbc->nr_to_write = min_t(u64, delalloc_to_write,
3215 thresh);
3216 }
3217
3218 /* did the fill delalloc function already unlock and start
3219 * the IO?
3220 */
3221 if (page_started) {
3222 /*
3223 * we've unlocked the page, so we can't update
3224 * the mapping's writeback index, just update
3225 * nr_to_write.
3226 */
3227 wbc->nr_to_write -= *nr_written;
3228 return 1;
3229 }
3230
3231 ret = 0;
3232
3233done:
3234 return ret;
3235}
3236
3237/*
3238 * helper for __extent_writepage. This calls the writepage start hooks,
3239 * and does the loop to map the page into extents and bios.
3240 *
3241 * We return 1 if the IO is started and the page is unlocked,
3242 * 0 if all went well (page still locked)
3243 * < 0 if there were errors (page still locked)
3244 */
3245static noinline_for_stack int __extent_writepage_io(struct inode *inode,
3246 struct page *page,
3247 struct writeback_control *wbc,
3248 struct extent_page_data *epd,
3249 loff_t i_size,
3250 unsigned long nr_written,
3251 int write_flags, int *nr_ret)
3252{
Chris Masond1310b22008-01-24 16:13:08 -05003253 struct extent_io_tree *tree = epd->tree;
Miao Xie4eee4fa2012-12-21 09:17:45 +00003254 u64 start = page_offset(page);
Chris Masond1310b22008-01-24 16:13:08 -05003255 u64 page_end = start + PAGE_CACHE_SIZE - 1;
3256 u64 end;
3257 u64 cur = start;
3258 u64 extent_offset;
Chris Masond1310b22008-01-24 16:13:08 -05003259 u64 block_start;
3260 u64 iosize;
3261 sector_t sector;
Chris Mason2c64c532009-09-02 15:04:12 -04003262 struct extent_state *cached_state = NULL;
Chris Masond1310b22008-01-24 16:13:08 -05003263 struct extent_map *em;
3264 struct block_device *bdev;
Chris Mason7f3c74f2008-07-18 12:01:11 -04003265 size_t pg_offset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05003266 size_t blocksize;
Chris Mason40f76582014-05-21 13:35:51 -07003267 int ret = 0;
3268 int nr = 0;
3269 bool compressed;
Chris Masond1310b22008-01-24 16:13:08 -05003270
Chris Mason247e7432008-07-17 12:53:51 -04003271 if (tree->ops && tree->ops->writepage_start_hook) {
Chris Masonc8b97812008-10-29 14:49:59 -04003272 ret = tree->ops->writepage_start_hook(page, start,
3273 page_end);
Jeff Mahoney87826df2012-02-15 16:23:57 +01003274 if (ret) {
3275 /* Fixup worker will requeue */
3276 if (ret == -EBUSY)
3277 wbc->pages_skipped++;
3278 else
3279 redirty_page_for_writepage(wbc, page);
Chris Mason40f76582014-05-21 13:35:51 -07003280
Chris Mason11c83492009-04-20 15:50:09 -04003281 update_nr_written(page, wbc, nr_written);
Chris Mason247e7432008-07-17 12:53:51 -04003282 unlock_page(page);
Chris Mason40f76582014-05-21 13:35:51 -07003283 ret = 1;
Chris Mason11c83492009-04-20 15:50:09 -04003284 goto done_unlocked;
Chris Mason247e7432008-07-17 12:53:51 -04003285 }
3286 }
3287
Chris Mason11c83492009-04-20 15:50:09 -04003288 /*
3289 * we don't want to touch the inode after unlocking the page,
3290 * so we update the mapping writeback index now
3291 */
3292 update_nr_written(page, wbc, nr_written + 1);
Chris Mason771ed682008-11-06 22:02:51 -05003293
Chris Masond1310b22008-01-24 16:13:08 -05003294 end = page_end;
Chris Mason40f76582014-05-21 13:35:51 -07003295 if (i_size <= start) {
Chris Masone6dcd2d2008-07-17 12:53:50 -04003296 if (tree->ops && tree->ops->writepage_end_io_hook)
3297 tree->ops->writepage_end_io_hook(page, start,
3298 page_end, NULL, 1);
Chris Masond1310b22008-01-24 16:13:08 -05003299 goto done;
3300 }
3301
Chris Masond1310b22008-01-24 16:13:08 -05003302 blocksize = inode->i_sb->s_blocksize;
3303
3304 while (cur <= end) {
Chris Mason40f76582014-05-21 13:35:51 -07003305 u64 em_end;
3306 if (cur >= i_size) {
Chris Masone6dcd2d2008-07-17 12:53:50 -04003307 if (tree->ops && tree->ops->writepage_end_io_hook)
3308 tree->ops->writepage_end_io_hook(page, cur,
3309 page_end, NULL, 1);
Chris Masond1310b22008-01-24 16:13:08 -05003310 break;
3311 }
Chris Mason7f3c74f2008-07-18 12:01:11 -04003312 em = epd->get_extent(inode, page, pg_offset, cur,
Chris Masond1310b22008-01-24 16:13:08 -05003313 end - cur + 1, 1);
David Sterbac7040052011-04-19 18:00:01 +02003314 if (IS_ERR_OR_NULL(em)) {
Chris Masond1310b22008-01-24 16:13:08 -05003315 SetPageError(page);
Filipe Manana61391d52014-05-09 17:17:40 +01003316 ret = PTR_ERR_OR_ZERO(em);
Chris Masond1310b22008-01-24 16:13:08 -05003317 break;
3318 }
3319
3320 extent_offset = cur - em->start;
Chris Mason40f76582014-05-21 13:35:51 -07003321 em_end = extent_map_end(em);
3322 BUG_ON(em_end <= cur);
Chris Masond1310b22008-01-24 16:13:08 -05003323 BUG_ON(end < cur);
Chris Mason40f76582014-05-21 13:35:51 -07003324 iosize = min(em_end - cur, end - cur + 1);
Qu Wenruofda28322013-02-26 08:10:22 +00003325 iosize = ALIGN(iosize, blocksize);
Chris Masond1310b22008-01-24 16:13:08 -05003326 sector = (em->block_start + extent_offset) >> 9;
3327 bdev = em->bdev;
3328 block_start = em->block_start;
Chris Masonc8b97812008-10-29 14:49:59 -04003329 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
Chris Masond1310b22008-01-24 16:13:08 -05003330 free_extent_map(em);
3331 em = NULL;
3332
Chris Masonc8b97812008-10-29 14:49:59 -04003333 /*
3334 * compressed and inline extents are written through other
3335 * paths in the FS
3336 */
3337 if (compressed || block_start == EXTENT_MAP_HOLE ||
Chris Masond1310b22008-01-24 16:13:08 -05003338 block_start == EXTENT_MAP_INLINE) {
Chris Masonc8b97812008-10-29 14:49:59 -04003339 /*
3340 * end_io notification does not happen here for
3341 * compressed extents
3342 */
3343 if (!compressed && tree->ops &&
3344 tree->ops->writepage_end_io_hook)
Chris Masone6dcd2d2008-07-17 12:53:50 -04003345 tree->ops->writepage_end_io_hook(page, cur,
3346 cur + iosize - 1,
3347 NULL, 1);
Chris Masonc8b97812008-10-29 14:49:59 -04003348 else if (compressed) {
3349 /* we don't want to end_page_writeback on
3350 * a compressed extent. this happens
3351 * elsewhere
3352 */
3353 nr++;
3354 }
3355
3356 cur += iosize;
Chris Mason7f3c74f2008-07-18 12:01:11 -04003357 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003358 continue;
3359 }
Chris Masonc8b97812008-10-29 14:49:59 -04003360
Chris Masond1310b22008-01-24 16:13:08 -05003361 if (tree->ops && tree->ops->writepage_io_hook) {
3362 ret = tree->ops->writepage_io_hook(page, cur,
3363 cur + iosize - 1);
3364 } else {
3365 ret = 0;
3366 }
Chris Mason1259ab72008-05-12 13:39:03 -04003367 if (ret) {
Chris Masond1310b22008-01-24 16:13:08 -05003368 SetPageError(page);
Chris Mason1259ab72008-05-12 13:39:03 -04003369 } else {
Chris Mason40f76582014-05-21 13:35:51 -07003370 unsigned long max_nr = (i_size >> PAGE_CACHE_SHIFT) + 1;
Chris Mason7f3c74f2008-07-18 12:01:11 -04003371
Chris Masond1310b22008-01-24 16:13:08 -05003372 set_range_writeback(tree, cur, cur + iosize - 1);
3373 if (!PageWriteback(page)) {
Frank Holtonefe120a2013-12-20 11:37:06 -05003374 btrfs_err(BTRFS_I(inode)->root->fs_info,
3375 "page %lu not writeback, cur %llu end %llu",
Geert Uytterhoevenc1c9ff72013-08-20 13:20:07 +02003376 page->index, cur, end);
Chris Masond1310b22008-01-24 16:13:08 -05003377 }
3378
Chris Masonffbd5172009-04-20 15:50:09 -04003379 ret = submit_extent_page(write_flags, tree, page,
3380 sector, iosize, pg_offset,
3381 bdev, &epd->bio, max_nr,
Chris Masonc8b97812008-10-29 14:49:59 -04003382 end_bio_extent_writepage,
3383 0, 0, 0);
Chris Masond1310b22008-01-24 16:13:08 -05003384 if (ret)
3385 SetPageError(page);
3386 }
3387 cur = cur + iosize;
Chris Mason7f3c74f2008-07-18 12:01:11 -04003388 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003389 nr++;
3390 }
3391done:
Chris Mason40f76582014-05-21 13:35:51 -07003392 *nr_ret = nr;
Chris Mason771ed682008-11-06 22:02:51 -05003393
Chris Mason11c83492009-04-20 15:50:09 -04003394done_unlocked:
3395
Chris Mason2c64c532009-09-02 15:04:12 -04003396 /* drop our reference on any cached states */
3397 free_extent_state(cached_state);
Chris Mason40f76582014-05-21 13:35:51 -07003398 return ret;
3399}
3400
3401/*
3402 * the writepage semantics are similar to regular writepage. extent
3403 * records are inserted to lock ranges in the tree, and as dirty areas
3404 * are found, they are marked writeback. Then the lock bits are removed
3405 * and the end_io handler clears the writeback ranges
3406 */
3407static int __extent_writepage(struct page *page, struct writeback_control *wbc,
3408 void *data)
3409{
3410 struct inode *inode = page->mapping->host;
3411 struct extent_page_data *epd = data;
3412 u64 start = page_offset(page);
3413 u64 page_end = start + PAGE_CACHE_SIZE - 1;
3414 int ret;
3415 int nr = 0;
3416 size_t pg_offset = 0;
3417 loff_t i_size = i_size_read(inode);
3418 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
3419 int write_flags;
3420 unsigned long nr_written = 0;
3421
3422 if (wbc->sync_mode == WB_SYNC_ALL)
3423 write_flags = WRITE_SYNC;
3424 else
3425 write_flags = WRITE;
3426
3427 trace___extent_writepage(page, inode, wbc);
3428
3429 WARN_ON(!PageLocked(page));
3430
3431 ClearPageError(page);
3432
3433 pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
3434 if (page->index > end_index ||
3435 (page->index == end_index && !pg_offset)) {
3436 page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
3437 unlock_page(page);
3438 return 0;
3439 }
3440
3441 if (page->index == end_index) {
3442 char *userpage;
3443
3444 userpage = kmap_atomic(page);
3445 memset(userpage + pg_offset, 0,
3446 PAGE_CACHE_SIZE - pg_offset);
3447 kunmap_atomic(userpage);
3448 flush_dcache_page(page);
3449 }
3450
3451 pg_offset = 0;
3452
3453 set_page_extent_mapped(page);
3454
3455 ret = writepage_delalloc(inode, page, wbc, epd, start, &nr_written);
3456 if (ret == 1)
3457 goto done_unlocked;
3458 if (ret)
3459 goto done;
3460
3461 ret = __extent_writepage_io(inode, page, wbc, epd,
3462 i_size, nr_written, write_flags, &nr);
3463 if (ret == 1)
3464 goto done_unlocked;
3465
3466done:
Chris Masone6dcd2d2008-07-17 12:53:50 -04003467 if (nr == 0) {
Chris Masond1310b22008-01-24 16:13:08 -05003468 /* make sure the mapping tag for page dirty gets cleared */
Chris Mason771ed682008-11-06 22:02:51 -05003469 set_page_writeback(page);
3470 end_page_writeback(page);
3471 }
Filipe Manana61391d52014-05-09 17:17:40 +01003472 if (PageError(page)) {
3473 ret = ret < 0 ? ret : -EIO;
3474 end_extent_writepage(page, ret, start, page_end);
3475 }
Christoph Hellwigb2950862008-12-02 09:54:17 -05003476 unlock_page(page);
Chris Mason40f76582014-05-21 13:35:51 -07003477 return ret;
Chris Mason4bef0842008-09-08 11:18:08 -04003478
3479done_unlocked:
Chris Masond1310b22008-01-24 16:13:08 -05003480 return 0;
3481}
3482
Josef Bacikfd8b2b62013-04-24 16:41:19 -04003483void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003484{
NeilBrown74316202014-07-07 15:16:04 +10003485 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
3486 TASK_UNINTERRUPTIBLE);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003487}
3488
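/*
 * Take the tree write lock, wait out any writeback already running on
 * the eb, and move its dirty bit over to EXTENT_BUFFER_WRITEBACK.
 * Returns 1 with every eb page locked when the buffer needs writing,
 * 0 when it was clean (or still busy and epd->sync_io is not set).
 */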
Chris Mason0e378df2014-05-19 20:55:27 -07003489static noinline_for_stack int
3490lock_extent_buffer_for_io(struct extent_buffer *eb,
3491 struct btrfs_fs_info *fs_info,
3492 struct extent_page_data *epd)
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003493{
3494 unsigned long i, num_pages;
3495 int flush = 0;
3496 int ret = 0;
3497
3498 if (!btrfs_try_tree_write_lock(eb)) {
3499 flush = 1;
3500 flush_write_bio(epd);
3501 btrfs_tree_lock(eb);
3502 }
3503
3504 if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
3505 btrfs_tree_unlock(eb);
3506 if (!epd->sync_io)
3507 return 0;
3508 if (!flush) {
3509 flush_write_bio(epd);
3510 flush = 1;
3511 }
Chris Masona098d8e2012-03-21 12:09:56 -04003512 while (1) {
3513 wait_on_extent_buffer_writeback(eb);
3514 btrfs_tree_lock(eb);
3515 if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
3516 break;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003517 btrfs_tree_unlock(eb);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003518 }
3519 }
3520
Josef Bacik51561ff2012-07-20 16:25:24 -04003521 /*
3522	 * We need to do this to prevent races with callers that check whether
3523	 * the eb is under IO, since we can end up having no IO bits set for a
3524	 * short period of time.
3525 */
3526 spin_lock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003527 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3528 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
Josef Bacik51561ff2012-07-20 16:25:24 -04003529 spin_unlock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003530 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
Miao Xiee2d84522013-01-29 10:09:20 +00003531 __percpu_counter_add(&fs_info->dirty_metadata_bytes,
3532 -eb->len,
3533 fs_info->dirty_metadata_batch);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003534 ret = 1;
Josef Bacik51561ff2012-07-20 16:25:24 -04003535 } else {
3536 spin_unlock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003537 }
3538
3539 btrfs_tree_unlock(eb);
3540
3541 if (!ret)
3542 return ret;
3543
3544 num_pages = num_extent_pages(eb->start, eb->len);
3545 for (i = 0; i < num_pages; i++) {
3546 struct page *p = extent_buffer_page(eb, i);
3547
3548 if (!trylock_page(p)) {
3549 if (!flush) {
3550 flush_write_bio(epd);
3551 flush = 1;
3552 }
3553 lock_page(p);
3554 }
3555 }
3556
3557 return ret;
3558}
3559
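/*
 * Drop the writeback bit on the eb and wake anybody sleeping in
 * wait_on_extent_buffer_writeback().
 */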
3560static void end_extent_buffer_writeback(struct extent_buffer *eb)
3561{
3562 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
Peter Zijlstra4e857c52014-03-17 18:06:10 +01003563 smp_mb__after_atomic();
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003564 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
3565}
3566
3567static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
3568{
Kent Overstreet2c30c712013-11-07 12:20:26 -08003569 struct bio_vec *bvec;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003570 struct extent_buffer *eb;
Kent Overstreet2c30c712013-11-07 12:20:26 -08003571 int i, done;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003572
Kent Overstreet2c30c712013-11-07 12:20:26 -08003573 bio_for_each_segment_all(bvec, bio, i) {
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003574 struct page *page = bvec->bv_page;
3575
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003576 eb = (struct extent_buffer *)page->private;
3577 BUG_ON(!eb);
3578 done = atomic_dec_and_test(&eb->io_pages);
3579
Kent Overstreet2c30c712013-11-07 12:20:26 -08003580 if (err || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003581 set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3582 ClearPageUptodate(page);
3583 SetPageError(page);
3584 }
3585
3586 end_page_writeback(page);
3587
3588 if (!done)
3589 continue;
3590
3591 end_extent_buffer_writeback(eb);
Kent Overstreet2c30c712013-11-07 12:20:26 -08003592 }
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003593
3594 bio_put(bio);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003595}
3596
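/*
 * Write out all pages of a single extent buffer.  Tree-log buffers are
 * tagged with EXTENT_BIO_TREE_LOG, which keeps them out of bios carrying
 * different flags; on a submission error the remaining pages are
 * unlocked and writeback on the eb is ended early.
 */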
Chris Mason0e378df2014-05-19 20:55:27 -07003597static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003598 struct btrfs_fs_info *fs_info,
3599 struct writeback_control *wbc,
3600 struct extent_page_data *epd)
3601{
3602 struct block_device *bdev = fs_info->fs_devices->latest_bdev;
Josef Bacikf28491e2013-12-16 13:24:27 -05003603 struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003604 u64 offset = eb->start;
3605 unsigned long i, num_pages;
Josef Bacikde0022b2012-09-25 14:25:58 -04003606 unsigned long bio_flags = 0;
Josef Bacikd4c7ca82013-04-19 19:49:09 -04003607 int rw = (epd->sync_io ? WRITE_SYNC : WRITE) | REQ_META;
Josef Bacikd7dbe9e2012-04-23 14:00:51 -04003608 int ret = 0;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003609
3610 clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3611 num_pages = num_extent_pages(eb->start, eb->len);
3612 atomic_set(&eb->io_pages, num_pages);
Josef Bacikde0022b2012-09-25 14:25:58 -04003613 if (btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID)
3614 bio_flags = EXTENT_BIO_TREE_LOG;
3615
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003616 for (i = 0; i < num_pages; i++) {
3617 struct page *p = extent_buffer_page(eb, i);
3618
3619 clear_page_dirty_for_io(p);
3620 set_page_writeback(p);
Josef Bacikf28491e2013-12-16 13:24:27 -05003621 ret = submit_extent_page(rw, tree, p, offset >> 9,
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003622 PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
3623 -1, end_bio_extent_buffer_writepage,
Josef Bacikde0022b2012-09-25 14:25:58 -04003624 0, epd->bio_flags, bio_flags);
3625 epd->bio_flags = bio_flags;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003626 if (ret) {
3627 set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3628 SetPageError(p);
3629 if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
3630 end_extent_buffer_writeback(eb);
3631 ret = -EIO;
3632 break;
3633 }
3634 offset += PAGE_CACHE_SIZE;
3635 update_nr_written(p, wbc, 1);
3636 unlock_page(p);
3637 }
3638
3639 if (unlikely(ret)) {
3640 for (; i < num_pages; i++) {
3641 struct page *p = extent_buffer_page(eb, i);
3642 unlock_page(p);
3643 }
3644 }
3645
3646 return ret;
3647}
3648
3649int btree_write_cache_pages(struct address_space *mapping,
3650 struct writeback_control *wbc)
3651{
3652 struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
3653 struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
3654 struct extent_buffer *eb, *prev_eb = NULL;
3655 struct extent_page_data epd = {
3656 .bio = NULL,
3657 .tree = tree,
3658 .extent_locked = 0,
3659 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
Josef Bacikde0022b2012-09-25 14:25:58 -04003660 .bio_flags = 0,
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003661 };
3662 int ret = 0;
3663 int done = 0;
3664 int nr_to_write_done = 0;
3665 struct pagevec pvec;
3666 int nr_pages;
3667 pgoff_t index;
3668 pgoff_t end; /* Inclusive */
3669 int scanned = 0;
3670 int tag;
3671
3672 pagevec_init(&pvec, 0);
3673 if (wbc->range_cyclic) {
3674 index = mapping->writeback_index; /* Start from prev offset */
3675 end = -1;
3676 } else {
3677 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3678 end = wbc->range_end >> PAGE_CACHE_SHIFT;
3679 scanned = 1;
3680 }
3681 if (wbc->sync_mode == WB_SYNC_ALL)
3682 tag = PAGECACHE_TAG_TOWRITE;
3683 else
3684 tag = PAGECACHE_TAG_DIRTY;
3685retry:
3686 if (wbc->sync_mode == WB_SYNC_ALL)
3687 tag_pages_for_writeback(mapping, index, end);
3688 while (!done && !nr_to_write_done && (index <= end) &&
3689 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3690 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3691 unsigned i;
3692
3693 scanned = 1;
3694 for (i = 0; i < nr_pages; i++) {
3695 struct page *page = pvec.pages[i];
3696
3697 if (!PagePrivate(page))
3698 continue;
3699
3700 if (!wbc->range_cyclic && page->index > end) {
3701 done = 1;
3702 break;
3703 }
3704
Josef Bacikb5bae262012-09-14 13:43:01 -04003705 spin_lock(&mapping->private_lock);
3706 if (!PagePrivate(page)) {
3707 spin_unlock(&mapping->private_lock);
3708 continue;
3709 }
3710
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003711 eb = (struct extent_buffer *)page->private;
Josef Bacikb5bae262012-09-14 13:43:01 -04003712
3713 /*
3714 * Shouldn't happen and normally this would be a BUG_ON
3715			 * but no sense in crashing the user's box for something
3716 * we can survive anyway.
3717 */
Dulshani Gunawardhanafae7f212013-10-31 10:30:08 +05303718 if (WARN_ON(!eb)) {
Josef Bacikb5bae262012-09-14 13:43:01 -04003719 spin_unlock(&mapping->private_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003720 continue;
3721 }
3722
Josef Bacikb5bae262012-09-14 13:43:01 -04003723 if (eb == prev_eb) {
3724 spin_unlock(&mapping->private_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003725 continue;
3726 }
3727
Josef Bacikb5bae262012-09-14 13:43:01 -04003728 ret = atomic_inc_not_zero(&eb->refs);
3729 spin_unlock(&mapping->private_lock);
3730 if (!ret)
3731 continue;
3732
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003733 prev_eb = eb;
3734 ret = lock_extent_buffer_for_io(eb, fs_info, &epd);
3735 if (!ret) {
3736 free_extent_buffer(eb);
3737 continue;
3738 }
3739
3740 ret = write_one_eb(eb, fs_info, wbc, &epd);
3741 if (ret) {
3742 done = 1;
3743 free_extent_buffer(eb);
3744 break;
3745 }
3746 free_extent_buffer(eb);
3747
3748 /*
3749 * the filesystem may choose to bump up nr_to_write.
3750 * We have to make sure to honor the new nr_to_write
3751 * at any time
3752 */
3753 nr_to_write_done = wbc->nr_to_write <= 0;
3754 }
3755 pagevec_release(&pvec);
3756 cond_resched();
3757 }
3758 if (!scanned && !done) {
3759 /*
3760 * We hit the last page and there is more work to be done: wrap
3761 * back to the start of the file
3762 */
3763 scanned = 1;
3764 index = 0;
3765 goto retry;
3766 }
3767 flush_write_bio(&epd);
3768 return ret;
3769}
3770
Chris Masond1310b22008-01-24 16:13:08 -05003771/**
3772 * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
3773 * @mapping: address space structure to write
3774 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
3775 * @writepage: function called for each page
3776 * @data: data passed to writepage function
3777 *
3778 * If a page is already under I/O, write_cache_pages() skips it, even
3779 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
3780 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
3781 * and msync() need to guarantee that all the data which was dirty at the time
3782 * the call was made get new I/O started against them. If wbc->sync_mode is
3783 * WB_SYNC_ALL then we were called for data integrity and we must wait for
3784 * existing IO to complete.
3785 */
Chris Mason4bef0842008-09-08 11:18:08 -04003786static int extent_write_cache_pages(struct extent_io_tree *tree,
3787 struct address_space *mapping,
3788 struct writeback_control *wbc,
Chris Masond2c3f4f2008-11-19 12:44:22 -05003789 writepage_t writepage, void *data,
3790 void (*flush_fn)(void *))
Chris Masond1310b22008-01-24 16:13:08 -05003791{
Josef Bacik7fd1a3f2012-06-27 17:18:41 -04003792 struct inode *inode = mapping->host;
Chris Masond1310b22008-01-24 16:13:08 -05003793 int ret = 0;
3794 int done = 0;
Filipe Manana61391d52014-05-09 17:17:40 +01003795 int err = 0;
Chris Masonf85d7d6c2009-09-18 16:03:16 -04003796 int nr_to_write_done = 0;
Chris Masond1310b22008-01-24 16:13:08 -05003797 struct pagevec pvec;
3798 int nr_pages;
3799 pgoff_t index;
3800 pgoff_t end; /* Inclusive */
3801 int scanned = 0;
Josef Bacikf7aaa062011-07-15 21:26:38 +00003802 int tag;
Chris Masond1310b22008-01-24 16:13:08 -05003803
Josef Bacik7fd1a3f2012-06-27 17:18:41 -04003804 /*
3805 * We have to hold onto the inode so that ordered extents can do their
3806 * work when the IO finishes. The alternative to this is failing to add
3807 * an ordered extent if the igrab() fails there and that is a huge pain
3808 * to deal with, so instead just hold onto the inode throughout the
3809 * writepages operation. If it fails here we are freeing up the inode
3810 * anyway and we'd rather not waste our time writing out stuff that is
3811 * going to be truncated anyway.
3812 */
3813 if (!igrab(inode))
3814 return 0;
3815
Chris Masond1310b22008-01-24 16:13:08 -05003816 pagevec_init(&pvec, 0);
3817 if (wbc->range_cyclic) {
3818 index = mapping->writeback_index; /* Start from prev offset */
3819 end = -1;
3820 } else {
3821 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3822 end = wbc->range_end >> PAGE_CACHE_SHIFT;
Chris Masond1310b22008-01-24 16:13:08 -05003823 scanned = 1;
3824 }
Josef Bacikf7aaa062011-07-15 21:26:38 +00003825 if (wbc->sync_mode == WB_SYNC_ALL)
3826 tag = PAGECACHE_TAG_TOWRITE;
3827 else
3828 tag = PAGECACHE_TAG_DIRTY;
Chris Masond1310b22008-01-24 16:13:08 -05003829retry:
Josef Bacikf7aaa062011-07-15 21:26:38 +00003830 if (wbc->sync_mode == WB_SYNC_ALL)
3831 tag_pages_for_writeback(mapping, index, end);
Chris Masonf85d7d6c2009-09-18 16:03:16 -04003832 while (!done && !nr_to_write_done && (index <= end) &&
Josef Bacikf7aaa062011-07-15 21:26:38 +00003833 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3834 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
Chris Masond1310b22008-01-24 16:13:08 -05003835 unsigned i;
3836
3837 scanned = 1;
3838 for (i = 0; i < nr_pages; i++) {
3839 struct page *page = pvec.pages[i];
3840
3841 /*
3842 * At this point we hold neither mapping->tree_lock nor
3843 * lock on the page itself: the page may be truncated or
3844 * invalidated (changing page->mapping to NULL), or even
3845 * swizzled back from swapper_space to tmpfs file
3846 * mapping
3847 */
Josef Bacikc8f2f242013-02-11 11:33:00 -05003848 if (!trylock_page(page)) {
3849 flush_fn(data);
3850 lock_page(page);
Chris Mason01d658f2011-11-01 10:08:06 -04003851 }
Chris Masond1310b22008-01-24 16:13:08 -05003852
3853 if (unlikely(page->mapping != mapping)) {
3854 unlock_page(page);
3855 continue;
3856 }
3857
3858 if (!wbc->range_cyclic && page->index > end) {
3859 done = 1;
3860 unlock_page(page);
3861 continue;
3862 }
3863
Chris Masond2c3f4f2008-11-19 12:44:22 -05003864 if (wbc->sync_mode != WB_SYNC_NONE) {
Chris Mason0e6bd952008-11-20 10:46:35 -05003865 if (PageWriteback(page))
3866 flush_fn(data);
Chris Masond1310b22008-01-24 16:13:08 -05003867 wait_on_page_writeback(page);
Chris Masond2c3f4f2008-11-19 12:44:22 -05003868 }
Chris Masond1310b22008-01-24 16:13:08 -05003869
3870 if (PageWriteback(page) ||
3871 !clear_page_dirty_for_io(page)) {
3872 unlock_page(page);
3873 continue;
3874 }
3875
3876 ret = (*writepage)(page, wbc, data);
3877
3878 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
3879 unlock_page(page);
3880 ret = 0;
3881 }
Filipe Manana61391d52014-05-09 17:17:40 +01003882 if (!err && ret < 0)
3883 err = ret;
Chris Masonf85d7d6c2009-09-18 16:03:16 -04003884
3885 /*
3886 * the filesystem may choose to bump up nr_to_write.
3887 * We have to make sure to honor the new nr_to_write
3888 * at any time
3889 */
3890 nr_to_write_done = wbc->nr_to_write <= 0;
Chris Masond1310b22008-01-24 16:13:08 -05003891 }
3892 pagevec_release(&pvec);
3893 cond_resched();
3894 }
Filipe Manana61391d52014-05-09 17:17:40 +01003895 if (!scanned && !done && !err) {
Chris Masond1310b22008-01-24 16:13:08 -05003896 /*
3897 * We hit the last page and there is more work to be done: wrap
3898 * back to the start of the file
3899 */
3900 scanned = 1;
3901 index = 0;
3902 goto retry;
3903 }
Josef Bacik7fd1a3f2012-06-27 17:18:41 -04003904 btrfs_add_delayed_iput(inode);
Filipe Manana61391d52014-05-09 17:17:40 +01003905 return err;
Chris Masond1310b22008-01-24 16:13:08 -05003906}
Chris Masond1310b22008-01-24 16:13:08 -05003907
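/*
 * Submit whatever bio is still open in the extent_page_data, using
 * WRITE_SYNC when the caller asked for synchronous IO.
 */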
Chris Masonffbd5172009-04-20 15:50:09 -04003908static void flush_epd_write_bio(struct extent_page_data *epd)
3909{
3910 if (epd->bio) {
Jeff Mahoney355808c2011-10-03 23:23:14 -04003911 int rw = WRITE;
3912 int ret;
3913
Chris Masonffbd5172009-04-20 15:50:09 -04003914 if (epd->sync_io)
Jeff Mahoney355808c2011-10-03 23:23:14 -04003915 rw = WRITE_SYNC;
3916
Josef Bacikde0022b2012-09-25 14:25:58 -04003917 ret = submit_one_bio(rw, epd->bio, 0, epd->bio_flags);
Jeff Mahoney79787ea2012-03-12 16:03:00 +01003918 BUG_ON(ret < 0); /* -ENOMEM */
Chris Masonffbd5172009-04-20 15:50:09 -04003919 epd->bio = NULL;
3920 }
3921}
3922
Chris Masond2c3f4f2008-11-19 12:44:22 -05003923static noinline void flush_write_bio(void *data)
3924{
3925 struct extent_page_data *epd = data;
Chris Masonffbd5172009-04-20 15:50:09 -04003926 flush_epd_write_bio(epd);
Chris Masond2c3f4f2008-11-19 12:44:22 -05003927}
3928
Chris Masond1310b22008-01-24 16:13:08 -05003929int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
3930 get_extent_t *get_extent,
3931 struct writeback_control *wbc)
3932{
3933 int ret;
Chris Masond1310b22008-01-24 16:13:08 -05003934 struct extent_page_data epd = {
3935 .bio = NULL,
3936 .tree = tree,
3937 .get_extent = get_extent,
Chris Mason771ed682008-11-06 22:02:51 -05003938 .extent_locked = 0,
Chris Masonffbd5172009-04-20 15:50:09 -04003939 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
Josef Bacikde0022b2012-09-25 14:25:58 -04003940 .bio_flags = 0,
Chris Masond1310b22008-01-24 16:13:08 -05003941 };
Chris Masond1310b22008-01-24 16:13:08 -05003942
Chris Masond1310b22008-01-24 16:13:08 -05003943 ret = __extent_writepage(page, wbc, &epd);
3944
Chris Masonffbd5172009-04-20 15:50:09 -04003945 flush_epd_write_bio(&epd);
Chris Masond1310b22008-01-24 16:13:08 -05003946 return ret;
3947}
Chris Masond1310b22008-01-24 16:13:08 -05003948
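/*
 * Typically wrapped as an address_space ->writepage; hypothetical sketch
 * (the names below are assumptions, not taken from this file):
 *
 *	static int foo_writepage(struct page *page,
 *				 struct writeback_control *wbc)
 *	{
 *		struct extent_io_tree *tree =
 *			&BTRFS_I(page->mapping->host)->io_tree;
 *
 *		return extent_write_full_page(tree, page, btrfs_get_extent,
 *					      wbc);
 *	}
 */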
Chris Mason771ed682008-11-06 22:02:51 -05003949int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
3950 u64 start, u64 end, get_extent_t *get_extent,
3951 int mode)
3952{
3953 int ret = 0;
3954 struct address_space *mapping = inode->i_mapping;
3955 struct page *page;
3956 unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
3957 PAGE_CACHE_SHIFT;
3958
3959 struct extent_page_data epd = {
3960 .bio = NULL,
3961 .tree = tree,
3962 .get_extent = get_extent,
3963 .extent_locked = 1,
Chris Masonffbd5172009-04-20 15:50:09 -04003964 .sync_io = mode == WB_SYNC_ALL,
Josef Bacikde0022b2012-09-25 14:25:58 -04003965 .bio_flags = 0,
Chris Mason771ed682008-11-06 22:02:51 -05003966 };
3967 struct writeback_control wbc_writepages = {
Chris Mason771ed682008-11-06 22:02:51 -05003968 .sync_mode = mode,
Chris Mason771ed682008-11-06 22:02:51 -05003969 .nr_to_write = nr_pages * 2,
3970 .range_start = start,
3971 .range_end = end + 1,
3972 };
3973
Chris Masond3977122009-01-05 21:25:51 -05003974 while (start <= end) {
Chris Mason771ed682008-11-06 22:02:51 -05003975 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
3976 if (clear_page_dirty_for_io(page))
3977 ret = __extent_writepage(page, &wbc_writepages, &epd);
3978 else {
3979 if (tree->ops && tree->ops->writepage_end_io_hook)
3980 tree->ops->writepage_end_io_hook(page, start,
3981 start + PAGE_CACHE_SIZE - 1,
3982 NULL, 1);
3983 unlock_page(page);
3984 }
3985 page_cache_release(page);
3986 start += PAGE_CACHE_SIZE;
3987 }
3988
Chris Masonffbd5172009-04-20 15:50:09 -04003989 flush_epd_write_bio(&epd);
Chris Mason771ed682008-11-06 22:02:51 -05003990 return ret;
3991}
Chris Masond1310b22008-01-24 16:13:08 -05003992
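/*
 * Write back the dirty pages of a whole mapping: walk them with
 * extent_write_cache_pages() and flush the final accumulated bio
 * when the walk is done.
 */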
3993int extent_writepages(struct extent_io_tree *tree,
3994 struct address_space *mapping,
3995 get_extent_t *get_extent,
3996 struct writeback_control *wbc)
3997{
3998 int ret = 0;
3999 struct extent_page_data epd = {
4000 .bio = NULL,
4001 .tree = tree,
4002 .get_extent = get_extent,
Chris Mason771ed682008-11-06 22:02:51 -05004003 .extent_locked = 0,
Chris Masonffbd5172009-04-20 15:50:09 -04004004 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
Josef Bacikde0022b2012-09-25 14:25:58 -04004005 .bio_flags = 0,
Chris Masond1310b22008-01-24 16:13:08 -05004006 };
4007
Chris Mason4bef0842008-09-08 11:18:08 -04004008 ret = extent_write_cache_pages(tree, mapping, wbc,
Chris Masond2c3f4f2008-11-19 12:44:22 -05004009 __extent_writepage, &epd,
4010 flush_write_bio);
Chris Masonffbd5172009-04-20 15:50:09 -04004011 flush_epd_write_bio(&epd);
Chris Masond1310b22008-01-24 16:13:08 -05004012 return ret;
4013}
Chris Masond1310b22008-01-24 16:13:08 -05004014
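/*
 * Readahead entry point: pull pages off the list, add them to the page
 * cache, and submit them for read in batches of up to 16, reusing a
 * single cached extent mapping (em_cached) across each batch.
 */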
4015int extent_readpages(struct extent_io_tree *tree,
4016 struct address_space *mapping,
4017 struct list_head *pages, unsigned nr_pages,
4018 get_extent_t get_extent)
4019{
4020 struct bio *bio = NULL;
4021 unsigned page_idx;
Chris Masonc8b97812008-10-29 14:49:59 -04004022 unsigned long bio_flags = 0;
Liu Bo67c96842012-07-20 21:43:09 -06004023 struct page *pagepool[16];
4024 struct page *page;
Miao Xie125bac012013-07-25 19:22:37 +08004025 struct extent_map *em_cached = NULL;
Liu Bo67c96842012-07-20 21:43:09 -06004026 int nr = 0;
Chris Masond1310b22008-01-24 16:13:08 -05004027
Chris Masond1310b22008-01-24 16:13:08 -05004028 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
Liu Bo67c96842012-07-20 21:43:09 -06004029 page = list_entry(pages->prev, struct page, lru);
Chris Masond1310b22008-01-24 16:13:08 -05004030
4031 prefetchw(&page->flags);
4032 list_del(&page->lru);
Liu Bo67c96842012-07-20 21:43:09 -06004033 if (add_to_page_cache_lru(page, mapping,
Itaru Kitayama43e817a2011-04-25 19:43:51 -04004034 page->index, GFP_NOFS)) {
Liu Bo67c96842012-07-20 21:43:09 -06004035 page_cache_release(page);
4036 continue;
Chris Masond1310b22008-01-24 16:13:08 -05004037 }
Liu Bo67c96842012-07-20 21:43:09 -06004038
4039 pagepool[nr++] = page;
4040 if (nr < ARRAY_SIZE(pagepool))
4041 continue;
Miao Xie125bac012013-07-25 19:22:37 +08004042 __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
Miao Xie99740902013-07-25 19:22:36 +08004043 &bio, 0, &bio_flags, READ);
Liu Bo67c96842012-07-20 21:43:09 -06004044 nr = 0;
Chris Masond1310b22008-01-24 16:13:08 -05004045 }
Miao Xie99740902013-07-25 19:22:36 +08004046 if (nr)
Miao Xie125bac012013-07-25 19:22:37 +08004047 __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
Miao Xie99740902013-07-25 19:22:36 +08004048 &bio, 0, &bio_flags, READ);
Liu Bo67c96842012-07-20 21:43:09 -06004049
Miao Xie125bac012013-07-25 19:22:37 +08004050 if (em_cached)
4051 free_extent_map(em_cached);
4052
Chris Masond1310b22008-01-24 16:13:08 -05004053 BUG_ON(!list_empty(pages));
4054 if (bio)
Jeff Mahoney79787ea2012-03-12 16:03:00 +01004055 return submit_one_bio(READ, bio, 0, bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05004056 return 0;
4057}
Chris Masond1310b22008-01-24 16:13:08 -05004058
4059/*
 4060	 * basic invalidatepage code; this waits on any locked or writeback
4061 * ranges corresponding to the page, and then deletes any extent state
4062 * records from the tree
4063 */
4064int extent_invalidatepage(struct extent_io_tree *tree,
4065 struct page *page, unsigned long offset)
4066{
Josef Bacik2ac55d42010-02-03 19:33:23 +00004067 struct extent_state *cached_state = NULL;
Miao Xie4eee4fa2012-12-21 09:17:45 +00004068 u64 start = page_offset(page);
Chris Masond1310b22008-01-24 16:13:08 -05004069 u64 end = start + PAGE_CACHE_SIZE - 1;
4070 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
4071
Qu Wenruofda28322013-02-26 08:10:22 +00004072 start += ALIGN(offset, blocksize);
Chris Masond1310b22008-01-24 16:13:08 -05004073 if (start > end)
4074 return 0;
4075
Jeff Mahoneyd0082372012-03-01 14:57:19 +01004076 lock_extent_bits(tree, start, end, 0, &cached_state);
Chris Mason1edbb732009-09-02 13:24:36 -04004077 wait_on_page_writeback(page);
Chris Masond1310b22008-01-24 16:13:08 -05004078 clear_extent_bit(tree, start, end,
Josef Bacik32c00af2009-10-08 13:34:05 -04004079 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
4080 EXTENT_DO_ACCOUNTING,
Josef Bacik2ac55d42010-02-03 19:33:23 +00004081 1, 1, &cached_state, GFP_NOFS);
Chris Masond1310b22008-01-24 16:13:08 -05004082 return 0;
4083}
Chris Masond1310b22008-01-24 16:13:08 -05004084
4085/*
Chris Mason7b13b7b2008-04-18 10:29:50 -04004086 * a helper for releasepage; it tests for areas of the page that
4087 * are locked or under IO and drops the related state bits if it is safe
4088 * to drop the page.
4089 */
Eric Sandeen48a3b632013-04-25 20:41:01 +00004090static int try_release_extent_state(struct extent_map_tree *map,
4091 struct extent_io_tree *tree,
4092 struct page *page, gfp_t mask)
Chris Mason7b13b7b2008-04-18 10:29:50 -04004093{
Miao Xie4eee4fa2012-12-21 09:17:45 +00004094 u64 start = page_offset(page);
Chris Mason7b13b7b2008-04-18 10:29:50 -04004095 u64 end = start + PAGE_CACHE_SIZE - 1;
4096 int ret = 1;
4097
Chris Mason211f90e2008-07-18 11:56:15 -04004098 if (test_range_bit(tree, start, end,
Chris Mason8b62b722009-09-02 16:53:46 -04004099 EXTENT_IOBITS, 0, NULL))
Chris Mason7b13b7b2008-04-18 10:29:50 -04004100 ret = 0;
4101 else {
4102 if ((mask & GFP_NOFS) == GFP_NOFS)
4103 mask = GFP_NOFS;
Chris Mason11ef1602009-09-23 20:28:46 -04004104 /*
4105 * at this point we can safely clear everything except the
4106 * locked bit and the nodatasum bit
4107 */
Chris Masone3f24cc2011-02-14 12:52:08 -05004108 ret = clear_extent_bit(tree, start, end,
Chris Mason11ef1602009-09-23 20:28:46 -04004109 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
4110 0, 0, NULL, mask);
Chris Masone3f24cc2011-02-14 12:52:08 -05004111
4112 /* if clear_extent_bit failed for enomem reasons,
4113 * we can't allow the release to continue.
4114 */
4115 if (ret < 0)
4116 ret = 0;
4117 else
4118 ret = 1;
Chris Mason7b13b7b2008-04-18 10:29:50 -04004119 }
4120 return ret;
4121}
Chris Mason7b13b7b2008-04-18 10:29:50 -04004122
4123/*
Chris Masond1310b22008-01-24 16:13:08 -05004124 * a helper for releasepage. As long as there are no locked extents
4125 * in the range corresponding to the page, both state records and extent
4126 * map records are removed
4127 */
4128int try_release_extent_mapping(struct extent_map_tree *map,
Chris Mason70dec802008-01-29 09:59:12 -05004129 struct extent_io_tree *tree, struct page *page,
4130 gfp_t mask)
Chris Masond1310b22008-01-24 16:13:08 -05004131{
4132 struct extent_map *em;
Miao Xie4eee4fa2012-12-21 09:17:45 +00004133 u64 start = page_offset(page);
Chris Masond1310b22008-01-24 16:13:08 -05004134 u64 end = start + PAGE_CACHE_SIZE - 1;
Chris Mason7b13b7b2008-04-18 10:29:50 -04004135
Chris Mason70dec802008-01-29 09:59:12 -05004136 if ((mask & __GFP_WAIT) &&
4137 page->mapping->host->i_size > 16 * 1024 * 1024) {
Yan39b56372008-02-15 10:40:50 -05004138 u64 len;
Chris Mason70dec802008-01-29 09:59:12 -05004139 while (start <= end) {
Yan39b56372008-02-15 10:40:50 -05004140 len = end - start + 1;
Chris Mason890871b2009-09-02 16:24:52 -04004141 write_lock(&map->lock);
Yan39b56372008-02-15 10:40:50 -05004142 em = lookup_extent_mapping(map, start, len);
Tsutomu Itoh285190d2012-02-16 16:23:58 +09004143 if (!em) {
Chris Mason890871b2009-09-02 16:24:52 -04004144 write_unlock(&map->lock);
Chris Mason70dec802008-01-29 09:59:12 -05004145 break;
4146 }
Chris Mason7f3c74f2008-07-18 12:01:11 -04004147 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
4148 em->start != start) {
Chris Mason890871b2009-09-02 16:24:52 -04004149 write_unlock(&map->lock);
Chris Mason70dec802008-01-29 09:59:12 -05004150 free_extent_map(em);
4151 break;
4152 }
4153 if (!test_range_bit(tree, em->start,
4154 extent_map_end(em) - 1,
Chris Mason8b62b722009-09-02 16:53:46 -04004155 EXTENT_LOCKED | EXTENT_WRITEBACK,
Chris Mason9655d292009-09-02 15:22:30 -04004156 0, NULL)) {
Chris Mason70dec802008-01-29 09:59:12 -05004157 remove_extent_mapping(map, em);
4158 /* once for the rb tree */
4159 free_extent_map(em);
4160 }
4161 start = extent_map_end(em);
Chris Mason890871b2009-09-02 16:24:52 -04004162 write_unlock(&map->lock);
Chris Mason70dec802008-01-29 09:59:12 -05004163
4164 /* once for us */
Chris Masond1310b22008-01-24 16:13:08 -05004165 free_extent_map(em);
4166 }
Chris Masond1310b22008-01-24 16:13:08 -05004167 }
Chris Mason7b13b7b2008-04-18 10:29:50 -04004168 return try_release_extent_state(map, tree, page, mask);
Chris Masond1310b22008-01-24 16:13:08 -05004169}
Chris Masond1310b22008-01-24 16:13:08 -05004170
Chris Masonec29ed52011-02-23 16:23:20 -05004171/*
4172 * helper function for fiemap, which doesn't want to see any holes.
4173 * This maps until we find something past 'last'
4174 */
4175static struct extent_map *get_extent_skip_holes(struct inode *inode,
4176 u64 offset,
4177 u64 last,
4178 get_extent_t *get_extent)
4179{
4180 u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
4181 struct extent_map *em;
4182 u64 len;
4183
4184 if (offset >= last)
4185 return NULL;
4186
Dulshani Gunawardhana67871252013-10-31 10:33:04 +05304187 while (1) {
Chris Masonec29ed52011-02-23 16:23:20 -05004188 len = last - offset;
4189 if (len == 0)
4190 break;
Qu Wenruofda28322013-02-26 08:10:22 +00004191 len = ALIGN(len, sectorsize);
Chris Masonec29ed52011-02-23 16:23:20 -05004192 em = get_extent(inode, NULL, 0, offset, len, 0);
David Sterbac7040052011-04-19 18:00:01 +02004193 if (IS_ERR_OR_NULL(em))
Chris Masonec29ed52011-02-23 16:23:20 -05004194 return em;
4195
4196 /* if this isn't a hole return it */
4197 if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
4198 em->block_start != EXTENT_MAP_HOLE) {
4199 return em;
4200 }
4201
4202 /* this is a hole, advance to the next extent */
4203 offset = extent_map_end(em);
4204 free_extent_map(em);
4205 if (offset >= last)
4206 break;
4207 }
4208 return NULL;
4209}
4210
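/*
 * fiemap implementation: round the requested range to sectorsize, look
 * up the last file extent to bound the walk, then iterate with
 * get_extent_skip_holes() and emit one entry per mapping via
 * fiemap_fill_next_extent(). When fi_extents_max is set, each extent is
 * also checked with btrfs_check_shared() so FIEMAP_EXTENT_SHARED can be
 * reported.
 */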
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004211int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4212 __u64 start, __u64 len, get_extent_t *get_extent)
4213{
Josef Bacik975f84f2010-11-23 19:36:57 +00004214 int ret = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004215 u64 off = start;
4216 u64 max = start + len;
4217 u32 flags = 0;
Josef Bacik975f84f2010-11-23 19:36:57 +00004218 u32 found_type;
4219 u64 last;
Chris Masonec29ed52011-02-23 16:23:20 -05004220 u64 last_for_get_extent = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004221 u64 disko = 0;
Chris Masonec29ed52011-02-23 16:23:20 -05004222 u64 isize = i_size_read(inode);
Josef Bacik975f84f2010-11-23 19:36:57 +00004223 struct btrfs_key found_key;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004224 struct extent_map *em = NULL;
Josef Bacik2ac55d42010-02-03 19:33:23 +00004225 struct extent_state *cached_state = NULL;
Josef Bacik975f84f2010-11-23 19:36:57 +00004226 struct btrfs_path *path;
Josef Bacikdc046b12014-09-10 16:20:45 -04004227 struct btrfs_root *root = BTRFS_I(inode)->root;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004228 int end = 0;
Chris Masonec29ed52011-02-23 16:23:20 -05004229 u64 em_start = 0;
4230 u64 em_len = 0;
4231 u64 em_end = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004232
4233 if (len == 0)
4234 return -EINVAL;
4235
Josef Bacik975f84f2010-11-23 19:36:57 +00004236 path = btrfs_alloc_path();
4237 if (!path)
4238 return -ENOMEM;
4239 path->leave_spinning = 1;
4240
Qu Wenruo2c919432014-07-18 09:55:43 +08004241 start = round_down(start, BTRFS_I(inode)->root->sectorsize);
4242 len = round_up(max, BTRFS_I(inode)->root->sectorsize) - start;
Josef Bacik4d479cf2011-11-17 11:34:31 -05004243
Chris Masonec29ed52011-02-23 16:23:20 -05004244 /*
4245 * lookup the last file extent. We're not using i_size here
4246 * because there might be preallocation past i_size
4247 */
Josef Bacikdc046b12014-09-10 16:20:45 -04004248 ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), -1,
4249 0);
Josef Bacik975f84f2010-11-23 19:36:57 +00004250 if (ret < 0) {
4251 btrfs_free_path(path);
4252 return ret;
4253 }
4254 WARN_ON(!ret);
4255 path->slots[0]--;
Josef Bacik975f84f2010-11-23 19:36:57 +00004256 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
David Sterba962a2982014-06-04 18:41:45 +02004257 found_type = found_key.type;
Josef Bacik975f84f2010-11-23 19:36:57 +00004258
Chris Masonec29ed52011-02-23 16:23:20 -05004259 /* No extents, but there might be delalloc bits */
Li Zefan33345d012011-04-20 10:31:50 +08004260 if (found_key.objectid != btrfs_ino(inode) ||
Josef Bacik975f84f2010-11-23 19:36:57 +00004261 found_type != BTRFS_EXTENT_DATA_KEY) {
Chris Masonec29ed52011-02-23 16:23:20 -05004262 /* have to trust i_size as the end */
4263 last = (u64)-1;
4264 last_for_get_extent = isize;
4265 } else {
4266 /*
4267 * remember the start of the last extent. There are a
4268 * bunch of different factors that go into the length of the
 4270		 * extent, so it's much less complex to remember where it started
4270 */
4271 last = found_key.offset;
4272 last_for_get_extent = last + 1;
Josef Bacik975f84f2010-11-23 19:36:57 +00004273 }
Liu Bofe09e162013-09-22 12:54:23 +08004274 btrfs_release_path(path);
Josef Bacik975f84f2010-11-23 19:36:57 +00004275
Chris Masonec29ed52011-02-23 16:23:20 -05004276 /*
4277 * we might have some extents allocated but more delalloc past those
 4279	 * extents. So we trust isize unless the start of the last extent is
4279 * beyond isize
4280 */
4281 if (last < isize) {
4282 last = (u64)-1;
4283 last_for_get_extent = isize;
4284 }
4285
Liu Boa52f4cd2013-05-01 16:23:41 +00004286 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1, 0,
Jeff Mahoneyd0082372012-03-01 14:57:19 +01004287 &cached_state);
Chris Masonec29ed52011-02-23 16:23:20 -05004288
Josef Bacik4d479cf2011-11-17 11:34:31 -05004289 em = get_extent_skip_holes(inode, start, last_for_get_extent,
Chris Masonec29ed52011-02-23 16:23:20 -05004290 get_extent);
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004291 if (!em)
4292 goto out;
4293 if (IS_ERR(em)) {
4294 ret = PTR_ERR(em);
4295 goto out;
4296 }
Josef Bacik975f84f2010-11-23 19:36:57 +00004297
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004298 while (!end) {
Josef Bacikb76bb702013-07-05 13:52:51 -04004299 u64 offset_in_extent = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004300
Chris Masonea8efc72011-03-08 11:54:40 -05004301 /* break if the extent we found is outside the range */
4302 if (em->start >= max || extent_map_end(em) < off)
4303 break;
4304
4305 /*
4306 * get_extent may return an extent that starts before our
4307 * requested range. We have to make sure the ranges
4308 * we return to fiemap always move forward and don't
4309 * overlap, so adjust the offsets here
4310 */
4311 em_start = max(em->start, off);
4312
4313 /*
4314 * record the offset from the start of the extent
Josef Bacikb76bb702013-07-05 13:52:51 -04004315 * for adjusting the disk offset below. Only do this if the
 4317		 * extent isn't compressed since our in-RAM offset may be past
4317 * what we have actually allocated on disk.
Chris Masonea8efc72011-03-08 11:54:40 -05004318 */
Josef Bacikb76bb702013-07-05 13:52:51 -04004319 if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4320 offset_in_extent = em_start - em->start;
Chris Masonec29ed52011-02-23 16:23:20 -05004321 em_end = extent_map_end(em);
Chris Masonea8efc72011-03-08 11:54:40 -05004322 em_len = em_end - em_start;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004323 disko = 0;
4324 flags = 0;
4325
Chris Masonea8efc72011-03-08 11:54:40 -05004326 /*
4327 * bump off for our next call to get_extent
4328 */
4329 off = extent_map_end(em);
4330 if (off >= max)
4331 end = 1;
4332
Heiko Carstens93dbfad2009-04-03 10:33:45 -04004333 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004334 end = 1;
4335 flags |= FIEMAP_EXTENT_LAST;
Heiko Carstens93dbfad2009-04-03 10:33:45 -04004336 } else if (em->block_start == EXTENT_MAP_INLINE) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004337 flags |= (FIEMAP_EXTENT_DATA_INLINE |
4338 FIEMAP_EXTENT_NOT_ALIGNED);
Heiko Carstens93dbfad2009-04-03 10:33:45 -04004339 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004340 flags |= (FIEMAP_EXTENT_DELALLOC |
4341 FIEMAP_EXTENT_UNKNOWN);
Josef Bacikdc046b12014-09-10 16:20:45 -04004342 } else if (fieinfo->fi_extents_max) {
4343 u64 bytenr = em->block_start -
4344 (em->start - em->orig_start);
Liu Bofe09e162013-09-22 12:54:23 +08004345
Chris Masonea8efc72011-03-08 11:54:40 -05004346 disko = em->block_start + offset_in_extent;
Liu Bofe09e162013-09-22 12:54:23 +08004347
4348 /*
4349 * As btrfs supports shared space, this information
4350 * can be exported to userspace tools via
Josef Bacikdc046b12014-09-10 16:20:45 -04004351 * flag FIEMAP_EXTENT_SHARED. If fi_extents_max == 0
4352 * then we're just getting a count and we can skip the
4353 * lookup stuff.
Liu Bofe09e162013-09-22 12:54:23 +08004354 */
Josef Bacikdc046b12014-09-10 16:20:45 -04004355 ret = btrfs_check_shared(NULL, root->fs_info,
4356 root->objectid,
4357 btrfs_ino(inode), bytenr);
4358 if (ret < 0)
Liu Bofe09e162013-09-22 12:54:23 +08004359 goto out_free;
Josef Bacikdc046b12014-09-10 16:20:45 -04004360 if (ret)
Liu Bofe09e162013-09-22 12:54:23 +08004361 flags |= FIEMAP_EXTENT_SHARED;
Josef Bacikdc046b12014-09-10 16:20:45 -04004362 ret = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004363 }
4364 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4365 flags |= FIEMAP_EXTENT_ENCODED;
4366
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004367 free_extent_map(em);
4368 em = NULL;
Chris Masonec29ed52011-02-23 16:23:20 -05004369 if ((em_start >= last) || em_len == (u64)-1 ||
4370 (last == (u64)-1 && isize <= em_end)) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004371 flags |= FIEMAP_EXTENT_LAST;
4372 end = 1;
4373 }
4374
Chris Masonec29ed52011-02-23 16:23:20 -05004375 /* now scan forward to see if this is really the last extent. */
4376 em = get_extent_skip_holes(inode, off, last_for_get_extent,
4377 get_extent);
4378 if (IS_ERR(em)) {
4379 ret = PTR_ERR(em);
4380 goto out;
4381 }
4382 if (!em) {
Josef Bacik975f84f2010-11-23 19:36:57 +00004383 flags |= FIEMAP_EXTENT_LAST;
4384 end = 1;
4385 }
Chris Masonec29ed52011-02-23 16:23:20 -05004386 ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
4387 em_len, flags);
4388 if (ret)
4389 goto out_free;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004390 }
4391out_free:
4392 free_extent_map(em);
4393out:
Liu Bofe09e162013-09-22 12:54:23 +08004394 btrfs_free_path(path);
Liu Boa52f4cd2013-05-01 16:23:41 +00004395 unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1,
Josef Bacik2ac55d42010-02-03 19:33:23 +00004396 &cached_state, GFP_NOFS);
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004397 return ret;
4398}
4399
Chris Mason727011e2010-08-06 13:21:20 -04004400static void __free_extent_buffer(struct extent_buffer *eb)
4401{
Eric Sandeen6d49ba12013-04-22 16:12:31 +00004402 btrfs_leak_debug_del(&eb->leak_list);
Chris Mason727011e2010-08-06 13:21:20 -04004403 kmem_cache_free(extent_buffer_cache, eb);
4404}
4405
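/*
 * Returns nonzero while the buffer has outstanding page reads or is
 * dirty or under writeback.
 */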
Josef Bacika26e8c92014-03-28 17:07:27 -04004406int extent_buffer_under_io(struct extent_buffer *eb)
Chris Masond1310b22008-01-24 16:13:08 -05004407{
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004408 return (atomic_read(&eb->io_pages) ||
4409 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
4410 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
Chris Masond1310b22008-01-24 16:13:08 -05004411}
4412
Miao Xie897ca6e92010-10-26 20:57:29 -04004413/*
 4415	 * Helper for releasing extent buffer pages.
4415 */
4416static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
4417 unsigned long start_idx)
4418{
4419 unsigned long index;
Wang Sheng-Hui39bab872012-04-06 14:35:31 +08004420 unsigned long num_pages;
Miao Xie897ca6e92010-10-26 20:57:29 -04004421 struct page *page;
Jan Schmidt815a51c2012-05-16 17:00:02 +02004422 int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
Miao Xie897ca6e92010-10-26 20:57:29 -04004423
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004424 BUG_ON(extent_buffer_under_io(eb));
Miao Xie897ca6e92010-10-26 20:57:29 -04004425
Wang Sheng-Hui39bab872012-04-06 14:35:31 +08004426 num_pages = num_extent_pages(eb->start, eb->len);
4427 index = start_idx + num_pages;
Miao Xie897ca6e92010-10-26 20:57:29 -04004428 if (start_idx >= index)
4429 return;
4430
4431 do {
4432 index--;
4433 page = extent_buffer_page(eb, index);
Jan Schmidt815a51c2012-05-16 17:00:02 +02004434 if (page && mapped) {
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004435 spin_lock(&page->mapping->private_lock);
4436 /*
4437 * We do this since we'll remove the pages after we've
4438 * removed the eb from the radix tree, so we could race
4439 * and have this page now attached to the new eb. So
4440 * only clear page_private if it's still connected to
4441 * this eb.
4442 */
4443 if (PagePrivate(page) &&
4444 page->private == (unsigned long)eb) {
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004445 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
Josef Bacik3083ee22012-03-09 16:01:49 -05004446 BUG_ON(PageDirty(page));
4447 BUG_ON(PageWriteback(page));
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004448 /*
 4450				 * We need to make sure we haven't been attached
4450 * to a new eb.
4451 */
4452 ClearPagePrivate(page);
4453 set_page_private(page, 0);
4454 /* One for the page private */
4455 page_cache_release(page);
4456 }
4457 spin_unlock(&page->mapping->private_lock);
4458
Jan Schmidt815a51c2012-05-16 17:00:02 +02004459 }
4460 if (page) {
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004461			/* One for when we allocated the page */
Miao Xie897ca6e92010-10-26 20:57:29 -04004462 page_cache_release(page);
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004463 }
Miao Xie897ca6e92010-10-26 20:57:29 -04004464 } while (index != start_idx);
4465}
4466
4467/*
4468 * Helper for releasing the extent buffer.
4469 */
4470static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
4471{
4472 btrfs_release_extent_buffer_page(eb, 0);
4473 __free_extent_buffer(eb);
4474}
4475
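/*
 * Allocate and initialize a bare extent_buffer: no pages are attached
 * here, only the locks, waitqueues, and refcounts get set up.
 */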
Josef Bacikf28491e2013-12-16 13:24:27 -05004476static struct extent_buffer *
4477__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
4478 unsigned long len, gfp_t mask)
Josef Bacikdb7f3432013-08-07 14:54:37 -04004479{
4480 struct extent_buffer *eb = NULL;
4481
4482 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
4483 if (eb == NULL)
4484 return NULL;
4485 eb->start = start;
4486 eb->len = len;
Josef Bacikf28491e2013-12-16 13:24:27 -05004487 eb->fs_info = fs_info;
Josef Bacikdb7f3432013-08-07 14:54:37 -04004488 eb->bflags = 0;
4489 rwlock_init(&eb->lock);
4490 atomic_set(&eb->write_locks, 0);
4491 atomic_set(&eb->read_locks, 0);
4492 atomic_set(&eb->blocking_readers, 0);
4493 atomic_set(&eb->blocking_writers, 0);
4494 atomic_set(&eb->spinning_readers, 0);
4495 atomic_set(&eb->spinning_writers, 0);
4496 eb->lock_nested = 0;
4497 init_waitqueue_head(&eb->write_lock_wq);
4498 init_waitqueue_head(&eb->read_lock_wq);
4499
4500 btrfs_leak_debug_add(&eb->leak_list, &buffers);
4501
4502 spin_lock_init(&eb->refs_lock);
4503 atomic_set(&eb->refs, 1);
4504 atomic_set(&eb->io_pages, 0);
4505
4506 /*
 4508	 * Sanity check: currently the maximum is 64k covered by 16x 4k pages
4508 */
4509 BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE
4510 > MAX_INLINE_EXTENT_BUFFER_SIZE);
4511 BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);
4512
4513 return eb;
4514}
4515
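/*
 * Make a private copy of src: fresh pages are allocated, the contents
 * copied over, and the clone marked uptodate and DUMMY since it is not
 * in the radix tree or the page cache.
 */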
4516struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
4517{
4518 unsigned long i;
4519 struct page *p;
4520 struct extent_buffer *new;
4521 unsigned long num_pages = num_extent_pages(src->start, src->len);
4522
Josef Bacik9ec72672013-08-07 16:57:23 -04004523 new = __alloc_extent_buffer(NULL, src->start, src->len, GFP_NOFS);
Josef Bacikdb7f3432013-08-07 14:54:37 -04004524 if (new == NULL)
4525 return NULL;
4526
4527 for (i = 0; i < num_pages; i++) {
Josef Bacik9ec72672013-08-07 16:57:23 -04004528 p = alloc_page(GFP_NOFS);
Josef Bacikdb7f3432013-08-07 14:54:37 -04004529 if (!p) {
4530 btrfs_release_extent_buffer(new);
4531 return NULL;
4532 }
4533 attach_extent_buffer_page(new, p);
4534 WARN_ON(PageDirty(p));
4535 SetPageUptodate(p);
4536 new->pages[i] = p;
4537 }
4538
4539 copy_extent_buffer(new, src, 0, 0, src->len);
4540 set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
4541 set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);
4542
4543 return new;
4544}
4545
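/*
 * Allocate a zero-filled extent buffer backed by private pages instead
 * of the page cache, marked DUMMY so release skips the mapping
 * bookkeeping.
 */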
4546struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len)
4547{
4548 struct extent_buffer *eb;
4549 unsigned long num_pages = num_extent_pages(0, len);
4550 unsigned long i;
4551
Josef Bacik9ec72672013-08-07 16:57:23 -04004552 eb = __alloc_extent_buffer(NULL, start, len, GFP_NOFS);
Josef Bacikdb7f3432013-08-07 14:54:37 -04004553 if (!eb)
4554 return NULL;
4555
4556 for (i = 0; i < num_pages; i++) {
Josef Bacik9ec72672013-08-07 16:57:23 -04004557 eb->pages[i] = alloc_page(GFP_NOFS);
Josef Bacikdb7f3432013-08-07 14:54:37 -04004558 if (!eb->pages[i])
4559 goto err;
4560 }
4561 set_extent_buffer_uptodate(eb);
4562 btrfs_set_header_nritems(eb, 0);
4563 set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
4564
4565 return eb;
4566err:
4567 for (; i > 0; i--)
4568 __free_page(eb->pages[i - 1]);
4569 __free_extent_buffer(eb);
4570 return NULL;
4571}
4572
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004573static void check_buffer_tree_ref(struct extent_buffer *eb)
4574{
Chris Mason242e18c2013-01-29 17:49:37 -05004575 int refs;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004576 /* the ref bit is tricky. We have to make sure it is set
4577 * if we have the buffer dirty. Otherwise the
4578 * code to free a buffer can end up dropping a dirty
4579 * page
4580 *
4581 * Once the ref bit is set, it won't go away while the
4582 * buffer is dirty or in writeback, and it also won't
4583 * go away while we have the reference count on the
4584 * eb bumped.
4585 *
4586 * We can't just set the ref bit without bumping the
4587 * ref on the eb because free_extent_buffer might
4588 * see the ref bit and try to clear it. If this happens
4589 * free_extent_buffer might end up dropping our original
4590 * ref by mistake and freeing the page before we are able
4591 * to add one more ref.
4592 *
4593 * So bump the ref count first, then set the bit. If someone
4594 * beat us to it, drop the ref we added.
4595 */
Chris Mason242e18c2013-01-29 17:49:37 -05004596 refs = atomic_read(&eb->refs);
4597 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4598 return;
4599
Josef Bacik594831c2012-07-20 16:11:08 -04004600 spin_lock(&eb->refs_lock);
4601 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004602 atomic_inc(&eb->refs);
Josef Bacik594831c2012-07-20 16:11:08 -04004603 spin_unlock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004604}
4605
Mel Gorman2457aec2014-06-04 16:10:31 -07004606static void mark_extent_buffer_accessed(struct extent_buffer *eb,
4607 struct page *accessed)
Josef Bacik5df42352012-03-15 18:24:42 -04004608{
4609 unsigned long num_pages, i;
4610
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004611 check_buffer_tree_ref(eb);
4612
Josef Bacik5df42352012-03-15 18:24:42 -04004613 num_pages = num_extent_pages(eb->start, eb->len);
4614 for (i = 0; i < num_pages; i++) {
4615 struct page *p = extent_buffer_page(eb, i);
Mel Gorman2457aec2014-06-04 16:10:31 -07004616 if (p != accessed)
4617 mark_page_accessed(p);
Josef Bacik5df42352012-03-15 18:24:42 -04004618 }
4619}
4620
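/*
 * Look up an extent buffer in the radix tree under RCU, keyed by
 * start >> PAGE_CACHE_SHIFT, and take a reference if it is still live.
 */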
Josef Bacikf28491e2013-12-16 13:24:27 -05004621struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
4622 u64 start)
Chandra Seetharaman452c75c2013-10-07 10:45:25 -05004623{
4624 struct extent_buffer *eb;
4625
4626 rcu_read_lock();
Josef Bacikf28491e2013-12-16 13:24:27 -05004627 eb = radix_tree_lookup(&fs_info->buffer_radix,
4628 start >> PAGE_CACHE_SHIFT);
Chandra Seetharaman452c75c2013-10-07 10:45:25 -05004629 if (eb && atomic_inc_not_zero(&eb->refs)) {
4630 rcu_read_unlock();
Mel Gorman2457aec2014-06-04 16:10:31 -07004631 mark_extent_buffer_accessed(eb, NULL);
Chandra Seetharaman452c75c2013-10-07 10:45:25 -05004632 return eb;
4633 }
4634 rcu_read_unlock();
4635
4636 return NULL;
4637}
4638
Josef Bacikfaa2dbf2014-05-07 17:06:09 -04004639#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4640struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
4641 u64 start, unsigned long len)
4642{
4643 struct extent_buffer *eb, *exists = NULL;
4644 int ret;
4645
4646 eb = find_extent_buffer(fs_info, start);
4647 if (eb)
4648 return eb;
4649 eb = alloc_dummy_extent_buffer(start, len);
4650 if (!eb)
4651 return NULL;
4652 eb->fs_info = fs_info;
4653again:
4654 ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
4655 if (ret)
4656 goto free_eb;
4657 spin_lock(&fs_info->buffer_lock);
4658 ret = radix_tree_insert(&fs_info->buffer_radix,
4659 start >> PAGE_CACHE_SHIFT, eb);
4660 spin_unlock(&fs_info->buffer_lock);
4661 radix_tree_preload_end();
4662 if (ret == -EEXIST) {
4663 exists = find_extent_buffer(fs_info, start);
4664 if (exists)
4665 goto free_eb;
4666 else
4667 goto again;
4668 }
4669 check_buffer_tree_ref(eb);
4670 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
4671
4672 /*
 4674	 * We will free dummy extent buffers if they come into
4674 * free_extent_buffer with a ref count of 2, but if we are using this we
4675 * want the buffers to stay in memory until we're done with them, so
4676 * bump the ref count again.
4677 */
4678 atomic_inc(&eb->refs);
4679 return eb;
4680free_eb:
4681 btrfs_release_extent_buffer(eb);
4682 return exists;
4683}
4684#endif
4685
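/*
 * Find or create the extent buffer for a given range: allocate the
 * buffer, attach page cache pages to it, and insert it into the radix
 * tree, backing off to an existing buffer if someone raced us in.
 */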
Josef Bacikf28491e2013-12-16 13:24:27 -05004686struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
Chris Mason727011e2010-08-06 13:21:20 -04004687 u64 start, unsigned long len)
Chris Masond1310b22008-01-24 16:13:08 -05004688{
4689 unsigned long num_pages = num_extent_pages(start, len);
4690 unsigned long i;
4691 unsigned long index = start >> PAGE_CACHE_SHIFT;
4692 struct extent_buffer *eb;
Chris Mason6af118ce2008-07-22 11:18:07 -04004693 struct extent_buffer *exists = NULL;
Chris Masond1310b22008-01-24 16:13:08 -05004694 struct page *p;
Josef Bacikf28491e2013-12-16 13:24:27 -05004695 struct address_space *mapping = fs_info->btree_inode->i_mapping;
Chris Masond1310b22008-01-24 16:13:08 -05004696 int uptodate = 1;
Miao Xie19fe0a82010-10-26 20:57:29 -04004697 int ret;
Chris Masond1310b22008-01-24 16:13:08 -05004698
Josef Bacikf28491e2013-12-16 13:24:27 -05004699 eb = find_extent_buffer(fs_info, start);
Chandra Seetharaman452c75c2013-10-07 10:45:25 -05004700 if (eb)
Chris Mason6af118ce2008-07-22 11:18:07 -04004701 return eb;
Chris Mason6af118ce2008-07-22 11:18:07 -04004702
Josef Bacikf28491e2013-12-16 13:24:27 -05004703 eb = __alloc_extent_buffer(fs_info, start, len, GFP_NOFS);
Peter2b114d12008-04-01 11:21:40 -04004704 if (!eb)
Chris Masond1310b22008-01-24 16:13:08 -05004705 return NULL;
4706
Chris Mason727011e2010-08-06 13:21:20 -04004707 for (i = 0; i < num_pages; i++, index++) {
Chris Masona6591712011-07-19 12:04:14 -04004708 p = find_or_create_page(mapping, index, GFP_NOFS);
Josef Bacik4804b382012-10-05 16:43:45 -04004709 if (!p)
Chris Mason6af118ce2008-07-22 11:18:07 -04004710 goto free_eb;
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004711
4712 spin_lock(&mapping->private_lock);
4713 if (PagePrivate(p)) {
4714 /*
4715 * We could have already allocated an eb for this page
 4717			 * and attached one, so let's see if we can get a ref on
4717 * the existing eb, and if we can we know it's good and
4718 * we can just return that one, else we know we can just
4719 * overwrite page->private.
4720 */
4721 exists = (struct extent_buffer *)p->private;
4722 if (atomic_inc_not_zero(&exists->refs)) {
4723 spin_unlock(&mapping->private_lock);
4724 unlock_page(p);
Josef Bacik17de39a2012-05-04 15:16:06 -04004725 page_cache_release(p);
Mel Gorman2457aec2014-06-04 16:10:31 -07004726 mark_extent_buffer_accessed(exists, p);
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004727 goto free_eb;
4728 }
4729
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004730 /*
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004731 * Do this so attach doesn't complain and we need to
4732 * drop the ref the old guy had.
4733 */
4734 ClearPagePrivate(p);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004735 WARN_ON(PageDirty(p));
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004736 page_cache_release(p);
Chris Masond1310b22008-01-24 16:13:08 -05004737 }
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004738 attach_extent_buffer_page(eb, p);
4739 spin_unlock(&mapping->private_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004740 WARN_ON(PageDirty(p));
Chris Mason727011e2010-08-06 13:21:20 -04004741 eb->pages[i] = p;
Chris Masond1310b22008-01-24 16:13:08 -05004742 if (!PageUptodate(p))
4743 uptodate = 0;
Chris Masoneb14ab82011-02-10 12:35:00 -05004744
4745 /*
 4747	 * see below about how we avoid a nasty race with releasepage
4747 * and why we unlock later
4748 */
Chris Masond1310b22008-01-24 16:13:08 -05004749 }
4750 if (uptodate)
Chris Masonb4ce94d2009-02-04 09:25:08 -05004751 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
Josef Bacik115391d2012-03-09 09:51:43 -05004752again:
Miao Xie19fe0a82010-10-26 20:57:29 -04004753 ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
4754 if (ret)
4755 goto free_eb;
4756
Josef Bacikf28491e2013-12-16 13:24:27 -05004757 spin_lock(&fs_info->buffer_lock);
4758 ret = radix_tree_insert(&fs_info->buffer_radix,
4759 start >> PAGE_CACHE_SHIFT, eb);
4760 spin_unlock(&fs_info->buffer_lock);
Chandra Seetharaman452c75c2013-10-07 10:45:25 -05004761 radix_tree_preload_end();
Miao Xie19fe0a82010-10-26 20:57:29 -04004762 if (ret == -EEXIST) {
Josef Bacikf28491e2013-12-16 13:24:27 -05004763 exists = find_extent_buffer(fs_info, start);
Chandra Seetharaman452c75c2013-10-07 10:45:25 -05004764 if (exists)
4765 goto free_eb;
4766 else
Josef Bacik115391d2012-03-09 09:51:43 -05004767 goto again;
Chris Mason6af118ce2008-07-22 11:18:07 -04004768 }
Chris Mason6af118ce2008-07-22 11:18:07 -04004769 /* add one reference for the tree */
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004770 check_buffer_tree_ref(eb);
Josef Bacik34b41ac2013-12-13 10:41:51 -05004771 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
Chris Masoneb14ab82011-02-10 12:35:00 -05004772
4773 /*
4774 * there is a race where release page may have
 4776	 * tried to find this extent buffer in the radix
 4777	 * tree but failed. It will tell the VM it is safe to
 4778	 * reclaim the page, and it will clear the page private bit.
4778 * We must make sure to set the page private bit properly
4779 * after the extent buffer is in the radix tree so
4780 * it doesn't get lost
4781 */
Chris Mason727011e2010-08-06 13:21:20 -04004782 SetPageChecked(eb->pages[0]);
4783 for (i = 1; i < num_pages; i++) {
4784 p = extent_buffer_page(eb, i);
Chris Mason727011e2010-08-06 13:21:20 -04004785 ClearPageChecked(p);
4786 unlock_page(p);
4787 }
4788 unlock_page(eb->pages[0]);
Chris Masond1310b22008-01-24 16:13:08 -05004789 return eb;
4790
Chris Mason6af118ce2008-07-22 11:18:07 -04004791free_eb:
Chris Mason727011e2010-08-06 13:21:20 -04004792 for (i = 0; i < num_pages; i++) {
4793 if (eb->pages[i])
4794 unlock_page(eb->pages[i]);
4795 }
Chris Masoneb14ab82011-02-10 12:35:00 -05004796
Josef Bacik17de39a2012-05-04 15:16:06 -04004797 WARN_ON(!atomic_dec_and_test(&eb->refs));
Miao Xie897ca6e92010-10-26 20:57:29 -04004798 btrfs_release_extent_buffer(eb);
Chris Mason6af118ce2008-07-22 11:18:07 -04004799 return exists;
Chris Masond1310b22008-01-24 16:13:08 -05004800}
Chris Masond1310b22008-01-24 16:13:08 -05004801
Josef Bacik3083ee22012-03-09 16:01:49 -05004802static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
4803{
4804 struct extent_buffer *eb =
4805 container_of(head, struct extent_buffer, rcu_head);
4806
4807 __free_extent_buffer(eb);
4808}
4809
Josef Bacik3083ee22012-03-09 16:01:49 -05004810/* Expects to have eb->eb_lock already held */
David Sterbaf7a52a42013-04-26 14:56:29 +00004811static int release_extent_buffer(struct extent_buffer *eb)
Josef Bacik3083ee22012-03-09 16:01:49 -05004812{
4813 WARN_ON(atomic_read(&eb->refs) == 0);
4814 if (atomic_dec_and_test(&eb->refs)) {
Josef Bacik34b41ac2013-12-13 10:41:51 -05004815 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
Josef Bacikf28491e2013-12-16 13:24:27 -05004816 struct btrfs_fs_info *fs_info = eb->fs_info;
Josef Bacik3083ee22012-03-09 16:01:49 -05004817
Jan Schmidt815a51c2012-05-16 17:00:02 +02004818 spin_unlock(&eb->refs_lock);
Josef Bacik3083ee22012-03-09 16:01:49 -05004819
Josef Bacikf28491e2013-12-16 13:24:27 -05004820 spin_lock(&fs_info->buffer_lock);
4821 radix_tree_delete(&fs_info->buffer_radix,
Jan Schmidt815a51c2012-05-16 17:00:02 +02004822 eb->start >> PAGE_CACHE_SHIFT);
Josef Bacikf28491e2013-12-16 13:24:27 -05004823 spin_unlock(&fs_info->buffer_lock);
Josef Bacik34b41ac2013-12-13 10:41:51 -05004824 } else {
4825 spin_unlock(&eb->refs_lock);
Jan Schmidt815a51c2012-05-16 17:00:02 +02004826 }
Josef Bacik3083ee22012-03-09 16:01:49 -05004827
4828 /* Should be safe to release our pages at this point */
4829 btrfs_release_extent_buffer_page(eb, 0);
Josef Bacik3083ee22012-03-09 16:01:49 -05004830 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
Josef Bacike64860a2012-07-20 16:05:36 -04004831 return 1;
Josef Bacik3083ee22012-03-09 16:01:49 -05004832 }
4833 spin_unlock(&eb->refs_lock);
Josef Bacike64860a2012-07-20 16:05:36 -04004834
4835 return 0;
Josef Bacik3083ee22012-03-09 16:01:49 -05004836}
4837
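/*
 * Drop one reference. Refcounts above 3 are dropped with a bare
 * cmpxchg; below that we take refs_lock so the last tree ref on stale
 * or dummy buffers can be released safely.
 */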
Chris Masond1310b22008-01-24 16:13:08 -05004838void free_extent_buffer(struct extent_buffer *eb)
4839{
Chris Mason242e18c2013-01-29 17:49:37 -05004840 int refs;
4841 int old;
Chris Masond1310b22008-01-24 16:13:08 -05004842 if (!eb)
4843 return;
4844
Chris Mason242e18c2013-01-29 17:49:37 -05004845 while (1) {
4846 refs = atomic_read(&eb->refs);
4847 if (refs <= 3)
4848 break;
4849 old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
4850 if (old == refs)
4851 return;
4852 }
4853
Josef Bacik3083ee22012-03-09 16:01:49 -05004854 spin_lock(&eb->refs_lock);
4855 if (atomic_read(&eb->refs) == 2 &&
Jan Schmidt815a51c2012-05-16 17:00:02 +02004856 test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
4857 atomic_dec(&eb->refs);
4858
4859 if (atomic_read(&eb->refs) == 2 &&
Josef Bacik3083ee22012-03-09 16:01:49 -05004860 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004861 !extent_buffer_under_io(eb) &&
Josef Bacik3083ee22012-03-09 16:01:49 -05004862 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4863 atomic_dec(&eb->refs);
Chris Masond1310b22008-01-24 16:13:08 -05004864
Josef Bacik3083ee22012-03-09 16:01:49 -05004865 /*
4866 * I know this is terrible, but it's temporary until we stop tracking
4867 * the uptodate bits and such for the extent buffers.
4868 */
David Sterbaf7a52a42013-04-26 14:56:29 +00004869 release_extent_buffer(eb);
Chris Masond1310b22008-01-24 16:13:08 -05004870}
Chris Masond1310b22008-01-24 16:13:08 -05004871
Josef Bacik3083ee22012-03-09 16:01:49 -05004872void free_extent_buffer_stale(struct extent_buffer *eb)
4873{
4874 if (!eb)
Chris Masond1310b22008-01-24 16:13:08 -05004875 return;
4876
Josef Bacik3083ee22012-03-09 16:01:49 -05004877 spin_lock(&eb->refs_lock);
4878 set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
4879
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004880 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
Josef Bacik3083ee22012-03-09 16:01:49 -05004881 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4882 atomic_dec(&eb->refs);
David Sterbaf7a52a42013-04-26 14:56:29 +00004883 release_extent_buffer(eb);
Chris Masond1310b22008-01-24 16:13:08 -05004884}
4885
Chris Mason1d4284b2012-03-28 20:31:37 -04004886void clear_extent_buffer_dirty(struct extent_buffer *eb)
Chris Masond1310b22008-01-24 16:13:08 -05004887{
Chris Masond1310b22008-01-24 16:13:08 -05004888 unsigned long i;
4889 unsigned long num_pages;
4890 struct page *page;
4891
Chris Masond1310b22008-01-24 16:13:08 -05004892 num_pages = num_extent_pages(eb->start, eb->len);
4893
4894 for (i = 0; i < num_pages; i++) {
4895 page = extent_buffer_page(eb, i);
Chris Masonb9473432009-03-13 11:00:37 -04004896 if (!PageDirty(page))
Chris Masond2c3f4f2008-11-19 12:44:22 -05004897 continue;
4898
Chris Masona61e6f22008-07-22 11:18:08 -04004899 lock_page(page);
Chris Masoneb14ab82011-02-10 12:35:00 -05004900 WARN_ON(!PagePrivate(page));
4901
Chris Masond1310b22008-01-24 16:13:08 -05004902 clear_page_dirty_for_io(page);
Sven Wegener0ee0fda2008-07-30 16:54:26 -04004903 spin_lock_irq(&page->mapping->tree_lock);
Chris Masond1310b22008-01-24 16:13:08 -05004904 if (!PageDirty(page)) {
4905 radix_tree_tag_clear(&page->mapping->page_tree,
4906 page_index(page),
4907 PAGECACHE_TAG_DIRTY);
4908 }
Sven Wegener0ee0fda2008-07-30 16:54:26 -04004909 spin_unlock_irq(&page->mapping->tree_lock);
Chris Masonbf0da8c2011-11-04 12:29:37 -04004910 ClearPageError(page);
Chris Masona61e6f22008-07-22 11:18:08 -04004911 unlock_page(page);
Chris Masond1310b22008-01-24 16:13:08 -05004912 }
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004913 WARN_ON(atomic_read(&eb->refs) == 0);
Chris Masond1310b22008-01-24 16:13:08 -05004914}
Chris Masond1310b22008-01-24 16:13:08 -05004915
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004916int set_extent_buffer_dirty(struct extent_buffer *eb)
Chris Masond1310b22008-01-24 16:13:08 -05004917{
4918 unsigned long i;
4919 unsigned long num_pages;
Chris Masonb9473432009-03-13 11:00:37 -04004920 int was_dirty = 0;
Chris Masond1310b22008-01-24 16:13:08 -05004921
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004922 check_buffer_tree_ref(eb);
4923
Chris Masonb9473432009-03-13 11:00:37 -04004924 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004925
Chris Masond1310b22008-01-24 16:13:08 -05004926 num_pages = num_extent_pages(eb->start, eb->len);
Josef Bacik3083ee22012-03-09 16:01:49 -05004927 WARN_ON(atomic_read(&eb->refs) == 0);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004928 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
4929
Chris Masonb9473432009-03-13 11:00:37 -04004930 for (i = 0; i < num_pages; i++)
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004931 set_page_dirty(extent_buffer_page(eb, i));
Chris Masonb9473432009-03-13 11:00:37 -04004932 return was_dirty;
Chris Masond1310b22008-01-24 16:13:08 -05004933}
Chris Masond1310b22008-01-24 16:13:08 -05004934
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004935int clear_extent_buffer_uptodate(struct extent_buffer *eb)
Chris Mason1259ab72008-05-12 13:39:03 -04004936{
4937 unsigned long i;
4938 struct page *page;
4939 unsigned long num_pages;
4940
Chris Masonb4ce94d2009-02-04 09:25:08 -05004941 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004942 num_pages = num_extent_pages(eb->start, eb->len);
Chris Mason1259ab72008-05-12 13:39:03 -04004943 for (i = 0; i < num_pages; i++) {
4944 page = extent_buffer_page(eb, i);
Chris Mason33958dc2008-07-30 10:29:12 -04004945 if (page)
4946 ClearPageUptodate(page);
Chris Mason1259ab72008-05-12 13:39:03 -04004947 }
4948 return 0;
4949}
4950
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004951int set_extent_buffer_uptodate(struct extent_buffer *eb)
Chris Masond1310b22008-01-24 16:13:08 -05004952{
4953 unsigned long i;
4954 struct page *page;
4955 unsigned long num_pages;
4956
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004957 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
Chris Masond1310b22008-01-24 16:13:08 -05004958 num_pages = num_extent_pages(eb->start, eb->len);
Chris Masond1310b22008-01-24 16:13:08 -05004959 for (i = 0; i < num_pages; i++) {
4960 page = extent_buffer_page(eb, i);
Chris Masond1310b22008-01-24 16:13:08 -05004961 SetPageUptodate(page);
4962 }
4963 return 0;
4964}
Chris Masond1310b22008-01-24 16:13:08 -05004965
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004966int extent_buffer_uptodate(struct extent_buffer *eb)
Chris Masond1310b22008-01-24 16:13:08 -05004967{
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004968 return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
Chris Masond1310b22008-01-24 16:13:08 -05004969}
Chris Masond1310b22008-01-24 16:13:08 -05004970
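/*
 * Read the buffer's pages from disk. WAIT_NONE only trylocks the pages
 * and kicks off the I/O, while WAIT_COMPLETE also waits for every page
 * to come uptodate; pages that are already uptodate are skipped.
 */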
4971int read_extent_buffer_pages(struct extent_io_tree *tree,
Arne Jansenbb82ab82011-06-10 14:06:53 +02004972 struct extent_buffer *eb, u64 start, int wait,
Chris Masonf1885912008-04-09 16:28:12 -04004973 get_extent_t *get_extent, int mirror_num)
Chris Masond1310b22008-01-24 16:13:08 -05004974{
4975 unsigned long i;
4976 unsigned long start_i;
4977 struct page *page;
4978 int err;
4979 int ret = 0;
Chris Masonce9adaa2008-04-09 16:28:12 -04004980 int locked_pages = 0;
4981 int all_uptodate = 1;
Chris Masond1310b22008-01-24 16:13:08 -05004982 unsigned long num_pages;
Chris Mason727011e2010-08-06 13:21:20 -04004983 unsigned long num_reads = 0;
Chris Masona86c12c2008-02-07 10:50:54 -05004984 struct bio *bio = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04004985 unsigned long bio_flags = 0;
Chris Masona86c12c2008-02-07 10:50:54 -05004986
Chris Masonb4ce94d2009-02-04 09:25:08 -05004987 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
Chris Masond1310b22008-01-24 16:13:08 -05004988 return 0;
4989
Chris Masond1310b22008-01-24 16:13:08 -05004990 if (start) {
4991 WARN_ON(start < eb->start);
4992 start_i = (start >> PAGE_CACHE_SHIFT) -
4993 (eb->start >> PAGE_CACHE_SHIFT);
4994 } else {
4995 start_i = 0;
4996 }
4997
4998 num_pages = num_extent_pages(eb->start, eb->len);
4999 for (i = start_i; i < num_pages; i++) {
5000 page = extent_buffer_page(eb, i);
Arne Jansenbb82ab82011-06-10 14:06:53 +02005001 if (wait == WAIT_NONE) {
David Woodhouse2db04962008-08-07 11:19:43 -04005002 if (!trylock_page(page))
Chris Masonce9adaa2008-04-09 16:28:12 -04005003 goto unlock_exit;
Chris Masond1310b22008-01-24 16:13:08 -05005004 } else {
5005 lock_page(page);
5006 }
Chris Masonce9adaa2008-04-09 16:28:12 -04005007 locked_pages++;
Chris Mason727011e2010-08-06 13:21:20 -04005008 if (!PageUptodate(page)) {
5009 num_reads++;
Chris Masonce9adaa2008-04-09 16:28:12 -04005010 all_uptodate = 0;
Chris Mason727011e2010-08-06 13:21:20 -04005011 }
Chris Masonce9adaa2008-04-09 16:28:12 -04005012 }
5013 if (all_uptodate) {
5014 if (start_i == 0)
Chris Masonb4ce94d2009-02-04 09:25:08 -05005015 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
Chris Masonce9adaa2008-04-09 16:28:12 -04005016 goto unlock_exit;
5017 }
5018
Josef Bacikea466792012-03-26 21:57:36 -04005019 clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
Josef Bacik5cf1ab52012-04-16 09:42:26 -04005020 eb->read_mirror = 0;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005021 atomic_set(&eb->io_pages, num_reads);
Chris Masonce9adaa2008-04-09 16:28:12 -04005022 for (i = start_i; i < num_pages; i++) {
5023 page = extent_buffer_page(eb, i);
Chris Masonce9adaa2008-04-09 16:28:12 -04005024 if (!PageUptodate(page)) {
Chris Masonf1885912008-04-09 16:28:12 -04005025 ClearPageError(page);
Chris Masona86c12c2008-02-07 10:50:54 -05005026 err = __extent_read_full_page(tree, page,
Chris Masonf1885912008-04-09 16:28:12 -04005027 get_extent, &bio,
Josef Bacikd4c7ca82013-04-19 19:49:09 -04005028 mirror_num, &bio_flags,
5029 READ | REQ_META);
Chris Masond3977122009-01-05 21:25:51 -05005030 if (err)
Chris Masond1310b22008-01-24 16:13:08 -05005031 ret = err;
Chris Masond1310b22008-01-24 16:13:08 -05005032 } else {
5033 unlock_page(page);
5034 }
5035 }
5036
Jeff Mahoney355808c2011-10-03 23:23:14 -04005037 if (bio) {
Josef Bacikd4c7ca82013-04-19 19:49:09 -04005038 err = submit_one_bio(READ | REQ_META, bio, mirror_num,
5039 bio_flags);
Jeff Mahoney79787ea2012-03-12 16:03:00 +01005040 if (err)
5041 return err;
Jeff Mahoney355808c2011-10-03 23:23:14 -04005042 }
Chris Masona86c12c2008-02-07 10:50:54 -05005043
Arne Jansenbb82ab82011-06-10 14:06:53 +02005044 if (ret || wait != WAIT_COMPLETE)
Chris Masond1310b22008-01-24 16:13:08 -05005045 return ret;
Chris Masond3977122009-01-05 21:25:51 -05005046
Chris Masond1310b22008-01-24 16:13:08 -05005047 for (i = start_i; i < num_pages; i++) {
5048 page = extent_buffer_page(eb, i);
5049 wait_on_page_locked(page);
Chris Masond3977122009-01-05 21:25:51 -05005050 if (!PageUptodate(page))
Chris Masond1310b22008-01-24 16:13:08 -05005051 ret = -EIO;
Chris Masond1310b22008-01-24 16:13:08 -05005052 }
Chris Masond3977122009-01-05 21:25:51 -05005053
Chris Masond1310b22008-01-24 16:13:08 -05005054 return ret;
Chris Masonce9adaa2008-04-09 16:28:12 -04005055
5056unlock_exit:
5057 i = start_i;
Chris Masond3977122009-01-05 21:25:51 -05005058 while (locked_pages > 0) {
Chris Masonce9adaa2008-04-09 16:28:12 -04005059 page = extent_buffer_page(eb, i);
5060 i++;
5061 unlock_page(page);
5062 locked_pages--;
5063 }
5064 return ret;
Chris Masond1310b22008-01-24 16:13:08 -05005065}
Chris Masond1310b22008-01-24 16:13:08 -05005066
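/*
 * Copy len bytes starting at start out of the extent buffer into dstv,
 * walking the backing pages one at a time.
 */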
5067void read_extent_buffer(struct extent_buffer *eb, void *dstv,
5068 unsigned long start,
5069 unsigned long len)
5070{
5071 size_t cur;
5072 size_t offset;
5073 struct page *page;
5074 char *kaddr;
5075 char *dst = (char *)dstv;
5076 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
5077 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
Chris Masond1310b22008-01-24 16:13:08 -05005078
5079 WARN_ON(start > eb->len);
5080 WARN_ON(start + len > eb->start + eb->len);
5081
Geert Uytterhoeven778746b2013-08-20 13:20:16 +02005082 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
Chris Masond1310b22008-01-24 16:13:08 -05005083
Chris Masond3977122009-01-05 21:25:51 -05005084 while (len > 0) {
Chris Masond1310b22008-01-24 16:13:08 -05005085 page = extent_buffer_page(eb, i);
Chris Masond1310b22008-01-24 16:13:08 -05005086
5087 cur = min(len, (PAGE_CACHE_SIZE - offset));
Chris Masona6591712011-07-19 12:04:14 -04005088 kaddr = page_address(page);
Chris Masond1310b22008-01-24 16:13:08 -05005089 memcpy(dst, kaddr + offset, cur);
Chris Masond1310b22008-01-24 16:13:08 -05005090
5091 dst += cur;
5092 len -= cur;
5093 offset = 0;
5094 i++;
5095 }
5096}
Chris Masond1310b22008-01-24 16:13:08 -05005097
Gerhard Heift550ac1d2014-01-30 16:24:01 +01005098int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dstv,
5099 unsigned long start,
5100 unsigned long len)
5101{
5102 size_t cur;
5103 size_t offset;
5104 struct page *page;
5105 char *kaddr;
5106 char __user *dst = (char __user *)dstv;
5107 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
5108 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
5109 int ret = 0;
5110
5111 WARN_ON(start > eb->len);
5112 WARN_ON(start + len > eb->start + eb->len);
5113
5114 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
5115
5116 while (len > 0) {
5117 page = extent_buffer_page(eb, i);
5118
5119 cur = min(len, (PAGE_CACHE_SIZE - offset));
5120 kaddr = page_address(page);
5121 if (copy_to_user(dst, kaddr + offset, cur)) {
5122 ret = -EFAULT;
5123 break;
5124 }
5125
5126 dst += cur;
5127 len -= cur;
5128 offset = 0;
5129 i++;
5130 }
5131
5132 return ret;
5133}
5134
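/*
 * Map a region of the buffer that must live inside a single page;
 * returns -EINVAL when [start, start + min_len) would cross a page
 * boundary.
 */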
Chris Masond1310b22008-01-24 16:13:08 -05005135int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
Chris Masona6591712011-07-19 12:04:14 -04005136 unsigned long min_len, char **map,
Chris Masond1310b22008-01-24 16:13:08 -05005137 unsigned long *map_start,
Chris Masona6591712011-07-19 12:04:14 -04005138 unsigned long *map_len)
Chris Masond1310b22008-01-24 16:13:08 -05005139{
5140 size_t offset = start & (PAGE_CACHE_SIZE - 1);
5141 char *kaddr;
5142 struct page *p;
5143 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
5144 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
5145 unsigned long end_i = (start_offset + start + min_len - 1) >>
5146 PAGE_CACHE_SHIFT;
5147
5148 if (i != end_i)
5149 return -EINVAL;
5150
5151 if (i == 0) {
5152 offset = start_offset;
5153 *map_start = 0;
5154 } else {
5155 offset = 0;
5156 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
5157 }
Chris Masond3977122009-01-05 21:25:51 -05005158
Chris Masond1310b22008-01-24 16:13:08 -05005159 if (start + min_len > eb->len) {
Julia Lawall31b1a2b2012-11-03 10:58:34 +00005160 WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
Geert Uytterhoevenc1c9ff72013-08-20 13:20:07 +02005161 "wanted %lu %lu\n",
5162 eb->start, eb->len, start, min_len);
Josef Bacik850265332011-03-15 14:52:12 -04005163 return -EINVAL;
Chris Masond1310b22008-01-24 16:13:08 -05005164 }
5165
5166 p = extent_buffer_page(eb, i);
Chris Masona6591712011-07-19 12:04:14 -04005167 kaddr = page_address(p);
Chris Masond1310b22008-01-24 16:13:08 -05005168 *map = kaddr + offset;
5169 *map_len = PAGE_CACHE_SIZE - offset;
5170 return 0;
5171}
Chris Masond1310b22008-01-24 16:13:08 -05005172
Chris Masond1310b22008-01-24 16:13:08 -05005173int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
5174 unsigned long start,
5175 unsigned long len)
5176{
5177 size_t cur;
5178 size_t offset;
5179 struct page *page;
5180 char *kaddr;
5181 char *ptr = (char *)ptrv;
5182 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
5183 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
5184 int ret = 0;
5185
5186 WARN_ON(start > eb->len);
5187 WARN_ON(start + len > eb->start + eb->len);
5188
Geert Uytterhoeven778746b2013-08-20 13:20:16 +02005189 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
Chris Masond1310b22008-01-24 16:13:08 -05005190
Chris Masond3977122009-01-05 21:25:51 -05005191 while (len > 0) {
Chris Masond1310b22008-01-24 16:13:08 -05005192 page = extent_buffer_page(eb, i);
Chris Masond1310b22008-01-24 16:13:08 -05005193
5194 cur = min(len, (PAGE_CACHE_SIZE - offset));
5195
Chris Masona6591712011-07-19 12:04:14 -04005196 kaddr = page_address(page);
Chris Masond1310b22008-01-24 16:13:08 -05005197 ret = memcmp(ptr, kaddr + offset, cur);
Chris Masond1310b22008-01-24 16:13:08 -05005198 if (ret)
5199 break;
5200
5201 ptr += cur;
5202 len -= cur;
5203 offset = 0;
5204 i++;
5205 }
5206 return ret;
5207}
Chris Masond1310b22008-01-24 16:13:08 -05005208
5209void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
5210 unsigned long start, unsigned long len)
5211{
5212 size_t cur;
5213 size_t offset;
5214 struct page *page;
5215 char *kaddr;
5216 char *src = (char *)srcv;
5217 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
5218 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
5219
5220 WARN_ON(start > eb->len);
5221 WARN_ON(start + len > eb->start + eb->len);
5222
Geert Uytterhoeven778746b2013-08-20 13:20:16 +02005223 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
Chris Masond1310b22008-01-24 16:13:08 -05005224
Chris Masond3977122009-01-05 21:25:51 -05005225 while (len > 0) {
Chris Masond1310b22008-01-24 16:13:08 -05005226 page = extent_buffer_page(eb, i);
5227 WARN_ON(!PageUptodate(page));
5228
5229 cur = min(len, PAGE_CACHE_SIZE - offset);
Chris Masona6591712011-07-19 12:04:14 -04005230 kaddr = page_address(page);
Chris Masond1310b22008-01-24 16:13:08 -05005231 memcpy(kaddr + offset, src, cur);
Chris Masond1310b22008-01-24 16:13:08 -05005232
5233 src += cur;
5234 len -= cur;
5235 offset = 0;
5236 i++;
5237 }
5238}
Chris Masond1310b22008-01-24 16:13:08 -05005239
5240void memset_extent_buffer(struct extent_buffer *eb, char c,
5241 unsigned long start, unsigned long len)
5242{
5243 size_t cur;
5244 size_t offset;
5245 struct page *page;
5246 char *kaddr;
5247 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
5248 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
5249
5250 WARN_ON(start > eb->len);
5251 WARN_ON(start + len > eb->start + eb->len);
5252
Geert Uytterhoeven778746b2013-08-20 13:20:16 +02005253 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
Chris Masond1310b22008-01-24 16:13:08 -05005254
Chris Masond3977122009-01-05 21:25:51 -05005255 while (len > 0) {
Chris Masond1310b22008-01-24 16:13:08 -05005256 page = extent_buffer_page(eb, i);
5257 WARN_ON(!PageUptodate(page));
5258
5259 cur = min(len, PAGE_CACHE_SIZE - offset);
Chris Masona6591712011-07-19 12:04:14 -04005260 kaddr = page_address(page);
Chris Masond1310b22008-01-24 16:13:08 -05005261 memset(kaddr + offset, c, cur);
Chris Masond1310b22008-01-24 16:13:08 -05005262
5263 len -= cur;
5264 offset = 0;
5265 i++;
5266 }
5267}
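/*
 * Usage sketch (illustrative): fill a whole freshly allocated buffer with a
 * byte value, e.g. zeroing it before any headers are written:
 *
 *	memset_extent_buffer(eb, 0, 0, eb->len);
 *
 * The same PageUptodate contract as write_extent_buffer() applies.
 */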
Chris Masond1310b22008-01-24 16:13:08 -05005268
5269void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
5270 unsigned long dst_offset, unsigned long src_offset,
5271 unsigned long len)
5272{
5273 u64 dst_len = dst->len;
5274 size_t cur;
5275 size_t offset;
5276 struct page *page;
5277 char *kaddr;
5278 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
5279 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
5280
5281 WARN_ON(src->len != dst_len);
5282
5283 offset = (start_offset + dst_offset) &
Geert Uytterhoeven778746b2013-08-20 13:20:16 +02005284 (PAGE_CACHE_SIZE - 1);
Chris Masond1310b22008-01-24 16:13:08 -05005285
Chris Masond3977122009-01-05 21:25:51 -05005286 while (len > 0) {
Chris Masond1310b22008-01-24 16:13:08 -05005287 page = extent_buffer_page(dst, i);
5288 WARN_ON(!PageUptodate(page));
5289
5290 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
5291
Chris Masona6591712011-07-19 12:04:14 -04005292 kaddr = page_address(page);
Chris Masond1310b22008-01-24 16:13:08 -05005293 read_extent_buffer(src, kaddr + offset, src_offset, cur);
Chris Masond1310b22008-01-24 16:13:08 -05005294
5295 src_offset += cur;
5296 len -= cur;
5297 offset = 0;
5298 i++;
5299 }
5300}
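/*
 * Usage sketch (illustrative): clone the contents of one buffer into
 * another of the same length, as a tree-block copy would:
 *
 *	copy_extent_buffer(new_eb, old_eb, 0, 0, old_eb->len);
 *
 * Only the destination is walked page by page here; the source side goes
 * through read_extent_buffer(), so the two buffers need not share the same
 * in-page alignment.
 */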
Chris Masond1310b22008-01-24 16:13:08 -05005301
Sergei Trofimovich33872062011-04-11 21:52:52 +00005302static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
5303{
5304 unsigned long distance = (src > dst) ? src - dst : dst - src;
5305 return distance < len;
5306}
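/*
 * Worked example: with src = 100, dst = 160 and len = 128 the distance is
 * 60 < 128, so the ranges [100, 227] and [160, 287] overlap and
 * copy_pages() below must use memmove().  With len = 32 the distance (60)
 * is not less than the length, the ranges are disjoint, and plain memcpy()
 * is safe.
 */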
5307
Chris Masond1310b22008-01-24 16:13:08 -05005308static void copy_pages(struct page *dst_page, struct page *src_page,
5309 unsigned long dst_off, unsigned long src_off,
5310 unsigned long len)
5311{
Chris Masona6591712011-07-19 12:04:14 -04005312 char *dst_kaddr = page_address(dst_page);
Chris Masond1310b22008-01-24 16:13:08 -05005313 char *src_kaddr;
Chris Mason727011e2010-08-06 13:21:20 -04005314 int must_memmove = 0;
Chris Masond1310b22008-01-24 16:13:08 -05005315
Sergei Trofimovich33872062011-04-11 21:52:52 +00005316 if (dst_page != src_page) {
Chris Masona6591712011-07-19 12:04:14 -04005317 src_kaddr = page_address(src_page);
Sergei Trofimovich33872062011-04-11 21:52:52 +00005318 } else {
Chris Masond1310b22008-01-24 16:13:08 -05005319 src_kaddr = dst_kaddr;
Chris Mason727011e2010-08-06 13:21:20 -04005320 if (areas_overlap(src_off, dst_off, len))
5321 must_memmove = 1;
Sergei Trofimovich33872062011-04-11 21:52:52 +00005322 }
Chris Masond1310b22008-01-24 16:13:08 -05005323
Chris Mason727011e2010-08-06 13:21:20 -04005324 if (must_memmove)
5325 memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
5326 else
5327 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
Chris Masond1310b22008-01-24 16:13:08 -05005328}
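/*
 * Design note: the memmove() fallback only matters when source and
 * destination sit in the same page; ranges in different pages can never
 * alias, so the cheaper memcpy() is always safe in that case.
 */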
5329
5330void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5331 unsigned long src_offset, unsigned long len)
5332{
5333 size_t cur;
5334 size_t dst_off_in_page;
5335 size_t src_off_in_page;
5336 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
5337 unsigned long dst_i;
5338 unsigned long src_i;
5339
5340 if (src_offset + len > dst->len) {
Frank Holtonefe120a2013-12-20 11:37:06 -05005341 printk(KERN_ERR "BTRFS: memcpy bogus src_offset %lu move "
Chris Masond3977122009-01-05 21:25:51 -05005342 "len %lu dst len %lu\n", src_offset, len, dst->len);
Chris Masond1310b22008-01-24 16:13:08 -05005343 BUG_ON(1);
5344 }
5345 if (dst_offset + len > dst->len) {
Frank Holtonefe120a2013-12-20 11:37:06 -05005346 printk(KERN_ERR "BTRFS: memcpy bogus dst_offset %lu move "
Chris Masond3977122009-01-05 21:25:51 -05005347 "len %lu dst len %lu\n", dst_offset, len, dst->len);
Chris Masond1310b22008-01-24 16:13:08 -05005348 BUG_ON(1);
5349 }
5350
Chris Masond3977122009-01-05 21:25:51 -05005351 while (len > 0) {
Chris Masond1310b22008-01-24 16:13:08 -05005352 dst_off_in_page = (start_offset + dst_offset) &
Geert Uytterhoeven778746b2013-08-20 13:20:16 +02005353 (PAGE_CACHE_SIZE - 1);
Chris Masond1310b22008-01-24 16:13:08 -05005354 src_off_in_page = (start_offset + src_offset) &
Geert Uytterhoeven778746b2013-08-20 13:20:16 +02005355 (PAGE_CACHE_SIZE - 1);
Chris Masond1310b22008-01-24 16:13:08 -05005356
5357 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
5358 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
5359
5360 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
5361 src_off_in_page));
5362 cur = min_t(unsigned long, cur,
5363 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
5364
5365 copy_pages(extent_buffer_page(dst, dst_i),
5366 extent_buffer_page(dst, src_i),
5367 dst_off_in_page, src_off_in_page, cur);
5368
5369 src_offset += cur;
5370 dst_offset += cur;
5371 len -= cur;
5372 }
5373}
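/*
 * Usage sketch (illustrative; dst_off, src_off and size are hypothetical):
 * the forward-walking copy used by tree code to shuffle item data inside
 * one buffer, e.g. closing a gap after a deletion:
 *
 *	memcpy_extent_buffer(leaf, dst_off, src_off, size);
 *
 * Because this variant walks forward, memmove_extent_buffer() below must be
 * used instead whenever the destination is above the source and the ranges
 * might overlap.
 */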
Chris Masond1310b22008-01-24 16:13:08 -05005374
5375void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5376 unsigned long src_offset, unsigned long len)
5377{
5378 size_t cur;
5379 size_t dst_off_in_page;
5380 size_t src_off_in_page;
5381 unsigned long dst_end = dst_offset + len - 1;
5382 unsigned long src_end = src_offset + len - 1;
5383 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
5384 unsigned long dst_i;
5385 unsigned long src_i;
5386
5387 if (src_offset + len > dst->len) {
Frank Holtonefe120a2013-12-20 11:37:06 -05005388 printk(KERN_ERR "BTRFS: memmove bogus src_offset %lu move "
Chris Masond3977122009-01-05 21:25:51 -05005389 "len %lu dst len %lu\n", src_offset, len, dst->len);
Chris Masond1310b22008-01-24 16:13:08 -05005390 BUG_ON(1);
5391 }
5392 if (dst_offset + len > dst->len) {
Frank Holtonefe120a2013-12-20 11:37:06 -05005393 printk(KERN_ERR "BTRFS: memmove bogus dst_offset %lu move "
Chris Masond3977122009-01-05 21:25:51 -05005394 "len %lu dst len %lu\n", dst_offset, len, dst->len);
Chris Masond1310b22008-01-24 16:13:08 -05005395 BUG_ON(1);
5396 }
Chris Mason727011e2010-08-06 13:21:20 -04005397 if (dst_offset < src_offset) {
Chris Masond1310b22008-01-24 16:13:08 -05005398 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
5399 return;
5400 }
Chris Masond3977122009-01-05 21:25:51 -05005401 while (len > 0) {
Chris Masond1310b22008-01-24 16:13:08 -05005402 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
5403 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
5404
5405 dst_off_in_page = (start_offset + dst_end) &
Geert Uytterhoeven778746b2013-08-20 13:20:16 +02005406 (PAGE_CACHE_SIZE - 1);
Chris Masond1310b22008-01-24 16:13:08 -05005407 src_off_in_page = (start_offset + src_end) &
Geert Uytterhoeven778746b2013-08-20 13:20:16 +02005408 (PAGE_CACHE_SIZE - 1);
Chris Masond1310b22008-01-24 16:13:08 -05005409
5410 cur = min_t(unsigned long, len, src_off_in_page + 1);
5411 cur = min(cur, dst_off_in_page + 1);
Zach Brown1877e1a2013-10-16 12:10:33 -07005412 copy_pages(extent_buffer_page(dst, dst_i),
Chris Masond1310b22008-01-24 16:13:08 -05005413 extent_buffer_page(dst, src_i),
5414 dst_off_in_page - cur + 1,
5415 src_off_in_page - cur + 1, cur);
5416
5417 dst_end -= cur;
5418 src_end -= cur;
5419 len -= cur;
5420 }
5421}
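/*
 * Worked example (illustrative): with src_offset = 0, dst_offset = 2048 and
 * len = 8192 the ranges overlap and the destination is above the source, so
 * the loop starts at the last byte of each range (src_end = 8191,
 * dst_end = 10239) and walks backwards, each pass copying at most up to the
 * nearer page boundary of the two cursors; earlier source bytes are thus
 * never clobbered before they have been read.
 */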
Chris Mason6af118ce2008-07-22 11:18:07 -04005422
David Sterbaf7a52a42013-04-26 14:56:29 +00005423int try_release_extent_buffer(struct page *page)
Miao Xie19fe0a82010-10-26 20:57:29 -04005424{
Chris Mason6af118ce2008-07-22 11:18:07 -04005425 struct extent_buffer *eb;
Miao Xie897ca6e92010-10-26 20:57:29 -04005426
Miao Xie19fe0a82010-10-26 20:57:29 -04005427 /*
Josef Bacik3083ee22012-03-09 16:01:49 -05005428 * We need to make sure nobody is attaching this page to an eb right
5429 * now.
Miao Xie19fe0a82010-10-26 20:57:29 -04005430 */
Josef Bacik3083ee22012-03-09 16:01:49 -05005431 spin_lock(&page->mapping->private_lock);
5432 if (!PagePrivate(page)) {
5433 spin_unlock(&page->mapping->private_lock);
5434 return 1;
Miao Xie19fe0a82010-10-26 20:57:29 -04005435 }
5436
Josef Bacik3083ee22012-03-09 16:01:49 -05005437 eb = (struct extent_buffer *)page->private;
5438 BUG_ON(!eb);
Miao Xie19fe0a82010-10-26 20:57:29 -04005439
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005440 /*
Josef Bacik3083ee22012-03-09 16:01:49 -05005441 * This is a little awful but should be OK: we need to make sure that
5442 * the eb doesn't disappear out from under us while we're looking at
5443 * this page.
5444 */
5445 spin_lock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005446 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
Josef Bacik3083ee22012-03-09 16:01:49 -05005447 spin_unlock(&eb->refs_lock);
5448 spin_unlock(&page->mapping->private_lock);
5449 return 0;
5450 }
5451 spin_unlock(&page->mapping->private_lock);
5452
Josef Bacik3083ee22012-03-09 16:01:49 -05005453 /*
5454 * If tree ref isn't set then we know the ref on this eb is a real ref,
5455 * so just return; this page will likely be freed soon anyway.
5456 */
5457 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
5458 spin_unlock(&eb->refs_lock);
5459 return 0;
5460 }
Josef Bacik3083ee22012-03-09 16:01:49 -05005461
David Sterbaf7a52a42013-04-26 14:56:29 +00005462 return release_extent_buffer(eb);
Chris Mason6af118ce2008-07-22 11:18:07 -04005463}
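/*
 * Usage note (a sketch of the expected wiring, not shown in this file):
 * this is the backend for the btree pages' ->releasepage callback, e.g.
 * btree_releasepage() in disk-io.c returns try_release_extent_buffer(page)
 * once writeback and dirty state have been ruled out.  A non-zero return
 * tells the VM the page's private eb reference is gone and the page may be
 * freed.
 */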