#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/version.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "extent_map.h"

/* temporary define until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
				      unsigned long extra_flags,
				      void (*ctor)(void *, struct kmem_cache *,
						   unsigned long));

static struct kmem_cache *extent_map_cache;
static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(buffers);
static LIST_HEAD(states);

static spinlock_t state_lock = SPIN_LOCK_UNLOCKED;
#define BUFFER_LRU_MAX 64

struct tree_entry {
	u64 start;
	u64 end;
	int in_tree;
	struct rb_node rb_node;
};

struct extent_page_data {
	struct bio *bio;
	struct extent_map_tree *tree;
	get_extent_t *get_extent;
};

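/*
 * module init / exit pair: create the slab caches used for extent_map,
 * extent_state and extent_buffer objects, and tear them down again,
 * reporting any extent_state structs still on the global list as leaks.
 */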
int __init extent_map_init(void)
{
	extent_map_cache = btrfs_cache_create("extent_map",
					      sizeof(struct extent_map), 0,
					      NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	extent_state_cache = btrfs_cache_create("extent_state",
						sizeof(struct extent_state), 0,
						NULL);
	if (!extent_state_cache)
		goto free_map_cache;
	extent_buffer_cache = btrfs_cache_create("extent_buffers",
						 sizeof(struct extent_buffer), 0,
						 NULL);
	if (!extent_buffer_cache)
		goto free_state_cache;
	return 0;

free_state_cache:
	kmem_cache_destroy(extent_state_cache);
free_map_cache:
	kmem_cache_destroy(extent_map_cache);
	return -ENOMEM;
}

void extent_map_exit(void)
{
	struct extent_state *state;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, list);
		printk("state leak: start %Lu end %Lu state %lu in tree %d refs %d\n",
		       state->start, state->end, state->state,
		       state->in_tree, atomic_read(&state->refs));
		list_del(&state->list);
		kmem_cache_free(extent_state_cache, state);
	}

	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
	if (extent_buffer_cache)
		kmem_cache_destroy(extent_buffer_cache);
}

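/*
 * initialize a new extent_map_tree: empty map and state rb-trees, an
 * empty buffer lru, and a pointer back to the address_space this tree
 * serves.
 */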
void extent_map_tree_init(struct extent_map_tree *tree,
			  struct address_space *mapping, gfp_t mask)
{
	tree->map.rb_node = NULL;
	tree->state.rb_node = NULL;
	tree->ops = NULL;
	tree->dirty_bytes = 0;
	rwlock_init(&tree->lock);
	spin_lock_init(&tree->lru_lock);
	tree->mapping = mapping;
	INIT_LIST_HEAD(&tree->buffer_lru);
	tree->lru_size = 0;
}
EXPORT_SYMBOL(extent_map_tree_init);

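/*
 * drop every extent_buffer on the tree's lru, releasing the reference
 * the lru holds on each one.
 */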
void extent_map_tree_empty_lru(struct extent_map_tree *tree)
{
	struct extent_buffer *eb;
	while (!list_empty(&tree->buffer_lru)) {
		eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
				lru);
		list_del_init(&eb->lru);
		free_extent_buffer(eb);
	}
}
EXPORT_SYMBOL(extent_map_tree_empty_lru);

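/*
 * allocate an extent_map struct from the slab cache and take the first
 * reference on it; free_extent_map drops that reference.
 */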
struct extent_map *alloc_extent_map(gfp_t mask)
{
	struct extent_map *em;
	em = kmem_cache_alloc(extent_map_cache, mask);
	if (!em || IS_ERR(em))
		return em;
	em->in_tree = 0;
	atomic_set(&em->refs, 1);
	return em;
}
EXPORT_SYMBOL(alloc_extent_map);

void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		kmem_cache_free(extent_map_cache, em);
	}
}
EXPORT_SYMBOL(free_extent_map);

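/*
 * allocate an extent_state struct, take the first reference on it, and
 * link it onto the global states list used for leak detection at
 * module exit.
 */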
struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;
	unsigned long flags;

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state || IS_ERR(state))
		return state;
	state->state = 0;
	state->in_tree = 0;
	state->private = 0;

	spin_lock_irqsave(&state_lock, flags);
	list_add(&state->list, &states);
	spin_unlock_irqrestore(&state_lock, flags);

	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	return state;
}
EXPORT_SYMBOL(alloc_extent_state);

void free_extent_state(struct extent_state *state)
{
	unsigned long flags;
	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
		WARN_ON(state->in_tree);
		spin_lock_irqsave(&state_lock, flags);
		list_del(&state->list);
		spin_unlock_irqrestore(&state_lock, flags);
		kmem_cache_free(extent_state_cache, state);
	}
}
EXPORT_SYMBOL(free_extent_state);

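/*
 * insert 'node' into an rb-tree indexed by the end offset of each
 * entry. Returns the existing node if 'offset' already falls inside
 * an entry's [start, end] range, or NULL on successful insertion.
 */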
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct tree_entry, rb_node);
	entry->in_tree = 1;
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

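/*
 * find the entry containing 'offset'. When no entry contains it,
 * *prev_ret is pointed at the first entry that ends at or after
 * 'offset' (or NULL past the end of the tree).
 */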
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;
	while (prev && offset > prev_entry->end) {
		prev = rb_next(prev);
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
	}
	*prev_ret = prev;
	return NULL;
}

static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
{
	struct rb_node *prev;
	struct rb_node *ret;
	ret = __tree_search(root, offset, &prev);
	if (!ret)
		return prev;
	return ret;
}

static int tree_delete(struct rb_root *root, u64 offset)
{
	struct rb_node *node;
	struct tree_entry *entry;

	node = __tree_search(root, offset, NULL);
	if (!node)
		return -ENOENT;
	entry = rb_entry(node, struct tree_entry, rb_node);
	entry->in_tree = 0;
	rb_erase(node, root);
	return 0;
}

/*
 * add_extent_mapping tries a simple backward merge with existing
 * mappings. The extent_map struct passed in will be inserted into
 * the tree directly (no copies made, just a reference taken).
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
	struct extent_map *prev = NULL;
	struct rb_node *rb;

	write_lock_irq(&tree->lock);
	rb = tree_insert(&tree->map, em->end, &em->rb_node);
	if (rb) {
		prev = rb_entry(rb, struct extent_map, rb_node);
		ret = -EEXIST;
		goto out;
	}
	atomic_inc(&em->refs);
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			prev = rb_entry(rb, struct extent_map, rb_node);
		if (prev && prev->end + 1 == em->start &&
		    ((em->block_start == EXTENT_MAP_HOLE &&
		      prev->block_start == EXTENT_MAP_HOLE) ||
		     (em->block_start == EXTENT_MAP_INLINE &&
		      prev->block_start == EXTENT_MAP_INLINE) ||
		     (em->block_start == EXTENT_MAP_DELALLOC &&
		      prev->block_start == EXTENT_MAP_DELALLOC) ||
		     (em->block_start < EXTENT_MAP_DELALLOC - 1 &&
		      em->block_start == prev->block_end + 1))) {
			em->start = prev->start;
			em->block_start = prev->block_start;
			rb_erase(&prev->rb_node, &tree->map);
			prev->in_tree = 0;
			free_extent_map(prev);
		}
	}
out:
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(add_extent_mapping);

/*
 * lookup_extent_mapping returns the first extent_map struct in the
 * tree that intersects the [start, end] (inclusive) range. There may
 * be additional objects in the tree that intersect, so check the object
 * returned carefully to make sure you don't need additional lookups.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 end)
{
	struct extent_map *em;
	struct rb_node *rb_node;

	read_lock_irq(&tree->lock);
	rb_node = tree_search(&tree->map, start);
	if (!rb_node) {
		em = NULL;
		goto out;
	}
	if (IS_ERR(rb_node)) {
		em = ERR_PTR(PTR_ERR(rb_node));
		goto out;
	}
	em = rb_entry(rb_node, struct extent_map, rb_node);
	if (em->end < start || em->start > end) {
		em = NULL;
		goto out;
	}
	atomic_inc(&em->refs);
out:
	read_unlock_irq(&tree->lock);
	return em;
}
EXPORT_SYMBOL(lookup_extent_mapping);

/*
 * removes an extent_map struct from the tree. No reference counts are
 * dropped, and no checks are done to see if the range is in use
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret;

	write_lock_irq(&tree->lock);
	ret = tree_delete(&tree->map, em->end);
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(remove_extent_mapping);

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree. Extents with EXTENT_IO in their state field
 * are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static int merge_state(struct extent_map_tree *tree,
		       struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & EXTENT_IOBITS)
		return 0;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			state->start = other->start;
			other->in_tree = 0;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			other->start = state->start;
			state->in_tree = 0;
			rb_erase(&state->rb_node, &tree->state);
			free_extent_state(state);
		}
	}
	return 0;
}

/*
 * insert an extent_state struct into the tree. 'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally. This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_map_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int bits)
{
	struct rb_node *node;

	if (end < start) {
		printk("end < start %Lu %Lu\n", end, start);
		WARN_ON(1);
	}
	if (bits & EXTENT_DIRTY)
		tree->dirty_bytes += end - start + 1;
	state->state |= bits;
	state->start = start;
	state->end = end;
	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, start, end);
		free_extent_state(state);
		return -EEXIST;
	}
	merge_state(tree, state);
	return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half. 'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling,
 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
 * are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [split, orig->end]
 *
 * The tree locks are not taken by this function. They need to be held
 * by the caller.
 */
static int split_state(struct extent_map_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;
	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, prealloc->start,
		       prealloc->end);
		free_extent_state(prealloc);
		return -EEXIST;
	}
	return 0;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up any one waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static int clear_state_bit(struct extent_map_tree *tree,
			   struct extent_state *state, int bits, int wake,
			   int delete)
{
	int ret = state->state & bits;

	if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		WARN_ON(range > tree->dirty_bytes);
		tree->dirty_bytes -= range;
	}
	state->state &= ~bits;
	if (wake)
		wake_up(&state->wq);
	if (delete || state->state == 0) {
		if (state->in_tree) {
			rb_erase(&state->rb_node, &tree->state);
			state->in_tree = 0;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
	}
	return ret;
}

/*
 * clear some bits on a range in the tree. This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (i.e. for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_map_tree *tree, u64 start, u64 end,
		     int bits, int wake, int delete, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err;
	int set = 0;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	write_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(&tree->state, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again. It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			start = state->end + 1;
			set |= clear_state_bit(tree, state, bits,
					       wake, delete);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		if (wake)
			wake_up(&state->wq);
		set |= clear_state_bit(tree, prealloc, bits,
				       wake, delete);
		prealloc = NULL;
		goto out;
	}

	start = state->end + 1;
	set |= clear_state_bit(tree, state, bits, wake, delete);
	goto search_again;

out:
	write_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return set;

search_again:
	if (start > end)
		goto out;
	write_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(clear_extent_bit);

static int wait_on_state(struct extent_map_tree *tree,
			 struct extent_state *state)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	read_unlock_irq(&tree->lock);
	schedule();
	read_lock_irq(&tree->lock);
	finish_wait(&state->wq, &wait);
	return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
int wait_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct rb_node *node;

	read_lock_irq(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(&tree->state, start);
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (need_resched()) {
			read_unlock_irq(&tree->lock);
			cond_resched();
			read_lock_irq(&tree->lock);
		}
	}
out:
	read_unlock_irq(&tree->lock);
	return 0;
}
EXPORT_SYMBOL(wait_extent_bit);

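/*
 * set bits on a single extent_state, keeping the tree-wide dirty_bytes
 * counter in sync when EXTENT_DIRTY is newly set.
 */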
static void set_state_bits(struct extent_map_tree *tree,
			   struct extent_state *state,
			   int bits)
{
	if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		tree->dirty_bytes += range;
	}
	state->state |= bits;
}

/*
 * set some bits on a range in the tree. This may require allocations
 * or sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
 * range already has the desired bits set. The start of the existing
 * range is returned in failed_start in this case.
 *
 * [start, end] is inclusive
 * This takes the tree lock.
 */
int set_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits,
		   int exclusive, u64 *failed_start, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err = 0;
	int set;
	u64 last_start;
	u64 last_end;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	write_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node) {
		err = insert_state(tree, prealloc, start, end, bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		goto out;
	}

	state = rb_entry(node, struct extent_state, rb_node);
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		set = state->state & bits;
		if (set && exclusive) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}
		set_state_bits(tree, state, bits);
		start = state->end + 1;
		merge_state(tree, state);
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again. It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, bits);
			start = state->end + 1;
			merge_state(tree, state);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;
		err = insert_state(tree, prealloc, start, this_end,
				   bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		if (err)
			goto out;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		set_state_bits(tree, prealloc, bits);
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	write_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	write_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(set_extent_bit);

/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_dirty);

int set_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
		    int bits, gfp_t mask)
{
	return set_extent_bit(tree, start, end, bits, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_bits);

int clear_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
		      int bits, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_bits);

int set_extent_delalloc(struct extent_map_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_delalloc);

int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_dirty);

int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
		   gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_new);

int clear_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_new);

int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_uptodate);

int clear_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
			  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_uptodate);

int set_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
			 gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
			      0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_writeback);

int clear_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
			   gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
}
EXPORT_SYMBOL(clear_extent_writeback);

int wait_on_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
}
EXPORT_SYMBOL(wait_on_extent_writeback);

/*
 * locks a range in ascending order, waiting for any locked regions
 * it hits on the way. [start, end] are inclusive, and this will sleep.
 */
int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask)
{
	int err;
	u64 failed_start;
	while (1) {
		err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
				     &failed_start, mask);
		if (err == -EEXIST && (mask & __GFP_WAIT)) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else {
			break;
		}
		WARN_ON(start > end);
	}
	return err;
}
EXPORT_SYMBOL(lock_extent);

int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end,
		  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
}
EXPORT_SYMBOL(unlock_extent);

/*
 * helper function to set pages and extents in the tree dirty
 */
int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		__set_page_dirty_nobuffers(page);
		page_cache_release(page);
		index++;
	}
	set_extent_dirty(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_dirty);

/*
 * helper function to set both pages and extents in the tree writeback
 */
int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	set_extent_writeback(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_writeback);

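/*
 * find the first extent in the state tree, ending at or after 'start',
 * that has any of the given bits set. Returns 0 and fills in
 * *start_ret/*end_ret on success, or 1 if nothing was found.
 */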
int find_first_extent_bit(struct extent_map_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 1;

	read_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits)) {
			*start_ret = state->start;
			*end_ret = state->end;
			ret = 0;
			break;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	read_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(find_first_extent_bit);

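/*
 * find and lock a contiguous run of delalloc extents around *start:
 * walk backward to the beginning of the run, then forward until up to
 * max_bytes are covered, setting EXTENT_LOCKED on each state (sleeping
 * on any state already locked). The range found is returned through
 * *start and *end; the return value is the number of extents locked.
 */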
u64 find_lock_delalloc_range(struct extent_map_tree *tree,
			     u64 *start, u64 *end, u64 max_bytes)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 found = 0;
	u64 total_bytes = 0;

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
search_again:
	node = tree_search(&tree->state, cur_start);
	if (!node || IS_ERR(node)) {
		*end = (u64)-1;
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (found && state->start != cur_start) {
			goto out;
		}
		if (!(state->state & EXTENT_DELALLOC)) {
			if (!found)
				*end = state->end;
			goto out;
		}
		if (!found) {
			struct extent_state *prev_state;
			struct rb_node *prev_node = node;
			while (1) {
				prev_node = rb_prev(prev_node);
				if (!prev_node)
					break;
				prev_state = rb_entry(prev_node,
						      struct extent_state,
						      rb_node);
				if (!(prev_state->state & EXTENT_DELALLOC))
					break;
				state = prev_state;
				node = prev_node;
			}
		}
		if (state->state & EXTENT_LOCKED) {
			DEFINE_WAIT(wait);
			atomic_inc(&state->refs);
			prepare_to_wait(&state->wq, &wait,
					TASK_UNINTERRUPTIBLE);
			write_unlock_irq(&tree->lock);
			schedule();
			write_lock_irq(&tree->lock);
			finish_wait(&state->wq, &wait);
			free_extent_state(state);
			goto search_again;
		}
		state->state |= EXTENT_LOCKED;
		if (!found)
			*start = state->start;
		found++;
		*end = state->end;
		cur_start = state->end + 1;
		node = rb_next(node);
		if (!node)
			break;
		total_bytes += state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
	}
out:
	write_unlock_irq(&tree->lock);
	return found;
}

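/*
 * count the number of bytes between *start and search_end covered by
 * extents with the given bits set, stopping once max_bytes have been
 * counted. *start is set to the start of the first matching extent.
 * The dirty_bytes counter is used as a shortcut for whole-tree
 * EXTENT_DIRTY queries.
 */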
u64 count_range_bits(struct extent_map_tree *tree,
		     u64 *start, u64 search_end, u64 max_bytes,
		     unsigned long bits)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 total_bytes = 0;
	int found = 0;

	if (search_end <= cur_start) {
		printk("search_end %Lu start %Lu\n", search_end, cur_start);
		WARN_ON(1);
		return 0;
	}

	write_lock_irq(&tree->lock);
	if (cur_start == 0 && bits == EXTENT_DIRTY) {
		total_bytes = tree->dirty_bytes;
		goto out;
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, cur_start);
	if (!node || IS_ERR(node)) {
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start > search_end)
			break;
		if (state->end >= cur_start && (state->state & bits)) {
			total_bytes += min(search_end, state->end) + 1 -
				       max(cur_start, state->start);
			if (total_bytes >= max_bytes)
				break;
			if (!found) {
				*start = state->start;
				found = 1;
			}
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	write_unlock_irq(&tree->lock);
	return total_bytes;
}
/*
 * helper function to lock both pages and extents in the tree.
 * pages must be locked first.
 */
int lock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;
	int err;

	while (index <= end_index) {
		page = grab_cache_page(tree->mapping, index);
		if (!page) {
			err = -ENOMEM;
			goto failed;
		}
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto failed;
		}
		index++;
	}
	lock_extent(tree, start, end, GFP_NOFS);
	return 0;

failed:
	/*
	 * we failed above in getting the page at 'index', so we undo here
	 * up to but not including the page at 'index'
	 */
	end_index = index;
	index = start >> PAGE_CACHE_SHIFT;
	while (index < end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	return err;
}
EXPORT_SYMBOL(lock_range);

/*
 * helper function to unlock both pages and extents in the tree.
 */
int unlock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	unlock_extent(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(unlock_range);

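/*
 * set_state_private / get_state_private stash and retrieve a u64 on
 * the extent_state that starts exactly at 'start'. Both return
 * -ENOENT if no state begins at that offset.
 */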
int set_state_private(struct extent_map_tree *tree, u64 start, u64 private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->private = private;
out:
	write_unlock_irq(&tree->lock);
	return ret;
}

int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	read_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	*private = state->private;
out:
	read_unlock_irq(&tree->lock);
	return ret;
}

/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if every extent in the tree
 * has the bits set. Otherwise, 1 is returned if any bit in the
 * range is found set.
 */
int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
		   int bits, int filled)
{
	struct extent_state *state = NULL;
	struct rb_node *node;
	int bitset = 0;

	read_lock_irq(&tree->lock);
	node = tree_search(&tree->state, start);
	while (node && start <= end) {
		state = rb_entry(node, struct extent_state, rb_node);

		if (filled && state->start > start) {
			bitset = 0;
			break;
		}

		if (state->start > end)
			break;

		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}
		start = state->end + 1;
		if (start > end)
			break;
		node = rb_next(node);
		if (!node) {
			if (filled)
				bitset = 0;
			break;
		}
	}
	read_unlock_irq(&tree->lock);
	return bitset;
}
EXPORT_SYMBOL(test_range_bit);

/*
 * helper function to set a given page up to date if all the
 * extents in the tree for that page are up to date
 */
static int check_page_uptodate(struct extent_map_tree *tree,
			       struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
		SetPageUptodate(page);
	return 0;
}

/*
 * helper function to unlock a page if all the extents in the tree
 * for that page are unlocked
 */
static int check_page_locked(struct extent_map_tree *tree,
			     struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
		unlock_page(page);
	return 0;
}

/*
 * helper function to end page writeback if all the extents
 * in the tree for that page are done with writeback
 */
static int check_page_writeback(struct extent_map_tree *tree,
				struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
		end_page_writeback(page);
	return 0;
}

/* lots and lots of room for performance fixes in the end_bio funcs */

/*
 * after a writepage IO is done, we need to:
 * clear the uptodate bits on error
 * clear the writeback bits in the extent tree for this IO
 * end_page_writeback if the page has no more pending IO
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_writepage(struct bio *bio, int err)
#else
static int end_bio_extent_writepage(struct bio *bio,
				    unsigned int bytes_done, int err)
#endif
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;
	int whole_page;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (!uptodate) {
			clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
			ClearPageUptodate(page);
			SetPageError(page);
		}
		clear_extent_writeback(tree, start, end, GFP_ATOMIC);

		if (whole_page)
			end_page_writeback(page);
		else
			check_page_writeback(tree, page);
		if (tree->ops && tree->ops->writepage_end_io_hook)
			tree->ops->writepage_end_io_hook(page, start, end);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

/*
 * after a readpage IO is done, we need to:
 * clear the uptodate bits on error
 * set the uptodate bits if things worked
 * set the page up to date if all extents in the tree are uptodate
 * clear the lock bit in the extent tree
 * unlock the page if there are no other extents locked for it
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_readpage(struct bio *bio, int err)
#else
static int end_bio_extent_readpage(struct bio *bio,
				   unsigned int bytes_done, int err)
#endif
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;
	int whole_page;
	int ret;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
			ret = tree->ops->readpage_end_io_hook(page, start, end);
			if (ret)
				uptodate = 0;
		}
		if (uptodate) {
			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
			if (whole_page)
				SetPageUptodate(page);
			else
				check_page_uptodate(tree, page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_extent(tree, start, end, GFP_ATOMIC);

		if (whole_page)
			unlock_page(page);
		else
			check_page_locked(tree, page);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

/*
 * IO done from prepare_write is pretty simple, we just unlock
 * the structs in the extent tree when done, and set the uptodate bits
 * as appropriate.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_preparewrite(struct bio *bio, int err)
#else
static int end_bio_extent_preparewrite(struct bio *bio,
				       unsigned int bytes_done, int err)
#endif
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_extent(tree, start, end, GFP_ATOMIC);

	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

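/*
 * allocate a bio for 'nr_vecs' pages aimed at the given device and
 * sector. If the caller is a memory-allocating task (PF_MEMALLOC),
 * retry with progressively smaller vector counts before giving up.
 */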
static struct bio *
extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
		 gfp_t gfp_flags)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, nr_vecs);

	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio->bi_bdev = bdev;
		bio->bi_sector = first_sector;
	}
	return bio;
}

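/*
 * sanity check the target sector against the device size, then send
 * the bio down and translate BIO_EOPNOTSUPP into an errno.
 */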
static int submit_one_bio(int rw, struct bio *bio)
{
	u64 maxsector;
	int ret = 0;

	bio_get(bio);

	maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
	if (maxsector < bio->bi_sector) {
		printk("sector too large max %Lu got %llu\n", maxsector,
		       (unsigned long long)bio->bi_sector);
		WARN_ON(1);
	}

	submit_bio(rw, bio);
	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;
	bio_put(bio);
	return ret;
}

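/*
 * add one page worth of data to a bio, submitting the previous bio
 * first when the new page is not contiguous with it or does not fit.
 * When *bio_ret is non-NULL, the bio is left open for further merging
 * instead of being submitted right away.
 */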
static int submit_extent_page(int rw, struct extent_map_tree *tree,
			      struct page *page, sector_t sector,
			      size_t size, unsigned long offset,
			      struct block_device *bdev,
			      struct bio **bio_ret,
			      unsigned long max_pages,
			      bio_end_io_t end_io_func)
{
	int ret = 0;
	struct bio *bio;
	int nr;

	if (bio_ret && *bio_ret) {
		bio = *bio_ret;
		if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
		    bio_add_page(bio, page, size, offset) < size) {
			ret = submit_one_bio(rw, bio);
			bio = NULL;
		} else {
			return 0;
		}
	}
	nr = min_t(int, max_pages, bio_get_nr_vecs(bdev));
	bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
	if (!bio) {
		printk("failed to allocate bio nr %d\n", nr);
		/* bail out instead of dereferencing a NULL bio below */
		return -ENOMEM;
	}
	bio_add_page(bio, page, size, offset);
	bio->bi_end_io = end_io_func;
	bio->bi_private = tree;
	if (bio_ret) {
		*bio_ret = bio;
	} else {
		ret = submit_one_bio(rw, bio);
	}

	return ret;
}

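/*
 * tag a page as belonging to the extent code: the PG_private flag plus
 * a sentinel value in page->private mark the page as managed here
 * rather than by buffer_heads, and an extra page reference is taken.
 */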
void set_page_extent_mapped(struct page *page)
{
	if (!PagePrivate(page)) {
		SetPagePrivate(page);
		WARN_ON(!page->mapping->a_ops->invalidatepage);
		set_page_private(page, EXTENT_PAGE_PRIVATE);
		page_cache_get(page);
	}
}

void set_page_extent_head(struct page *page, unsigned long len)
{
	set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
}

/*
 * basic readpage implementation.  Locked extent state structs are inserted
 * into the tree as the page is read; the end_io handlers remove them once
 * the IO is done.
 */
static int __extent_read_full_page(struct extent_map_tree *tree,
				   struct page *page,
				   get_extent_t *get_extent,
				   struct bio **bio)
{
	struct inode *inode = page->mapping->host;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	u64 cur_end;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	size_t page_offset = 0;
	size_t iosize;
	size_t blocksize = inode->i_sb->s_blocksize;

	set_page_extent_mapped(page);

	end = page_end;
	lock_extent(tree, start, end, GFP_NOFS);

	while (cur <= end) {
		if (cur >= last_byte) {
			char *userpage;
			iosize = PAGE_CACHE_SIZE - page_offset;
			userpage = kmap_atomic(page, KM_USER0);
			memset(userpage + page_offset, 0, iosize);
			flush_dcache_page(page);
			kunmap_atomic(userpage, KM_USER0);
			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			break;
		}
		em = get_extent(inode, page, page_offset, cur, end, 0);
		if (IS_ERR(em) || !em) {
			SetPageError(page);
			unlock_extent(tree, cur, end, GFP_NOFS);
			break;
		}

		extent_offset = cur - em->start;
		BUG_ON(em->end < cur);
		BUG_ON(end < cur);

		iosize = min(em->end - cur, end - cur) + 1;
		cur_end = min(em->end, end);
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		sector = (em->block_start + extent_offset) >> 9;
		bdev = em->bdev;
		block_start = em->block_start;
		free_extent_map(em);
		em = NULL;

		/* we've found a hole, just zero and go on */
		if (block_start == EXTENT_MAP_HOLE) {
			char *userpage;
			userpage = kmap_atomic(page, KM_USER0);
			memset(userpage + page_offset, 0, iosize);
			flush_dcache_page(page);
			kunmap_atomic(userpage, KM_USER0);

			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		/* the get_extent function already copied into the page */
		if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}

		ret = 0;
		if (tree->ops && tree->ops->readpage_io_hook) {
			ret = tree->ops->readpage_io_hook(page, cur,
							  cur + iosize - 1);
		}
		if (!ret) {
			unsigned long nr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
			nr -= page->index;
			ret = submit_extent_page(READ, tree, page,
						 sector, iosize, page_offset,
						 bdev, bio, nr,
						 end_bio_extent_readpage);
		}
		if (ret)
			SetPageError(page);
		cur = cur + iosize;
		page_offset += iosize;
		nr++;
	}
	if (!nr) {
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
	}
	return 0;
}

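/*
 * read a single page; any bio built up while reading it is submitted
 * before returning.
 */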
int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
			  get_extent_t *get_extent)
{
	struct bio *bio = NULL;
	int ret;

	ret = __extent_read_full_page(tree, page, get_extent, &bio);
	if (bio)
		submit_one_bio(READ, bio);
	return ret;
}
EXPORT_SYMBOL(extent_read_full_page);

/*
 * the writepage semantics are similar to regular writepage.  extent
 * records are inserted to lock ranges in the tree, and as dirty areas
 * are found, they are marked writeback.  Then the lock bits are removed
 * and the end_io handler clears the writeback ranges
 */
static int __extent_writepage(struct page *page, struct writeback_control *wbc,
			      void *data)
{
	struct inode *inode = page->mapping->host;
	struct extent_page_data *epd = data;
	struct extent_map_tree *tree = epd->tree;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 delalloc_start;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	u64 iosize;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	size_t page_offset = 0;
	size_t blocksize;
	loff_t i_size = i_size_read(inode);
	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
	u64 nr_delalloc;
	u64 delalloc_end;

	WARN_ON(!PageLocked(page));
	if (page->index > end_index) {
		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
		unlock_page(page);
		return 0;
	}

	if (page->index == end_index) {
		char *userpage;

		size_t offset = i_size & (PAGE_CACHE_SIZE - 1);

		userpage = kmap_atomic(page, KM_USER0);
		memset(userpage + offset, 0, PAGE_CACHE_SIZE - offset);
		flush_dcache_page(page);
		kunmap_atomic(userpage, KM_USER0);
	}

	set_page_extent_mapped(page);

	delalloc_start = start;
	delalloc_end = 0;
	while (delalloc_end < page_end) {
		nr_delalloc = find_lock_delalloc_range(tree, &delalloc_start,
						       &delalloc_end,
						       128 * 1024 * 1024);
		if (nr_delalloc == 0) {
			delalloc_start = delalloc_end + 1;
			continue;
		}
		tree->ops->fill_delalloc(inode, delalloc_start,
					 delalloc_end);
		clear_extent_bit(tree, delalloc_start,
				 delalloc_end,
				 EXTENT_LOCKED | EXTENT_DELALLOC,
				 1, 0, GFP_NOFS);
		delalloc_start = delalloc_end + 1;
	}
	lock_extent(tree, start, page_end, GFP_NOFS);

	end = page_end;
	if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
		printk("found delalloc bits after lock_extent\n");
	}

	if (last_byte <= start) {
		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
		goto done;
	}

	set_extent_uptodate(tree, start, page_end, GFP_NOFS);
	blocksize = inode->i_sb->s_blocksize;

	while (cur <= end) {
		if (cur >= last_byte) {
			clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
			break;
		}
		em = epd->get_extent(inode, page, page_offset, cur, end, 1);
		if (IS_ERR(em) || !em) {
			SetPageError(page);
			break;
		}

		extent_offset = cur - em->start;
		BUG_ON(em->end < cur);
		BUG_ON(end < cur);
		iosize = min(em->end - cur, end - cur) + 1;
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		sector = (em->block_start + extent_offset) >> 9;
		bdev = em->bdev;
		block_start = em->block_start;
		free_extent_map(em);
		em = NULL;

		if (block_start == EXTENT_MAP_HOLE ||
		    block_start == EXTENT_MAP_INLINE) {
			clear_extent_dirty(tree, cur,
					   cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}

		/* leave this out until we have a page_mkwrite call */
		if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
					 EXTENT_DIRTY, 0)) {
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
		if (tree->ops && tree->ops->writepage_io_hook) {
			ret = tree->ops->writepage_io_hook(page, cur,
							   cur + iosize - 1);
		} else {
			ret = 0;
		}
		if (ret)
			SetPageError(page);
		else {
			unsigned long max_nr = end_index + 1;
			set_range_writeback(tree, cur, cur + iosize - 1);
			if (!PageWriteback(page)) {
				printk("warning page %lu not writeback, "
				       "cur %llu end %llu\n", page->index,
				       (unsigned long long)cur,
				       (unsigned long long)end);
			}

			ret = submit_extent_page(WRITE, tree, page, sector,
						 iosize, page_offset, bdev,
						 &epd->bio, max_nr,
						 end_bio_extent_writepage);
			if (ret)
				SetPageError(page);
		}
		cur = cur + iosize;
		page_offset += iosize;
		nr++;
	}
done:
	if (nr == 0) {
		/* make sure the mapping tag for page dirty gets cleared */
		set_page_writeback(page);
		end_page_writeback(page);
	}
	unlock_extent(tree, start, page_end, GFP_NOFS);
	unlock_page(page);
	return 0;
}

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)

/* Taken directly from 2.6.23 for 2.6.18 back port */
typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
			   void *data);

/**
 * write_cache_pages - walk the list of dirty pages of the given address space
 * and write all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @writepage: function called for each page
 * @data: data passed to writepage function
 *
 * If a page is already under I/O, write_cache_pages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().
 * fsync() and msync() need to guarantee that all the data which was dirty
 * at the time the call was made get new I/O started against them.  If
 * wbc->sync_mode is WB_SYNC_ALL then we were called for data integrity and
 * we must wait for existing IO to complete.
 */
static int write_cache_pages(struct address_space *mapping,
			     struct writeback_control *wbc,
			     writepage_t writepage, void *data)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t index;
	pgoff_t end;	/* Inclusive */
	int scanned = 0;
	int range_whole = 0;

	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		wbc->encountered_congestion = 1;
		return 0;
	}

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		scanned = 1;
	}
retry:
	while (!done && (index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_DIRTY,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
		unsigned i;

		scanned = 1;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */
			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				continue;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = 1;
				unlock_page(page);
				continue;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
			    !clear_page_dirty_for_io(page)) {
				unlock_page(page);
				continue;
			}

			ret = (*writepage)(page, wbc, data);

			if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
				unlock_page(page);
				ret = 0;
			}
			if (ret || (--(wbc->nr_to_write) <= 0))
				done = 1;
			if (wbc->nonblocking && bdi_write_congested(bdi)) {
				wbc->encountered_congestion = 1;
				done = 1;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = 1;
		index = 0;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;
	return ret;
}
#endif

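/*
 * write out the page passed in, then start writeback on up to 64 of the
 * dirty pages that follow it in the file, sharing one bio across all of
 * the calls.
 */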
int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
			   get_extent_t *get_extent,
			   struct writeback_control *wbc)
{
	int ret;
	struct address_space *mapping = page->mapping;
	struct extent_page_data epd = {
		.bio = NULL,
		.tree = tree,
		.get_extent = get_extent,
	};
	struct writeback_control wbc_writepages = {
		.bdi = wbc->bdi,
		.sync_mode = WB_SYNC_NONE,
		.older_than_this = NULL,
		.nr_to_write = 64,
		.range_start = page_offset(page) + PAGE_CACHE_SIZE,
		.range_end = (loff_t)-1,
	};

	ret = __extent_writepage(page, wbc, &epd);

	write_cache_pages(mapping, &wbc_writepages, __extent_writepage, &epd);
	if (epd.bio) {
		submit_one_bio(WRITE, epd.bio);
	}
	return ret;
}
EXPORT_SYMBOL(extent_write_full_page);

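/*
 * writepages implementation: walk the dirty pages with write_cache_pages,
 * reusing one bio across the calls, and submit whatever is still pending
 * at the end.
 */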
int extent_writepages(struct extent_map_tree *tree,
		      struct address_space *mapping,
		      get_extent_t *get_extent,
		      struct writeback_control *wbc)
{
	int ret = 0;
	struct extent_page_data epd = {
		.bio = NULL,
		.tree = tree,
		.get_extent = get_extent,
	};

	ret = write_cache_pages(mapping, wbc, __extent_writepage, &epd);
	if (epd.bio) {
		submit_one_bio(WRITE, epd.bio);
	}
	return ret;
}
EXPORT_SYMBOL(extent_writepages);

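/*
 * readpages implementation: add each page to the page cache and LRU by
 * hand (add_to_page_cache_lru is not exported) and read them all through
 * a shared bio so contiguous pages end up in large IOs.
 */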
int extent_readpages(struct extent_map_tree *tree,
		     struct address_space *mapping,
		     struct list_head *pages, unsigned nr_pages,
		     get_extent_t get_extent)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	struct pagevec pvec;

	pagevec_init(&pvec, 0);
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);
		/*
		 * what we want to do here is call add_to_page_cache_lru,
		 * but that isn't exported, so we reproduce it here
		 */
		if (!add_to_page_cache(page, mapping,
				       page->index, GFP_KERNEL)) {

			/* open coding of lru_cache_add, also not exported */
			page_cache_get(page);
			if (!pagevec_add(&pvec, page))
				__pagevec_lru_add(&pvec);
			__extent_read_full_page(tree, page, get_extent, &bio);
		}
		page_cache_release(page);
	}
	if (pagevec_count(&pvec))
		__pagevec_lru_add(&pvec);
	BUG_ON(!list_empty(pages));
	if (bio)
		submit_one_bio(READ, bio);
	return 0;
}
EXPORT_SYMBOL(extent_readpages);

/*
 * basic invalidatepage code, this waits on any locked or writeback
 * ranges corresponding to the page, and then deletes any extent state
 * records from the tree
 */
int extent_invalidatepage(struct extent_map_tree *tree,
			  struct page *page, unsigned long offset)
{
	u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
	u64 end = start + PAGE_CACHE_SIZE - 1;
	size_t blocksize = page->mapping->host->i_sb->s_blocksize;

	start += (offset + blocksize - 1) & ~(blocksize - 1);
	if (start > end)
		return 0;

	lock_extent(tree, start, end, GFP_NOFS);
	wait_on_extent_writeback(tree, start, end);
	clear_extent_bit(tree, start, end,
			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
			 1, 1, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(extent_invalidatepage);

/*
 * simple commit_write call, set_page_dirty is used to mark both
 * the page and the extent records as dirty
 */
int extent_commit_write(struct extent_map_tree *tree,
			struct inode *inode, struct page *page,
			unsigned from, unsigned to)
{
	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;

	set_page_extent_mapped(page);
	set_page_dirty(page);

	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}
	return 0;
}
EXPORT_SYMBOL(extent_commit_write);

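/*
 * prepare_write implementation: zero the parts of freshly allocated
 * blocks that fall outside [from, to), read in any other blocks that are
 * not up to date and won't be fully overwritten, and wait for that IO
 * before returning.
 */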
int extent_prepare_write(struct extent_map_tree *tree,
			 struct inode *inode, struct page *page,
			 unsigned from, unsigned to, get_extent_t *get_extent)
{
	u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
	u64 block_start;
	u64 orig_block_start;
	u64 block_end;
	u64 cur_end;
	struct extent_map *em;
	unsigned blocksize = 1 << inode->i_blkbits;
	size_t page_offset = 0;
	size_t block_off_start;
	size_t block_off_end;
	int err = 0;
	int iocount = 0;
	int ret = 0;
	int isnew;

	set_page_extent_mapped(page);

	block_start = (page_start + from) & ~((u64)blocksize - 1);
	block_end = (page_start + to - 1) | (blocksize - 1);
	orig_block_start = block_start;

	lock_extent(tree, page_start, page_end, GFP_NOFS);
	while (block_start <= block_end) {
		em = get_extent(inode, page, page_offset, block_start,
				block_end, 1);
		if (IS_ERR(em) || !em) {
			err = -EIO;
			goto err;
		}
		cur_end = min(block_end, em->end);
		block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
		block_off_end = block_off_start + blocksize;
		isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);

		if (!PageUptodate(page) && isnew &&
		    (block_off_end > to || block_off_start < from)) {
			void *kaddr;

			kaddr = kmap_atomic(page, KM_USER0);
			if (block_off_end > to)
				memset(kaddr + to, 0, block_off_end - to);
			if (block_off_start < from)
				memset(kaddr + block_off_start, 0,
				       from - block_off_start);
			flush_dcache_page(page);
			kunmap_atomic(kaddr, KM_USER0);
		}
		if ((em->block_start != EXTENT_MAP_HOLE &&
		     em->block_start != EXTENT_MAP_INLINE) &&
		    !isnew && !PageUptodate(page) &&
		    (block_off_end > to || block_off_start < from) &&
		    !test_range_bit(tree, block_start, cur_end,
				    EXTENT_UPTODATE, 1)) {
			u64 sector;
			u64 extent_offset = block_start - em->start;
			size_t iosize;
			sector = (em->block_start + extent_offset) >> 9;
			iosize = (cur_end - block_start + blocksize) &
				 ~((u64)blocksize - 1);
			/*
			 * we've already got the extent locked, but we
			 * need to split the state such that our end_bio
			 * handler can clear the lock.
			 */
			set_extent_bit(tree, block_start,
				       block_start + iosize - 1,
				       EXTENT_LOCKED, 0, NULL, GFP_NOFS);
			ret = submit_extent_page(READ, tree, page,
						 sector, iosize, page_offset,
						 em->bdev, NULL, 1,
						 end_bio_extent_preparewrite);
			iocount++;
			block_start = block_start + iosize;
		} else {
			set_extent_uptodate(tree, block_start, cur_end,
					    GFP_NOFS);
			unlock_extent(tree, block_start, cur_end, GFP_NOFS);
			block_start = cur_end + 1;
		}
		page_offset = block_start & (PAGE_CACHE_SIZE - 1);
		free_extent_map(em);
	}
	if (iocount) {
		wait_extent_bit(tree, orig_block_start,
				block_end, EXTENT_LOCKED);
	}
	check_page_uptodate(tree, page);
err:
	/* FIXME, zero out newly allocated blocks on error */
	return err;
}
EXPORT_SYMBOL(extent_prepare_write);

/*
 * a helper for releasepage.  As long as there are no locked extents
 * in the range corresponding to the page, both state records and extent
 * map records are removed
 */
int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page)
{
	struct extent_map *em;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	u64 orig_start = start;
	int ret = 1;

	while (start <= end) {
		em = lookup_extent_mapping(tree, start, end);
		if (!em || IS_ERR(em))
			break;
		if (!test_range_bit(tree, em->start, em->end,
				    EXTENT_LOCKED, 0)) {
			remove_extent_mapping(tree, em);
			/* once for the rb tree */
			free_extent_map(em);
		}
		start = em->end + 1;
		/* once for us */
		free_extent_map(em);
	}
	if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0))
		ret = 0;
	else
		clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
				 1, 1, GFP_NOFS);
	return ret;
}
EXPORT_SYMBOL(try_release_extent_mapping);

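/*
 * bmap implementation: map a logical file block to a block number on
 * disk.  Holes and inline extents have no meaningful block number, so
 * they report 0.
 */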
sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
		     get_extent_t *get_extent)
{
	struct inode *inode = mapping->host;
	u64 start = (u64)iblock << inode->i_blkbits;
	u64 end = start + (1 << inode->i_blkbits) - 1;
	sector_t sector = 0;
	struct extent_map *em;

	em = get_extent(inode, NULL, 0, start, end, 0);
	if (!em || IS_ERR(em))
		return 0;

	if (em->block_start == EXTENT_MAP_INLINE ||
	    em->block_start == EXTENT_MAP_HOLE)
		goto out;

	sector = (em->block_start + start - em->start) >> inode->i_blkbits;
out:
	free_extent_map(em);
	return sector;
}

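/*
 * the tree keeps a small LRU of recently used extent buffers.  add_lru
 * inserts (or refreshes) a buffer and evicts the oldest entry once the
 * list grows past BUFFER_LRU_MAX; find_lru looks a buffer up by start
 * and length.
 */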
static int add_lru(struct extent_map_tree *tree, struct extent_buffer *eb)
{
	if (list_empty(&eb->lru)) {
		extent_buffer_get(eb);
		list_add(&eb->lru, &tree->buffer_lru);
		tree->lru_size++;
		if (tree->lru_size >= BUFFER_LRU_MAX) {
			struct extent_buffer *rm;
			rm = list_entry(tree->buffer_lru.prev,
					struct extent_buffer, lru);
			tree->lru_size--;
			list_del_init(&rm->lru);
			free_extent_buffer(rm);
		}
	} else
		list_move(&eb->lru, &tree->buffer_lru);
	return 0;
}

static struct extent_buffer *find_lru(struct extent_map_tree *tree,
				      u64 start, unsigned long len)
{
	struct list_head *lru = &tree->buffer_lru;
	struct list_head *cur = lru->next;
	struct extent_buffer *eb;

	if (list_empty(lru))
		return NULL;

	do {
		eb = list_entry(cur, struct extent_buffer, lru);
		if (eb->start == start && eb->len == len) {
			extent_buffer_get(eb);
			return eb;
		}
		cur = cur->next;
	} while (cur != lru);
	return NULL;
}

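/* how many pagecache pages does a buffer of this start and length span */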
static inline unsigned long num_extent_pages(u64 start, u64 len)
{
	return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
		(start >> PAGE_CACHE_SHIFT);
}

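/*
 * return the i'th page backing an extent buffer.  Page 0 is cached in
 * eb->first_page; the rest are looked up in the mapping's radix tree.
 */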
static inline struct page *extent_buffer_page(struct extent_buffer *eb,
					      unsigned long i)
{
	struct page *p;
	struct address_space *mapping;

	if (i == 0)
		return eb->first_page;
	i += eb->start >> PAGE_CACHE_SHIFT;
	mapping = eb->first_page->mapping;
	read_lock_irq(&mapping->tree_lock);
	p = radix_tree_lookup(&mapping->page_tree, i);
	read_unlock_irq(&mapping->tree_lock);
	return p;
}

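/*
 * check the LRU for an existing buffer covering [start, start + len)
 * first; otherwise allocate a fresh, zeroed one.
 */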
static struct extent_buffer *__alloc_extent_buffer(struct extent_map_tree *tree,
						   u64 start,
						   unsigned long len,
						   gfp_t mask)
{
	struct extent_buffer *eb = NULL;

	spin_lock(&tree->lru_lock);
	eb = find_lru(tree, start, len);
	spin_unlock(&tree->lru_lock);
	if (eb) {
		return eb;
	}

	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
	if (!eb)
		return NULL;
	INIT_LIST_HEAD(&eb->lru);
	eb->start = start;
	eb->len = len;
	atomic_set(&eb->refs, 1);

	return eb;
}

static void __free_extent_buffer(struct extent_buffer *eb)
{
	kmem_cache_free(extent_buffer_cache, eb);
}

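/*
 * allocate an extent buffer for the given range and make sure every page
 * backing it is present in the page cache, creating pages as needed.  A
 * caller that already holds the first page locked and up to date may pass
 * it in as page0.
 */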
struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
					  u64 start, unsigned long len,
					  struct page *page0,
					  gfp_t mask)
{
	unsigned long num_pages = num_extent_pages(start, len);
	unsigned long i;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct extent_buffer *eb;
	struct page *p;
	struct address_space *mapping = tree->mapping;
	int uptodate = 1;

	eb = __alloc_extent_buffer(tree, start, len, mask);
	if (!eb || IS_ERR(eb))
		return NULL;

	if (eb->flags & EXTENT_BUFFER_FILLED)
		goto lru_add;

	if (page0) {
		eb->first_page = page0;
		i = 1;
		index++;
		page_cache_get(page0);
		mark_page_accessed(page0);
		set_page_extent_mapped(page0);
		WARN_ON(!PageUptodate(page0));
		set_page_extent_head(page0, len);
	} else {
		i = 0;
	}
	for (; i < num_pages; i++, index++) {
		p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
		if (!p) {
			WARN_ON(1);
			goto fail;
		}
		set_page_extent_mapped(p);
		mark_page_accessed(p);
		if (i == 0) {
			eb->first_page = p;
			set_page_extent_head(p, len);
		} else {
			set_page_private(p, EXTENT_PAGE_PRIVATE);
		}
		if (!PageUptodate(p))
			uptodate = 0;
		unlock_page(p);
	}
	if (uptodate)
		eb->flags |= EXTENT_UPTODATE;
	eb->flags |= EXTENT_BUFFER_FILLED;

lru_add:
	spin_lock(&tree->lru_lock);
	add_lru(tree, eb);
	spin_unlock(&tree->lru_lock);
	return eb;

fail:
	spin_lock(&tree->lru_lock);
	list_del_init(&eb->lru);
	spin_unlock(&tree->lru_lock);
	if (!atomic_dec_and_test(&eb->refs))
		return NULL;
	for (index = 1; index < i; index++) {
		page_cache_release(extent_buffer_page(eb, index));
	}
	if (i > 0)
		page_cache_release(extent_buffer_page(eb, 0));
	__free_extent_buffer(eb);
	return NULL;
}
EXPORT_SYMBOL(alloc_extent_buffer);

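/*
 * like alloc_extent_buffer, but only succeeds if all of the pages backing
 * the range are already present in the page cache.
 */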
struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
					 u64 start, unsigned long len,
					 gfp_t mask)
{
	unsigned long num_pages = num_extent_pages(start, len);
	unsigned long i;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct extent_buffer *eb;
	struct page *p;
	struct address_space *mapping = tree->mapping;
	int uptodate = 1;

	eb = __alloc_extent_buffer(tree, start, len, mask);
	if (!eb || IS_ERR(eb))
		return NULL;

	if (eb->flags & EXTENT_BUFFER_FILLED)
		goto lru_add;

	for (i = 0; i < num_pages; i++, index++) {
		p = find_lock_page(mapping, index);
		if (!p) {
			goto fail;
		}
		set_page_extent_mapped(p);
		mark_page_accessed(p);

		if (i == 0) {
			eb->first_page = p;
			set_page_extent_head(p, len);
		} else {
			set_page_private(p, EXTENT_PAGE_PRIVATE);
		}

		if (!PageUptodate(p))
			uptodate = 0;
		unlock_page(p);
	}
	if (uptodate)
		eb->flags |= EXTENT_UPTODATE;
	eb->flags |= EXTENT_BUFFER_FILLED;

lru_add:
	spin_lock(&tree->lru_lock);
	add_lru(tree, eb);
	spin_unlock(&tree->lru_lock);
	return eb;
fail:
	spin_lock(&tree->lru_lock);
	list_del_init(&eb->lru);
	spin_unlock(&tree->lru_lock);
	if (!atomic_dec_and_test(&eb->refs))
		return NULL;
	for (index = 1; index < i; index++) {
		page_cache_release(extent_buffer_page(eb, index));
	}
	if (i > 0)
		page_cache_release(extent_buffer_page(eb, 0));
	__free_extent_buffer(eb);
	return NULL;
}
EXPORT_SYMBOL(find_extent_buffer);

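/*
 * drop a reference on an extent buffer.  The backing pages and the buffer
 * itself are released when the last reference goes away.
 */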
void free_extent_buffer(struct extent_buffer *eb)
{
	unsigned long i;
	unsigned long num_pages;

	if (!eb)
		return;

	if (!atomic_dec_and_test(&eb->refs))
		return;

	WARN_ON(!list_empty(&eb->lru));
	num_pages = num_extent_pages(eb->start, eb->len);

	for (i = 1; i < num_pages; i++) {
		page_cache_release(extent_buffer_page(eb, i));
	}
	page_cache_release(extent_buffer_page(eb, 0));
	__free_extent_buffer(eb);
}
EXPORT_SYMBOL(free_extent_buffer);

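/*
 * clear the dirty bits for a buffer in both the extent tree and the page
 * cache.  Pages at the unaligned ends of the buffer are left dirty when
 * another range on them is still dirty.
 */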
int clear_extent_buffer_dirty(struct extent_map_tree *tree,
			      struct extent_buffer *eb)
{
	int set;
	unsigned long i;
	unsigned long num_pages;
	struct page *page;

	u64 start = eb->start;
	u64 end = start + eb->len - 1;

	set = clear_extent_dirty(tree, start, end, GFP_NOFS);
	num_pages = num_extent_pages(eb->start, eb->len);

	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		lock_page(page);
		if (i == 0)
			set_page_extent_head(page, eb->len);
		else
			set_page_private(page, EXTENT_PAGE_PRIVATE);

		/*
		 * if we're on the last page or the first page and the
		 * block isn't aligned on a page boundary, do extra checks
		 * to make sure we don't clean a page that is partially dirty
		 */
		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
		    ((i == num_pages - 1) &&
		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
			start = (u64)page->index << PAGE_CACHE_SHIFT;
			end = start + PAGE_CACHE_SIZE - 1;
			if (test_range_bit(tree, start, end,
					   EXTENT_DIRTY, 0)) {
				unlock_page(page);
				continue;
			}
		}
		clear_page_dirty_for_io(page);
		write_lock_irq(&page->mapping->tree_lock);
		if (!PageDirty(page)) {
			radix_tree_tag_clear(&page->mapping->page_tree,
					     page_index(page),
					     PAGECACHE_TAG_DIRTY);
		}
		write_unlock_irq(&page->mapping->tree_lock);
		unlock_page(page);
	}
	return 0;
}
EXPORT_SYMBOL(clear_extent_buffer_dirty);

int wait_on_extent_buffer_writeback(struct extent_map_tree *tree,
				    struct extent_buffer *eb)
{
	return wait_on_extent_writeback(tree, eb->start,
					eb->start + eb->len - 1);
}
EXPORT_SYMBOL(wait_on_extent_buffer_writeback);

int set_extent_buffer_dirty(struct extent_map_tree *tree,
			    struct extent_buffer *eb)
{
	unsigned long i;
	unsigned long num_pages;

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++) {
		struct page *page = extent_buffer_page(eb, i);
		/* writepage may need to do something special for the
		 * first page, we have to make sure page->private is
		 * properly set.  releasepage may drop page->private
		 * on us if the page isn't already dirty.
		 */
		if (i == 0) {
			lock_page(page);
			set_page_extent_head(page, eb->len);
		} else if (PagePrivate(page) &&
			   page->private != EXTENT_PAGE_PRIVATE) {
			lock_page(page);
			set_page_extent_mapped(page);
			unlock_page(page);
		}
		__set_page_dirty_nobuffers(extent_buffer_page(eb, i));
		if (i == 0)
			unlock_page(page);
	}
	return set_extent_dirty(tree, eb->start,
				eb->start + eb->len - 1, GFP_NOFS);
}
EXPORT_SYMBOL(set_extent_buffer_dirty);

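/*
 * mark a buffer up to date in the extent tree and set PageUptodate on any
 * page the buffer covers completely.  Partially covered pages go through
 * check_page_uptodate instead.
 */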
int set_extent_buffer_uptodate(struct extent_map_tree *tree,
			       struct extent_buffer *eb)
{
	unsigned long i;
	struct page *page;
	unsigned long num_pages;

	num_pages = num_extent_pages(eb->start, eb->len);

	set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
			    GFP_NOFS);
	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
		    ((i == num_pages - 1) &&
		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
			check_page_uptodate(tree, page);
			continue;
		}
		SetPageUptodate(page);
	}
	return 0;
}
EXPORT_SYMBOL(set_extent_buffer_uptodate);

int extent_buffer_uptodate(struct extent_map_tree *tree,
			   struct extent_buffer *eb)
{
	if (eb->flags & EXTENT_UPTODATE)
		return 1;
	return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
			      EXTENT_UPTODATE, 1);
}
EXPORT_SYMBOL(extent_buffer_uptodate);

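/*
 * read the pages backing an extent buffer from disk, skipping pages that
 * are already up to date.  When start is non-zero only the pages from
 * that offset onward are read; when wait is zero the reads are started
 * but not waited on.
 */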
int read_extent_buffer_pages(struct extent_map_tree *tree,
			     struct extent_buffer *eb,
			     u64 start,
			     int wait)
{
	unsigned long i;
	unsigned long start_i;
	struct page *page;
	int err;
	int ret = 0;
	unsigned long num_pages;

	if (eb->flags & EXTENT_UPTODATE)
		return 0;

	if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1,
				EXTENT_UPTODATE, 1)) {
		return 0;
	}

	if (start) {
		WARN_ON(start < eb->start);
		start_i = (start >> PAGE_CACHE_SHIFT) -
			  (eb->start >> PAGE_CACHE_SHIFT);
	} else {
		start_i = 0;
	}

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (PageUptodate(page)) {
			continue;
		}
		if (!wait) {
			if (TestSetPageLocked(page)) {
				continue;
			}
		} else {
			lock_page(page);
		}
		if (!PageUptodate(page)) {
			err = page->mapping->a_ops->readpage(NULL, page);
			if (err) {
				ret = err;
			}
		} else {
			unlock_page(page);
		}
	}

	if (ret || !wait) {
		return ret;
	}

	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			ret = -EIO;
		}
	}
	if (!ret)
		eb->flags |= EXTENT_UPTODATE;
	return ret;
}
EXPORT_SYMBOL(read_extent_buffer_pages);

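/*
 * copy len bytes at offset start out of an extent buffer and into dstv,
 * crossing page boundaries as needed.
 */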
void read_extent_buffer(struct extent_buffer *eb, void *dstv,
			unsigned long start,
			unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *dst = (char *)dstv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	unsigned long num_pages = num_extent_pages(eb->start, eb->len);

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		if (!PageUptodate(page)) {
			printk("page %lu not up to date i %lu, total %lu, "
			       "len %lu\n", page->index, i, num_pages,
			       eb->len);
			WARN_ON(1);
		}
		WARN_ON(!PageUptodate(page));

		cur = min(len, (PAGE_CACHE_SIZE - offset));
		kaddr = kmap_atomic(page, KM_USER1);
		memcpy(dst, kaddr + offset, cur);
		kunmap_atomic(kaddr, KM_USER1);

		dst += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(read_extent_buffer);

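/*
 * map one page worth of the buffer at offset start for direct access.
 * The min_len bytes must fit inside a single page or -EINVAL is returned;
 * *map points at the requested offset and *token is what the caller
 * later passes to unmap_extent_buffer.
 */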
int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
			      unsigned long min_len, char **token, char **map,
			      unsigned long *map_start,
			      unsigned long *map_len, int km)
{
	size_t offset = start & (PAGE_CACHE_SIZE - 1);
	char *kaddr;
	struct page *p;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	unsigned long end_i = (start_offset + start + min_len - 1) >>
			      PAGE_CACHE_SHIFT;

	if (i != end_i)
		return -EINVAL;

	if (i == 0) {
		offset = start_offset;
		*map_start = 0;
	} else {
		offset = 0;
		*map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
	}
	if (start + min_len > eb->len) {
		printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n",
		       eb->start, eb->len, start, min_len);
		WARN_ON(1);
	}

	p = extent_buffer_page(eb, i);
	WARN_ON(!PageUptodate(p));
	kaddr = kmap_atomic(p, km);
	*token = kaddr;
	*map = kaddr + offset;
	*map_len = PAGE_CACHE_SIZE - offset;
	return 0;
}
EXPORT_SYMBOL(map_private_extent_buffer);

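/*
 * wrapper around map_private_extent_buffer that also caches the mapping
 * in the extent buffer itself, dropping any mapping that was cached there
 * before.
 */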
int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
		      unsigned long min_len,
		      char **token, char **map,
		      unsigned long *map_start,
		      unsigned long *map_len, int km)
{
	int err;
	int save = 0;
	if (eb->map_token) {
		unmap_extent_buffer(eb, eb->map_token, km);
		eb->map_token = NULL;
		save = 1;
	}
	err = map_private_extent_buffer(eb, start, min_len, token, map,
					map_start, map_len, km);
	if (!err && save) {
		eb->map_token = *token;
		eb->kaddr = *map;
		eb->map_start = *map_start;
		eb->map_len = *map_len;
	}
	return err;
}
EXPORT_SYMBOL(map_extent_buffer);

void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
{
	kunmap_atomic(token, km);
}
EXPORT_SYMBOL(unmap_extent_buffer);

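/*
 * memcmp between a buffer in regular memory and an extent buffer,
 * comparing one page at a time.
 */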
int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
			 unsigned long start,
			 unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *ptr = (char *)ptrv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	int ret = 0;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, (PAGE_CACHE_SIZE - offset));

		kaddr = kmap_atomic(page, KM_USER0);
		ret = memcmp(ptr, kaddr + offset, cur);
		kunmap_atomic(kaddr, KM_USER0);
		if (ret)
			break;

		ptr += cur;
		len -= cur;
		offset = 0;
		i++;
	}
	return ret;
}
EXPORT_SYMBOL(memcmp_extent_buffer);

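/*
 * copy len bytes from srcv into the extent buffer at offset start.
 */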
void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
			 unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *src = (char *)srcv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = kmap_atomic(page, KM_USER1);
		memcpy(kaddr + offset, src, cur);
		kunmap_atomic(kaddr, KM_USER1);

		src += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(write_extent_buffer);

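/*
 * memset len bytes of the extent buffer at offset start to the byte c.
 */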
void memset_extent_buffer(struct extent_buffer *eb, char c,
			  unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + offset, c, cur);
		kunmap_atomic(kaddr, KM_USER0);

		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(memset_extent_buffer);

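/*
 * copy len bytes from offset src_offset of one extent buffer into another
 * at dst_offset.
 */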
void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len)
{
	u64 dst_len = dst->len;
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;

	WARN_ON(src->len != dst_len);

	offset = (start_offset + dst_offset) &
		 ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(dst, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));

		kaddr = kmap_atomic(page, KM_USER0);
		read_extent_buffer(src, kaddr + offset, src_offset, cur);
		kunmap_atomic(kaddr, KM_USER0);

		src_offset += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(copy_extent_buffer);

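/*
 * page-level helpers for memcpy_extent_buffer and memmove_extent_buffer.
 * move_pages copies backwards so it is safe for overlapping ranges;
 * copy_pages assumes the ranges don't overlap.
 */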
static void move_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
	if (dst_page == src_page) {
		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
	} else {
		char *src_kaddr = kmap_atomic(src_page, KM_USER1);
		char *p = dst_kaddr + dst_off + len;
		char *s = src_kaddr + src_off + len;

		while (len--)
			*--p = *--s;

		kunmap_atomic(src_kaddr, KM_USER1);
	}
	kunmap_atomic(dst_kaddr, KM_USER0);
}

static void copy_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
	char *src_kaddr;

	if (dst_page != src_page)
		src_kaddr = kmap_atomic(src_page, KM_USER1);
	else
		src_kaddr = dst_kaddr;

	memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
	kunmap_atomic(dst_kaddr, KM_USER0);
	if (dst_page != src_page)
		kunmap_atomic(src_kaddr, KM_USER1);
}

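/*
 * memcpy within a single extent buffer: copy len bytes from src_offset to
 * dst_offset, front to back.  Use memmove_extent_buffer when the ranges
 * may overlap.
 */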
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			  unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk("memmove bogus src_offset %lu move len %lu len %lu\n",
		       src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
		       dst_offset, len, dst->len);
		BUG_ON(1);
	}

	while (len > 0) {
		dst_off_in_page = (start_offset + dst_offset) &
				  ((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_offset) &
				  ((unsigned long)PAGE_CACHE_SIZE - 1);

		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
					       src_off_in_page));
		cur = min_t(unsigned long, cur,
			    (unsigned long)(PAGE_CACHE_SIZE -
					    dst_off_in_page));

		copy_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page, src_off_in_page, cur);

		src_offset += cur;
		dst_offset += cur;
		len -= cur;
	}
}
EXPORT_SYMBOL(memcpy_extent_buffer);

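/*
 * memmove within a single extent buffer: handles overlapping ranges by
 * copying from the end when the destination sits above the source.
 */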
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	unsigned long dst_end = dst_offset + len - 1;
	unsigned long src_end = src_offset + len - 1;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk("memmove bogus src_offset %lu move len %lu len %lu\n",
		       src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
		       dst_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset < src_offset) {
		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
		return;
	}
	while (len > 0) {
		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;

		dst_off_in_page = (start_offset + dst_end) &
				  ((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_end) &
				  ((unsigned long)PAGE_CACHE_SIZE - 1);

		cur = min_t(unsigned long, len, src_off_in_page + 1);
		cur = min(cur, dst_off_in_page + 1);
		move_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page - cur + 1,
			   src_off_in_page - cur + 1, cur);

		dst_end -= cur;
		src_end -= cur;
		len -= cur;
	}
}
EXPORT_SYMBOL(memmove_extent_buffer);