Diffstat (limited to 'fs/btrfs/disk-io.c'):
 fs/btrfs/disk-io.c | 253 ++++++++++++++++++++++++++++-------------------------
 1 file changed, 131 insertions(+), 122 deletions(-)
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index e71039ea66cf..0e69295d0031 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -48,6 +48,7 @@
#include "rcu-string.h"
#include "dev-replace.h"
#include "raid56.h"
+#include "sysfs.h"
#ifdef CONFIG_X86
#include <asm/cpufeature.h>
@@ -299,11 +300,11 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
memcpy(&found, result, csum_size);
read_extent_buffer(buf, &val, 0, csum_size);
- printk_ratelimited(KERN_INFO "btrfs: %s checksum verify "
- "failed on %llu wanted %X found %X "
- "level %d\n",
- root->fs_info->sb->s_id, buf->start,
- val, found, btrfs_header_level(buf));
+ printk_ratelimited(KERN_INFO
+ "BTRFS: %s checksum verify failed on %llu wanted %X found %X "
+ "level %d\n",
+ root->fs_info->sb->s_id, buf->start,
+ val, found, btrfs_header_level(buf));
if (result != (char *)&inline_result)
kfree(result);
return 1;
@@ -382,13 +383,14 @@ static int btrfs_check_super_csum(char *raw_disk_sb)
ret = 1;
if (ret && btrfs_super_generation(disk_sb) < 10) {
- printk(KERN_WARNING "btrfs: super block crcs don't match, older mkfs detected\n");
+ printk(KERN_WARNING
+ "BTRFS: super block crcs don't match, older mkfs detected\n");
ret = 0;
}
}
if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
- printk(KERN_ERR "btrfs: unsupported checksum algorithm %u\n",
+ printk(KERN_ERR "BTRFS: unsupported checksum algorithm %u\n",
csum_type);
ret = 1;
}
@@ -464,13 +466,10 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
{
- struct extent_io_tree *tree;
u64 start = page_offset(page);
u64 found_start;
struct extent_buffer *eb;
- tree = &BTRFS_I(page->mapping->host)->io_tree;
-
eb = (struct extent_buffer *)page->private;
if (page != eb->pages[0])
return 0;
@@ -500,8 +499,8 @@ static int check_tree_block_fsid(struct btrfs_root *root,
}
#define CORRUPT(reason, eb, root, slot) \
- printk(KERN_CRIT "btrfs: corrupt leaf, %s: block=%llu," \
- "root=%llu, slot=%d\n", reason, \
+ btrfs_crit(root->fs_info, "corrupt leaf, %s: block=%llu," \
+ "root=%llu, slot=%d", reason, \
btrfs_header_bytenr(eb), root->objectid, slot)
static noinline int check_leaf(struct btrfs_root *root,
@@ -569,7 +568,6 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
u64 phy_offset, struct page *page,
u64 start, u64 end, int mirror)
{
- struct extent_io_tree *tree;
u64 found_start;
int found_level;
struct extent_buffer *eb;
@@ -580,7 +578,6 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
if (!page->private)
goto out;
- tree = &BTRFS_I(page->mapping->host)->io_tree;
eb = (struct extent_buffer *)page->private;
/* the pending IO might have been the only thing that kept this buffer
@@ -600,21 +597,21 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
found_start = btrfs_header_bytenr(eb);
if (found_start != eb->start) {
- printk_ratelimited(KERN_INFO "btrfs bad tree block start "
+ printk_ratelimited(KERN_INFO "BTRFS: bad tree block start "
"%llu %llu\n",
found_start, eb->start);
ret = -EIO;
goto err;
}
if (check_tree_block_fsid(root, eb)) {
- printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n",
+ printk_ratelimited(KERN_INFO "BTRFS: bad fsid on block %llu\n",
eb->start);
ret = -EIO;
goto err;
}
found_level = btrfs_header_level(eb);
if (found_level >= BTRFS_MAX_LEVEL) {
- btrfs_info(root->fs_info, "bad tree block level %d\n",
+ btrfs_info(root->fs_info, "bad tree block level %d",
(int)btrfs_header_level(eb));
ret = -EIO;
goto err;
@@ -964,11 +961,9 @@ static int btree_migratepage(struct address_space *mapping,
static int btree_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
- struct extent_io_tree *tree;
struct btrfs_fs_info *fs_info;
int ret;
- tree = &BTRFS_I(mapping->host)->io_tree;
if (wbc->sync_mode == WB_SYNC_NONE) {
if (wbc->for_kupdate)
@@ -1007,8 +1002,9 @@ static void btree_invalidatepage(struct page *page, unsigned int offset,
extent_invalidatepage(tree, page, offset);
btree_releasepage(page, GFP_NOFS);
if (PagePrivate(page)) {
- printk(KERN_WARNING "btrfs warning page private not zero "
- "on page %llu\n", (unsigned long long)page_offset(page));
+ btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
+ "page private not zero on page %llu",
+ (unsigned long long)page_offset(page));
ClearPagePrivate(page);
set_page_private(page, 0);
page_cache_release(page);
@@ -1092,21 +1088,13 @@ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
u64 bytenr, u32 blocksize)
{
- struct inode *btree_inode = root->fs_info->btree_inode;
- struct extent_buffer *eb;
- eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree, bytenr);
- return eb;
+ return find_extent_buffer(root->fs_info, bytenr);
}
struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
u64 bytenr, u32 blocksize)
{
- struct inode *btree_inode = root->fs_info->btree_inode;
- struct extent_buffer *eb;
-
- eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
- bytenr, blocksize);
- return eb;
+ return alloc_extent_buffer(root->fs_info, bytenr, blocksize);
}
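
Note: after this hunk, btree extent buffers are resolved through the per-filesystem radix tree (fs_info->buffer_radix, initialized further down in open_ctree by this same patch) instead of the btree inode's io_tree. A minimal sketch of a caller using the two wrappers above; not part of the patch, and example_lookup is a hypothetical name:

	/* Illustrative sketch only: look up a tree block, allocating the
	 * extent buffer in fs_info->buffer_radix if it is not cached yet. */
	static struct extent_buffer *example_lookup(struct btrfs_root *root,
						    u64 bytenr, u32 blocksize)
	{
		struct extent_buffer *eb;

		eb = btrfs_find_tree_block(root, bytenr, blocksize);
		if (eb)
			return eb;
		return btrfs_find_create_tree_block(root, bytenr, blocksize);
	}
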
@@ -1270,7 +1258,6 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *root;
struct btrfs_key key;
int ret = 0;
- u64 bytenr;
uuid_le uuid;
root = btrfs_alloc_root(fs_info);
@@ -1292,7 +1279,6 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
goto fail;
}
- bytenr = leaf->start;
memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
btrfs_set_header_bytenr(leaf, leaf->start);
btrfs_set_header_generation(leaf, trans->transid);
@@ -1613,7 +1599,8 @@ again:
if (ret)
goto fail;
- ret = btrfs_find_orphan_item(fs_info->tree_root, location->objectid);
+ ret = btrfs_find_item(fs_info->tree_root, NULL, BTRFS_ORPHAN_OBJECTID,
+ location->objectid, BTRFS_ORPHAN_ITEM_KEY, NULL);
if (ret < 0)
goto fail;
if (ret == 0)
@@ -1681,12 +1668,10 @@ static void end_workqueue_fn(struct btrfs_work *work)
{
struct bio *bio;
struct end_io_wq *end_io_wq;
- struct btrfs_fs_info *fs_info;
int error;
end_io_wq = container_of(work, struct end_io_wq, work);
bio = end_io_wq->bio;
- fs_info = end_io_wq->info;
error = end_io_wq->error;
bio->bi_private = end_io_wq->private;
@@ -2077,6 +2062,12 @@ static void del_fs_roots(struct btrfs_fs_info *fs_info)
for (i = 0; i < ret; i++)
btrfs_drop_and_free_fs_root(fs_info, gang[i]);
}
+
+ if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
+ btrfs_free_log_root_tree(NULL, fs_info);
+ btrfs_destroy_pinned_extent(fs_info->tree_root,
+ fs_info->pinned_extents);
+ }
}
int open_ctree(struct super_block *sb,
@@ -2151,6 +2142,7 @@ int open_ctree(struct super_block *sb,
mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
+ INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
INIT_LIST_HEAD(&fs_info->trans_list);
INIT_LIST_HEAD(&fs_info->dead_roots);
INIT_LIST_HEAD(&fs_info->delayed_iputs);
@@ -2164,6 +2156,7 @@ int open_ctree(struct super_block *sb,
spin_lock_init(&fs_info->free_chunk_lock);
spin_lock_init(&fs_info->tree_mod_seq_lock);
spin_lock_init(&fs_info->super_lock);
+ spin_lock_init(&fs_info->buffer_lock);
rwlock_init(&fs_info->tree_mod_log_lock);
mutex_init(&fs_info->reloc_mutex);
seqlock_init(&fs_info->profiles_lock);
@@ -2195,7 +2188,7 @@ int open_ctree(struct super_block *sb,
fs_info->free_chunk_space = 0;
fs_info->tree_mod_log = RB_ROOT;
fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
-
+ fs_info->avg_delayed_ref_runtime = div64_u64(NSEC_PER_SEC, 64);
/* readahead state */
INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
spin_lock_init(&fs_info->reada_lock);
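
Note: the added line seeds fs_info->avg_delayed_ref_runtime, presumably the initial estimate used by the delayed-ref throttling heuristics elsewhere in this series. The starting value works out as follows (illustrative arithmetic only):

	/* div64_u64(NSEC_PER_SEC, 64) = 1000000000 / 64 = 15625000 ns, ~15.6 ms */
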
@@ -2334,7 +2327,7 @@ int open_ctree(struct super_block *sb,
* Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
*/
if (btrfs_check_super_csum(bh->b_data)) {
- printk(KERN_ERR "btrfs: superblock checksum mismatch\n");
+ printk(KERN_ERR "BTRFS: superblock checksum mismatch\n");
err = -EINVAL;
goto fail_alloc;
}
@@ -2353,7 +2346,7 @@ int open_ctree(struct super_block *sb,
ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
if (ret) {
- printk(KERN_ERR "btrfs: superblock contains fatal errors\n");
+ printk(KERN_ERR "BTRFS: superblock contains fatal errors\n");
err = -EINVAL;
goto fail_alloc;
}
@@ -2418,7 +2411,7 @@ int open_ctree(struct super_block *sb,
features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
- printk(KERN_ERR "btrfs: has skinny extents\n");
+ printk(KERN_ERR "BTRFS: has skinny extents\n");
/*
* flag our filesystem as having big metadata blocks if
@@ -2426,7 +2419,7 @@ int open_ctree(struct super_block *sb,
*/
if (btrfs_super_leafsize(disk_super) > PAGE_CACHE_SIZE) {
if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
- printk(KERN_INFO "btrfs flagging fs with big metadata feature\n");
+ printk(KERN_INFO "BTRFS: flagging fs with big metadata feature\n");
features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
}
@@ -2443,7 +2436,7 @@ int open_ctree(struct super_block *sb,
*/
if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
(sectorsize != leafsize)) {
- printk(KERN_WARNING "btrfs: unequal leaf/node/sector sizes "
+ printk(KERN_WARNING "BTRFS: unequal leaf/node/sector sizes "
"are not allowed for mixed block groups on %s\n",
sb->s_id);
goto fail_alloc;
@@ -2580,12 +2573,12 @@ int open_ctree(struct super_block *sb,
sb->s_blocksize_bits = blksize_bits(sectorsize);
if (btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
- printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
+ printk(KERN_INFO "BTRFS: valid FS not found on %s\n", sb->s_id);
goto fail_sb_buffer;
}
if (sectorsize != PAGE_SIZE) {
- printk(KERN_WARNING "btrfs: Incompatible sector size(%lu) "
+ printk(KERN_WARNING "BTRFS: Incompatible sector size(%lu) "
"found on %s\n", (unsigned long)sectorsize, sb->s_id);
goto fail_sb_buffer;
}
@@ -2594,7 +2587,7 @@ int open_ctree(struct super_block *sb,
ret = btrfs_read_sys_array(tree_root);
mutex_unlock(&fs_info->chunk_mutex);
if (ret) {
- printk(KERN_WARNING "btrfs: failed to read the system "
+ printk(KERN_WARNING "BTRFS: failed to read the system "
"array on %s\n", sb->s_id);
goto fail_sb_buffer;
}
@@ -2611,7 +2604,7 @@ int open_ctree(struct super_block *sb,
blocksize, generation);
if (!chunk_root->node ||
!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
- printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
+ printk(KERN_WARNING "BTRFS: failed to read chunk root on %s\n",
sb->s_id);
goto fail_tree_roots;
}
@@ -2623,7 +2616,7 @@ int open_ctree(struct super_block *sb,
ret = btrfs_read_chunk_tree(chunk_root);
if (ret) {
- printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
+ printk(KERN_WARNING "BTRFS: failed to read chunk tree on %s\n",
sb->s_id);
goto fail_tree_roots;
}
@@ -2635,7 +2628,7 @@ int open_ctree(struct super_block *sb,
btrfs_close_extra_devices(fs_info, fs_devices, 0);
if (!fs_devices->latest_bdev) {
- printk(KERN_CRIT "btrfs: failed to read devices on %s\n",
+ printk(KERN_CRIT "BTRFS: failed to read devices on %s\n",
sb->s_id);
goto fail_tree_roots;
}
@@ -2650,7 +2643,7 @@ retry_root_backup:
blocksize, generation);
if (!tree_root->node ||
!test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
- printk(KERN_WARNING "btrfs: failed to read tree root on %s\n",
+ printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n",
sb->s_id);
goto recovery_tree_root;
@@ -2721,50 +2714,56 @@ retry_root_backup:
ret = btrfs_recover_balance(fs_info);
if (ret) {
- printk(KERN_WARNING "btrfs: failed to recover balance\n");
+ printk(KERN_WARNING "BTRFS: failed to recover balance\n");
goto fail_block_groups;
}
ret = btrfs_init_dev_stats(fs_info);
if (ret) {
- printk(KERN_ERR "btrfs: failed to init dev_stats: %d\n",
+ printk(KERN_ERR "BTRFS: failed to init dev_stats: %d\n",
ret);
goto fail_block_groups;
}
ret = btrfs_init_dev_replace(fs_info);
if (ret) {
- pr_err("btrfs: failed to init dev_replace: %d\n", ret);
+ pr_err("BTRFS: failed to init dev_replace: %d\n", ret);
goto fail_block_groups;
}
btrfs_close_extra_devices(fs_info, fs_devices, 1);
- ret = btrfs_init_space_info(fs_info);
+ ret = btrfs_sysfs_add_one(fs_info);
if (ret) {
- printk(KERN_ERR "Failed to initial space info: %d\n", ret);
+ pr_err("BTRFS: failed to init sysfs interface: %d\n", ret);
goto fail_block_groups;
}
+ ret = btrfs_init_space_info(fs_info);
+ if (ret) {
+ printk(KERN_ERR "BTRFS: Failed to initial space info: %d\n", ret);
+ goto fail_sysfs;
+ }
+
ret = btrfs_read_block_groups(extent_root);
if (ret) {
- printk(KERN_ERR "Failed to read block groups: %d\n", ret);
- goto fail_block_groups;
+ printk(KERN_ERR "BTRFS: Failed to read block groups: %d\n", ret);
+ goto fail_sysfs;
}
fs_info->num_tolerated_disk_barrier_failures =
btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
if (fs_info->fs_devices->missing_devices >
fs_info->num_tolerated_disk_barrier_failures &&
!(sb->s_flags & MS_RDONLY)) {
- printk(KERN_WARNING
- "Btrfs: too many missing devices, writeable mount is not allowed\n");
- goto fail_block_groups;
+ printk(KERN_WARNING "BTRFS: "
+ "too many missing devices, writeable mount is not allowed\n");
+ goto fail_sysfs;
}
fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
"btrfs-cleaner");
if (IS_ERR(fs_info->cleaner_kthread))
- goto fail_block_groups;
+ goto fail_sysfs;
fs_info->transaction_kthread = kthread_run(transaction_kthread,
tree_root,
@@ -2775,11 +2774,15 @@ retry_root_backup:
if (!btrfs_test_opt(tree_root, SSD) &&
!btrfs_test_opt(tree_root, NOSSD) &&
!fs_info->fs_devices->rotating) {
- printk(KERN_INFO "Btrfs detected SSD devices, enabling SSD "
+ printk(KERN_INFO "BTRFS: detected SSD devices, enabling SSD "
"mode\n");
btrfs_set_opt(fs_info->mount_opt, SSD);
}
+ /* Set the real inode map cache flag */
+ if (btrfs_test_opt(tree_root, CHANGE_INODE_CACHE))
+ btrfs_set_opt(tree_root->fs_info->mount_opt, INODE_MAP_CACHE);
+
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
if (btrfs_test_opt(tree_root, CHECK_INTEGRITY)) {
ret = btrfsic_mount(tree_root, fs_devices,
@@ -2788,7 +2791,7 @@ retry_root_backup:
1 : 0,
fs_info->check_integrity_print_mask);
if (ret)
- printk(KERN_WARNING "btrfs: failed to initialize"
+ printk(KERN_WARNING "BTRFS: failed to initialize"
" integrity check module %s\n", sb->s_id);
}
#endif
@@ -2801,7 +2804,7 @@ retry_root_backup:
u64 bytenr = btrfs_super_log_root(disk_super);
if (fs_devices->rw_devices == 0) {
- printk(KERN_WARNING "Btrfs log replay required "
+ printk(KERN_WARNING "BTRFS: log replay required "
"on RO media\n");
err = -EIO;
goto fail_qgroup;
@@ -2824,7 +2827,7 @@ retry_root_backup:
generation + 1);
if (!log_tree_root->node ||
!extent_buffer_uptodate(log_tree_root->node)) {
- printk(KERN_ERR "btrfs: failed to read log tree\n");
+ printk(KERN_ERR "BTRFS: failed to read log tree\n");
free_extent_buffer(log_tree_root->node);
kfree(log_tree_root);
goto fail_trans_kthread;
@@ -2858,7 +2861,7 @@ retry_root_backup:
ret = btrfs_recover_relocation(tree_root);
if (ret < 0) {
printk(KERN_WARNING
- "btrfs: failed to recover relocation\n");
+ "BTRFS: failed to recover relocation\n");
err = -EINVAL;
goto fail_qgroup;
}
@@ -2888,14 +2891,14 @@ retry_root_backup:
ret = btrfs_resume_balance_async(fs_info);
if (ret) {
- printk(KERN_WARNING "btrfs: failed to resume balance\n");
+ printk(KERN_WARNING "BTRFS: failed to resume balance\n");
close_ctree(tree_root);
return ret;
}
ret = btrfs_resume_dev_replace_async(fs_info);
if (ret) {
- pr_warn("btrfs: failed to resume dev_replace\n");
+ pr_warn("BTRFS: failed to resume dev_replace\n");
close_ctree(tree_root);
return ret;
}
@@ -2903,20 +2906,20 @@ retry_root_backup:
btrfs_qgroup_rescan_resume(fs_info);
if (create_uuid_tree) {
- pr_info("btrfs: creating UUID tree\n");
+ pr_info("BTRFS: creating UUID tree\n");
ret = btrfs_create_uuid_tree(fs_info);
if (ret) {
- pr_warn("btrfs: failed to create the UUID tree %d\n",
+ pr_warn("BTRFS: failed to create the UUID tree %d\n",
ret);
close_ctree(tree_root);
return ret;
}
} else if (check_uuid_tree ||
btrfs_test_opt(tree_root, RESCAN_UUID_TREE)) {
- pr_info("btrfs: checking UUID tree\n");
+ pr_info("BTRFS: checking UUID tree\n");
ret = btrfs_check_uuid_tree(fs_info);
if (ret) {
- pr_warn("btrfs: failed to check the UUID tree %d\n",
+ pr_warn("BTRFS: failed to check the UUID tree %d\n",
ret);
close_ctree(tree_root);
return ret;
@@ -2942,6 +2945,9 @@ fail_cleaner:
*/
filemap_write_and_wait(fs_info->btree_inode->i_mapping);
+fail_sysfs:
+ btrfs_sysfs_remove_one(fs_info);
+
fail_block_groups:
btrfs_put_block_group_cache(fs_info);
btrfs_free_block_groups(fs_info);
@@ -2997,7 +3003,7 @@ static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
struct btrfs_device *device = (struct btrfs_device *)
bh->b_private;
- printk_ratelimited_in_rcu(KERN_WARNING "lost page write due to "
+ printk_ratelimited_in_rcu(KERN_WARNING "BTRFS: lost page write due to "
"I/O error on %s\n",
rcu_str_deref(device->name));
/* note, we dont' set_buffer_write_io_error because we have
@@ -3116,7 +3122,7 @@ static int write_dev_supers(struct btrfs_device *device,
bh = __getblk(device->bdev, bytenr / 4096,
BTRFS_SUPER_INFO_SIZE);
if (!bh) {
- printk(KERN_ERR "btrfs: couldn't get super "
+ printk(KERN_ERR "BTRFS: couldn't get super "
"buffer head for bytenr %Lu\n", bytenr);
errors++;
continue;
@@ -3137,7 +3143,10 @@ static int write_dev_supers(struct btrfs_device *device,
* we fua the first super. The others we allow
* to go down lazy.
*/
- ret = btrfsic_submit_bh(WRITE_FUA, bh);
+ if (i == 0)
+ ret = btrfsic_submit_bh(WRITE_FUA, bh);
+ else
+ ret = btrfsic_submit_bh(WRITE_SYNC, bh);
if (ret)
errors++;
}
@@ -3183,7 +3192,7 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
wait_for_completion(&device->flush_wait);
if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
- printk_in_rcu("btrfs: disabling barriers on dev %s\n",
+ printk_in_rcu("BTRFS: disabling barriers on dev %s\n",
rcu_str_deref(device->name));
device->nobarriers = 1;
} else if (!bio_flagged(bio, BIO_UPTODATE)) {
@@ -3404,7 +3413,7 @@ static int write_all_supers(struct btrfs_root *root, int max_mirrors)
total_errors++;
}
if (total_errors > max_errors) {
- printk(KERN_ERR "btrfs: %d errors while writing supers\n",
+ btrfs_err(root->fs_info, "%d errors while writing supers",
total_errors);
mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
@@ -3452,10 +3461,8 @@ void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
if (btrfs_root_refs(&root->root_item) == 0)
synchronize_srcu(&fs_info->subvol_srcu);
- if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
+ if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
btrfs_free_log(NULL, root);
- btrfs_free_log_root_tree(NULL, fs_info);
- }
__btrfs_remove_free_space_cache(root->free_ino_pinned);
__btrfs_remove_free_space_cache(root->free_ino_ctl);
@@ -3560,14 +3567,12 @@ int close_ctree(struct btrfs_root *root)
if (!(fs_info->sb->s_flags & MS_RDONLY)) {
ret = btrfs_commit_super(root);
if (ret)
- printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
+ btrfs_err(root->fs_info, "commit super ret %d", ret);
}
if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
btrfs_error_commit_super(root);
- btrfs_put_block_group_cache(fs_info);
-
kthread_stop(fs_info->transaction_kthread);
kthread_stop(fs_info->cleaner_kthread);
@@ -3577,12 +3582,16 @@ int close_ctree(struct btrfs_root *root)
btrfs_free_qgroup_config(root->fs_info);
if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
- printk(KERN_INFO "btrfs: at unmount delalloc count %lld\n",
+ btrfs_info(root->fs_info, "at unmount delalloc count %lld",
percpu_counter_sum(&fs_info->delalloc_bytes));
}
+ btrfs_sysfs_remove_one(fs_info);
+
del_fs_roots(fs_info);
+ btrfs_put_block_group_cache(fs_info);
+
btrfs_free_block_groups(fs_info);
btrfs_stop_all_workers(fs_info);
@@ -3800,55 +3809,55 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
delayed_refs = &trans->delayed_refs;
spin_lock(&delayed_refs->lock);
- if (delayed_refs->num_entries == 0) {
+ if (atomic_read(&delayed_refs->num_entries) == 0) {
spin_unlock(&delayed_refs->lock);
- printk(KERN_INFO "delayed_refs has NO entry\n");
+ btrfs_info(root->fs_info, "delayed_refs has NO entry");
return ret;
}
- while ((node = rb_first(&delayed_refs->root)) != NULL) {
- struct btrfs_delayed_ref_head *head = NULL;
+ while ((node = rb_first(&delayed_refs->href_root)) != NULL) {
+ struct btrfs_delayed_ref_head *head;
bool pin_bytes = false;
- ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
- atomic_set(&ref->refs, 1);
- if (btrfs_delayed_ref_is_head(ref)) {
-
- head = btrfs_delayed_node_to_head(ref);
- if (!mutex_trylock(&head->mutex)) {
- atomic_inc(&ref->refs);
- spin_unlock(&delayed_refs->lock);
-
- /* Need to wait for the delayed ref to run */
- mutex_lock(&head->mutex);
- mutex_unlock(&head->mutex);
- btrfs_put_delayed_ref(ref);
-
- spin_lock(&delayed_refs->lock);
- continue;
- }
-
- if (head->must_insert_reserved)
- pin_bytes = true;
- btrfs_free_delayed_extent_op(head->extent_op);
- delayed_refs->num_heads--;
- if (list_empty(&head->cluster))
- delayed_refs->num_heads_ready--;
- list_del_init(&head->cluster);
- }
+ head = rb_entry(node, struct btrfs_delayed_ref_head,
+ href_node);
+ if (!mutex_trylock(&head->mutex)) {
+ atomic_inc(&head->node.refs);
+ spin_unlock(&delayed_refs->lock);
- ref->in_tree = 0;
- rb_erase(&ref->rb_node, &delayed_refs->root);
- delayed_refs->num_entries--;
- spin_unlock(&delayed_refs->lock);
- if (head) {
- if (pin_bytes)
- btrfs_pin_extent(root, ref->bytenr,
- ref->num_bytes, 1);
+ mutex_lock(&head->mutex);
mutex_unlock(&head->mutex);
+ btrfs_put_delayed_ref(&head->node);
+ spin_lock(&delayed_refs->lock);
+ continue;
+ }
+ spin_lock(&head->lock);
+ while ((node = rb_first(&head->ref_root)) != NULL) {
+ ref = rb_entry(node, struct btrfs_delayed_ref_node,
+ rb_node);
+ ref->in_tree = 0;
+ rb_erase(&ref->rb_node, &head->ref_root);
+ atomic_dec(&delayed_refs->num_entries);
+ btrfs_put_delayed_ref(ref);
+ cond_resched_lock(&head->lock);
}
- btrfs_put_delayed_ref(ref);
+ if (head->must_insert_reserved)
+ pin_bytes = true;
+ btrfs_free_delayed_extent_op(head->extent_op);
+ delayed_refs->num_heads--;
+ if (head->processing == 0)
+ delayed_refs->num_heads_ready--;
+ atomic_dec(&delayed_refs->num_entries);
+ head->node.in_tree = 0;
+ rb_erase(&head->href_node, &delayed_refs->href_root);
+ spin_unlock(&head->lock);
+ spin_unlock(&delayed_refs->lock);
+ mutex_unlock(&head->mutex);
+ if (pin_bytes)
+ btrfs_pin_extent(root, head->node.bytenr,
+ head->node.num_bytes, 1);
+ btrfs_put_delayed_ref(&head->node);
cond_resched();
spin_lock(&delayed_refs->lock);
}
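
For readability, the new loop body introduced by this hunk is shown below reassembled from the '+' lines (nothing new is added): heads are walked via delayed_refs->href_root, each head's private ref_root rbtree is drained under head->lock, and the head itself is then unlinked, with its extent pinned when must_insert_reserved is set.

	while ((node = rb_first(&delayed_refs->href_root)) != NULL) {
		struct btrfs_delayed_ref_head *head;
		bool pin_bytes = false;

		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
		if (!mutex_trylock(&head->mutex)) {
			atomic_inc(&head->node.refs);
			spin_unlock(&delayed_refs->lock);

			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(&head->node);
			spin_lock(&delayed_refs->lock);
			continue;
		}
		spin_lock(&head->lock);
		while ((node = rb_first(&head->ref_root)) != NULL) {
			ref = rb_entry(node, struct btrfs_delayed_ref_node,
				       rb_node);
			ref->in_tree = 0;
			rb_erase(&ref->rb_node, &head->ref_root);
			atomic_dec(&delayed_refs->num_entries);
			btrfs_put_delayed_ref(ref);
			cond_resched_lock(&head->lock);
		}
		if (head->must_insert_reserved)
			pin_bytes = true;
		btrfs_free_delayed_extent_op(head->extent_op);
		delayed_refs->num_heads--;
		if (head->processing == 0)
			delayed_refs->num_heads_ready--;
		atomic_dec(&delayed_refs->num_entries);
		head->node.in_tree = 0;
		rb_erase(&head->href_node, &delayed_refs->href_root);
		spin_unlock(&head->lock);
		spin_unlock(&delayed_refs->lock);
		mutex_unlock(&head->mutex);
		if (pin_bytes)
			btrfs_pin_extent(root, head->node.bytenr,
					 head->node.num_bytes, 1);
		btrfs_put_delayed_ref(&head->node);
		cond_resched();
		spin_lock(&delayed_refs->lock);
	}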