/*
 * Copyright (C) 2011, 2012 STRATO. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "raid56.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_block;
struct scrub_ctx;

/*
 * the following three values only influence the performance.
 * The last one configures the number of parallel and outstanding I/O
 * operations. The first two values configure an upper limit for the number
 * of (dynamically allocated) pages that are added to a bio.
 */
#define SCRUB_PAGES_PER_RD_BIO	32	/* 128k per bio */
#define SCRUB_PAGES_PER_WR_BIO	32	/* 128k per bio */
#define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */

/*
 * the following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 * Values larger than BTRFS_STRIPE_LEN are not supported.
 */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */

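/*
 * shared recovery state for the pages of one re-checked block: keeps the
 * mapping information (bbio, raid_map, mapped length) alive and refcounted
 * so all mirrors built for the recheck can reuse it; it is freed when the
 * last page drops its reference (see scrub_put_recover())
 */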
struct scrub_recover {
	atomic_t		refs;
	struct btrfs_bio	*bbio;
	u64			*raid_map;
	u64			map_length;
};

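/* one page of a scrub_block, together with its on-disk location and csum */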
struct scrub_page {
	struct scrub_block	*sblock;
	struct page		*page;
	struct btrfs_device	*dev;
	u64			flags;  /* extent flags */
	u64			generation;
	u64			logical;
	u64			physical;
	u64			physical_for_dev_replace;
	atomic_t		ref_count;
	struct {
		unsigned int	mirror_num:8;
		unsigned int	have_csum:1;
		unsigned int	io_error:1;
	};
	u8			csum[BTRFS_CSUM_SIZE];

	struct scrub_recover	*recover;
};

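/* one in-flight read or write bio and the scrub pages attached to it */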
struct scrub_bio {
	int			index;
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	struct bio		*bio;
	int			err;
	u64			logical;
	u64			physical;
#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
	struct scrub_page	*pagev[SCRUB_PAGES_PER_WR_BIO];
#else
	struct scrub_page	*pagev[SCRUB_PAGES_PER_RD_BIO];
#endif
	int			page_count;
	int			next_free;
	struct btrfs_work	work;
};

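/*
 * a scrub_block groups the pages that belong to one checksummed unit
 * (data sector or tree block); checksum verification and repair operate
 * on whole blocks
 */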
struct scrub_block {
	struct scrub_page	*pagev[SCRUB_MAX_PAGES_PER_BLOCK];
	int			page_count;
	atomic_t		outstanding_pages;
	atomic_t		ref_count; /* free mem on transition to zero */
	struct scrub_ctx	*sctx;
	struct {
		unsigned int	header_error:1;
		unsigned int	checksum_error:1;
		unsigned int	no_io_error_seen:1;
		unsigned int	generation_error:1; /* also sets header_error */
	};
};

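/* write context for the dev-replace target device, protected by wr_lock */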
struct scrub_wr_ctx {
	struct scrub_bio	*wr_curr_bio;
	struct btrfs_device	*tgtdev;
	int			pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
	atomic_t		flush_all_writes;
	struct mutex		wr_lock;
};

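/* state for one running scrub; one scrub_ctx exists per scrubbed device */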
struct scrub_ctx {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
	struct btrfs_root	*dev_root;
	int			first_free;
	int			curr;
	atomic_t		bios_in_flight;
	atomic_t		workers_pending;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	u16			csum_size;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	int			pages_per_rd_bio;
	u32			sectorsize;
	u32			nodesize;

	int			is_dev_replace;
	struct scrub_wr_ctx	wr_ctx;

	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;
};

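/* work item used to repair a data error on an extent without checksum */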
struct scrub_fixup_nodatasum {
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	u64			logical;
	struct btrfs_root	*root;
	struct btrfs_work	work;
	int			mirror_num;
};

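/*
 * the two structures below describe the inodes that reference a nocow
 * extent and the copy work queued for the dev-replace target device
 */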
struct scrub_nocow_inode {
	u64			inum;
	u64			offset;
	u64			root;
	struct list_head	list;
};

struct scrub_copy_nocow_ctx {
	struct scrub_ctx	*sctx;
	u64			logical;
	u64			len;
	int			mirror_num;
	u64			physical_for_dev_replace;
	struct list_head	inodes;
	struct btrfs_work	work;
};

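/* context passed to the backref walker when printing error warnings */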
struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	const char		*errstr;
	sector_t		sector;
	u64			logical;
	struct btrfs_device	*dev;
};

static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
				     struct btrfs_fs_info *fs_info,
				     struct scrub_block *original_sblock,
				     u64 length, u64 logical,
				     struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock, int is_metadata,
				int have_csum, u8 *csum, u64 generation,
				u16 csum_size, int retry_failed_mirror);
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
					 struct scrub_block *sblock,
					 int is_metadata, int have_csum,
					 const u8 *csum, u64 generation,
					 u16 csum_size);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good,
					     int force_write);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio, int err);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num);
static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
			      struct scrub_wr_ctx *wr_ctx,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_device *dev,
			      int is_dev_replace);
static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio, int err);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page);
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
				      struct scrub_copy_nocow_ctx *ctx);
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
			    int mirror_num, u64 physical_for_dev_replace);
static void copy_nocow_pages_worker(struct btrfs_work *work);
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);

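/*
 * scrub_pending_bio_inc/_dec track the bios a scrub_ctx has in flight, so
 * waiters on list_wait can wait until all of them have completed
 */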
static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	atomic_inc(&sctx->bios_in_flight);
}

static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
}

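/*
 * pause handling: __scrub_blocked_if_needed() drops scrub_lock and waits
 * while a pause is requested; scrub_blocked_if_needed() additionally
 * accounts the caller as paused so a pending pause request can proceed
 */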
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
}

static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);

	mutex_lock(&fs_info->scrub_lock);
	__scrub_blocked_if_needed(fs_info);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	wake_up(&fs_info->scrub_pause_wait);
}

/*
 * used for workers that require transaction commits (i.e., for the
 * NOCOW case)
 */
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	/*
	 * increment scrubs_running to prevent cancel requests from
	 * completing as long as a worker is running. we must also
	 * increment scrubs_paused to prevent deadlocking on pause
	 * requests used for transaction commits (as the worker uses a
	 * transaction context). it is safe to regard the worker
	 * as paused for all practical matters. effectively, we only
	 * avoid cancellation requests from completing.
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrubs_running);
	atomic_inc(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	/*
	 * the check of @scrubs_running == @scrubs_paused inside
	 * wait_event() is not an atomic operation, which means we may
	 * inc/dec @scrubs_running/@scrubs_paused at any time. Wake up
	 * @scrub_pause_wait as much as we can so that a blocked
	 * transaction commit waits as little as possible.
	 */
	wake_up(&fs_info->scrub_pause_wait);

	atomic_inc(&sctx->workers_pending);
}

/* used for workers that require transaction commits */
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	/*
	 * see scrub_pending_trans_workers_inc() why we're pretending
	 * to be paused in the scrub counters
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_dec(&fs_info->scrubs_running);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_dec(&sctx->workers_pending);
	wake_up(&fs_info->scrub_pause_wait);
	wake_up(&sctx->list_wait);
}

static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}

static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	scrub_free_wr_ctx(&sctx->wr_ctx);

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->page_count; i++) {
			WARN_ON(!sbio->pagev[i]->page);
			scrub_block_put(sbio->pagev[i]->sblock);
		}
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	scrub_free_csums(sctx);
	kfree(sctx);
}

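/*
 * allocate and initialize the scrub context for one device, including the
 * pool of SCRUB_BIOS_PER_SCTX scrub_bios and, for dev-replace, the write
 * context for the target device
 */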
static noinline_for_stack
struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int i;
	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
	int pages_per_rd_bio;
	int ret;

	/*
	 * the setting of pages_per_rd_bio is correct for scrub but might
	 * be wrong for the dev_replace code where we might read from
	 * different devices in the initial huge bios. However, that
	 * code is able to correctly handle the case when adding a page
	 * to a bio fails.
	 */
	if (dev->bdev)
		pages_per_rd_bio = min_t(int, SCRUB_PAGES_PER_RD_BIO,
					 bio_get_nr_vecs(dev->bdev));
	else
		pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
	sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
	if (!sctx)
		goto nomem;
	sctx->is_dev_replace = is_dev_replace;
	sctx->pages_per_rd_bio = pages_per_rd_bio;
	sctx->curr = -1;
	sctx->dev_root = dev->dev_root;
	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
		if (!sbio)
			goto nomem;
		sctx->bios[i] = sbio;

		sbio->index = i;
		sbio->sctx = sctx;
		sbio->page_count = 0;
		btrfs_init_work(&sbio->work, btrfs_scrub_helper,
				scrub_bio_end_io_worker, NULL, NULL);

		if (i != SCRUB_BIOS_PER_SCTX - 1)
			sctx->bios[i]->next_free = i + 1;
		else
			sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	sctx->nodesize = dev->dev_root->nodesize;
	sctx->sectorsize = dev->dev_root->sectorsize;
	atomic_set(&sctx->bios_in_flight, 0);
	atomic_set(&sctx->workers_pending, 0);
	atomic_set(&sctx->cancel_req, 0);
	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
	INIT_LIST_HEAD(&sctx->csum_list);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);

	ret = scrub_setup_wr_ctx(sctx, &sctx->wr_ctx, fs_info,
				 fs_info->dev_replace.tgtdev, is_dev_replace);
	if (ret) {
		scrub_free_ctx(sctx);
		return ERR_PTR(ret);
	}
	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}

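/*
 * callback for iterate_extent_inodes(): resolves the file paths that
 * reference the corrupted extent and prints one warning line per path
 */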
static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
				     void *warn_ctx)
{
	u64 isize;
	u32 nlink;
	int ret;
	int i;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = warn_ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key root_key;

	root_key.objectid = root;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	ret = inode_item_info(inum, 0, local_root, swarn->path);
	if (ret) {
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
				    struct btrfs_inode_item);
	isize = btrfs_inode_size(eb, inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	ipath = init_ipath(4096, local_root, swarn->path);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;

	/*
	 * we deliberately ignore the bit ipath might have been too small to
	 * hold all of the paths here
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		printk_in_rcu(KERN_WARNING "BTRFS: %s at logical %llu on dev "
			"%s, sector %llu, root %llu, inode %llu, offset %llu, "
			"length %llu, links %u (path: %s)\n", swarn->errstr,
			swarn->logical, rcu_str_deref(swarn->dev->name),
			(unsigned long long)swarn->sector, root, inum, offset,
			min(isize - offset, (u64)PAGE_SIZE), nlink,
			(char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	return 0;

err:
	printk_in_rcu(KERN_WARNING "BTRFS: %s at logical %llu on dev "
		"%s, sector %llu, root %llu, inode %llu, offset %llu: path "
		"resolving failed with ret=%d\n", swarn->errstr,
		swarn->logical, rcu_str_deref(swarn->dev->name),
		(unsigned long long)swarn->sector, root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}

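/*
 * print a warning for a corrupted block, either naming the tree that owns
 * a metadata block or resolving the files that reference a data extent
 */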
static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	unsigned long ptr = 0;
	u64 extent_item_pos;
	u64 flags = 0;
	u64 ref_root;
	u32 item_size;
	u8 ref_level;
	int ret;

	WARN_ON(sblock->page_count < 1);
	dev = sblock->pagev[0]->dev;
	fs_info = sblock->sctx->dev_root->fs_info;

	path = btrfs_alloc_path();
	if (!path)
		return;

	swarn.sector = (sblock->pagev[0]->physical) >> 9;
	swarn.logical = sblock->pagev[0]->logical;
	swarn.errstr = errstr;
	swarn.dev = NULL;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
				  &flags);
	if (ret < 0)
		goto out;

	extent_item_pos = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			printk_in_rcu(KERN_WARNING
				"BTRFS: %s at logical %llu on dev %s, "
				"sector %llu: metadata %s (level %d) in tree "
				"%llu\n", errstr, swarn.logical,
				rcu_str_deref(dev->name),
				(unsigned long long)swarn.sector,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
		btrfs_release_path(path);
	} else {
		btrfs_release_path(path);
		swarn.path = path;
		swarn.dev = dev;
		iterate_extent_inodes(fs_info, found_key.objectid,
				      extent_item_pos, 1,
				      scrub_print_warning_inode, &swarn);
	}

out:
	btrfs_free_path(path);
}

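/*
 * callback for iterate_inodes_from_logical(): tries to repair one page of
 * a nodatasum extent, either by rewriting a clean page-cache copy to the
 * bad sector or by forcing a read of the bad mirror so the normal read
 * path repairs it on the fly
 */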
static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
{
	struct page *page = NULL;
	unsigned long index;
	struct scrub_fixup_nodatasum *fixup = fixup_ctx;
	int ret;
	int corrected = 0;
	struct btrfs_key key;
	struct inode *inode = NULL;
	struct btrfs_fs_info *fs_info;
	u64 end = offset + PAGE_SIZE - 1;
	struct btrfs_root *local_root;
	int srcu_index;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	fs_info = fixup->root->fs_info;
	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(local_root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return PTR_ERR(local_root);
	}

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	index = offset >> PAGE_CACHE_SHIFT;

	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page)) {
		if (PageDirty(page)) {
			/*
			 * we need to write the data to the defect sector. the
			 * data that was in that sector is not in memory,
			 * because the page was modified. we must not write the
			 * modified page to that sector.
			 *
			 * TODO: what could be done here: wait for the delalloc
			 *       runner to write out that page (might involve
			 *       COW) and see whether the sector is still
			 *       referenced afterwards.
			 *
			 * For the meantime, we'll treat this error as
			 * uncorrectable, although there is a chance that a
			 * later scrub will find the bad sector again and that
			 * there's no dirty page in memory, then.
			 */
			ret = -EIO;
			goto out;
		}
		ret = repair_io_failure(inode, offset, PAGE_SIZE,
					fixup->logical, page,
					offset - page_offset(page),
					fixup->mirror_num);
		unlock_page(page);
		corrected = !ret;
	} else {
		/*
		 * we need to get good data first. the general readpage path
		 * will call repair_io_failure for us, we just have to make
		 * sure we read the bad mirror.
		 */
		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
				      EXTENT_DAMAGED, GFP_NOFS);
		if (ret) {
			/* set_extent_bits should give proper error */
			WARN_ON(ret > 0);
			if (ret > 0)
				ret = -EFAULT;
			goto out;
		}

		ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
					    btrfs_get_extent,
					    fixup->mirror_num);
		wait_on_page_locked(page);

		corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
					    end, EXTENT_DAMAGED, 0, NULL);
		if (!corrected)
			clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
					  EXTENT_DAMAGED, GFP_NOFS);
	}

out:
	if (page)
		put_page(page);

	iput(inode);

	if (ret < 0)
		return ret;

	if (ret == 0 && corrected) {
		/*
		 * we only need to call readpage for one of the inodes belonging
		 * to this extent. so make iterate_extent_inodes stop
		 */
		return 1;
	}

	return -EIO;
}

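/*
 * worker for the nodatasum fixup case: runs in a transaction context and
 * walks all inodes that reference the bad logical address, letting
 * scrub_fixup_readpage() do the actual repair
 */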
static void scrub_fixup_nodatasum(struct btrfs_work *work)
{
	int ret;
	struct scrub_fixup_nodatasum *fixup;
	struct scrub_ctx *sctx;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_path *path;
	int uncorrectable = 0;

	fixup = container_of(work, struct scrub_fixup_nodatasum, work);
	sctx = fixup->sctx;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.malloc_errors;
		spin_unlock(&sctx->stat_lock);
		uncorrectable = 1;
		goto out;
	}

	trans = btrfs_join_transaction(fixup->root);
	if (IS_ERR(trans)) {
		uncorrectable = 1;
		goto out;
	}

	/*
	 * the idea is to trigger a regular read through the standard path. we
	 * read a page from the (failed) logical address by specifying the
	 * corresponding copynum of the failed sector. thus, that readpage is
	 * expected to fail.
	 * that is the point where on-the-fly error correction will kick in
	 * (once it's finished) and rewrite the failed sector if a good copy
	 * can be found.
	 */
	ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
					  path, scrub_fixup_readpage,
					  fixup);
	if (ret < 0) {
		uncorrectable = 1;
		goto out;
	}
	WARN_ON(ret != 1);

	spin_lock(&sctx->stat_lock);
	++sctx->stat.corrected_errors;
	spin_unlock(&sctx->stat_lock);

out:
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, fixup->root);
	if (uncorrectable) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.uncorrectable_errors;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_replace_stats_inc(
			&sctx->dev_root->fs_info->dev_replace.
			num_uncorrectable_read_errors);
		printk_ratelimited_in_rcu(KERN_ERR "BTRFS: "
		    "unable to fixup (nodatasum) error at logical %llu on dev %s\n",
			fixup->logical, rcu_str_deref(fixup->dev->name));
	}

	btrfs_free_path(path);
	kfree(fixup);

	scrub_pending_trans_workers_dec(sctx);
}

static inline void scrub_get_recover(struct scrub_recover *recover)
{
	atomic_inc(&recover->refs);
}

static inline void scrub_put_recover(struct scrub_recover *recover)
{
	if (atomic_dec_and_test(&recover->refs)) {
		kfree(recover->bbio);
		kfree(recover->raid_map);
		kfree(recover);
	}
}

/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
	struct scrub_ctx *sctx = sblock_to_check->sctx;
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	u64 length;
	u64 logical;
	u64 generation;
	unsigned int failed_mirror_index;
	unsigned int is_metadata;
	unsigned int have_csum;
	u8 *csum;
	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
	struct scrub_block *sblock_bad;
	int ret;
	int mirror_index;
	int page_num;
	int success;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	BUG_ON(sblock_to_check->page_count < 1);
	fs_info = sctx->dev_root->fs_info;
	if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
		/*
		 * if we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		return 0;
	}
	length = sblock_to_check->page_count * PAGE_SIZE;
	logical = sblock_to_check->pagev[0]->logical;
	generation = sblock_to_check->pagev[0]->generation;
	BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
	failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
	is_metadata = !(sblock_to_check->pagev[0]->flags &
			BTRFS_EXTENT_FLAG_DATA);
	have_csum = sblock_to_check->pagev[0]->have_csum;
	csum = sblock_to_check->pagev[0]->csum;
	dev = sblock_to_check->pagev[0]->dev;

	if (sctx->is_dev_replace && !is_metadata && !have_csum) {
		sblocks_for_recheck = NULL;
		goto nodatasum_case;
	}

	/*
	 * read all mirrors one after the other. This includes to
	 * re-read the extent or metadata block that failed (that was
	 * the cause that this fixup code is called) another time,
	 * page by page this time in order to know which pages
	 * caused I/O errors and which ones are good (for all mirrors).
	 * It is the goal to handle the situation when more than one
	 * mirror contains I/O errors, but the errors do not
	 * overlap, i.e. the data can be repaired by selecting the
	 * pages from those mirrors without I/O error on the
	 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
	 * would be that mirror #1 has an I/O error on the first page,
	 * the second page is good, and mirror #2 has an I/O error on
	 * the second page, but the first page is good.
	 * Then the first page of the first mirror can be repaired by
	 * taking the first page of the second mirror, and the
	 * second page of the second mirror can be repaired by
	 * copying the contents of the 2nd page of the 1st mirror.
	 * One more note: if the pages of one mirror contain I/O
	 * errors, the checksum cannot be verified. In order to get
	 * the best data for repairing, the first attempt is to find
	 * a mirror without I/O errors and with a validated checksum.
	 * Only if this is not possible, the pages are picked from
	 * mirrors with I/O errors without considering the checksum.
	 * If the latter is the case, at the end, the checksum of the
	 * repaired area is verified in order to correctly maintain
	 * the statistics.
	 */

	sblocks_for_recheck = kzalloc(BTRFS_MAX_MIRRORS *
				      sizeof(*sblocks_for_recheck),
				      GFP_NOFS);
	if (!sblocks_for_recheck) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}

	/* setup the context, map the logical blocks and alloc the pages */
	ret = scrub_setup_recheck_block(sctx, fs_info, sblock_to_check, length,
					logical, sblocks_for_recheck);
	if (ret) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}
	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
	sblock_bad = sblocks_for_recheck + failed_mirror_index;

	/* build and submit the bios for the failed mirror, check checksums */
	scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
			    csum, generation, sctx->csum_size, 1);

	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
	    sblock_bad->no_io_error_seen) {
		/*
		 * the error disappeared after reading page by page, or
		 * the area was part of a huge bio and other parts of the
		 * bio caused I/O errors, or the block layer merged several
		 * read requests into one and the error is caused by a
		 * different bio (usually one of the two latter cases is
		 * the cause)
		 */
		spin_lock(&sctx->stat_lock);
		sctx->stat.unverified_errors++;
		spin_unlock(&sctx->stat_lock);

		if (sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock_bad);
		goto out;
	}

	if (!sblock_bad->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("i/o error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
	} else if (sblock_bad->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.csum_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev,
					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
	} else if (sblock_bad->header_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.verify_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum/header error",
					    sblock_to_check);
		if (sblock_bad->generation_error)
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
	}

	if (sctx->readonly) {
		ASSERT(!sctx->is_dev_replace);
		goto out;
	}

	if (!is_metadata && !have_csum) {
		struct scrub_fixup_nodatasum *fixup_nodatasum;

nodatasum_case:
		WARN_ON(sctx->is_dev_replace);

		/*
		 * !is_metadata and !have_csum, this means that the data
		 * might not be COW'ed, that it might be modified
		 * concurrently. The general strategy to work on the
		 * commit root does not help in the case when COW is not
		 * used.
		 */
		fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
		if (!fixup_nodatasum)
			goto did_not_correct_error;
		fixup_nodatasum->sctx = sctx;
		fixup_nodatasum->dev = dev;
		fixup_nodatasum->logical = logical;
		fixup_nodatasum->root = fs_info->extent_root;
		fixup_nodatasum->mirror_num = failed_mirror_index + 1;
		scrub_pending_trans_workers_inc(sctx);
		btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper,
				scrub_fixup_nodatasum, NULL, NULL);
		btrfs_queue_work(fs_info->scrub_workers,
				 &fixup_nodatasum->work);
		goto out;
	}

	/*
	 * now build and submit the bios for the other mirrors, check
	 * checksums.
	 * First try to pick the mirror which is completely without I/O
	 * errors and also does not have a checksum error.
	 * If one is found, and if a checksum is present, the full block
	 * that is known to contain an error is rewritten. Afterwards
	 * the block is known to be corrected.
	 * If a mirror is found which is completely correct, and no
	 * checksum is present, only those pages are rewritten that had
	 * an I/O error in the block to be repaired, since it cannot be
	 * determined which copy of the other pages is better (and it
	 * could happen otherwise that a correct page would be
	 * overwritten by a bad one).
	 */
	for (mirror_index = 0;
	     mirror_index < BTRFS_MAX_MIRRORS &&
	     sblocks_for_recheck[mirror_index].page_count > 0;
	     mirror_index++) {
		struct scrub_block *sblock_other;

		if (mirror_index == failed_mirror_index)
			continue;
		sblock_other = sblocks_for_recheck + mirror_index;

		/* build and submit the bios, check checksums */
		scrub_recheck_block(fs_info, sblock_other, is_metadata,
				    have_csum, csum, generation,
				    sctx->csum_size, 0);

		if (!sblock_other->header_error &&
		    !sblock_other->checksum_error &&
		    sblock_other->no_io_error_seen) {
			if (sctx->is_dev_replace) {
				scrub_write_block_to_dev_replace(sblock_other);
			} else {
				int force_write = is_metadata || have_csum;

				ret = scrub_repair_block_from_good_copy(
						sblock_bad, sblock_other,
						force_write);
			}
			if (0 == ret)
				goto corrected_error;
		}
	}

	/*
	 * for dev_replace, pick good pages and write to the target device.
	 */
	if (sctx->is_dev_replace) {
		success = 1;
		for (page_num = 0; page_num < sblock_bad->page_count;
		     page_num++) {
			int sub_success;

			sub_success = 0;
			for (mirror_index = 0;
			     mirror_index < BTRFS_MAX_MIRRORS &&
			     sblocks_for_recheck[mirror_index].page_count > 0;
			     mirror_index++) {
				struct scrub_block *sblock_other =
					sblocks_for_recheck + mirror_index;
				struct scrub_page *page_other =
					sblock_other->pagev[page_num];

				if (!page_other->io_error) {
					ret = scrub_write_page_to_dev_replace(
							sblock_other, page_num);
					if (ret == 0) {
						/* succeeded for this page */
						sub_success = 1;
						break;
					} else {
						btrfs_dev_replace_stats_inc(
							&sctx->dev_root->
							fs_info->dev_replace.
							num_write_errors);
					}
				}
			}

			if (!sub_success) {
				/*
				 * did not find a mirror to fetch the page
				 * from. scrub_write_page_to_dev_replace()
				 * handles this case (page->io_error), by
				 * filling the block with zeros before
				 * submitting the write request
				 */
				success = 0;
				ret = scrub_write_page_to_dev_replace(
						sblock_bad, page_num);
				if (ret)
					btrfs_dev_replace_stats_inc(
						&sctx->dev_root->fs_info->
						dev_replace.num_write_errors);
			}
		}

		goto out;
	}

	/*
	 * for regular scrub, repair those pages that are errored.
	 * In case of I/O errors in the area that is supposed to be
	 * repaired, continue by picking good copies of those pages.
	 * Select the good pages from mirrors to rewrite bad pages from
	 * the area to fix. Afterwards verify the checksum of the block
	 * that is supposed to be repaired. This verification step is
	 * only done for the purpose of statistic counting and for the
	 * final scrub report, whether errors remain.
	 * A perfect algorithm could make use of the checksum and try
	 * all possible combinations of pages from the different mirrors
	 * until the checksum verification succeeds. For example, when
	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
	 * of mirror #2 is readable but the final checksum test fails,
	 * then the 2nd page of mirror #3 could be tried, to see whether
	 * now the final checksum succeeds. But this would be a rare
	 * exception and is therefore not implemented. At least it is
	 * avoided that the good copy is overwritten.
	 * A more useful improvement would be to pick the sectors
	 * without I/O error based on sector sizes (512 bytes on legacy
	 * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
	 * mirror could be repaired by taking 512 bytes of a different
	 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
	 * area are unreadable.
	 */

	/* can only fix I/O errors from here on */
	if (sblock_bad->no_io_error_seen)
		goto did_not_correct_error;

	success = 1;
	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
		struct scrub_page *page_bad = sblock_bad->pagev[page_num];

		if (!page_bad->io_error)
			continue;

		for (mirror_index = 0;
		     mirror_index < BTRFS_MAX_MIRRORS &&
		     sblocks_for_recheck[mirror_index].page_count > 0;
		     mirror_index++) {
			struct scrub_block *sblock_other = sblocks_for_recheck +
							   mirror_index;
			struct scrub_page *page_other = sblock_other->pagev[
							page_num];

			if (!page_other->io_error) {
				ret = scrub_repair_page_from_good_copy(
					sblock_bad, sblock_other, page_num, 0);
				if (0 == ret) {
					page_bad->io_error = 0;
					break; /* succeeded for this page */
				}
			}
		}

		if (page_bad->io_error) {
			/* did not find a mirror to copy the page from */
			success = 0;
		}
	}

1182 if (success) {
1183 if (is_metadata || have_csum) {
1184 /*
1185 * need to verify the checksum now that all
1186 * sectors on disk are repaired (the write
1187 * request for data to be repaired is on its way).
1188 * Just be lazy and use scrub_recheck_block()
1189 * which re-reads the data before the checksum
1190 * is verified, but most likely the data comes out
1191 * of the page cache.
1192 */
Stefan Behrens34f5c8e2012-11-02 16:16:26 +01001193 scrub_recheck_block(fs_info, sblock_bad,
1194 is_metadata, have_csum, csum,
Miao Xieaf8e2d12014-10-23 14:42:50 +08001195 generation, sctx->csum_size, 1);
Stefan Behrens34f5c8e2012-11-02 16:16:26 +01001196 if (!sblock_bad->header_error &&
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001197 !sblock_bad->checksum_error &&
1198 sblock_bad->no_io_error_seen)
1199 goto corrected_error;
1200 else
1201 goto did_not_correct_error;
1202 } else {
1203corrected_error:
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001204 spin_lock(&sctx->stat_lock);
1205 sctx->stat.corrected_errors++;
1206 spin_unlock(&sctx->stat_lock);
Josef Bacik606686e2012-06-04 14:03:51 -04001207 printk_ratelimited_in_rcu(KERN_ERR
Frank Holtonefe120a2013-12-20 11:37:06 -05001208 "BTRFS: fixed up error at logical %llu on dev %s\n",
Geert Uytterhoevenc1c9ff72013-08-20 13:20:07 +02001209 logical, rcu_str_deref(dev->name));
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001210 }
1211 } else {
1212did_not_correct_error:
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001213 spin_lock(&sctx->stat_lock);
1214 sctx->stat.uncorrectable_errors++;
1215 spin_unlock(&sctx->stat_lock);
Josef Bacik606686e2012-06-04 14:03:51 -04001216 printk_ratelimited_in_rcu(KERN_ERR
Frank Holtonefe120a2013-12-20 11:37:06 -05001217 "BTRFS: unable to fixup (regular) error at logical %llu on dev %s\n",
Geert Uytterhoevenc1c9ff72013-08-20 13:20:07 +02001218 logical, rcu_str_deref(dev->name));
Arne Jansena2de7332011-03-08 14:14:00 +01001219 }
1220
1221out:
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001222 if (sblocks_for_recheck) {
1223 for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
1224 mirror_index++) {
1225 struct scrub_block *sblock = sblocks_for_recheck +
1226 mirror_index;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001227 struct scrub_recover *recover;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001228 int page_index;
1229
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001230 for (page_index = 0; page_index < sblock->page_count;
1231 page_index++) {
1232 sblock->pagev[page_index]->sblock = NULL;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001233 recover = sblock->pagev[page_index]->recover;
1234 if (recover) {
1235 scrub_put_recover(recover);
1236 sblock->pagev[page_index]->recover =
1237 NULL;
1238 }
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001239 scrub_page_put(sblock->pagev[page_index]);
1240 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001241 }
1242 kfree(sblocks_for_recheck);
1243 }
1244
1245 return 0;
Arne Jansena2de7332011-03-08 14:14:00 +01001246}
1247
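/*
 * Editor's note: the "mirror" count returned below is the number of
 * independent ways the data can be obtained.  For RAID6 (a Q stripe is
 * present in the raid_map) that is 3: the data stripe itself, a rebuild
 * via the P parity, and a rebuild via the Q parity.  For RAID5 it is 2,
 * and for the mirrored/striped profiles it is simply the stripe count
 * returned by the mapping call (bbio->num_stripes).
 */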
Miao Xieaf8e2d12014-10-23 14:42:50 +08001248static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio, u64 *raid_map)
1249{
1250 if (raid_map) {
1251 if (raid_map[bbio->num_stripes - 1] == RAID6_Q_STRIPE)
1252 return 3;
1253 else
1254 return 2;
1255 } else {
1256 return (int)bbio->num_stripes;
1257 }
1258}
1259
1260static inline void scrub_stripe_index_and_offset(u64 logical, u64 *raid_map,
1261 u64 mapped_length,
1262 int nstripes, int mirror,
1263 int *stripe_index,
1264 u64 *stripe_offset)
1265{
1266 int i;
1267
1268 if (raid_map) {
1269 /* RAID5/6 */
1270 for (i = 0; i < nstripes; i++) {
1271 if (raid_map[i] == RAID6_Q_STRIPE ||
1272 raid_map[i] == RAID5_P_STRIPE)
1273 continue;
1274
1275 if (logical >= raid_map[i] &&
1276 logical < raid_map[i] + mapped_length)
1277 break;
1278 }
1279
1280 *stripe_index = i;
1281 *stripe_offset = logical - raid_map[i];
1282 } else {
1283		/* The other RAID types */
1284 *stripe_index = mirror;
1285 *stripe_offset = 0;
1286 }
1287}
1288
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001289static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
Stefan Behrens3ec706c2012-11-05 15:46:42 +01001290 struct btrfs_fs_info *fs_info,
Stefan Behrensff023aa2012-11-06 11:43:11 +01001291 struct scrub_block *original_sblock,
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001292 u64 length, u64 logical,
1293 struct scrub_block *sblocks_for_recheck)
Arne Jansena2de7332011-03-08 14:14:00 +01001294{
Miao Xieaf8e2d12014-10-23 14:42:50 +08001295 struct scrub_recover *recover;
1296 struct btrfs_bio *bbio;
1297 u64 *raid_map;
1298 u64 sublen;
1299 u64 mapped_length;
1300 u64 stripe_offset;
1301 int stripe_index;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001302 int page_index;
1303 int mirror_index;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001304 int nmirrors;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001305 int ret;
1306
1307 /*
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001308 * note: the two members ref_count and outstanding_pages
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001309 * are not used (and not set) in the blocks that are used for
1310 * the recheck procedure
1311 */
1312
1313 page_index = 0;
1314 while (length > 0) {
Miao Xieaf8e2d12014-10-23 14:42:50 +08001315 sublen = min_t(u64, length, PAGE_SIZE);
1316 mapped_length = sublen;
1317 bbio = NULL;
1318 raid_map = NULL;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001319
1320 /*
1321 * with a length of PAGE_SIZE, each returned stripe
1322 * represents one mirror
1323 */
Miao Xieaf8e2d12014-10-23 14:42:50 +08001324 ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical,
1325 &mapped_length, &bbio, 0, &raid_map);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001326 if (ret || !bbio || mapped_length < sublen) {
1327 kfree(bbio);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001328 kfree(raid_map);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001329 return -EIO;
1330 }
1331
Miao Xieaf8e2d12014-10-23 14:42:50 +08001332 recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
1333 if (!recover) {
1334 kfree(bbio);
1335 kfree(raid_map);
1336 return -ENOMEM;
1337 }
1338
1339 atomic_set(&recover->refs, 1);
1340 recover->bbio = bbio;
1341 recover->raid_map = raid_map;
1342 recover->map_length = mapped_length;
1343
Stefan Behrensff023aa2012-11-06 11:43:11 +01001344 BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001345
1346 nmirrors = scrub_nr_raid_mirrors(bbio, raid_map);
1347 for (mirror_index = 0; mirror_index < nmirrors;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001348 mirror_index++) {
1349 struct scrub_block *sblock;
1350 struct scrub_page *page;
1351
1352 if (mirror_index >= BTRFS_MAX_MIRRORS)
1353 continue;
1354
1355 sblock = sblocks_for_recheck + mirror_index;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001356 sblock->sctx = sctx;
1357 page = kzalloc(sizeof(*page), GFP_NOFS);
1358 if (!page) {
1359leave_nomem:
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001360 spin_lock(&sctx->stat_lock);
1361 sctx->stat.malloc_errors++;
1362 spin_unlock(&sctx->stat_lock);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001363 scrub_put_recover(recover);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001364 return -ENOMEM;
1365 }
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001366 scrub_page_get(page);
1367 sblock->pagev[page_index] = page;
1368 page->logical = logical;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001369
1370 scrub_stripe_index_and_offset(logical, raid_map,
1371 mapped_length,
1372 bbio->num_stripes,
1373 mirror_index,
1374 &stripe_index,
1375 &stripe_offset);
1376 page->physical = bbio->stripes[stripe_index].physical +
1377 stripe_offset;
1378 page->dev = bbio->stripes[stripe_index].dev;
1379
Stefan Behrensff023aa2012-11-06 11:43:11 +01001380 BUG_ON(page_index >= original_sblock->page_count);
1381 page->physical_for_dev_replace =
1382 original_sblock->pagev[page_index]->
1383 physical_for_dev_replace;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001384 /* for missing devices, dev->bdev is NULL */
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001385 page->mirror_num = mirror_index + 1;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001386 sblock->page_count++;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001387 page->page = alloc_page(GFP_NOFS);
1388 if (!page->page)
1389 goto leave_nomem;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001390
1391 scrub_get_recover(recover);
1392 page->recover = recover;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001393 }
Miao Xieaf8e2d12014-10-23 14:42:50 +08001394 scrub_put_recover(recover);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001395 length -= sublen;
1396 logical += sublen;
1397 page_index++;
1398 }
1399
1400 return 0;
1401}
1402
Miao Xieaf8e2d12014-10-23 14:42:50 +08001403struct scrub_bio_ret {
1404 struct completion event;
1405 int error;
1406};
1407
1408static void scrub_bio_wait_endio(struct bio *bio, int error)
1409{
1410 struct scrub_bio_ret *ret = bio->bi_private;
1411
1412 ret->error = error;
1413 complete(&ret->event);
1414}
1415
1416static inline int scrub_is_page_on_raid56(struct scrub_page *page)
1417{
1418 return page->recover && page->recover->raid_map;
1419}
1420
1421static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
1422 struct bio *bio,
1423 struct scrub_page *page)
1424{
1425 struct scrub_bio_ret done;
1426 int ret;
1427
1428 init_completion(&done.event);
1429 done.error = 0;
1430 bio->bi_iter.bi_sector = page->logical >> 9;
1431 bio->bi_private = &done;
1432 bio->bi_end_io = scrub_bio_wait_endio;
1433
1434 ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio,
1435 page->recover->raid_map,
1436 page->recover->map_length,
1437 page->mirror_num, 1);
1438 if (ret)
1439 return ret;
1440
1441 wait_for_completion(&done.event);
1442 if (done.error)
1443 return -EIO;
1444
1445 return 0;
1446}
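/*
 * Editor's note: the two helpers above turn the asynchronous raid56
 * recovery read into a synchronous one.  The bio gets an end_io handler
 * that only records the error and signals a completion, is handed to
 * raid56_parity_recover(), and the helper then sleeps in
 * wait_for_completion() until the rebuild of that page has finished.
 * This lets scrub_recheck_block() treat raid56-backed pages and plain
 * mirrored pages the same way.
 */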
1447
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001448/*
1449 * this function will check the on disk data for checksum errors, header
1450 * errors and read I/O errors. If any I/O errors happen, the exact pages
1451 * which are errored are marked as being bad. The goal is to enable scrub
1452 * to take those pages that are not errored from all the mirrors so that
1453 * the pages that are errored in the just handled mirror can be repaired.
1454 */
Stefan Behrens34f5c8e2012-11-02 16:16:26 +01001455static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
1456 struct scrub_block *sblock, int is_metadata,
1457 int have_csum, u8 *csum, u64 generation,
Miao Xieaf8e2d12014-10-23 14:42:50 +08001458 u16 csum_size, int retry_failed_mirror)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001459{
1460 int page_num;
1461
1462 sblock->no_io_error_seen = 1;
1463 sblock->header_error = 0;
1464 sblock->checksum_error = 0;
1465
1466 for (page_num = 0; page_num < sblock->page_count; page_num++) {
1467 struct bio *bio;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001468 struct scrub_page *page = sblock->pagev[page_num];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001469
Stefan Behrens442a4f62012-05-25 16:06:08 +02001470 if (page->dev->bdev == NULL) {
Stefan Behrensea9947b2012-05-04 15:16:07 -04001471 page->io_error = 1;
1472 sblock->no_io_error_seen = 0;
1473 continue;
1474 }
1475
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001476 WARN_ON(!page->page);
Chris Mason9be33952013-05-17 18:30:14 -04001477 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
Stefan Behrens34f5c8e2012-11-02 16:16:26 +01001478 if (!bio) {
1479 page->io_error = 1;
1480 sblock->no_io_error_seen = 0;
1481 continue;
1482 }
Stefan Behrens442a4f62012-05-25 16:06:08 +02001483 bio->bi_bdev = page->dev->bdev;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001484
Stefan Behrens34f5c8e2012-11-02 16:16:26 +01001485 bio_add_page(bio, page->page, PAGE_SIZE, 0);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001486 if (!retry_failed_mirror && scrub_is_page_on_raid56(page)) {
1487 if (scrub_submit_raid56_bio_wait(fs_info, bio, page))
1488 sblock->no_io_error_seen = 0;
1489 } else {
1490 bio->bi_iter.bi_sector = page->physical >> 9;
1491
1492 if (btrfsic_submit_bio_wait(READ, bio))
1493 sblock->no_io_error_seen = 0;
1494 }
Kent Overstreet33879d42013-11-23 22:33:32 -08001495
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001496 bio_put(bio);
1497 }
1498
1499 if (sblock->no_io_error_seen)
1500 scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
1501 have_csum, csum, generation,
1502 csum_size);
1503
Stefan Behrens34f5c8e2012-11-02 16:16:26 +01001504 return;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001505}
1506
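/*
 * Editor's note: the fsid is compared against the fs_devices that the
 * page's device belongs to rather than against fs_info->fsid, so that
 * blocks on seed devices (whose metadata carries the seed filesystem's
 * fsid) are not flagged as corrupted.
 */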
Miao Xie17a9be22014-07-24 11:37:08 +08001507static inline int scrub_check_fsid(u8 fsid[],
1508 struct scrub_page *spage)
1509{
1510 struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
1511 int ret;
1512
1513 ret = memcmp(fsid, fs_devices->fsid, BTRFS_UUID_SIZE);
1514 return !ret;
1515}
1516
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001517static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
1518 struct scrub_block *sblock,
1519 int is_metadata, int have_csum,
1520 const u8 *csum, u64 generation,
1521 u16 csum_size)
1522{
1523 int page_num;
1524 u8 calculated_csum[BTRFS_CSUM_SIZE];
1525 u32 crc = ~(u32)0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001526 void *mapped_buffer;
1527
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001528 WARN_ON(!sblock->pagev[0]->page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001529 if (is_metadata) {
1530 struct btrfs_header *h;
1531
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001532 mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001533 h = (struct btrfs_header *)mapped_buffer;
1534
Qu Wenruo3cae2102013-07-16 11:19:18 +08001535 if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h) ||
Miao Xie17a9be22014-07-24 11:37:08 +08001536 !scrub_check_fsid(h->fsid, sblock->pagev[0]) ||
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001537 memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
Stefan Behrens442a4f62012-05-25 16:06:08 +02001538 BTRFS_UUID_SIZE)) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001539 sblock->header_error = 1;
Qu Wenruo3cae2102013-07-16 11:19:18 +08001540 } else if (generation != btrfs_stack_header_generation(h)) {
Stefan Behrens442a4f62012-05-25 16:06:08 +02001541 sblock->header_error = 1;
1542 sblock->generation_error = 1;
1543 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001544 csum = h->csum;
1545 } else {
1546 if (!have_csum)
1547 return;
1548
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001549 mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001550 }
1551
1552 for (page_num = 0;;) {
1553 if (page_num == 0 && is_metadata)
Liu Bob0496682013-03-14 14:57:45 +00001554 crc = btrfs_csum_data(
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001555 ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
1556 crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
1557 else
Liu Bob0496682013-03-14 14:57:45 +00001558 crc = btrfs_csum_data(mapped_buffer, crc, PAGE_SIZE);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001559
Linus Torvalds9613beb2012-03-30 12:44:29 -07001560 kunmap_atomic(mapped_buffer);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001561 page_num++;
1562 if (page_num >= sblock->page_count)
1563 break;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001564 WARN_ON(!sblock->pagev[page_num]->page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001565
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001566 mapped_buffer = kmap_atomic(sblock->pagev[page_num]->page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001567 }
1568
1569 btrfs_csum_final(crc, calculated_csum);
1570 if (memcmp(calculated_csum, csum, csum_size))
1571 sblock->checksum_error = 1;
1572}
1573
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001574static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
1575 struct scrub_block *sblock_good,
1576 int force_write)
1577{
1578 int page_num;
1579 int ret = 0;
1580
1581 for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
1582 int ret_sub;
1583
1584 ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
1585 sblock_good,
1586 page_num,
1587 force_write);
1588 if (ret_sub)
1589 ret = ret_sub;
1590 }
1591
1592 return ret;
1593}
1594
1595static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1596 struct scrub_block *sblock_good,
1597 int page_num, int force_write)
1598{
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001599 struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1600 struct scrub_page *page_good = sblock_good->pagev[page_num];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001601
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001602 BUG_ON(page_bad->page == NULL);
1603 BUG_ON(page_good->page == NULL);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001604 if (force_write || sblock_bad->header_error ||
1605 sblock_bad->checksum_error || page_bad->io_error) {
1606 struct bio *bio;
1607 int ret;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001608
Stefan Behrensff023aa2012-11-06 11:43:11 +01001609 if (!page_bad->dev->bdev) {
Frank Holtonefe120a2013-12-20 11:37:06 -05001610 printk_ratelimited(KERN_WARNING "BTRFS: "
1611 "scrub_repair_page_from_good_copy(bdev == NULL) "
1612 "is unexpected!\n");
Stefan Behrensff023aa2012-11-06 11:43:11 +01001613 return -EIO;
1614 }
1615
Chris Mason9be33952013-05-17 18:30:14 -04001616 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
Tsutomu Itohe627ee72012-04-12 16:03:56 -04001617 if (!bio)
1618 return -EIO;
Stefan Behrens442a4f62012-05-25 16:06:08 +02001619 bio->bi_bdev = page_bad->dev->bdev;
Kent Overstreet4f024f32013-10-11 15:44:27 -07001620 bio->bi_iter.bi_sector = page_bad->physical >> 9;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001621
1622 ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
1623 if (PAGE_SIZE != ret) {
1624 bio_put(bio);
1625 return -EIO;
1626 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001627
Kent Overstreet33879d42013-11-23 22:33:32 -08001628 if (btrfsic_submit_bio_wait(WRITE, bio)) {
Stefan Behrens442a4f62012-05-25 16:06:08 +02001629 btrfs_dev_stat_inc_and_print(page_bad->dev,
1630 BTRFS_DEV_STAT_WRITE_ERRS);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001631 btrfs_dev_replace_stats_inc(
1632 &sblock_bad->sctx->dev_root->fs_info->
1633 dev_replace.num_write_errors);
Stefan Behrens442a4f62012-05-25 16:06:08 +02001634 bio_put(bio);
1635 return -EIO;
1636 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001637 bio_put(bio);
1638 }
1639
1640 return 0;
1641}
1642
Stefan Behrensff023aa2012-11-06 11:43:11 +01001643static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
1644{
1645 int page_num;
1646
1647 for (page_num = 0; page_num < sblock->page_count; page_num++) {
1648 int ret;
1649
1650 ret = scrub_write_page_to_dev_replace(sblock, page_num);
1651 if (ret)
1652 btrfs_dev_replace_stats_inc(
1653 &sblock->sctx->dev_root->fs_info->dev_replace.
1654 num_write_errors);
1655 }
1656}
1657
1658static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
1659 int page_num)
1660{
1661 struct scrub_page *spage = sblock->pagev[page_num];
1662
1663 BUG_ON(spage->page == NULL);
1664 if (spage->io_error) {
1665 void *mapped_buffer = kmap_atomic(spage->page);
1666
1667 memset(mapped_buffer, 0, PAGE_CACHE_SIZE);
1668 flush_dcache_page(spage->page);
1669 kunmap_atomic(mapped_buffer);
1670 }
1671 return scrub_add_page_to_wr_bio(sblock->sctx, spage);
1672}
1673
1674static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
1675 struct scrub_page *spage)
1676{
1677 struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1678 struct scrub_bio *sbio;
1679 int ret;
1680
1681 mutex_lock(&wr_ctx->wr_lock);
1682again:
1683 if (!wr_ctx->wr_curr_bio) {
1684 wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
1685 GFP_NOFS);
1686 if (!wr_ctx->wr_curr_bio) {
1687 mutex_unlock(&wr_ctx->wr_lock);
1688 return -ENOMEM;
1689 }
1690 wr_ctx->wr_curr_bio->sctx = sctx;
1691 wr_ctx->wr_curr_bio->page_count = 0;
1692 }
1693 sbio = wr_ctx->wr_curr_bio;
1694 if (sbio->page_count == 0) {
1695 struct bio *bio;
1696
1697 sbio->physical = spage->physical_for_dev_replace;
1698 sbio->logical = spage->logical;
1699 sbio->dev = wr_ctx->tgtdev;
1700 bio = sbio->bio;
1701 if (!bio) {
Chris Mason9be33952013-05-17 18:30:14 -04001702 bio = btrfs_io_bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001703 if (!bio) {
1704 mutex_unlock(&wr_ctx->wr_lock);
1705 return -ENOMEM;
1706 }
1707 sbio->bio = bio;
1708 }
1709
1710 bio->bi_private = sbio;
1711 bio->bi_end_io = scrub_wr_bio_end_io;
1712 bio->bi_bdev = sbio->dev->bdev;
Kent Overstreet4f024f32013-10-11 15:44:27 -07001713 bio->bi_iter.bi_sector = sbio->physical >> 9;
Stefan Behrensff023aa2012-11-06 11:43:11 +01001714 sbio->err = 0;
1715 } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1716 spage->physical_for_dev_replace ||
1717 sbio->logical + sbio->page_count * PAGE_SIZE !=
1718 spage->logical) {
1719 scrub_wr_submit(sctx);
1720 goto again;
1721 }
1722
1723 ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1724 if (ret != PAGE_SIZE) {
1725 if (sbio->page_count < 1) {
1726 bio_put(sbio->bio);
1727 sbio->bio = NULL;
1728 mutex_unlock(&wr_ctx->wr_lock);
1729 return -EIO;
1730 }
1731 scrub_wr_submit(sctx);
1732 goto again;
1733 }
1734
1735 sbio->pagev[sbio->page_count] = spage;
1736 scrub_page_get(spage);
1737 sbio->page_count++;
1738 if (sbio->page_count == wr_ctx->pages_per_wr_bio)
1739 scrub_wr_submit(sctx);
1740 mutex_unlock(&wr_ctx->wr_lock);
1741
1742 return 0;
1743}
1744
1745static void scrub_wr_submit(struct scrub_ctx *sctx)
1746{
1747 struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1748 struct scrub_bio *sbio;
1749
1750 if (!wr_ctx->wr_curr_bio)
1751 return;
1752
1753 sbio = wr_ctx->wr_curr_bio;
1754 wr_ctx->wr_curr_bio = NULL;
1755 WARN_ON(!sbio->bio->bi_bdev);
1756 scrub_pending_bio_inc(sctx);
1757 /* process all writes in a single worker thread. Then the block layer
1758 * orders the requests before sending them to the driver which
1759 * doubled the write performance on spinning disks when measured
1760 * with Linux 3.5 */
1761 btrfsic_submit_bio(WRITE, sbio->bio);
1762}
1763
1764static void scrub_wr_bio_end_io(struct bio *bio, int err)
1765{
1766 struct scrub_bio *sbio = bio->bi_private;
1767 struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
1768
1769 sbio->err = err;
1770 sbio->bio = bio;
1771
Liu Bo9e0af232014-08-15 23:36:53 +08001772 btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
1773 scrub_wr_bio_end_io_worker, NULL, NULL);
Qu Wenruo0339ef22014-02-28 10:46:17 +08001774 btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001775}
1776
1777static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
1778{
1779 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
1780 struct scrub_ctx *sctx = sbio->sctx;
1781 int i;
1782
1783 WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
1784 if (sbio->err) {
1785 struct btrfs_dev_replace *dev_replace =
1786 &sbio->sctx->dev_root->fs_info->dev_replace;
1787
1788 for (i = 0; i < sbio->page_count; i++) {
1789 struct scrub_page *spage = sbio->pagev[i];
1790
1791 spage->io_error = 1;
1792 btrfs_dev_replace_stats_inc(&dev_replace->
1793 num_write_errors);
1794 }
1795 }
1796
1797 for (i = 0; i < sbio->page_count; i++)
1798 scrub_page_put(sbio->pagev[i]);
1799
1800 bio_put(sbio->bio);
1801 kfree(sbio);
1802 scrub_pending_bio_dec(sctx);
1803}
1804
1805static int scrub_checksum(struct scrub_block *sblock)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001806{
1807 u64 flags;
1808 int ret;
1809
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001810 WARN_ON(sblock->page_count < 1);
1811 flags = sblock->pagev[0]->flags;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001812 ret = 0;
1813 if (flags & BTRFS_EXTENT_FLAG_DATA)
1814 ret = scrub_checksum_data(sblock);
1815 else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1816 ret = scrub_checksum_tree_block(sblock);
1817 else if (flags & BTRFS_EXTENT_FLAG_SUPER)
1818 (void)scrub_checksum_super(sblock);
1819 else
1820 WARN_ON(1);
1821 if (ret)
1822 scrub_handle_errored_block(sblock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001823
1824 return ret;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001825}
1826
1827static int scrub_checksum_data(struct scrub_block *sblock)
1828{
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001829 struct scrub_ctx *sctx = sblock->sctx;
Arne Jansena2de7332011-03-08 14:14:00 +01001830 u8 csum[BTRFS_CSUM_SIZE];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001831 u8 *on_disk_csum;
1832 struct page *page;
1833 void *buffer;
Arne Jansena2de7332011-03-08 14:14:00 +01001834 u32 crc = ~(u32)0;
1835 int fail = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001836 u64 len;
1837 int index;
Arne Jansena2de7332011-03-08 14:14:00 +01001838
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001839 BUG_ON(sblock->page_count < 1);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001840 if (!sblock->pagev[0]->have_csum)
Arne Jansena2de7332011-03-08 14:14:00 +01001841 return 0;
1842
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001843 on_disk_csum = sblock->pagev[0]->csum;
1844 page = sblock->pagev[0]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07001845 buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001846
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001847 len = sctx->sectorsize;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001848 index = 0;
1849 for (;;) {
1850 u64 l = min_t(u64, len, PAGE_SIZE);
1851
Liu Bob0496682013-03-14 14:57:45 +00001852 crc = btrfs_csum_data(buffer, crc, l);
Linus Torvalds9613beb2012-03-30 12:44:29 -07001853 kunmap_atomic(buffer);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001854 len -= l;
1855 if (len == 0)
1856 break;
1857 index++;
1858 BUG_ON(index >= sblock->page_count);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001859 BUG_ON(!sblock->pagev[index]->page);
1860 page = sblock->pagev[index]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07001861 buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001862 }
1863
Arne Jansena2de7332011-03-08 14:14:00 +01001864 btrfs_csum_final(crc, csum);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001865 if (memcmp(csum, on_disk_csum, sctx->csum_size))
Arne Jansena2de7332011-03-08 14:14:00 +01001866 fail = 1;
1867
Arne Jansena2de7332011-03-08 14:14:00 +01001868 return fail;
1869}
1870
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001871static int scrub_checksum_tree_block(struct scrub_block *sblock)
Arne Jansena2de7332011-03-08 14:14:00 +01001872{
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001873 struct scrub_ctx *sctx = sblock->sctx;
Arne Jansena2de7332011-03-08 14:14:00 +01001874 struct btrfs_header *h;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01001875 struct btrfs_root *root = sctx->dev_root;
Arne Jansena2de7332011-03-08 14:14:00 +01001876 struct btrfs_fs_info *fs_info = root->fs_info;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001877 u8 calculated_csum[BTRFS_CSUM_SIZE];
1878 u8 on_disk_csum[BTRFS_CSUM_SIZE];
1879 struct page *page;
1880 void *mapped_buffer;
1881 u64 mapped_size;
1882 void *p;
Arne Jansena2de7332011-03-08 14:14:00 +01001883 u32 crc = ~(u32)0;
1884 int fail = 0;
1885 int crc_fail = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001886 u64 len;
1887 int index;
1888
1889 BUG_ON(sblock->page_count < 1);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001890 page = sblock->pagev[0]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07001891 mapped_buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001892 h = (struct btrfs_header *)mapped_buffer;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001893 memcpy(on_disk_csum, h->csum, sctx->csum_size);
Arne Jansena2de7332011-03-08 14:14:00 +01001894
1895 /*
1896 * we don't use the getter functions here, as we
1897 * a) don't have an extent buffer and
1898 * b) the page is already kmapped
1899 */
Arne Jansena2de7332011-03-08 14:14:00 +01001900
Qu Wenruo3cae2102013-07-16 11:19:18 +08001901 if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
Arne Jansena2de7332011-03-08 14:14:00 +01001902 ++fail;
1903
Qu Wenruo3cae2102013-07-16 11:19:18 +08001904 if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h))
Arne Jansena2de7332011-03-08 14:14:00 +01001905 ++fail;
1906
Miao Xie17a9be22014-07-24 11:37:08 +08001907 if (!scrub_check_fsid(h->fsid, sblock->pagev[0]))
Arne Jansena2de7332011-03-08 14:14:00 +01001908 ++fail;
1909
1910 if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1911 BTRFS_UUID_SIZE))
1912 ++fail;
1913
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001914 len = sctx->nodesize - BTRFS_CSUM_SIZE;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001915 mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1916 p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1917 index = 0;
1918 for (;;) {
1919 u64 l = min_t(u64, len, mapped_size);
1920
Liu Bob0496682013-03-14 14:57:45 +00001921 crc = btrfs_csum_data(p, crc, l);
Linus Torvalds9613beb2012-03-30 12:44:29 -07001922 kunmap_atomic(mapped_buffer);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001923 len -= l;
1924 if (len == 0)
1925 break;
1926 index++;
1927 BUG_ON(index >= sblock->page_count);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001928 BUG_ON(!sblock->pagev[index]->page);
1929 page = sblock->pagev[index]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07001930 mapped_buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001931 mapped_size = PAGE_SIZE;
1932 p = mapped_buffer;
1933 }
1934
1935 btrfs_csum_final(crc, calculated_csum);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001936 if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
Arne Jansena2de7332011-03-08 14:14:00 +01001937 ++crc_fail;
1938
Arne Jansena2de7332011-03-08 14:14:00 +01001939 return fail || crc_fail;
1940}
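/*
 * Editor's sketch (not part of this file): the checksum scheme that
 * scrub_checksum_data()/scrub_checksum_tree_block() implement, redone as
 * self-contained user-space C so it can be tested in isolation.  btrfs
 * (crc32c only, in this era) seeds the CRC with ~0, inverts it in
 * btrfs_csum_final() and stores it little-endian at the start of the csum
 * field; for metadata the CRC covers the node minus the embedded csum.
 * The bit-wise crc32c() below is a slow reference implementation used
 * only to keep the sketch free of kernel dependencies.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define BTRFS_CSUM_SIZE	32

static uint32_t crc32c(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0x82F63B78u & -(crc & 1u));
	}
	return crc;
}

/* returns 0 if the node's embedded checksum matches, 1 on mismatch */
static int check_tree_block_csum(const uint8_t *node, size_t nodesize)
{
	uint32_t crc = ~(uint32_t)0;
	uint8_t calculated[4];

	crc = crc32c(crc, node + BTRFS_CSUM_SIZE, nodesize - BTRFS_CSUM_SIZE);
	crc = ~crc;				/* btrfs_csum_final() */
	calculated[0] = crc & 0xff;		/* stored little-endian */
	calculated[1] = (crc >> 8) & 0xff;
	calculated[2] = (crc >> 16) & 0xff;
	calculated[3] = (crc >> 24) & 0xff;

	return memcmp(calculated, node, sizeof(calculated)) ? 1 : 0;
}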
1941
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001942static int scrub_checksum_super(struct scrub_block *sblock)
Arne Jansena2de7332011-03-08 14:14:00 +01001943{
1944 struct btrfs_super_block *s;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001945 struct scrub_ctx *sctx = sblock->sctx;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001946 u8 calculated_csum[BTRFS_CSUM_SIZE];
1947 u8 on_disk_csum[BTRFS_CSUM_SIZE];
1948 struct page *page;
1949 void *mapped_buffer;
1950 u64 mapped_size;
1951 void *p;
Arne Jansena2de7332011-03-08 14:14:00 +01001952 u32 crc = ~(u32)0;
Stefan Behrens442a4f62012-05-25 16:06:08 +02001953 int fail_gen = 0;
1954 int fail_cor = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001955 u64 len;
1956 int index;
Arne Jansena2de7332011-03-08 14:14:00 +01001957
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001958 BUG_ON(sblock->page_count < 1);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001959 page = sblock->pagev[0]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07001960 mapped_buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001961 s = (struct btrfs_super_block *)mapped_buffer;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001962 memcpy(on_disk_csum, s->csum, sctx->csum_size);
Arne Jansena2de7332011-03-08 14:14:00 +01001963
Qu Wenruo3cae2102013-07-16 11:19:18 +08001964 if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
Stefan Behrens442a4f62012-05-25 16:06:08 +02001965 ++fail_cor;
Arne Jansena2de7332011-03-08 14:14:00 +01001966
Qu Wenruo3cae2102013-07-16 11:19:18 +08001967 if (sblock->pagev[0]->generation != btrfs_super_generation(s))
Stefan Behrens442a4f62012-05-25 16:06:08 +02001968 ++fail_gen;
Arne Jansena2de7332011-03-08 14:14:00 +01001969
Miao Xie17a9be22014-07-24 11:37:08 +08001970 if (!scrub_check_fsid(s->fsid, sblock->pagev[0]))
Stefan Behrens442a4f62012-05-25 16:06:08 +02001971 ++fail_cor;
Arne Jansena2de7332011-03-08 14:14:00 +01001972
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001973 len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
1974 mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1975 p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1976 index = 0;
1977 for (;;) {
1978 u64 l = min_t(u64, len, mapped_size);
1979
Liu Bob0496682013-03-14 14:57:45 +00001980 crc = btrfs_csum_data(p, crc, l);
Linus Torvalds9613beb2012-03-30 12:44:29 -07001981 kunmap_atomic(mapped_buffer);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001982 len -= l;
1983 if (len == 0)
1984 break;
1985 index++;
1986 BUG_ON(index >= sblock->page_count);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001987 BUG_ON(!sblock->pagev[index]->page);
1988 page = sblock->pagev[index]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07001989 mapped_buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001990 mapped_size = PAGE_SIZE;
1991 p = mapped_buffer;
1992 }
1993
1994 btrfs_csum_final(crc, calculated_csum);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001995 if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
Stefan Behrens442a4f62012-05-25 16:06:08 +02001996 ++fail_cor;
Arne Jansena2de7332011-03-08 14:14:00 +01001997
Stefan Behrens442a4f62012-05-25 16:06:08 +02001998 if (fail_cor + fail_gen) {
Arne Jansena2de7332011-03-08 14:14:00 +01001999 /*
2000 * if we find an error in a super block, we just report it.
2001 * They will get written with the next transaction commit
2002 * anyway
2003 */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002004 spin_lock(&sctx->stat_lock);
2005 ++sctx->stat.super_errors;
2006 spin_unlock(&sctx->stat_lock);
Stefan Behrens442a4f62012-05-25 16:06:08 +02002007 if (fail_cor)
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002008 btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
Stefan Behrens442a4f62012-05-25 16:06:08 +02002009 BTRFS_DEV_STAT_CORRUPTION_ERRS);
2010 else
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002011 btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
Stefan Behrens442a4f62012-05-25 16:06:08 +02002012 BTRFS_DEV_STAT_GENERATION_ERRS);
Arne Jansena2de7332011-03-08 14:14:00 +01002013 }
2014
Stefan Behrens442a4f62012-05-25 16:06:08 +02002015 return fail_cor + fail_gen;
Arne Jansena2de7332011-03-08 14:14:00 +01002016}
2017
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002018static void scrub_block_get(struct scrub_block *sblock)
2019{
2020 atomic_inc(&sblock->ref_count);
2021}
2022
2023static void scrub_block_put(struct scrub_block *sblock)
2024{
2025 if (atomic_dec_and_test(&sblock->ref_count)) {
2026 int i;
2027
2028 for (i = 0; i < sblock->page_count; i++)
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002029 scrub_page_put(sblock->pagev[i]);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002030 kfree(sblock);
2031 }
2032}
2033
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002034static void scrub_page_get(struct scrub_page *spage)
2035{
2036 atomic_inc(&spage->ref_count);
2037}
2038
2039static void scrub_page_put(struct scrub_page *spage)
2040{
2041 if (atomic_dec_and_test(&spage->ref_count)) {
2042 if (spage->page)
2043 __free_page(spage->page);
2044 kfree(spage);
2045 }
2046}
2047
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002048static void scrub_submit(struct scrub_ctx *sctx)
Arne Jansena2de7332011-03-08 14:14:00 +01002049{
2050 struct scrub_bio *sbio;
2051
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002052 if (sctx->curr == -1)
Stefan Behrens1623ede2012-03-27 14:21:26 -04002053 return;
Arne Jansena2de7332011-03-08 14:14:00 +01002054
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002055 sbio = sctx->bios[sctx->curr];
2056 sctx->curr = -1;
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01002057 scrub_pending_bio_inc(sctx);
Arne Jansena2de7332011-03-08 14:14:00 +01002058
Stefan Behrensff023aa2012-11-06 11:43:11 +01002059 if (!sbio->bio->bi_bdev) {
2060 /*
2061 * this case should not happen. If btrfs_map_block() is
2062 * wrong, it could happen for dev-replace operations on
2063 * missing devices when no mirrors are available, but in
2064 * this case it should already fail the mount.
2065 * This case is handled correctly (but _very_ slowly).
2066 */
2067 printk_ratelimited(KERN_WARNING
Frank Holtonefe120a2013-12-20 11:37:06 -05002068 "BTRFS: scrub_submit(bio bdev == NULL) is unexpected!\n");
Stefan Behrensff023aa2012-11-06 11:43:11 +01002069 bio_endio(sbio->bio, -EIO);
2070 } else {
2071 btrfsic_submit_bio(READ, sbio->bio);
2072 }
Arne Jansena2de7332011-03-08 14:14:00 +01002073}
2074
Stefan Behrensff023aa2012-11-06 11:43:11 +01002075static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
2076 struct scrub_page *spage)
Arne Jansena2de7332011-03-08 14:14:00 +01002077{
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002078 struct scrub_block *sblock = spage->sblock;
Arne Jansena2de7332011-03-08 14:14:00 +01002079 struct scrub_bio *sbio;
Arne Jansen69f4cb52011-11-11 08:17:10 -05002080 int ret;
Arne Jansena2de7332011-03-08 14:14:00 +01002081
2082again:
2083 /*
2084 * grab a fresh bio or wait for one to become available
2085 */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002086 while (sctx->curr == -1) {
2087 spin_lock(&sctx->list_lock);
2088 sctx->curr = sctx->first_free;
2089 if (sctx->curr != -1) {
2090 sctx->first_free = sctx->bios[sctx->curr]->next_free;
2091 sctx->bios[sctx->curr]->next_free = -1;
2092 sctx->bios[sctx->curr]->page_count = 0;
2093 spin_unlock(&sctx->list_lock);
Arne Jansena2de7332011-03-08 14:14:00 +01002094 } else {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002095 spin_unlock(&sctx->list_lock);
2096 wait_event(sctx->list_wait, sctx->first_free != -1);
Arne Jansena2de7332011-03-08 14:14:00 +01002097 }
2098 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002099 sbio = sctx->bios[sctx->curr];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002100 if (sbio->page_count == 0) {
Arne Jansen69f4cb52011-11-11 08:17:10 -05002101 struct bio *bio;
2102
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002103 sbio->physical = spage->physical;
2104 sbio->logical = spage->logical;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002105 sbio->dev = spage->dev;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002106 bio = sbio->bio;
2107 if (!bio) {
Chris Mason9be33952013-05-17 18:30:14 -04002108 bio = btrfs_io_bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002109 if (!bio)
2110 return -ENOMEM;
2111 sbio->bio = bio;
2112 }
Arne Jansen69f4cb52011-11-11 08:17:10 -05002113
2114 bio->bi_private = sbio;
2115 bio->bi_end_io = scrub_bio_end_io;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002116 bio->bi_bdev = sbio->dev->bdev;
Kent Overstreet4f024f32013-10-11 15:44:27 -07002117 bio->bi_iter.bi_sector = sbio->physical >> 9;
Arne Jansen69f4cb52011-11-11 08:17:10 -05002118 sbio->err = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002119 } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
2120 spage->physical ||
2121 sbio->logical + sbio->page_count * PAGE_SIZE !=
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002122 spage->logical ||
2123 sbio->dev != spage->dev) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002124 scrub_submit(sctx);
Arne Jansen69f4cb52011-11-11 08:17:10 -05002125 goto again;
2126 }
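	/*
	 * Editor's note: pages are only appended to the current read bio
	 * while they stay physically and logically contiguous on the same
	 * device; any discontinuity (checked above) flushes the bio and
	 * the 'again' loop starts a fresh one for this page.
	 */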
2127
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002128 sbio->pagev[sbio->page_count] = spage;
2129 ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
2130 if (ret != PAGE_SIZE) {
2131 if (sbio->page_count < 1) {
2132 bio_put(sbio->bio);
2133 sbio->bio = NULL;
2134 return -EIO;
2135 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002136 scrub_submit(sctx);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002137 goto again;
Arne Jansena2de7332011-03-08 14:14:00 +01002138 }
Arne Jansen1bc87792011-05-28 21:57:55 +02002139
Stefan Behrensff023aa2012-11-06 11:43:11 +01002140 scrub_block_get(sblock); /* one for the page added to the bio */
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002141 atomic_inc(&sblock->outstanding_pages);
2142 sbio->page_count++;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002143 if (sbio->page_count == sctx->pages_per_rd_bio)
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002144 scrub_submit(sctx);
Arne Jansena2de7332011-03-08 14:14:00 +01002145
2146 return 0;
2147}
2148
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002149static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002150 u64 physical, struct btrfs_device *dev, u64 flags,
Stefan Behrensff023aa2012-11-06 11:43:11 +01002151 u64 gen, int mirror_num, u8 *csum, int force,
2152 u64 physical_for_dev_replace)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002153{
2154 struct scrub_block *sblock;
2155 int index;
2156
2157 sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
2158 if (!sblock) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002159 spin_lock(&sctx->stat_lock);
2160 sctx->stat.malloc_errors++;
2161 spin_unlock(&sctx->stat_lock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002162 return -ENOMEM;
2163 }
2164
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002165 /* one ref inside this function, plus one for each page added to
2166 * a bio later on */
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002167 atomic_set(&sblock->ref_count, 1);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002168 sblock->sctx = sctx;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002169 sblock->no_io_error_seen = 1;
2170
2171 for (index = 0; len > 0; index++) {
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002172 struct scrub_page *spage;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002173 u64 l = min_t(u64, len, PAGE_SIZE);
2174
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002175 spage = kzalloc(sizeof(*spage), GFP_NOFS);
2176 if (!spage) {
2177leave_nomem:
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002178 spin_lock(&sctx->stat_lock);
2179 sctx->stat.malloc_errors++;
2180 spin_unlock(&sctx->stat_lock);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002181 scrub_block_put(sblock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002182 return -ENOMEM;
2183 }
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002184 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2185 scrub_page_get(spage);
2186 sblock->pagev[index] = spage;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002187 spage->sblock = sblock;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002188 spage->dev = dev;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002189 spage->flags = flags;
2190 spage->generation = gen;
2191 spage->logical = logical;
2192 spage->physical = physical;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002193 spage->physical_for_dev_replace = physical_for_dev_replace;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002194 spage->mirror_num = mirror_num;
2195 if (csum) {
2196 spage->have_csum = 1;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002197 memcpy(spage->csum, csum, sctx->csum_size);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002198 } else {
2199 spage->have_csum = 0;
2200 }
2201 sblock->page_count++;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002202 spage->page = alloc_page(GFP_NOFS);
2203 if (!spage->page)
2204 goto leave_nomem;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002205 len -= l;
2206 logical += l;
2207 physical += l;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002208 physical_for_dev_replace += l;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002209 }
2210
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002211 WARN_ON(sblock->page_count == 0);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002212 for (index = 0; index < sblock->page_count; index++) {
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002213 struct scrub_page *spage = sblock->pagev[index];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002214 int ret;
2215
Stefan Behrensff023aa2012-11-06 11:43:11 +01002216 ret = scrub_add_page_to_rd_bio(sctx, spage);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002217 if (ret) {
2218 scrub_block_put(sblock);
2219 return ret;
2220 }
2221 }
2222
2223 if (force)
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002224 scrub_submit(sctx);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002225
2226 /* last one frees, either here or in bio completion for last page */
2227 scrub_block_put(sblock);
2228 return 0;
2229}
2230
2231static void scrub_bio_end_io(struct bio *bio, int err)
2232{
2233 struct scrub_bio *sbio = bio->bi_private;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002234 struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002235
2236 sbio->err = err;
2237 sbio->bio = bio;
2238
Qu Wenruo0339ef22014-02-28 10:46:17 +08002239 btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002240}
2241
2242static void scrub_bio_end_io_worker(struct btrfs_work *work)
2243{
2244 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002245 struct scrub_ctx *sctx = sbio->sctx;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002246 int i;
2247
Stefan Behrensff023aa2012-11-06 11:43:11 +01002248 BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002249 if (sbio->err) {
2250 for (i = 0; i < sbio->page_count; i++) {
2251 struct scrub_page *spage = sbio->pagev[i];
2252
2253 spage->io_error = 1;
2254 spage->sblock->no_io_error_seen = 0;
2255 }
2256 }
2257
2258 /* now complete the scrub_block items that have all pages completed */
2259 for (i = 0; i < sbio->page_count; i++) {
2260 struct scrub_page *spage = sbio->pagev[i];
2261 struct scrub_block *sblock = spage->sblock;
2262
2263 if (atomic_dec_and_test(&sblock->outstanding_pages))
2264 scrub_block_complete(sblock);
2265 scrub_block_put(sblock);
2266 }
2267
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002268 bio_put(sbio->bio);
2269 sbio->bio = NULL;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002270 spin_lock(&sctx->list_lock);
2271 sbio->next_free = sctx->first_free;
2272 sctx->first_free = sbio->index;
2273 spin_unlock(&sctx->list_lock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01002274
2275 if (sctx->is_dev_replace &&
2276 atomic_read(&sctx->wr_ctx.flush_all_writes)) {
2277 mutex_lock(&sctx->wr_ctx.wr_lock);
2278 scrub_wr_submit(sctx);
2279 mutex_unlock(&sctx->wr_ctx.wr_lock);
2280 }
2281
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01002282 scrub_pending_bio_dec(sctx);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002283}
2284
2285static void scrub_block_complete(struct scrub_block *sblock)
2286{
Stefan Behrensff023aa2012-11-06 11:43:11 +01002287 if (!sblock->no_io_error_seen) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002288 scrub_handle_errored_block(sblock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01002289 } else {
2290 /*
2291		 * if the block has a checksum error, it is written via the
2292		 * repair mechanism in the dev-replace case; otherwise it is
2293		 * written here in the dev-replace case.
2294 */
2295 if (!scrub_checksum(sblock) && sblock->sctx->is_dev_replace)
2296 scrub_write_block_to_dev_replace(sblock);
2297 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002298}
2299
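/*
 * Editor's note: sctx->csum_list holds btrfs_ordered_sum entries sorted by
 * bytenr, collected ahead of the reads while walking the stripe.  Because
 * the scrub advances through the stripe in order, sums that end before
 * 'logical' can never be needed again and are freed on the fly.  The
 * return value is 1 if a checksum for 'logical' was found and copied,
 * 0 otherwise.
 */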
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002300static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len,
Arne Jansena2de7332011-03-08 14:14:00 +01002301 u8 *csum)
2302{
2303 struct btrfs_ordered_sum *sum = NULL;
Miao Xief51a4a12013-06-19 10:36:09 +08002304 unsigned long index;
Arne Jansena2de7332011-03-08 14:14:00 +01002305 unsigned long num_sectors;
Arne Jansena2de7332011-03-08 14:14:00 +01002306
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002307 while (!list_empty(&sctx->csum_list)) {
2308 sum = list_first_entry(&sctx->csum_list,
Arne Jansena2de7332011-03-08 14:14:00 +01002309 struct btrfs_ordered_sum, list);
2310 if (sum->bytenr > logical)
2311 return 0;
2312 if (sum->bytenr + sum->len > logical)
2313 break;
2314
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002315 ++sctx->stat.csum_discards;
Arne Jansena2de7332011-03-08 14:14:00 +01002316 list_del(&sum->list);
2317 kfree(sum);
2318 sum = NULL;
2319 }
2320 if (!sum)
2321 return 0;
2322
Miao Xief51a4a12013-06-19 10:36:09 +08002323 index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002324 num_sectors = sum->len / sctx->sectorsize;
Miao Xief51a4a12013-06-19 10:36:09 +08002325 memcpy(csum, sum->sums + index, sctx->csum_size);
2326 if (index == num_sectors - 1) {
Arne Jansena2de7332011-03-08 14:14:00 +01002327 list_del(&sum->list);
2328 kfree(sum);
2329 }
Miao Xief51a4a12013-06-19 10:36:09 +08002330 return 1;
Arne Jansena2de7332011-03-08 14:14:00 +01002331}
2332
2333/* scrub extent tries to collect up to 64 kB for each bio */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002334static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002335 u64 physical, struct btrfs_device *dev, u64 flags,
Stefan Behrensff023aa2012-11-06 11:43:11 +01002336 u64 gen, int mirror_num, u64 physical_for_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01002337{
2338 int ret;
2339 u8 csum[BTRFS_CSUM_SIZE];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002340 u32 blocksize;
2341
2342 if (flags & BTRFS_EXTENT_FLAG_DATA) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002343 blocksize = sctx->sectorsize;
2344 spin_lock(&sctx->stat_lock);
2345 sctx->stat.data_extents_scrubbed++;
2346 sctx->stat.data_bytes_scrubbed += len;
2347 spin_unlock(&sctx->stat_lock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002348 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002349 blocksize = sctx->nodesize;
2350 spin_lock(&sctx->stat_lock);
2351 sctx->stat.tree_extents_scrubbed++;
2352 sctx->stat.tree_bytes_scrubbed += len;
2353 spin_unlock(&sctx->stat_lock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002354 } else {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002355 blocksize = sctx->sectorsize;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002356 WARN_ON(1);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002357 }
Arne Jansena2de7332011-03-08 14:14:00 +01002358
2359 while (len) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002360 u64 l = min_t(u64, len, blocksize);
Arne Jansena2de7332011-03-08 14:14:00 +01002361 int have_csum = 0;
2362
2363 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2364 /* push csums to sbio */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002365 have_csum = scrub_find_csum(sctx, logical, l, csum);
Arne Jansena2de7332011-03-08 14:14:00 +01002366 if (have_csum == 0)
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002367 ++sctx->stat.no_csum;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002368 if (sctx->is_dev_replace && !have_csum) {
2369 ret = copy_nocow_pages(sctx, logical, l,
2370 mirror_num,
2371 physical_for_dev_replace);
2372 goto behind_scrub_pages;
2373 }
Arne Jansena2de7332011-03-08 14:14:00 +01002374 }
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002375 ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
Stefan Behrensff023aa2012-11-06 11:43:11 +01002376 mirror_num, have_csum ? csum : NULL, 0,
2377 physical_for_dev_replace);
2378behind_scrub_pages:
Arne Jansena2de7332011-03-08 14:14:00 +01002379 if (ret)
2380 return ret;
2381 len -= l;
2382 logical += l;
2383 physical += l;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002384 physical_for_dev_replace += l;
Arne Jansena2de7332011-03-08 14:14:00 +01002385 }
2386 return 0;
2387}
2388
Wang Shilong3b080b22014-04-01 18:01:43 +08002389/*
2390 * Given a physical address, this will calculate its
2391 * logical offset. If this is a parity stripe, it will return
2392 * the leftmost data stripe's logical offset.
2393 *
2394 * Return 0 if it is a data stripe, 1 if it is a parity stripe.
2395 */
2396static int get_raid56_logic_offset(u64 physical, int num,
2397 struct map_lookup *map, u64 *offset)
2398{
2399 int i;
2400 int j = 0;
2401 u64 stripe_nr;
2402 u64 last_offset;
2403 int stripe_index;
2404 int rot;
2405
2406 last_offset = (physical - map->stripes[num].physical) *
2407 nr_data_stripes(map);
2408 *offset = last_offset;
2409 for (i = 0; i < nr_data_stripes(map); i++) {
2410 *offset = last_offset + i * map->stripe_len;
2411
2412 stripe_nr = *offset;
2413 do_div(stripe_nr, map->stripe_len);
2414 do_div(stripe_nr, nr_data_stripes(map));
2415
2416 /* Work out the disk rotation on this stripe-set */
2417 rot = do_div(stripe_nr, map->num_stripes);
2418		/* calculate which stripe this data is located on */
2419 rot += i;
Wang Shilonge4fbaee2014-04-11 18:32:25 +08002420 stripe_index = rot % map->num_stripes;
Wang Shilong3b080b22014-04-01 18:01:43 +08002421 if (stripe_index == num)
2422 return 0;
2423 if (stripe_index < num)
2424 j++;
2425 }
2426 *offset = last_offset + j * map->stripe_len;
2427 return 1;
2428}
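/*
 * Editor's sketch (not part of this file): the same rotation arithmetic as
 * get_raid56_logic_offset(), redone in plain user-space C so it can be
 * checked with concrete numbers.  The geometry below (RAID5 over three
 * devices, two data stripes per full stripe, 64K stripe_len) is an
 * assumption chosen only for the example; 'physical' is already taken
 * relative to the start of the device extent.
 */
#include <stdio.h>
#include <stdint.h>

static int raid56_logic_offset(uint64_t physical, int num, int num_stripes,
			       int nr_data, uint64_t stripe_len,
			       uint64_t *offset)
{
	uint64_t last_offset = physical * nr_data;
	int i, j = 0;

	for (i = 0; i < nr_data; i++) {
		uint64_t off = last_offset + i * stripe_len;
		uint64_t full_stripe = off / stripe_len / nr_data;
		int rot = (int)(full_stripe % num_stripes) + i;
		int stripe_index = rot % num_stripes;

		if (stripe_index == num) {
			*offset = off;
			return 0;	/* data stripe */
		}
		if (stripe_index < num)
			j++;
	}
	*offset = last_offset + j * stripe_len;
	return 1;			/* parity stripe */
}

int main(void)
{
	uint64_t off;

	/* chunk offset 0 on device 0: data stripe 0 of full stripe 0 */
	printf("%d %llu\n", raid56_logic_offset(0, 0, 3, 2, 65536, &off),
	       (unsigned long long)off);	/* prints "0 0" */
	/* chunk offset 64K on device 0: parity of full stripe 1 (128K..256K) */
	printf("%d %llu\n", raid56_logic_offset(65536, 0, 3, 2, 65536, &off),
	       (unsigned long long)off);	/* prints "1 131072" */
	return 0;
}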
2429
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002430static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002431 struct map_lookup *map,
2432 struct btrfs_device *scrub_dev,
Stefan Behrensff023aa2012-11-06 11:43:11 +01002433 int num, u64 base, u64 length,
2434 int is_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01002435{
2436 struct btrfs_path *path;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002437 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
Arne Jansena2de7332011-03-08 14:14:00 +01002438 struct btrfs_root *root = fs_info->extent_root;
2439 struct btrfs_root *csum_root = fs_info->csum_root;
2440 struct btrfs_extent_item *extent;
Arne Jansene7786c32011-05-28 20:58:38 +00002441 struct blk_plug plug;
Arne Jansena2de7332011-03-08 14:14:00 +01002442 u64 flags;
2443 int ret;
2444 int slot;
Arne Jansena2de7332011-03-08 14:14:00 +01002445 u64 nstripes;
Arne Jansena2de7332011-03-08 14:14:00 +01002446 struct extent_buffer *l;
2447 struct btrfs_key key;
2448 u64 physical;
2449 u64 logical;
Liu Bo625f1c8d2013-04-27 02:56:57 +00002450 u64 logic_end;
Wang Shilong3b080b22014-04-01 18:01:43 +08002451 u64 physical_end;
Arne Jansena2de7332011-03-08 14:14:00 +01002452 u64 generation;
Jan Schmidte12fa9c2011-06-17 15:55:21 +02002453 int mirror_num;
Arne Jansen7a262852011-06-10 12:39:23 +02002454 struct reada_control *reada1;
2455 struct reada_control *reada2;
2456 struct btrfs_key key_start;
2457 struct btrfs_key key_end;
Arne Jansena2de7332011-03-08 14:14:00 +01002458 u64 increment = map->stripe_len;
2459 u64 offset;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002460 u64 extent_logical;
2461 u64 extent_physical;
2462 u64 extent_len;
2463 struct btrfs_device *extent_dev;
2464 int extent_mirror_num;
Wang Shilong3b080b22014-04-01 18:01:43 +08002465 int stop_loop = 0;
David Woodhouse53b381b2013-01-29 18:40:14 -05002466
Arne Jansena2de7332011-03-08 14:14:00 +01002467 nstripes = length;
Wang Shilong3b080b22014-04-01 18:01:43 +08002468 physical = map->stripes[num].physical;
Arne Jansena2de7332011-03-08 14:14:00 +01002469 offset = 0;
2470 do_div(nstripes, map->stripe_len);
2471 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
2472 offset = map->stripe_len * num;
2473 increment = map->stripe_len * map->num_stripes;
Jan Schmidt193ea742011-06-13 19:56:54 +02002474 mirror_num = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01002475 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
2476 int factor = map->num_stripes / map->sub_stripes;
2477 offset = map->stripe_len * (num / map->sub_stripes);
2478 increment = map->stripe_len * factor;
Jan Schmidt193ea742011-06-13 19:56:54 +02002479 mirror_num = num % map->sub_stripes + 1;
Arne Jansena2de7332011-03-08 14:14:00 +01002480 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
2481 increment = map->stripe_len;
Jan Schmidt193ea742011-06-13 19:56:54 +02002482 mirror_num = num % map->num_stripes + 1;
Arne Jansena2de7332011-03-08 14:14:00 +01002483 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
2484 increment = map->stripe_len;
Jan Schmidt193ea742011-06-13 19:56:54 +02002485 mirror_num = num % map->num_stripes + 1;
Wang Shilong3b080b22014-04-01 18:01:43 +08002486 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
2487 BTRFS_BLOCK_GROUP_RAID6)) {
2488 get_raid56_logic_offset(physical, num, map, &offset);
2489 increment = map->stripe_len * nr_data_stripes(map);
2490 mirror_num = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01002491 } else {
2492 increment = map->stripe_len;
Jan Schmidt193ea742011-06-13 19:56:54 +02002493 mirror_num = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01002494 }
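	/*
	 * Editor's note: 'offset' is the logical offset, relative to the
	 * chunk start, of the first stripe this device holds (for RAID5/6
	 * the full stripe containing the device's first chunk of data or
	 * parity), 'increment' is the logical distance between consecutive
	 * stripes on this device, and 'mirror_num' records which copy this
	 * device carries so later repair can fetch the other copies.
	 */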
2495
2496 path = btrfs_alloc_path();
2497 if (!path)
2498 return -ENOMEM;
2499
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002500 /*
2501 * work on commit root. The related disk blocks are static as
2502	 * long as COW is applied. This means it is safe to rewrite
2503 * them to repair disk errors without any race conditions
2504 */
Arne Jansena2de7332011-03-08 14:14:00 +01002505 path->search_commit_root = 1;
2506 path->skip_locking = 1;
2507
2508 /*
Arne Jansen7a262852011-06-10 12:39:23 +02002509	 * trigger the readahead for the extent tree and csum tree and wait for
2510 * completion. During readahead, the scrub is officially paused
2511 * to not hold off transaction commits
Arne Jansena2de7332011-03-08 14:14:00 +01002512 */
2513 logical = base + offset;
Wang Shilong3b080b22014-04-01 18:01:43 +08002514 physical_end = physical + nstripes * map->stripe_len;
2515 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
2516 BTRFS_BLOCK_GROUP_RAID6)) {
2517 get_raid56_logic_offset(physical_end, num,
2518 map, &logic_end);
2519 logic_end += base;
2520 } else {
2521 logic_end = logical + increment * nstripes;
2522 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002523 wait_event(sctx->list_wait,
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01002524 atomic_read(&sctx->bios_in_flight) == 0);
Wang Shilongcb7ab022013-12-04 21:16:53 +08002525 scrub_blocked_if_needed(fs_info);
Arne Jansena2de7332011-03-08 14:14:00 +01002526
Arne Jansen7a262852011-06-10 12:39:23 +02002527 /* FIXME it might be better to start readahead at commit root */
2528 key_start.objectid = logical;
2529 key_start.type = BTRFS_EXTENT_ITEM_KEY;
2530 key_start.offset = (u64)0;
Wang Shilong3b080b22014-04-01 18:01:43 +08002531 key_end.objectid = logic_end;
Josef Bacik3173a182013-03-07 14:22:04 -05002532 key_end.type = BTRFS_METADATA_ITEM_KEY;
2533 key_end.offset = (u64)-1;
Arne Jansen7a262852011-06-10 12:39:23 +02002534 reada1 = btrfs_reada_add(root, &key_start, &key_end);
Arne Jansena2de7332011-03-08 14:14:00 +01002535
Arne Jansen7a262852011-06-10 12:39:23 +02002536 key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
2537 key_start.type = BTRFS_EXTENT_CSUM_KEY;
2538 key_start.offset = logical;
2539 key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
2540 key_end.type = BTRFS_EXTENT_CSUM_KEY;
Wang Shilong3b080b22014-04-01 18:01:43 +08002541 key_end.offset = logic_end;
Arne Jansen7a262852011-06-10 12:39:23 +02002542 reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);
Arne Jansena2de7332011-03-08 14:14:00 +01002543
Arne Jansen7a262852011-06-10 12:39:23 +02002544 if (!IS_ERR(reada1))
2545 btrfs_reada_wait(reada1);
2546 if (!IS_ERR(reada2))
2547 btrfs_reada_wait(reada2);
Arne Jansena2de7332011-03-08 14:14:00 +01002548
Arne Jansena2de7332011-03-08 14:14:00 +01002549
2550 /*
2551 * collect all data csums for the stripe to avoid seeking during
2552	 * the scrub. This might currently (crc32) end up being about 1MB
2553 */
Arne Jansene7786c32011-05-28 20:58:38 +00002554 blk_start_plug(&plug);
Arne Jansena2de7332011-03-08 14:14:00 +01002555
Arne Jansena2de7332011-03-08 14:14:00 +01002556 /*
2557 * now find all extents for each stripe and scrub them
2558 */
Arne Jansena2de7332011-03-08 14:14:00 +01002559 ret = 0;
Wang Shilong3b080b22014-04-01 18:01:43 +08002560 while (physical < physical_end) {
2561 /* for raid56, we skip parity stripe */
2562 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
2563 BTRFS_BLOCK_GROUP_RAID6)) {
2564 ret = get_raid56_logic_offset(physical, num,
2565 map, &logical);
2566 logical += base;
2567 if (ret)
2568 goto skip;
2569 }
Arne Jansena2de7332011-03-08 14:14:00 +01002570 /*
2571 * canceled?
2572 */
2573 if (atomic_read(&fs_info->scrub_cancel_req) ||
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002574 atomic_read(&sctx->cancel_req)) {
Arne Jansena2de7332011-03-08 14:14:00 +01002575 ret = -ECANCELED;
2576 goto out;
2577 }
2578 /*
2579 * check to see if we have to pause
2580 */
2581 if (atomic_read(&fs_info->scrub_pause_req)) {
2582 /* push queued extents */
Stefan Behrensff023aa2012-11-06 11:43:11 +01002583 atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002584 scrub_submit(sctx);
Stefan Behrensff023aa2012-11-06 11:43:11 +01002585 mutex_lock(&sctx->wr_ctx.wr_lock);
2586 scrub_wr_submit(sctx);
2587 mutex_unlock(&sctx->wr_ctx.wr_lock);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002588 wait_event(sctx->list_wait,
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01002589 atomic_read(&sctx->bios_in_flight) == 0);
Stefan Behrensff023aa2012-11-06 11:43:11 +01002590 atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
Wang Shilong3cb09292013-12-04 21:15:19 +08002591 scrub_blocked_if_needed(fs_info);
Arne Jansena2de7332011-03-08 14:14:00 +01002592 }
2593
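		/*
		 * with the SKINNY_METADATA incompat feature, tree blocks are
		 * recorded as METADATA_ITEM keys instead of EXTENT_ITEM keys,
		 * so search the extent tree with the matching key type
		 */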
Wang Shilong7c76edb2014-01-12 21:38:32 +08002594 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2595 key.type = BTRFS_METADATA_ITEM_KEY;
2596 else
2597 key.type = BTRFS_EXTENT_ITEM_KEY;
Arne Jansena2de7332011-03-08 14:14:00 +01002598 key.objectid = logical;
Liu Bo625f1c8d2013-04-27 02:56:57 +00002599 key.offset = (u64)-1;
Arne Jansena2de7332011-03-08 14:14:00 +01002600
2601 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2602 if (ret < 0)
2603 goto out;
Josef Bacik3173a182013-03-07 14:22:04 -05002604
Arne Jansen8c510322011-06-03 10:09:26 +02002605 if (ret > 0) {
Wang Shilongade2e0b2014-01-12 21:38:33 +08002606 ret = btrfs_previous_extent_item(root, path, 0);
Arne Jansena2de7332011-03-08 14:14:00 +01002607 if (ret < 0)
2608 goto out;
Arne Jansen8c510322011-06-03 10:09:26 +02002609 if (ret > 0) {
2610 /* there's no smaller item, so stick with the
2611 * larger one */
2612 btrfs_release_path(path);
2613 ret = btrfs_search_slot(NULL, root, &key,
2614 path, 0, 0);
2615 if (ret < 0)
2616 goto out;
2617 }
Arne Jansena2de7332011-03-08 14:14:00 +01002618 }
2619
Liu Bo625f1c8d2013-04-27 02:56:57 +00002620 stop_loop = 0;
Arne Jansena2de7332011-03-08 14:14:00 +01002621 while (1) {
Josef Bacik3173a182013-03-07 14:22:04 -05002622 u64 bytes;
2623
Arne Jansena2de7332011-03-08 14:14:00 +01002624 l = path->nodes[0];
2625 slot = path->slots[0];
2626 if (slot >= btrfs_header_nritems(l)) {
2627 ret = btrfs_next_leaf(root, path);
2628 if (ret == 0)
2629 continue;
2630 if (ret < 0)
2631 goto out;
2632
Liu Bo625f1c8d2013-04-27 02:56:57 +00002633 stop_loop = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01002634 break;
2635 }
2636 btrfs_item_key_to_cpu(l, &key, slot);
2637
Josef Bacik3173a182013-03-07 14:22:04 -05002638 if (key.type == BTRFS_METADATA_ITEM_KEY)
David Sterba707e8a02014-06-04 19:22:26 +02002639 bytes = root->nodesize;
Josef Bacik3173a182013-03-07 14:22:04 -05002640 else
2641 bytes = key.offset;
2642
2643 if (key.objectid + bytes <= logical)
Arne Jansena2de7332011-03-08 14:14:00 +01002644 goto next;
2645
Liu Bo625f1c8d2013-04-27 02:56:57 +00002646 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
2647 key.type != BTRFS_METADATA_ITEM_KEY)
2648 goto next;
Arne Jansena2de7332011-03-08 14:14:00 +01002649
Liu Bo625f1c8d2013-04-27 02:56:57 +00002650 if (key.objectid >= logical + map->stripe_len) {
2651 /* out of this device extent */
2652 if (key.objectid >= logic_end)
2653 stop_loop = 1;
2654 break;
2655 }
Arne Jansena2de7332011-03-08 14:14:00 +01002656
2657 extent = btrfs_item_ptr(l, slot,
2658 struct btrfs_extent_item);
2659 flags = btrfs_extent_flags(l, extent);
2660 generation = btrfs_extent_generation(l, extent);
2661
2662 if (key.objectid < logical &&
2663 (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
Frank Holtonefe120a2013-12-20 11:37:06 -05002664 btrfs_err(fs_info,
2665 "scrub: tree block %llu spanning "
2666 "stripes, ignored. logical=%llu",
Geert Uytterhoevenc1c9ff72013-08-20 13:20:07 +02002667 key.objectid, logical);
Arne Jansena2de7332011-03-08 14:14:00 +01002668 goto next;
2669 }
2670
Liu Bo625f1c8d2013-04-27 02:56:57 +00002671again:
2672 extent_logical = key.objectid;
2673 extent_len = bytes;
2674
Arne Jansena2de7332011-03-08 14:14:00 +01002675 /*
2676 * trim extent to this stripe
2677 */
Liu Bo625f1c8d2013-04-27 02:56:57 +00002678 if (extent_logical < logical) {
2679 extent_len -= logical - extent_logical;
2680 extent_logical = logical;
Arne Jansena2de7332011-03-08 14:14:00 +01002681 }
Liu Bo625f1c8d2013-04-27 02:56:57 +00002682 if (extent_logical + extent_len >
Arne Jansena2de7332011-03-08 14:14:00 +01002683 logical + map->stripe_len) {
Liu Bo625f1c8d2013-04-27 02:56:57 +00002684 extent_len = logical + map->stripe_len -
2685 extent_logical;
Arne Jansena2de7332011-03-08 14:14:00 +01002686 }
2687
Liu Bo625f1c8d2013-04-27 02:56:57 +00002688 extent_physical = extent_logical - logical + physical;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002689 extent_dev = scrub_dev;
2690 extent_mirror_num = mirror_num;
2691 if (is_dev_replace)
2692 scrub_remap_extent(fs_info, extent_logical,
2693 extent_len, &extent_physical,
2694 &extent_dev,
2695 &extent_mirror_num);
Liu Bo625f1c8d2013-04-27 02:56:57 +00002696
2697 ret = btrfs_lookup_csums_range(csum_root, logical,
2698 logical + map->stripe_len - 1,
2699 &sctx->csum_list, 1);
Arne Jansena2de7332011-03-08 14:14:00 +01002700 if (ret)
2701 goto out;
2702
Liu Bo625f1c8d2013-04-27 02:56:57 +00002703 ret = scrub_extent(sctx, extent_logical, extent_len,
2704 extent_physical, extent_dev, flags,
2705 generation, extent_mirror_num,
Stefan Behrens115930c2013-07-04 16:14:23 +02002706 extent_logical - logical + physical);
Liu Bo625f1c8d2013-04-27 02:56:57 +00002707 if (ret)
2708 goto out;
2709
Josef Bacikd88d46c2013-06-10 12:59:04 +00002710 scrub_free_csums(sctx);
Liu Bo625f1c8d2013-04-27 02:56:57 +00002711 if (extent_logical + extent_len <
2712 key.objectid + bytes) {
Wang Shilong3b080b22014-04-01 18:01:43 +08002713 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
2714 BTRFS_BLOCK_GROUP_RAID6)) {
2715 /*
2716	 * loop until we find the next data stripe
2717 * or we have finished all stripes.
2718 */
2719 do {
2720 physical += map->stripe_len;
2721 ret = get_raid56_logic_offset(
2722 physical, num,
2723 map, &logical);
2724 logical += base;
2725 } while (physical < physical_end && ret);
2726 } else {
2727 physical += map->stripe_len;
2728 logical += increment;
2729 }
Liu Bo625f1c8d2013-04-27 02:56:57 +00002730 if (logical < key.objectid + bytes) {
2731 cond_resched();
2732 goto again;
2733 }
2734
Wang Shilong3b080b22014-04-01 18:01:43 +08002735 if (physical >= physical_end) {
Liu Bo625f1c8d2013-04-27 02:56:57 +00002736 stop_loop = 1;
2737 break;
2738 }
2739 }
Arne Jansena2de7332011-03-08 14:14:00 +01002740next:
2741 path->slots[0]++;
2742 }
Chris Mason71267332011-05-23 06:30:52 -04002743 btrfs_release_path(path);
Wang Shilong3b080b22014-04-01 18:01:43 +08002744skip:
Arne Jansena2de7332011-03-08 14:14:00 +01002745 logical += increment;
2746 physical += map->stripe_len;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002747 spin_lock(&sctx->stat_lock);
Liu Bo625f1c8d2013-04-27 02:56:57 +00002748 if (stop_loop)
2749 sctx->stat.last_physical = map->stripes[num].physical +
2750 length;
2751 else
2752 sctx->stat.last_physical = physical;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002753 spin_unlock(&sctx->stat_lock);
Liu Bo625f1c8d2013-04-27 02:56:57 +00002754 if (stop_loop)
2755 break;
Arne Jansena2de7332011-03-08 14:14:00 +01002756 }
Stefan Behrensff023aa2012-11-06 11:43:11 +01002757out:
Arne Jansena2de7332011-03-08 14:14:00 +01002758 /* push queued extents */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002759 scrub_submit(sctx);
Stefan Behrensff023aa2012-11-06 11:43:11 +01002760 mutex_lock(&sctx->wr_ctx.wr_lock);
2761 scrub_wr_submit(sctx);
2762 mutex_unlock(&sctx->wr_ctx.wr_lock);
Arne Jansena2de7332011-03-08 14:14:00 +01002763
Arne Jansene7786c32011-05-28 20:58:38 +00002764 blk_finish_plug(&plug);
Arne Jansena2de7332011-03-08 14:14:00 +01002765 btrfs_free_path(path);
2766 return ret < 0 ? ret : 0;
2767}
2768
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002769static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002770 struct btrfs_device *scrub_dev,
2771 u64 chunk_tree, u64 chunk_objectid,
2772 u64 chunk_offset, u64 length,
Stefan Behrensff023aa2012-11-06 11:43:11 +01002773 u64 dev_offset, int is_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01002774{
2775 struct btrfs_mapping_tree *map_tree =
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002776 &sctx->dev_root->fs_info->mapping_tree;
Arne Jansena2de7332011-03-08 14:14:00 +01002777 struct map_lookup *map;
2778 struct extent_map *em;
2779 int i;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002780 int ret = 0;
Arne Jansena2de7332011-03-08 14:14:00 +01002781
2782 read_lock(&map_tree->map_tree.lock);
2783 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
2784 read_unlock(&map_tree->map_tree.lock);
2785
2786 if (!em)
2787 return -EINVAL;
2788
2789 map = (struct map_lookup *)em->bdev;
2790 if (em->start != chunk_offset)
2791 goto out;
2792
2793 if (em->len < length)
2794 goto out;
2795
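	/*
	 * find the stripe(s) of this chunk that live on the device being
	 * scrubbed and scrub each of them
	 */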
2796 for (i = 0; i < map->num_stripes; ++i) {
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002797 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
Arne Jansen859acaf2012-02-09 15:09:02 +01002798 map->stripes[i].physical == dev_offset) {
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002799 ret = scrub_stripe(sctx, map, scrub_dev, i,
Stefan Behrensff023aa2012-11-06 11:43:11 +01002800 chunk_offset, length,
2801 is_dev_replace);
Arne Jansena2de7332011-03-08 14:14:00 +01002802 if (ret)
2803 goto out;
2804 }
2805 }
2806out:
2807 free_extent_map(em);
2808
2809 return ret;
2810}
2811
2812static noinline_for_stack
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002813int scrub_enumerate_chunks(struct scrub_ctx *sctx,
Stefan Behrensff023aa2012-11-06 11:43:11 +01002814 struct btrfs_device *scrub_dev, u64 start, u64 end,
2815 int is_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01002816{
2817 struct btrfs_dev_extent *dev_extent = NULL;
2818 struct btrfs_path *path;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002819 struct btrfs_root *root = sctx->dev_root;
Arne Jansena2de7332011-03-08 14:14:00 +01002820 struct btrfs_fs_info *fs_info = root->fs_info;
2821 u64 length;
2822 u64 chunk_tree;
2823 u64 chunk_objectid;
2824 u64 chunk_offset;
2825 int ret;
2826 int slot;
2827 struct extent_buffer *l;
2828 struct btrfs_key key;
2829 struct btrfs_key found_key;
2830 struct btrfs_block_group_cache *cache;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002831 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
Arne Jansena2de7332011-03-08 14:14:00 +01002832
2833 path = btrfs_alloc_path();
2834 if (!path)
2835 return -ENOMEM;
2836
2837 path->reada = 2;
2838 path->search_commit_root = 1;
2839 path->skip_locking = 1;
2840
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002841 key.objectid = scrub_dev->devid;
Arne Jansena2de7332011-03-08 14:14:00 +01002842 key.offset = 0ull;
2843 key.type = BTRFS_DEV_EXTENT_KEY;
2844
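	/*
	 * walk all DEV_EXTENT items of the device being scrubbed, look up the
	 * block group each one belongs to and scrub the corresponding chunk
	 */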
Arne Jansena2de7332011-03-08 14:14:00 +01002845 while (1) {
2846 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2847 if (ret < 0)
Arne Jansen8c510322011-06-03 10:09:26 +02002848 break;
2849 if (ret > 0) {
2850 if (path->slots[0] >=
2851 btrfs_header_nritems(path->nodes[0])) {
2852 ret = btrfs_next_leaf(root, path);
2853 if (ret)
2854 break;
2855 }
2856 }
Arne Jansena2de7332011-03-08 14:14:00 +01002857
2858 l = path->nodes[0];
2859 slot = path->slots[0];
2860
2861 btrfs_item_key_to_cpu(l, &found_key, slot);
2862
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002863 if (found_key.objectid != scrub_dev->devid)
Arne Jansena2de7332011-03-08 14:14:00 +01002864 break;
2865
David Sterba962a2982014-06-04 18:41:45 +02002866 if (found_key.type != BTRFS_DEV_EXTENT_KEY)
Arne Jansena2de7332011-03-08 14:14:00 +01002867 break;
2868
2869 if (found_key.offset >= end)
2870 break;
2871
2872 if (found_key.offset < key.offset)
2873 break;
2874
2875 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
2876 length = btrfs_dev_extent_length(l, dev_extent);
2877
Qu Wenruoced96ed2014-06-19 10:42:51 +08002878 if (found_key.offset + length <= start)
2879 goto skip;
Arne Jansena2de7332011-03-08 14:14:00 +01002880
2881 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
2882 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
2883 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
2884
2885 /*
2886 * get a reference on the corresponding block group to prevent
2887 * the chunk from going away while we scrub it
2888 */
2889 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
Qu Wenruoced96ed2014-06-19 10:42:51 +08002890
2891 /* some chunks are removed but not committed to disk yet,
2892 * continue scrubbing */
2893 if (!cache)
2894 goto skip;
2895
Stefan Behrensff023aa2012-11-06 11:43:11 +01002896 dev_replace->cursor_right = found_key.offset + length;
2897 dev_replace->cursor_left = found_key.offset;
2898 dev_replace->item_needs_writeback = 1;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002899 ret = scrub_chunk(sctx, scrub_dev, chunk_tree, chunk_objectid,
Stefan Behrensff023aa2012-11-06 11:43:11 +01002900 chunk_offset, length, found_key.offset,
2901 is_dev_replace);
2902
2903 /*
2904	 * flush and submit all pending read and write bios, then
2905	 * wait for them.
2906 * Note that in the dev replace case, a read request causes
2907 * write requests that are submitted in the read completion
2908 * worker. Therefore in the current situation, it is required
2909 * that all write requests are flushed, so that all read and
2910 * write requests are really completed when bios_in_flight
2911 * changes to 0.
2912 */
2913 atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
2914 scrub_submit(sctx);
2915 mutex_lock(&sctx->wr_ctx.wr_lock);
2916 scrub_wr_submit(sctx);
2917 mutex_unlock(&sctx->wr_ctx.wr_lock);
2918
2919 wait_event(sctx->list_wait,
2920 atomic_read(&sctx->bios_in_flight) == 0);
Wang Shilong12cf9372014-02-19 19:24:17 +08002921 atomic_inc(&fs_info->scrubs_paused);
2922 wake_up(&fs_info->scrub_pause_wait);
2923
2924 /*
2925 * must be called before we decrease @scrub_paused.
2926	 * make sure we don't block transaction commit while
2927	 * we are waiting for pending workers to finish.
2928 */
Stefan Behrensff023aa2012-11-06 11:43:11 +01002929 wait_event(sctx->list_wait,
2930 atomic_read(&sctx->workers_pending) == 0);
Wang Shilong12cf9372014-02-19 19:24:17 +08002931 atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
2932
2933 mutex_lock(&fs_info->scrub_lock);
2934 __scrub_blocked_if_needed(fs_info);
2935 atomic_dec(&fs_info->scrubs_paused);
2936 mutex_unlock(&fs_info->scrub_lock);
2937 wake_up(&fs_info->scrub_pause_wait);
Stefan Behrensff023aa2012-11-06 11:43:11 +01002938
Arne Jansena2de7332011-03-08 14:14:00 +01002939 btrfs_put_block_group(cache);
2940 if (ret)
2941 break;
Stefan Behrensaf1be4f2012-11-27 17:39:51 +00002942 if (is_dev_replace &&
2943 atomic64_read(&dev_replace->num_write_errors) > 0) {
Stefan Behrensff023aa2012-11-06 11:43:11 +01002944 ret = -EIO;
2945 break;
2946 }
2947 if (sctx->stat.malloc_errors > 0) {
2948 ret = -ENOMEM;
2949 break;
2950 }
Arne Jansena2de7332011-03-08 14:14:00 +01002951
Ilya Dryomov539f3582013-10-07 13:42:57 +03002952 dev_replace->cursor_left = dev_replace->cursor_right;
2953 dev_replace->item_needs_writeback = 1;
Qu Wenruoced96ed2014-06-19 10:42:51 +08002954skip:
Arne Jansena2de7332011-03-08 14:14:00 +01002955 key.offset = found_key.offset + length;
Chris Mason71267332011-05-23 06:30:52 -04002956 btrfs_release_path(path);
Arne Jansena2de7332011-03-08 14:14:00 +01002957 }
2958
Arne Jansena2de7332011-03-08 14:14:00 +01002959 btrfs_free_path(path);
Arne Jansen8c510322011-06-03 10:09:26 +02002960
2961 /*
2962 * ret can still be 1 from search_slot or next_leaf,
2963 * that's not an error
2964 */
2965 return ret < 0 ? ret : 0;
Arne Jansena2de7332011-03-08 14:14:00 +01002966}
2967
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002968static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
2969 struct btrfs_device *scrub_dev)
Arne Jansena2de7332011-03-08 14:14:00 +01002970{
2971 int i;
2972 u64 bytenr;
2973 u64 gen;
2974 int ret;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002975 struct btrfs_root *root = sctx->dev_root;
Arne Jansena2de7332011-03-08 14:14:00 +01002976
Miao Xie87533c42013-01-29 10:14:48 +00002977 if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
Jeff Mahoney79787ea2012-03-12 16:03:00 +01002978 return -EIO;
2979
Miao Xie5f546062014-07-24 11:37:09 +08002980	/* Seed devices of a new filesystem have their own generation. */
2981 if (scrub_dev->fs_devices != root->fs_info->fs_devices)
2982 gen = scrub_dev->generation;
2983 else
2984 gen = root->fs_info->last_trans_committed;
Arne Jansena2de7332011-03-08 14:14:00 +01002985
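	/* scrub every super block mirror whose offset still fits on the device */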
2986 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
2987 bytenr = btrfs_sb_offset(i);
Miao Xie935e5cc2014-09-03 21:35:33 +08002988 if (bytenr + BTRFS_SUPER_INFO_SIZE >
2989 scrub_dev->commit_total_bytes)
Arne Jansena2de7332011-03-08 14:14:00 +01002990 break;
2991
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002992 ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002993 scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
Stefan Behrensff023aa2012-11-06 11:43:11 +01002994 NULL, 1, bytenr);
Arne Jansena2de7332011-03-08 14:14:00 +01002995 if (ret)
2996 return ret;
2997 }
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01002998 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
Arne Jansena2de7332011-03-08 14:14:00 +01002999
3000 return 0;
3001}
3002
3003/*
3004 * get a reference count on fs_info->scrub_workers. start the workers if necessary
3005 */
Stefan Behrensff023aa2012-11-06 11:43:11 +01003006static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
3007 int is_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01003008{
Josef Bacik0dc3b842011-11-18 14:37:27 -05003009 int ret = 0;
Qu Wenruo0339ef22014-02-28 10:46:17 +08003010 int flags = WQ_FREEZABLE | WQ_UNBOUND;
3011 int max_active = fs_info->thread_pool_size;
Arne Jansena2de7332011-03-08 14:14:00 +01003012
Arne Jansen632dd772011-06-10 12:07:07 +02003013 if (fs_info->scrub_workers_refcnt == 0) {
Stefan Behrensff023aa2012-11-06 11:43:11 +01003014 if (is_dev_replace)
Qu Wenruo0339ef22014-02-28 10:46:17 +08003015 fs_info->scrub_workers =
3016 btrfs_alloc_workqueue("btrfs-scrub", flags,
3017 1, 4);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003018 else
Qu Wenruo0339ef22014-02-28 10:46:17 +08003019 fs_info->scrub_workers =
3020 btrfs_alloc_workqueue("btrfs-scrub", flags,
3021 max_active, 4);
3022 if (!fs_info->scrub_workers) {
3023 ret = -ENOMEM;
Josef Bacik0dc3b842011-11-18 14:37:27 -05003024 goto out;
Qu Wenruo0339ef22014-02-28 10:46:17 +08003025 }
3026 fs_info->scrub_wr_completion_workers =
3027 btrfs_alloc_workqueue("btrfs-scrubwrc", flags,
3028 max_active, 2);
3029 if (!fs_info->scrub_wr_completion_workers) {
3030 ret = -ENOMEM;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003031 goto out;
Qu Wenruo0339ef22014-02-28 10:46:17 +08003032 }
3033 fs_info->scrub_nocow_workers =
3034 btrfs_alloc_workqueue("btrfs-scrubnc", flags, 1, 0);
3035 if (!fs_info->scrub_nocow_workers) {
3036 ret = -ENOMEM;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003037 goto out;
Qu Wenruo0339ef22014-02-28 10:46:17 +08003038 }
Arne Jansen632dd772011-06-10 12:07:07 +02003039 }
Arne Jansena2de7332011-03-08 14:14:00 +01003040 ++fs_info->scrub_workers_refcnt;
Josef Bacik0dc3b842011-11-18 14:37:27 -05003041out:
Josef Bacik0dc3b842011-11-18 14:37:27 -05003042 return ret;
Arne Jansena2de7332011-03-08 14:14:00 +01003043}
3044
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003045static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
Arne Jansena2de7332011-03-08 14:14:00 +01003046{
Stefan Behrensff023aa2012-11-06 11:43:11 +01003047 if (--fs_info->scrub_workers_refcnt == 0) {
Qu Wenruo0339ef22014-02-28 10:46:17 +08003048 btrfs_destroy_workqueue(fs_info->scrub_workers);
3049 btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
3050 btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003051 }
Arne Jansena2de7332011-03-08 14:14:00 +01003052 WARN_ON(fs_info->scrub_workers_refcnt < 0);
Arne Jansena2de7332011-03-08 14:14:00 +01003053}
3054
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003055int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
3056 u64 end, struct btrfs_scrub_progress *progress,
Stefan Behrens63a212a2012-11-05 18:29:28 +01003057 int readonly, int is_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01003058{
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003059 struct scrub_ctx *sctx;
Arne Jansena2de7332011-03-08 14:14:00 +01003060 int ret;
3061 struct btrfs_device *dev;
Miao Xie5d68da32014-07-24 11:37:07 +08003062 struct rcu_string *name;
Arne Jansena2de7332011-03-08 14:14:00 +01003063
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003064 if (btrfs_fs_closing(fs_info))
Arne Jansena2de7332011-03-08 14:14:00 +01003065 return -EINVAL;
3066
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003067 if (fs_info->chunk_root->nodesize > BTRFS_STRIPE_LEN) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04003068 /*
3069	 * in this case, the way scrub is implemented, it is unable to
3070	 * calculate the checksum. Do not handle this situation at all
3071	 * because it won't ever happen.
3072 */
Frank Holtonefe120a2013-12-20 11:37:06 -05003073 btrfs_err(fs_info,
3074 "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003075 fs_info->chunk_root->nodesize, BTRFS_STRIPE_LEN);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04003076 return -EINVAL;
3077 }
3078
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003079 if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04003080 /* not supported for data w/o checksums */
Frank Holtonefe120a2013-12-20 11:37:06 -05003081 btrfs_err(fs_info,
3082 "scrub: size assumption sectorsize != PAGE_SIZE "
3083 "(%d != %lu) fails",
Geert Uytterhoeven27f9f022013-08-20 13:20:09 +02003084 fs_info->chunk_root->sectorsize, PAGE_SIZE);
Arne Jansena2de7332011-03-08 14:14:00 +01003085 return -EINVAL;
3086 }
3087
Stefan Behrens7a9e9982012-11-02 14:58:04 +01003088 if (fs_info->chunk_root->nodesize >
3089 PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
3090 fs_info->chunk_root->sectorsize >
3091 PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
3092 /*
3093 * would exhaust the array bounds of pagev member in
3094 * struct scrub_block
3095 */
Frank Holtonefe120a2013-12-20 11:37:06 -05003096 btrfs_err(fs_info, "scrub: size assumption nodesize and sectorsize "
3097 "<= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
Stefan Behrens7a9e9982012-11-02 14:58:04 +01003098 fs_info->chunk_root->nodesize,
3099 SCRUB_MAX_PAGES_PER_BLOCK,
3100 fs_info->chunk_root->sectorsize,
3101 SCRUB_MAX_PAGES_PER_BLOCK);
3102 return -EINVAL;
3103 }
3104
Arne Jansena2de7332011-03-08 14:14:00 +01003105
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003106 mutex_lock(&fs_info->fs_devices->device_list_mutex);
3107 dev = btrfs_find_device(fs_info, devid, NULL, NULL);
Stefan Behrens63a212a2012-11-05 18:29:28 +01003108 if (!dev || (dev->missing && !is_dev_replace)) {
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003109 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
Arne Jansena2de7332011-03-08 14:14:00 +01003110 return -ENODEV;
3111 }
Arne Jansena2de7332011-03-08 14:14:00 +01003112
Miao Xie5d68da32014-07-24 11:37:07 +08003113 if (!is_dev_replace && !readonly && !dev->writeable) {
3114 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3115 rcu_read_lock();
3116 name = rcu_dereference(dev->name);
3117 btrfs_err(fs_info, "scrub: device %s is not writable",
3118 name->str);
3119 rcu_read_unlock();
3120 return -EROFS;
3121 }
3122
Wang Shilong3b7a0162013-10-12 02:11:12 +08003123 mutex_lock(&fs_info->scrub_lock);
Stefan Behrens63a212a2012-11-05 18:29:28 +01003124 if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
Arne Jansena2de7332011-03-08 14:14:00 +01003125 mutex_unlock(&fs_info->scrub_lock);
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003126 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003127 return -EIO;
Arne Jansena2de7332011-03-08 14:14:00 +01003128 }
3129
Stefan Behrens8dabb742012-11-06 13:15:27 +01003130 btrfs_dev_replace_lock(&fs_info->dev_replace);
3131 if (dev->scrub_device ||
3132 (!is_dev_replace &&
3133 btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
3134 btrfs_dev_replace_unlock(&fs_info->dev_replace);
Arne Jansena2de7332011-03-08 14:14:00 +01003135 mutex_unlock(&fs_info->scrub_lock);
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003136 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
Arne Jansena2de7332011-03-08 14:14:00 +01003137 return -EINPROGRESS;
3138 }
Stefan Behrens8dabb742012-11-06 13:15:27 +01003139 btrfs_dev_replace_unlock(&fs_info->dev_replace);
Wang Shilong3b7a0162013-10-12 02:11:12 +08003140
3141 ret = scrub_workers_get(fs_info, is_dev_replace);
3142 if (ret) {
3143 mutex_unlock(&fs_info->scrub_lock);
3144 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3145 return ret;
3146 }
3147
Stefan Behrens63a212a2012-11-05 18:29:28 +01003148 sctx = scrub_setup_ctx(dev, is_dev_replace);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003149 if (IS_ERR(sctx)) {
Arne Jansena2de7332011-03-08 14:14:00 +01003150 mutex_unlock(&fs_info->scrub_lock);
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003151 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3152 scrub_workers_put(fs_info);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003153 return PTR_ERR(sctx);
Arne Jansena2de7332011-03-08 14:14:00 +01003154 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003155 sctx->readonly = readonly;
3156 dev->scrub_device = sctx;
Wang Shilong3cb09292013-12-04 21:15:19 +08003157 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
Arne Jansena2de7332011-03-08 14:14:00 +01003158
Wang Shilong3cb09292013-12-04 21:15:19 +08003159 /*
3160	 * by checking @scrub_pause_req here, we can avoid a
3161	 * race between transaction commit and scrubbing.
3162 */
Wang Shilongcb7ab022013-12-04 21:16:53 +08003163 __scrub_blocked_if_needed(fs_info);
Arne Jansena2de7332011-03-08 14:14:00 +01003164 atomic_inc(&fs_info->scrubs_running);
3165 mutex_unlock(&fs_info->scrub_lock);
Arne Jansena2de7332011-03-08 14:14:00 +01003166
Stefan Behrensff023aa2012-11-06 11:43:11 +01003167 if (!is_dev_replace) {
Wang Shilong9b011ad2013-10-25 19:12:02 +08003168 /*
3169	 * by holding the device list mutex, we can kick off
3170	 * writing the super blocks in log tree sync.
3171 */
Wang Shilong3cb09292013-12-04 21:15:19 +08003172 mutex_lock(&fs_info->fs_devices->device_list_mutex);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003173 ret = scrub_supers(sctx, dev);
Wang Shilong3cb09292013-12-04 21:15:19 +08003174 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003175 }
Arne Jansena2de7332011-03-08 14:14:00 +01003176
3177 if (!ret)
Stefan Behrensff023aa2012-11-06 11:43:11 +01003178 ret = scrub_enumerate_chunks(sctx, dev, start, end,
3179 is_dev_replace);
Arne Jansena2de7332011-03-08 14:14:00 +01003180
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01003181 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
Arne Jansena2de7332011-03-08 14:14:00 +01003182 atomic_dec(&fs_info->scrubs_running);
3183 wake_up(&fs_info->scrub_pause_wait);
3184
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01003185 wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
Jan Schmidt0ef8e452011-06-13 20:04:15 +02003186
Arne Jansena2de7332011-03-08 14:14:00 +01003187 if (progress)
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003188 memcpy(progress, &sctx->stat, sizeof(*progress));
Arne Jansena2de7332011-03-08 14:14:00 +01003189
3190 mutex_lock(&fs_info->scrub_lock);
3191 dev->scrub_device = NULL;
Wang Shilong3b7a0162013-10-12 02:11:12 +08003192 scrub_workers_put(fs_info);
Arne Jansena2de7332011-03-08 14:14:00 +01003193 mutex_unlock(&fs_info->scrub_lock);
3194
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003195 scrub_free_ctx(sctx);
Arne Jansena2de7332011-03-08 14:14:00 +01003196
3197 return ret;
3198}
3199
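/*
 * ask all running scrubs to pause and wait until they have done so
 * (typically called around transaction commit)
 */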
Jeff Mahoney143bede2012-03-01 14:56:26 +01003200void btrfs_scrub_pause(struct btrfs_root *root)
Arne Jansena2de7332011-03-08 14:14:00 +01003201{
3202 struct btrfs_fs_info *fs_info = root->fs_info;
3203
3204 mutex_lock(&fs_info->scrub_lock);
3205 atomic_inc(&fs_info->scrub_pause_req);
3206 while (atomic_read(&fs_info->scrubs_paused) !=
3207 atomic_read(&fs_info->scrubs_running)) {
3208 mutex_unlock(&fs_info->scrub_lock);
3209 wait_event(fs_info->scrub_pause_wait,
3210 atomic_read(&fs_info->scrubs_paused) ==
3211 atomic_read(&fs_info->scrubs_running));
3212 mutex_lock(&fs_info->scrub_lock);
3213 }
3214 mutex_unlock(&fs_info->scrub_lock);
Arne Jansena2de7332011-03-08 14:14:00 +01003215}
3216
Jeff Mahoney143bede2012-03-01 14:56:26 +01003217void btrfs_scrub_continue(struct btrfs_root *root)
Arne Jansena2de7332011-03-08 14:14:00 +01003218{
3219 struct btrfs_fs_info *fs_info = root->fs_info;
3220
3221 atomic_dec(&fs_info->scrub_pause_req);
3222 wake_up(&fs_info->scrub_pause_wait);
Arne Jansena2de7332011-03-08 14:14:00 +01003223}
3224
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003225int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
Arne Jansena2de7332011-03-08 14:14:00 +01003226{
Arne Jansena2de7332011-03-08 14:14:00 +01003227 mutex_lock(&fs_info->scrub_lock);
3228 if (!atomic_read(&fs_info->scrubs_running)) {
3229 mutex_unlock(&fs_info->scrub_lock);
3230 return -ENOTCONN;
3231 }
3232
3233 atomic_inc(&fs_info->scrub_cancel_req);
3234 while (atomic_read(&fs_info->scrubs_running)) {
3235 mutex_unlock(&fs_info->scrub_lock);
3236 wait_event(fs_info->scrub_pause_wait,
3237 atomic_read(&fs_info->scrubs_running) == 0);
3238 mutex_lock(&fs_info->scrub_lock);
3239 }
3240 atomic_dec(&fs_info->scrub_cancel_req);
3241 mutex_unlock(&fs_info->scrub_lock);
3242
3243 return 0;
3244}
3245
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003246int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
3247 struct btrfs_device *dev)
Jeff Mahoney49b25e02012-03-01 17:24:58 +01003248{
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003249 struct scrub_ctx *sctx;
Arne Jansena2de7332011-03-08 14:14:00 +01003250
3251 mutex_lock(&fs_info->scrub_lock);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003252 sctx = dev->scrub_device;
3253 if (!sctx) {
Arne Jansena2de7332011-03-08 14:14:00 +01003254 mutex_unlock(&fs_info->scrub_lock);
3255 return -ENOTCONN;
3256 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003257 atomic_inc(&sctx->cancel_req);
Arne Jansena2de7332011-03-08 14:14:00 +01003258 while (dev->scrub_device) {
3259 mutex_unlock(&fs_info->scrub_lock);
3260 wait_event(fs_info->scrub_pause_wait,
3261 dev->scrub_device == NULL);
3262 mutex_lock(&fs_info->scrub_lock);
3263 }
3264 mutex_unlock(&fs_info->scrub_lock);
3265
3266 return 0;
3267}
Stefan Behrens1623ede2012-03-27 14:21:26 -04003268
Arne Jansena2de7332011-03-08 14:14:00 +01003269int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
3270 struct btrfs_scrub_progress *progress)
3271{
3272 struct btrfs_device *dev;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003273 struct scrub_ctx *sctx = NULL;
Arne Jansena2de7332011-03-08 14:14:00 +01003274
3275 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003276 dev = btrfs_find_device(root->fs_info, devid, NULL, NULL);
Arne Jansena2de7332011-03-08 14:14:00 +01003277 if (dev)
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003278 sctx = dev->scrub_device;
3279 if (sctx)
3280 memcpy(progress, &sctx->stat, sizeof(*progress));
Arne Jansena2de7332011-03-08 14:14:00 +01003281 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
3282
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003283 return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
Arne Jansena2de7332011-03-08 14:14:00 +01003284}
Stefan Behrensff023aa2012-11-06 11:43:11 +01003285
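/*
 * during dev-replace, resolve a logical extent to the physical location,
 * device and mirror of the first stripe returned by btrfs_map_block(),
 * so that the data can be read from there
 */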
3286static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
3287 u64 extent_logical, u64 extent_len,
3288 u64 *extent_physical,
3289 struct btrfs_device **extent_dev,
3290 int *extent_mirror_num)
3291{
3292 u64 mapped_length;
3293 struct btrfs_bio *bbio = NULL;
3294 int ret;
3295
3296 mapped_length = extent_len;
3297 ret = btrfs_map_block(fs_info, READ, extent_logical,
3298 &mapped_length, &bbio, 0);
3299 if (ret || !bbio || mapped_length < extent_len ||
3300 !bbio->stripes[0].dev->bdev) {
3301 kfree(bbio);
3302 return;
3303 }
3304
3305 *extent_physical = bbio->stripes[0].physical;
3306 *extent_mirror_num = bbio->mirror_num;
3307 *extent_dev = bbio->stripes[0].dev;
3308 kfree(bbio);
3309}
3310
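/*
 * set up the write context; a target device is only attached in the
 * dev-replace case, otherwise the context stays unused
 */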
3311static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
3312 struct scrub_wr_ctx *wr_ctx,
3313 struct btrfs_fs_info *fs_info,
3314 struct btrfs_device *dev,
3315 int is_dev_replace)
3316{
3317 WARN_ON(wr_ctx->wr_curr_bio != NULL);
3318
3319 mutex_init(&wr_ctx->wr_lock);
3320 wr_ctx->wr_curr_bio = NULL;
3321 if (!is_dev_replace)
3322 return 0;
3323
3324 WARN_ON(!dev->bdev);
3325 wr_ctx->pages_per_wr_bio = min_t(int, SCRUB_PAGES_PER_WR_BIO,
3326 bio_get_nr_vecs(dev->bdev));
3327 wr_ctx->tgtdev = dev;
3328 atomic_set(&wr_ctx->flush_all_writes, 0);
3329 return 0;
3330}
3331
3332static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
3333{
3334 mutex_lock(&wr_ctx->wr_lock);
3335 kfree(wr_ctx->wr_curr_bio);
3336 wr_ctx->wr_curr_bio = NULL;
3337 mutex_unlock(&wr_ctx->wr_lock);
3338}
3339
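/*
 * queue a worker that copies a nocow extent to the dev-replace target by
 * walking the inodes that reference it and copying it page by page
 */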
3340static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
3341 int mirror_num, u64 physical_for_dev_replace)
3342{
3343 struct scrub_copy_nocow_ctx *nocow_ctx;
3344 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
3345
3346 nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
3347 if (!nocow_ctx) {
3348 spin_lock(&sctx->stat_lock);
3349 sctx->stat.malloc_errors++;
3350 spin_unlock(&sctx->stat_lock);
3351 return -ENOMEM;
3352 }
3353
3354 scrub_pending_trans_workers_inc(sctx);
3355
3356 nocow_ctx->sctx = sctx;
3357 nocow_ctx->logical = logical;
3358 nocow_ctx->len = len;
3359 nocow_ctx->mirror_num = mirror_num;
3360 nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
Liu Bo9e0af232014-08-15 23:36:53 +08003361 btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper,
3362 copy_nocow_pages_worker, NULL, NULL);
Josef Bacik652f25a2013-09-12 16:58:28 -04003363 INIT_LIST_HEAD(&nocow_ctx->inodes);
Qu Wenruo0339ef22014-02-28 10:46:17 +08003364 btrfs_queue_work(fs_info->scrub_nocow_workers,
3365 &nocow_ctx->work);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003366
3367 return 0;
3368}
3369
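/*
 * backref walk callback: remember each (inode, offset, root) that
 * references the extent so the worker can copy it later
 */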
Josef Bacik652f25a2013-09-12 16:58:28 -04003370static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
3371{
3372 struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
3373 struct scrub_nocow_inode *nocow_inode;
3374
3375 nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
3376 if (!nocow_inode)
3377 return -ENOMEM;
3378 nocow_inode->inum = inum;
3379 nocow_inode->offset = offset;
3380 nocow_inode->root = root;
3381 list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
3382 return 0;
3383}
3384
3385#define COPY_COMPLETE 1
3386
Stefan Behrensff023aa2012-11-06 11:43:11 +01003387static void copy_nocow_pages_worker(struct btrfs_work *work)
3388{
3389 struct scrub_copy_nocow_ctx *nocow_ctx =
3390 container_of(work, struct scrub_copy_nocow_ctx, work);
3391 struct scrub_ctx *sctx = nocow_ctx->sctx;
3392 u64 logical = nocow_ctx->logical;
3393 u64 len = nocow_ctx->len;
3394 int mirror_num = nocow_ctx->mirror_num;
3395 u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
3396 int ret;
3397 struct btrfs_trans_handle *trans = NULL;
3398 struct btrfs_fs_info *fs_info;
3399 struct btrfs_path *path;
3400 struct btrfs_root *root;
3401 int not_written = 0;
3402
3403 fs_info = sctx->dev_root->fs_info;
3404 root = fs_info->extent_root;
3405
3406 path = btrfs_alloc_path();
3407 if (!path) {
3408 spin_lock(&sctx->stat_lock);
3409 sctx->stat.malloc_errors++;
3410 spin_unlock(&sctx->stat_lock);
3411 not_written = 1;
3412 goto out;
3413 }
3414
3415 trans = btrfs_join_transaction(root);
3416 if (IS_ERR(trans)) {
3417 not_written = 1;
3418 goto out;
3419 }
3420
3421 ret = iterate_inodes_from_logical(logical, fs_info, path,
Josef Bacik652f25a2013-09-12 16:58:28 -04003422 record_inode_for_nocow, nocow_ctx);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003423 if (ret != 0 && ret != -ENOENT) {
Frank Holtonefe120a2013-12-20 11:37:06 -05003424 btrfs_warn(fs_info, "iterate_inodes_from_logical() failed: log %llu, "
3425 "phys %llu, len %llu, mir %u, ret %d",
Geert Uytterhoeven118a0a22013-08-20 13:20:10 +02003426 logical, physical_for_dev_replace, len, mirror_num,
3427 ret);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003428 not_written = 1;
3429 goto out;
3430 }
3431
Josef Bacik652f25a2013-09-12 16:58:28 -04003432 btrfs_end_transaction(trans, root);
3433 trans = NULL;
3434 while (!list_empty(&nocow_ctx->inodes)) {
3435 struct scrub_nocow_inode *entry;
3436 entry = list_first_entry(&nocow_ctx->inodes,
3437 struct scrub_nocow_inode,
3438 list);
3439 list_del_init(&entry->list);
3440 ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
3441 entry->root, nocow_ctx);
3442 kfree(entry);
3443 if (ret == COPY_COMPLETE) {
3444 ret = 0;
3445 break;
3446 } else if (ret) {
3447 break;
3448 }
3449 }
Stefan Behrensff023aa2012-11-06 11:43:11 +01003450out:
Josef Bacik652f25a2013-09-12 16:58:28 -04003451 while (!list_empty(&nocow_ctx->inodes)) {
3452 struct scrub_nocow_inode *entry;
3453 entry = list_first_entry(&nocow_ctx->inodes,
3454 struct scrub_nocow_inode,
3455 list);
3456 list_del_init(&entry->list);
3457 kfree(entry);
3458 }
Stefan Behrensff023aa2012-11-06 11:43:11 +01003459 if (trans && !IS_ERR(trans))
3460 btrfs_end_transaction(trans, root);
3461 if (not_written)
3462 btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
3463 num_uncorrectable_read_errors);
3464
3465 btrfs_free_path(path);
3466 kfree(nocow_ctx);
3467
3468 scrub_pending_trans_workers_dec(sctx);
3469}
3470
Josef Bacik652f25a2013-09-12 16:58:28 -04003471static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
3472 struct scrub_copy_nocow_ctx *nocow_ctx)
Stefan Behrensff023aa2012-11-06 11:43:11 +01003473{
Miao Xie826aa0a2013-06-27 18:50:59 +08003474 struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003475 struct btrfs_key key;
Miao Xie826aa0a2013-06-27 18:50:59 +08003476 struct inode *inode;
3477 struct page *page;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003478 struct btrfs_root *local_root;
Josef Bacik652f25a2013-09-12 16:58:28 -04003479 struct btrfs_ordered_extent *ordered;
3480 struct extent_map *em;
3481 struct extent_state *cached_state = NULL;
3482 struct extent_io_tree *io_tree;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003483 u64 physical_for_dev_replace;
Josef Bacik652f25a2013-09-12 16:58:28 -04003484 u64 len = nocow_ctx->len;
3485 u64 lockstart = offset, lockend = offset + len - 1;
Miao Xie826aa0a2013-06-27 18:50:59 +08003486 unsigned long index;
Liu Bo6f1c3602013-01-29 03:22:10 +00003487 int srcu_index;
Josef Bacik652f25a2013-09-12 16:58:28 -04003488 int ret = 0;
3489 int err = 0;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003490
3491 key.objectid = root;
3492 key.type = BTRFS_ROOT_ITEM_KEY;
3493 key.offset = (u64)-1;
Liu Bo6f1c3602013-01-29 03:22:10 +00003494
3495 srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
3496
Stefan Behrensff023aa2012-11-06 11:43:11 +01003497 local_root = btrfs_read_fs_root_no_name(fs_info, &key);
Liu Bo6f1c3602013-01-29 03:22:10 +00003498 if (IS_ERR(local_root)) {
3499 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003500 return PTR_ERR(local_root);
Liu Bo6f1c3602013-01-29 03:22:10 +00003501 }
Stefan Behrensff023aa2012-11-06 11:43:11 +01003502
3503 key.type = BTRFS_INODE_ITEM_KEY;
3504 key.objectid = inum;
3505 key.offset = 0;
3506 inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
Liu Bo6f1c3602013-01-29 03:22:10 +00003507 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003508 if (IS_ERR(inode))
3509 return PTR_ERR(inode);
3510
Miao Xieedd14002013-06-27 18:51:00 +08003511 /* Avoid truncate/dio/punch hole.. */
3512 mutex_lock(&inode->i_mutex);
3513 inode_dio_wait(inode);
3514
Stefan Behrensff023aa2012-11-06 11:43:11 +01003515 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
Josef Bacik652f25a2013-09-12 16:58:28 -04003516 io_tree = &BTRFS_I(inode)->io_tree;
3517
3518 lock_extent_bits(io_tree, lockstart, lockend, 0, &cached_state);
3519 ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
3520 if (ordered) {
3521 btrfs_put_ordered_extent(ordered);
3522 goto out_unlock;
3523 }
3524
3525 em = btrfs_get_extent(inode, NULL, 0, lockstart, len, 0);
3526 if (IS_ERR(em)) {
3527 ret = PTR_ERR(em);
3528 goto out_unlock;
3529 }
3530
3531 /*
3532 * This extent does not actually cover the logical extent anymore,
3533 * move on to the next inode.
3534 */
3535 if (em->block_start > nocow_ctx->logical ||
3536 em->block_start + em->block_len < nocow_ctx->logical + len) {
3537 free_extent_map(em);
3538 goto out_unlock;
3539 }
3540 free_extent_map(em);
3541
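	/* copy the extent page by page from the page cache to the target device */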
Stefan Behrensff023aa2012-11-06 11:43:11 +01003542 while (len >= PAGE_CACHE_SIZE) {
Stefan Behrensff023aa2012-11-06 11:43:11 +01003543 index = offset >> PAGE_CACHE_SHIFT;
Miao Xieedd14002013-06-27 18:51:00 +08003544again:
Stefan Behrensff023aa2012-11-06 11:43:11 +01003545 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
3546 if (!page) {
Frank Holtonefe120a2013-12-20 11:37:06 -05003547 btrfs_err(fs_info, "find_or_create_page() failed");
Stefan Behrensff023aa2012-11-06 11:43:11 +01003548 ret = -ENOMEM;
Miao Xie826aa0a2013-06-27 18:50:59 +08003549 goto out;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003550 }
3551
3552 if (PageUptodate(page)) {
3553 if (PageDirty(page))
3554 goto next_page;
3555 } else {
3556 ClearPageError(page);
Josef Bacik652f25a2013-09-12 16:58:28 -04003557 err = extent_read_full_page_nolock(io_tree, page,
3558 btrfs_get_extent,
3559 nocow_ctx->mirror_num);
Miao Xie826aa0a2013-06-27 18:50:59 +08003560 if (err) {
3561 ret = err;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003562 goto next_page;
3563 }
Miao Xieedd14002013-06-27 18:51:00 +08003564
Miao Xie26b258912013-06-27 18:50:58 +08003565 lock_page(page);
Miao Xieedd14002013-06-27 18:51:00 +08003566 /*
3567	 * If the page has been removed from the page cache,
3568	 * the data on it is meaningless, because it may be an
3569	 * old copy; the new data may have been written into a
3570	 * new page in the page cache.
3571 */
3572 if (page->mapping != inode->i_mapping) {
Josef Bacik652f25a2013-09-12 16:58:28 -04003573 unlock_page(page);
Miao Xieedd14002013-06-27 18:51:00 +08003574 page_cache_release(page);
3575 goto again;
3576 }
Stefan Behrensff023aa2012-11-06 11:43:11 +01003577 if (!PageUptodate(page)) {
3578 ret = -EIO;
3579 goto next_page;
3580 }
3581 }
Miao Xie826aa0a2013-06-27 18:50:59 +08003582 err = write_page_nocow(nocow_ctx->sctx,
3583 physical_for_dev_replace, page);
3584 if (err)
3585 ret = err;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003586next_page:
Miao Xie826aa0a2013-06-27 18:50:59 +08003587 unlock_page(page);
3588 page_cache_release(page);
3589
3590 if (ret)
3591 break;
3592
Stefan Behrensff023aa2012-11-06 11:43:11 +01003593 offset += PAGE_CACHE_SIZE;
3594 physical_for_dev_replace += PAGE_CACHE_SIZE;
3595 len -= PAGE_CACHE_SIZE;
3596 }
Josef Bacik652f25a2013-09-12 16:58:28 -04003597 ret = COPY_COMPLETE;
3598out_unlock:
3599 unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
3600 GFP_NOFS);
Miao Xie826aa0a2013-06-27 18:50:59 +08003601out:
Miao Xieedd14002013-06-27 18:51:00 +08003602 mutex_unlock(&inode->i_mutex);
Miao Xie826aa0a2013-06-27 18:50:59 +08003603 iput(inode);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003604 return ret;
3605}
3606
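/*
 * write a single page synchronously to the given physical offset on the
 * dev-replace target device
 */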
3607static int write_page_nocow(struct scrub_ctx *sctx,
3608 u64 physical_for_dev_replace, struct page *page)
3609{
3610 struct bio *bio;
3611 struct btrfs_device *dev;
3612 int ret;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003613
3614 dev = sctx->wr_ctx.tgtdev;
3615 if (!dev)
3616 return -EIO;
3617 if (!dev->bdev) {
3618 printk_ratelimited(KERN_WARNING
Frank Holtonefe120a2013-12-20 11:37:06 -05003619 "BTRFS: scrub write_page_nocow(bdev == NULL) is unexpected!\n");
Stefan Behrensff023aa2012-11-06 11:43:11 +01003620 return -EIO;
3621 }
Chris Mason9be33952013-05-17 18:30:14 -04003622 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003623 if (!bio) {
3624 spin_lock(&sctx->stat_lock);
3625 sctx->stat.malloc_errors++;
3626 spin_unlock(&sctx->stat_lock);
3627 return -ENOMEM;
3628 }
Kent Overstreet4f024f32013-10-11 15:44:27 -07003629 bio->bi_iter.bi_size = 0;
3630 bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003631 bio->bi_bdev = dev->bdev;
3632 ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
3633 if (ret != PAGE_CACHE_SIZE) {
3634leave_with_eio:
3635 bio_put(bio);
3636 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
3637 return -EIO;
3638 }
Stefan Behrensff023aa2012-11-06 11:43:11 +01003639
Kent Overstreet33879d42013-11-23 22:33:32 -08003640 if (btrfsic_submit_bio_wait(WRITE_SYNC, bio))
Stefan Behrensff023aa2012-11-06 11:43:11 +01003641 goto leave_with_eio;
3642
3643 bio_put(bio);
3644 return 0;
3645}