/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "raid56.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_block;
struct scrub_ctx;

/*
 * the following three values only influence the performance.
 * The last one configures the number of parallel and outstanding I/O
 * operations. The first two values configure an upper limit for the number
 * of (dynamically allocated) pages that are added to a bio.
 */
#define SCRUB_PAGES_PER_RD_BIO	32	/* 128k per bio */
#define SCRUB_PAGES_PER_WR_BIO	32	/* 128k per bio */
#define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */

/*
 * the following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 * Values larger than BTRFS_STRIPE_LEN are not supported.
 */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */

struct scrub_recover {
	atomic_t		refs;
	struct btrfs_bio	*bbio;
	u64			map_length;
};

struct scrub_page {
	struct scrub_block	*sblock;
	struct page		*page;
	struct btrfs_device	*dev;
	struct list_head	list;
	u64			flags;  /* extent flags */
	u64			generation;
	u64			logical;
	u64			physical;
	u64			physical_for_dev_replace;
	atomic_t		ref_count;
	struct {
		unsigned int	mirror_num:8;
		unsigned int	have_csum:1;
		unsigned int	io_error:1;
	};
	u8			csum[BTRFS_CSUM_SIZE];

	struct scrub_recover	*recover;
};

struct scrub_bio {
	int			index;
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	struct bio		*bio;
	int			err;
	u64			logical;
	u64			physical;
#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
	struct scrub_page	*pagev[SCRUB_PAGES_PER_WR_BIO];
#else
	struct scrub_page	*pagev[SCRUB_PAGES_PER_RD_BIO];
#endif
	int			page_count;
	int			next_free;
	struct btrfs_work	work;
};

struct scrub_block {
	struct scrub_page	*pagev[SCRUB_MAX_PAGES_PER_BLOCK];
	int			page_count;
	atomic_t		outstanding_pages;
	atomic_t		ref_count; /* free mem on transition to zero */
	struct scrub_ctx	*sctx;
	struct scrub_parity	*sparity;
	struct {
		unsigned int	header_error:1;
		unsigned int	checksum_error:1;
		unsigned int	no_io_error_seen:1;
		unsigned int	generation_error:1; /* also sets header_error */

		/*
		 * The following is for the data used to check parity,
		 * i.e. data with a checksum.
		 */
		unsigned int	data_corrected:1;
	};
};

/* Used for chunks with parity stripes, such as RAID5/6 */
struct scrub_parity {
	struct scrub_ctx	*sctx;

	struct btrfs_device	*scrub_dev;

	u64			logic_start;
	u64			logic_end;

	int			nsectors;
	int			stripe_len;

	atomic_t		ref_count;

	struct list_head	spages;

	/* Work of parity check and repair */
	struct btrfs_work	work;

	/* Mark the parity blocks which have data */
	unsigned long		*dbitmap;

	/*
	 * Mark the parity blocks which have data, but where errors
	 * happened when reading or checking that data
	 */
	unsigned long		*ebitmap;

	unsigned long		bitmap[0];
};

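/*
 * context for the bios that write repaired or copied data to the
 * dev-replace target device
 */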
struct scrub_wr_ctx {
	struct scrub_bio	*wr_curr_bio;
	struct btrfs_device	*tgtdev;
	int			pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
	atomic_t		flush_all_writes;
	struct mutex		wr_lock;
};

struct scrub_ctx {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
	struct btrfs_root	*dev_root;
	int			first_free;
	int			curr;
	atomic_t		bios_in_flight;
	atomic_t		workers_pending;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	u16			csum_size;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	int			pages_per_rd_bio;
	u32			sectorsize;
	u32			nodesize;

	int			is_dev_replace;
	struct scrub_wr_ctx	wr_ctx;

	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;
};

struct scrub_fixup_nodatasum {
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	u64			logical;
	struct btrfs_root	*root;
	struct btrfs_work	work;
	int			mirror_num;
};

struct scrub_nocow_inode {
	u64			inum;
	u64			offset;
	u64			root;
	struct list_head	list;
};

struct scrub_copy_nocow_ctx {
	struct scrub_ctx	*sctx;
	u64			logical;
	u64			len;
	int			mirror_num;
	u64			physical_for_dev_replace;
	struct list_head	inodes;
	struct btrfs_work	work;
};

struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	const char		*errstr;
	sector_t		sector;
	u64			logical;
	struct btrfs_device	*dev;
};

static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
				     struct btrfs_fs_info *fs_info,
				     struct scrub_block *original_sblock,
				     u64 length, u64 logical,
				     struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock, int is_metadata,
				int have_csum, u8 *csum, u64 generation,
				u16 csum_size, int retry_failed_mirror);
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
					 struct scrub_block *sblock,
					 int is_metadata, int have_csum,
					 const u8 *csum, u64 generation,
					 u16 csum_size);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good,
					     int force_write);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
static void scrub_parity_get(struct scrub_parity *sparity);
static void scrub_parity_put(struct scrub_parity *sparity);
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio, int err);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num);
static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
			      struct scrub_wr_ctx *wr_ctx,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_device *dev,
			      int is_dev_replace);
static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio, int err);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page);
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
				      struct scrub_copy_nocow_ctx *ctx);
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
			    int mirror_num, u64 physical_for_dev_replace);
static void copy_nocow_pages_worker(struct btrfs_work *work);
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);

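/*
 * bios_in_flight counts the scrub bios that have been submitted but whose
 * completion has not been fully processed yet; scrub_pending_bio_dec()
 * wakes list_wait so that waiters can re-check the counter.
 */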
static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	atomic_inc(&sctx->bios_in_flight);
}

static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
}

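/*
 * wait (with scrub_lock held on entry and exit) until no pause of the
 * scrub is requested anymore; the lock is dropped while sleeping
 */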
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
}

static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);

	mutex_lock(&fs_info->scrub_lock);
	__scrub_blocked_if_needed(fs_info);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	wake_up(&fs_info->scrub_pause_wait);
}

/*
 * used for workers that require transaction commits (i.e., for the
 * NOCOW case)
 */
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	/*
	 * increment scrubs_running to prevent cancel requests from
	 * completing as long as a worker is running. we must also
	 * increment scrubs_paused to prevent deadlocking on pause
	 * requests used for transaction commits (as the worker uses a
	 * transaction context). it is safe to regard the worker
	 * as paused for all practical matters. effectively, we only
	 * avoid cancellation requests from completing.
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrubs_running);
	atomic_inc(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	/*
	 * checking the scrubs_running == scrubs_paused condition inside
	 * wait_event() is not an atomic operation, which means we may
	 * inc/dec scrubs_running/scrubs_paused at any time. Wake up
	 * scrub_pause_wait as often as we can to keep the transaction
	 * commit blocked for as short a time as possible.
	 */
	wake_up(&fs_info->scrub_pause_wait);

	atomic_inc(&sctx->workers_pending);
}

/* used for workers that require transaction commits */
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	/*
	 * see scrub_pending_trans_workers_inc() why we're pretending
	 * to be paused in the scrub counters
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_dec(&fs_info->scrubs_running);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_dec(&sctx->workers_pending);
	wake_up(&fs_info->scrub_pause_wait);
	wake_up(&sctx->list_wait);
}

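/* free all checksums still queued up on the context's csum_list */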
static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}

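/*
 * free a scrub context: release the blocks of a partially filled bio that
 * is left over from a cancelled run, then the bio pool and the checksums
 */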
static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	scrub_free_wr_ctx(&sctx->wr_ctx);

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->page_count; i++) {
			WARN_ON(!sbio->pagev[i]->page);
			scrub_block_put(sbio->pagev[i]->sblock);
		}
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	scrub_free_csums(sctx);
	kfree(sctx);
}

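/*
 * allocate and initialize a scrub context for one device; the fixed pool
 * of SCRUB_BIOS_PER_SCTX scrub bios is pre-allocated here and chained
 * through first_free/next_free
 */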
static noinline_for_stack
struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int		i;
	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
	int pages_per_rd_bio;
	int ret;

	/*
	 * the setting of pages_per_rd_bio is correct for scrub but might
	 * be wrong for the dev_replace code where we might read from
	 * different devices in the initial huge bios. However, that
	 * code is able to correctly handle the case when adding a page
	 * to a bio fails.
	 */
	if (dev->bdev)
		pages_per_rd_bio = min_t(int, SCRUB_PAGES_PER_RD_BIO,
					 bio_get_nr_vecs(dev->bdev));
	else
		pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
	sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
	if (!sctx)
		goto nomem;
	sctx->is_dev_replace = is_dev_replace;
	sctx->pages_per_rd_bio = pages_per_rd_bio;
	sctx->curr = -1;
	sctx->dev_root = dev->dev_root;
	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
		if (!sbio)
			goto nomem;
		sctx->bios[i] = sbio;

		sbio->index = i;
		sbio->sctx = sctx;
		sbio->page_count = 0;
		btrfs_init_work(&sbio->work, btrfs_scrub_helper,
				scrub_bio_end_io_worker, NULL, NULL);

		if (i != SCRUB_BIOS_PER_SCTX - 1)
			sctx->bios[i]->next_free = i + 1;
		else
			sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	sctx->nodesize = dev->dev_root->nodesize;
	sctx->sectorsize = dev->dev_root->sectorsize;
	atomic_set(&sctx->bios_in_flight, 0);
	atomic_set(&sctx->workers_pending, 0);
	atomic_set(&sctx->cancel_req, 0);
	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
	INIT_LIST_HEAD(&sctx->csum_list);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);

	ret = scrub_setup_wr_ctx(sctx, &sctx->wr_ctx, fs_info,
				 fs_info->dev_replace.tgtdev, is_dev_replace);
	if (ret) {
		scrub_free_ctx(sctx);
		return ERR_PTR(ret);
	}
	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}

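/*
 * backref walk callback, called once per inode that references the
 * corrupted extent; resolves and prints all file paths to that inode
 */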
static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
				     void *warn_ctx)
{
	u64 isize;
	u32 nlink;
	int ret;
	int i;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = warn_ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key root_key;
	struct btrfs_key key;

	root_key.objectid = root;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/*
	 * this makes the path point to (inum INODE_ITEM ioff)
	 */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
	if (ret) {
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
					struct btrfs_inode_item);
	isize = btrfs_inode_size(eb, inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	ipath = init_ipath(4096, local_root, swarn->path);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;

	/*
	 * we deliberately ignore the fact that ipath might have been too
	 * small to hold all of the paths here
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		printk_in_rcu(KERN_WARNING "BTRFS: %s at logical %llu on dev "
			"%s, sector %llu, root %llu, inode %llu, offset %llu, "
			"length %llu, links %u (path: %s)\n", swarn->errstr,
			swarn->logical, rcu_str_deref(swarn->dev->name),
			(unsigned long long)swarn->sector, root, inum, offset,
			min(isize - offset, (u64)PAGE_SIZE), nlink,
			(char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	return 0;

err:
	printk_in_rcu(KERN_WARNING "BTRFS: %s at logical %llu on dev "
		"%s, sector %llu, root %llu, inode %llu, offset %llu: path "
		"resolving failed with ret=%d\n", swarn->errstr,
		swarn->logical, rcu_str_deref(swarn->dev->name),
		(unsigned long long)swarn->sector, root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}

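/*
 * print a warning for a corrupted block: for metadata, the tree backrefs
 * are reported; for data, the affected file paths are resolved and printed
 */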
static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	unsigned long ptr = 0;
	u64 extent_item_pos;
	u64 flags = 0;
	u64 ref_root;
	u32 item_size;
	u8 ref_level;
	int ret;

	WARN_ON(sblock->page_count < 1);
	dev = sblock->pagev[0]->dev;
	fs_info = sblock->sctx->dev_root->fs_info;

	path = btrfs_alloc_path();
	if (!path)
		return;

	swarn.sector = (sblock->pagev[0]->physical) >> 9;
	swarn.logical = sblock->pagev[0]->logical;
	swarn.errstr = errstr;
	swarn.dev = NULL;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
				  &flags);
	if (ret < 0)
		goto out;

	extent_item_pos = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			printk_in_rcu(KERN_WARNING
				"BTRFS: %s at logical %llu on dev %s, "
				"sector %llu: metadata %s (level %d) in tree "
				"%llu\n", errstr, swarn.logical,
				rcu_str_deref(dev->name),
				(unsigned long long)swarn.sector,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
		btrfs_release_path(path);
	} else {
		btrfs_release_path(path);
		swarn.path = path;
		swarn.dev = dev;
		iterate_extent_inodes(fs_info, found_key.objectid,
				      extent_item_pos, 1,
				      scrub_print_warning_inode, &swarn);
	}

out:
	btrfs_free_path(path);
}

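/*
 * callback for iterate_inodes_from_logical(): either writes back a good
 * in-memory copy of the page or forces a read of the bad mirror through
 * the page cache so that the generic read-repair path rewrites the sector
 */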
static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
{
	struct page *page = NULL;
	unsigned long index;
	struct scrub_fixup_nodatasum *fixup = fixup_ctx;
	int ret;
	int corrected = 0;
	struct btrfs_key key;
	struct inode *inode = NULL;
	struct btrfs_fs_info *fs_info;
	u64 end = offset + PAGE_SIZE - 1;
	struct btrfs_root *local_root;
	int srcu_index;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	fs_info = fixup->root->fs_info;
	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(local_root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return PTR_ERR(local_root);
	}

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	index = offset >> PAGE_CACHE_SHIFT;

	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page)) {
		if (PageDirty(page)) {
			/*
			 * we need to write the data to the defect sector. the
			 * data that was in that sector is not in memory,
			 * because the page was modified. we must not write the
			 * modified page to that sector.
			 *
			 * TODO: what could be done here: wait for the delalloc
			 *       runner to write out that page (might involve
			 *       COW) and see whether the sector is still
			 *       referenced afterwards.
			 *
			 * For the meantime, we'll treat this error as
			 * uncorrectable, although there is a chance that a
			 * later scrub will find the bad sector again and that
			 * there's no dirty page in memory then.
			 */
			ret = -EIO;
			goto out;
		}
		ret = repair_io_failure(inode, offset, PAGE_SIZE,
					fixup->logical, page,
					offset - page_offset(page),
					fixup->mirror_num);
		unlock_page(page);
		corrected = !ret;
	} else {
		/*
		 * we need to get good data first. the general readpage path
		 * will call repair_io_failure for us, we just have to make
		 * sure we read the bad mirror.
		 */
		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
					EXTENT_DAMAGED, GFP_NOFS);
		if (ret) {
			/* set_extent_bits should give proper error */
			WARN_ON(ret > 0);
			if (ret > 0)
				ret = -EFAULT;
			goto out;
		}

		ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
						btrfs_get_extent,
						fixup->mirror_num);
		wait_on_page_locked(page);

		corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
						end, EXTENT_DAMAGED, 0, NULL);
		if (!corrected)
			clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
						EXTENT_DAMAGED, GFP_NOFS);
	}

out:
	if (page)
		put_page(page);

	iput(inode);

	if (ret < 0)
		return ret;

	if (ret == 0 && corrected) {
		/*
		 * we only need to call readpage for one of the inodes belonging
		 * to this extent. so make iterate_extent_inodes stop
		 */
		return 1;
	}

	return -EIO;
}

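/*
 * worker for the nodatasum fixup case; runs in a transaction context and
 * accounts the result as corrected or uncorrectable in the scrub stats
 */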
static void scrub_fixup_nodatasum(struct btrfs_work *work)
{
	int ret;
	struct scrub_fixup_nodatasum *fixup;
	struct scrub_ctx *sctx;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_path *path;
	int uncorrectable = 0;

	fixup = container_of(work, struct scrub_fixup_nodatasum, work);
	sctx = fixup->sctx;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.malloc_errors;
		spin_unlock(&sctx->stat_lock);
		uncorrectable = 1;
		goto out;
	}

	trans = btrfs_join_transaction(fixup->root);
	if (IS_ERR(trans)) {
		uncorrectable = 1;
		goto out;
	}

	/*
	 * the idea is to trigger a regular read through the standard path. we
	 * read a page from the (failed) logical address by specifying the
	 * corresponding copynum of the failed sector. thus, that readpage is
	 * expected to fail.
	 * that is the point where on-the-fly error correction will kick in
	 * (once it's finished) and rewrite the failed sector if a good copy
	 * can be found.
	 */
	ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
						path, scrub_fixup_readpage,
						fixup);
	if (ret < 0) {
		uncorrectable = 1;
		goto out;
	}
	WARN_ON(ret != 1);

	spin_lock(&sctx->stat_lock);
	++sctx->stat.corrected_errors;
	spin_unlock(&sctx->stat_lock);

out:
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, fixup->root);
	if (uncorrectable) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.uncorrectable_errors;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_replace_stats_inc(
			&sctx->dev_root->fs_info->dev_replace.
			num_uncorrectable_read_errors);
		printk_ratelimited_in_rcu(KERN_ERR "BTRFS: "
		    "unable to fixup (nodatasum) error at logical %llu on dev %s\n",
			fixup->logical, rcu_str_deref(fixup->dev->name));
	}

	btrfs_free_path(path);
	kfree(fixup);

	scrub_pending_trans_workers_dec(sctx);
}

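/*
 * reference counting for the scrub_recover that is shared by all pages of
 * a recheck block; the last put releases the cached btrfs_bio as well
 */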
static inline void scrub_get_recover(struct scrub_recover *recover)
{
	atomic_inc(&recover->refs);
}

static inline void scrub_put_recover(struct scrub_recover *recover)
{
	if (atomic_dec_and_test(&recover->refs)) {
		btrfs_put_bbio(recover->bbio);
		kfree(recover);
	}
}

/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
	struct scrub_ctx *sctx = sblock_to_check->sctx;
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	u64 length;
	u64 logical;
	u64 generation;
	unsigned int failed_mirror_index;
	unsigned int is_metadata;
	unsigned int have_csum;
	u8 *csum;
	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
	struct scrub_block *sblock_bad;
	int ret;
	int mirror_index;
	int page_num;
	int success;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	BUG_ON(sblock_to_check->page_count < 1);
	fs_info = sctx->dev_root->fs_info;
	if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
		/*
		 * if we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		return 0;
	}
	length = sblock_to_check->page_count * PAGE_SIZE;
	logical = sblock_to_check->pagev[0]->logical;
	generation = sblock_to_check->pagev[0]->generation;
	BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
	failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
	is_metadata = !(sblock_to_check->pagev[0]->flags &
			BTRFS_EXTENT_FLAG_DATA);
	have_csum = sblock_to_check->pagev[0]->have_csum;
	csum = sblock_to_check->pagev[0]->csum;
	dev = sblock_to_check->pagev[0]->dev;

	if (sctx->is_dev_replace && !is_metadata && !have_csum) {
		sblocks_for_recheck = NULL;
		goto nodatasum_case;
	}

	/*
	 * read all mirrors one after the other. This includes re-reading
	 * the extent or metadata block that failed (the read whose
	 * failure caused this fixup code to be called), this time
	 * page by page, in order to know which pages
	 * caused I/O errors and which ones are good (for all mirrors).
	 * The goal is to handle the situation when more than one
	 * mirror contains I/O errors, but the errors do not
	 * overlap, i.e. the data can be repaired by selecting the
	 * pages from those mirrors without I/O error on the
	 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
	 * would be that mirror #1 has an I/O error on the first page,
	 * the second page is good, and mirror #2 has an I/O error on
	 * the second page, but the first page is good.
	 * Then the first page of the first mirror can be repaired by
	 * taking the first page of the second mirror, and the
	 * second page of the second mirror can be repaired by
	 * copying the contents of the 2nd page of the 1st mirror.
	 * One more note: if the pages of one mirror contain I/O
	 * errors, the checksum cannot be verified. In order to get
	 * the best data for repairing, the first attempt is to find
	 * a mirror without I/O errors and with a validated checksum.
	 * Only if this is not possible, the pages are picked from
	 * mirrors with I/O errors without considering the checksum.
	 * If the latter is the case, at the end, the checksum of the
	 * repaired area is verified in order to correctly maintain
	 * the statistics.
	 */

	sblocks_for_recheck = kzalloc(BTRFS_MAX_MIRRORS *
				     sizeof(*sblocks_for_recheck),
				     GFP_NOFS);
	if (!sblocks_for_recheck) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}

	/* setup the context, map the logical blocks and alloc the pages */
	ret = scrub_setup_recheck_block(sctx, fs_info, sblock_to_check, length,
					logical, sblocks_for_recheck);
	if (ret) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}
	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
	sblock_bad = sblocks_for_recheck + failed_mirror_index;

	/* build and submit the bios for the failed mirror, check checksums */
	scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
			    csum, generation, sctx->csum_size, 1);

	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
	    sblock_bad->no_io_error_seen) {
		/*
		 * the error disappeared after reading page by page, or
		 * the area was part of a huge bio and other parts of the
		 * bio caused I/O errors, or the block layer merged several
		 * read requests into one and the error is caused by a
		 * different bio (usually one of the two latter cases is
		 * the cause)
		 */
		spin_lock(&sctx->stat_lock);
		sctx->stat.unverified_errors++;
		sblock_to_check->data_corrected = 1;
		spin_unlock(&sctx->stat_lock);

		if (sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock_bad);
		goto out;
	}

	if (!sblock_bad->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("i/o error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
	} else if (sblock_bad->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.csum_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev,
					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
	} else if (sblock_bad->header_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.verify_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum/header error",
					    sblock_to_check);
		if (sblock_bad->generation_error)
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
	}

	if (sctx->readonly) {
		ASSERT(!sctx->is_dev_replace);
		goto out;
	}

	if (!is_metadata && !have_csum) {
		struct scrub_fixup_nodatasum *fixup_nodatasum;

		WARN_ON(sctx->is_dev_replace);

nodatasum_case:

		/*
		 * !is_metadata and !have_csum, this means that the data
		 * might not be COW'ed, that it might be modified
		 * concurrently. The general strategy to work on the
		 * commit root does not help in the case when COW is not
		 * used.
		 */
		fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
		if (!fixup_nodatasum)
			goto did_not_correct_error;
		fixup_nodatasum->sctx = sctx;
		fixup_nodatasum->dev = dev;
		fixup_nodatasum->logical = logical;
		fixup_nodatasum->root = fs_info->extent_root;
		fixup_nodatasum->mirror_num = failed_mirror_index + 1;
		scrub_pending_trans_workers_inc(sctx);
		btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper,
				scrub_fixup_nodatasum, NULL, NULL);
		btrfs_queue_work(fs_info->scrub_workers,
				 &fixup_nodatasum->work);
		goto out;
	}

	/*
	 * now build and submit the bios for the other mirrors, check
	 * checksums.
	 * First try to pick the mirror which is completely without I/O
	 * errors and also does not have a checksum error.
	 * If one is found, and if a checksum is present, the full block
	 * that is known to contain an error is rewritten. Afterwards
	 * the block is known to be corrected.
	 * If a mirror is found which is completely correct, and no
	 * checksum is present, only those pages are rewritten that had
	 * an I/O error in the block to be repaired, since it cannot be
	 * determined which copy of the other pages is better (and it
	 * could happen otherwise that a correct page would be
	 * overwritten by a bad one).
	 */
	for (mirror_index = 0;
	     mirror_index < BTRFS_MAX_MIRRORS &&
	     sblocks_for_recheck[mirror_index].page_count > 0;
	     mirror_index++) {
		struct scrub_block *sblock_other;

		if (mirror_index == failed_mirror_index)
			continue;
		sblock_other = sblocks_for_recheck + mirror_index;

		/* build and submit the bios, check checksums */
		scrub_recheck_block(fs_info, sblock_other, is_metadata,
				    have_csum, csum, generation,
				    sctx->csum_size, 0);

		if (!sblock_other->header_error &&
		    !sblock_other->checksum_error &&
		    sblock_other->no_io_error_seen) {
			if (sctx->is_dev_replace) {
				scrub_write_block_to_dev_replace(sblock_other);
			} else {
				int force_write = is_metadata || have_csum;

				ret = scrub_repair_block_from_good_copy(
						sblock_bad, sblock_other,
						force_write);
			}
			if (0 == ret)
				goto corrected_error;
		}
	}

	/*
	 * for dev_replace, pick good pages and write to the target device.
	 */
	if (sctx->is_dev_replace) {
		success = 1;
		for (page_num = 0; page_num < sblock_bad->page_count;
		     page_num++) {
			int sub_success;

			sub_success = 0;
			for (mirror_index = 0;
			     mirror_index < BTRFS_MAX_MIRRORS &&
			     sblocks_for_recheck[mirror_index].page_count > 0;
			     mirror_index++) {
				struct scrub_block *sblock_other =
					sblocks_for_recheck + mirror_index;
				struct scrub_page *page_other =
					sblock_other->pagev[page_num];

				if (!page_other->io_error) {
					ret = scrub_write_page_to_dev_replace(
							sblock_other, page_num);
					if (ret == 0) {
						/* succeeded for this page */
						sub_success = 1;
						break;
					} else {
						btrfs_dev_replace_stats_inc(
							&sctx->dev_root->
							fs_info->dev_replace.
							num_write_errors);
					}
				}
			}

			if (!sub_success) {
				/*
				 * did not find a mirror to fetch the page
				 * from. scrub_write_page_to_dev_replace()
				 * handles this case (page->io_error), by
				 * filling the block with zeros before
				 * submitting the write request
				 */
				success = 0;
				ret = scrub_write_page_to_dev_replace(
						sblock_bad, page_num);
				if (ret)
					btrfs_dev_replace_stats_inc(
						&sctx->dev_root->fs_info->
						dev_replace.num_write_errors);
			}
		}

		goto out;
	}

	/*
	 * for regular scrub, repair those pages that are errored.
	 * In case of I/O errors in the area that is supposed to be
	 * repaired, continue by picking good copies of those pages.
	 * Select the good pages from mirrors to rewrite bad pages from
	 * the area to fix. Afterwards verify the checksum of the block
	 * that is supposed to be repaired. This verification step is
	 * only done for the purpose of statistic counting and for the
	 * final scrub report, whether errors remain.
	 * A perfect algorithm could make use of the checksum and try
	 * all possible combinations of pages from the different mirrors
	 * until the checksum verification succeeds. For example, when
	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
	 * of mirror #2 is readable but the final checksum test fails,
	 * then the 2nd page of mirror #3 could be tried to see whether
	 * the final checksum then succeeds. But this would be a rare
	 * exception and is therefore not implemented. At least it is
	 * avoided that the good copy is overwritten.
	 * A more useful improvement would be to pick the sectors
	 * without I/O error based on sector sizes (512 bytes on legacy
	 * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
	 * mirror could be repaired by taking 512 bytes of a different
	 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
	 * area are unreadable.
	 */

	/* can only fix I/O errors from here on */
	if (sblock_bad->no_io_error_seen)
		goto did_not_correct_error;

	success = 1;
	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
		struct scrub_page *page_bad = sblock_bad->pagev[page_num];

		if (!page_bad->io_error)
			continue;

		for (mirror_index = 0;
		     mirror_index < BTRFS_MAX_MIRRORS &&
		     sblocks_for_recheck[mirror_index].page_count > 0;
		     mirror_index++) {
			struct scrub_block *sblock_other = sblocks_for_recheck +
							   mirror_index;
			struct scrub_page *page_other = sblock_other->pagev[
							page_num];

			if (!page_other->io_error) {
				ret = scrub_repair_page_from_good_copy(
					sblock_bad, sblock_other, page_num, 0);
				if (0 == ret) {
					page_bad->io_error = 0;
					break; /* succeeded for this page */
				}
			}
		}

		if (page_bad->io_error) {
			/* did not find a mirror to copy the page from */
			success = 0;
		}
	}

	if (success) {
		if (is_metadata || have_csum) {
			/*
			 * need to verify the checksum now that all
			 * sectors on disk are repaired (the write
			 * request for data to be repaired is on its way).
			 * Just be lazy and use scrub_recheck_block()
			 * which re-reads the data before the checksum
			 * is verified, but most likely the data comes out
			 * of the page cache.
			 */
			scrub_recheck_block(fs_info, sblock_bad,
					    is_metadata, have_csum, csum,
					    generation, sctx->csum_size, 1);
			if (!sblock_bad->header_error &&
			    !sblock_bad->checksum_error &&
			    sblock_bad->no_io_error_seen)
				goto corrected_error;
			else
				goto did_not_correct_error;
		} else {
corrected_error:
			spin_lock(&sctx->stat_lock);
			sctx->stat.corrected_errors++;
			sblock_to_check->data_corrected = 1;
			spin_unlock(&sctx->stat_lock);
			printk_ratelimited_in_rcu(KERN_ERR
				"BTRFS: fixed up error at logical %llu on dev %s\n",
				logical, rcu_str_deref(dev->name));
		}
	} else {
did_not_correct_error:
		spin_lock(&sctx->stat_lock);
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		printk_ratelimited_in_rcu(KERN_ERR
			"BTRFS: unable to fixup (regular) error at logical %llu on dev %s\n",
			logical, rcu_str_deref(dev->name));
	}

out:
	if (sblocks_for_recheck) {
		for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
		     mirror_index++) {
			struct scrub_block *sblock = sblocks_for_recheck +
						     mirror_index;
			struct scrub_recover *recover;
			int page_index;

			for (page_index = 0; page_index < sblock->page_count;
			     page_index++) {
				sblock->pagev[page_index]->sblock = NULL;
				recover = sblock->pagev[page_index]->recover;
				if (recover) {
					scrub_put_recover(recover);
					sblock->pagev[page_index]->recover =
									NULL;
				}
				scrub_page_put(sblock->pagev[page_index]);
			}
		}
		kfree(sblocks_for_recheck);
	}

	return 0;
}

Zhao Lei8e5cfb52015-01-20 15:11:33 +08001298static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
Miao Xieaf8e2d12014-10-23 14:42:50 +08001299{
Zhao Lei8e5cfb52015-01-20 15:11:33 +08001300 if (bbio->raid_map) {
Zhao Leie34c3302015-01-20 15:11:31 +08001301 int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
1302
Zhao Lei8e5cfb52015-01-20 15:11:33 +08001303 if (bbio->raid_map[real_stripes - 1] == RAID6_Q_STRIPE)
Miao Xieaf8e2d12014-10-23 14:42:50 +08001304 return 3;
1305 else
1306 return 2;
1307 } else {
1308 return (int)bbio->num_stripes;
1309 }
1310}
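/*
 * Example (hypothetical bbio): for a RAID6 chunk the last real stripe
 * in raid_map is the Q stripe, so three ways to obtain the data can be
 * tried (the direct read plus two reconstruction alternatives) and 3 is
 * returned; RAID5 with only a P stripe yields 2. For mirrored profiles
 * raid_map is NULL and the mirror count is simply bbio->num_stripes.
 */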
1311
1312static inline void scrub_stripe_index_and_offset(u64 logical, u64 *raid_map,
1313 u64 mapped_length,
1314 int nstripes, int mirror,
1315 int *stripe_index,
1316 u64 *stripe_offset)
1317{
1318 int i;
1319
1320 if (raid_map) {
1321 /* RAID5/6 */
1322 for (i = 0; i < nstripes; i++) {
1323 if (raid_map[i] == RAID6_Q_STRIPE ||
1324 raid_map[i] == RAID5_P_STRIPE)
1325 continue;
1326
1327 if (logical >= raid_map[i] &&
1328 logical < raid_map[i] + mapped_length)
1329 break;
1330 }
1331
1332 *stripe_index = i;
1333 *stripe_offset = logical - raid_map[i];
1334 } else {
1335 /* The other RAID type */
1336 *stripe_index = mirror;
1337 *stripe_offset = 0;
1338 }
1339}
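/*
 * Worked example with made-up numbers: raid_map[] = { 1M, 1M + 4K,
 * RAID5_P_STRIPE } and mapped_length = 4K (one page per stripe). For
 * logical = 1M + 4K the loop matches at i = 1, so *stripe_index is 1
 * and *stripe_offset is 0. For a mirrored profile (raid_map == NULL)
 * the mirror number itself is used as the stripe index.
 */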
1340
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001341static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
Stefan Behrens3ec706c2012-11-05 15:46:42 +01001342 struct btrfs_fs_info *fs_info,
Stefan Behrensff023aa2012-11-06 11:43:11 +01001343 struct scrub_block *original_sblock,
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001344 u64 length, u64 logical,
1345 struct scrub_block *sblocks_for_recheck)
Arne Jansena2de7332011-03-08 14:14:00 +01001346{
Miao Xieaf8e2d12014-10-23 14:42:50 +08001347 struct scrub_recover *recover;
1348 struct btrfs_bio *bbio;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001349 u64 sublen;
1350 u64 mapped_length;
1351 u64 stripe_offset;
1352 int stripe_index;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001353 int page_index;
1354 int mirror_index;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001355 int nmirrors;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001356 int ret;
1357
1358 /*
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001359 * note: the two members ref_count and outstanding_pages
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001360 * are not used (and not set) in the blocks that are used for
1361 * the recheck procedure
1362 */
1363
1364 page_index = 0;
1365 while (length > 0) {
Miao Xieaf8e2d12014-10-23 14:42:50 +08001366 sublen = min_t(u64, length, PAGE_SIZE);
1367 mapped_length = sublen;
1368 bbio = NULL;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001369
1370 /*
1371 * with a length of PAGE_SIZE, each returned stripe
1372 * represents one mirror
1373 */
Miao Xieaf8e2d12014-10-23 14:42:50 +08001374 ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical,
Zhao Lei8e5cfb52015-01-20 15:11:33 +08001375 &mapped_length, &bbio, 0, 1);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001376 if (ret || !bbio || mapped_length < sublen) {
Zhao Lei6e9606d2015-01-20 15:11:34 +08001377 btrfs_put_bbio(bbio);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001378 return -EIO;
1379 }
1380
Miao Xieaf8e2d12014-10-23 14:42:50 +08001381 recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
1382 if (!recover) {
Zhao Lei6e9606d2015-01-20 15:11:34 +08001383 btrfs_put_bbio(bbio);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001384 return -ENOMEM;
1385 }
1386
1387 atomic_set(&recover->refs, 1);
1388 recover->bbio = bbio;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001389 recover->map_length = mapped_length;
1390
Stefan Behrensff023aa2012-11-06 11:43:11 +01001391 BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001392
Zhao Lei8e5cfb52015-01-20 15:11:33 +08001393 nmirrors = scrub_nr_raid_mirrors(bbio);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001394 for (mirror_index = 0; mirror_index < nmirrors;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001395 mirror_index++) {
1396 struct scrub_block *sblock;
1397 struct scrub_page *page;
1398
1399 if (mirror_index >= BTRFS_MAX_MIRRORS)
1400 continue;
1401
1402 sblock = sblocks_for_recheck + mirror_index;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001403 sblock->sctx = sctx;
1404 page = kzalloc(sizeof(*page), GFP_NOFS);
1405 if (!page) {
1406leave_nomem:
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001407 spin_lock(&sctx->stat_lock);
1408 sctx->stat.malloc_errors++;
1409 spin_unlock(&sctx->stat_lock);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001410 scrub_put_recover(recover);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001411 return -ENOMEM;
1412 }
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001413 scrub_page_get(page);
1414 sblock->pagev[page_index] = page;
1415 page->logical = logical;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001416
Zhao Lei8e5cfb52015-01-20 15:11:33 +08001417 scrub_stripe_index_and_offset(logical, bbio->raid_map,
Miao Xieaf8e2d12014-10-23 14:42:50 +08001418 mapped_length,
Zhao Leie34c3302015-01-20 15:11:31 +08001419 bbio->num_stripes -
1420 bbio->num_tgtdevs,
Miao Xieaf8e2d12014-10-23 14:42:50 +08001421 mirror_index,
1422 &stripe_index,
1423 &stripe_offset);
1424 page->physical = bbio->stripes[stripe_index].physical +
1425 stripe_offset;
1426 page->dev = bbio->stripes[stripe_index].dev;
1427
Stefan Behrensff023aa2012-11-06 11:43:11 +01001428 BUG_ON(page_index >= original_sblock->page_count);
1429 page->physical_for_dev_replace =
1430 original_sblock->pagev[page_index]->
1431 physical_for_dev_replace;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001432 /* for missing devices, dev->bdev is NULL */
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001433 page->mirror_num = mirror_index + 1;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001434 sblock->page_count++;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001435 page->page = alloc_page(GFP_NOFS);
1436 if (!page->page)
1437 goto leave_nomem;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001438
1439 scrub_get_recover(recover);
1440 page->recover = recover;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001441 }
Miao Xieaf8e2d12014-10-23 14:42:50 +08001442 scrub_put_recover(recover);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001443 length -= sublen;
1444 logical += sublen;
1445 page_index++;
1446 }
1447
1448 return 0;
1449}
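/*
 * Sketch of what the function above builds (assuming a two-mirror
 * RAID1 block of 8K): sblocks_for_recheck[0] and [1] each get two
 * scrub_pages covering the same logical addresses, but with 'physical'
 * and 'dev' taken from stripe 0 and stripe 1 of the mapping, because a
 * btrfs_map_sblock() call with a PAGE_SIZE length returns one stripe
 * per mirror.
 */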
1450
Miao Xieaf8e2d12014-10-23 14:42:50 +08001451struct scrub_bio_ret {
1452 struct completion event;
1453 int error;
1454};
1455
1456static void scrub_bio_wait_endio(struct bio *bio, int error)
1457{
1458 struct scrub_bio_ret *ret = bio->bi_private;
1459
1460 ret->error = error;
1461 complete(&ret->event);
1462}
1463
1464static inline int scrub_is_page_on_raid56(struct scrub_page *page)
1465{
Zhao Lei8e5cfb52015-01-20 15:11:33 +08001466 return page->recover && page->recover->bbio->raid_map;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001467}
1468
1469static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
1470 struct bio *bio,
1471 struct scrub_page *page)
1472{
1473 struct scrub_bio_ret done;
1474 int ret;
1475
1476 init_completion(&done.event);
1477 done.error = 0;
1478 bio->bi_iter.bi_sector = page->logical >> 9;
1479 bio->bi_private = &done;
1480 bio->bi_end_io = scrub_bio_wait_endio;
1481
1482 ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio,
Miao Xieaf8e2d12014-10-23 14:42:50 +08001483 page->recover->map_length,
Miao Xie42452152014-11-25 16:39:28 +08001484 page->mirror_num, 0);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001485 if (ret)
1486 return ret;
1487
1488 wait_for_completion(&done.event);
1489 if (done.error)
1490 return -EIO;
1491
1492 return 0;
1493}
1494
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001495/*
1496	 * this function will check the on-disk data for checksum errors, header
1497	 * errors and read I/O errors. If any I/O errors happen, the exact pages
1498	 * that had errors are marked as bad. The goal is to enable scrub
1499	 * to take the pages that are free of errors from all the mirrors so that
1500	 * the pages that had errors in the just-handled mirror can be repaired.
1501 */
Stefan Behrens34f5c8e2012-11-02 16:16:26 +01001502static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
1503 struct scrub_block *sblock, int is_metadata,
1504 int have_csum, u8 *csum, u64 generation,
Miao Xieaf8e2d12014-10-23 14:42:50 +08001505 u16 csum_size, int retry_failed_mirror)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001506{
1507 int page_num;
1508
1509 sblock->no_io_error_seen = 1;
1510 sblock->header_error = 0;
1511 sblock->checksum_error = 0;
1512
1513 for (page_num = 0; page_num < sblock->page_count; page_num++) {
1514 struct bio *bio;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001515 struct scrub_page *page = sblock->pagev[page_num];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001516
Stefan Behrens442a4f62012-05-25 16:06:08 +02001517 if (page->dev->bdev == NULL) {
Stefan Behrensea9947b2012-05-04 15:16:07 -04001518 page->io_error = 1;
1519 sblock->no_io_error_seen = 0;
1520 continue;
1521 }
1522
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001523 WARN_ON(!page->page);
Chris Mason9be33952013-05-17 18:30:14 -04001524 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
Stefan Behrens34f5c8e2012-11-02 16:16:26 +01001525 if (!bio) {
1526 page->io_error = 1;
1527 sblock->no_io_error_seen = 0;
1528 continue;
1529 }
Stefan Behrens442a4f62012-05-25 16:06:08 +02001530 bio->bi_bdev = page->dev->bdev;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001531
Stefan Behrens34f5c8e2012-11-02 16:16:26 +01001532 bio_add_page(bio, page->page, PAGE_SIZE, 0);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001533 if (!retry_failed_mirror && scrub_is_page_on_raid56(page)) {
1534 if (scrub_submit_raid56_bio_wait(fs_info, bio, page))
1535 sblock->no_io_error_seen = 0;
1536 } else {
1537 bio->bi_iter.bi_sector = page->physical >> 9;
1538
1539 if (btrfsic_submit_bio_wait(READ, bio))
1540 sblock->no_io_error_seen = 0;
1541 }
Kent Overstreet33879d42013-11-23 22:33:32 -08001542
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001543 bio_put(bio);
1544 }
1545
1546 if (sblock->no_io_error_seen)
1547 scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
1548 have_csum, csum, generation,
1549 csum_size);
1550
Stefan Behrens34f5c8e2012-11-02 16:16:26 +01001551 return;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001552}
1553
Miao Xie17a9be22014-07-24 11:37:08 +08001554static inline int scrub_check_fsid(u8 fsid[],
1555 struct scrub_page *spage)
1556{
1557 struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
1558 int ret;
1559
1560 ret = memcmp(fsid, fs_devices->fsid, BTRFS_UUID_SIZE);
1561 return !ret;
1562}
1563
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001564static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
1565 struct scrub_block *sblock,
1566 int is_metadata, int have_csum,
1567 const u8 *csum, u64 generation,
1568 u16 csum_size)
1569{
1570 int page_num;
1571 u8 calculated_csum[BTRFS_CSUM_SIZE];
1572 u32 crc = ~(u32)0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001573 void *mapped_buffer;
1574
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001575 WARN_ON(!sblock->pagev[0]->page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001576 if (is_metadata) {
1577 struct btrfs_header *h;
1578
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001579 mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001580 h = (struct btrfs_header *)mapped_buffer;
1581
Qu Wenruo3cae2102013-07-16 11:19:18 +08001582 if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h) ||
Miao Xie17a9be22014-07-24 11:37:08 +08001583 !scrub_check_fsid(h->fsid, sblock->pagev[0]) ||
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001584 memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
Stefan Behrens442a4f62012-05-25 16:06:08 +02001585 BTRFS_UUID_SIZE)) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001586 sblock->header_error = 1;
Qu Wenruo3cae2102013-07-16 11:19:18 +08001587 } else if (generation != btrfs_stack_header_generation(h)) {
Stefan Behrens442a4f62012-05-25 16:06:08 +02001588 sblock->header_error = 1;
1589 sblock->generation_error = 1;
1590 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001591 csum = h->csum;
1592 } else {
1593 if (!have_csum)
1594 return;
1595
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001596 mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001597 }
1598
1599 for (page_num = 0;;) {
1600 if (page_num == 0 && is_metadata)
Liu Bob0496682013-03-14 14:57:45 +00001601 crc = btrfs_csum_data(
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001602 ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
1603 crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
1604 else
Liu Bob0496682013-03-14 14:57:45 +00001605 crc = btrfs_csum_data(mapped_buffer, crc, PAGE_SIZE);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001606
Linus Torvalds9613beb2012-03-30 12:44:29 -07001607 kunmap_atomic(mapped_buffer);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001608 page_num++;
1609 if (page_num >= sblock->page_count)
1610 break;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001611 WARN_ON(!sblock->pagev[page_num]->page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001612
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001613 mapped_buffer = kmap_atomic(sblock->pagev[page_num]->page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001614 }
1615
1616 btrfs_csum_final(crc, calculated_csum);
1617 if (memcmp(calculated_csum, csum, csum_size))
1618 sblock->checksum_error = 1;
1619}
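/*
 * Example of the checksum loop above for a 16K metadata block (assuming
 * 4K pages): the csum covers everything behind the header's csum field,
 * so page 0 contributes PAGE_SIZE - BTRFS_CSUM_SIZE bytes starting at
 * offset BTRFS_CSUM_SIZE, pages 1-3 contribute PAGE_SIZE bytes each,
 * and the final CRC is compared against the csum stored in the header
 * itself.
 */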
1620
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001621static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
1622 struct scrub_block *sblock_good,
1623 int force_write)
1624{
1625 int page_num;
1626 int ret = 0;
1627
1628 for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
1629 int ret_sub;
1630
1631 ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
1632 sblock_good,
1633 page_num,
1634 force_write);
1635 if (ret_sub)
1636 ret = ret_sub;
1637 }
1638
1639 return ret;
1640}
1641
1642static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1643 struct scrub_block *sblock_good,
1644 int page_num, int force_write)
1645{
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001646 struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1647 struct scrub_page *page_good = sblock_good->pagev[page_num];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001648
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001649 BUG_ON(page_bad->page == NULL);
1650 BUG_ON(page_good->page == NULL);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001651 if (force_write || sblock_bad->header_error ||
1652 sblock_bad->checksum_error || page_bad->io_error) {
1653 struct bio *bio;
1654 int ret;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001655
Stefan Behrensff023aa2012-11-06 11:43:11 +01001656 if (!page_bad->dev->bdev) {
Frank Holtonefe120a2013-12-20 11:37:06 -05001657 printk_ratelimited(KERN_WARNING "BTRFS: "
1658 "scrub_repair_page_from_good_copy(bdev == NULL) "
1659 "is unexpected!\n");
Stefan Behrensff023aa2012-11-06 11:43:11 +01001660 return -EIO;
1661 }
1662
Chris Mason9be33952013-05-17 18:30:14 -04001663 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
Tsutomu Itohe627ee72012-04-12 16:03:56 -04001664 if (!bio)
1665 return -EIO;
Stefan Behrens442a4f62012-05-25 16:06:08 +02001666 bio->bi_bdev = page_bad->dev->bdev;
Kent Overstreet4f024f32013-10-11 15:44:27 -07001667 bio->bi_iter.bi_sector = page_bad->physical >> 9;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001668
1669 ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
1670 if (PAGE_SIZE != ret) {
1671 bio_put(bio);
1672 return -EIO;
1673 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001674
Kent Overstreet33879d42013-11-23 22:33:32 -08001675 if (btrfsic_submit_bio_wait(WRITE, bio)) {
Stefan Behrens442a4f62012-05-25 16:06:08 +02001676 btrfs_dev_stat_inc_and_print(page_bad->dev,
1677 BTRFS_DEV_STAT_WRITE_ERRS);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001678 btrfs_dev_replace_stats_inc(
1679 &sblock_bad->sctx->dev_root->fs_info->
1680 dev_replace.num_write_errors);
Stefan Behrens442a4f62012-05-25 16:06:08 +02001681 bio_put(bio);
1682 return -EIO;
1683 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001684 bio_put(bio);
1685 }
1686
1687 return 0;
1688}
1689
Stefan Behrensff023aa2012-11-06 11:43:11 +01001690static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
1691{
1692 int page_num;
1693
Miao Xie5a6ac9e2014-11-06 17:20:58 +08001694 /*
1695	 * This block is used to check the parity on the source device,
1696	 * so the data need not be written to the destination device.
1697 */
1698 if (sblock->sparity)
1699 return;
1700
Stefan Behrensff023aa2012-11-06 11:43:11 +01001701 for (page_num = 0; page_num < sblock->page_count; page_num++) {
1702 int ret;
1703
1704 ret = scrub_write_page_to_dev_replace(sblock, page_num);
1705 if (ret)
1706 btrfs_dev_replace_stats_inc(
1707 &sblock->sctx->dev_root->fs_info->dev_replace.
1708 num_write_errors);
1709 }
1710}
1711
1712static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
1713 int page_num)
1714{
1715 struct scrub_page *spage = sblock->pagev[page_num];
1716
1717 BUG_ON(spage->page == NULL);
1718 if (spage->io_error) {
1719 void *mapped_buffer = kmap_atomic(spage->page);
1720
1721 memset(mapped_buffer, 0, PAGE_CACHE_SIZE);
1722 flush_dcache_page(spage->page);
1723 kunmap_atomic(mapped_buffer);
1724 }
1725 return scrub_add_page_to_wr_bio(sblock->sctx, spage);
1726}
1727
1728static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
1729 struct scrub_page *spage)
1730{
1731 struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1732 struct scrub_bio *sbio;
1733 int ret;
1734
1735 mutex_lock(&wr_ctx->wr_lock);
1736again:
1737 if (!wr_ctx->wr_curr_bio) {
1738 wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
1739 GFP_NOFS);
1740 if (!wr_ctx->wr_curr_bio) {
1741 mutex_unlock(&wr_ctx->wr_lock);
1742 return -ENOMEM;
1743 }
1744 wr_ctx->wr_curr_bio->sctx = sctx;
1745 wr_ctx->wr_curr_bio->page_count = 0;
1746 }
1747 sbio = wr_ctx->wr_curr_bio;
1748 if (sbio->page_count == 0) {
1749 struct bio *bio;
1750
1751 sbio->physical = spage->physical_for_dev_replace;
1752 sbio->logical = spage->logical;
1753 sbio->dev = wr_ctx->tgtdev;
1754 bio = sbio->bio;
1755 if (!bio) {
Chris Mason9be33952013-05-17 18:30:14 -04001756 bio = btrfs_io_bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001757 if (!bio) {
1758 mutex_unlock(&wr_ctx->wr_lock);
1759 return -ENOMEM;
1760 }
1761 sbio->bio = bio;
1762 }
1763
1764 bio->bi_private = sbio;
1765 bio->bi_end_io = scrub_wr_bio_end_io;
1766 bio->bi_bdev = sbio->dev->bdev;
Kent Overstreet4f024f32013-10-11 15:44:27 -07001767 bio->bi_iter.bi_sector = sbio->physical >> 9;
Stefan Behrensff023aa2012-11-06 11:43:11 +01001768 sbio->err = 0;
1769 } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1770 spage->physical_for_dev_replace ||
1771 sbio->logical + sbio->page_count * PAGE_SIZE !=
1772 spage->logical) {
1773 scrub_wr_submit(sctx);
1774 goto again;
1775 }
1776
1777 ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1778 if (ret != PAGE_SIZE) {
1779 if (sbio->page_count < 1) {
1780 bio_put(sbio->bio);
1781 sbio->bio = NULL;
1782 mutex_unlock(&wr_ctx->wr_lock);
1783 return -EIO;
1784 }
1785 scrub_wr_submit(sctx);
1786 goto again;
1787 }
1788
1789 sbio->pagev[sbio->page_count] = spage;
1790 scrub_page_get(spage);
1791 sbio->page_count++;
1792 if (sbio->page_count == wr_ctx->pages_per_wr_bio)
1793 scrub_wr_submit(sctx);
1794 mutex_unlock(&wr_ctx->wr_lock);
1795
1796 return 0;
1797}
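/*
 * Example of the contiguity check above (hypothetical addresses): if
 * the current write bio ends at physical_for_dev_replace 1M after two
 * pages, a page whose physical_for_dev_replace is 1M (with a matching
 * logical) is appended to the same bio, while a page at 2M forces
 * scrub_wr_submit() and a fresh bio. The bio is also submitted once
 * pages_per_wr_bio pages have been collected.
 */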
1798
1799static void scrub_wr_submit(struct scrub_ctx *sctx)
1800{
1801 struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1802 struct scrub_bio *sbio;
1803
1804 if (!wr_ctx->wr_curr_bio)
1805 return;
1806
1807 sbio = wr_ctx->wr_curr_bio;
1808 wr_ctx->wr_curr_bio = NULL;
1809 WARN_ON(!sbio->bio->bi_bdev);
1810 scrub_pending_bio_inc(sctx);
1811 /* process all writes in a single worker thread. Then the block layer
1812	 * orders the requests before sending them to the driver, which
1813 * doubled the write performance on spinning disks when measured
1814 * with Linux 3.5 */
1815 btrfsic_submit_bio(WRITE, sbio->bio);
1816}
1817
1818static void scrub_wr_bio_end_io(struct bio *bio, int err)
1819{
1820 struct scrub_bio *sbio = bio->bi_private;
1821 struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
1822
1823 sbio->err = err;
1824 sbio->bio = bio;
1825
Liu Bo9e0af232014-08-15 23:36:53 +08001826 btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
1827 scrub_wr_bio_end_io_worker, NULL, NULL);
Qu Wenruo0339ef22014-02-28 10:46:17 +08001828 btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001829}
1830
1831static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
1832{
1833 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
1834 struct scrub_ctx *sctx = sbio->sctx;
1835 int i;
1836
1837 WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
1838 if (sbio->err) {
1839 struct btrfs_dev_replace *dev_replace =
1840 &sbio->sctx->dev_root->fs_info->dev_replace;
1841
1842 for (i = 0; i < sbio->page_count; i++) {
1843 struct scrub_page *spage = sbio->pagev[i];
1844
1845 spage->io_error = 1;
1846 btrfs_dev_replace_stats_inc(&dev_replace->
1847 num_write_errors);
1848 }
1849 }
1850
1851 for (i = 0; i < sbio->page_count; i++)
1852 scrub_page_put(sbio->pagev[i]);
1853
1854 bio_put(sbio->bio);
1855 kfree(sbio);
1856 scrub_pending_bio_dec(sctx);
1857}
1858
1859static int scrub_checksum(struct scrub_block *sblock)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001860{
1861 u64 flags;
1862 int ret;
1863
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001864 WARN_ON(sblock->page_count < 1);
1865 flags = sblock->pagev[0]->flags;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001866 ret = 0;
1867 if (flags & BTRFS_EXTENT_FLAG_DATA)
1868 ret = scrub_checksum_data(sblock);
1869 else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1870 ret = scrub_checksum_tree_block(sblock);
1871 else if (flags & BTRFS_EXTENT_FLAG_SUPER)
1872 (void)scrub_checksum_super(sblock);
1873 else
1874 WARN_ON(1);
1875 if (ret)
1876 scrub_handle_errored_block(sblock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001877
1878 return ret;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001879}
1880
1881static int scrub_checksum_data(struct scrub_block *sblock)
1882{
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001883 struct scrub_ctx *sctx = sblock->sctx;
Arne Jansena2de7332011-03-08 14:14:00 +01001884 u8 csum[BTRFS_CSUM_SIZE];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001885 u8 *on_disk_csum;
1886 struct page *page;
1887 void *buffer;
Arne Jansena2de7332011-03-08 14:14:00 +01001888 u32 crc = ~(u32)0;
1889 int fail = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001890 u64 len;
1891 int index;
Arne Jansena2de7332011-03-08 14:14:00 +01001892
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001893 BUG_ON(sblock->page_count < 1);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001894 if (!sblock->pagev[0]->have_csum)
Arne Jansena2de7332011-03-08 14:14:00 +01001895 return 0;
1896
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001897 on_disk_csum = sblock->pagev[0]->csum;
1898 page = sblock->pagev[0]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07001899 buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001900
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001901 len = sctx->sectorsize;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001902 index = 0;
1903 for (;;) {
1904 u64 l = min_t(u64, len, PAGE_SIZE);
1905
Liu Bob0496682013-03-14 14:57:45 +00001906 crc = btrfs_csum_data(buffer, crc, l);
Linus Torvalds9613beb2012-03-30 12:44:29 -07001907 kunmap_atomic(buffer);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001908 len -= l;
1909 if (len == 0)
1910 break;
1911 index++;
1912 BUG_ON(index >= sblock->page_count);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001913 BUG_ON(!sblock->pagev[index]->page);
1914 page = sblock->pagev[index]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07001915 buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001916 }
1917
Arne Jansena2de7332011-03-08 14:14:00 +01001918 btrfs_csum_final(crc, csum);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001919 if (memcmp(csum, on_disk_csum, sctx->csum_size))
Arne Jansena2de7332011-03-08 14:14:00 +01001920 fail = 1;
1921
Arne Jansena2de7332011-03-08 14:14:00 +01001922 return fail;
1923}
1924
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001925static int scrub_checksum_tree_block(struct scrub_block *sblock)
Arne Jansena2de7332011-03-08 14:14:00 +01001926{
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001927 struct scrub_ctx *sctx = sblock->sctx;
Arne Jansena2de7332011-03-08 14:14:00 +01001928 struct btrfs_header *h;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01001929 struct btrfs_root *root = sctx->dev_root;
Arne Jansena2de7332011-03-08 14:14:00 +01001930 struct btrfs_fs_info *fs_info = root->fs_info;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001931 u8 calculated_csum[BTRFS_CSUM_SIZE];
1932 u8 on_disk_csum[BTRFS_CSUM_SIZE];
1933 struct page *page;
1934 void *mapped_buffer;
1935 u64 mapped_size;
1936 void *p;
Arne Jansena2de7332011-03-08 14:14:00 +01001937 u32 crc = ~(u32)0;
1938 int fail = 0;
1939 int crc_fail = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001940 u64 len;
1941 int index;
1942
1943 BUG_ON(sblock->page_count < 1);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001944 page = sblock->pagev[0]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07001945 mapped_buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001946 h = (struct btrfs_header *)mapped_buffer;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001947 memcpy(on_disk_csum, h->csum, sctx->csum_size);
Arne Jansena2de7332011-03-08 14:14:00 +01001948
1949 /*
1950 * we don't use the getter functions here, as we
1951 * a) don't have an extent buffer and
1952 * b) the page is already kmapped
1953 */
Arne Jansena2de7332011-03-08 14:14:00 +01001954
Qu Wenruo3cae2102013-07-16 11:19:18 +08001955 if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
Arne Jansena2de7332011-03-08 14:14:00 +01001956 ++fail;
1957
Qu Wenruo3cae2102013-07-16 11:19:18 +08001958 if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h))
Arne Jansena2de7332011-03-08 14:14:00 +01001959 ++fail;
1960
Miao Xie17a9be22014-07-24 11:37:08 +08001961 if (!scrub_check_fsid(h->fsid, sblock->pagev[0]))
Arne Jansena2de7332011-03-08 14:14:00 +01001962 ++fail;
1963
1964 if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1965 BTRFS_UUID_SIZE))
1966 ++fail;
1967
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001968 len = sctx->nodesize - BTRFS_CSUM_SIZE;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001969 mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1970 p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1971 index = 0;
1972 for (;;) {
1973 u64 l = min_t(u64, len, mapped_size);
1974
Liu Bob0496682013-03-14 14:57:45 +00001975 crc = btrfs_csum_data(p, crc, l);
Linus Torvalds9613beb2012-03-30 12:44:29 -07001976 kunmap_atomic(mapped_buffer);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001977 len -= l;
1978 if (len == 0)
1979 break;
1980 index++;
1981 BUG_ON(index >= sblock->page_count);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001982 BUG_ON(!sblock->pagev[index]->page);
1983 page = sblock->pagev[index]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07001984 mapped_buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001985 mapped_size = PAGE_SIZE;
1986 p = mapped_buffer;
1987 }
1988
1989 btrfs_csum_final(crc, calculated_csum);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001990 if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
Arne Jansena2de7332011-03-08 14:14:00 +01001991 ++crc_fail;
1992
Arne Jansena2de7332011-03-08 14:14:00 +01001993 return fail || crc_fail;
1994}
1995
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001996static int scrub_checksum_super(struct scrub_block *sblock)
Arne Jansena2de7332011-03-08 14:14:00 +01001997{
1998 struct btrfs_super_block *s;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001999 struct scrub_ctx *sctx = sblock->sctx;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002000 u8 calculated_csum[BTRFS_CSUM_SIZE];
2001 u8 on_disk_csum[BTRFS_CSUM_SIZE];
2002 struct page *page;
2003 void *mapped_buffer;
2004 u64 mapped_size;
2005 void *p;
Arne Jansena2de7332011-03-08 14:14:00 +01002006 u32 crc = ~(u32)0;
Stefan Behrens442a4f62012-05-25 16:06:08 +02002007 int fail_gen = 0;
2008 int fail_cor = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002009 u64 len;
2010 int index;
Arne Jansena2de7332011-03-08 14:14:00 +01002011
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002012 BUG_ON(sblock->page_count < 1);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002013 page = sblock->pagev[0]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07002014 mapped_buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002015 s = (struct btrfs_super_block *)mapped_buffer;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002016 memcpy(on_disk_csum, s->csum, sctx->csum_size);
Arne Jansena2de7332011-03-08 14:14:00 +01002017
Qu Wenruo3cae2102013-07-16 11:19:18 +08002018 if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
Stefan Behrens442a4f62012-05-25 16:06:08 +02002019 ++fail_cor;
Arne Jansena2de7332011-03-08 14:14:00 +01002020
Qu Wenruo3cae2102013-07-16 11:19:18 +08002021 if (sblock->pagev[0]->generation != btrfs_super_generation(s))
Stefan Behrens442a4f62012-05-25 16:06:08 +02002022 ++fail_gen;
Arne Jansena2de7332011-03-08 14:14:00 +01002023
Miao Xie17a9be22014-07-24 11:37:08 +08002024 if (!scrub_check_fsid(s->fsid, sblock->pagev[0]))
Stefan Behrens442a4f62012-05-25 16:06:08 +02002025 ++fail_cor;
Arne Jansena2de7332011-03-08 14:14:00 +01002026
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002027 len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
2028 mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
2029 p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
2030 index = 0;
2031 for (;;) {
2032 u64 l = min_t(u64, len, mapped_size);
2033
Liu Bob0496682013-03-14 14:57:45 +00002034 crc = btrfs_csum_data(p, crc, l);
Linus Torvalds9613beb2012-03-30 12:44:29 -07002035 kunmap_atomic(mapped_buffer);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002036 len -= l;
2037 if (len == 0)
2038 break;
2039 index++;
2040 BUG_ON(index >= sblock->page_count);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002041 BUG_ON(!sblock->pagev[index]->page);
2042 page = sblock->pagev[index]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07002043 mapped_buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002044 mapped_size = PAGE_SIZE;
2045 p = mapped_buffer;
2046 }
2047
2048 btrfs_csum_final(crc, calculated_csum);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002049 if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
Stefan Behrens442a4f62012-05-25 16:06:08 +02002050 ++fail_cor;
Arne Jansena2de7332011-03-08 14:14:00 +01002051
Stefan Behrens442a4f62012-05-25 16:06:08 +02002052 if (fail_cor + fail_gen) {
Arne Jansena2de7332011-03-08 14:14:00 +01002053 /*
2054 * if we find an error in a super block, we just report it.
2055	 * The super blocks will get rewritten with the next transaction
2056	 * commit anyway.
2057 */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002058 spin_lock(&sctx->stat_lock);
2059 ++sctx->stat.super_errors;
2060 spin_unlock(&sctx->stat_lock);
Stefan Behrens442a4f62012-05-25 16:06:08 +02002061 if (fail_cor)
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002062 btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
Stefan Behrens442a4f62012-05-25 16:06:08 +02002063 BTRFS_DEV_STAT_CORRUPTION_ERRS);
2064 else
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002065 btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
Stefan Behrens442a4f62012-05-25 16:06:08 +02002066 BTRFS_DEV_STAT_GENERATION_ERRS);
Arne Jansena2de7332011-03-08 14:14:00 +01002067 }
2068
Stefan Behrens442a4f62012-05-25 16:06:08 +02002069 return fail_cor + fail_gen;
Arne Jansena2de7332011-03-08 14:14:00 +01002070}
2071
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002072static void scrub_block_get(struct scrub_block *sblock)
2073{
2074 atomic_inc(&sblock->ref_count);
2075}
2076
2077static void scrub_block_put(struct scrub_block *sblock)
2078{
2079 if (atomic_dec_and_test(&sblock->ref_count)) {
2080 int i;
2081
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002082 if (sblock->sparity)
2083 scrub_parity_put(sblock->sparity);
2084
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002085 for (i = 0; i < sblock->page_count; i++)
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002086 scrub_page_put(sblock->pagev[i]);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002087 kfree(sblock);
2088 }
2089}
2090
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002091static void scrub_page_get(struct scrub_page *spage)
2092{
2093 atomic_inc(&spage->ref_count);
2094}
2095
2096static void scrub_page_put(struct scrub_page *spage)
2097{
2098 if (atomic_dec_and_test(&spage->ref_count)) {
2099 if (spage->page)
2100 __free_page(spage->page);
2101 kfree(spage);
2102 }
2103}
2104
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002105static void scrub_submit(struct scrub_ctx *sctx)
Arne Jansena2de7332011-03-08 14:14:00 +01002106{
2107 struct scrub_bio *sbio;
2108
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002109 if (sctx->curr == -1)
Stefan Behrens1623ede2012-03-27 14:21:26 -04002110 return;
Arne Jansena2de7332011-03-08 14:14:00 +01002111
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002112 sbio = sctx->bios[sctx->curr];
2113 sctx->curr = -1;
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01002114 scrub_pending_bio_inc(sctx);
Arne Jansena2de7332011-03-08 14:14:00 +01002115
Stefan Behrensff023aa2012-11-06 11:43:11 +01002116 if (!sbio->bio->bi_bdev) {
2117 /*
2118	 * this case should not happen. If btrfs_map_block() were
2119	 * wrong, it could occur for dev-replace operations on
2120	 * missing devices when no mirrors are available, but in
2121	 * that case the mount should already have failed.
2122 * This case is handled correctly (but _very_ slowly).
2123 */
2124 printk_ratelimited(KERN_WARNING
Frank Holtonefe120a2013-12-20 11:37:06 -05002125 "BTRFS: scrub_submit(bio bdev == NULL) is unexpected!\n");
Stefan Behrensff023aa2012-11-06 11:43:11 +01002126 bio_endio(sbio->bio, -EIO);
2127 } else {
2128 btrfsic_submit_bio(READ, sbio->bio);
2129 }
Arne Jansena2de7332011-03-08 14:14:00 +01002130}
2131
Stefan Behrensff023aa2012-11-06 11:43:11 +01002132static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
2133 struct scrub_page *spage)
Arne Jansena2de7332011-03-08 14:14:00 +01002134{
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002135 struct scrub_block *sblock = spage->sblock;
Arne Jansena2de7332011-03-08 14:14:00 +01002136 struct scrub_bio *sbio;
Arne Jansen69f4cb52011-11-11 08:17:10 -05002137 int ret;
Arne Jansena2de7332011-03-08 14:14:00 +01002138
2139again:
2140 /*
2141 * grab a fresh bio or wait for one to become available
2142 */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002143 while (sctx->curr == -1) {
2144 spin_lock(&sctx->list_lock);
2145 sctx->curr = sctx->first_free;
2146 if (sctx->curr != -1) {
2147 sctx->first_free = sctx->bios[sctx->curr]->next_free;
2148 sctx->bios[sctx->curr]->next_free = -1;
2149 sctx->bios[sctx->curr]->page_count = 0;
2150 spin_unlock(&sctx->list_lock);
Arne Jansena2de7332011-03-08 14:14:00 +01002151 } else {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002152 spin_unlock(&sctx->list_lock);
2153 wait_event(sctx->list_wait, sctx->first_free != -1);
Arne Jansena2de7332011-03-08 14:14:00 +01002154 }
2155 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002156 sbio = sctx->bios[sctx->curr];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002157 if (sbio->page_count == 0) {
Arne Jansen69f4cb52011-11-11 08:17:10 -05002158 struct bio *bio;
2159
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002160 sbio->physical = spage->physical;
2161 sbio->logical = spage->logical;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002162 sbio->dev = spage->dev;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002163 bio = sbio->bio;
2164 if (!bio) {
Chris Mason9be33952013-05-17 18:30:14 -04002165 bio = btrfs_io_bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002166 if (!bio)
2167 return -ENOMEM;
2168 sbio->bio = bio;
2169 }
Arne Jansen69f4cb52011-11-11 08:17:10 -05002170
2171 bio->bi_private = sbio;
2172 bio->bi_end_io = scrub_bio_end_io;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002173 bio->bi_bdev = sbio->dev->bdev;
Kent Overstreet4f024f32013-10-11 15:44:27 -07002174 bio->bi_iter.bi_sector = sbio->physical >> 9;
Arne Jansen69f4cb52011-11-11 08:17:10 -05002175 sbio->err = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002176 } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
2177 spage->physical ||
2178 sbio->logical + sbio->page_count * PAGE_SIZE !=
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002179 spage->logical ||
2180 sbio->dev != spage->dev) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002181 scrub_submit(sctx);
Arne Jansen69f4cb52011-11-11 08:17:10 -05002182 goto again;
2183 }
2184
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002185 sbio->pagev[sbio->page_count] = spage;
2186 ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
2187 if (ret != PAGE_SIZE) {
2188 if (sbio->page_count < 1) {
2189 bio_put(sbio->bio);
2190 sbio->bio = NULL;
2191 return -EIO;
2192 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002193 scrub_submit(sctx);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002194 goto again;
Arne Jansena2de7332011-03-08 14:14:00 +01002195 }
Arne Jansen1bc87792011-05-28 21:57:55 +02002196
Stefan Behrensff023aa2012-11-06 11:43:11 +01002197 scrub_block_get(sblock); /* one for the page added to the bio */
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002198 atomic_inc(&sblock->outstanding_pages);
2199 sbio->page_count++;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002200 if (sbio->page_count == sctx->pages_per_rd_bio)
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002201 scrub_submit(sctx);
Arne Jansena2de7332011-03-08 14:14:00 +01002202
2203 return 0;
2204}
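/*
 * Sketch of the free-list handling above (assumed indices): with
 * sctx->first_free == 3, the caller takes bios[3], sets sctx->curr to 3
 * and advances first_free to bios[3]->next_free. On bio completion the
 * worker pushes the index back onto the list, after which a waiter
 * blocked in the wait_event() above can proceed.
 */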
2205
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002206static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002207 u64 physical, struct btrfs_device *dev, u64 flags,
Stefan Behrensff023aa2012-11-06 11:43:11 +01002208 u64 gen, int mirror_num, u8 *csum, int force,
2209 u64 physical_for_dev_replace)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002210{
2211 struct scrub_block *sblock;
2212 int index;
2213
2214 sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
2215 if (!sblock) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002216 spin_lock(&sctx->stat_lock);
2217 sctx->stat.malloc_errors++;
2218 spin_unlock(&sctx->stat_lock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002219 return -ENOMEM;
2220 }
2221
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002222 /* one ref inside this function, plus one for each page added to
2223 * a bio later on */
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002224 atomic_set(&sblock->ref_count, 1);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002225 sblock->sctx = sctx;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002226 sblock->no_io_error_seen = 1;
2227
2228 for (index = 0; len > 0; index++) {
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002229 struct scrub_page *spage;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002230 u64 l = min_t(u64, len, PAGE_SIZE);
2231
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002232 spage = kzalloc(sizeof(*spage), GFP_NOFS);
2233 if (!spage) {
2234leave_nomem:
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002235 spin_lock(&sctx->stat_lock);
2236 sctx->stat.malloc_errors++;
2237 spin_unlock(&sctx->stat_lock);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002238 scrub_block_put(sblock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002239 return -ENOMEM;
2240 }
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002241 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2242 scrub_page_get(spage);
2243 sblock->pagev[index] = spage;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002244 spage->sblock = sblock;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002245 spage->dev = dev;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002246 spage->flags = flags;
2247 spage->generation = gen;
2248 spage->logical = logical;
2249 spage->physical = physical;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002250 spage->physical_for_dev_replace = physical_for_dev_replace;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002251 spage->mirror_num = mirror_num;
2252 if (csum) {
2253 spage->have_csum = 1;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002254 memcpy(spage->csum, csum, sctx->csum_size);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002255 } else {
2256 spage->have_csum = 0;
2257 }
2258 sblock->page_count++;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002259 spage->page = alloc_page(GFP_NOFS);
2260 if (!spage->page)
2261 goto leave_nomem;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002262 len -= l;
2263 logical += l;
2264 physical += l;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002265 physical_for_dev_replace += l;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002266 }
2267
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002268 WARN_ON(sblock->page_count == 0);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002269 for (index = 0; index < sblock->page_count; index++) {
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002270 struct scrub_page *spage = sblock->pagev[index];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002271 int ret;
2272
Stefan Behrensff023aa2012-11-06 11:43:11 +01002273 ret = scrub_add_page_to_rd_bio(sctx, spage);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002274 if (ret) {
2275 scrub_block_put(sblock);
2276 return ret;
2277 }
2278 }
2279
2280 if (force)
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002281 scrub_submit(sctx);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002282
2283 /* last one frees, either here or in bio completion for last page */
2284 scrub_block_put(sblock);
2285 return 0;
2286}
2287
2288static void scrub_bio_end_io(struct bio *bio, int err)
2289{
2290 struct scrub_bio *sbio = bio->bi_private;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002291 struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002292
2293 sbio->err = err;
2294 sbio->bio = bio;
2295
Qu Wenruo0339ef22014-02-28 10:46:17 +08002296 btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002297}
2298
2299static void scrub_bio_end_io_worker(struct btrfs_work *work)
2300{
2301 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002302 struct scrub_ctx *sctx = sbio->sctx;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002303 int i;
2304
Stefan Behrensff023aa2012-11-06 11:43:11 +01002305 BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002306 if (sbio->err) {
2307 for (i = 0; i < sbio->page_count; i++) {
2308 struct scrub_page *spage = sbio->pagev[i];
2309
2310 spage->io_error = 1;
2311 spage->sblock->no_io_error_seen = 0;
2312 }
2313 }
2314
2315 /* now complete the scrub_block items that have all pages completed */
2316 for (i = 0; i < sbio->page_count; i++) {
2317 struct scrub_page *spage = sbio->pagev[i];
2318 struct scrub_block *sblock = spage->sblock;
2319
2320 if (atomic_dec_and_test(&sblock->outstanding_pages))
2321 scrub_block_complete(sblock);
2322 scrub_block_put(sblock);
2323 }
2324
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002325 bio_put(sbio->bio);
2326 sbio->bio = NULL;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002327 spin_lock(&sctx->list_lock);
2328 sbio->next_free = sctx->first_free;
2329 sctx->first_free = sbio->index;
2330 spin_unlock(&sctx->list_lock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01002331
2332 if (sctx->is_dev_replace &&
2333 atomic_read(&sctx->wr_ctx.flush_all_writes)) {
2334 mutex_lock(&sctx->wr_ctx.wr_lock);
2335 scrub_wr_submit(sctx);
2336 mutex_unlock(&sctx->wr_ctx.wr_lock);
2337 }
2338
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01002339 scrub_pending_bio_dec(sctx);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002340}
2341
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002342static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
2343 unsigned long *bitmap,
2344 u64 start, u64 len)
2345{
2346 int offset;
2347 int nsectors;
2348 int sectorsize = sparity->sctx->dev_root->sectorsize;
2349
2350 if (len >= sparity->stripe_len) {
2351 bitmap_set(bitmap, 0, sparity->nsectors);
2352 return;
2353 }
2354
2355 start -= sparity->logic_start;
2356 offset = (int)do_div(start, sparity->stripe_len);
2357 offset /= sectorsize;
2358 nsectors = (int)len / sectorsize;
2359
2360 if (offset + nsectors <= sparity->nsectors) {
2361 bitmap_set(bitmap, offset, nsectors);
2362 return;
2363 }
2364
2365 bitmap_set(bitmap, offset, sparity->nsectors - offset);
2366 bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
2367}
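/*
 * Worked example for the wrap-around case (made-up geometry):
 * stripe_len = 64K and sectorsize = 4K give nsectors = 16. A range
 * starting 56K into the stripe (bit 14) with len = 16K maps to 4 bits;
 * since 14 + 4 > 16, bits 14-15 are set first and the remaining 2 bits
 * wrap around to bits 0-1.
 */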
2368
2369static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
2370 u64 start, u64 len)
2371{
2372 __scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
2373}
2374
2375static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
2376 u64 start, u64 len)
2377{
2378 __scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
2379}
2380
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002381static void scrub_block_complete(struct scrub_block *sblock)
2382{
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002383 int corrupted = 0;
2384
Stefan Behrensff023aa2012-11-06 11:43:11 +01002385 if (!sblock->no_io_error_seen) {
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002386 corrupted = 1;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002387 scrub_handle_errored_block(sblock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01002388 } else {
2389 /*
2390	 * if the block has a checksum error, write it via the repair
2391	 * mechanism in the dev-replace case; otherwise write it here,
2392	 * also in the dev-replace case.
2393 */
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002394 corrupted = scrub_checksum(sblock);
2395 if (!corrupted && sblock->sctx->is_dev_replace)
Stefan Behrensff023aa2012-11-06 11:43:11 +01002396 scrub_write_block_to_dev_replace(sblock);
2397 }
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002398
2399 if (sblock->sparity && corrupted && !sblock->data_corrected) {
2400 u64 start = sblock->pagev[0]->logical;
2401 u64 end = sblock->pagev[sblock->page_count - 1]->logical +
2402 PAGE_SIZE;
2403
2404 scrub_parity_mark_sectors_error(sblock->sparity,
2405 start, end - start);
2406 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002407}
2408
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002409static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len,
Arne Jansena2de7332011-03-08 14:14:00 +01002410 u8 *csum)
2411{
2412 struct btrfs_ordered_sum *sum = NULL;
Miao Xief51a4a12013-06-19 10:36:09 +08002413 unsigned long index;
Arne Jansena2de7332011-03-08 14:14:00 +01002414 unsigned long num_sectors;
Arne Jansena2de7332011-03-08 14:14:00 +01002415
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002416 while (!list_empty(&sctx->csum_list)) {
2417 sum = list_first_entry(&sctx->csum_list,
Arne Jansena2de7332011-03-08 14:14:00 +01002418 struct btrfs_ordered_sum, list);
2419 if (sum->bytenr > logical)
2420 return 0;
2421 if (sum->bytenr + sum->len > logical)
2422 break;
2423
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002424 ++sctx->stat.csum_discards;
Arne Jansena2de7332011-03-08 14:14:00 +01002425 list_del(&sum->list);
2426 kfree(sum);
2427 sum = NULL;
2428 }
2429 if (!sum)
2430 return 0;
2431
Miao Xief51a4a12013-06-19 10:36:09 +08002432 index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002433 num_sectors = sum->len / sctx->sectorsize;
Miao Xief51a4a12013-06-19 10:36:09 +08002434 memcpy(csum, sum->sums + index, sctx->csum_size);
2435 if (index == num_sectors - 1) {
Arne Jansena2de7332011-03-08 14:14:00 +01002436 list_del(&sum->list);
2437 kfree(sum);
2438 }
Miao Xief51a4a12013-06-19 10:36:09 +08002439 return 1;
Arne Jansena2de7332011-03-08 14:14:00 +01002440}
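/*
 * Example of the lookup above: csum_list is ordered by bytenr, so sums
 * that end at or before 'logical' are stale and freed. If the first
 * remaining sum covers 'logical', the checksum of the matching sector
 * (index = (logical - bytenr) / sectorsize) is copied out and 1 is
 * returned; otherwise 0 means no csum is known for this range.
 */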
2441
2442	/* scrub extent tries to collect up to 64 kB for each bio */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002443static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002444 u64 physical, struct btrfs_device *dev, u64 flags,
Stefan Behrensff023aa2012-11-06 11:43:11 +01002445 u64 gen, int mirror_num, u64 physical_for_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01002446{
2447 int ret;
2448 u8 csum[BTRFS_CSUM_SIZE];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002449 u32 blocksize;
2450
2451 if (flags & BTRFS_EXTENT_FLAG_DATA) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002452 blocksize = sctx->sectorsize;
2453 spin_lock(&sctx->stat_lock);
2454 sctx->stat.data_extents_scrubbed++;
2455 sctx->stat.data_bytes_scrubbed += len;
2456 spin_unlock(&sctx->stat_lock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002457 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002458 blocksize = sctx->nodesize;
2459 spin_lock(&sctx->stat_lock);
2460 sctx->stat.tree_extents_scrubbed++;
2461 sctx->stat.tree_bytes_scrubbed += len;
2462 spin_unlock(&sctx->stat_lock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002463 } else {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002464 blocksize = sctx->sectorsize;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002465 WARN_ON(1);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002466 }
Arne Jansena2de7332011-03-08 14:14:00 +01002467
2468 while (len) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002469 u64 l = min_t(u64, len, blocksize);
Arne Jansena2de7332011-03-08 14:14:00 +01002470 int have_csum = 0;
2471
2472 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2473 /* push csums to sbio */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002474 have_csum = scrub_find_csum(sctx, logical, l, csum);
Arne Jansena2de7332011-03-08 14:14:00 +01002475 if (have_csum == 0)
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002476 ++sctx->stat.no_csum;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002477 if (sctx->is_dev_replace && !have_csum) {
2478 ret = copy_nocow_pages(sctx, logical, l,
2479 mirror_num,
2480 physical_for_dev_replace);
2481 goto behind_scrub_pages;
2482 }
Arne Jansena2de7332011-03-08 14:14:00 +01002483 }
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002484 ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
Stefan Behrensff023aa2012-11-06 11:43:11 +01002485 mirror_num, have_csum ? csum : NULL, 0,
2486 physical_for_dev_replace);
2487behind_scrub_pages:
Arne Jansena2de7332011-03-08 14:14:00 +01002488 if (ret)
2489 return ret;
2490 len -= l;
2491 logical += l;
2492 physical += l;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002493 physical_for_dev_replace += l;
Arne Jansena2de7332011-03-08 14:14:00 +01002494 }
2495 return 0;
2496}
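/*
 * Example of the splitting above (assuming a 4K sectorsize): a 64K data
 * extent is handed to scrub_pages() as sixteen 4K blocks, each with its
 * own checksum from scrub_find_csum(), while a tree block is passed as
 * a single block of nodesize length.
 */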
2497
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002498static int scrub_pages_for_parity(struct scrub_parity *sparity,
2499 u64 logical, u64 len,
2500 u64 physical, struct btrfs_device *dev,
2501 u64 flags, u64 gen, int mirror_num, u8 *csum)
2502{
2503 struct scrub_ctx *sctx = sparity->sctx;
2504 struct scrub_block *sblock;
2505 int index;
2506
2507 sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
2508 if (!sblock) {
2509 spin_lock(&sctx->stat_lock);
2510 sctx->stat.malloc_errors++;
2511 spin_unlock(&sctx->stat_lock);
2512 return -ENOMEM;
2513 }
2514
2515 /* one ref inside this function, plus one for each page added to
2516 * a bio later on */
2517 atomic_set(&sblock->ref_count, 1);
2518 sblock->sctx = sctx;
2519 sblock->no_io_error_seen = 1;
2520 sblock->sparity = sparity;
2521 scrub_parity_get(sparity);
2522
2523 for (index = 0; len > 0; index++) {
2524 struct scrub_page *spage;
2525 u64 l = min_t(u64, len, PAGE_SIZE);
2526
2527 spage = kzalloc(sizeof(*spage), GFP_NOFS);
2528 if (!spage) {
2529leave_nomem:
2530 spin_lock(&sctx->stat_lock);
2531 sctx->stat.malloc_errors++;
2532 spin_unlock(&sctx->stat_lock);
2533 scrub_block_put(sblock);
2534 return -ENOMEM;
2535 }
2536 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2537 /* For scrub block */
2538 scrub_page_get(spage);
2539 sblock->pagev[index] = spage;
2540 /* For scrub parity */
2541 scrub_page_get(spage);
2542 list_add_tail(&spage->list, &sparity->spages);
2543 spage->sblock = sblock;
2544 spage->dev = dev;
2545 spage->flags = flags;
2546 spage->generation = gen;
2547 spage->logical = logical;
2548 spage->physical = physical;
2549 spage->mirror_num = mirror_num;
2550 if (csum) {
2551 spage->have_csum = 1;
2552 memcpy(spage->csum, csum, sctx->csum_size);
2553 } else {
2554 spage->have_csum = 0;
2555 }
2556 sblock->page_count++;
2557 spage->page = alloc_page(GFP_NOFS);
2558 if (!spage->page)
2559 goto leave_nomem;
2560 len -= l;
2561 logical += l;
2562 physical += l;
2563 }
2564
2565 WARN_ON(sblock->page_count == 0);
2566 for (index = 0; index < sblock->page_count; index++) {
2567 struct scrub_page *spage = sblock->pagev[index];
2568 int ret;
2569
2570 ret = scrub_add_page_to_rd_bio(sctx, spage);
2571 if (ret) {
2572 scrub_block_put(sblock);
2573 return ret;
2574 }
2575 }
2576
2577 /* last one frees, either here or in bio completion for last page */
2578 scrub_block_put(sblock);
2579 return 0;
2580}
2581
2582static int scrub_extent_for_parity(struct scrub_parity *sparity,
2583 u64 logical, u64 len,
2584 u64 physical, struct btrfs_device *dev,
2585 u64 flags, u64 gen, int mirror_num)
2586{
2587 struct scrub_ctx *sctx = sparity->sctx;
2588 int ret;
2589 u8 csum[BTRFS_CSUM_SIZE];
2590 u32 blocksize;
2591
2592 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2593 blocksize = sctx->sectorsize;
2594 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2595 blocksize = sctx->nodesize;
2596 } else {
2597 blocksize = sctx->sectorsize;
2598 WARN_ON(1);
2599 }
2600
2601 while (len) {
2602 u64 l = min_t(u64, len, blocksize);
2603 int have_csum = 0;
2604
2605 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2606 /* push csums to sbio */
2607 have_csum = scrub_find_csum(sctx, logical, l, csum);
2608 if (have_csum == 0)
2609 goto skip;
2610 }
2611 ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
2612 flags, gen, mirror_num,
2613 have_csum ? csum : NULL);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002614 if (ret)
2615 return ret;
Dan Carpenter6b6d24b2014-12-12 22:30:00 +03002616skip:
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002617 len -= l;
2618 logical += l;
2619 physical += l;
2620 }
2621 return 0;
2622}
2623
Wang Shilong3b080b22014-04-01 18:01:43 +08002624/*
2625	 * Given a physical address, this will calculate its
2626	 * logical offset. If this is a parity stripe, it will return
2627	 * the leftmost data stripe's logical offset.
2628 *
2629 * return 0 if it is a data stripe, 1 means parity stripe.
2630 */
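/*
 * Example (hypothetical layout): RAID5 over 3 disks, stripe_len = 64K,
 * so nr_data_stripes() = 2. For a physical address 128K into the device
 * extent, last_offset = 256K and the loop probes the logical offsets
 * 256K and 320K. For num = 2 the first probe already rotates onto disk 2,
 * so 0 is returned with *offset = 256K; for num = 1 neither probe hits,
 * meaning disk 1 holds parity there, and 1 is returned.
 */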
2631static int get_raid56_logic_offset(u64 physical, int num,
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002632 struct map_lookup *map, u64 *offset,
2633 u64 *stripe_start)
Wang Shilong3b080b22014-04-01 18:01:43 +08002634{
2635 int i;
2636 int j = 0;
2637 u64 stripe_nr;
2638 u64 last_offset;
2639 int stripe_index;
2640 int rot;
2641
2642 last_offset = (physical - map->stripes[num].physical) *
2643 nr_data_stripes(map);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002644 if (stripe_start)
2645 *stripe_start = last_offset;
2646
Wang Shilong3b080b22014-04-01 18:01:43 +08002647 *offset = last_offset;
2648 for (i = 0; i < nr_data_stripes(map); i++) {
2649 *offset = last_offset + i * map->stripe_len;
2650
2651 stripe_nr = *offset;
2652 do_div(stripe_nr, map->stripe_len);
2653 do_div(stripe_nr, nr_data_stripes(map));
2654
2655 /* Work out the disk rotation on this stripe-set */
2656 rot = do_div(stripe_nr, map->num_stripes);
2657 /* calculate on which stripe this data is located */
2658 rot += i;
Wang Shilonge4fbaee2014-04-11 18:32:25 +08002659 stripe_index = rot % map->num_stripes;
Wang Shilong3b080b22014-04-01 18:01:43 +08002660 if (stripe_index == num)
2661 return 0;
2662 if (stripe_index < num)
2663 j++;
2664 }
2665 *offset = last_offset + j * map->stripe_len;
2666 return 1;
2667}
2668
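/*
 * Free a scrub_parity structure. Any sectors still marked in the error
 * bitmap at this point could not be repaired and are accounted as
 * uncorrectable read errors before the page list is torn down.
 */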
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002669static void scrub_free_parity(struct scrub_parity *sparity)
2670{
2671 struct scrub_ctx *sctx = sparity->sctx;
2672 struct scrub_page *curr, *next;
2673 int nbits;
2674
2675 nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
2676 if (nbits) {
2677 spin_lock(&sctx->stat_lock);
2678 sctx->stat.read_errors += nbits;
2679 sctx->stat.uncorrectable_errors += nbits;
2680 spin_unlock(&sctx->stat_lock);
2681 }
2682
2683 list_for_each_entry_safe(curr, next, &sparity->spages, list) {
2684 list_del_init(&curr->list);
2685 scrub_page_put(curr);
2686 }
2687
2688 kfree(sparity);
2689}
2690
2691static void scrub_parity_bio_endio(struct bio *bio, int error)
2692{
2693 struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
2694 struct scrub_ctx *sctx = sparity->sctx;
2695
2696 if (error)
2697 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2698 sparity->nsectors);
2699
2700 scrub_free_parity(sparity);
2701 scrub_pending_bio_dec(sctx);
2702 bio_put(bio);
2703}
2704
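/*
 * Kick off the parity check/repair for one full stripe. Sectors that
 * already failed in the read path (ebitmap) are dropped from dbitmap
 * first; if nothing is left to check, the parity is not touched at all.
 * On the success path the pages on the spages list are attached to the
 * rbio and completion is signalled through scrub_parity_bio_endio().
 */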
2705static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
2706{
2707 struct scrub_ctx *sctx = sparity->sctx;
2708 struct bio *bio;
2709 struct btrfs_raid_bio *rbio;
2710 struct scrub_page *spage;
2711 struct btrfs_bio *bbio = NULL;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002712 u64 length;
2713 int ret;
2714
2715 if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
2716 sparity->nsectors))
2717 goto out;
2718
2719 length = sparity->logic_end - sparity->logic_start + 1;
Miao Xie76035972014-11-14 17:45:42 +08002720 ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE,
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002721 sparity->logic_start,
Zhao Lei8e5cfb52015-01-20 15:11:33 +08002722 &length, &bbio, 0, 1);
2723 if (ret || !bbio || !bbio->raid_map)
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002724 goto bbio_out;
2725
2726 bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
2727 if (!bio)
2728 goto bbio_out;
2729
2730 bio->bi_iter.bi_sector = sparity->logic_start >> 9;
2731 bio->bi_private = sparity;
2732 bio->bi_end_io = scrub_parity_bio_endio;
2733
2734 rbio = raid56_parity_alloc_scrub_rbio(sctx->dev_root, bio, bbio,
Zhao Lei8e5cfb52015-01-20 15:11:33 +08002735 length, sparity->scrub_dev,
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002736 sparity->dbitmap,
2737 sparity->nsectors);
2738 if (!rbio)
2739 goto rbio_out;
2740
2741 list_for_each_entry(spage, &sparity->spages, list)
2742 raid56_parity_add_scrub_pages(rbio, spage->page,
2743 spage->logical);
2744
2745 scrub_pending_bio_inc(sctx);
2746 raid56_parity_submit_scrub_rbio(rbio);
2747 return;
2748
2749rbio_out:
2750 bio_put(bio);
2751bbio_out:
Zhao Lei6e9606d2015-01-20 15:11:34 +08002752 btrfs_put_bbio(bbio);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002753 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2754 sparity->nsectors);
2755 spin_lock(&sctx->stat_lock);
2756 sctx->stat.malloc_errors++;
2757 spin_unlock(&sctx->stat_lock);
2758out:
2759 scrub_free_parity(sparity);
2760}
2761
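/*
 * The dbitmap/ebitmap pair is allocated as a trailing array of longs.
 * E.g. with 64K stripes and 4K sectors, nsectors = 16, which rounds up
 * to one 64-bit long, i.e. 8 bytes per bitmap.
 */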
2762static inline int scrub_calc_parity_bitmap_len(int nsectors)
2763{
2764 return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * (BITS_PER_LONG / 8);
2765}
2766
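/*
 * scrub_parity lifetime: one reference is held by the caller that builds
 * the stripe, plus one per scrub_block created for it. Dropping the last
 * reference does not free the structure directly but triggers the parity
 * check; scrub_free_parity() runs only once that I/O has finished (or
 * turned out to be unnecessary).
 */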
2767static void scrub_parity_get(struct scrub_parity *sparity)
2768{
2769 atomic_inc(&sparity->ref_count);
2770}
2771
2772static void scrub_parity_put(struct scrub_parity *sparity)
2773{
2774 if (!atomic_dec_and_test(&sparity->ref_count))
2775 return;
2776
2777 scrub_parity_check_and_repair(sparity);
2778}
2779
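/*
 * Scrub the data stripes that share one RAID5/6 full stripe, then verify
 * the parity. The extent tree is walked between logic_start and
 * logic_end; every data/metadata extent found is clipped to the current
 * stripe, marked in dbitmap and read in. The final scrub_parity_put()
 * starts the parity check once all reads have completed.
 */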
2780static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
2781 struct map_lookup *map,
2782 struct btrfs_device *sdev,
2783 struct btrfs_path *path,
2784 u64 logic_start,
2785 u64 logic_end)
2786{
2787 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
2788 struct btrfs_root *root = fs_info->extent_root;
2789 struct btrfs_root *csum_root = fs_info->csum_root;
2790 struct btrfs_extent_item *extent;
2791 u64 flags;
2792 int ret;
2793 int slot;
2794 struct extent_buffer *l;
2795 struct btrfs_key key;
2796 u64 generation;
2797 u64 extent_logical;
2798 u64 extent_physical;
2799 u64 extent_len;
2800 struct btrfs_device *extent_dev;
2801 struct scrub_parity *sparity;
2802 int nsectors;
2803 int bitmap_len;
2804 int extent_mirror_num;
2805 int stop_loop = 0;
2806
2807 nsectors = map->stripe_len / root->sectorsize;
2808 bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
2809 sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
2810 GFP_NOFS);
2811 if (!sparity) {
2812 spin_lock(&sctx->stat_lock);
2813 sctx->stat.malloc_errors++;
2814 spin_unlock(&sctx->stat_lock);
2815 return -ENOMEM;
2816 }
2817
2818 sparity->stripe_len = map->stripe_len;
2819 sparity->nsectors = nsectors;
2820 sparity->sctx = sctx;
2821 sparity->scrub_dev = sdev;
2822 sparity->logic_start = logic_start;
2823 sparity->logic_end = logic_end;
2824 atomic_set(&sparity->ref_count, 1);
2825 INIT_LIST_HEAD(&sparity->spages);
2826 sparity->dbitmap = sparity->bitmap;
2827 sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;
2828
2829 ret = 0;
2830 while (logic_start < logic_end) {
2831 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2832 key.type = BTRFS_METADATA_ITEM_KEY;
2833 else
2834 key.type = BTRFS_EXTENT_ITEM_KEY;
2835 key.objectid = logic_start;
2836 key.offset = (u64)-1;
2837
2838 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2839 if (ret < 0)
2840 goto out;
2841
2842 if (ret > 0) {
2843 ret = btrfs_previous_extent_item(root, path, 0);
2844 if (ret < 0)
2845 goto out;
2846 if (ret > 0) {
2847 btrfs_release_path(path);
2848 ret = btrfs_search_slot(NULL, root, &key,
2849 path, 0, 0);
2850 if (ret < 0)
2851 goto out;
2852 }
2853 }
2854
2855 stop_loop = 0;
2856 while (1) {
2857 u64 bytes;
2858
2859 l = path->nodes[0];
2860 slot = path->slots[0];
2861 if (slot >= btrfs_header_nritems(l)) {
2862 ret = btrfs_next_leaf(root, path);
2863 if (ret == 0)
2864 continue;
2865 if (ret < 0)
2866 goto out;
2867
2868 stop_loop = 1;
2869 break;
2870 }
2871 btrfs_item_key_to_cpu(l, &key, slot);
2872
2873 if (key.type == BTRFS_METADATA_ITEM_KEY)
2874 bytes = root->nodesize;
2875 else
2876 bytes = key.offset;
2877
2878 if (key.objectid + bytes <= logic_start)
2879 goto next;
2880
2881 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
2882 key.type != BTRFS_METADATA_ITEM_KEY)
2883 goto next;
2884
2885 if (key.objectid > logic_end) {
2886 stop_loop = 1;
2887 break;
2888 }
2889
2890 while (key.objectid >= logic_start + map->stripe_len)
2891 logic_start += map->stripe_len;
2892
2893 extent = btrfs_item_ptr(l, slot,
2894 struct btrfs_extent_item);
2895 flags = btrfs_extent_flags(l, extent);
2896 generation = btrfs_extent_generation(l, extent);
2897
2898 if (key.objectid < logic_start &&
2899 (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
2900 btrfs_err(fs_info,
2901 "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
2902 key.objectid, logic_start);
2903 goto next;
2904 }
2905again:
2906 extent_logical = key.objectid;
2907 extent_len = bytes;
2908
2909 if (extent_logical < logic_start) {
2910 extent_len -= logic_start - extent_logical;
2911 extent_logical = logic_start;
2912 }
2913
2914 if (extent_logical + extent_len >
2915 logic_start + map->stripe_len)
2916 extent_len = logic_start + map->stripe_len -
2917 extent_logical;
2918
2919 scrub_parity_mark_sectors_data(sparity, extent_logical,
2920 extent_len);
2921
2922 scrub_remap_extent(fs_info, extent_logical,
2923 extent_len, &extent_physical,
2924 &extent_dev,
2925 &extent_mirror_num);
2926
2927 ret = btrfs_lookup_csums_range(csum_root,
2928 extent_logical,
2929 extent_logical + extent_len - 1,
2930 &sctx->csum_list, 1);
2931 if (ret)
2932 goto out;
2933
2934 ret = scrub_extent_for_parity(sparity, extent_logical,
2935 extent_len,
2936 extent_physical,
2937 extent_dev, flags,
2938 generation,
2939 extent_mirror_num);
2940 if (ret)
2941 goto out;
2942
2943 scrub_free_csums(sctx);
2944 if (extent_logical + extent_len <
2945 key.objectid + bytes) {
2946 logic_start += map->stripe_len;
2947
2948 if (logic_start >= logic_end) {
2949 stop_loop = 1;
2950 break;
2951 }
2952
2953 if (logic_start < key.objectid + bytes) {
2954 cond_resched();
2955 goto again;
2956 }
2957 }
2958next:
2959 path->slots[0]++;
2960 }
2961
2962 btrfs_release_path(path);
2963
2964 if (stop_loop)
2965 break;
2966
2967 logic_start += map->stripe_len;
2968 }
2969out:
2970 if (ret < 0)
2971 scrub_parity_mark_sectors_error(sparity, logic_start,
2972 logic_end - logic_start + 1);
2973 scrub_parity_put(sparity);
2974 scrub_submit(sctx);
2975 mutex_lock(&sctx->wr_ctx.wr_lock);
2976 scrub_wr_submit(sctx);
2977 mutex_unlock(&sctx->wr_ctx.wr_lock);
2978
2979 btrfs_release_path(path);
2980 return ret < 0 ? ret : 0;
2981}
2982
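/*
 * Scrub one device stripe of a chunk. "num" selects the stripe within
 * the map; offset and increment translate the per-device physical
 * position into chunk-relative logical addresses depending on the RAID
 * level. For RAID5/6, parity stripes are skipped here and handed to
 * scrub_raid56_parity() instead.
 */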
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002983static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002984 struct map_lookup *map,
2985 struct btrfs_device *scrub_dev,
Stefan Behrensff023aa2012-11-06 11:43:11 +01002986 int num, u64 base, u64 length,
2987 int is_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01002988{
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002989 struct btrfs_path *path, *ppath;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002990 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
Arne Jansena2de7332011-03-08 14:14:00 +01002991 struct btrfs_root *root = fs_info->extent_root;
2992 struct btrfs_root *csum_root = fs_info->csum_root;
2993 struct btrfs_extent_item *extent;
Arne Jansene7786c32011-05-28 20:58:38 +00002994 struct blk_plug plug;
Arne Jansena2de7332011-03-08 14:14:00 +01002995 u64 flags;
2996 int ret;
2997 int slot;
Arne Jansena2de7332011-03-08 14:14:00 +01002998 u64 nstripes;
Arne Jansena2de7332011-03-08 14:14:00 +01002999 struct extent_buffer *l;
3000 struct btrfs_key key;
3001 u64 physical;
3002 u64 logical;
Liu Bo625f1c8d2013-04-27 02:56:57 +00003003 u64 logic_end;
Wang Shilong3b080b22014-04-01 18:01:43 +08003004 u64 physical_end;
Arne Jansena2de7332011-03-08 14:14:00 +01003005 u64 generation;
Jan Schmidte12fa9c2011-06-17 15:55:21 +02003006 int mirror_num;
Arne Jansen7a262852011-06-10 12:39:23 +02003007 struct reada_control *reada1;
3008 struct reada_control *reada2;
3009 struct btrfs_key key_start;
3010 struct btrfs_key key_end;
Arne Jansena2de7332011-03-08 14:14:00 +01003011 u64 increment = map->stripe_len;
3012 u64 offset;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003013 u64 extent_logical;
3014 u64 extent_physical;
3015 u64 extent_len;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003016 u64 stripe_logical;
3017 u64 stripe_end;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003018 struct btrfs_device *extent_dev;
3019 int extent_mirror_num;
Wang Shilong3b080b22014-04-01 18:01:43 +08003020 int stop_loop = 0;
David Woodhouse53b381b2013-01-29 18:40:14 -05003021
Arne Jansena2de7332011-03-08 14:14:00 +01003022 nstripes = length;
Wang Shilong3b080b22014-04-01 18:01:43 +08003023 physical = map->stripes[num].physical;
Arne Jansena2de7332011-03-08 14:14:00 +01003024 offset = 0;
3025 do_div(nstripes, map->stripe_len);
3026 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3027 offset = map->stripe_len * num;
3028 increment = map->stripe_len * map->num_stripes;
Jan Schmidt193ea742011-06-13 19:56:54 +02003029 mirror_num = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003030 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3031 int factor = map->num_stripes / map->sub_stripes;
3032 offset = map->stripe_len * (num / map->sub_stripes);
3033 increment = map->stripe_len * factor;
Jan Schmidt193ea742011-06-13 19:56:54 +02003034 mirror_num = num % map->sub_stripes + 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003035 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3036 increment = map->stripe_len;
Jan Schmidt193ea742011-06-13 19:56:54 +02003037 mirror_num = num % map->num_stripes + 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003038 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3039 increment = map->stripe_len;
Jan Schmidt193ea742011-06-13 19:56:54 +02003040 mirror_num = num % map->num_stripes + 1;
Wang Shilong3b080b22014-04-01 18:01:43 +08003041 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
3042 BTRFS_BLOCK_GROUP_RAID6)) {
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003043 get_raid56_logic_offset(physical, num, map, &offset, NULL);
Wang Shilong3b080b22014-04-01 18:01:43 +08003044 increment = map->stripe_len * nr_data_stripes(map);
3045 mirror_num = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003046 } else {
3047 increment = map->stripe_len;
Jan Schmidt193ea742011-06-13 19:56:54 +02003048 mirror_num = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003049 }
3050
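	/*
	 * Example (hypothetical): RAID10 with num_stripes = 4,
	 * sub_stripes = 2 and stripe_len = 64K. For num = 3 this gives
	 * factor = 2, offset = 64K, increment = 128K and mirror_num = 2,
	 * i.e. each pass of the loop below advances by one full RAID0 row
	 * while staying on the second copy.
	 */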
3051 path = btrfs_alloc_path();
3052 if (!path)
3053 return -ENOMEM;
3054
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003055 ppath = btrfs_alloc_path();
3056 if (!ppath) {
3057 btrfs_free_path(path);
3058 return -ENOMEM;
3059 }
3060
Stefan Behrensb5d67f62012-03-27 14:21:27 -04003061 /*
3062 * work on the commit root. The related disk blocks are static as
3063 * long as COW is applied. This means it is safe to rewrite
3064 * them to repair disk errors without any race conditions.
3065 */
Arne Jansena2de7332011-03-08 14:14:00 +01003066 path->search_commit_root = 1;
3067 path->skip_locking = 1;
3068
3069 /*
Arne Jansen7a262852011-06-10 12:39:23 +02003070 * trigger the readahead for the extent tree and the csum tree and wait
3071 * for completion. During readahead, the scrub is officially paused
3072 * so as not to hold off transaction commits.
Arne Jansena2de7332011-03-08 14:14:00 +01003073 */
3074 logical = base + offset;
Wang Shilong3b080b22014-04-01 18:01:43 +08003075 physical_end = physical + nstripes * map->stripe_len;
3076 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
3077 BTRFS_BLOCK_GROUP_RAID6)) {
3078 get_raid56_logic_offset(physical_end, num,
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003079 map, &logic_end, NULL);
Wang Shilong3b080b22014-04-01 18:01:43 +08003080 logic_end += base;
3081 } else {
3082 logic_end = logical + increment * nstripes;
3083 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003084 wait_event(sctx->list_wait,
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01003085 atomic_read(&sctx->bios_in_flight) == 0);
Wang Shilongcb7ab022013-12-04 21:16:53 +08003086 scrub_blocked_if_needed(fs_info);
Arne Jansena2de7332011-03-08 14:14:00 +01003087
Arne Jansen7a262852011-06-10 12:39:23 +02003088 /* FIXME it might be better to start readahead at commit root */
3089 key_start.objectid = logical;
3090 key_start.type = BTRFS_EXTENT_ITEM_KEY;
3091 key_start.offset = (u64)0;
Wang Shilong3b080b22014-04-01 18:01:43 +08003092 key_end.objectid = logic_end;
Josef Bacik3173a182013-03-07 14:22:04 -05003093 key_end.type = BTRFS_METADATA_ITEM_KEY;
3094 key_end.offset = (u64)-1;
Arne Jansen7a262852011-06-10 12:39:23 +02003095 reada1 = btrfs_reada_add(root, &key_start, &key_end);
Arne Jansena2de7332011-03-08 14:14:00 +01003096
Arne Jansen7a262852011-06-10 12:39:23 +02003097 key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3098 key_start.type = BTRFS_EXTENT_CSUM_KEY;
3099 key_start.offset = logical;
3100 key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3101 key_end.type = BTRFS_EXTENT_CSUM_KEY;
Wang Shilong3b080b22014-04-01 18:01:43 +08003102 key_end.offset = logic_end;
Arne Jansen7a262852011-06-10 12:39:23 +02003103 reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);
Arne Jansena2de7332011-03-08 14:14:00 +01003104
Arne Jansen7a262852011-06-10 12:39:23 +02003105 if (!IS_ERR(reada1))
3106 btrfs_reada_wait(reada1);
3107 if (!IS_ERR(reada2))
3108 btrfs_reada_wait(reada2);
Arne Jansena2de7332011-03-08 14:14:00 +01003109
Arne Jansena2de7332011-03-08 14:14:00 +01003110
3111 /*
3112 * collect all data csums for the stripe to avoid seeking during
3113 * the scrub. This might currently (crc32) end up being about 1MB.
3114 */
Arne Jansene7786c32011-05-28 20:58:38 +00003115 blk_start_plug(&plug);
Arne Jansena2de7332011-03-08 14:14:00 +01003116
Arne Jansena2de7332011-03-08 14:14:00 +01003117 /*
3118 * now find all extents for each stripe and scrub them
3119 */
Arne Jansena2de7332011-03-08 14:14:00 +01003120 ret = 0;
Wang Shilong3b080b22014-04-01 18:01:43 +08003121 while (physical < physical_end) {
3122 /* for raid56, we skip parity stripe */
3123 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
3124 BTRFS_BLOCK_GROUP_RAID6)) {
3125 ret = get_raid56_logic_offset(physical, num,
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003126 map, &logical, &stripe_logical);
Wang Shilong3b080b22014-04-01 18:01:43 +08003127 logical += base;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003128 if (ret) {
3129 stripe_logical += base;
3130 stripe_end = stripe_logical + increment - 1;
3131 ret = scrub_raid56_parity(sctx, map, scrub_dev,
3132 ppath, stripe_logical,
3133 stripe_end);
3134 if (ret)
3135 goto out;
Wang Shilong3b080b22014-04-01 18:01:43 +08003136 goto skip;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003137 }
Wang Shilong3b080b22014-04-01 18:01:43 +08003138 }
Arne Jansena2de7332011-03-08 14:14:00 +01003139 /*
3140 * canceled?
3141 */
3142 if (atomic_read(&fs_info->scrub_cancel_req) ||
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003143 atomic_read(&sctx->cancel_req)) {
Arne Jansena2de7332011-03-08 14:14:00 +01003144 ret = -ECANCELED;
3145 goto out;
3146 }
3147 /*
3148 * check to see if we have to pause
3149 */
3150 if (atomic_read(&fs_info->scrub_pause_req)) {
3151 /* push queued extents */
Stefan Behrensff023aa2012-11-06 11:43:11 +01003152 atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003153 scrub_submit(sctx);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003154 mutex_lock(&sctx->wr_ctx.wr_lock);
3155 scrub_wr_submit(sctx);
3156 mutex_unlock(&sctx->wr_ctx.wr_lock);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003157 wait_event(sctx->list_wait,
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01003158 atomic_read(&sctx->bios_in_flight) == 0);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003159 atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
Wang Shilong3cb09292013-12-04 21:15:19 +08003160 scrub_blocked_if_needed(fs_info);
Arne Jansena2de7332011-03-08 14:14:00 +01003161 }
3162
Wang Shilong7c76edb2014-01-12 21:38:32 +08003163 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
3164 key.type = BTRFS_METADATA_ITEM_KEY;
3165 else
3166 key.type = BTRFS_EXTENT_ITEM_KEY;
Arne Jansena2de7332011-03-08 14:14:00 +01003167 key.objectid = logical;
Liu Bo625f1c8d2013-04-27 02:56:57 +00003168 key.offset = (u64)-1;
Arne Jansena2de7332011-03-08 14:14:00 +01003169
3170 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3171 if (ret < 0)
3172 goto out;
Josef Bacik3173a182013-03-07 14:22:04 -05003173
Arne Jansen8c510322011-06-03 10:09:26 +02003174 if (ret > 0) {
Wang Shilongade2e0b2014-01-12 21:38:33 +08003175 ret = btrfs_previous_extent_item(root, path, 0);
Arne Jansena2de7332011-03-08 14:14:00 +01003176 if (ret < 0)
3177 goto out;
Arne Jansen8c510322011-06-03 10:09:26 +02003178 if (ret > 0) {
3179 /* there's no smaller item, so stick with the
3180 * larger one */
3181 btrfs_release_path(path);
3182 ret = btrfs_search_slot(NULL, root, &key,
3183 path, 0, 0);
3184 if (ret < 0)
3185 goto out;
3186 }
Arne Jansena2de7332011-03-08 14:14:00 +01003187 }
3188
Liu Bo625f1c8d2013-04-27 02:56:57 +00003189 stop_loop = 0;
Arne Jansena2de7332011-03-08 14:14:00 +01003190 while (1) {
Josef Bacik3173a182013-03-07 14:22:04 -05003191 u64 bytes;
3192
Arne Jansena2de7332011-03-08 14:14:00 +01003193 l = path->nodes[0];
3194 slot = path->slots[0];
3195 if (slot >= btrfs_header_nritems(l)) {
3196 ret = btrfs_next_leaf(root, path);
3197 if (ret == 0)
3198 continue;
3199 if (ret < 0)
3200 goto out;
3201
Liu Bo625f1c8d2013-04-27 02:56:57 +00003202 stop_loop = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003203 break;
3204 }
3205 btrfs_item_key_to_cpu(l, &key, slot);
3206
Josef Bacik3173a182013-03-07 14:22:04 -05003207 if (key.type == BTRFS_METADATA_ITEM_KEY)
David Sterba707e8a02014-06-04 19:22:26 +02003208 bytes = root->nodesize;
Josef Bacik3173a182013-03-07 14:22:04 -05003209 else
3210 bytes = key.offset;
3211
3212 if (key.objectid + bytes <= logical)
Arne Jansena2de7332011-03-08 14:14:00 +01003213 goto next;
3214
Liu Bo625f1c8d2013-04-27 02:56:57 +00003215 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3216 key.type != BTRFS_METADATA_ITEM_KEY)
3217 goto next;
Arne Jansena2de7332011-03-08 14:14:00 +01003218
Liu Bo625f1c8d2013-04-27 02:56:57 +00003219 if (key.objectid >= logical + map->stripe_len) {
3220 /* out of this device extent */
3221 if (key.objectid >= logic_end)
3222 stop_loop = 1;
3223 break;
3224 }
Arne Jansena2de7332011-03-08 14:14:00 +01003225
3226 extent = btrfs_item_ptr(l, slot,
3227 struct btrfs_extent_item);
3228 flags = btrfs_extent_flags(l, extent);
3229 generation = btrfs_extent_generation(l, extent);
3230
3231 if (key.objectid < logical &&
3232 (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
Frank Holtonefe120a2013-12-20 11:37:06 -05003233 btrfs_err(fs_info,
3234 "scrub: tree block %llu spanning "
3235 "stripes, ignored. logical=%llu",
Geert Uytterhoevenc1c9ff72013-08-20 13:20:07 +02003236 key.objectid, logical);
Arne Jansena2de7332011-03-08 14:14:00 +01003237 goto next;
3238 }
3239
Liu Bo625f1c8d2013-04-27 02:56:57 +00003240again:
3241 extent_logical = key.objectid;
3242 extent_len = bytes;
3243
Arne Jansena2de7332011-03-08 14:14:00 +01003244 /*
3245 * trim extent to this stripe
3246 */
Liu Bo625f1c8d2013-04-27 02:56:57 +00003247 if (extent_logical < logical) {
3248 extent_len -= logical - extent_logical;
3249 extent_logical = logical;
Arne Jansena2de7332011-03-08 14:14:00 +01003250 }
Liu Bo625f1c8d2013-04-27 02:56:57 +00003251 if (extent_logical + extent_len >
Arne Jansena2de7332011-03-08 14:14:00 +01003252 logical + map->stripe_len) {
Liu Bo625f1c8d2013-04-27 02:56:57 +00003253 extent_len = logical + map->stripe_len -
3254 extent_logical;
Arne Jansena2de7332011-03-08 14:14:00 +01003255 }
3256
Liu Bo625f1c8d2013-04-27 02:56:57 +00003257 extent_physical = extent_logical - logical + physical;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003258 extent_dev = scrub_dev;
3259 extent_mirror_num = mirror_num;
3260 if (is_dev_replace)
3261 scrub_remap_extent(fs_info, extent_logical,
3262 extent_len, &extent_physical,
3263 &extent_dev,
3264 &extent_mirror_num);
Liu Bo625f1c8d2013-04-27 02:56:57 +00003265
3266 ret = btrfs_lookup_csums_range(csum_root, logical,
3267 logical + map->stripe_len - 1,
3268 &sctx->csum_list, 1);
Arne Jansena2de7332011-03-08 14:14:00 +01003269 if (ret)
3270 goto out;
3271
Liu Bo625f1c8d2013-04-27 02:56:57 +00003272 ret = scrub_extent(sctx, extent_logical, extent_len,
3273 extent_physical, extent_dev, flags,
3274 generation, extent_mirror_num,
Stefan Behrens115930c2013-07-04 16:14:23 +02003275 extent_logical - logical + physical);
Liu Bo625f1c8d2013-04-27 02:56:57 +00003276 if (ret)
3277 goto out;
3278
Josef Bacikd88d46c2013-06-10 12:59:04 +00003279 scrub_free_csums(sctx);
Liu Bo625f1c8d2013-04-27 02:56:57 +00003280 if (extent_logical + extent_len <
3281 key.objectid + bytes) {
Wang Shilong3b080b22014-04-01 18:01:43 +08003282 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
3283 BTRFS_BLOCK_GROUP_RAID6)) {
3284 /*
3285 * loop until we find the next data stripe
3286 * or we have finished all stripes.
3287 */
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003288loop:
3289 physical += map->stripe_len;
3290 ret = get_raid56_logic_offset(physical,
3291 num, map, &logical,
3292 &stripe_logical);
3293 logical += base;
3294
3295 if (ret && physical < physical_end) {
3296 stripe_logical += base;
3297 stripe_end = stripe_logical +
3298 increment - 1;
3299 ret = scrub_raid56_parity(sctx,
3300 map, scrub_dev, ppath,
3301 stripe_logical,
3302 stripe_end);
3303 if (ret)
3304 goto out;
3305 goto loop;
3306 }
Wang Shilong3b080b22014-04-01 18:01:43 +08003307 } else {
3308 physical += map->stripe_len;
3309 logical += increment;
3310 }
Liu Bo625f1c8d2013-04-27 02:56:57 +00003311 if (logical < key.objectid + bytes) {
3312 cond_resched();
3313 goto again;
3314 }
3315
Wang Shilong3b080b22014-04-01 18:01:43 +08003316 if (physical >= physical_end) {
Liu Bo625f1c8d2013-04-27 02:56:57 +00003317 stop_loop = 1;
3318 break;
3319 }
3320 }
Arne Jansena2de7332011-03-08 14:14:00 +01003321next:
3322 path->slots[0]++;
3323 }
Chris Mason71267332011-05-23 06:30:52 -04003324 btrfs_release_path(path);
Wang Shilong3b080b22014-04-01 18:01:43 +08003325skip:
Arne Jansena2de7332011-03-08 14:14:00 +01003326 logical += increment;
3327 physical += map->stripe_len;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003328 spin_lock(&sctx->stat_lock);
Liu Bo625f1c8d2013-04-27 02:56:57 +00003329 if (stop_loop)
3330 sctx->stat.last_physical = map->stripes[num].physical +
3331 length;
3332 else
3333 sctx->stat.last_physical = physical;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003334 spin_unlock(&sctx->stat_lock);
Liu Bo625f1c8d2013-04-27 02:56:57 +00003335 if (stop_loop)
3336 break;
Arne Jansena2de7332011-03-08 14:14:00 +01003337 }
Stefan Behrensff023aa2012-11-06 11:43:11 +01003338out:
Arne Jansena2de7332011-03-08 14:14:00 +01003339 /* push queued extents */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003340 scrub_submit(sctx);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003341 mutex_lock(&sctx->wr_ctx.wr_lock);
3342 scrub_wr_submit(sctx);
3343 mutex_unlock(&sctx->wr_ctx.wr_lock);
Arne Jansena2de7332011-03-08 14:14:00 +01003344
Arne Jansene7786c32011-05-28 20:58:38 +00003345 blk_finish_plug(&plug);
Arne Jansena2de7332011-03-08 14:14:00 +01003346 btrfs_free_path(path);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003347 btrfs_free_path(ppath);
Arne Jansena2de7332011-03-08 14:14:00 +01003348 return ret < 0 ? ret : 0;
3349}
3350
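/*
 * Scrub the portion of a chunk that lives on scrub_dev. The chunk
 * mapping is looked up in the in-memory mapping tree; every stripe of
 * the map that sits on this device at dev_offset is scrubbed via
 * scrub_stripe().
 */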
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003351static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003352 struct btrfs_device *scrub_dev,
3353 u64 chunk_tree, u64 chunk_objectid,
3354 u64 chunk_offset, u64 length,
Stefan Behrensff023aa2012-11-06 11:43:11 +01003355 u64 dev_offset, int is_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01003356{
3357 struct btrfs_mapping_tree *map_tree =
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003358 &sctx->dev_root->fs_info->mapping_tree;
Arne Jansena2de7332011-03-08 14:14:00 +01003359 struct map_lookup *map;
3360 struct extent_map *em;
3361 int i;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003362 int ret = 0;
Arne Jansena2de7332011-03-08 14:14:00 +01003363
3364 read_lock(&map_tree->map_tree.lock);
3365 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3366 read_unlock(&map_tree->map_tree.lock);
3367
3368 if (!em)
3369 return -EINVAL;
3370
3371 map = (struct map_lookup *)em->bdev;
3372 if (em->start != chunk_offset)
3373 goto out;
3374
3375 if (em->len < length)
3376 goto out;
3377
3378 for (i = 0; i < map->num_stripes; ++i) {
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003379 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
Arne Jansen859acaf2012-02-09 15:09:02 +01003380 map->stripes[i].physical == dev_offset) {
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003381 ret = scrub_stripe(sctx, map, scrub_dev, i,
Stefan Behrensff023aa2012-11-06 11:43:11 +01003382 chunk_offset, length,
3383 is_dev_replace);
Arne Jansena2de7332011-03-08 14:14:00 +01003384 if (ret)
3385 goto out;
3386 }
3387 }
3388out:
3389 free_extent_map(em);
3390
3391 return ret;
3392}
3393
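/*
 * Walk all dev extents of scrub_dev between start and end and scrub the
 * corresponding chunks one by one. Between chunks, all outstanding reads
 * and writes are flushed and the scrub briefly counts itself as paused,
 * so transaction commits are not held off across chunk boundaries.
 */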
3394static noinline_for_stack
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003395int scrub_enumerate_chunks(struct scrub_ctx *sctx,
Stefan Behrensff023aa2012-11-06 11:43:11 +01003396 struct btrfs_device *scrub_dev, u64 start, u64 end,
3397 int is_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01003398{
3399 struct btrfs_dev_extent *dev_extent = NULL;
3400 struct btrfs_path *path;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003401 struct btrfs_root *root = sctx->dev_root;
Arne Jansena2de7332011-03-08 14:14:00 +01003402 struct btrfs_fs_info *fs_info = root->fs_info;
3403 u64 length;
3404 u64 chunk_tree;
3405 u64 chunk_objectid;
3406 u64 chunk_offset;
3407 int ret;
3408 int slot;
3409 struct extent_buffer *l;
3410 struct btrfs_key key;
3411 struct btrfs_key found_key;
3412 struct btrfs_block_group_cache *cache;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003413 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
Arne Jansena2de7332011-03-08 14:14:00 +01003414
3415 path = btrfs_alloc_path();
3416 if (!path)
3417 return -ENOMEM;
3418
3419 path->reada = 2;
3420 path->search_commit_root = 1;
3421 path->skip_locking = 1;
3422
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003423 key.objectid = scrub_dev->devid;
Arne Jansena2de7332011-03-08 14:14:00 +01003424 key.offset = 0ull;
3425 key.type = BTRFS_DEV_EXTENT_KEY;
3426
Arne Jansena2de7332011-03-08 14:14:00 +01003427 while (1) {
3428 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3429 if (ret < 0)
Arne Jansen8c510322011-06-03 10:09:26 +02003430 break;
3431 if (ret > 0) {
3432 if (path->slots[0] >=
3433 btrfs_header_nritems(path->nodes[0])) {
3434 ret = btrfs_next_leaf(root, path);
3435 if (ret)
3436 break;
3437 }
3438 }
Arne Jansena2de7332011-03-08 14:14:00 +01003439
3440 l = path->nodes[0];
3441 slot = path->slots[0];
3442
3443 btrfs_item_key_to_cpu(l, &found_key, slot);
3444
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003445 if (found_key.objectid != scrub_dev->devid)
Arne Jansena2de7332011-03-08 14:14:00 +01003446 break;
3447
David Sterba962a2982014-06-04 18:41:45 +02003448 if (found_key.type != BTRFS_DEV_EXTENT_KEY)
Arne Jansena2de7332011-03-08 14:14:00 +01003449 break;
3450
3451 if (found_key.offset >= end)
3452 break;
3453
3454 if (found_key.offset < key.offset)
3455 break;
3456
3457 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3458 length = btrfs_dev_extent_length(l, dev_extent);
3459
Qu Wenruoced96ed2014-06-19 10:42:51 +08003460 if (found_key.offset + length <= start)
3461 goto skip;
Arne Jansena2de7332011-03-08 14:14:00 +01003462
3463 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
3464 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
3465 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3466
3467 /*
3468 * get a reference on the corresponding block group to prevent
3469 * the chunk from going away while we scrub it
3470 */
3471 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
Qu Wenruoced96ed2014-06-19 10:42:51 +08003472
3473 /* some chunks are removed but not committed to disk yet,
3474 * continue scrubbing */
3475 if (!cache)
3476 goto skip;
3477
Stefan Behrensff023aa2012-11-06 11:43:11 +01003478 dev_replace->cursor_right = found_key.offset + length;
3479 dev_replace->cursor_left = found_key.offset;
3480 dev_replace->item_needs_writeback = 1;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003481 ret = scrub_chunk(sctx, scrub_dev, chunk_tree, chunk_objectid,
Stefan Behrensff023aa2012-11-06 11:43:11 +01003482 chunk_offset, length, found_key.offset,
3483 is_dev_replace);
3484
3485 /*
3486 * flush, submit all pending read and write bios, afterwards
3487 * wait for them.
3488 * Note that in the dev replace case, a read request causes
3489 * write requests that are submitted in the read completion
3490 * worker. Therefore in the current situation, it is required
3491 * that all write requests are flushed, so that all read and
3492 * write requests are really completed when bios_in_flight
3493 * changes to 0.
3494 */
3495 atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
3496 scrub_submit(sctx);
3497 mutex_lock(&sctx->wr_ctx.wr_lock);
3498 scrub_wr_submit(sctx);
3499 mutex_unlock(&sctx->wr_ctx.wr_lock);
3500
3501 wait_event(sctx->list_wait,
3502 atomic_read(&sctx->bios_in_flight) == 0);
Wang Shilong12cf9372014-02-19 19:24:17 +08003503 atomic_inc(&fs_info->scrubs_paused);
3504 wake_up(&fs_info->scrub_pause_wait);
3505
3506 /*
3507 * must be called before we decrease @scrub_paused.
3508 * make sure we don't block transaction commit while
3509 * we are waiting for pending workers to finish.
3510 */
Stefan Behrensff023aa2012-11-06 11:43:11 +01003511 wait_event(sctx->list_wait,
3512 atomic_read(&sctx->workers_pending) == 0);
Wang Shilong12cf9372014-02-19 19:24:17 +08003513 atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
3514
3515 mutex_lock(&fs_info->scrub_lock);
3516 __scrub_blocked_if_needed(fs_info);
3517 atomic_dec(&fs_info->scrubs_paused);
3518 mutex_unlock(&fs_info->scrub_lock);
3519 wake_up(&fs_info->scrub_pause_wait);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003520
Arne Jansena2de7332011-03-08 14:14:00 +01003521 btrfs_put_block_group(cache);
3522 if (ret)
3523 break;
Stefan Behrensaf1be4f2012-11-27 17:39:51 +00003524 if (is_dev_replace &&
3525 atomic64_read(&dev_replace->num_write_errors) > 0) {
Stefan Behrensff023aa2012-11-06 11:43:11 +01003526 ret = -EIO;
3527 break;
3528 }
3529 if (sctx->stat.malloc_errors > 0) {
3530 ret = -ENOMEM;
3531 break;
3532 }
Arne Jansena2de7332011-03-08 14:14:00 +01003533
Ilya Dryomov539f3582013-10-07 13:42:57 +03003534 dev_replace->cursor_left = dev_replace->cursor_right;
3535 dev_replace->item_needs_writeback = 1;
Qu Wenruoced96ed2014-06-19 10:42:51 +08003536skip:
Arne Jansena2de7332011-03-08 14:14:00 +01003537 key.offset = found_key.offset + length;
Chris Mason71267332011-05-23 06:30:52 -04003538 btrfs_release_path(path);
Arne Jansena2de7332011-03-08 14:14:00 +01003539 }
3540
Arne Jansena2de7332011-03-08 14:14:00 +01003541 btrfs_free_path(path);
Arne Jansen8c510322011-06-03 10:09:26 +02003542
3543 /*
3544 * ret can still be 1 from search_slot or next_leaf,
3545 * that's not an error
3546 */
3547 return ret < 0 ? ret : 0;
Arne Jansena2de7332011-03-08 14:14:00 +01003548}
3549
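/*
 * Scrub all super block copies on the device. Seed devices are checked
 * against their own generation, everything else against the last
 * committed transaction; copies beyond commit_total_bytes are skipped.
 */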
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003550static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
3551 struct btrfs_device *scrub_dev)
Arne Jansena2de7332011-03-08 14:14:00 +01003552{
3553 int i;
3554 u64 bytenr;
3555 u64 gen;
3556 int ret;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003557 struct btrfs_root *root = sctx->dev_root;
Arne Jansena2de7332011-03-08 14:14:00 +01003558
Miao Xie87533c42013-01-29 10:14:48 +00003559 if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
Jeff Mahoney79787ea2012-03-12 16:03:00 +01003560 return -EIO;
3561
Miao Xie5f546062014-07-24 11:37:09 +08003562 /* Seed devices of a new filesystem have their own generation. */
3563 if (scrub_dev->fs_devices != root->fs_info->fs_devices)
3564 gen = scrub_dev->generation;
3565 else
3566 gen = root->fs_info->last_trans_committed;
Arne Jansena2de7332011-03-08 14:14:00 +01003567
3568 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
3569 bytenr = btrfs_sb_offset(i);
Miao Xie935e5cc2014-09-03 21:35:33 +08003570 if (bytenr + BTRFS_SUPER_INFO_SIZE >
3571 scrub_dev->commit_total_bytes)
Arne Jansena2de7332011-03-08 14:14:00 +01003572 break;
3573
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003574 ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003575 scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
Stefan Behrensff023aa2012-11-06 11:43:11 +01003576 NULL, 1, bytenr);
Arne Jansena2de7332011-03-08 14:14:00 +01003577 if (ret)
3578 return ret;
3579 }
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01003580 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
Arne Jansena2de7332011-03-08 14:14:00 +01003581
3582 return 0;
3583}
3584
3585/*
3586 * get a reference count on fs_info->scrub_workers. start worker if necessary
3587 */
Stefan Behrensff023aa2012-11-06 11:43:11 +01003588static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
3589 int is_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01003590{
Josef Bacik0dc3b842011-11-18 14:37:27 -05003591 int ret = 0;
Qu Wenruo0339ef22014-02-28 10:46:17 +08003592 int flags = WQ_FREEZABLE | WQ_UNBOUND;
3593 int max_active = fs_info->thread_pool_size;
Arne Jansena2de7332011-03-08 14:14:00 +01003594
Arne Jansen632dd772011-06-10 12:07:07 +02003595 if (fs_info->scrub_workers_refcnt == 0) {
Stefan Behrensff023aa2012-11-06 11:43:11 +01003596 if (is_dev_replace)
Qu Wenruo0339ef22014-02-28 10:46:17 +08003597 fs_info->scrub_workers =
3598 btrfs_alloc_workqueue("btrfs-scrub", flags,
3599 1, 4);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003600 else
Qu Wenruo0339ef22014-02-28 10:46:17 +08003601 fs_info->scrub_workers =
3602 btrfs_alloc_workqueue("btrfs-scrub", flags,
3603 max_active, 4);
3604 if (!fs_info->scrub_workers) {
3605 ret = -ENOMEM;
Josef Bacik0dc3b842011-11-18 14:37:27 -05003606 goto out;
Qu Wenruo0339ef22014-02-28 10:46:17 +08003607 }
3608 fs_info->scrub_wr_completion_workers =
3609 btrfs_alloc_workqueue("btrfs-scrubwrc", flags,
3610 max_active, 2);
3611 if (!fs_info->scrub_wr_completion_workers) {
3612 ret = -ENOMEM;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003613 goto out;
Qu Wenruo0339ef22014-02-28 10:46:17 +08003614 }
3615 fs_info->scrub_nocow_workers =
3616 btrfs_alloc_workqueue("btrfs-scrubnc", flags, 1, 0);
3617 if (!fs_info->scrub_nocow_workers) {
3618 ret = -ENOMEM;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003619 goto out;
Qu Wenruo0339ef22014-02-28 10:46:17 +08003620 }
Arne Jansen632dd772011-06-10 12:07:07 +02003621 }
Arne Jansena2de7332011-03-08 14:14:00 +01003622 ++fs_info->scrub_workers_refcnt;
Josef Bacik0dc3b842011-11-18 14:37:27 -05003623out:
Josef Bacik0dc3b842011-11-18 14:37:27 -05003624 return ret;
Arne Jansena2de7332011-03-08 14:14:00 +01003625}
3626
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003627static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
Arne Jansena2de7332011-03-08 14:14:00 +01003628{
Stefan Behrensff023aa2012-11-06 11:43:11 +01003629 if (--fs_info->scrub_workers_refcnt == 0) {
Qu Wenruo0339ef22014-02-28 10:46:17 +08003630 btrfs_destroy_workqueue(fs_info->scrub_workers);
3631 btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
3632 btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003633 }
Arne Jansena2de7332011-03-08 14:14:00 +01003634 WARN_ON(fs_info->scrub_workers_refcnt < 0);
Arne Jansena2de7332011-03-08 14:14:00 +01003635}
3636
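/*
 * Entry point for both scrub and dev-replace. After validating the size
 * assumptions and the device state, this sets up a scrub_ctx, scrubs the
 * super blocks (scrub only), enumerates and scrubs all chunks, and
 * finally waits for all in-flight bios and workers before copying the
 * statistics back to the caller.
 */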
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003637int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
3638 u64 end, struct btrfs_scrub_progress *progress,
Stefan Behrens63a212a2012-11-05 18:29:28 +01003639 int readonly, int is_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01003640{
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003641 struct scrub_ctx *sctx;
Arne Jansena2de7332011-03-08 14:14:00 +01003642 int ret;
3643 struct btrfs_device *dev;
Miao Xie5d68da32014-07-24 11:37:07 +08003644 struct rcu_string *name;
Arne Jansena2de7332011-03-08 14:14:00 +01003645
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003646 if (btrfs_fs_closing(fs_info))
Arne Jansena2de7332011-03-08 14:14:00 +01003647 return -EINVAL;
3648
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003649 if (fs_info->chunk_root->nodesize > BTRFS_STRIPE_LEN) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04003650 /*
3651 * in this case scrub is unable to calculate the checksum,
3652 * given the way it is implemented. Do not handle this
3653 * situation at all because it won't ever happen.
3654 */
Frank Holtonefe120a2013-12-20 11:37:06 -05003655 btrfs_err(fs_info,
3656 "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003657 fs_info->chunk_root->nodesize, BTRFS_STRIPE_LEN);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04003658 return -EINVAL;
3659 }
3660
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003661 if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04003662 /* not supported for data w/o checksums */
Frank Holtonefe120a2013-12-20 11:37:06 -05003663 btrfs_err(fs_info,
3664 "scrub: size assumption sectorsize != PAGE_SIZE "
3665 "(%d != %lu) fails",
Geert Uytterhoeven27f9f022013-08-20 13:20:09 +02003666 fs_info->chunk_root->sectorsize, PAGE_SIZE);
Arne Jansena2de7332011-03-08 14:14:00 +01003667 return -EINVAL;
3668 }
3669
Stefan Behrens7a9e9982012-11-02 14:58:04 +01003670 if (fs_info->chunk_root->nodesize >
3671 PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
3672 fs_info->chunk_root->sectorsize >
3673 PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
3674 /*
3675 * would exhaust the array bounds of pagev member in
3676 * struct scrub_block
3677 */
Frank Holtonefe120a2013-12-20 11:37:06 -05003678 btrfs_err(fs_info, "scrub: size assumption nodesize and sectorsize "
3679 "<= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
Stefan Behrens7a9e9982012-11-02 14:58:04 +01003680 fs_info->chunk_root->nodesize,
3681 SCRUB_MAX_PAGES_PER_BLOCK,
3682 fs_info->chunk_root->sectorsize,
3683 SCRUB_MAX_PAGES_PER_BLOCK);
3684 return -EINVAL;
3685 }
3686
Arne Jansena2de7332011-03-08 14:14:00 +01003687
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003688 mutex_lock(&fs_info->fs_devices->device_list_mutex);
3689 dev = btrfs_find_device(fs_info, devid, NULL, NULL);
Stefan Behrens63a212a2012-11-05 18:29:28 +01003690 if (!dev || (dev->missing && !is_dev_replace)) {
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003691 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
Arne Jansena2de7332011-03-08 14:14:00 +01003692 return -ENODEV;
3693 }
Arne Jansena2de7332011-03-08 14:14:00 +01003694
Miao Xie5d68da32014-07-24 11:37:07 +08003695 if (!is_dev_replace && !readonly && !dev->writeable) {
3696 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3697 rcu_read_lock();
3698 name = rcu_dereference(dev->name);
3699 btrfs_err(fs_info, "scrub: device %s is not writable",
3700 name->str);
3701 rcu_read_unlock();
3702 return -EROFS;
3703 }
3704
Wang Shilong3b7a0162013-10-12 02:11:12 +08003705 mutex_lock(&fs_info->scrub_lock);
Stefan Behrens63a212a2012-11-05 18:29:28 +01003706 if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
Arne Jansena2de7332011-03-08 14:14:00 +01003707 mutex_unlock(&fs_info->scrub_lock);
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003708 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003709 return -EIO;
Arne Jansena2de7332011-03-08 14:14:00 +01003710 }
3711
Stefan Behrens8dabb742012-11-06 13:15:27 +01003712 btrfs_dev_replace_lock(&fs_info->dev_replace);
3713 if (dev->scrub_device ||
3714 (!is_dev_replace &&
3715 btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
3716 btrfs_dev_replace_unlock(&fs_info->dev_replace);
Arne Jansena2de7332011-03-08 14:14:00 +01003717 mutex_unlock(&fs_info->scrub_lock);
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003718 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
Arne Jansena2de7332011-03-08 14:14:00 +01003719 return -EINPROGRESS;
3720 }
Stefan Behrens8dabb742012-11-06 13:15:27 +01003721 btrfs_dev_replace_unlock(&fs_info->dev_replace);
Wang Shilong3b7a0162013-10-12 02:11:12 +08003722
3723 ret = scrub_workers_get(fs_info, is_dev_replace);
3724 if (ret) {
3725 mutex_unlock(&fs_info->scrub_lock);
3726 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3727 return ret;
3728 }
3729
Stefan Behrens63a212a2012-11-05 18:29:28 +01003730 sctx = scrub_setup_ctx(dev, is_dev_replace);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003731 if (IS_ERR(sctx)) {
Arne Jansena2de7332011-03-08 14:14:00 +01003732 mutex_unlock(&fs_info->scrub_lock);
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003733 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3734 scrub_workers_put(fs_info);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003735 return PTR_ERR(sctx);
Arne Jansena2de7332011-03-08 14:14:00 +01003736 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003737 sctx->readonly = readonly;
3738 dev->scrub_device = sctx;
Wang Shilong3cb09292013-12-04 21:15:19 +08003739 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
Arne Jansena2de7332011-03-08 14:14:00 +01003740
Wang Shilong3cb09292013-12-04 21:15:19 +08003741 /*
3742 * by checking @scrub_pause_req here, we can avoid a
3743 * race between transaction commit and scrubbing.
3744 */
Wang Shilongcb7ab022013-12-04 21:16:53 +08003745 __scrub_blocked_if_needed(fs_info);
Arne Jansena2de7332011-03-08 14:14:00 +01003746 atomic_inc(&fs_info->scrubs_running);
3747 mutex_unlock(&fs_info->scrub_lock);
Arne Jansena2de7332011-03-08 14:14:00 +01003748
Stefan Behrensff023aa2012-11-06 11:43:11 +01003749 if (!is_dev_replace) {
Wang Shilong9b011ad2013-10-25 19:12:02 +08003750 /*
3751 * by holding the device list mutex, we avoid racing with
3752 * super block writes kicked off by a log tree sync.
3753 */
Wang Shilong3cb09292013-12-04 21:15:19 +08003754 mutex_lock(&fs_info->fs_devices->device_list_mutex);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003755 ret = scrub_supers(sctx, dev);
Wang Shilong3cb09292013-12-04 21:15:19 +08003756 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003757 }
Arne Jansena2de7332011-03-08 14:14:00 +01003758
3759 if (!ret)
Stefan Behrensff023aa2012-11-06 11:43:11 +01003760 ret = scrub_enumerate_chunks(sctx, dev, start, end,
3761 is_dev_replace);
Arne Jansena2de7332011-03-08 14:14:00 +01003762
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01003763 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
Arne Jansena2de7332011-03-08 14:14:00 +01003764 atomic_dec(&fs_info->scrubs_running);
3765 wake_up(&fs_info->scrub_pause_wait);
3766
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01003767 wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
Jan Schmidt0ef8e452011-06-13 20:04:15 +02003768
Arne Jansena2de7332011-03-08 14:14:00 +01003769 if (progress)
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003770 memcpy(progress, &sctx->stat, sizeof(*progress));
Arne Jansena2de7332011-03-08 14:14:00 +01003771
3772 mutex_lock(&fs_info->scrub_lock);
3773 dev->scrub_device = NULL;
Wang Shilong3b7a0162013-10-12 02:11:12 +08003774 scrub_workers_put(fs_info);
Arne Jansena2de7332011-03-08 14:14:00 +01003775 mutex_unlock(&fs_info->scrub_lock);
3776
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003777 scrub_free_ctx(sctx);
Arne Jansena2de7332011-03-08 14:14:00 +01003778
3779 return ret;
3780}
3781
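/*
 * Request that all running scrubs pause and wait until each of them has
 * acknowledged, i.e. until scrubs_paused has caught up with
 * scrubs_running. btrfs_scrub_continue() below lifts the request again.
 */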
Jeff Mahoney143bede2012-03-01 14:56:26 +01003782void btrfs_scrub_pause(struct btrfs_root *root)
Arne Jansena2de7332011-03-08 14:14:00 +01003783{
3784 struct btrfs_fs_info *fs_info = root->fs_info;
3785
3786 mutex_lock(&fs_info->scrub_lock);
3787 atomic_inc(&fs_info->scrub_pause_req);
3788 while (atomic_read(&fs_info->scrubs_paused) !=
3789 atomic_read(&fs_info->scrubs_running)) {
3790 mutex_unlock(&fs_info->scrub_lock);
3791 wait_event(fs_info->scrub_pause_wait,
3792 atomic_read(&fs_info->scrubs_paused) ==
3793 atomic_read(&fs_info->scrubs_running));
3794 mutex_lock(&fs_info->scrub_lock);
3795 }
3796 mutex_unlock(&fs_info->scrub_lock);
Arne Jansena2de7332011-03-08 14:14:00 +01003797}
3798
Jeff Mahoney143bede2012-03-01 14:56:26 +01003799void btrfs_scrub_continue(struct btrfs_root *root)
Arne Jansena2de7332011-03-08 14:14:00 +01003800{
3801 struct btrfs_fs_info *fs_info = root->fs_info;
3802
3803 atomic_dec(&fs_info->scrub_pause_req);
3804 wake_up(&fs_info->scrub_pause_wait);
Arne Jansena2de7332011-03-08 14:14:00 +01003805}
3806
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003807int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
Arne Jansena2de7332011-03-08 14:14:00 +01003808{
Arne Jansena2de7332011-03-08 14:14:00 +01003809 mutex_lock(&fs_info->scrub_lock);
3810 if (!atomic_read(&fs_info->scrubs_running)) {
3811 mutex_unlock(&fs_info->scrub_lock);
3812 return -ENOTCONN;
3813 }
3814
3815 atomic_inc(&fs_info->scrub_cancel_req);
3816 while (atomic_read(&fs_info->scrubs_running)) {
3817 mutex_unlock(&fs_info->scrub_lock);
3818 wait_event(fs_info->scrub_pause_wait,
3819 atomic_read(&fs_info->scrubs_running) == 0);
3820 mutex_lock(&fs_info->scrub_lock);
3821 }
3822 atomic_dec(&fs_info->scrub_cancel_req);
3823 mutex_unlock(&fs_info->scrub_lock);
3824
3825 return 0;
3826}
3827
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003828int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
3829 struct btrfs_device *dev)
Jeff Mahoney49b25e02012-03-01 17:24:58 +01003830{
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003831 struct scrub_ctx *sctx;
Arne Jansena2de7332011-03-08 14:14:00 +01003832
3833 mutex_lock(&fs_info->scrub_lock);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003834 sctx = dev->scrub_device;
3835 if (!sctx) {
Arne Jansena2de7332011-03-08 14:14:00 +01003836 mutex_unlock(&fs_info->scrub_lock);
3837 return -ENOTCONN;
3838 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003839 atomic_inc(&sctx->cancel_req);
Arne Jansena2de7332011-03-08 14:14:00 +01003840 while (dev->scrub_device) {
3841 mutex_unlock(&fs_info->scrub_lock);
3842 wait_event(fs_info->scrub_pause_wait,
3843 dev->scrub_device == NULL);
3844 mutex_lock(&fs_info->scrub_lock);
3845 }
3846 mutex_unlock(&fs_info->scrub_lock);
3847
3848 return 0;
3849}
Stefan Behrens1623ede2012-03-27 14:21:26 -04003850
Arne Jansena2de7332011-03-08 14:14:00 +01003851int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
3852 struct btrfs_scrub_progress *progress)
3853{
3854 struct btrfs_device *dev;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003855 struct scrub_ctx *sctx = NULL;
Arne Jansena2de7332011-03-08 14:14:00 +01003856
3857 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003858 dev = btrfs_find_device(root->fs_info, devid, NULL, NULL);
Arne Jansena2de7332011-03-08 14:14:00 +01003859 if (dev)
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003860 sctx = dev->scrub_device;
3861 if (sctx)
3862 memcpy(progress, &sctx->stat, sizeof(*progress));
Arne Jansena2de7332011-03-08 14:14:00 +01003863 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
3864
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003865 return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
Arne Jansena2de7332011-03-08 14:14:00 +01003866}
Stefan Behrensff023aa2012-11-06 11:43:11 +01003867
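/*
 * Map a logical extent to the physical address, device and mirror number
 * of the first stripe returned by btrfs_map_block(). The dev-replace
 * code uses this to redirect reads of remapped extents.
 */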
3868static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
3869 u64 extent_logical, u64 extent_len,
3870 u64 *extent_physical,
3871 struct btrfs_device **extent_dev,
3872 int *extent_mirror_num)
3873{
3874 u64 mapped_length;
3875 struct btrfs_bio *bbio = NULL;
3876 int ret;
3877
3878 mapped_length = extent_len;
3879 ret = btrfs_map_block(fs_info, READ, extent_logical,
3880 &mapped_length, &bbio, 0);
3881 if (ret || !bbio || mapped_length < extent_len ||
3882 !bbio->stripes[0].dev->bdev) {
Zhao Lei6e9606d2015-01-20 15:11:34 +08003883 btrfs_put_bbio(bbio);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003884 return;
3885 }
3886
3887 *extent_physical = bbio->stripes[0].physical;
3888 *extent_mirror_num = bbio->mirror_num;
3889 *extent_dev = bbio->stripes[0].dev;
Zhao Lei6e9606d2015-01-20 15:11:34 +08003890 btrfs_put_bbio(bbio);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003891}
3892
3893static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
3894 struct scrub_wr_ctx *wr_ctx,
3895 struct btrfs_fs_info *fs_info,
3896 struct btrfs_device *dev,
3897 int is_dev_replace)
3898{
3899 WARN_ON(wr_ctx->wr_curr_bio != NULL);
3900
3901 mutex_init(&wr_ctx->wr_lock);
3902 wr_ctx->wr_curr_bio = NULL;
3903 if (!is_dev_replace)
3904 return 0;
3905
3906 WARN_ON(!dev->bdev);
3907 wr_ctx->pages_per_wr_bio = min_t(int, SCRUB_PAGES_PER_WR_BIO,
3908 bio_get_nr_vecs(dev->bdev));
3909 wr_ctx->tgtdev = dev;
3910 atomic_set(&wr_ctx->flush_all_writes, 0);
3911 return 0;
3912}
3913
3914static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
3915{
3916 mutex_lock(&wr_ctx->wr_lock);
3917 kfree(wr_ctx->wr_curr_bio);
3918 wr_ctx->wr_curr_bio = NULL;
3919 mutex_unlock(&wr_ctx->wr_lock);
3920}
3921
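/*
 * Dev-replace for nodatacow extents: instead of a raw block copy, the
 * data is re-read through the inodes that reference the extent. This
 * function only queues the work; copy_nocow_pages_worker() does the
 * iteration and the actual copy.
 */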
3922static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
3923 int mirror_num, u64 physical_for_dev_replace)
3924{
3925 struct scrub_copy_nocow_ctx *nocow_ctx;
3926 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
3927
3928 nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
3929 if (!nocow_ctx) {
3930 spin_lock(&sctx->stat_lock);
3931 sctx->stat.malloc_errors++;
3932 spin_unlock(&sctx->stat_lock);
3933 return -ENOMEM;
3934 }
3935
3936 scrub_pending_trans_workers_inc(sctx);
3937
3938 nocow_ctx->sctx = sctx;
3939 nocow_ctx->logical = logical;
3940 nocow_ctx->len = len;
3941 nocow_ctx->mirror_num = mirror_num;
3942 nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
Liu Bo9e0af232014-08-15 23:36:53 +08003943 btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper,
3944 copy_nocow_pages_worker, NULL, NULL);
Josef Bacik652f25a2013-09-12 16:58:28 -04003945 INIT_LIST_HEAD(&nocow_ctx->inodes);
Qu Wenruo0339ef22014-02-28 10:46:17 +08003946 btrfs_queue_work(fs_info->scrub_nocow_workers,
3947 &nocow_ctx->work);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003948
3949 return 0;
3950}
3951
Josef Bacik652f25a2013-09-12 16:58:28 -04003952static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
3953{
3954 struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
3955 struct scrub_nocow_inode *nocow_inode;
3956
3957 nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
3958 if (!nocow_inode)
3959 return -ENOMEM;
3960 nocow_inode->inum = inum;
3961 nocow_inode->offset = offset;
3962 nocow_inode->root = root;
3963 list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
3964 return 0;
3965}
3966
3967#define COPY_COMPLETE 1
3968
Stefan Behrensff023aa2012-11-06 11:43:11 +01003969static void copy_nocow_pages_worker(struct btrfs_work *work)
3970{
3971 struct scrub_copy_nocow_ctx *nocow_ctx =
3972 container_of(work, struct scrub_copy_nocow_ctx, work);
3973 struct scrub_ctx *sctx = nocow_ctx->sctx;
3974 u64 logical = nocow_ctx->logical;
3975 u64 len = nocow_ctx->len;
3976 int mirror_num = nocow_ctx->mirror_num;
3977 u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
3978 int ret;
3979 struct btrfs_trans_handle *trans = NULL;
3980 struct btrfs_fs_info *fs_info;
3981 struct btrfs_path *path;
3982 struct btrfs_root *root;
3983 int not_written = 0;
3984
3985 fs_info = sctx->dev_root->fs_info;
3986 root = fs_info->extent_root;
3987
3988 path = btrfs_alloc_path();
3989 if (!path) {
3990 spin_lock(&sctx->stat_lock);
3991 sctx->stat.malloc_errors++;
3992 spin_unlock(&sctx->stat_lock);
3993 not_written = 1;
3994 goto out;
3995 }
3996
3997 trans = btrfs_join_transaction(root);
3998 if (IS_ERR(trans)) {
3999 not_written = 1;
4000 goto out;
4001 }
4002
4003 ret = iterate_inodes_from_logical(logical, fs_info, path,
Josef Bacik652f25a2013-09-12 16:58:28 -04004004 record_inode_for_nocow, nocow_ctx);
Stefan Behrensff023aa2012-11-06 11:43:11 +01004005 if (ret != 0 && ret != -ENOENT) {
Frank Holtonefe120a2013-12-20 11:37:06 -05004006 btrfs_warn(fs_info, "iterate_inodes_from_logical() failed: log %llu, "
4007 "phys %llu, len %llu, mir %u, ret %d",
Geert Uytterhoeven118a0a22013-08-20 13:20:10 +02004008 logical, physical_for_dev_replace, len, mirror_num,
4009 ret);
Stefan Behrensff023aa2012-11-06 11:43:11 +01004010 not_written = 1;
4011 goto out;
4012 }
4013
Josef Bacik652f25a2013-09-12 16:58:28 -04004014 btrfs_end_transaction(trans, root);
4015 trans = NULL;
4016 while (!list_empty(&nocow_ctx->inodes)) {
4017 struct scrub_nocow_inode *entry;
4018 entry = list_first_entry(&nocow_ctx->inodes,
4019 struct scrub_nocow_inode,
4020 list);
4021 list_del_init(&entry->list);
4022 ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
4023 entry->root, nocow_ctx);
4024 kfree(entry);
4025 if (ret == COPY_COMPLETE) {
4026 ret = 0;
4027 break;
4028 } else if (ret) {
4029 break;
4030 }
4031 }
Stefan Behrensff023aa2012-11-06 11:43:11 +01004032out:
	while (!list_empty(&nocow_ctx->inodes)) {
		struct scrub_nocow_inode *entry;
		entry = list_first_entry(&nocow_ctx->inodes,
					 struct scrub_nocow_inode,
					 list);
		list_del_init(&entry->list);
		kfree(entry);
	}
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, root);
	if (not_written)
		btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
					    num_uncorrectable_read_errors);

	btrfs_free_path(path);
	kfree(nocow_ctx);

	scrub_pending_trans_workers_dec(sctx);
}

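/*
 * Check that the file range [start, start + len) is still backed by the
 * logical extent that is being copied. Returns 1 if an ordered extent is
 * pending or the mapping no longer covers the logical range (the caller
 * should skip this inode), a negative errno on failure, and 0 otherwise.
 */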
static int check_extent_to_block(struct inode *inode, u64 start, u64 len,
				 u64 logical)
{
	struct extent_state *cached_state = NULL;
	struct btrfs_ordered_extent *ordered;
	struct extent_io_tree *io_tree;
	struct extent_map *em;
	u64 lockstart = start, lockend = start + len - 1;
	int ret = 0;

	io_tree = &BTRFS_I(inode)->io_tree;

	lock_extent_bits(io_tree, lockstart, lockend, 0, &cached_state);
	ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
	if (ordered) {
		btrfs_put_ordered_extent(ordered);
		ret = 1;
		goto out_unlock;
	}

	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_unlock;
	}

	/*
	 * This extent does not actually cover the logical extent anymore,
	 * move on to the next inode.
	 */
	if (em->block_start > logical ||
	    em->block_start + em->block_len < logical + len) {
		free_extent_map(em);
		ret = 1;
		goto out_unlock;
	}
	free_extent_map(em);

out_unlock:
	unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
			     GFP_NOFS);
	return ret;
}

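/*
 * Copy the pages of one inode that reference the nocow extent to the
 * dev-replace target, page by page. The inode mutex is held to keep
 * truncate, direct I/O and hole punching away while the pages are read
 * and written. Returns COPY_COMPLETE when the whole range was written,
 * 0 when the extent no longer matches the range, or a negative errno.
 */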
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
				      struct scrub_copy_nocow_ctx *nocow_ctx)
{
	struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
	struct btrfs_key key;
	struct inode *inode;
	struct page *page;
	struct btrfs_root *local_root;
	struct extent_io_tree *io_tree;
	u64 physical_for_dev_replace;
	u64 nocow_ctx_logical;
	u64 len = nocow_ctx->len;
	unsigned long index;
	int srcu_index;
	int ret = 0;
	int err = 0;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(local_root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return PTR_ERR(local_root);
	}

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	/* Avoid truncate/dio/punch hole. */
	mutex_lock(&inode->i_mutex);
	inode_dio_wait(inode);

	physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
	io_tree = &BTRFS_I(inode)->io_tree;
	nocow_ctx_logical = nocow_ctx->logical;

	ret = check_extent_to_block(inode, offset, len, nocow_ctx_logical);
	if (ret) {
		ret = ret > 0 ? 0 : ret;
		goto out;
	}

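	/*
	 * Copy the range page by page: take each page from the page cache
	 * (reading it in first if it is not uptodate) and write it out to
	 * the replace target.
	 */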
	while (len >= PAGE_CACHE_SIZE) {
		index = offset >> PAGE_CACHE_SHIFT;
again:
		page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
		if (!page) {
			btrfs_err(fs_info, "find_or_create_page() failed");
			ret = -ENOMEM;
			goto out;
		}

		if (PageUptodate(page)) {
			if (PageDirty(page))
				goto next_page;
		} else {
			ClearPageError(page);
			err = extent_read_full_page(io_tree, page,
						    btrfs_get_extent,
						    nocow_ctx->mirror_num);
			if (err) {
				ret = err;
				goto next_page;
			}

			lock_page(page);
			/*
			 * If the page has been removed from the page cache,
			 * the data on it is meaningless, because it may be
			 * stale: the new data may have been written into a
			 * new page in the page cache.
			 */
			if (page->mapping != inode->i_mapping) {
				unlock_page(page);
				page_cache_release(page);
				goto again;
			}
			if (!PageUptodate(page)) {
				ret = -EIO;
				goto next_page;
			}
		}

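		/*
		 * With the page locked, recheck that the extent still
		 * covers this range; if it does not, skip the write.
		 */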
		ret = check_extent_to_block(inode, offset, len,
					    nocow_ctx_logical);
		if (ret) {
			ret = ret > 0 ? 0 : ret;
			goto next_page;
		}

		err = write_page_nocow(nocow_ctx->sctx,
				       physical_for_dev_replace, page);
		if (err)
			ret = err;
next_page:
		unlock_page(page);
		page_cache_release(page);

		if (ret)
			break;

		offset += PAGE_CACHE_SIZE;
		physical_for_dev_replace += PAGE_CACHE_SIZE;
		nocow_ctx_logical += PAGE_CACHE_SIZE;
		len -= PAGE_CACHE_SIZE;
	}
	ret = COPY_COMPLETE;
out:
	mutex_unlock(&inode->i_mutex);
	iput(inode);
	return ret;
}

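/*
 * Synchronously write a single page to the given physical offset on the
 * dev-replace target device.
 */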
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page)
{
	struct bio *bio;
	struct btrfs_device *dev;
	int ret;

	dev = sctx->wr_ctx.tgtdev;
	if (!dev)
		return -EIO;
	if (!dev->bdev) {
		printk_ratelimited(KERN_WARNING
			"BTRFS: scrub write_page_nocow(bdev == NULL) is unexpected!\n");
		return -EIO;
	}
	bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
	if (!bio) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}
	bio->bi_iter.bi_size = 0;
	bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
	bio->bi_bdev = dev->bdev;
	ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
	if (ret != PAGE_CACHE_SIZE) {
leave_with_eio:
		bio_put(bio);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
		return -EIO;
	}

	if (btrfsic_submit_bio_wait(WRITE_SYNC, bio))
		goto leave_with_eio;

	bio_put(bio);
	return 0;
}
4257}