/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "raid56.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_block;
struct scrub_ctx;

/*
 * the following three values only influence the performance.
 * The last one configures the number of parallel and outstanding I/O
 * operations. The first two values configure an upper limit for the number
 * of (dynamically allocated) pages that are added to a bio.
 */
#define SCRUB_PAGES_PER_RD_BIO  32      /* 128k per bio */
#define SCRUB_PAGES_PER_WR_BIO  32      /* 128k per bio */
#define SCRUB_BIOS_PER_SCTX     64      /* 8MB per device in flight */

/*
 * the following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 * Values larger than BTRFS_STRIPE_LEN are not supported.
 */
#define SCRUB_MAX_PAGES_PER_BLOCK       16      /* 64k per node/leaf/sector */

struct scrub_recover {
        atomic_t                refs;
        struct btrfs_bio        *bbio;
        u64                     *raid_map;
        u64                     map_length;
};

struct scrub_page {
        struct scrub_block      *sblock;
        struct page             *page;
        struct btrfs_device     *dev;
        struct list_head        list;
        u64                     flags;  /* extent flags */
        u64                     generation;
        u64                     logical;
        u64                     physical;
        u64                     physical_for_dev_replace;
        atomic_t                ref_count;
        struct {
                unsigned int    mirror_num:8;
                unsigned int    have_csum:1;
                unsigned int    io_error:1;
        };
        u8                      csum[BTRFS_CSUM_SIZE];

        struct scrub_recover    *recover;
};

struct scrub_bio {
        int                     index;
        struct scrub_ctx        *sctx;
        struct btrfs_device     *dev;
        struct bio              *bio;
        int                     err;
        u64                     logical;
        u64                     physical;
#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
        struct scrub_page       *pagev[SCRUB_PAGES_PER_WR_BIO];
#else
        struct scrub_page       *pagev[SCRUB_PAGES_PER_RD_BIO];
#endif
        int                     page_count;
        int                     next_free;
        struct btrfs_work       work;
};

struct scrub_block {
        struct scrub_page       *pagev[SCRUB_MAX_PAGES_PER_BLOCK];
        int                     page_count;
        atomic_t                outstanding_pages;
        atomic_t                ref_count; /* free mem on transition to zero */
        struct scrub_ctx        *sctx;
        struct scrub_parity     *sparity;
        struct {
                unsigned int    header_error:1;
                unsigned int    checksum_error:1;
                unsigned int    no_io_error_seen:1;
                unsigned int    generation_error:1; /* also sets header_error */

                /*
                 * The following is for the data used to check parity.
                 * It is only valid for the data that has a checksum.
                 */
                unsigned int    data_corrected:1;
        };
};

/* Used for the chunks with parity stripe such as RAID5/6 */
struct scrub_parity {
        struct scrub_ctx        *sctx;

        struct btrfs_device     *scrub_dev;

        u64                     logic_start;

        u64                     logic_end;

        int                     nsectors;

        int                     stripe_len;

        atomic_t                ref_count;

        struct list_head        spages;

        /* Work of parity check and repair */
        struct btrfs_work       work;

        /* Mark the parity blocks which have data */
        unsigned long           *dbitmap;

        /*
         * Mark the parity blocks which have data, but for which errors
         * happened when reading or checking that data
         */
        unsigned long           *ebitmap;

        unsigned long           bitmap[0];
};

struct scrub_wr_ctx {
        struct scrub_bio *wr_curr_bio;
        struct btrfs_device *tgtdev;
        int pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
        atomic_t flush_all_writes;
        struct mutex wr_lock;
};

struct scrub_ctx {
        struct scrub_bio        *bios[SCRUB_BIOS_PER_SCTX];
        struct btrfs_root       *dev_root;
        int                     first_free;
        int                     curr;
        atomic_t                bios_in_flight;
        atomic_t                workers_pending;
        spinlock_t              list_lock;
        wait_queue_head_t       list_wait;
        u16                     csum_size;
        struct list_head        csum_list;
        atomic_t                cancel_req;
        int                     readonly;
        int                     pages_per_rd_bio;
        u32                     sectorsize;
        u32                     nodesize;

        int                     is_dev_replace;
        struct scrub_wr_ctx     wr_ctx;

        /*
         * statistics
         */
        struct btrfs_scrub_progress stat;
        spinlock_t              stat_lock;
};

struct scrub_fixup_nodatasum {
        struct scrub_ctx        *sctx;
        struct btrfs_device     *dev;
        u64                     logical;
        struct btrfs_root       *root;
        struct btrfs_work       work;
        int                     mirror_num;
};

struct scrub_nocow_inode {
        u64                     inum;
        u64                     offset;
        u64                     root;
        struct list_head        list;
};

struct scrub_copy_nocow_ctx {
        struct scrub_ctx        *sctx;
        u64                     logical;
        u64                     len;
        int                     mirror_num;
        u64                     physical_for_dev_replace;
        struct list_head        inodes;
        struct btrfs_work       work;
};

struct scrub_warning {
        struct btrfs_path       *path;
        u64                     extent_item_size;
        const char              *errstr;
        sector_t                sector;
        u64                     logical;
        struct btrfs_device     *dev;
};

static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
                                     struct btrfs_fs_info *fs_info,
                                     struct scrub_block *original_sblock,
                                     u64 length, u64 logical,
                                     struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
                                struct scrub_block *sblock, int is_metadata,
                                int have_csum, u8 *csum, u64 generation,
                                u16 csum_size, int retry_failed_mirror);
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
                                         struct scrub_block *sblock,
                                         int is_metadata, int have_csum,
                                         const u8 *csum, u64 generation,
                                         u16 csum_size);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
                                             struct scrub_block *sblock_good,
                                             int force_write);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
                                            struct scrub_block *sblock_good,
                                            int page_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
                                           int page_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
static void scrub_parity_get(struct scrub_parity *sparity);
static void scrub_parity_put(struct scrub_parity *sparity);
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
                                    struct scrub_page *spage);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
                       u64 physical, struct btrfs_device *dev, u64 flags,
                       u64 gen, int mirror_num, u8 *csum, int force,
                       u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio, int err);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
                               u64 extent_logical, u64 extent_len,
                               u64 *extent_physical,
                               struct btrfs_device **extent_dev,
                               int *extent_mirror_num);
static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
                              struct scrub_wr_ctx *wr_ctx,
                              struct btrfs_fs_info *fs_info,
                              struct btrfs_device *dev,
                              int is_dev_replace);
static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
                                    struct scrub_page *spage);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio, int err);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static int write_page_nocow(struct scrub_ctx *sctx,
                            u64 physical_for_dev_replace, struct page *page);
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
                                      struct scrub_copy_nocow_ctx *ctx);
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
                            int mirror_num, u64 physical_for_dev_replace);
static void copy_nocow_pages_worker(struct btrfs_work *work);
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);


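/*
 * bios_in_flight counts scrub read bios that have been submitted but not
 * yet completed; every decrement wakes list_wait so that waiters can
 * re-check the counter.
 */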
static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
        atomic_inc(&sctx->bios_in_flight);
}

static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
        atomic_dec(&sctx->bios_in_flight);
        wake_up(&sctx->list_wait);
}

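/*
 * wait until all pause requests have been dropped; scrub_lock must be
 * held by the caller and is released while sleeping and re-taken before
 * returning
 */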
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
        while (atomic_read(&fs_info->scrub_pause_req)) {
                mutex_unlock(&fs_info->scrub_lock);
                wait_event(fs_info->scrub_pause_wait,
                   atomic_read(&fs_info->scrub_pause_req) == 0);
                mutex_lock(&fs_info->scrub_lock);
        }
}

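/*
 * announce this scrub as paused (so pause requesters can make progress)
 * and block until all outstanding pause requests are gone
 */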
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
        atomic_inc(&fs_info->scrubs_paused);
        wake_up(&fs_info->scrub_pause_wait);

        mutex_lock(&fs_info->scrub_lock);
        __scrub_blocked_if_needed(fs_info);
        atomic_dec(&fs_info->scrubs_paused);
        mutex_unlock(&fs_info->scrub_lock);

        wake_up(&fs_info->scrub_pause_wait);
}

/*
 * used for workers that require transaction commits (i.e., for the
 * NOCOW case)
 */
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
{
        struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

        /*
         * increment scrubs_running to prevent cancel requests from
         * completing as long as a worker is running. we must also
         * increment scrubs_paused to prevent deadlocking on pause
         * requests used for transaction commits (as the worker uses a
         * transaction context). it is safe to regard the worker
         * as paused for all practical matters. effectively, we only
         * avoid cancellation requests from completing.
         */
        mutex_lock(&fs_info->scrub_lock);
        atomic_inc(&fs_info->scrubs_running);
        atomic_inc(&fs_info->scrubs_paused);
        mutex_unlock(&fs_info->scrub_lock);

        /*
         * checking the @scrubs_running == @scrubs_paused condition inside
         * wait_event() is not an atomic operation, which means we may
         * inc/dec @scrubs_running/paused at any time. Wake up
         * @scrub_pause_wait as often as we can so that a transaction
         * commit is blocked as briefly as possible.
         */
        wake_up(&fs_info->scrub_pause_wait);

        atomic_inc(&sctx->workers_pending);
}

/* used for workers that require transaction commits */
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
{
        struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

        /*
         * see scrub_pending_trans_workers_inc() for why we're pretending
         * to be paused in the scrub counters
         */
        mutex_lock(&fs_info->scrub_lock);
        atomic_dec(&fs_info->scrubs_running);
        atomic_dec(&fs_info->scrubs_paused);
        mutex_unlock(&fs_info->scrub_lock);
        atomic_dec(&sctx->workers_pending);
        wake_up(&fs_info->scrub_pause_wait);
        wake_up(&sctx->list_wait);
}

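/* drain and free all checksums still queued on the context's csum_list */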
static void scrub_free_csums(struct scrub_ctx *sctx)
{
        while (!list_empty(&sctx->csum_list)) {
                struct btrfs_ordered_sum *sum;
                sum = list_first_entry(&sctx->csum_list,
                                       struct btrfs_ordered_sum, list);
                list_del(&sum->list);
                kfree(sum);
        }
}

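/*
 * tear down a scrub context: release the bio that was being filled when
 * the scrub was cancelled (if any), all scrub_bios of the pool, the
 * queued checksums and finally the context itself
 */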
static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
        int i;

        if (!sctx)
                return;

        scrub_free_wr_ctx(&sctx->wr_ctx);

        /* this can happen when scrub is cancelled */
        if (sctx->curr != -1) {
                struct scrub_bio *sbio = sctx->bios[sctx->curr];

                for (i = 0; i < sbio->page_count; i++) {
                        WARN_ON(!sbio->pagev[i]->page);
                        scrub_block_put(sbio->pagev[i]->sblock);
                }
                bio_put(sbio->bio);
        }

        for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
                struct scrub_bio *sbio = sctx->bios[i];

                if (!sbio)
                        break;
                kfree(sbio);
        }

        scrub_free_csums(sctx);
        kfree(sctx);
}

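/*
 * allocate and initialize a scrub context for one device: a fixed pool
 * of SCRUB_BIOS_PER_SCTX scrub_bios chained into a free list, plus the
 * write context needed by dev-replace
 */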
static noinline_for_stack
struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
{
        struct scrub_ctx *sctx;
        int i;
        struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
        int pages_per_rd_bio;
        int ret;

        /*
         * the setting of pages_per_rd_bio is correct for scrub but might
         * be wrong for the dev_replace code where we might read from
         * different devices in the initial huge bios. However, that
         * code is able to correctly handle the case when adding a page
         * to a bio fails.
         */
        if (dev->bdev)
                pages_per_rd_bio = min_t(int, SCRUB_PAGES_PER_RD_BIO,
                                         bio_get_nr_vecs(dev->bdev));
        else
                pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
        sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
        if (!sctx)
                goto nomem;
        sctx->is_dev_replace = is_dev_replace;
        sctx->pages_per_rd_bio = pages_per_rd_bio;
        sctx->curr = -1;
        sctx->dev_root = dev->dev_root;
        for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
                struct scrub_bio *sbio;

                sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
                if (!sbio)
                        goto nomem;
                sctx->bios[i] = sbio;

                sbio->index = i;
                sbio->sctx = sctx;
                sbio->page_count = 0;
                btrfs_init_work(&sbio->work, btrfs_scrub_helper,
                                scrub_bio_end_io_worker, NULL, NULL);

                if (i != SCRUB_BIOS_PER_SCTX - 1)
                        sctx->bios[i]->next_free = i + 1;
                else
                        sctx->bios[i]->next_free = -1;
        }
        sctx->first_free = 0;
        sctx->nodesize = dev->dev_root->nodesize;
        sctx->sectorsize = dev->dev_root->sectorsize;
        atomic_set(&sctx->bios_in_flight, 0);
        atomic_set(&sctx->workers_pending, 0);
        atomic_set(&sctx->cancel_req, 0);
        sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
        INIT_LIST_HEAD(&sctx->csum_list);

        spin_lock_init(&sctx->list_lock);
        spin_lock_init(&sctx->stat_lock);
        init_waitqueue_head(&sctx->list_wait);

        ret = scrub_setup_wr_ctx(sctx, &sctx->wr_ctx, fs_info,
                                 fs_info->dev_replace.tgtdev, is_dev_replace);
        if (ret) {
                scrub_free_ctx(sctx);
                return ERR_PTR(ret);
        }
        return sctx;

nomem:
        scrub_free_ctx(sctx);
        return ERR_PTR(-ENOMEM);
}

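/*
 * callback for iterate_extent_inodes(): resolve all paths that lead to
 * the given inode and print one warning line per path, so that the user
 * can see which files are affected by the errored extent
 */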
static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
                                     void *warn_ctx)
{
        u64 isize;
        u32 nlink;
        int ret;
        int i;
        struct extent_buffer *eb;
        struct btrfs_inode_item *inode_item;
        struct scrub_warning *swarn = warn_ctx;
        struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
        struct inode_fs_paths *ipath = NULL;
        struct btrfs_root *local_root;
        struct btrfs_key root_key;

        root_key.objectid = root;
        root_key.type = BTRFS_ROOT_ITEM_KEY;
        root_key.offset = (u64)-1;
        local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
        if (IS_ERR(local_root)) {
                ret = PTR_ERR(local_root);
                goto err;
        }

        /*
         * this makes the path point to (inum INODE_ITEM ioff)
         */
        ret = btrfs_find_item(local_root, swarn->path, inum, 0,
                        BTRFS_INODE_ITEM_KEY, NULL);
        if (ret) {
                btrfs_release_path(swarn->path);
                goto err;
        }

        eb = swarn->path->nodes[0];
        inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
                                        struct btrfs_inode_item);
        isize = btrfs_inode_size(eb, inode_item);
        nlink = btrfs_inode_nlink(eb, inode_item);
        btrfs_release_path(swarn->path);

        ipath = init_ipath(4096, local_root, swarn->path);
        if (IS_ERR(ipath)) {
                ret = PTR_ERR(ipath);
                ipath = NULL;
                goto err;
        }
        ret = paths_from_inode(inum, ipath);

        if (ret < 0)
                goto err;

        /*
         * we deliberately ignore the bit ipath might have been too small to
         * hold all of the paths here
         */
        for (i = 0; i < ipath->fspath->elem_cnt; ++i)
                printk_in_rcu(KERN_WARNING "BTRFS: %s at logical %llu on dev "
                        "%s, sector %llu, root %llu, inode %llu, offset %llu, "
                        "length %llu, links %u (path: %s)\n", swarn->errstr,
                        swarn->logical, rcu_str_deref(swarn->dev->name),
                        (unsigned long long)swarn->sector, root, inum, offset,
                        min(isize - offset, (u64)PAGE_SIZE), nlink,
                        (char *)(unsigned long)ipath->fspath->val[i]);

        free_ipath(ipath);
        return 0;

err:
        printk_in_rcu(KERN_WARNING "BTRFS: %s at logical %llu on dev "
                "%s, sector %llu, root %llu, inode %llu, offset %llu: path "
                "resolving failed with ret=%d\n", swarn->errstr,
                swarn->logical, rcu_str_deref(swarn->dev->name),
                (unsigned long long)swarn->sector, root, inum, offset, ret);

        free_ipath(ipath);
        return 0;
}

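/*
 * look up the extent that covers the errored block and warn about it:
 * for tree blocks, walk the backrefs and name the owning tree(s); for
 * data, resolve and report the affected inodes and file paths
 */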
static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
        struct btrfs_device *dev;
        struct btrfs_fs_info *fs_info;
        struct btrfs_path *path;
        struct btrfs_key found_key;
        struct extent_buffer *eb;
        struct btrfs_extent_item *ei;
        struct scrub_warning swarn;
        unsigned long ptr = 0;
        u64 extent_item_pos;
        u64 flags = 0;
        u64 ref_root;
        u32 item_size;
        u8 ref_level;
        int ret;

        WARN_ON(sblock->page_count < 1);
        dev = sblock->pagev[0]->dev;
        fs_info = sblock->sctx->dev_root->fs_info;

        path = btrfs_alloc_path();
        if (!path)
                return;

        swarn.sector = (sblock->pagev[0]->physical) >> 9;
        swarn.logical = sblock->pagev[0]->logical;
        swarn.errstr = errstr;
        swarn.dev = NULL;

        ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
                                  &flags);
        if (ret < 0)
                goto out;

        extent_item_pos = swarn.logical - found_key.objectid;
        swarn.extent_item_size = found_key.offset;

        eb = path->nodes[0];
        ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
        item_size = btrfs_item_size_nr(eb, path->slots[0]);

        if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
                do {
                        ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
                                                      item_size, &ref_root,
                                                      &ref_level);
                        printk_in_rcu(KERN_WARNING
                                "BTRFS: %s at logical %llu on dev %s, "
                                "sector %llu: metadata %s (level %d) in tree "
                                "%llu\n", errstr, swarn.logical,
                                rcu_str_deref(dev->name),
                                (unsigned long long)swarn.sector,
                                ref_level ? "node" : "leaf",
                                ret < 0 ? -1 : ref_level,
                                ret < 0 ? -1 : ref_root);
                } while (ret != 1);
                btrfs_release_path(path);
        } else {
                btrfs_release_path(path);
                swarn.path = path;
                swarn.dev = dev;
                iterate_extent_inodes(fs_info, found_key.objectid,
                                        extent_item_pos, 1,
                                        scrub_print_warning_inode, &swarn);
        }

out:
        btrfs_free_path(path);
}

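/*
 * callback for iterate_inodes_from_logical(): try to repair the bad
 * sector through the page cache of one inode that references it. A clean
 * uptodate page is written back to the defect sector directly; otherwise
 * the bad mirror is read on purpose so that the generic readpage repair
 * corrects it on the fly.
 */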
static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
{
        struct page *page = NULL;
        unsigned long index;
        struct scrub_fixup_nodatasum *fixup = fixup_ctx;
        int ret;
        int corrected = 0;
        struct btrfs_key key;
        struct inode *inode = NULL;
        struct btrfs_fs_info *fs_info;
        u64 end = offset + PAGE_SIZE - 1;
        struct btrfs_root *local_root;
        int srcu_index;

        key.objectid = root;
        key.type = BTRFS_ROOT_ITEM_KEY;
        key.offset = (u64)-1;

        fs_info = fixup->root->fs_info;
        srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

        local_root = btrfs_read_fs_root_no_name(fs_info, &key);
        if (IS_ERR(local_root)) {
                srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
                return PTR_ERR(local_root);
        }

        key.type = BTRFS_INODE_ITEM_KEY;
        key.objectid = inum;
        key.offset = 0;
        inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
        srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
        if (IS_ERR(inode))
                return PTR_ERR(inode);

        index = offset >> PAGE_CACHE_SHIFT;

        page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
        if (!page) {
                ret = -ENOMEM;
                goto out;
        }

        if (PageUptodate(page)) {
                if (PageDirty(page)) {
                        /*
                         * we need to write the data to the defect sector. the
                         * data that was in that sector is not in memory,
                         * because the page was modified. we must not write the
                         * modified page to that sector.
                         *
                         * TODO: what could be done here: wait for the delalloc
                         *       runner to write out that page (might involve
                         *       COW) and see whether the sector is still
                         *       referenced afterwards.
                         *
                         * For the meantime, we'll treat this error as
                         * uncorrectable, although there is a chance that a
                         * later scrub will find the bad sector again and that
                         * there's no dirty page in memory, then.
                         */
                        ret = -EIO;
                        goto out;
                }
                ret = repair_io_failure(inode, offset, PAGE_SIZE,
                                        fixup->logical, page,
                                        offset - page_offset(page),
                                        fixup->mirror_num);
                unlock_page(page);
                corrected = !ret;
        } else {
                /*
                 * we need to get good data first. the general readpage path
                 * will call repair_io_failure for us, we just have to make
                 * sure we read the bad mirror.
                 */
                ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
                                        EXTENT_DAMAGED, GFP_NOFS);
                if (ret) {
                        /* set_extent_bits should give proper error */
                        WARN_ON(ret > 0);
                        if (ret > 0)
                                ret = -EFAULT;
                        goto out;
                }

                ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
                                                btrfs_get_extent,
                                                fixup->mirror_num);
                wait_on_page_locked(page);

                corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
                                                end, EXTENT_DAMAGED, 0, NULL);
                if (!corrected)
                        clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
                                                EXTENT_DAMAGED, GFP_NOFS);
        }

out:
        if (page)
                put_page(page);

        iput(inode);

        if (ret < 0)
                return ret;

        if (ret == 0 && corrected) {
                /*
                 * we only need to call readpage for one of the inodes belonging
                 * to this extent. so make iterate_extent_inodes stop
                 */
                return 1;
        }

        return -EIO;
}

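/*
 * worker for the nodatasum case: repair is triggered by reading the bad
 * copy through one of the referencing inodes (see scrub_fixup_readpage),
 * and the outcome is accounted in the scrub statistics
 */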
static void scrub_fixup_nodatasum(struct btrfs_work *work)
{
        int ret;
        struct scrub_fixup_nodatasum *fixup;
        struct scrub_ctx *sctx;
        struct btrfs_trans_handle *trans = NULL;
        struct btrfs_path *path;
        int uncorrectable = 0;

        fixup = container_of(work, struct scrub_fixup_nodatasum, work);
        sctx = fixup->sctx;

        path = btrfs_alloc_path();
        if (!path) {
                spin_lock(&sctx->stat_lock);
                ++sctx->stat.malloc_errors;
                spin_unlock(&sctx->stat_lock);
                uncorrectable = 1;
                goto out;
        }

        trans = btrfs_join_transaction(fixup->root);
        if (IS_ERR(trans)) {
                uncorrectable = 1;
                goto out;
        }

        /*
         * the idea is to trigger a regular read through the standard path. we
         * read a page from the (failed) logical address by specifying the
         * corresponding copynum of the failed sector. thus, that readpage is
         * expected to fail.
         * that is the point where on-the-fly error correction will kick in
         * (once it's finished) and rewrite the failed sector if a good copy
         * can be found.
         */
        ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
                                                path, scrub_fixup_readpage,
                                                fixup);
        if (ret < 0) {
                uncorrectable = 1;
                goto out;
        }
        WARN_ON(ret != 1);

        spin_lock(&sctx->stat_lock);
        ++sctx->stat.corrected_errors;
        spin_unlock(&sctx->stat_lock);

out:
        if (trans && !IS_ERR(trans))
                btrfs_end_transaction(trans, fixup->root);
        if (uncorrectable) {
                spin_lock(&sctx->stat_lock);
                ++sctx->stat.uncorrectable_errors;
                spin_unlock(&sctx->stat_lock);
                btrfs_dev_replace_stats_inc(
                        &sctx->dev_root->fs_info->dev_replace.
                        num_uncorrectable_read_errors);
                printk_ratelimited_in_rcu(KERN_ERR "BTRFS: "
                    "unable to fixup (nodatasum) error at logical %llu on dev %s\n",
                        fixup->logical, rcu_str_deref(fixup->dev->name));
        }

        btrfs_free_path(path);
        kfree(fixup);

        scrub_pending_trans_workers_dec(sctx);
}

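/*
 * reference counting for struct scrub_recover, which is shared by the
 * scrub_pages of a recheck block; the last put frees the contained
 * btrfs_bio and raid map
 */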
static inline void scrub_get_recover(struct scrub_recover *recover)
{
        atomic_inc(&recover->refs);
}

static inline void scrub_put_recover(struct scrub_recover *recover)
{
        if (atomic_dec_and_test(&recover->refs)) {
                kfree(recover->bbio);
                kfree(recover->raid_map);
                kfree(recover);
        }
}

/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
        struct scrub_ctx *sctx = sblock_to_check->sctx;
        struct btrfs_device *dev;
        struct btrfs_fs_info *fs_info;
        u64 length;
        u64 logical;
        u64 generation;
        unsigned int failed_mirror_index;
        unsigned int is_metadata;
        unsigned int have_csum;
        u8 *csum;
        struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
        struct scrub_block *sblock_bad;
        int ret;
        int mirror_index;
        int page_num;
        int success;
        static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
                                      DEFAULT_RATELIMIT_BURST);

        BUG_ON(sblock_to_check->page_count < 1);
        fs_info = sctx->dev_root->fs_info;
        if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
                /*
                 * if we find an error in a super block, we just report it.
                 * They will get written with the next transaction commit
                 * anyway
                 */
                spin_lock(&sctx->stat_lock);
                ++sctx->stat.super_errors;
                spin_unlock(&sctx->stat_lock);
                return 0;
        }
        length = sblock_to_check->page_count * PAGE_SIZE;
        logical = sblock_to_check->pagev[0]->logical;
        generation = sblock_to_check->pagev[0]->generation;
        BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
        failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
        is_metadata = !(sblock_to_check->pagev[0]->flags &
                        BTRFS_EXTENT_FLAG_DATA);
        have_csum = sblock_to_check->pagev[0]->have_csum;
        csum = sblock_to_check->pagev[0]->csum;
        dev = sblock_to_check->pagev[0]->dev;

        if (sctx->is_dev_replace && !is_metadata && !have_csum) {
                sblocks_for_recheck = NULL;
                goto nodatasum_case;
        }

        /*
         * read all mirrors one after the other. This includes re-reading
         * the extent or metadata block that failed (that was the cause
         * that this fixup code is called), this time page by page, in
         * order to know which pages caused I/O errors and which ones are
         * good (for all mirrors).
         * It is the goal to handle the situation when more than one
         * mirror contains I/O errors, but the errors do not
         * overlap, i.e. the data can be repaired by selecting the
         * pages from those mirrors without I/O error on the
         * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
         * would be that mirror #1 has an I/O error on the first page,
         * the second page is good, and mirror #2 has an I/O error on
         * the second page, but the first page is good.
         * Then the first page of the first mirror can be repaired by
         * taking the first page of the second mirror, and the
         * second page of the second mirror can be repaired by
         * copying the contents of the 2nd page of the 1st mirror.
         * One more note: if the pages of one mirror contain I/O
         * errors, the checksum cannot be verified. In order to get
         * the best data for repairing, the first attempt is to find
         * a mirror without I/O errors and with a validated checksum.
         * Only if this is not possible, the pages are picked from
         * mirrors with I/O errors without considering the checksum.
         * If the latter is the case, at the end, the checksum of the
         * repaired area is verified in order to correctly maintain
         * the statistics.
         */

        sblocks_for_recheck = kzalloc(BTRFS_MAX_MIRRORS *
                                      sizeof(*sblocks_for_recheck),
                                      GFP_NOFS);
        if (!sblocks_for_recheck) {
                spin_lock(&sctx->stat_lock);
                sctx->stat.malloc_errors++;
                sctx->stat.read_errors++;
                sctx->stat.uncorrectable_errors++;
                spin_unlock(&sctx->stat_lock);
                btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
                goto out;
        }

        /* setup the context, map the logical blocks and alloc the pages */
        ret = scrub_setup_recheck_block(sctx, fs_info, sblock_to_check, length,
                                        logical, sblocks_for_recheck);
        if (ret) {
                spin_lock(&sctx->stat_lock);
                sctx->stat.read_errors++;
                sctx->stat.uncorrectable_errors++;
                spin_unlock(&sctx->stat_lock);
                btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
                goto out;
        }
        BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
        sblock_bad = sblocks_for_recheck + failed_mirror_index;

        /* build and submit the bios for the failed mirror, check checksums */
        scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
                            csum, generation, sctx->csum_size, 1);

        if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
            sblock_bad->no_io_error_seen) {
                /*
                 * the error disappeared after reading page by page, or
                 * the area was part of a huge bio and other parts of the
                 * bio caused I/O errors, or the block layer merged several
                 * read requests into one and the error is caused by a
                 * different bio (usually one of the two latter cases is
                 * the cause)
                 */
                spin_lock(&sctx->stat_lock);
                sctx->stat.unverified_errors++;
                sblock_to_check->data_corrected = 1;
                spin_unlock(&sctx->stat_lock);

                if (sctx->is_dev_replace)
                        scrub_write_block_to_dev_replace(sblock_bad);
                goto out;
        }

        if (!sblock_bad->no_io_error_seen) {
                spin_lock(&sctx->stat_lock);
                sctx->stat.read_errors++;
                spin_unlock(&sctx->stat_lock);
                if (__ratelimit(&_rs))
                        scrub_print_warning("i/o error", sblock_to_check);
                btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
        } else if (sblock_bad->checksum_error) {
                spin_lock(&sctx->stat_lock);
                sctx->stat.csum_errors++;
                spin_unlock(&sctx->stat_lock);
                if (__ratelimit(&_rs))
                        scrub_print_warning("checksum error", sblock_to_check);
                btrfs_dev_stat_inc_and_print(dev,
                                             BTRFS_DEV_STAT_CORRUPTION_ERRS);
        } else if (sblock_bad->header_error) {
                spin_lock(&sctx->stat_lock);
                sctx->stat.verify_errors++;
                spin_unlock(&sctx->stat_lock);
                if (__ratelimit(&_rs))
                        scrub_print_warning("checksum/header error",
                                            sblock_to_check);
                if (sblock_bad->generation_error)
                        btrfs_dev_stat_inc_and_print(dev,
                                BTRFS_DEV_STAT_GENERATION_ERRS);
                else
                        btrfs_dev_stat_inc_and_print(dev,
                                BTRFS_DEV_STAT_CORRUPTION_ERRS);
        }

        if (sctx->readonly) {
                ASSERT(!sctx->is_dev_replace);
                goto out;
        }

        if (!is_metadata && !have_csum) {
                struct scrub_fixup_nodatasum *fixup_nodatasum;

nodatasum_case:
                WARN_ON(sctx->is_dev_replace);

                /*
                 * !is_metadata and !have_csum, this means that the data
                 * might not be COW'ed, that it might be modified
                 * concurrently. The general strategy to work on the
                 * commit root does not help in the case when COW is not
                 * used.
                 */
                fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
                if (!fixup_nodatasum)
                        goto did_not_correct_error;
                fixup_nodatasum->sctx = sctx;
                fixup_nodatasum->dev = dev;
                fixup_nodatasum->logical = logical;
                fixup_nodatasum->root = fs_info->extent_root;
                fixup_nodatasum->mirror_num = failed_mirror_index + 1;
                scrub_pending_trans_workers_inc(sctx);
                btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper,
                                scrub_fixup_nodatasum, NULL, NULL);
                btrfs_queue_work(fs_info->scrub_workers,
                                 &fixup_nodatasum->work);
                goto out;
        }

        /*
         * now build and submit the bios for the other mirrors, check
         * checksums.
         * First try to pick the mirror which is completely without I/O
         * errors and also does not have a checksum error.
         * If one is found, and if a checksum is present, the full block
         * that is known to contain an error is rewritten. Afterwards
         * the block is known to be corrected.
         * If a mirror is found which is completely correct, and no
         * checksum is present, only those pages are rewritten that had
         * an I/O error in the block to be repaired, since it cannot be
         * determined which copy of the other pages is better (and it
         * could happen otherwise that a correct page would be
         * overwritten by a bad one).
         */
        for (mirror_index = 0;
             mirror_index < BTRFS_MAX_MIRRORS &&
             sblocks_for_recheck[mirror_index].page_count > 0;
             mirror_index++) {
                struct scrub_block *sblock_other;

                if (mirror_index == failed_mirror_index)
                        continue;
                sblock_other = sblocks_for_recheck + mirror_index;

                /* build and submit the bios, check checksums */
                scrub_recheck_block(fs_info, sblock_other, is_metadata,
                                    have_csum, csum, generation,
                                    sctx->csum_size, 0);

                if (!sblock_other->header_error &&
                    !sblock_other->checksum_error &&
                    sblock_other->no_io_error_seen) {
                        if (sctx->is_dev_replace) {
                                scrub_write_block_to_dev_replace(sblock_other);
                        } else {
                                int force_write = is_metadata || have_csum;

                                ret = scrub_repair_block_from_good_copy(
                                                sblock_bad, sblock_other,
                                                force_write);
                        }
                        if (0 == ret)
                                goto corrected_error;
                }
        }

        /*
         * for dev_replace, pick good pages and write to the target device.
         */
        if (sctx->is_dev_replace) {
                success = 1;
                for (page_num = 0; page_num < sblock_bad->page_count;
                     page_num++) {
                        int sub_success;

                        sub_success = 0;
                        for (mirror_index = 0;
                             mirror_index < BTRFS_MAX_MIRRORS &&
                             sblocks_for_recheck[mirror_index].page_count > 0;
                             mirror_index++) {
                                struct scrub_block *sblock_other =
                                        sblocks_for_recheck + mirror_index;
                                struct scrub_page *page_other =
                                        sblock_other->pagev[page_num];

                                if (!page_other->io_error) {
                                        ret = scrub_write_page_to_dev_replace(
                                                        sblock_other, page_num);
                                        if (ret == 0) {
                                                /* succeeded for this page */
                                                sub_success = 1;
                                                break;
                                        } else {
                                                btrfs_dev_replace_stats_inc(
                                                        &sctx->dev_root->
                                                        fs_info->dev_replace.
                                                        num_write_errors);
                                        }
                                }
                        }

                        if (!sub_success) {
                                /*
                                 * did not find a mirror to fetch the page
                                 * from. scrub_write_page_to_dev_replace()
                                 * handles this case (page->io_error), by
                                 * filling the block with zeros before
                                 * submitting the write request
                                 */
                                success = 0;
                                ret = scrub_write_page_to_dev_replace(
                                                sblock_bad, page_num);
                                if (ret)
                                        btrfs_dev_replace_stats_inc(
                                                &sctx->dev_root->fs_info->
                                                dev_replace.num_write_errors);
                        }
                }

                goto out;
        }

        /*
         * for regular scrub, repair those pages that are errored.
         * In case of I/O errors in the area that is supposed to be
         * repaired, continue by picking good copies of those pages.
         * Select the good pages from mirrors to rewrite bad pages from
         * the area to fix. Afterwards verify the checksum of the block
         * that is supposed to be repaired. This verification step is
         * only done for the purpose of statistic counting and for the
         * final scrub report, to show whether errors remain.
         * A perfect algorithm could make use of the checksum and try
         * all possible combinations of pages from the different mirrors
         * until the checksum verification succeeds. For example, when
         * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
         * of mirror #2 is readable but the final checksum test fails,
         * then the 2nd page of mirror #3 could be tried, whether now
         * the final checksum succeeds. But this would be a rare
         * exception and is therefore not implemented. At least it is
         * avoided that the good copy is overwritten.
         * A more useful improvement would be to pick the sectors
         * without I/O error based on sector sizes (512 bytes on legacy
         * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
         * mirror could be repaired by taking 512 bytes of a different
         * mirror, even if other 512 byte sectors in the same PAGE_SIZE
         * area are unreadable.
         */

        /* can only fix I/O errors from here on */
        if (sblock_bad->no_io_error_seen)
                goto did_not_correct_error;

        success = 1;
        for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
                struct scrub_page *page_bad = sblock_bad->pagev[page_num];

                if (!page_bad->io_error)
                        continue;

                for (mirror_index = 0;
                     mirror_index < BTRFS_MAX_MIRRORS &&
                     sblocks_for_recheck[mirror_index].page_count > 0;
                     mirror_index++) {
                        struct scrub_block *sblock_other = sblocks_for_recheck +
                                                           mirror_index;
                        struct scrub_page *page_other = sblock_other->pagev[
                                                        page_num];

                        if (!page_other->io_error) {
                                ret = scrub_repair_page_from_good_copy(
                                        sblock_bad, sblock_other, page_num, 0);
                                if (0 == ret) {
                                        page_bad->io_error = 0;
                                        break; /* succeeded for this page */
                                }
                        }
                }

                if (page_bad->io_error) {
                        /* did not find a mirror to copy the page from */
                        success = 0;
                }
        }

        if (success) {
                if (is_metadata || have_csum) {
                        /*
                         * need to verify the checksum now that all
                         * sectors on disk are repaired (the write
                         * request for data to be repaired is on its way).
                         * Just be lazy and use scrub_recheck_block()
                         * which re-reads the data before the checksum
                         * is verified, but most likely the data comes out
                         * of the page cache.
                         */
                        scrub_recheck_block(fs_info, sblock_bad,
                                            is_metadata, have_csum, csum,
                                            generation, sctx->csum_size, 1);
                        if (!sblock_bad->header_error &&
                            !sblock_bad->checksum_error &&
                            sblock_bad->no_io_error_seen)
                                goto corrected_error;
                        else
                                goto did_not_correct_error;
                } else {
corrected_error:
                        spin_lock(&sctx->stat_lock);
                        sctx->stat.corrected_errors++;
                        sblock_to_check->data_corrected = 1;
                        spin_unlock(&sctx->stat_lock);
                        printk_ratelimited_in_rcu(KERN_ERR
                                "BTRFS: fixed up error at logical %llu on dev %s\n",
                                logical, rcu_str_deref(dev->name));
                }
        } else {
did_not_correct_error:
                spin_lock(&sctx->stat_lock);
                sctx->stat.uncorrectable_errors++;
                spin_unlock(&sctx->stat_lock);
                printk_ratelimited_in_rcu(KERN_ERR
                        "BTRFS: unable to fixup (regular) error at logical %llu on dev %s\n",
                        logical, rcu_str_deref(dev->name));
        }

out:
        if (sblocks_for_recheck) {
                for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
                     mirror_index++) {
                        struct scrub_block *sblock = sblocks_for_recheck +
                                                     mirror_index;
                        struct scrub_recover *recover;
                        int page_index;

                        for (page_index = 0; page_index < sblock->page_count;
                             page_index++) {
                                sblock->pagev[page_index]->sblock = NULL;
                                recover = sblock->pagev[page_index]->recover;
                                if (recover) {
                                        scrub_put_recover(recover);
                                        sblock->pagev[page_index]->recover =
                                                                        NULL;
                                }
                                scrub_page_put(sblock->pagev[page_index]);
                        }
                }
                kfree(sblocks_for_recheck);
        }

        return 0;
}

Miao Xieaf8e2d12014-10-23 14:42:50 +08001295static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio, u64 *raid_map)
1296{
1297 if (raid_map) {
1298 if (raid_map[bbio->num_stripes - 1] == RAID6_Q_STRIPE)
1299 return 3;
1300 else
1301 return 2;
1302 } else {
1303 return (int)bbio->num_stripes;
1304 }
1305}
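/*
 * The counts above reflect how many independent ways a stripe can be
 * read back: for RAID6 up to three (the direct read plus a rebuild via
 * the P or the Q parity), for RAID5 two, and for mirrored/striped
 * profiles simply one per stripe returned by the mapping.
 */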
1306
1307static inline void scrub_stripe_index_and_offset(u64 logical, u64 *raid_map,
1308 u64 mapped_length,
1309 int nstripes, int mirror,
1310 int *stripe_index,
1311 u64 *stripe_offset)
1312{
1313 int i;
1314
1315 if (raid_map) {
1316 /* RAID5/6 */
1317 for (i = 0; i < nstripes; i++) {
1318 if (raid_map[i] == RAID6_Q_STRIPE ||
1319 raid_map[i] == RAID5_P_STRIPE)
1320 continue;
1321
1322 if (logical >= raid_map[i] &&
1323 logical < raid_map[i] + mapped_length)
1324 break;
1325 }
1326
1327 *stripe_index = i;
1328 *stripe_offset = logical - raid_map[i];
1329 } else {
1330 /* The other RAID type */
1331 *stripe_index = mirror;
1332 *stripe_offset = 0;
1333 }
1334}
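/*
 * A minimal sketch of the RAID5/6 branch above (hypothetical helper and
 * values, not used anywhere): with two 64K data stripes starting at
 * logical 0 and 65536 plus a parity slot, and a PAGE_SIZE mapping, a
 * logical address of 68000 lands in stripe 1 at offset 2464.
 */
static inline void scrub_stripe_index_and_offset_example(void)
{
	u64 raid_map[3] = { 0, 65536, RAID5_P_STRIPE };
	int stripe_index;
	u64 stripe_offset;

	scrub_stripe_index_and_offset(68000, raid_map, 4096, 3, 0,
				      &stripe_index, &stripe_offset);
	/* now stripe_index == 1 and stripe_offset == 68000 - 65536 == 2464 */
}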
1335
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001336static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
Stefan Behrens3ec706c2012-11-05 15:46:42 +01001337 struct btrfs_fs_info *fs_info,
Stefan Behrensff023aa2012-11-06 11:43:11 +01001338 struct scrub_block *original_sblock,
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001339 u64 length, u64 logical,
1340 struct scrub_block *sblocks_for_recheck)
Arne Jansena2de7332011-03-08 14:14:00 +01001341{
Miao Xieaf8e2d12014-10-23 14:42:50 +08001342 struct scrub_recover *recover;
1343 struct btrfs_bio *bbio;
1344 u64 *raid_map;
1345 u64 sublen;
1346 u64 mapped_length;
1347 u64 stripe_offset;
1348 int stripe_index;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001349 int page_index;
1350 int mirror_index;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001351 int nmirrors;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001352 int ret;
1353
1354 /*
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001355 * note: the two members ref_count and outstanding_pages
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001356 * are not used (and not set) in the blocks that are used for
1357 * the recheck procedure
1358 */
1359
1360 page_index = 0;
1361 while (length > 0) {
Miao Xieaf8e2d12014-10-23 14:42:50 +08001362 sublen = min_t(u64, length, PAGE_SIZE);
1363 mapped_length = sublen;
1364 bbio = NULL;
1365 raid_map = NULL;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001366
1367 /*
1368 * with a length of PAGE_SIZE, each returned stripe
1369 * represents one mirror
1370 */
Miao Xieaf8e2d12014-10-23 14:42:50 +08001371 ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical,
1372 &mapped_length, &bbio, 0, &raid_map);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001373 if (ret || !bbio || mapped_length < sublen) {
1374 kfree(bbio);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001375 kfree(raid_map);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001376 return -EIO;
1377 }
1378
Miao Xieaf8e2d12014-10-23 14:42:50 +08001379 recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
1380 if (!recover) {
1381 kfree(bbio);
1382 kfree(raid_map);
1383 return -ENOMEM;
1384 }
1385
1386 atomic_set(&recover->refs, 1);
1387 recover->bbio = bbio;
1388 recover->raid_map = raid_map;
1389 recover->map_length = mapped_length;
1390
Stefan Behrensff023aa2012-11-06 11:43:11 +01001391 BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001392
1393 nmirrors = scrub_nr_raid_mirrors(bbio, raid_map);
1394 for (mirror_index = 0; mirror_index < nmirrors;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001395 mirror_index++) {
1396 struct scrub_block *sblock;
1397 struct scrub_page *page;
1398
1399 if (mirror_index >= BTRFS_MAX_MIRRORS)
1400 continue;
1401
1402 sblock = sblocks_for_recheck + mirror_index;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001403 sblock->sctx = sctx;
1404 page = kzalloc(sizeof(*page), GFP_NOFS);
1405 if (!page) {
1406leave_nomem:
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001407 spin_lock(&sctx->stat_lock);
1408 sctx->stat.malloc_errors++;
1409 spin_unlock(&sctx->stat_lock);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001410 scrub_put_recover(recover);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001411 return -ENOMEM;
1412 }
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001413 scrub_page_get(page);
1414 sblock->pagev[page_index] = page;
1415 page->logical = logical;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001416
1417 scrub_stripe_index_and_offset(logical, raid_map,
1418 mapped_length,
1419 bbio->num_stripes,
1420 mirror_index,
1421 &stripe_index,
1422 &stripe_offset);
1423 page->physical = bbio->stripes[stripe_index].physical +
1424 stripe_offset;
1425 page->dev = bbio->stripes[stripe_index].dev;
1426
Stefan Behrensff023aa2012-11-06 11:43:11 +01001427 BUG_ON(page_index >= original_sblock->page_count);
1428 page->physical_for_dev_replace =
1429 original_sblock->pagev[page_index]->
1430 physical_for_dev_replace;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001431 /* for missing devices, dev->bdev is NULL */
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001432 page->mirror_num = mirror_index + 1;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001433 sblock->page_count++;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001434 page->page = alloc_page(GFP_NOFS);
1435 if (!page->page)
1436 goto leave_nomem;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001437
1438 scrub_get_recover(recover);
1439 page->recover = recover;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001440 }
Miao Xieaf8e2d12014-10-23 14:42:50 +08001441 scrub_put_recover(recover);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001442 length -= sublen;
1443 logical += sublen;
1444 page_index++;
1445 }
1446
1447 return 0;
1448}
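/*
 * Summary of the setup above: the logical range is walked in PAGE_SIZE
 * steps, each step is mapped once with btrfs_map_sblock(), and the
 * resulting stripes are fanned out so that sblocks_for_recheck[mirror]
 * ends up with a page-by-page view of the same range, with one
 * scrub_recover per mapped page shared by all mirrors of that page.
 */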
1449
Miao Xieaf8e2d12014-10-23 14:42:50 +08001450struct scrub_bio_ret {
1451 struct completion event;
1452 int error;
1453};
1454
1455static void scrub_bio_wait_endio(struct bio *bio, int error)
1456{
1457 struct scrub_bio_ret *ret = bio->bi_private;
1458
1459 ret->error = error;
1460 complete(&ret->event);
1461}
1462
1463static inline int scrub_is_page_on_raid56(struct scrub_page *page)
1464{
1465 return page->recover && page->recover->raid_map;
1466}
1467
1468static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
1469 struct bio *bio,
1470 struct scrub_page *page)
1471{
1472 struct scrub_bio_ret done;
1473 int ret;
1474
1475 init_completion(&done.event);
1476 done.error = 0;
1477 bio->bi_iter.bi_sector = page->logical >> 9;
1478 bio->bi_private = &done;
1479 bio->bi_end_io = scrub_bio_wait_endio;
1480
1481 ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio,
1482 page->recover->raid_map,
1483 page->recover->map_length,
Miao Xie42452152014-11-25 16:39:28 +08001484 page->mirror_num, 0);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001485 if (ret)
1486 return ret;
1487
1488 wait_for_completion(&done.event);
1489 if (done.error)
1490 return -EIO;
1491
1492 return 0;
1493}
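/*
 * The completion above is the standard pattern for issuing a bio
 * synchronously; the non-raid56 read path below achieves the same via
 * btrfsic_submit_bio_wait().  A minimal sketch of the pattern itself
 * (hypothetical helper, not used by scrub):
 */
static inline int scrub_submit_bio_sync_example(int rw, struct bio *bio)
{
	struct scrub_bio_ret done;

	init_completion(&done.event);
	done.error = 0;
	bio->bi_private = &done;
	bio->bi_end_io = scrub_bio_wait_endio;
	btrfsic_submit_bio(rw, bio);		/* fire the request */
	wait_for_completion(&done.event);	/* sleep until the endio runs */
	return done.error ? -EIO : 0;
}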
1494
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001495/*
1496 * this function will check the on disk data for checksum errors, header
1497 * errors and read I/O errors. If any I/O errors happen, the exact pages
1498 * that are errored are marked as bad. The goal is to enable scrub
1499 * to take those pages that are not errored from all the mirrors so that
1500 * the pages that are errored in the just handled mirror can be repaired.
1501 */
Stefan Behrens34f5c8e2012-11-02 16:16:26 +01001502static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
1503 struct scrub_block *sblock, int is_metadata,
1504 int have_csum, u8 *csum, u64 generation,
Miao Xieaf8e2d12014-10-23 14:42:50 +08001505 u16 csum_size, int retry_failed_mirror)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001506{
1507 int page_num;
1508
1509 sblock->no_io_error_seen = 1;
1510 sblock->header_error = 0;
1511 sblock->checksum_error = 0;
1512
1513 for (page_num = 0; page_num < sblock->page_count; page_num++) {
1514 struct bio *bio;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001515 struct scrub_page *page = sblock->pagev[page_num];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001516
Stefan Behrens442a4f62012-05-25 16:06:08 +02001517 if (page->dev->bdev == NULL) {
Stefan Behrensea9947b2012-05-04 15:16:07 -04001518 page->io_error = 1;
1519 sblock->no_io_error_seen = 0;
1520 continue;
1521 }
1522
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001523 WARN_ON(!page->page);
Chris Mason9be33952013-05-17 18:30:14 -04001524 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
Stefan Behrens34f5c8e2012-11-02 16:16:26 +01001525 if (!bio) {
1526 page->io_error = 1;
1527 sblock->no_io_error_seen = 0;
1528 continue;
1529 }
Stefan Behrens442a4f62012-05-25 16:06:08 +02001530 bio->bi_bdev = page->dev->bdev;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001531
Stefan Behrens34f5c8e2012-11-02 16:16:26 +01001532 bio_add_page(bio, page->page, PAGE_SIZE, 0);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001533 if (!retry_failed_mirror && scrub_is_page_on_raid56(page)) {
1534 if (scrub_submit_raid56_bio_wait(fs_info, bio, page))
1535 sblock->no_io_error_seen = 0;
1536 } else {
1537 bio->bi_iter.bi_sector = page->physical >> 9;
1538
1539 if (btrfsic_submit_bio_wait(READ, bio))
1540 sblock->no_io_error_seen = 0;
1541 }
Kent Overstreet33879d42013-11-23 22:33:32 -08001542
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001543 bio_put(bio);
1544 }
1545
1546 if (sblock->no_io_error_seen)
1547 scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
1548 have_csum, csum, generation,
1549 csum_size);
1550
Stefan Behrens34f5c8e2012-11-02 16:16:26 +01001551 return;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001552}
1553
Miao Xie17a9be22014-07-24 11:37:08 +08001554static inline int scrub_check_fsid(u8 fsid[],
1555 struct scrub_page *spage)
1556{
1557 struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
1558 int ret;
1559
1560 ret = memcmp(fsid, fs_devices->fsid, BTRFS_UUID_SIZE);
1561 return !ret;
1562}
1563
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001564static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
1565 struct scrub_block *sblock,
1566 int is_metadata, int have_csum,
1567 const u8 *csum, u64 generation,
1568 u16 csum_size)
1569{
1570 int page_num;
1571 u8 calculated_csum[BTRFS_CSUM_SIZE];
1572 u32 crc = ~(u32)0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001573 void *mapped_buffer;
1574
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001575 WARN_ON(!sblock->pagev[0]->page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001576 if (is_metadata) {
1577 struct btrfs_header *h;
1578
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001579 mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001580 h = (struct btrfs_header *)mapped_buffer;
1581
Qu Wenruo3cae2102013-07-16 11:19:18 +08001582 if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h) ||
Miao Xie17a9be22014-07-24 11:37:08 +08001583 !scrub_check_fsid(h->fsid, sblock->pagev[0]) ||
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001584 memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
Stefan Behrens442a4f62012-05-25 16:06:08 +02001585 BTRFS_UUID_SIZE)) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001586 sblock->header_error = 1;
Qu Wenruo3cae2102013-07-16 11:19:18 +08001587 } else if (generation != btrfs_stack_header_generation(h)) {
Stefan Behrens442a4f62012-05-25 16:06:08 +02001588 sblock->header_error = 1;
1589 sblock->generation_error = 1;
1590 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001591 csum = h->csum;
1592 } else {
1593 if (!have_csum)
1594 return;
1595
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001596 mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001597 }
1598
1599 for (page_num = 0;;) {
1600 if (page_num == 0 && is_metadata)
Liu Bob0496682013-03-14 14:57:45 +00001601 crc = btrfs_csum_data(
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001602 ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
1603 crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
1604 else
Liu Bob0496682013-03-14 14:57:45 +00001605 crc = btrfs_csum_data(mapped_buffer, crc, PAGE_SIZE);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001606
Linus Torvalds9613beb2012-03-30 12:44:29 -07001607 kunmap_atomic(mapped_buffer);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001608 page_num++;
1609 if (page_num >= sblock->page_count)
1610 break;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001611 WARN_ON(!sblock->pagev[page_num]->page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001612
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001613 mapped_buffer = kmap_atomic(sblock->pagev[page_num]->page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001614 }
1615
1616 btrfs_csum_final(crc, calculated_csum);
1617 if (memcmp(calculated_csum, csum, csum_size))
1618 sblock->checksum_error = 1;
1619}
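/*
 * The loop above is the multi-page variant of the usual btrfs crc32c
 * pattern; for a single data page it reduces to the following
 * (hypothetical helper, not used by scrub):
 */
static inline void scrub_csum_single_page_example(struct page *page,
						  u8 *csum_result)
{
	void *buffer = kmap_atomic(page);
	u32 crc = ~(u32)0;

	crc = btrfs_csum_data(buffer, crc, PAGE_SIZE);
	kunmap_atomic(buffer);
	btrfs_csum_final(crc, csum_result);	/* store the final csum */
}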
1620
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001621static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
1622 struct scrub_block *sblock_good,
1623 int force_write)
1624{
1625 int page_num;
1626 int ret = 0;
1627
1628 for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
1629 int ret_sub;
1630
1631 ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
1632 sblock_good,
1633 page_num,
1634 force_write);
1635 if (ret_sub)
1636 ret = ret_sub;
1637 }
1638
1639 return ret;
1640}
1641
1642static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1643 struct scrub_block *sblock_good,
1644 int page_num, int force_write)
1645{
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001646 struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1647 struct scrub_page *page_good = sblock_good->pagev[page_num];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001648
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001649 BUG_ON(page_bad->page == NULL);
1650 BUG_ON(page_good->page == NULL);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001651 if (force_write || sblock_bad->header_error ||
1652 sblock_bad->checksum_error || page_bad->io_error) {
1653 struct bio *bio;
1654 int ret;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001655
Stefan Behrensff023aa2012-11-06 11:43:11 +01001656 if (!page_bad->dev->bdev) {
Frank Holtonefe120a2013-12-20 11:37:06 -05001657 printk_ratelimited(KERN_WARNING "BTRFS: "
1658 "scrub_repair_page_from_good_copy(bdev == NULL) "
1659 "is unexpected!\n");
Stefan Behrensff023aa2012-11-06 11:43:11 +01001660 return -EIO;
1661 }
1662
Chris Mason9be33952013-05-17 18:30:14 -04001663 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
Tsutomu Itohe627ee72012-04-12 16:03:56 -04001664 if (!bio)
1665 return -EIO;
Stefan Behrens442a4f62012-05-25 16:06:08 +02001666 bio->bi_bdev = page_bad->dev->bdev;
Kent Overstreet4f024f32013-10-11 15:44:27 -07001667 bio->bi_iter.bi_sector = page_bad->physical >> 9;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001668
1669 ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
1670 if (PAGE_SIZE != ret) {
1671 bio_put(bio);
1672 return -EIO;
1673 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001674
Kent Overstreet33879d42013-11-23 22:33:32 -08001675 if (btrfsic_submit_bio_wait(WRITE, bio)) {
Stefan Behrens442a4f62012-05-25 16:06:08 +02001676 btrfs_dev_stat_inc_and_print(page_bad->dev,
1677 BTRFS_DEV_STAT_WRITE_ERRS);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001678 btrfs_dev_replace_stats_inc(
1679 &sblock_bad->sctx->dev_root->fs_info->
1680 dev_replace.num_write_errors);
Stefan Behrens442a4f62012-05-25 16:06:08 +02001681 bio_put(bio);
1682 return -EIO;
1683 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001684 bio_put(bio);
1685 }
1686
1687 return 0;
1688}
1689
Stefan Behrensff023aa2012-11-06 11:43:11 +01001690static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
1691{
1692 int page_num;
1693
Miao Xie5a6ac9e2014-11-06 17:20:58 +08001694 /*
1695	 * This block is used for the parity check on the source device,
1696	 * so the data need not be written to the destination device.
1697 */
1698 if (sblock->sparity)
1699 return;
1700
Stefan Behrensff023aa2012-11-06 11:43:11 +01001701 for (page_num = 0; page_num < sblock->page_count; page_num++) {
1702 int ret;
1703
1704 ret = scrub_write_page_to_dev_replace(sblock, page_num);
1705 if (ret)
1706 btrfs_dev_replace_stats_inc(
1707 &sblock->sctx->dev_root->fs_info->dev_replace.
1708 num_write_errors);
1709 }
1710}
1711
1712static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
1713 int page_num)
1714{
1715 struct scrub_page *spage = sblock->pagev[page_num];
1716
1717 BUG_ON(spage->page == NULL);
1718 if (spage->io_error) {
1719 void *mapped_buffer = kmap_atomic(spage->page);
1720
1721 memset(mapped_buffer, 0, PAGE_CACHE_SIZE);
1722 flush_dcache_page(spage->page);
1723 kunmap_atomic(mapped_buffer);
1724 }
1725 return scrub_add_page_to_wr_bio(sblock->sctx, spage);
1726}
1727
1728static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
1729 struct scrub_page *spage)
1730{
1731 struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1732 struct scrub_bio *sbio;
1733 int ret;
1734
1735 mutex_lock(&wr_ctx->wr_lock);
1736again:
1737 if (!wr_ctx->wr_curr_bio) {
1738 wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
1739 GFP_NOFS);
1740 if (!wr_ctx->wr_curr_bio) {
1741 mutex_unlock(&wr_ctx->wr_lock);
1742 return -ENOMEM;
1743 }
1744 wr_ctx->wr_curr_bio->sctx = sctx;
1745 wr_ctx->wr_curr_bio->page_count = 0;
1746 }
1747 sbio = wr_ctx->wr_curr_bio;
1748 if (sbio->page_count == 0) {
1749 struct bio *bio;
1750
1751 sbio->physical = spage->physical_for_dev_replace;
1752 sbio->logical = spage->logical;
1753 sbio->dev = wr_ctx->tgtdev;
1754 bio = sbio->bio;
1755 if (!bio) {
Chris Mason9be33952013-05-17 18:30:14 -04001756 bio = btrfs_io_bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001757 if (!bio) {
1758 mutex_unlock(&wr_ctx->wr_lock);
1759 return -ENOMEM;
1760 }
1761 sbio->bio = bio;
1762 }
1763
1764 bio->bi_private = sbio;
1765 bio->bi_end_io = scrub_wr_bio_end_io;
1766 bio->bi_bdev = sbio->dev->bdev;
Kent Overstreet4f024f32013-10-11 15:44:27 -07001767 bio->bi_iter.bi_sector = sbio->physical >> 9;
Stefan Behrensff023aa2012-11-06 11:43:11 +01001768 sbio->err = 0;
1769 } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1770 spage->physical_for_dev_replace ||
1771 sbio->logical + sbio->page_count * PAGE_SIZE !=
1772 spage->logical) {
1773 scrub_wr_submit(sctx);
1774 goto again;
1775 }
1776
1777 ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1778 if (ret != PAGE_SIZE) {
1779 if (sbio->page_count < 1) {
1780 bio_put(sbio->bio);
1781 sbio->bio = NULL;
1782 mutex_unlock(&wr_ctx->wr_lock);
1783 return -EIO;
1784 }
1785 scrub_wr_submit(sctx);
1786 goto again;
1787 }
1788
1789 sbio->pagev[sbio->page_count] = spage;
1790 scrub_page_get(spage);
1791 sbio->page_count++;
1792 if (sbio->page_count == wr_ctx->pages_per_wr_bio)
1793 scrub_wr_submit(sctx);
1794 mutex_unlock(&wr_ctx->wr_lock);
1795
1796 return 0;
1797}
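/*
 * Batching strategy above in short: a page is appended to the current
 * write bio only while it stays physically and logically contiguous
 * with it; any discontinuity, a failed bio_add_page() or a full bio
 * forces a scrub_wr_submit() and the page is retried on a fresh bio.
 */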
1798
1799static void scrub_wr_submit(struct scrub_ctx *sctx)
1800{
1801 struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1802 struct scrub_bio *sbio;
1803
1804 if (!wr_ctx->wr_curr_bio)
1805 return;
1806
1807 sbio = wr_ctx->wr_curr_bio;
1808 wr_ctx->wr_curr_bio = NULL;
1809 WARN_ON(!sbio->bio->bi_bdev);
1810 scrub_pending_bio_inc(sctx);
1811 /* process all writes in a single worker thread. Then the block layer
1812	 * orders the requests before sending them to the driver, which
1813 * doubled the write performance on spinning disks when measured
1814 * with Linux 3.5 */
1815 btrfsic_submit_bio(WRITE, sbio->bio);
1816}
1817
1818static void scrub_wr_bio_end_io(struct bio *bio, int err)
1819{
1820 struct scrub_bio *sbio = bio->bi_private;
1821 struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
1822
1823 sbio->err = err;
1824 sbio->bio = bio;
1825
Liu Bo9e0af232014-08-15 23:36:53 +08001826 btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
1827 scrub_wr_bio_end_io_worker, NULL, NULL);
Qu Wenruo0339ef22014-02-28 10:46:17 +08001828 btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001829}
1830
1831static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
1832{
1833 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
1834 struct scrub_ctx *sctx = sbio->sctx;
1835 int i;
1836
1837 WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
1838 if (sbio->err) {
1839 struct btrfs_dev_replace *dev_replace =
1840 &sbio->sctx->dev_root->fs_info->dev_replace;
1841
1842 for (i = 0; i < sbio->page_count; i++) {
1843 struct scrub_page *spage = sbio->pagev[i];
1844
1845 spage->io_error = 1;
1846 btrfs_dev_replace_stats_inc(&dev_replace->
1847 num_write_errors);
1848 }
1849 }
1850
1851 for (i = 0; i < sbio->page_count; i++)
1852 scrub_page_put(sbio->pagev[i]);
1853
1854 bio_put(sbio->bio);
1855 kfree(sbio);
1856 scrub_pending_bio_dec(sctx);
1857}
1858
1859static int scrub_checksum(struct scrub_block *sblock)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001860{
1861 u64 flags;
1862 int ret;
1863
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001864 WARN_ON(sblock->page_count < 1);
1865 flags = sblock->pagev[0]->flags;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001866 ret = 0;
1867 if (flags & BTRFS_EXTENT_FLAG_DATA)
1868 ret = scrub_checksum_data(sblock);
1869 else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1870 ret = scrub_checksum_tree_block(sblock);
1871 else if (flags & BTRFS_EXTENT_FLAG_SUPER)
1872 (void)scrub_checksum_super(sblock);
1873 else
1874 WARN_ON(1);
1875 if (ret)
1876 scrub_handle_errored_block(sblock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001877
1878 return ret;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001879}
1880
1881static int scrub_checksum_data(struct scrub_block *sblock)
1882{
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001883 struct scrub_ctx *sctx = sblock->sctx;
Arne Jansena2de7332011-03-08 14:14:00 +01001884 u8 csum[BTRFS_CSUM_SIZE];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001885 u8 *on_disk_csum;
1886 struct page *page;
1887 void *buffer;
Arne Jansena2de7332011-03-08 14:14:00 +01001888 u32 crc = ~(u32)0;
1889 int fail = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001890 u64 len;
1891 int index;
Arne Jansena2de7332011-03-08 14:14:00 +01001892
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001893 BUG_ON(sblock->page_count < 1);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001894 if (!sblock->pagev[0]->have_csum)
Arne Jansena2de7332011-03-08 14:14:00 +01001895 return 0;
1896
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001897 on_disk_csum = sblock->pagev[0]->csum;
1898 page = sblock->pagev[0]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07001899 buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001900
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001901 len = sctx->sectorsize;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001902 index = 0;
1903 for (;;) {
1904 u64 l = min_t(u64, len, PAGE_SIZE);
1905
Liu Bob0496682013-03-14 14:57:45 +00001906 crc = btrfs_csum_data(buffer, crc, l);
Linus Torvalds9613beb2012-03-30 12:44:29 -07001907 kunmap_atomic(buffer);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001908 len -= l;
1909 if (len == 0)
1910 break;
1911 index++;
1912 BUG_ON(index >= sblock->page_count);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001913 BUG_ON(!sblock->pagev[index]->page);
1914 page = sblock->pagev[index]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07001915 buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001916 }
1917
Arne Jansena2de7332011-03-08 14:14:00 +01001918 btrfs_csum_final(crc, csum);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001919 if (memcmp(csum, on_disk_csum, sctx->csum_size))
Arne Jansena2de7332011-03-08 14:14:00 +01001920 fail = 1;
1921
Arne Jansena2de7332011-03-08 14:14:00 +01001922 return fail;
1923}
1924
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001925static int scrub_checksum_tree_block(struct scrub_block *sblock)
Arne Jansena2de7332011-03-08 14:14:00 +01001926{
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001927 struct scrub_ctx *sctx = sblock->sctx;
Arne Jansena2de7332011-03-08 14:14:00 +01001928 struct btrfs_header *h;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01001929 struct btrfs_root *root = sctx->dev_root;
Arne Jansena2de7332011-03-08 14:14:00 +01001930 struct btrfs_fs_info *fs_info = root->fs_info;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001931 u8 calculated_csum[BTRFS_CSUM_SIZE];
1932 u8 on_disk_csum[BTRFS_CSUM_SIZE];
1933 struct page *page;
1934 void *mapped_buffer;
1935 u64 mapped_size;
1936 void *p;
Arne Jansena2de7332011-03-08 14:14:00 +01001937 u32 crc = ~(u32)0;
1938 int fail = 0;
1939 int crc_fail = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001940 u64 len;
1941 int index;
1942
1943 BUG_ON(sblock->page_count < 1);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001944 page = sblock->pagev[0]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07001945 mapped_buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001946 h = (struct btrfs_header *)mapped_buffer;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001947 memcpy(on_disk_csum, h->csum, sctx->csum_size);
Arne Jansena2de7332011-03-08 14:14:00 +01001948
1949 /*
1950 * we don't use the getter functions here, as we
1951 * a) don't have an extent buffer and
1952 * b) the page is already kmapped
1953 */
Arne Jansena2de7332011-03-08 14:14:00 +01001954
Qu Wenruo3cae2102013-07-16 11:19:18 +08001955 if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
Arne Jansena2de7332011-03-08 14:14:00 +01001956 ++fail;
1957
Qu Wenruo3cae2102013-07-16 11:19:18 +08001958 if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h))
Arne Jansena2de7332011-03-08 14:14:00 +01001959 ++fail;
1960
Miao Xie17a9be22014-07-24 11:37:08 +08001961 if (!scrub_check_fsid(h->fsid, sblock->pagev[0]))
Arne Jansena2de7332011-03-08 14:14:00 +01001962 ++fail;
1963
1964 if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1965 BTRFS_UUID_SIZE))
1966 ++fail;
1967
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001968 len = sctx->nodesize - BTRFS_CSUM_SIZE;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001969 mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1970 p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1971 index = 0;
1972 for (;;) {
1973 u64 l = min_t(u64, len, mapped_size);
1974
Liu Bob0496682013-03-14 14:57:45 +00001975 crc = btrfs_csum_data(p, crc, l);
Linus Torvalds9613beb2012-03-30 12:44:29 -07001976 kunmap_atomic(mapped_buffer);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001977 len -= l;
1978 if (len == 0)
1979 break;
1980 index++;
1981 BUG_ON(index >= sblock->page_count);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001982 BUG_ON(!sblock->pagev[index]->page);
1983 page = sblock->pagev[index]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07001984 mapped_buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001985 mapped_size = PAGE_SIZE;
1986 p = mapped_buffer;
1987 }
1988
1989 btrfs_csum_final(crc, calculated_csum);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001990 if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
Arne Jansena2de7332011-03-08 14:14:00 +01001991 ++crc_fail;
1992
Arne Jansena2de7332011-03-08 14:14:00 +01001993 return fail || crc_fail;
1994}
1995
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001996static int scrub_checksum_super(struct scrub_block *sblock)
Arne Jansena2de7332011-03-08 14:14:00 +01001997{
1998 struct btrfs_super_block *s;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001999 struct scrub_ctx *sctx = sblock->sctx;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002000 u8 calculated_csum[BTRFS_CSUM_SIZE];
2001 u8 on_disk_csum[BTRFS_CSUM_SIZE];
2002 struct page *page;
2003 void *mapped_buffer;
2004 u64 mapped_size;
2005 void *p;
Arne Jansena2de7332011-03-08 14:14:00 +01002006 u32 crc = ~(u32)0;
Stefan Behrens442a4f62012-05-25 16:06:08 +02002007 int fail_gen = 0;
2008 int fail_cor = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002009 u64 len;
2010 int index;
Arne Jansena2de7332011-03-08 14:14:00 +01002011
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002012 BUG_ON(sblock->page_count < 1);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002013 page = sblock->pagev[0]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07002014 mapped_buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002015 s = (struct btrfs_super_block *)mapped_buffer;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002016 memcpy(on_disk_csum, s->csum, sctx->csum_size);
Arne Jansena2de7332011-03-08 14:14:00 +01002017
Qu Wenruo3cae2102013-07-16 11:19:18 +08002018 if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
Stefan Behrens442a4f62012-05-25 16:06:08 +02002019 ++fail_cor;
Arne Jansena2de7332011-03-08 14:14:00 +01002020
Qu Wenruo3cae2102013-07-16 11:19:18 +08002021 if (sblock->pagev[0]->generation != btrfs_super_generation(s))
Stefan Behrens442a4f62012-05-25 16:06:08 +02002022 ++fail_gen;
Arne Jansena2de7332011-03-08 14:14:00 +01002023
Miao Xie17a9be22014-07-24 11:37:08 +08002024 if (!scrub_check_fsid(s->fsid, sblock->pagev[0]))
Stefan Behrens442a4f62012-05-25 16:06:08 +02002025 ++fail_cor;
Arne Jansena2de7332011-03-08 14:14:00 +01002026
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002027 len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
2028 mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
2029 p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
2030 index = 0;
2031 for (;;) {
2032 u64 l = min_t(u64, len, mapped_size);
2033
Liu Bob0496682013-03-14 14:57:45 +00002034 crc = btrfs_csum_data(p, crc, l);
Linus Torvalds9613beb2012-03-30 12:44:29 -07002035 kunmap_atomic(mapped_buffer);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002036 len -= l;
2037 if (len == 0)
2038 break;
2039 index++;
2040 BUG_ON(index >= sblock->page_count);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002041 BUG_ON(!sblock->pagev[index]->page);
2042 page = sblock->pagev[index]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07002043 mapped_buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002044 mapped_size = PAGE_SIZE;
2045 p = mapped_buffer;
2046 }
2047
2048 btrfs_csum_final(crc, calculated_csum);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002049 if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
Stefan Behrens442a4f62012-05-25 16:06:08 +02002050 ++fail_cor;
Arne Jansena2de7332011-03-08 14:14:00 +01002051
Stefan Behrens442a4f62012-05-25 16:06:08 +02002052 if (fail_cor + fail_gen) {
Arne Jansena2de7332011-03-08 14:14:00 +01002053 /*
2054 * if we find an error in a super block, we just report it.
2055		 * The super blocks will be rewritten with the next
2056		 * transaction commit anyway.
2057 */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002058 spin_lock(&sctx->stat_lock);
2059 ++sctx->stat.super_errors;
2060 spin_unlock(&sctx->stat_lock);
Stefan Behrens442a4f62012-05-25 16:06:08 +02002061 if (fail_cor)
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002062 btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
Stefan Behrens442a4f62012-05-25 16:06:08 +02002063 BTRFS_DEV_STAT_CORRUPTION_ERRS);
2064 else
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002065 btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
Stefan Behrens442a4f62012-05-25 16:06:08 +02002066 BTRFS_DEV_STAT_GENERATION_ERRS);
Arne Jansena2de7332011-03-08 14:14:00 +01002067 }
2068
Stefan Behrens442a4f62012-05-25 16:06:08 +02002069 return fail_cor + fail_gen;
Arne Jansena2de7332011-03-08 14:14:00 +01002070}
2071
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002072static void scrub_block_get(struct scrub_block *sblock)
2073{
2074 atomic_inc(&sblock->ref_count);
2075}
2076
2077static void scrub_block_put(struct scrub_block *sblock)
2078{
2079 if (atomic_dec_and_test(&sblock->ref_count)) {
2080 int i;
2081
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002082 if (sblock->sparity)
2083 scrub_parity_put(sblock->sparity);
2084
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002085 for (i = 0; i < sblock->page_count; i++)
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002086 scrub_page_put(sblock->pagev[i]);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002087 kfree(sblock);
2088 }
2089}
2090
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002091static void scrub_page_get(struct scrub_page *spage)
2092{
2093 atomic_inc(&spage->ref_count);
2094}
2095
2096static void scrub_page_put(struct scrub_page *spage)
2097{
2098 if (atomic_dec_and_test(&spage->ref_count)) {
2099 if (spage->page)
2100 __free_page(spage->page);
2101 kfree(spage);
2102 }
2103}
2104
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002105static void scrub_submit(struct scrub_ctx *sctx)
Arne Jansena2de7332011-03-08 14:14:00 +01002106{
2107 struct scrub_bio *sbio;
2108
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002109 if (sctx->curr == -1)
Stefan Behrens1623ede2012-03-27 14:21:26 -04002110 return;
Arne Jansena2de7332011-03-08 14:14:00 +01002111
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002112 sbio = sctx->bios[sctx->curr];
2113 sctx->curr = -1;
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01002114 scrub_pending_bio_inc(sctx);
Arne Jansena2de7332011-03-08 14:14:00 +01002115
Stefan Behrensff023aa2012-11-06 11:43:11 +01002116 if (!sbio->bio->bi_bdev) {
2117 /*
2118 * this case should not happen. If btrfs_map_block() is
2119 * wrong, it could happen for dev-replace operations on
2120 * missing devices when no mirrors are available, but in
2121 * this case it should already fail the mount.
2122 * This case is handled correctly (but _very_ slowly).
2123 */
2124 printk_ratelimited(KERN_WARNING
Frank Holtonefe120a2013-12-20 11:37:06 -05002125 "BTRFS: scrub_submit(bio bdev == NULL) is unexpected!\n");
Stefan Behrensff023aa2012-11-06 11:43:11 +01002126 bio_endio(sbio->bio, -EIO);
2127 } else {
2128 btrfsic_submit_bio(READ, sbio->bio);
2129 }
Arne Jansena2de7332011-03-08 14:14:00 +01002130}
2131
Stefan Behrensff023aa2012-11-06 11:43:11 +01002132static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
2133 struct scrub_page *spage)
Arne Jansena2de7332011-03-08 14:14:00 +01002134{
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002135 struct scrub_block *sblock = spage->sblock;
Arne Jansena2de7332011-03-08 14:14:00 +01002136 struct scrub_bio *sbio;
Arne Jansen69f4cb52011-11-11 08:17:10 -05002137 int ret;
Arne Jansena2de7332011-03-08 14:14:00 +01002138
2139again:
2140 /*
2141 * grab a fresh bio or wait for one to become available
2142 */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002143 while (sctx->curr == -1) {
2144 spin_lock(&sctx->list_lock);
2145 sctx->curr = sctx->first_free;
2146 if (sctx->curr != -1) {
2147 sctx->first_free = sctx->bios[sctx->curr]->next_free;
2148 sctx->bios[sctx->curr]->next_free = -1;
2149 sctx->bios[sctx->curr]->page_count = 0;
2150 spin_unlock(&sctx->list_lock);
Arne Jansena2de7332011-03-08 14:14:00 +01002151 } else {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002152 spin_unlock(&sctx->list_lock);
2153 wait_event(sctx->list_wait, sctx->first_free != -1);
Arne Jansena2de7332011-03-08 14:14:00 +01002154 }
2155 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002156 sbio = sctx->bios[sctx->curr];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002157 if (sbio->page_count == 0) {
Arne Jansen69f4cb52011-11-11 08:17:10 -05002158 struct bio *bio;
2159
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002160 sbio->physical = spage->physical;
2161 sbio->logical = spage->logical;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002162 sbio->dev = spage->dev;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002163 bio = sbio->bio;
2164 if (!bio) {
Chris Mason9be33952013-05-17 18:30:14 -04002165 bio = btrfs_io_bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002166 if (!bio)
2167 return -ENOMEM;
2168 sbio->bio = bio;
2169 }
Arne Jansen69f4cb52011-11-11 08:17:10 -05002170
2171 bio->bi_private = sbio;
2172 bio->bi_end_io = scrub_bio_end_io;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002173 bio->bi_bdev = sbio->dev->bdev;
Kent Overstreet4f024f32013-10-11 15:44:27 -07002174 bio->bi_iter.bi_sector = sbio->physical >> 9;
Arne Jansen69f4cb52011-11-11 08:17:10 -05002175 sbio->err = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002176 } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
2177 spage->physical ||
2178 sbio->logical + sbio->page_count * PAGE_SIZE !=
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002179 spage->logical ||
2180 sbio->dev != spage->dev) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002181 scrub_submit(sctx);
Arne Jansen69f4cb52011-11-11 08:17:10 -05002182 goto again;
2183 }
2184
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002185 sbio->pagev[sbio->page_count] = spage;
2186 ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
2187 if (ret != PAGE_SIZE) {
2188 if (sbio->page_count < 1) {
2189 bio_put(sbio->bio);
2190 sbio->bio = NULL;
2191 return -EIO;
2192 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002193 scrub_submit(sctx);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002194 goto again;
Arne Jansena2de7332011-03-08 14:14:00 +01002195 }
Arne Jansen1bc87792011-05-28 21:57:55 +02002196
Stefan Behrensff023aa2012-11-06 11:43:11 +01002197 scrub_block_get(sblock); /* one for the page added to the bio */
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002198 atomic_inc(&sblock->outstanding_pages);
2199 sbio->page_count++;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002200 if (sbio->page_count == sctx->pages_per_rd_bio)
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002201 scrub_submit(sctx);
Arne Jansena2de7332011-03-08 14:14:00 +01002202
2203 return 0;
2204}
2205
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002206static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002207 u64 physical, struct btrfs_device *dev, u64 flags,
Stefan Behrensff023aa2012-11-06 11:43:11 +01002208 u64 gen, int mirror_num, u8 *csum, int force,
2209 u64 physical_for_dev_replace)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002210{
2211 struct scrub_block *sblock;
2212 int index;
2213
2214 sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
2215 if (!sblock) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002216 spin_lock(&sctx->stat_lock);
2217 sctx->stat.malloc_errors++;
2218 spin_unlock(&sctx->stat_lock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002219 return -ENOMEM;
2220 }
2221
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002222 /* one ref inside this function, plus one for each page added to
2223 * a bio later on */
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002224 atomic_set(&sblock->ref_count, 1);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002225 sblock->sctx = sctx;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002226 sblock->no_io_error_seen = 1;
2227
2228 for (index = 0; len > 0; index++) {
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002229 struct scrub_page *spage;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002230 u64 l = min_t(u64, len, PAGE_SIZE);
2231
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002232 spage = kzalloc(sizeof(*spage), GFP_NOFS);
2233 if (!spage) {
2234leave_nomem:
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002235 spin_lock(&sctx->stat_lock);
2236 sctx->stat.malloc_errors++;
2237 spin_unlock(&sctx->stat_lock);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002238 scrub_block_put(sblock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002239 return -ENOMEM;
2240 }
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002241 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2242 scrub_page_get(spage);
2243 sblock->pagev[index] = spage;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002244 spage->sblock = sblock;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002245 spage->dev = dev;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002246 spage->flags = flags;
2247 spage->generation = gen;
2248 spage->logical = logical;
2249 spage->physical = physical;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002250 spage->physical_for_dev_replace = physical_for_dev_replace;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002251 spage->mirror_num = mirror_num;
2252 if (csum) {
2253 spage->have_csum = 1;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002254 memcpy(spage->csum, csum, sctx->csum_size);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002255 } else {
2256 spage->have_csum = 0;
2257 }
2258 sblock->page_count++;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002259 spage->page = alloc_page(GFP_NOFS);
2260 if (!spage->page)
2261 goto leave_nomem;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002262 len -= l;
2263 logical += l;
2264 physical += l;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002265 physical_for_dev_replace += l;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002266 }
2267
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002268 WARN_ON(sblock->page_count == 0);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002269 for (index = 0; index < sblock->page_count; index++) {
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002270 struct scrub_page *spage = sblock->pagev[index];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002271 int ret;
2272
Stefan Behrensff023aa2012-11-06 11:43:11 +01002273 ret = scrub_add_page_to_rd_bio(sctx, spage);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002274 if (ret) {
2275 scrub_block_put(sblock);
2276 return ret;
2277 }
2278 }
2279
2280 if (force)
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002281 scrub_submit(sctx);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002282
2283 /* last one frees, either here or in bio completion for last page */
2284 scrub_block_put(sblock);
2285 return 0;
2286}
2287
2288static void scrub_bio_end_io(struct bio *bio, int err)
2289{
2290 struct scrub_bio *sbio = bio->bi_private;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002291 struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002292
2293 sbio->err = err;
2294 sbio->bio = bio;
2295
Qu Wenruo0339ef22014-02-28 10:46:17 +08002296 btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002297}
2298
2299static void scrub_bio_end_io_worker(struct btrfs_work *work)
2300{
2301 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002302 struct scrub_ctx *sctx = sbio->sctx;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002303 int i;
2304
Stefan Behrensff023aa2012-11-06 11:43:11 +01002305 BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002306 if (sbio->err) {
2307 for (i = 0; i < sbio->page_count; i++) {
2308 struct scrub_page *spage = sbio->pagev[i];
2309
2310 spage->io_error = 1;
2311 spage->sblock->no_io_error_seen = 0;
2312 }
2313 }
2314
2315 /* now complete the scrub_block items that have all pages completed */
2316 for (i = 0; i < sbio->page_count; i++) {
2317 struct scrub_page *spage = sbio->pagev[i];
2318 struct scrub_block *sblock = spage->sblock;
2319
2320 if (atomic_dec_and_test(&sblock->outstanding_pages))
2321 scrub_block_complete(sblock);
2322 scrub_block_put(sblock);
2323 }
2324
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002325 bio_put(sbio->bio);
2326 sbio->bio = NULL;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002327 spin_lock(&sctx->list_lock);
2328 sbio->next_free = sctx->first_free;
2329 sctx->first_free = sbio->index;
2330 spin_unlock(&sctx->list_lock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01002331
2332 if (sctx->is_dev_replace &&
2333 atomic_read(&sctx->wr_ctx.flush_all_writes)) {
2334 mutex_lock(&sctx->wr_ctx.wr_lock);
2335 scrub_wr_submit(sctx);
2336 mutex_unlock(&sctx->wr_ctx.wr_lock);
2337 }
2338
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01002339 scrub_pending_bio_dec(sctx);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002340}
2341
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002342static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
2343 unsigned long *bitmap,
2344 u64 start, u64 len)
2345{
2346 int offset;
2347 int nsectors;
2348 int sectorsize = sparity->sctx->dev_root->sectorsize;
2349
2350 if (len >= sparity->stripe_len) {
2351 bitmap_set(bitmap, 0, sparity->nsectors);
2352 return;
2353 }
2354
2355 start -= sparity->logic_start;
2356 offset = (int)do_div(start, sparity->stripe_len);
2357 offset /= sectorsize;
2358 nsectors = (int)len / sectorsize;
2359
2360 if (offset + nsectors <= sparity->nsectors) {
2361 bitmap_set(bitmap, offset, nsectors);
2362 return;
2363 }
2364
2365 bitmap_set(bitmap, offset, sparity->nsectors - offset);
2366 bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
2367}
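/*
 * Note on the two bitmap_set() calls above: offsets are taken modulo
 * stripe_len, so a range crossing the end of the stripe wraps around
 * and its remaining sectors are marked starting from bit 0.
 */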
2368
2369static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
2370 u64 start, u64 len)
2371{
2372 __scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
2373}
2374
2375static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
2376 u64 start, u64 len)
2377{
2378 __scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
2379}
2380
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002381static void scrub_block_complete(struct scrub_block *sblock)
2382{
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002383 int corrupted = 0;
2384
Stefan Behrensff023aa2012-11-06 11:43:11 +01002385 if (!sblock->no_io_error_seen) {
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002386 corrupted = 1;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002387 scrub_handle_errored_block(sblock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01002388 } else {
2389 /*
2390		 * in the dev-replace case, a block with a checksum error is
2391		 * written via the repair mechanism; a clean block is written
2392		 * out here directly.
2393 */
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002394 corrupted = scrub_checksum(sblock);
2395 if (!corrupted && sblock->sctx->is_dev_replace)
Stefan Behrensff023aa2012-11-06 11:43:11 +01002396 scrub_write_block_to_dev_replace(sblock);
2397 }
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002398
2399 if (sblock->sparity && corrupted && !sblock->data_corrected) {
2400 u64 start = sblock->pagev[0]->logical;
2401 u64 end = sblock->pagev[sblock->page_count - 1]->logical +
2402 PAGE_SIZE;
2403
2404 scrub_parity_mark_sectors_error(sblock->sparity,
2405 start, end - start);
2406 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002407}
2408
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002409static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len,
Arne Jansena2de7332011-03-08 14:14:00 +01002410 u8 *csum)
2411{
2412 struct btrfs_ordered_sum *sum = NULL;
Miao Xief51a4a12013-06-19 10:36:09 +08002413 unsigned long index;
Arne Jansena2de7332011-03-08 14:14:00 +01002414 unsigned long num_sectors;
Arne Jansena2de7332011-03-08 14:14:00 +01002415
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002416 while (!list_empty(&sctx->csum_list)) {
2417 sum = list_first_entry(&sctx->csum_list,
Arne Jansena2de7332011-03-08 14:14:00 +01002418 struct btrfs_ordered_sum, list);
2419 if (sum->bytenr > logical)
2420 return 0;
2421 if (sum->bytenr + sum->len > logical)
2422 break;
2423
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002424 ++sctx->stat.csum_discards;
Arne Jansena2de7332011-03-08 14:14:00 +01002425 list_del(&sum->list);
2426 kfree(sum);
2427 sum = NULL;
2428 }
2429 if (!sum)
2430 return 0;
2431
Miao Xief51a4a12013-06-19 10:36:09 +08002432 index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002433 num_sectors = sum->len / sctx->sectorsize;
Miao Xief51a4a12013-06-19 10:36:09 +08002434 memcpy(csum, sum->sums + index, sctx->csum_size);
2435 if (index == num_sectors - 1) {
Arne Jansena2de7332011-03-08 14:14:00 +01002436 list_del(&sum->list);
2437 kfree(sum);
2438 }
Miao Xief51a4a12013-06-19 10:36:09 +08002439 return 1;
Arne Jansena2de7332011-03-08 14:14:00 +01002440}
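/*
 * Example of the lookup math above (hypothetical numbers): for an
 * ordered sum covering bytenr 1M..1M+64K and a 4K sectorsize, a logical
 * address of 1M+20480 yields index 20480 / 4096 == 5, i.e. the sixth
 * per-sector checksum stored in sum->sums is copied out.
 */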
2441
2442/* scrub extent tries to collect up to 64 kB for each bio */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002443static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002444 u64 physical, struct btrfs_device *dev, u64 flags,
Stefan Behrensff023aa2012-11-06 11:43:11 +01002445 u64 gen, int mirror_num, u64 physical_for_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01002446{
2447 int ret;
2448 u8 csum[BTRFS_CSUM_SIZE];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002449 u32 blocksize;
2450
2451 if (flags & BTRFS_EXTENT_FLAG_DATA) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002452 blocksize = sctx->sectorsize;
2453 spin_lock(&sctx->stat_lock);
2454 sctx->stat.data_extents_scrubbed++;
2455 sctx->stat.data_bytes_scrubbed += len;
2456 spin_unlock(&sctx->stat_lock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002457 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002458 blocksize = sctx->nodesize;
2459 spin_lock(&sctx->stat_lock);
2460 sctx->stat.tree_extents_scrubbed++;
2461 sctx->stat.tree_bytes_scrubbed += len;
2462 spin_unlock(&sctx->stat_lock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002463 } else {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002464 blocksize = sctx->sectorsize;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002465 WARN_ON(1);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002466 }
Arne Jansena2de7332011-03-08 14:14:00 +01002467
2468 while (len) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002469 u64 l = min_t(u64, len, blocksize);
Arne Jansena2de7332011-03-08 14:14:00 +01002470 int have_csum = 0;
2471
2472 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2473 /* push csums to sbio */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002474 have_csum = scrub_find_csum(sctx, logical, l, csum);
Arne Jansena2de7332011-03-08 14:14:00 +01002475 if (have_csum == 0)
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002476 ++sctx->stat.no_csum;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002477 if (sctx->is_dev_replace && !have_csum) {
2478 ret = copy_nocow_pages(sctx, logical, l,
2479 mirror_num,
2480 physical_for_dev_replace);
2481 goto behind_scrub_pages;
2482 }
Arne Jansena2de7332011-03-08 14:14:00 +01002483 }
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002484 ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
Stefan Behrensff023aa2012-11-06 11:43:11 +01002485 mirror_num, have_csum ? csum : NULL, 0,
2486 physical_for_dev_replace);
2487behind_scrub_pages:
Arne Jansena2de7332011-03-08 14:14:00 +01002488 if (ret)
2489 return ret;
2490 len -= l;
2491 logical += l;
2492 physical += l;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002493 physical_for_dev_replace += l;
Arne Jansena2de7332011-03-08 14:14:00 +01002494 }
2495 return 0;
2496}
2497
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002498static int scrub_pages_for_parity(struct scrub_parity *sparity,
2499 u64 logical, u64 len,
2500 u64 physical, struct btrfs_device *dev,
2501 u64 flags, u64 gen, int mirror_num, u8 *csum)
2502{
2503 struct scrub_ctx *sctx = sparity->sctx;
2504 struct scrub_block *sblock;
2505 int index;
2506
2507 sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
2508 if (!sblock) {
2509 spin_lock(&sctx->stat_lock);
2510 sctx->stat.malloc_errors++;
2511 spin_unlock(&sctx->stat_lock);
2512 return -ENOMEM;
2513 }
2514
2515 /* one ref inside this function, plus one for each page added to
2516 * a bio later on */
2517 atomic_set(&sblock->ref_count, 1);
2518 sblock->sctx = sctx;
2519 sblock->no_io_error_seen = 1;
2520 sblock->sparity = sparity;
2521 scrub_parity_get(sparity);
2522
2523 for (index = 0; len > 0; index++) {
2524 struct scrub_page *spage;
2525 u64 l = min_t(u64, len, PAGE_SIZE);
2526
2527 spage = kzalloc(sizeof(*spage), GFP_NOFS);
2528 if (!spage) {
2529leave_nomem:
2530 spin_lock(&sctx->stat_lock);
2531 sctx->stat.malloc_errors++;
2532 spin_unlock(&sctx->stat_lock);
2533 scrub_block_put(sblock);
2534 return -ENOMEM;
2535 }
2536 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2537 /* For scrub block */
2538 scrub_page_get(spage);
2539 sblock->pagev[index] = spage;
2540 /* For scrub parity */
2541 scrub_page_get(spage);
2542 list_add_tail(&spage->list, &sparity->spages);
2543 spage->sblock = sblock;
2544 spage->dev = dev;
2545 spage->flags = flags;
2546 spage->generation = gen;
2547 spage->logical = logical;
2548 spage->physical = physical;
2549 spage->mirror_num = mirror_num;
2550 if (csum) {
2551 spage->have_csum = 1;
2552 memcpy(spage->csum, csum, sctx->csum_size);
2553 } else {
2554 spage->have_csum = 0;
2555 }
2556 sblock->page_count++;
2557 spage->page = alloc_page(GFP_NOFS);
2558 if (!spage->page)
2559 goto leave_nomem;
2560 len -= l;
2561 logical += l;
2562 physical += l;
2563 }
2564
2565 WARN_ON(sblock->page_count == 0);
2566 for (index = 0; index < sblock->page_count; index++) {
2567 struct scrub_page *spage = sblock->pagev[index];
2568 int ret;
2569
2570 ret = scrub_add_page_to_rd_bio(sctx, spage);
2571 if (ret) {
2572 scrub_block_put(sblock);
2573 return ret;
2574 }
2575 }
2576
2577 /* last one frees, either here or in bio completion for last page */
2578 scrub_block_put(sblock);
2579 return 0;
2580}
2581
2582static int scrub_extent_for_parity(struct scrub_parity *sparity,
2583 u64 logical, u64 len,
2584 u64 physical, struct btrfs_device *dev,
2585 u64 flags, u64 gen, int mirror_num)
2586{
2587 struct scrub_ctx *sctx = sparity->sctx;
2588 int ret;
2589 u8 csum[BTRFS_CSUM_SIZE];
2590 u32 blocksize;
2591
2592 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2593 blocksize = sctx->sectorsize;
2594 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2595 blocksize = sctx->nodesize;
2596 } else {
2597 blocksize = sctx->sectorsize;
2598 WARN_ON(1);
2599 }
2600
2601 while (len) {
2602 u64 l = min_t(u64, len, blocksize);
2603 int have_csum = 0;
2604
2605 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2606 /* push csums to sbio */
2607 have_csum = scrub_find_csum(sctx, logical, l, csum);
2608 if (have_csum == 0)
2609 goto skip;
2610 }
2611 ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
2612 flags, gen, mirror_num,
2613 have_csum ? csum : NULL);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002614 if (ret)
2615 return ret;
Dan Carpenter6b6d24b2014-12-12 22:30:00 +03002616skip:
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002617 len -= l;
2618 logical += l;
2619 physical += l;
2620 }
2621 return 0;
2622}
2623
Wang Shilong3b080b22014-04-01 18:01:43 +08002624/*
2625 * Given a physical address, this will calculate its
2626 * logical offset. If this is a parity stripe, it will return
2627 * the leftmost data stripe's logical offset.
2628 *
2629 * Returns 0 if it is a data stripe, 1 if it is a parity stripe.
2630 */
2631static int get_raid56_logic_offset(u64 physical, int num,
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002632 struct map_lookup *map, u64 *offset,
2633 u64 *stripe_start)
Wang Shilong3b080b22014-04-01 18:01:43 +08002634{
2635 int i;
2636 int j = 0;
2637 u64 stripe_nr;
2638 u64 last_offset;
2639 int stripe_index;
2640 int rot;
2641
2642 last_offset = (physical - map->stripes[num].physical) *
2643 nr_data_stripes(map);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002644 if (stripe_start)
2645 *stripe_start = last_offset;
2646
Wang Shilong3b080b22014-04-01 18:01:43 +08002647 *offset = last_offset;
2648 for (i = 0; i < nr_data_stripes(map); i++) {
2649 *offset = last_offset + i * map->stripe_len;
2650
2651 stripe_nr = *offset;
2652 do_div(stripe_nr, map->stripe_len);
2653 do_div(stripe_nr, nr_data_stripes(map));
2654
2655 /* Work out the disk rotation on this stripe-set */
2656 rot = do_div(stripe_nr, map->num_stripes);
2657 /* calculate which stripe this data locates */
2658 rot += i;
Wang Shilonge4fbaee2014-04-11 18:32:25 +08002659 stripe_index = rot % map->num_stripes;
Wang Shilong3b080b22014-04-01 18:01:43 +08002660 if (stripe_index == num)
2661 return 0;
2662 if (stripe_index < num)
2663 j++;
2664 }
2665 *offset = last_offset + j * map->stripe_len;
2666 return 1;
2667}
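/*
 * Worked example (illustrative, not part of the original source): take a
 * 3-device RAID5 map with stripe_len = 64 KiB, so nr_data_stripes() = 2.
 * Row r lives at physical offset r * 64K on every disk and the parity
 * rotates by one disk per row.
 *
 *	get_raid56_logic_offset(stripes[0].physical, 0, map, &off, &start);
 *	-> returns 0 (data), off = 0: row 0 keeps its data on disks 0 and 1.
 *
 *	get_raid56_logic_offset(stripes[0].physical + 65536, 0, map,
 *				&off, &start);
 *	-> returns 1 (parity): in row 1 the rotation puts parity on disk 0;
 *	   start = 128K, the logical start of that row's data.
 */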
2668
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002669static void scrub_free_parity(struct scrub_parity *sparity)
2670{
2671 struct scrub_ctx *sctx = sparity->sctx;
2672 struct scrub_page *curr, *next;
2673 int nbits;
2674
2675 nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
2676 if (nbits) {
2677 spin_lock(&sctx->stat_lock);
2678 sctx->stat.read_errors += nbits;
2679 sctx->stat.uncorrectable_errors += nbits;
2680 spin_unlock(&sctx->stat_lock);
2681 }
2682
2683 list_for_each_entry_safe(curr, next, &sparity->spages, list) {
2684 list_del_init(&curr->list);
2685 scrub_page_put(curr);
2686 }
2687
2688 kfree(sparity);
2689}
2690
2691static void scrub_parity_bio_endio(struct bio *bio, int error)
2692{
2693 struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
2694 struct scrub_ctx *sctx = sparity->sctx;
2695
2696 if (error)
2697 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2698 sparity->nsectors);
2699
2700 scrub_free_parity(sparity);
2701 scrub_pending_bio_dec(sctx);
2702 bio_put(bio);
2703}
2704
2705static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
2706{
2707 struct scrub_ctx *sctx = sparity->sctx;
2708 struct bio *bio;
2709 struct btrfs_raid_bio *rbio;
2710 struct scrub_page *spage;
2711 struct btrfs_bio *bbio = NULL;
2712 u64 *raid_map = NULL;
2713 u64 length;
2714 int ret;
2715
2716 if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
2717 sparity->nsectors))
2718 goto out;
2719
2720 length = sparity->logic_end - sparity->logic_start + 1;
Miao Xie76035972014-11-14 17:45:42 +08002721 ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE,
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002722 sparity->logic_start,
2723 &length, &bbio, 0, &raid_map);
2724 if (ret || !bbio || !raid_map)
2725 goto bbio_out;
2726
2727 bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
2728 if (!bio)
2729 goto bbio_out;
2730
2731 bio->bi_iter.bi_sector = sparity->logic_start >> 9;
2732 bio->bi_private = sparity;
2733 bio->bi_end_io = scrub_parity_bio_endio;
2734
2735 rbio = raid56_parity_alloc_scrub_rbio(sctx->dev_root, bio, bbio,
2736 raid_map, length,
2737 sparity->scrub_dev,
2738 sparity->dbitmap,
2739 sparity->nsectors);
2740 if (!rbio)
2741 goto rbio_out;
2742
2743 list_for_each_entry(spage, &sparity->spages, list)
2744 raid56_parity_add_scrub_pages(rbio, spage->page,
2745 spage->logical);
2746
2747 scrub_pending_bio_inc(sctx);
2748 raid56_parity_submit_scrub_rbio(rbio);
2749 return;
2750
2751rbio_out:
2752 bio_put(bio);
2753bbio_out:
2754 kfree(bbio);
2755 kfree(raid_map);
2756 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2757 sparity->nsectors);
2758 spin_lock(&sctx->stat_lock);
2759 sctx->stat.malloc_errors++;
2760 spin_unlock(&sctx->stat_lock);
2761out:
2762 scrub_free_parity(sparity);
2763}
2764
2765static inline int scrub_calc_parity_bitmap_len(int nsectors)
2766{
2767 return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * (BITS_PER_LONG / 8);
2768}
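/*
 * Example (illustrative): with the default 64 KiB stripe_len and a 4 KiB
 * sectorsize, nsectors = 16, so DIV_ROUND_UP(16, BITS_PER_LONG) is one
 * long, i.e. 8 bytes per bitmap on a 64-bit kernel.  scrub_raid56_parity()
 * below allocates 2 * bitmap_len so that dbitmap and ebitmap share a
 * single allocation.
 */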
2769
2770static void scrub_parity_get(struct scrub_parity *sparity)
2771{
2772 atomic_inc(&sparity->ref_count);
2773}
2774
2775static void scrub_parity_put(struct scrub_parity *sparity)
2776{
2777 if (!atomic_dec_and_test(&sparity->ref_count))
2778 return;
2779
2780 scrub_parity_check_and_repair(sparity);
2781}
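/*
 * Lifecycle note (added for clarity): scrub_raid56_parity() takes the
 * initial reference via atomic_set(&sparity->ref_count, 1); further
 * references are presumably held while sectors are in flight.  The final
 * scrub_parity_put() is what actually runs
 * scrub_parity_check_and_repair(), so repair only starts once all readers
 * of this stripe set have finished.
 */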
2782
2783static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
2784 struct map_lookup *map,
2785 struct btrfs_device *sdev,
2786 struct btrfs_path *path,
2787 u64 logic_start,
2788 u64 logic_end)
2789{
2790 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
2791 struct btrfs_root *root = fs_info->extent_root;
2792 struct btrfs_root *csum_root = fs_info->csum_root;
2793 struct btrfs_extent_item *extent;
2794 u64 flags;
2795 int ret;
2796 int slot;
2797 struct extent_buffer *l;
2798 struct btrfs_key key;
2799 u64 generation;
2800 u64 extent_logical;
2801 u64 extent_physical;
2802 u64 extent_len;
2803 struct btrfs_device *extent_dev;
2804 struct scrub_parity *sparity;
2805 int nsectors;
2806 int bitmap_len;
2807 int extent_mirror_num;
2808 int stop_loop = 0;
2809
2810 nsectors = map->stripe_len / root->sectorsize;
2811 bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
2812 sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
2813 GFP_NOFS);
2814 if (!sparity) {
2815 spin_lock(&sctx->stat_lock);
2816 sctx->stat.malloc_errors++;
2817 spin_unlock(&sctx->stat_lock);
2818 return -ENOMEM;
2819 }
2820
2821 sparity->stripe_len = map->stripe_len;
2822 sparity->nsectors = nsectors;
2823 sparity->sctx = sctx;
2824 sparity->scrub_dev = sdev;
2825 sparity->logic_start = logic_start;
2826 sparity->logic_end = logic_end;
2827 atomic_set(&sparity->ref_count, 1);
2828 INIT_LIST_HEAD(&sparity->spages);
2829 sparity->dbitmap = sparity->bitmap;
2830 sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;
2831
2832 ret = 0;
2833 while (logic_start < logic_end) {
2834 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2835 key.type = BTRFS_METADATA_ITEM_KEY;
2836 else
2837 key.type = BTRFS_EXTENT_ITEM_KEY;
2838 key.objectid = logic_start;
2839 key.offset = (u64)-1;
2840
2841 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2842 if (ret < 0)
2843 goto out;
2844
2845 if (ret > 0) {
2846 ret = btrfs_previous_extent_item(root, path, 0);
2847 if (ret < 0)
2848 goto out;
2849 if (ret > 0) {
2850 btrfs_release_path(path);
2851 ret = btrfs_search_slot(NULL, root, &key,
2852 path, 0, 0);
2853 if (ret < 0)
2854 goto out;
2855 }
2856 }
2857
2858 stop_loop = 0;
2859 while (1) {
2860 u64 bytes;
2861
2862 l = path->nodes[0];
2863 slot = path->slots[0];
2864 if (slot >= btrfs_header_nritems(l)) {
2865 ret = btrfs_next_leaf(root, path);
2866 if (ret == 0)
2867 continue;
2868 if (ret < 0)
2869 goto out;
2870
2871 stop_loop = 1;
2872 break;
2873 }
2874 btrfs_item_key_to_cpu(l, &key, slot);
2875
2876 if (key.type == BTRFS_METADATA_ITEM_KEY)
2877 bytes = root->nodesize;
2878 else
2879 bytes = key.offset;
2880
2881 if (key.objectid + bytes <= logic_start)
2882 goto next;
2883
2884 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
2885 key.type != BTRFS_METADATA_ITEM_KEY)
2886 goto next;
2887
2888 if (key.objectid > logic_end) {
2889 stop_loop = 1;
2890 break;
2891 }
2892
2893 while (key.objectid >= logic_start + map->stripe_len)
2894 logic_start += map->stripe_len;
2895
2896 extent = btrfs_item_ptr(l, slot,
2897 struct btrfs_extent_item);
2898 flags = btrfs_extent_flags(l, extent);
2899 generation = btrfs_extent_generation(l, extent);
2900
2901 if (key.objectid < logic_start &&
2902 (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
2903 btrfs_err(fs_info,
2904 "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
2905 key.objectid, logic_start);
2906 goto next;
2907 }
2908again:
2909 extent_logical = key.objectid;
2910 extent_len = bytes;
2911
2912 if (extent_logical < logic_start) {
2913 extent_len -= logic_start - extent_logical;
2914 extent_logical = logic_start;
2915 }
2916
2917 if (extent_logical + extent_len >
2918 logic_start + map->stripe_len)
2919 extent_len = logic_start + map->stripe_len -
2920 extent_logical;
2921
2922 scrub_parity_mark_sectors_data(sparity, extent_logical,
2923 extent_len);
2924
2925 scrub_remap_extent(fs_info, extent_logical,
2926 extent_len, &extent_physical,
2927 &extent_dev,
2928 &extent_mirror_num);
2929
2930 ret = btrfs_lookup_csums_range(csum_root,
2931 extent_logical,
2932 extent_logical + extent_len - 1,
2933 &sctx->csum_list, 1);
2934 if (ret)
2935 goto out;
2936
2937 ret = scrub_extent_for_parity(sparity, extent_logical,
2938 extent_len,
2939 extent_physical,
2940 extent_dev, flags,
2941 generation,
2942 extent_mirror_num);
2943 if (ret)
2944 goto out;
2945
2946 scrub_free_csums(sctx);
2947 if (extent_logical + extent_len <
2948 key.objectid + bytes) {
2949 logic_start += map->stripe_len;
2950
2951 if (logic_start >= logic_end) {
2952 stop_loop = 1;
2953 break;
2954 }
2955
2956 if (logic_start < key.objectid + bytes) {
2957 cond_resched();
2958 goto again;
2959 }
2960 }
2961next:
2962 path->slots[0]++;
2963 }
2964
2965 btrfs_release_path(path);
2966
2967 if (stop_loop)
2968 break;
2969
2970 logic_start += map->stripe_len;
2971 }
2972out:
2973 if (ret < 0)
2974 scrub_parity_mark_sectors_error(sparity, logic_start,
2975 logic_end - logic_start + 1);
2976 scrub_parity_put(sparity);
2977 scrub_submit(sctx);
2978 mutex_lock(&sctx->wr_ctx.wr_lock);
2979 scrub_wr_submit(sctx);
2980 mutex_unlock(&sctx->wr_ctx.wr_lock);
2981
2982 btrfs_release_path(path);
2983 return ret < 0 ? ret : 0;
2984}
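/*
 * Summary (descriptive comment, added for clarity): scrub_raid56_parity()
 * walks all extent items that intersect the full stripe
 * [logic_start, logic_end], marks their sectors in dbitmap via
 * scrub_parity_mark_sectors_data(), scrubs the data through
 * scrub_extent_for_parity(), and finally lets the last reference dropped
 * in scrub_parity_put() trigger the parity check and repair.
 */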
2985
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002986static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002987 struct map_lookup *map,
2988 struct btrfs_device *scrub_dev,
Stefan Behrensff023aa2012-11-06 11:43:11 +01002989 int num, u64 base, u64 length,
2990 int is_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01002991{
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002992 struct btrfs_path *path, *ppath;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002993 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
Arne Jansena2de7332011-03-08 14:14:00 +01002994 struct btrfs_root *root = fs_info->extent_root;
2995 struct btrfs_root *csum_root = fs_info->csum_root;
2996 struct btrfs_extent_item *extent;
Arne Jansene7786c32011-05-28 20:58:38 +00002997 struct blk_plug plug;
Arne Jansena2de7332011-03-08 14:14:00 +01002998 u64 flags;
2999 int ret;
3000 int slot;
Arne Jansena2de7332011-03-08 14:14:00 +01003001 u64 nstripes;
Arne Jansena2de7332011-03-08 14:14:00 +01003002 struct extent_buffer *l;
3003 struct btrfs_key key;
3004 u64 physical;
3005 u64 logical;
Liu Bo625f1c8d2013-04-27 02:56:57 +00003006 u64 logic_end;
Wang Shilong3b080b22014-04-01 18:01:43 +08003007 u64 physical_end;
Arne Jansena2de7332011-03-08 14:14:00 +01003008 u64 generation;
Jan Schmidte12fa9c2011-06-17 15:55:21 +02003009 int mirror_num;
Arne Jansen7a262852011-06-10 12:39:23 +02003010 struct reada_control *reada1;
3011 struct reada_control *reada2;
3012 struct btrfs_key key_start;
3013 struct btrfs_key key_end;
Arne Jansena2de7332011-03-08 14:14:00 +01003014 u64 increment = map->stripe_len;
3015 u64 offset;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003016 u64 extent_logical;
3017 u64 extent_physical;
3018 u64 extent_len;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003019 u64 stripe_logical;
3020 u64 stripe_end;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003021 struct btrfs_device *extent_dev;
3022 int extent_mirror_num;
Wang Shilong3b080b22014-04-01 18:01:43 +08003023 int stop_loop = 0;
David Woodhouse53b381b2013-01-29 18:40:14 -05003024
Arne Jansena2de7332011-03-08 14:14:00 +01003025 nstripes = length;
Wang Shilong3b080b22014-04-01 18:01:43 +08003026 physical = map->stripes[num].physical;
Arne Jansena2de7332011-03-08 14:14:00 +01003027 offset = 0;
3028 do_div(nstripes, map->stripe_len);
3029 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3030 offset = map->stripe_len * num;
3031 increment = map->stripe_len * map->num_stripes;
Jan Schmidt193ea742011-06-13 19:56:54 +02003032 mirror_num = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003033 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3034 int factor = map->num_stripes / map->sub_stripes;
3035 offset = map->stripe_len * (num / map->sub_stripes);
3036 increment = map->stripe_len * factor;
Jan Schmidt193ea742011-06-13 19:56:54 +02003037 mirror_num = num % map->sub_stripes + 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003038 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3039 increment = map->stripe_len;
Jan Schmidt193ea742011-06-13 19:56:54 +02003040 mirror_num = num % map->num_stripes + 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003041 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3042 increment = map->stripe_len;
Jan Schmidt193ea742011-06-13 19:56:54 +02003043 mirror_num = num % map->num_stripes + 1;
Wang Shilong3b080b22014-04-01 18:01:43 +08003044 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
3045 BTRFS_BLOCK_GROUP_RAID6)) {
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003046 get_raid56_logic_offset(physical, num, map, &offset, NULL);
Wang Shilong3b080b22014-04-01 18:01:43 +08003047 increment = map->stripe_len * nr_data_stripes(map);
3048 mirror_num = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003049 } else {
3050 increment = map->stripe_len;
Jan Schmidt193ea742011-06-13 19:56:54 +02003051 mirror_num = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003052 }
3053
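	/*
	 * Example of the offset/increment selection above (illustrative,
	 * added for clarity): for RAID0 with 4 stripes and a 64 KiB
	 * stripe_len, stripe num = 1 starts at offset = 64K and advances by
	 * increment = 256K per iteration, since this device holds every 4th
	 * stripe.  For RAID1/DUP the increment is a single stripe_len and
	 * mirror_num records which copy this device holds.
	 */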
3054 path = btrfs_alloc_path();
3055 if (!path)
3056 return -ENOMEM;
3057
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003058 ppath = btrfs_alloc_path();
3059 if (!ppath) {
3060		btrfs_free_path(path);	/* free "path"; "ppath" is NULL here */
3061 return -ENOMEM;
3062 }
3063
Stefan Behrensb5d67f62012-03-27 14:21:27 -04003064 /*
3065	 * work on the commit root. The related disk blocks are static as
3066	 * long as COW is applied. This means it is safe to rewrite
3067	 * them to repair disk errors without any race conditions.
3068 */
Arne Jansena2de7332011-03-08 14:14:00 +01003069 path->search_commit_root = 1;
3070 path->skip_locking = 1;
3071
3072 /*
Arne Jansen7a262852011-06-10 12:39:23 +02003073	 * trigger the readahead for the extent tree and csum tree and wait
3074	 * for completion. During readahead, the scrub is officially paused
3075	 * so as not to hold off transaction commits.
Arne Jansena2de7332011-03-08 14:14:00 +01003076 */
3077 logical = base + offset;
Wang Shilong3b080b22014-04-01 18:01:43 +08003078 physical_end = physical + nstripes * map->stripe_len;
3079 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
3080 BTRFS_BLOCK_GROUP_RAID6)) {
3081 get_raid56_logic_offset(physical_end, num,
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003082 map, &logic_end, NULL);
Wang Shilong3b080b22014-04-01 18:01:43 +08003083 logic_end += base;
3084 } else {
3085 logic_end = logical + increment * nstripes;
3086 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003087 wait_event(sctx->list_wait,
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01003088 atomic_read(&sctx->bios_in_flight) == 0);
Wang Shilongcb7ab022013-12-04 21:16:53 +08003089 scrub_blocked_if_needed(fs_info);
Arne Jansena2de7332011-03-08 14:14:00 +01003090
Arne Jansen7a262852011-06-10 12:39:23 +02003091 /* FIXME it might be better to start readahead at commit root */
3092 key_start.objectid = logical;
3093 key_start.type = BTRFS_EXTENT_ITEM_KEY;
3094 key_start.offset = (u64)0;
Wang Shilong3b080b22014-04-01 18:01:43 +08003095 key_end.objectid = logic_end;
Josef Bacik3173a182013-03-07 14:22:04 -05003096 key_end.type = BTRFS_METADATA_ITEM_KEY;
3097 key_end.offset = (u64)-1;
Arne Jansen7a262852011-06-10 12:39:23 +02003098 reada1 = btrfs_reada_add(root, &key_start, &key_end);
Arne Jansena2de7332011-03-08 14:14:00 +01003099
Arne Jansen7a262852011-06-10 12:39:23 +02003100 key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3101 key_start.type = BTRFS_EXTENT_CSUM_KEY;
3102 key_start.offset = logical;
3103 key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3104 key_end.type = BTRFS_EXTENT_CSUM_KEY;
Wang Shilong3b080b22014-04-01 18:01:43 +08003105 key_end.offset = logic_end;
Arne Jansen7a262852011-06-10 12:39:23 +02003106 reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);
Arne Jansena2de7332011-03-08 14:14:00 +01003107
Arne Jansen7a262852011-06-10 12:39:23 +02003108 if (!IS_ERR(reada1))
3109 btrfs_reada_wait(reada1);
3110 if (!IS_ERR(reada2))
3111 btrfs_reada_wait(reada2);
Arne Jansena2de7332011-03-08 14:14:00 +01003112
Arne Jansena2de7332011-03-08 14:14:00 +01003113
3114 /*
3115 * collect all data csums for the stripe to avoid seeking during
3116	 * the scrub. This might currently (crc32) end up being about 1MB
3117 */
Arne Jansene7786c32011-05-28 20:58:38 +00003118 blk_start_plug(&plug);
Arne Jansena2de7332011-03-08 14:14:00 +01003119
Arne Jansena2de7332011-03-08 14:14:00 +01003120 /*
3121 * now find all extents for each stripe and scrub them
3122 */
Arne Jansena2de7332011-03-08 14:14:00 +01003123 ret = 0;
Wang Shilong3b080b22014-04-01 18:01:43 +08003124 while (physical < physical_end) {
3125 /* for raid56, we skip parity stripe */
3126 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
3127 BTRFS_BLOCK_GROUP_RAID6)) {
3128 ret = get_raid56_logic_offset(physical, num,
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003129 map, &logical, &stripe_logical);
Wang Shilong3b080b22014-04-01 18:01:43 +08003130 logical += base;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003131 if (ret) {
3132 stripe_logical += base;
3133 stripe_end = stripe_logical + increment - 1;
3134 ret = scrub_raid56_parity(sctx, map, scrub_dev,
3135 ppath, stripe_logical,
3136 stripe_end);
3137 if (ret)
3138 goto out;
Wang Shilong3b080b22014-04-01 18:01:43 +08003139 goto skip;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003140 }
Wang Shilong3b080b22014-04-01 18:01:43 +08003141 }
Arne Jansena2de7332011-03-08 14:14:00 +01003142 /*
3143 * canceled?
3144 */
3145 if (atomic_read(&fs_info->scrub_cancel_req) ||
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003146 atomic_read(&sctx->cancel_req)) {
Arne Jansena2de7332011-03-08 14:14:00 +01003147 ret = -ECANCELED;
3148 goto out;
3149 }
3150 /*
3151 * check to see if we have to pause
3152 */
3153 if (atomic_read(&fs_info->scrub_pause_req)) {
3154 /* push queued extents */
Stefan Behrensff023aa2012-11-06 11:43:11 +01003155 atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003156 scrub_submit(sctx);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003157 mutex_lock(&sctx->wr_ctx.wr_lock);
3158 scrub_wr_submit(sctx);
3159 mutex_unlock(&sctx->wr_ctx.wr_lock);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003160 wait_event(sctx->list_wait,
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01003161 atomic_read(&sctx->bios_in_flight) == 0);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003162 atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
Wang Shilong3cb09292013-12-04 21:15:19 +08003163 scrub_blocked_if_needed(fs_info);
Arne Jansena2de7332011-03-08 14:14:00 +01003164 }
3165
Wang Shilong7c76edb2014-01-12 21:38:32 +08003166 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
3167 key.type = BTRFS_METADATA_ITEM_KEY;
3168 else
3169 key.type = BTRFS_EXTENT_ITEM_KEY;
Arne Jansena2de7332011-03-08 14:14:00 +01003170 key.objectid = logical;
Liu Bo625f1c8d2013-04-27 02:56:57 +00003171 key.offset = (u64)-1;
Arne Jansena2de7332011-03-08 14:14:00 +01003172
3173 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3174 if (ret < 0)
3175 goto out;
Josef Bacik3173a182013-03-07 14:22:04 -05003176
Arne Jansen8c510322011-06-03 10:09:26 +02003177 if (ret > 0) {
Wang Shilongade2e0b2014-01-12 21:38:33 +08003178 ret = btrfs_previous_extent_item(root, path, 0);
Arne Jansena2de7332011-03-08 14:14:00 +01003179 if (ret < 0)
3180 goto out;
Arne Jansen8c510322011-06-03 10:09:26 +02003181 if (ret > 0) {
3182 /* there's no smaller item, so stick with the
3183 * larger one */
3184 btrfs_release_path(path);
3185 ret = btrfs_search_slot(NULL, root, &key,
3186 path, 0, 0);
3187 if (ret < 0)
3188 goto out;
3189 }
Arne Jansena2de7332011-03-08 14:14:00 +01003190 }
3191
Liu Bo625f1c8d2013-04-27 02:56:57 +00003192 stop_loop = 0;
Arne Jansena2de7332011-03-08 14:14:00 +01003193 while (1) {
Josef Bacik3173a182013-03-07 14:22:04 -05003194 u64 bytes;
3195
Arne Jansena2de7332011-03-08 14:14:00 +01003196 l = path->nodes[0];
3197 slot = path->slots[0];
3198 if (slot >= btrfs_header_nritems(l)) {
3199 ret = btrfs_next_leaf(root, path);
3200 if (ret == 0)
3201 continue;
3202 if (ret < 0)
3203 goto out;
3204
Liu Bo625f1c8d2013-04-27 02:56:57 +00003205 stop_loop = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003206 break;
3207 }
3208 btrfs_item_key_to_cpu(l, &key, slot);
3209
Josef Bacik3173a182013-03-07 14:22:04 -05003210 if (key.type == BTRFS_METADATA_ITEM_KEY)
David Sterba707e8a02014-06-04 19:22:26 +02003211 bytes = root->nodesize;
Josef Bacik3173a182013-03-07 14:22:04 -05003212 else
3213 bytes = key.offset;
3214
3215 if (key.objectid + bytes <= logical)
Arne Jansena2de7332011-03-08 14:14:00 +01003216 goto next;
3217
Liu Bo625f1c8d2013-04-27 02:56:57 +00003218 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3219 key.type != BTRFS_METADATA_ITEM_KEY)
3220 goto next;
Arne Jansena2de7332011-03-08 14:14:00 +01003221
Liu Bo625f1c8d2013-04-27 02:56:57 +00003222 if (key.objectid >= logical + map->stripe_len) {
3223 /* out of this device extent */
3224 if (key.objectid >= logic_end)
3225 stop_loop = 1;
3226 break;
3227 }
Arne Jansena2de7332011-03-08 14:14:00 +01003228
3229 extent = btrfs_item_ptr(l, slot,
3230 struct btrfs_extent_item);
3231 flags = btrfs_extent_flags(l, extent);
3232 generation = btrfs_extent_generation(l, extent);
3233
3234 if (key.objectid < logical &&
3235 (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
Frank Holtonefe120a2013-12-20 11:37:06 -05003236 btrfs_err(fs_info,
3237 "scrub: tree block %llu spanning "
3238 "stripes, ignored. logical=%llu",
Geert Uytterhoevenc1c9ff72013-08-20 13:20:07 +02003239 key.objectid, logical);
Arne Jansena2de7332011-03-08 14:14:00 +01003240 goto next;
3241 }
3242
Liu Bo625f1c8d2013-04-27 02:56:57 +00003243again:
3244 extent_logical = key.objectid;
3245 extent_len = bytes;
3246
Arne Jansena2de7332011-03-08 14:14:00 +01003247 /*
3248 * trim extent to this stripe
3249 */
Liu Bo625f1c8d2013-04-27 02:56:57 +00003250 if (extent_logical < logical) {
3251 extent_len -= logical - extent_logical;
3252 extent_logical = logical;
Arne Jansena2de7332011-03-08 14:14:00 +01003253 }
Liu Bo625f1c8d2013-04-27 02:56:57 +00003254 if (extent_logical + extent_len >
Arne Jansena2de7332011-03-08 14:14:00 +01003255 logical + map->stripe_len) {
Liu Bo625f1c8d2013-04-27 02:56:57 +00003256 extent_len = logical + map->stripe_len -
3257 extent_logical;
Arne Jansena2de7332011-03-08 14:14:00 +01003258 }
3259
Liu Bo625f1c8d2013-04-27 02:56:57 +00003260 extent_physical = extent_logical - logical + physical;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003261 extent_dev = scrub_dev;
3262 extent_mirror_num = mirror_num;
3263 if (is_dev_replace)
3264 scrub_remap_extent(fs_info, extent_logical,
3265 extent_len, &extent_physical,
3266 &extent_dev,
3267 &extent_mirror_num);
Liu Bo625f1c8d2013-04-27 02:56:57 +00003268
3269 ret = btrfs_lookup_csums_range(csum_root, logical,
3270 logical + map->stripe_len - 1,
3271 &sctx->csum_list, 1);
Arne Jansena2de7332011-03-08 14:14:00 +01003272 if (ret)
3273 goto out;
3274
Liu Bo625f1c8d2013-04-27 02:56:57 +00003275 ret = scrub_extent(sctx, extent_logical, extent_len,
3276 extent_physical, extent_dev, flags,
3277 generation, extent_mirror_num,
Stefan Behrens115930c2013-07-04 16:14:23 +02003278 extent_logical - logical + physical);
Liu Bo625f1c8d2013-04-27 02:56:57 +00003279 if (ret)
3280 goto out;
3281
Josef Bacikd88d46c2013-06-10 12:59:04 +00003282 scrub_free_csums(sctx);
Liu Bo625f1c8d2013-04-27 02:56:57 +00003283 if (extent_logical + extent_len <
3284 key.objectid + bytes) {
Wang Shilong3b080b22014-04-01 18:01:43 +08003285 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
3286 BTRFS_BLOCK_GROUP_RAID6)) {
3287 /*
3288				 * loop until we find the next data stripe
3289 * or we have finished all stripes.
3290 */
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003291loop:
3292 physical += map->stripe_len;
3293 ret = get_raid56_logic_offset(physical,
3294 num, map, &logical,
3295 &stripe_logical);
3296 logical += base;
3297
3298 if (ret && physical < physical_end) {
3299 stripe_logical += base;
3300 stripe_end = stripe_logical +
3301 increment - 1;
3302 ret = scrub_raid56_parity(sctx,
3303 map, scrub_dev, ppath,
3304 stripe_logical,
3305 stripe_end);
3306 if (ret)
3307 goto out;
3308 goto loop;
3309 }
Wang Shilong3b080b22014-04-01 18:01:43 +08003310 } else {
3311 physical += map->stripe_len;
3312 logical += increment;
3313 }
Liu Bo625f1c8d2013-04-27 02:56:57 +00003314 if (logical < key.objectid + bytes) {
3315 cond_resched();
3316 goto again;
3317 }
3318
Wang Shilong3b080b22014-04-01 18:01:43 +08003319 if (physical >= physical_end) {
Liu Bo625f1c8d2013-04-27 02:56:57 +00003320 stop_loop = 1;
3321 break;
3322 }
3323 }
Arne Jansena2de7332011-03-08 14:14:00 +01003324next:
3325 path->slots[0]++;
3326 }
Chris Mason71267332011-05-23 06:30:52 -04003327 btrfs_release_path(path);
Wang Shilong3b080b22014-04-01 18:01:43 +08003328skip:
Arne Jansena2de7332011-03-08 14:14:00 +01003329 logical += increment;
3330 physical += map->stripe_len;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003331 spin_lock(&sctx->stat_lock);
Liu Bo625f1c8d2013-04-27 02:56:57 +00003332 if (stop_loop)
3333 sctx->stat.last_physical = map->stripes[num].physical +
3334 length;
3335 else
3336 sctx->stat.last_physical = physical;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003337 spin_unlock(&sctx->stat_lock);
Liu Bo625f1c8d2013-04-27 02:56:57 +00003338 if (stop_loop)
3339 break;
Arne Jansena2de7332011-03-08 14:14:00 +01003340 }
Stefan Behrensff023aa2012-11-06 11:43:11 +01003341out:
Arne Jansena2de7332011-03-08 14:14:00 +01003342 /* push queued extents */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003343 scrub_submit(sctx);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003344 mutex_lock(&sctx->wr_ctx.wr_lock);
3345 scrub_wr_submit(sctx);
3346 mutex_unlock(&sctx->wr_ctx.wr_lock);
Arne Jansena2de7332011-03-08 14:14:00 +01003347
Arne Jansene7786c32011-05-28 20:58:38 +00003348 blk_finish_plug(&plug);
Arne Jansena2de7332011-03-08 14:14:00 +01003349 btrfs_free_path(path);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003350 btrfs_free_path(ppath);
Arne Jansena2de7332011-03-08 14:14:00 +01003351 return ret < 0 ? ret : 0;
3352}
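/*
 * Descriptive summary (added for clarity): scrub_stripe() walks one device
 * stripe of the chunk.  For RAID5/6 it hands complete stripe sets to
 * scrub_raid56_parity() and skips parity stripes here; for all other
 * profiles it trims each extent item to the current stripe, collects the
 * data checksums, and scrubs the result via scrub_extent().
 */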
3353
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003354static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003355 struct btrfs_device *scrub_dev,
3356 u64 chunk_tree, u64 chunk_objectid,
3357 u64 chunk_offset, u64 length,
Stefan Behrensff023aa2012-11-06 11:43:11 +01003358 u64 dev_offset, int is_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01003359{
3360 struct btrfs_mapping_tree *map_tree =
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003361 &sctx->dev_root->fs_info->mapping_tree;
Arne Jansena2de7332011-03-08 14:14:00 +01003362 struct map_lookup *map;
3363 struct extent_map *em;
3364 int i;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003365 int ret = 0;
Arne Jansena2de7332011-03-08 14:14:00 +01003366
3367 read_lock(&map_tree->map_tree.lock);
3368 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3369 read_unlock(&map_tree->map_tree.lock);
3370
3371 if (!em)
3372 return -EINVAL;
3373
3374 map = (struct map_lookup *)em->bdev;
3375 if (em->start != chunk_offset)
3376 goto out;
3377
3378 if (em->len < length)
3379 goto out;
3380
3381 for (i = 0; i < map->num_stripes; ++i) {
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003382 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
Arne Jansen859acaf2012-02-09 15:09:02 +01003383 map->stripes[i].physical == dev_offset) {
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003384 ret = scrub_stripe(sctx, map, scrub_dev, i,
Stefan Behrensff023aa2012-11-06 11:43:11 +01003385 chunk_offset, length,
3386 is_dev_replace);
Arne Jansena2de7332011-03-08 14:14:00 +01003387 if (ret)
3388 goto out;
3389 }
3390 }
3391out:
3392 free_extent_map(em);
3393
3394 return ret;
3395}
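/*
 * Note (added for clarity): the loop above matches on both the block
 * device and the physical dev_offset, because a DUP chunk places two
 * stripes on the same device; only the stripe that starts at this device
 * extent's offset is scrubbed by the current call.
 */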
3396
3397static noinline_for_stack
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003398int scrub_enumerate_chunks(struct scrub_ctx *sctx,
Stefan Behrensff023aa2012-11-06 11:43:11 +01003399 struct btrfs_device *scrub_dev, u64 start, u64 end,
3400 int is_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01003401{
3402 struct btrfs_dev_extent *dev_extent = NULL;
3403 struct btrfs_path *path;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003404 struct btrfs_root *root = sctx->dev_root;
Arne Jansena2de7332011-03-08 14:14:00 +01003405 struct btrfs_fs_info *fs_info = root->fs_info;
3406 u64 length;
3407 u64 chunk_tree;
3408 u64 chunk_objectid;
3409 u64 chunk_offset;
3410 int ret;
3411 int slot;
3412 struct extent_buffer *l;
3413 struct btrfs_key key;
3414 struct btrfs_key found_key;
3415 struct btrfs_block_group_cache *cache;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003416 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
Arne Jansena2de7332011-03-08 14:14:00 +01003417
3418 path = btrfs_alloc_path();
3419 if (!path)
3420 return -ENOMEM;
3421
3422 path->reada = 2;
3423 path->search_commit_root = 1;
3424 path->skip_locking = 1;
3425
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003426 key.objectid = scrub_dev->devid;
Arne Jansena2de7332011-03-08 14:14:00 +01003427 key.offset = 0ull;
3428 key.type = BTRFS_DEV_EXTENT_KEY;
3429
Arne Jansena2de7332011-03-08 14:14:00 +01003430 while (1) {
3431 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3432 if (ret < 0)
Arne Jansen8c510322011-06-03 10:09:26 +02003433 break;
3434 if (ret > 0) {
3435 if (path->slots[0] >=
3436 btrfs_header_nritems(path->nodes[0])) {
3437 ret = btrfs_next_leaf(root, path);
3438 if (ret)
3439 break;
3440 }
3441 }
Arne Jansena2de7332011-03-08 14:14:00 +01003442
3443 l = path->nodes[0];
3444 slot = path->slots[0];
3445
3446 btrfs_item_key_to_cpu(l, &found_key, slot);
3447
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003448 if (found_key.objectid != scrub_dev->devid)
Arne Jansena2de7332011-03-08 14:14:00 +01003449 break;
3450
David Sterba962a2982014-06-04 18:41:45 +02003451 if (found_key.type != BTRFS_DEV_EXTENT_KEY)
Arne Jansena2de7332011-03-08 14:14:00 +01003452 break;
3453
3454 if (found_key.offset >= end)
3455 break;
3456
3457 if (found_key.offset < key.offset)
3458 break;
3459
3460 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3461 length = btrfs_dev_extent_length(l, dev_extent);
3462
Qu Wenruoced96ed2014-06-19 10:42:51 +08003463 if (found_key.offset + length <= start)
3464 goto skip;
Arne Jansena2de7332011-03-08 14:14:00 +01003465
3466 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
3467 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
3468 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3469
3470 /*
3471 * get a reference on the corresponding block group to prevent
3472 * the chunk from going away while we scrub it
3473 */
3474 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
Qu Wenruoced96ed2014-06-19 10:42:51 +08003475
3476		/* some chunks are removed but not yet committed to disk;
3477		 * continue scrubbing */
3478 if (!cache)
3479 goto skip;
3480
Stefan Behrensff023aa2012-11-06 11:43:11 +01003481 dev_replace->cursor_right = found_key.offset + length;
3482 dev_replace->cursor_left = found_key.offset;
3483 dev_replace->item_needs_writeback = 1;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003484 ret = scrub_chunk(sctx, scrub_dev, chunk_tree, chunk_objectid,
Stefan Behrensff023aa2012-11-06 11:43:11 +01003485 chunk_offset, length, found_key.offset,
3486 is_dev_replace);
3487
3488 /*
3489 * flush, submit all pending read and write bios, afterwards
3490 * wait for them.
3491 * Note that in the dev replace case, a read request causes
3492 * write requests that are submitted in the read completion
3493 * worker. Therefore in the current situation, it is required
3494 * that all write requests are flushed, so that all read and
3495 * write requests are really completed when bios_in_flight
3496 * changes to 0.
3497 */
3498 atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
3499 scrub_submit(sctx);
3500 mutex_lock(&sctx->wr_ctx.wr_lock);
3501 scrub_wr_submit(sctx);
3502 mutex_unlock(&sctx->wr_ctx.wr_lock);
3503
3504 wait_event(sctx->list_wait,
3505 atomic_read(&sctx->bios_in_flight) == 0);
Wang Shilong12cf9372014-02-19 19:24:17 +08003506 atomic_inc(&fs_info->scrubs_paused);
3507 wake_up(&fs_info->scrub_pause_wait);
3508
3509 /*
3510 * must be called before we decrease @scrub_paused.
3511 * make sure we don't block transaction commit while
3512		 * we are waiting for pending workers to finish. */
3513 */
Stefan Behrensff023aa2012-11-06 11:43:11 +01003514 wait_event(sctx->list_wait,
3515 atomic_read(&sctx->workers_pending) == 0);
Wang Shilong12cf9372014-02-19 19:24:17 +08003516 atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
3517
3518 mutex_lock(&fs_info->scrub_lock);
3519 __scrub_blocked_if_needed(fs_info);
3520 atomic_dec(&fs_info->scrubs_paused);
3521 mutex_unlock(&fs_info->scrub_lock);
3522 wake_up(&fs_info->scrub_pause_wait);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003523
Arne Jansena2de7332011-03-08 14:14:00 +01003524 btrfs_put_block_group(cache);
3525 if (ret)
3526 break;
Stefan Behrensaf1be4f2012-11-27 17:39:51 +00003527 if (is_dev_replace &&
3528 atomic64_read(&dev_replace->num_write_errors) > 0) {
Stefan Behrensff023aa2012-11-06 11:43:11 +01003529 ret = -EIO;
3530 break;
3531 }
3532 if (sctx->stat.malloc_errors > 0) {
3533 ret = -ENOMEM;
3534 break;
3535 }
Arne Jansena2de7332011-03-08 14:14:00 +01003536
Ilya Dryomov539f3582013-10-07 13:42:57 +03003537 dev_replace->cursor_left = dev_replace->cursor_right;
3538 dev_replace->item_needs_writeback = 1;
Qu Wenruoced96ed2014-06-19 10:42:51 +08003539skip:
Arne Jansena2de7332011-03-08 14:14:00 +01003540 key.offset = found_key.offset + length;
Chris Mason71267332011-05-23 06:30:52 -04003541 btrfs_release_path(path);
Arne Jansena2de7332011-03-08 14:14:00 +01003542 }
3543
Arne Jansena2de7332011-03-08 14:14:00 +01003544 btrfs_free_path(path);
Arne Jansen8c510322011-06-03 10:09:26 +02003545
3546 /*
3547 * ret can still be 1 from search_slot or next_leaf,
3548 * that's not an error
3549 */
3550 return ret < 0 ? ret : 0;
Arne Jansena2de7332011-03-08 14:14:00 +01003551}
3552
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003553static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
3554 struct btrfs_device *scrub_dev)
Arne Jansena2de7332011-03-08 14:14:00 +01003555{
3556 int i;
3557 u64 bytenr;
3558 u64 gen;
3559 int ret;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003560 struct btrfs_root *root = sctx->dev_root;
Arne Jansena2de7332011-03-08 14:14:00 +01003561
Miao Xie87533c42013-01-29 10:14:48 +00003562 if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
Jeff Mahoney79787ea2012-03-12 16:03:00 +01003563 return -EIO;
3564
Miao Xie5f546062014-07-24 11:37:09 +08003565	/* Seed devices of a new filesystem have their own generation. */
3566 if (scrub_dev->fs_devices != root->fs_info->fs_devices)
3567 gen = scrub_dev->generation;
3568 else
3569 gen = root->fs_info->last_trans_committed;
Arne Jansena2de7332011-03-08 14:14:00 +01003570
3571 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
3572 bytenr = btrfs_sb_offset(i);
Miao Xie935e5cc2014-09-03 21:35:33 +08003573 if (bytenr + BTRFS_SUPER_INFO_SIZE >
3574 scrub_dev->commit_total_bytes)
Arne Jansena2de7332011-03-08 14:14:00 +01003575 break;
3576
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003577 ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003578 scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
Stefan Behrensff023aa2012-11-06 11:43:11 +01003579 NULL, 1, bytenr);
Arne Jansena2de7332011-03-08 14:14:00 +01003580 if (ret)
3581 return ret;
3582 }
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01003583 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
Arne Jansena2de7332011-03-08 14:14:00 +01003584
3585 return 0;
3586}
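/*
 * Note (added for clarity): btrfs_sb_offset() yields the well-known
 * superblock copies at 64 KiB, 64 MiB and 256 GiB; copies that would lie
 * beyond the device's committed size are simply skipped above.
 */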
3587
3588/*
3589 * get a reference count on fs_info->scrub_workers; start workers if necessary
3590 */
Stefan Behrensff023aa2012-11-06 11:43:11 +01003591static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
3592 int is_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01003593{
Josef Bacik0dc3b842011-11-18 14:37:27 -05003594 int ret = 0;
Qu Wenruo0339ef22014-02-28 10:46:17 +08003595 int flags = WQ_FREEZABLE | WQ_UNBOUND;
3596 int max_active = fs_info->thread_pool_size;
Arne Jansena2de7332011-03-08 14:14:00 +01003597
Arne Jansen632dd772011-06-10 12:07:07 +02003598 if (fs_info->scrub_workers_refcnt == 0) {
Stefan Behrensff023aa2012-11-06 11:43:11 +01003599 if (is_dev_replace)
Qu Wenruo0339ef22014-02-28 10:46:17 +08003600 fs_info->scrub_workers =
3601 btrfs_alloc_workqueue("btrfs-scrub", flags,
3602 1, 4);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003603 else
Qu Wenruo0339ef22014-02-28 10:46:17 +08003604 fs_info->scrub_workers =
3605 btrfs_alloc_workqueue("btrfs-scrub", flags,
3606 max_active, 4);
3607 if (!fs_info->scrub_workers) {
3608 ret = -ENOMEM;
Josef Bacik0dc3b842011-11-18 14:37:27 -05003609 goto out;
Qu Wenruo0339ef22014-02-28 10:46:17 +08003610 }
3611 fs_info->scrub_wr_completion_workers =
3612 btrfs_alloc_workqueue("btrfs-scrubwrc", flags,
3613 max_active, 2);
3614 if (!fs_info->scrub_wr_completion_workers) {
3615 ret = -ENOMEM;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003616 goto out;
Qu Wenruo0339ef22014-02-28 10:46:17 +08003617 }
3618 fs_info->scrub_nocow_workers =
3619 btrfs_alloc_workqueue("btrfs-scrubnc", flags, 1, 0);
3620 if (!fs_info->scrub_nocow_workers) {
3621 ret = -ENOMEM;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003622 goto out;
Qu Wenruo0339ef22014-02-28 10:46:17 +08003623 }
Arne Jansen632dd772011-06-10 12:07:07 +02003624 }
Arne Jansena2de7332011-03-08 14:14:00 +01003625 ++fs_info->scrub_workers_refcnt;
Josef Bacik0dc3b842011-11-18 14:37:27 -05003626out:
Josef Bacik0dc3b842011-11-18 14:37:27 -05003627 return ret;
Arne Jansena2de7332011-03-08 14:14:00 +01003628}
3629
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003630static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
Arne Jansena2de7332011-03-08 14:14:00 +01003631{
Stefan Behrensff023aa2012-11-06 11:43:11 +01003632 if (--fs_info->scrub_workers_refcnt == 0) {
Qu Wenruo0339ef22014-02-28 10:46:17 +08003633 btrfs_destroy_workqueue(fs_info->scrub_workers);
3634 btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
3635 btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003636 }
Arne Jansena2de7332011-03-08 14:14:00 +01003637 WARN_ON(fs_info->scrub_workers_refcnt < 0);
Arne Jansena2de7332011-03-08 14:14:00 +01003638}
3639
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003640int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
3641 u64 end, struct btrfs_scrub_progress *progress,
Stefan Behrens63a212a2012-11-05 18:29:28 +01003642 int readonly, int is_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01003643{
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003644 struct scrub_ctx *sctx;
Arne Jansena2de7332011-03-08 14:14:00 +01003645 int ret;
3646 struct btrfs_device *dev;
Miao Xie5d68da32014-07-24 11:37:07 +08003647 struct rcu_string *name;
Arne Jansena2de7332011-03-08 14:14:00 +01003648
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003649 if (btrfs_fs_closing(fs_info))
Arne Jansena2de7332011-03-08 14:14:00 +01003650 return -EINVAL;
3651
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003652 if (fs_info->chunk_root->nodesize > BTRFS_STRIPE_LEN) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04003653 /*
3654	 * in this case scrub is unable to calculate the checksum
3655	 * the way it is implemented. Do not handle this
3656	 * situation at all because it should never happen.
3657 */
Frank Holtonefe120a2013-12-20 11:37:06 -05003658 btrfs_err(fs_info,
3659 "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003660 fs_info->chunk_root->nodesize, BTRFS_STRIPE_LEN);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04003661 return -EINVAL;
3662 }
3663
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003664 if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04003665 /* not supported for data w/o checksums */
Frank Holtonefe120a2013-12-20 11:37:06 -05003666 btrfs_err(fs_info,
3667 "scrub: size assumption sectorsize != PAGE_SIZE "
3668 "(%d != %lu) fails",
Geert Uytterhoeven27f9f022013-08-20 13:20:09 +02003669 fs_info->chunk_root->sectorsize, PAGE_SIZE);
Arne Jansena2de7332011-03-08 14:14:00 +01003670 return -EINVAL;
3671 }
3672
Stefan Behrens7a9e9982012-11-02 14:58:04 +01003673 if (fs_info->chunk_root->nodesize >
3674 PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
3675 fs_info->chunk_root->sectorsize >
3676 PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
3677 /*
3678 * would exhaust the array bounds of pagev member in
3679 * struct scrub_block
3680 */
Frank Holtonefe120a2013-12-20 11:37:06 -05003681 btrfs_err(fs_info, "scrub: size assumption nodesize and sectorsize "
3682 "<= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
Stefan Behrens7a9e9982012-11-02 14:58:04 +01003683 fs_info->chunk_root->nodesize,
3684 SCRUB_MAX_PAGES_PER_BLOCK,
3685 fs_info->chunk_root->sectorsize,
3686 SCRUB_MAX_PAGES_PER_BLOCK);
3687 return -EINVAL;
3688 }
3689
Arne Jansena2de7332011-03-08 14:14:00 +01003690
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003691 mutex_lock(&fs_info->fs_devices->device_list_mutex);
3692 dev = btrfs_find_device(fs_info, devid, NULL, NULL);
Stefan Behrens63a212a2012-11-05 18:29:28 +01003693 if (!dev || (dev->missing && !is_dev_replace)) {
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003694 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
Arne Jansena2de7332011-03-08 14:14:00 +01003695 return -ENODEV;
3696 }
Arne Jansena2de7332011-03-08 14:14:00 +01003697
Miao Xie5d68da32014-07-24 11:37:07 +08003698 if (!is_dev_replace && !readonly && !dev->writeable) {
3699 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3700 rcu_read_lock();
3701 name = rcu_dereference(dev->name);
3702 btrfs_err(fs_info, "scrub: device %s is not writable",
3703 name->str);
3704 rcu_read_unlock();
3705 return -EROFS;
3706 }
3707
Wang Shilong3b7a0162013-10-12 02:11:12 +08003708 mutex_lock(&fs_info->scrub_lock);
Stefan Behrens63a212a2012-11-05 18:29:28 +01003709 if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
Arne Jansena2de7332011-03-08 14:14:00 +01003710 mutex_unlock(&fs_info->scrub_lock);
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003711 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003712 return -EIO;
Arne Jansena2de7332011-03-08 14:14:00 +01003713 }
3714
Stefan Behrens8dabb742012-11-06 13:15:27 +01003715 btrfs_dev_replace_lock(&fs_info->dev_replace);
3716 if (dev->scrub_device ||
3717 (!is_dev_replace &&
3718 btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
3719 btrfs_dev_replace_unlock(&fs_info->dev_replace);
Arne Jansena2de7332011-03-08 14:14:00 +01003720 mutex_unlock(&fs_info->scrub_lock);
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003721 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
Arne Jansena2de7332011-03-08 14:14:00 +01003722 return -EINPROGRESS;
3723 }
Stefan Behrens8dabb742012-11-06 13:15:27 +01003724 btrfs_dev_replace_unlock(&fs_info->dev_replace);
Wang Shilong3b7a0162013-10-12 02:11:12 +08003725
3726 ret = scrub_workers_get(fs_info, is_dev_replace);
3727 if (ret) {
3728 mutex_unlock(&fs_info->scrub_lock);
3729 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3730 return ret;
3731 }
3732
Stefan Behrens63a212a2012-11-05 18:29:28 +01003733 sctx = scrub_setup_ctx(dev, is_dev_replace);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003734 if (IS_ERR(sctx)) {
Arne Jansena2de7332011-03-08 14:14:00 +01003735 mutex_unlock(&fs_info->scrub_lock);
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003736 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3737 scrub_workers_put(fs_info);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003738 return PTR_ERR(sctx);
Arne Jansena2de7332011-03-08 14:14:00 +01003739 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003740 sctx->readonly = readonly;
3741 dev->scrub_device = sctx;
Wang Shilong3cb09292013-12-04 21:15:19 +08003742 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
Arne Jansena2de7332011-03-08 14:14:00 +01003743
Wang Shilong3cb09292013-12-04 21:15:19 +08003744 /*
3745	 * by checking @scrub_pause_req here, we can avoid
3746	 * a race between transaction commit and scrubbing.
3747 */
Wang Shilongcb7ab022013-12-04 21:16:53 +08003748 __scrub_blocked_if_needed(fs_info);
Arne Jansena2de7332011-03-08 14:14:00 +01003749 atomic_inc(&fs_info->scrubs_running);
3750 mutex_unlock(&fs_info->scrub_lock);
Arne Jansena2de7332011-03-08 14:14:00 +01003751
Stefan Behrensff023aa2012-11-06 11:43:11 +01003752 if (!is_dev_replace) {
Wang Shilong9b011ad2013-10-25 19:12:02 +08003753 /*
3754	 * by holding the device list mutex, we can
3755	 * kick off writing the super block in log tree sync.
3756 */
Wang Shilong3cb09292013-12-04 21:15:19 +08003757 mutex_lock(&fs_info->fs_devices->device_list_mutex);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003758 ret = scrub_supers(sctx, dev);
Wang Shilong3cb09292013-12-04 21:15:19 +08003759 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003760 }
Arne Jansena2de7332011-03-08 14:14:00 +01003761
3762 if (!ret)
Stefan Behrensff023aa2012-11-06 11:43:11 +01003763 ret = scrub_enumerate_chunks(sctx, dev, start, end,
3764 is_dev_replace);
Arne Jansena2de7332011-03-08 14:14:00 +01003765
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01003766 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
Arne Jansena2de7332011-03-08 14:14:00 +01003767 atomic_dec(&fs_info->scrubs_running);
3768 wake_up(&fs_info->scrub_pause_wait);
3769
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01003770 wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
Jan Schmidt0ef8e452011-06-13 20:04:15 +02003771
Arne Jansena2de7332011-03-08 14:14:00 +01003772 if (progress)
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003773 memcpy(progress, &sctx->stat, sizeof(*progress));
Arne Jansena2de7332011-03-08 14:14:00 +01003774
3775 mutex_lock(&fs_info->scrub_lock);
3776 dev->scrub_device = NULL;
Wang Shilong3b7a0162013-10-12 02:11:12 +08003777 scrub_workers_put(fs_info);
Arne Jansena2de7332011-03-08 14:14:00 +01003778 mutex_unlock(&fs_info->scrub_lock);
3779
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003780 scrub_free_ctx(sctx);
Arne Jansena2de7332011-03-08 14:14:00 +01003781
3782 return ret;
3783}
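/*
 * Lock ordering used above (descriptive note, derived from this function):
 * fs_devices->device_list_mutex is taken before fs_info->scrub_lock, which
 * in turn is taken before the dev_replace lock.  The device list mutex is
 * also re-taken around scrub_supers() so that super block writes in the
 * log tree sync cannot race with scrubbing them.
 */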
3784
Jeff Mahoney143bede2012-03-01 14:56:26 +01003785void btrfs_scrub_pause(struct btrfs_root *root)
Arne Jansena2de7332011-03-08 14:14:00 +01003786{
3787 struct btrfs_fs_info *fs_info = root->fs_info;
3788
3789 mutex_lock(&fs_info->scrub_lock);
3790 atomic_inc(&fs_info->scrub_pause_req);
3791 while (atomic_read(&fs_info->scrubs_paused) !=
3792 atomic_read(&fs_info->scrubs_running)) {
3793 mutex_unlock(&fs_info->scrub_lock);
3794 wait_event(fs_info->scrub_pause_wait,
3795 atomic_read(&fs_info->scrubs_paused) ==
3796 atomic_read(&fs_info->scrubs_running));
3797 mutex_lock(&fs_info->scrub_lock);
3798 }
3799 mutex_unlock(&fs_info->scrub_lock);
Arne Jansena2de7332011-03-08 14:14:00 +01003800}
3801
Jeff Mahoney143bede2012-03-01 14:56:26 +01003802void btrfs_scrub_continue(struct btrfs_root *root)
Arne Jansena2de7332011-03-08 14:14:00 +01003803{
3804 struct btrfs_fs_info *fs_info = root->fs_info;
3805
3806 atomic_dec(&fs_info->scrub_pause_req);
3807 wake_up(&fs_info->scrub_pause_wait);
Arne Jansena2de7332011-03-08 14:14:00 +01003808}
3809
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003810int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
Arne Jansena2de7332011-03-08 14:14:00 +01003811{
Arne Jansena2de7332011-03-08 14:14:00 +01003812 mutex_lock(&fs_info->scrub_lock);
3813 if (!atomic_read(&fs_info->scrubs_running)) {
3814 mutex_unlock(&fs_info->scrub_lock);
3815 return -ENOTCONN;
3816 }
3817
3818 atomic_inc(&fs_info->scrub_cancel_req);
3819 while (atomic_read(&fs_info->scrubs_running)) {
3820 mutex_unlock(&fs_info->scrub_lock);
3821 wait_event(fs_info->scrub_pause_wait,
3822 atomic_read(&fs_info->scrubs_running) == 0);
3823 mutex_lock(&fs_info->scrub_lock);
3824 }
3825 atomic_dec(&fs_info->scrub_cancel_req);
3826 mutex_unlock(&fs_info->scrub_lock);
3827
3828 return 0;
3829}
3830
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003831int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
3832 struct btrfs_device *dev)
Jeff Mahoney49b25e02012-03-01 17:24:58 +01003833{
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003834 struct scrub_ctx *sctx;
Arne Jansena2de7332011-03-08 14:14:00 +01003835
3836 mutex_lock(&fs_info->scrub_lock);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003837 sctx = dev->scrub_device;
3838 if (!sctx) {
Arne Jansena2de7332011-03-08 14:14:00 +01003839 mutex_unlock(&fs_info->scrub_lock);
3840 return -ENOTCONN;
3841 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003842 atomic_inc(&sctx->cancel_req);
Arne Jansena2de7332011-03-08 14:14:00 +01003843 while (dev->scrub_device) {
3844 mutex_unlock(&fs_info->scrub_lock);
3845 wait_event(fs_info->scrub_pause_wait,
3846 dev->scrub_device == NULL);
3847 mutex_lock(&fs_info->scrub_lock);
3848 }
3849 mutex_unlock(&fs_info->scrub_lock);
3850
3851 return 0;
3852}
Stefan Behrens1623ede2012-03-27 14:21:26 -04003853
Arne Jansena2de7332011-03-08 14:14:00 +01003854int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
3855 struct btrfs_scrub_progress *progress)
3856{
3857 struct btrfs_device *dev;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003858 struct scrub_ctx *sctx = NULL;
Arne Jansena2de7332011-03-08 14:14:00 +01003859
3860 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003861 dev = btrfs_find_device(root->fs_info, devid, NULL, NULL);
Arne Jansena2de7332011-03-08 14:14:00 +01003862 if (dev)
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003863 sctx = dev->scrub_device;
3864 if (sctx)
3865 memcpy(progress, &sctx->stat, sizeof(*progress));
Arne Jansena2de7332011-03-08 14:14:00 +01003866 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
3867
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003868 return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
Arne Jansena2de7332011-03-08 14:14:00 +01003869}
Stefan Behrensff023aa2012-11-06 11:43:11 +01003870
3871static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
3872 u64 extent_logical, u64 extent_len,
3873 u64 *extent_physical,
3874 struct btrfs_device **extent_dev,
3875 int *extent_mirror_num)
3876{
3877 u64 mapped_length;
3878 struct btrfs_bio *bbio = NULL;
3879 int ret;
3880
3881 mapped_length = extent_len;
3882 ret = btrfs_map_block(fs_info, READ, extent_logical,
3883 &mapped_length, &bbio, 0);
3884 if (ret || !bbio || mapped_length < extent_len ||
3885 !bbio->stripes[0].dev->bdev) {
3886 kfree(bbio);
3887 return;
3888 }
3889
3890 *extent_physical = bbio->stripes[0].physical;
3891 *extent_mirror_num = bbio->mirror_num;
3892 *extent_dev = bbio->stripes[0].dev;
3893 kfree(bbio);
3894}
3895
3896static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
3897 struct scrub_wr_ctx *wr_ctx,
3898 struct btrfs_fs_info *fs_info,
3899 struct btrfs_device *dev,
3900 int is_dev_replace)
3901{
3902 WARN_ON(wr_ctx->wr_curr_bio != NULL);
3903
3904 mutex_init(&wr_ctx->wr_lock);
3905 wr_ctx->wr_curr_bio = NULL;
3906 if (!is_dev_replace)
3907 return 0;
3908
3909 WARN_ON(!dev->bdev);
3910 wr_ctx->pages_per_wr_bio = min_t(int, SCRUB_PAGES_PER_WR_BIO,
3911 bio_get_nr_vecs(dev->bdev));
3912 wr_ctx->tgtdev = dev;
3913 atomic_set(&wr_ctx->flush_all_writes, 0);
3914 return 0;
3915}
3916
3917static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
3918{
3919 mutex_lock(&wr_ctx->wr_lock);
3920 kfree(wr_ctx->wr_curr_bio);
3921 wr_ctx->wr_curr_bio = NULL;
3922 mutex_unlock(&wr_ctx->wr_lock);
3923}
3924
3925static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
3926 int mirror_num, u64 physical_for_dev_replace)
3927{
3928 struct scrub_copy_nocow_ctx *nocow_ctx;
3929 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
3930
3931 nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
3932 if (!nocow_ctx) {
3933 spin_lock(&sctx->stat_lock);
3934 sctx->stat.malloc_errors++;
3935 spin_unlock(&sctx->stat_lock);
3936 return -ENOMEM;
3937 }
3938
3939 scrub_pending_trans_workers_inc(sctx);
3940
3941 nocow_ctx->sctx = sctx;
3942 nocow_ctx->logical = logical;
3943 nocow_ctx->len = len;
3944 nocow_ctx->mirror_num = mirror_num;
3945 nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
Liu Bo9e0af232014-08-15 23:36:53 +08003946 btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper,
3947 copy_nocow_pages_worker, NULL, NULL);
Josef Bacik652f25a2013-09-12 16:58:28 -04003948 INIT_LIST_HEAD(&nocow_ctx->inodes);
Qu Wenruo0339ef22014-02-28 10:46:17 +08003949 btrfs_queue_work(fs_info->scrub_nocow_workers,
3950 &nocow_ctx->work);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003951
3952 return 0;
3953}
3954
Josef Bacik652f25a2013-09-12 16:58:28 -04003955static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
3956{
3957 struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
3958 struct scrub_nocow_inode *nocow_inode;
3959
3960 nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
3961 if (!nocow_inode)
3962 return -ENOMEM;
3963 nocow_inode->inum = inum;
3964 nocow_inode->offset = offset;
3965 nocow_inode->root = root;
3966 list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
3967 return 0;
3968}
3969
3970#define COPY_COMPLETE 1
3971
Stefan Behrensff023aa2012-11-06 11:43:11 +01003972static void copy_nocow_pages_worker(struct btrfs_work *work)
3973{
3974 struct scrub_copy_nocow_ctx *nocow_ctx =
3975 container_of(work, struct scrub_copy_nocow_ctx, work);
3976 struct scrub_ctx *sctx = nocow_ctx->sctx;
3977 u64 logical = nocow_ctx->logical;
3978 u64 len = nocow_ctx->len;
3979 int mirror_num = nocow_ctx->mirror_num;
3980 u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
3981 int ret;
3982 struct btrfs_trans_handle *trans = NULL;
3983 struct btrfs_fs_info *fs_info;
3984 struct btrfs_path *path;
3985 struct btrfs_root *root;
3986 int not_written = 0;
3987
3988 fs_info = sctx->dev_root->fs_info;
3989 root = fs_info->extent_root;
3990
3991 path = btrfs_alloc_path();
3992 if (!path) {
3993 spin_lock(&sctx->stat_lock);
3994 sctx->stat.malloc_errors++;
3995 spin_unlock(&sctx->stat_lock);
3996 not_written = 1;
3997 goto out;
3998 }
3999
4000 trans = btrfs_join_transaction(root);
4001 if (IS_ERR(trans)) {
4002 not_written = 1;
4003 goto out;
4004 }
4005
4006 ret = iterate_inodes_from_logical(logical, fs_info, path,
Josef Bacik652f25a2013-09-12 16:58:28 -04004007 record_inode_for_nocow, nocow_ctx);
Stefan Behrensff023aa2012-11-06 11:43:11 +01004008 if (ret != 0 && ret != -ENOENT) {
Frank Holtonefe120a2013-12-20 11:37:06 -05004009 btrfs_warn(fs_info, "iterate_inodes_from_logical() failed: log %llu, "
4010 "phys %llu, len %llu, mir %u, ret %d",
Geert Uytterhoeven118a0a22013-08-20 13:20:10 +02004011 logical, physical_for_dev_replace, len, mirror_num,
4012 ret);
Stefan Behrensff023aa2012-11-06 11:43:11 +01004013 not_written = 1;
4014 goto out;
4015 }
4016
Josef Bacik652f25a2013-09-12 16:58:28 -04004017 btrfs_end_transaction(trans, root);
4018 trans = NULL;
4019 while (!list_empty(&nocow_ctx->inodes)) {
4020 struct scrub_nocow_inode *entry;
4021 entry = list_first_entry(&nocow_ctx->inodes,
4022 struct scrub_nocow_inode,
4023 list);
4024 list_del_init(&entry->list);
4025 ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
4026 entry->root, nocow_ctx);
4027 kfree(entry);
4028 if (ret == COPY_COMPLETE) {
4029 ret = 0;
4030 break;
4031 } else if (ret) {
4032 break;
4033 }
4034 }
Stefan Behrensff023aa2012-11-06 11:43:11 +01004035out:
Josef Bacik652f25a2013-09-12 16:58:28 -04004036 while (!list_empty(&nocow_ctx->inodes)) {
4037 struct scrub_nocow_inode *entry;
4038 entry = list_first_entry(&nocow_ctx->inodes,
4039 struct scrub_nocow_inode,
4040 list);
4041 list_del_init(&entry->list);
4042 kfree(entry);
4043 }
Stefan Behrensff023aa2012-11-06 11:43:11 +01004044 if (trans && !IS_ERR(trans))
4045 btrfs_end_transaction(trans, root);
4046 if (not_written)
4047 btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
4048 num_uncorrectable_read_errors);
4049
4050 btrfs_free_path(path);
4051 kfree(nocow_ctx);
4052
4053 scrub_pending_trans_workers_dec(sctx);
4054}
4055
static int check_extent_to_block(struct inode *inode, u64 start, u64 len,
				 u64 logical)
{
	struct extent_state *cached_state = NULL;
	struct btrfs_ordered_extent *ordered;
	struct extent_io_tree *io_tree;
	struct extent_map *em;
	u64 lockstart = start, lockend = start + len - 1;
	int ret = 0;

	io_tree = &BTRFS_I(inode)->io_tree;

	lock_extent_bits(io_tree, lockstart, lockend, 0, &cached_state);
	ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
	if (ordered) {
		btrfs_put_ordered_extent(ordered);
		ret = 1;
		goto out_unlock;
	}

	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_unlock;
	}

	/*
	 * This extent does not actually cover the logical extent anymore,
	 * move on to the next inode.
	 */
	if (em->block_start > logical ||
	    em->block_start + em->block_len < logical + len) {
		free_extent_map(em);
		ret = 1;
		goto out_unlock;
	}
	free_extent_map(em);

out_unlock:
	unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
			     GFP_NOFS);
	return ret;
}

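/*
 * Copy the data of one nocow inode's file range, page by page, to the
 * dev-replace target at the physical offset recorded in @nocow_ctx.
 * The subvolume root and inode are looked up under subvol_srcu, and
 * i_mutex is held with in-flight direct I/O drained so truncate, dio
 * and punch-hole cannot change the pages while they are copied. The
 * extent is re-checked with check_extent_to_block() before each page
 * is written, since a CoW write may relocate it at any time.
 *
 * Returns COPY_COMPLETE when the whole range has been copied, 0 when
 * the extent no longer belongs to this inode (the caller tries the
 * next candidate), or a negative errno.
 */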
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
				      struct scrub_copy_nocow_ctx *nocow_ctx)
{
	struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
	struct btrfs_key key;
	struct inode *inode;
	struct page *page;
	struct btrfs_root *local_root;
	struct extent_io_tree *io_tree;
	u64 physical_for_dev_replace;
	u64 nocow_ctx_logical;
	u64 len = nocow_ctx->len;
	unsigned long index;
	int srcu_index;
	int ret = 0;
	int err = 0;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(local_root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return PTR_ERR(local_root);
	}

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	/* Avoid truncate/dio/punch hole... */
	mutex_lock(&inode->i_mutex);
	inode_dio_wait(inode);

	physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
	io_tree = &BTRFS_I(inode)->io_tree;
	nocow_ctx_logical = nocow_ctx->logical;

	ret = check_extent_to_block(inode, offset, len, nocow_ctx_logical);
	if (ret) {
		ret = ret > 0 ? 0 : ret;
		goto out;
	}

	while (len >= PAGE_CACHE_SIZE) {
		index = offset >> PAGE_CACHE_SHIFT;
again:
		page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
		if (!page) {
			btrfs_err(fs_info, "find_or_create_page() failed");
			ret = -ENOMEM;
			goto out;
		}

		if (PageUptodate(page)) {
			if (PageDirty(page))
				goto next_page;
		} else {
			ClearPageError(page);
			err = extent_read_full_page(io_tree, page,
						    btrfs_get_extent,
						    nocow_ctx->mirror_num);
			if (err) {
				ret = err;
				goto next_page;
			}

			lock_page(page);
			/*
			 * If the page has been removed from the page cache,
			 * the data on it is meaningless: it may be stale,
			 * and the new data may have been written into a new
			 * page in the page cache.
			 */
			if (page->mapping != inode->i_mapping) {
				unlock_page(page);
				page_cache_release(page);
				goto again;
			}
			if (!PageUptodate(page)) {
				ret = -EIO;
				goto next_page;
			}
		}

		ret = check_extent_to_block(inode, offset, len,
					    nocow_ctx_logical);
		if (ret) {
			ret = ret > 0 ? 0 : ret;
			goto next_page;
		}

		err = write_page_nocow(nocow_ctx->sctx,
				       physical_for_dev_replace, page);
		if (err)
			ret = err;
next_page:
		unlock_page(page);
		page_cache_release(page);

		if (ret)
			break;

		offset += PAGE_CACHE_SIZE;
		physical_for_dev_replace += PAGE_CACHE_SIZE;
		nocow_ctx_logical += PAGE_CACHE_SIZE;
		len -= PAGE_CACHE_SIZE;
	}
	ret = COPY_COMPLETE;
out:
	mutex_unlock(&inode->i_mutex);
	iput(inode);
	return ret;
}

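/*
 * Synchronously write a single page to the dev-replace target device
 * at the given physical byte offset. A one-page bio is submitted with
 * WRITE_SYNC; on failure the device write-error statistic is bumped.
 *
 * Returns 0 on success, -ENOMEM if the bio cannot be allocated, and
 * -EIO on a missing target bdev or a failed write.
 */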
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page)
{
	struct bio *bio;
	struct btrfs_device *dev;
	int ret;

	dev = sctx->wr_ctx.tgtdev;
	if (!dev)
		return -EIO;
	if (!dev->bdev) {
		printk_ratelimited(KERN_WARNING
			"BTRFS: scrub write_page_nocow(bdev == NULL) is unexpected!\n");
		return -EIO;
	}
	bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
	if (!bio) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}
	bio->bi_iter.bi_size = 0;
	bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
	bio->bi_bdev = dev->bdev;
	ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
	if (ret != PAGE_CACHE_SIZE) {
leave_with_eio:
		bio_put(bio);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
		return -EIO;
	}

	if (btrfsic_submit_bio_wait(WRITE_SYNC, bio))
		goto leave_with_eio;

	bio_put(bio);
	return 0;
}