/*
 * raid10.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 2000-2004 Neil Brown
 *
 * RAID-10 support for md.
 *
 * Based on code in raid1.c.  See raid1.c for further copyright information.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include "md.h"
#include "raid10.h"
#include "raid0.h"
#include "bitmap.h"

/*
 * RAID10 provides a combination of RAID0 and RAID1 functionality.
 * The layout of data is defined by
 *    chunk_size
 *    raid_disks
 *    near_copies (stored in low byte of layout)
 *    far_copies (stored in second byte of layout)
 *    far_offset (stored in bit 16 of layout)
 *
 * The data to be stored is divided into chunks using chunksize.
 * Each device is divided into far_copies sections.
 * In each section, chunks are laid out in a style similar to raid0, but
 * near_copies copies of each chunk are stored (each on a different drive).
 * The starting device for each section is offset near_copies from the starting
 * device of the previous section.
 * Thus there are (near_copies*far_copies) copies of each chunk, and each
 * is on a different drive.
 * near_copies and far_copies must be at least one, and their product is at most
 * raid_disks.
 *
 * If far_offset is true, then the far_copies are handled a bit differently.
 * The copies are still in different stripes, but instead of being very far
 * apart on disk, they are adjacent stripes.
 */
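
/*
 * A small worked example of the layout above (illustrative only; the
 * geometry values are hypothetical, not defaults): with raid_disks=4,
 * near_copies=2, far_copies=1 and chunks A, B, C, ... the chunks are
 * placed
 *
 *	dev0  dev1  dev2  dev3
 *	 A     A     B     B
 *	 C     C     D     D
 *	 ...
 *
 * i.e. raid0 striping of chunk pairs, with each chunk mirrored on the
 * next drive.  Setting far_copies=2 would append a second section to
 * every device holding the same chunks again, shifted by near_copies
 * devices.
 */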

/*
 * Number of guaranteed r10bios in case of extreme VM load:
 */
#define NR_RAID10_BIOS 256

/* when we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error.  To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
 */
#define IO_BLOCKED ((struct bio *)1)
/* When we successfully write to a known bad-block, we need to remove the
 * bad-block marking which must be done from process context.  So we record
 * the success by setting devs[n].bio to IO_MADE_GOOD
 */
#define IO_MADE_GOOD ((struct bio *)2)

#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
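
/* Note that BIO_SPECIAL() is also true for a NULL pointer, so a single
 * test covers all three sentinel values (NULL, IO_BLOCKED and
 * IO_MADE_GOOD).
 */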

/* When there are this many requests queued to be written by
 * the raid10 thread, we become 'congested' to provide back-pressure
 * for writeback.
 */
static int max_queued_requests = 1024;

static void allow_barrier(struct r10conf *conf);
static void lower_barrier(struct r10conf *conf);
static int enough(struct r10conf *conf, int ignore);
static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
				int *skipped);
static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
static void end_reshape_write(struct bio *bio, int error);
static void end_reshape(struct r10conf *conf);

static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct r10conf *conf = data;
	int size = offsetof(struct r10bio, devs[conf->copies]);

	/* allocate a r10bio with room for raid_disks entries in the
	 * bios array */
	return kzalloc(size, gfp_flags);
}

static void r10bio_pool_free(void *r10_bio, void *data)
{
	kfree(r10_bio);
}

/* Maximum size of each resync request */
#define RESYNC_BLOCK_SIZE (64*1024)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
/* amount of memory to reserve for resync requests */
#define RESYNC_WINDOW (1024*1024)
/* maximum number of concurrent requests, memory permitting */
#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
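/* With 4 KiB pages these work out to RESYNC_PAGES = 16 pages per bio and
 * RESYNC_DEPTH = 32 MiB / 64 KiB = 512 concurrent requests (illustrative
 * arithmetic; PAGE_SIZE is architecture dependent).
 */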

/*
 * When performing a resync, we need to read and compare, so
 * we need as many pages as there are copies.
 * When performing a recovery, we need 2 bios, one for read,
 * one for write (we recover only one drive per r10buf)
 *
 */
static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct r10conf *conf = data;
	struct page *page;
	struct r10bio *r10_bio;
	struct bio *bio;
	int i, j;
	int nalloc;

	r10_bio = r10bio_pool_alloc(gfp_flags, conf);
	if (!r10_bio)
		return NULL;

	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
	    test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
		nalloc = conf->copies; /* resync */
	else
		nalloc = 2; /* recovery */

	/*
	 * Allocate bios.
	 */
	for (j = nalloc ; j-- ; ) {
		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r10_bio->devs[j].bio = bio;
		if (!conf->have_replacement)
			continue;
		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r10_bio->devs[j].repl_bio = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them
	 * where needed.
	 */
	for (j = 0 ; j < nalloc; j++) {
		struct bio *rbio = r10_bio->devs[j].repl_bio;
		bio = r10_bio->devs[j].bio;
		for (i = 0; i < RESYNC_PAGES; i++) {
			if (j > 0 && !test_bit(MD_RECOVERY_SYNC,
					       &conf->mddev->recovery)) {
				/* we can share bv_page's during recovery
				 * and reshape */
				struct bio *rbio = r10_bio->devs[0].bio;
				page = rbio->bi_io_vec[i].bv_page;
				get_page(page);
			} else
				page = alloc_page(gfp_flags);
			if (unlikely(!page))
				goto out_free_pages;

			bio->bi_io_vec[i].bv_page = page;
			if (rbio)
				rbio->bi_io_vec[i].bv_page = page;
		}
	}

	return r10_bio;

out_free_pages:
	for ( ; i > 0 ; i--)
		safe_put_page(bio->bi_io_vec[i-1].bv_page);
	while (j--)
		for (i = 0; i < RESYNC_PAGES ; i++)
			safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
	j = 0;
out_free_bio:
	for ( ; j < nalloc; j++) {
		if (r10_bio->devs[j].bio)
			bio_put(r10_bio->devs[j].bio);
		if (r10_bio->devs[j].repl_bio)
			bio_put(r10_bio->devs[j].repl_bio);
	}
	r10bio_pool_free(r10_bio, conf);
	return NULL;
}

static void r10buf_pool_free(void *__r10_bio, void *data)
{
	int i;
	struct r10conf *conf = data;
	struct r10bio *r10bio = __r10_bio;
	int j;

	for (j=0; j < conf->copies; j++) {
		struct bio *bio = r10bio->devs[j].bio;
		if (bio) {
			for (i = 0; i < RESYNC_PAGES; i++) {
				safe_put_page(bio->bi_io_vec[i].bv_page);
				bio->bi_io_vec[i].bv_page = NULL;
			}
			bio_put(bio);
		}
		bio = r10bio->devs[j].repl_bio;
		if (bio)
			bio_put(bio);
	}
	r10bio_pool_free(r10bio, conf);
}

static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
{
	int i;

	for (i = 0; i < conf->copies; i++) {
		struct bio **bio = &r10_bio->devs[i].bio;
		if (!BIO_SPECIAL(*bio))
			bio_put(*bio);
		*bio = NULL;
		bio = &r10_bio->devs[i].repl_bio;
		if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
			bio_put(*bio);
		*bio = NULL;
	}
}

static void free_r10bio(struct r10bio *r10_bio)
{
	struct r10conf *conf = r10_bio->mddev->private;

	put_all_bios(conf, r10_bio);
	mempool_free(r10_bio, conf->r10bio_pool);
}

static void put_buf(struct r10bio *r10_bio)
{
	struct r10conf *conf = r10_bio->mddev->private;

	mempool_free(r10_bio, conf->r10buf_pool);

	lower_barrier(conf);
}

static void reschedule_retry(struct r10bio *r10_bio)
{
	unsigned long flags;
	struct mddev *mddev = r10_bio->mddev;
	struct r10conf *conf = mddev->private;

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r10_bio->retry_list, &conf->retry_list);
	conf->nr_queued++;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	/* wake up frozen array... */
	wake_up(&conf->wait_barrier);

	md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void raid_end_bio_io(struct r10bio *r10_bio)
{
	struct bio *bio = r10_bio->master_bio;
	int done;
	struct r10conf *conf = r10_bio->mddev->private;

	if (bio->bi_phys_segments) {
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		bio->bi_phys_segments--;
		done = (bio->bi_phys_segments == 0);
		spin_unlock_irqrestore(&conf->device_lock, flags);
	} else
		done = 1;
	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	if (done) {
		bio_endio(bio, 0);
		/*
		 * Wake up any possible resync thread that waits for the device
		 * to go idle.
		 */
		allow_barrier(conf);
	}
	free_r10bio(r10_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int slot, struct r10bio *r10_bio)
{
	struct r10conf *conf = r10_bio->mddev->private;

	conf->mirrors[r10_bio->devs[slot].devnum].head_position =
		r10_bio->devs[slot].addr + (r10_bio->sectors);
}

/*
 * Find the disk number which triggered the given bio
 */
static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
			 struct bio *bio, int *slotp, int *replp)
{
	int slot;
	int repl = 0;

	for (slot = 0; slot < conf->copies; slot++) {
		if (r10_bio->devs[slot].bio == bio)
			break;
		if (r10_bio->devs[slot].repl_bio == bio) {
			repl = 1;
			break;
		}
	}

	BUG_ON(slot == conf->copies);
	update_head_pos(slot, r10_bio);

	if (slotp)
		*slotp = slot;
	if (replp)
		*replp = repl;
	return r10_bio->devs[slot].devnum;
}

static void raid10_end_read_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct r10bio *r10_bio = bio->bi_private;
	int slot, dev;
	struct md_rdev *rdev;
	struct r10conf *conf = r10_bio->mddev->private;


	slot = r10_bio->read_slot;
	dev = r10_bio->devs[slot].devnum;
	rdev = r10_bio->devs[slot].rdev;
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(slot, r10_bio);

	if (uptodate) {
		/*
		 * Set R10BIO_Uptodate in our master bio, so that
		 * we will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer fails.
		 *
		 * The 'master' represents the composite IO operation to
		 * user-side. So if something waits for IO, then it will
		 * wait for the 'master' bio.
		 */
		set_bit(R10BIO_Uptodate, &r10_bio->state);
	} else {
		/* If all other devices that store this block have
		 * failed, we want to return the error upwards rather
		 * than fail the last device.  Here we redefine
		 * "uptodate" to mean "Don't want to retry"
		 */
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		if (!enough(conf, rdev->raid_disk))
			uptodate = 1;
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}
	if (uptodate) {
		raid_end_bio_io(r10_bio);
		rdev_dec_pending(rdev, conf->mddev);
	} else {
		/*
		 * oops, read error - keep the refcount on the rdev
		 */
		char b[BDEVNAME_SIZE];
		printk_ratelimited(KERN_ERR
				   "md/raid10:%s: %s: rescheduling sector %llu\n",
				   mdname(conf->mddev),
				   bdevname(rdev->bdev, b),
				   (unsigned long long)r10_bio->sector);
		set_bit(R10BIO_ReadError, &r10_bio->state);
		reschedule_retry(r10_bio);
	}
}

static void close_write(struct r10bio *r10_bio)
{
	/* clear the bitmap if all writes complete successfully */
	bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
			r10_bio->sectors,
			!test_bit(R10BIO_Degraded, &r10_bio->state),
			0);
	md_write_end(r10_bio->mddev);
}

static void one_write_done(struct r10bio *r10_bio)
{
	if (atomic_dec_and_test(&r10_bio->remaining)) {
		if (test_bit(R10BIO_WriteError, &r10_bio->state))
			reschedule_retry(r10_bio);
		else {
			close_write(r10_bio);
			if (test_bit(R10BIO_MadeGood, &r10_bio->state))
				reschedule_retry(r10_bio);
			else
				raid_end_bio_io(r10_bio);
		}
	}
}

static void raid10_end_write_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct r10bio *r10_bio = bio->bi_private;
	int dev;
	int dec_rdev = 1;
	struct r10conf *conf = r10_bio->mddev->private;
	int slot, repl;
	struct md_rdev *rdev = NULL;

	dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);

	if (repl)
		rdev = conf->mirrors[dev].replacement;
	if (!rdev) {
		smp_rmb();
		repl = 0;
		rdev = conf->mirrors[dev].rdev;
	}
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	if (!uptodate) {
		if (repl)
			/* Never record new bad blocks to replacement,
			 * just fail it.
			 */
			md_error(rdev->mddev, rdev);
		else {
			set_bit(WriteErrorSeen, &rdev->flags);
			if (!test_and_set_bit(WantReplacement, &rdev->flags))
				set_bit(MD_RECOVERY_NEEDED,
					&rdev->mddev->recovery);
			set_bit(R10BIO_WriteError, &r10_bio->state);
			dec_rdev = 0;
		}
	} else {
		/*
		 * Set R10BIO_Uptodate in our master bio, so that
		 * we will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer fails.
		 *
		 * The 'master' represents the composite IO operation to
		 * user-side. So if something waits for IO, then it will
		 * wait for the 'master' bio.
		 */
		sector_t first_bad;
		int bad_sectors;

		set_bit(R10BIO_Uptodate, &r10_bio->state);

		/* Maybe we can clear some bad blocks. */
		if (is_badblock(rdev,
				r10_bio->devs[slot].addr,
				r10_bio->sectors,
				&first_bad, &bad_sectors)) {
			bio_put(bio);
			if (repl)
				r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
			else
				r10_bio->devs[slot].bio = IO_MADE_GOOD;
			dec_rdev = 0;
			set_bit(R10BIO_MadeGood, &r10_bio->state);
		}
	}

	/*
	 *
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	one_write_done(r10_bio);
	if (dec_rdev)
		rdev_dec_pending(rdev, conf->mddev);
}

/*
 * RAID10 layout manager
 * As well as the chunksize and raid_disks count, there are two
 * parameters: near_copies and far_copies.
 * near_copies * far_copies must be <= raid_disks.
 * Normally one of these will be 1.
 * If both are 1, we get raid0.
 * If near_copies == raid_disks, we get raid1.
 *
 * Chunks are laid out in raid0 style with near_copies copies of the
 * first chunk, followed by near_copies copies of the next chunk and
 * so on.
 * If far_copies > 1, then after 1/far_copies of the array has been assigned
 * as described above, we start again with a device offset of near_copies.
 * So we effectively have another copy of the whole array further down all
 * the drives, but with blocks on different drives.
 * With this layout, a block is never stored twice on the same device.
 *
 * raid10_find_phys finds the sector offset of a given virtual sector
 * on each device that it is on.
 *
 * raid10_find_virt does the reverse mapping, from a device and a
 * sector offset to a virtual address
 */
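
/*
 * Worked example of the mapping (an illustrative sketch; the numbers
 * are hypothetical, not defaults): with raid_disks=4, near_copies=2,
 * far_copies=1 and a 64-sector chunk (chunk_shift = 6), virtual sector
 * 200 lies in virtual chunk 3 at offset 8.  raid10_find_phys multiplies
 * the chunk by near_copies, so the first copy lands on device
 * (3*2) % 4 = 2 at device chunk (3*2)/4 = 1, i.e. physical sector
 * 1*64 + 8 = 72, and the second copy at the same sector on device 3.
 * raid10_find_virt inverts exactly this arithmetic.
 */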

static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
{
	int n,f;
	sector_t sector;
	sector_t chunk;
	sector_t stripe;
	int dev;
	int slot = 0;

	/* now calculate first sector/dev */
	chunk = r10bio->sector >> geo->chunk_shift;
	sector = r10bio->sector & geo->chunk_mask;

	chunk *= geo->near_copies;
	stripe = chunk;
	dev = sector_div(stripe, geo->raid_disks);
	if (geo->far_offset)
		stripe *= geo->far_copies;

	sector += stripe << geo->chunk_shift;

	/* and calculate all the others */
	for (n = 0; n < geo->near_copies; n++) {
		int d = dev;
		sector_t s = sector;
		r10bio->devs[slot].devnum = d;
		r10bio->devs[slot].addr = s;
		slot++;

		for (f = 1; f < geo->far_copies; f++) {
			d += geo->near_copies;
			d %= geo->raid_disks;
			s += geo->stride;
			r10bio->devs[slot].devnum = d;
			r10bio->devs[slot].addr = s;
			slot++;
		}
		dev++;
		if (dev >= geo->raid_disks) {
			dev = 0;
			sector += (geo->chunk_mask + 1);
		}
	}
}

static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
{
	struct geom *geo = &conf->geo;

	if (conf->reshape_progress != MaxSector &&
	    ((r10bio->sector >= conf->reshape_progress) !=
	     conf->mddev->reshape_backwards)) {
		set_bit(R10BIO_Previous, &r10bio->state);
		geo = &conf->prev;
	} else
		clear_bit(R10BIO_Previous, &r10bio->state);

	__raid10_find_phys(geo, r10bio);
}

static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
{
	sector_t offset, chunk, vchunk;
	/* Never use conf->prev as this is only called during resync
	 * or recovery, so reshape isn't happening
	 */
	struct geom *geo = &conf->geo;

	offset = sector & geo->chunk_mask;
	if (geo->far_offset) {
		int fc;
		chunk = sector >> geo->chunk_shift;
		fc = sector_div(chunk, geo->far_copies);
		dev -= fc * geo->near_copies;
		if (dev < 0)
			dev += geo->raid_disks;
	} else {
		while (sector >= geo->stride) {
			sector -= geo->stride;
			if (dev < geo->near_copies)
				dev += geo->raid_disks - geo->near_copies;
			else
				dev -= geo->near_copies;
		}
		chunk = sector >> geo->chunk_shift;
	}
	vchunk = chunk * geo->raid_disks + dev;
	sector_div(vchunk, geo->near_copies);
	return (vchunk << geo->chunk_shift) + offset;
}

/**
 *	raid10_mergeable_bvec -- tell bio layer if two requests can be merged
 *	@q: request queue
 *	@bvm: properties of new bio
 *	@biovec: the request that could be merged to it.
 *
 *	Return the number of bytes we can accept at this offset
 *	This requires checking for end-of-chunk if near_copies != raid_disks,
 *	and for subordinate merge_bvec_fns if merge_check_needed.
 */
static int raid10_mergeable_bvec(struct request_queue *q,
				 struct bvec_merge_data *bvm,
				 struct bio_vec *biovec)
{
	struct mddev *mddev = q->queuedata;
	struct r10conf *conf = mddev->private;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max;
	unsigned int chunk_sectors;
	unsigned int bio_sectors = bvm->bi_size >> 9;
	struct geom *geo = &conf->geo;

	chunk_sectors = (conf->geo.chunk_mask & conf->prev.chunk_mask) + 1;
	if (conf->reshape_progress != MaxSector &&
	    ((sector >= conf->reshape_progress) !=
	     conf->mddev->reshape_backwards))
		geo = &conf->prev;

	if (geo->near_copies < geo->raid_disks) {
		max = (chunk_sectors - ((sector & (chunk_sectors - 1))
					+ bio_sectors)) << 9;
		if (max < 0)
			/* bio_add cannot handle a negative return */
			max = 0;
		if (max <= biovec->bv_len && bio_sectors == 0)
			return biovec->bv_len;
	} else
		max = biovec->bv_len;

	if (mddev->merge_check_needed) {
		struct {
			struct r10bio r10_bio;
			struct r10dev devs[conf->copies];
		} on_stack;
		struct r10bio *r10_bio = &on_stack.r10_bio;
		int s;
		if (conf->reshape_progress != MaxSector) {
			/* Cannot give any guidance during reshape */
			if (max <= biovec->bv_len && bio_sectors == 0)
				return biovec->bv_len;
			return 0;
		}
		r10_bio->sector = sector;
		raid10_find_phys(conf, r10_bio);
		rcu_read_lock();
		for (s = 0; s < conf->copies; s++) {
			int disk = r10_bio->devs[s].devnum;
			struct md_rdev *rdev = rcu_dereference(
				conf->mirrors[disk].rdev);
			if (rdev && !test_bit(Faulty, &rdev->flags)) {
				struct request_queue *q =
					bdev_get_queue(rdev->bdev);
				if (q->merge_bvec_fn) {
					bvm->bi_sector = r10_bio->devs[s].addr
						+ rdev->data_offset;
					bvm->bi_bdev = rdev->bdev;
					max = min(max, q->merge_bvec_fn(
							  q, bvm, biovec));
				}
			}
			rdev = rcu_dereference(conf->mirrors[disk].replacement);
			if (rdev && !test_bit(Faulty, &rdev->flags)) {
				struct request_queue *q =
					bdev_get_queue(rdev->bdev);
				if (q->merge_bvec_fn) {
					bvm->bi_sector = r10_bio->devs[s].addr
						+ rdev->data_offset;
					bvm->bi_bdev = rdev->bdev;
					max = min(max, q->merge_bvec_fn(
							  q, bvm, biovec));
				}
			}
		}
		rcu_read_unlock();
	}
	return max;
}

/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts; both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */

/*
 * FIXME: possibly should rethink read balancing and do it differently
 * depending on near_copies / far_copies geometry.
 */
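
/*
 * Illustrative caller pattern (a sketch, not part of the driver): the
 * rdev returned by read_balance() has nr_pending raised, so whoever
 * consumes it must drop that reference once the read has completed:
 *
 *	struct md_rdev *rdev = read_balance(conf, r10_bio, &max_sectors);
 *	if (rdev) {
 *		... issue the read to rdev->bdev ...
 *		... and on completion:
 *		rdev_dec_pending(rdev, conf->mddev);
 *	}
 */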
static struct md_rdev *read_balance(struct r10conf *conf,
				    struct r10bio *r10_bio,
				    int *max_sectors)
{
	const sector_t this_sector = r10_bio->sector;
	int disk, slot;
	int sectors = r10_bio->sectors;
	int best_good_sectors;
	sector_t new_distance, best_dist;
	struct md_rdev *best_rdev, *rdev = NULL;
	int do_balance;
	int best_slot;
	struct geom *geo = &conf->geo;

	raid10_find_phys(conf, r10_bio);
	rcu_read_lock();
retry:
	sectors = r10_bio->sectors;
	best_slot = -1;
	best_rdev = NULL;
	best_dist = MaxSector;
	best_good_sectors = 0;
	do_balance = 1;
	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on (recovery is ok), or below
	 * the resync window. We take the first readable disk when
	 * above the resync window.
	 */
	if (conf->mddev->recovery_cp < MaxSector
	    && (this_sector + sectors >= conf->next_resync))
		do_balance = 0;

	for (slot = 0; slot < conf->copies ; slot++) {
		sector_t first_bad;
		int bad_sectors;
		sector_t dev_sector;

		if (r10_bio->devs[slot].bio == IO_BLOCKED)
			continue;
		disk = r10_bio->devs[slot].devnum;
		rdev = rcu_dereference(conf->mirrors[disk].replacement);
		if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
		    test_bit(Unmerged, &rdev->flags) ||
		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
			rdev = rcu_dereference(conf->mirrors[disk].rdev);
		if (rdev == NULL ||
		    test_bit(Faulty, &rdev->flags) ||
		    test_bit(Unmerged, &rdev->flags))
			continue;
		if (!test_bit(In_sync, &rdev->flags) &&
		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
			continue;

		dev_sector = r10_bio->devs[slot].addr;
		if (is_badblock(rdev, dev_sector, sectors,
				&first_bad, &bad_sectors)) {
			if (best_dist < MaxSector)
				/* Already have a better slot */
				continue;
			if (first_bad <= dev_sector) {
				/* Cannot read here.  If this is the
				 * 'primary' device, then we must not read
				 * beyond 'bad_sectors' from another device.
				 */
				bad_sectors -= (dev_sector - first_bad);
				if (!do_balance && sectors > bad_sectors)
					sectors = bad_sectors;
				if (best_good_sectors > sectors)
					best_good_sectors = sectors;
			} else {
				sector_t good_sectors =
					first_bad - dev_sector;
				if (good_sectors > best_good_sectors) {
					best_good_sectors = good_sectors;
					best_slot = slot;
					best_rdev = rdev;
				}
				if (!do_balance)
					/* Must read from here */
					break;
			}
			continue;
		} else
			best_good_sectors = sectors;

		if (!do_balance)
			break;

		/* This optimisation is debatable, and completely destroys
		 * sequential read speed for 'far copies' arrays.  So only
		 * keep it for 'near' arrays, and review those later.
		 */
		if (geo->near_copies > 1 && !atomic_read(&rdev->nr_pending))
			break;

		/* for far > 1 always use the lowest address */
		if (geo->far_copies > 1)
			new_distance = r10_bio->devs[slot].addr;
		else
			new_distance = abs(r10_bio->devs[slot].addr -
					   conf->mirrors[disk].head_position);
		if (new_distance < best_dist) {
			best_dist = new_distance;
			best_slot = slot;
			best_rdev = rdev;
		}
	}
	if (slot >= conf->copies) {
		slot = best_slot;
		rdev = best_rdev;
	}

	if (slot >= 0) {
		atomic_inc(&rdev->nr_pending);
		if (test_bit(Faulty, &rdev->flags)) {
			/* Cannot risk returning a device that failed
			 * before we inc'ed nr_pending
			 */
			rdev_dec_pending(rdev, conf->mddev);
			goto retry;
		}
		r10_bio->read_slot = slot;
	} else
		rdev = NULL;
	rcu_read_unlock();
	*max_sectors = best_good_sectors;

	return rdev;
}

int md_raid10_congested(struct mddev *mddev, int bits)
{
	struct r10conf *conf = mddev->private;
	int i, ret = 0;

	if ((bits & (1 << BDI_async_congested)) &&
	    conf->pending_count >= max_queued_requests)
		return 1;

	rcu_read_lock();
	for (i = 0;
	     (i < conf->geo.raid_disks || i < conf->prev.raid_disks)
		     && ret == 0;
	     i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);

			ret |= bdi_congested(&q->backing_dev_info, bits);
		}
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(md_raid10_congested);

static int raid10_congested(void *data, int bits)
{
	struct mddev *mddev = data;

	return mddev_congested(mddev, bits) ||
		md_raid10_congested(mddev, bits);
}

static void flush_pending_writes(struct r10conf *conf)
{
	/* Any writes that have been queued but are awaiting
	 * bitmap updates get flushed here.
	 */
	spin_lock_irq(&conf->device_lock);

	if (conf->pending_bio_list.head) {
		struct bio *bio;
		bio = bio_list_get(&conf->pending_bio_list);
		conf->pending_count = 0;
		spin_unlock_irq(&conf->device_lock);
		/* flush any pending bitmap writes to disk
		 * before proceeding w/ I/O */
		bitmap_unplug(conf->mddev->bitmap);
		wake_up(&conf->wait_barrier);

		while (bio) { /* submit pending writes */
			struct bio *next = bio->bi_next;
			bio->bi_next = NULL;
			if (unlikely((bio->bi_rw & REQ_DISCARD) &&
			    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
				/* Just ignore it */
				bio_endio(bio, 0);
			else
				generic_make_request(bio);
			bio = next;
		}
	} else
		spin_unlock_irq(&conf->device_lock);
}

/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down.  This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'.  When that returns there
 * is no background IO happening.  It must arrange to call
 * allow_barrier when it has finished its IO.
 * Background IO calls must call raise_barrier.  Once that returns
 * there is no normal IO happening.  It must arrange to call
 * lower_barrier when the particular background IO completes.
 */
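
/*
 * A minimal usage sketch of the pairing described above (illustrative
 * only, not driver code):
 *
 *	... regular IO path:
 *	wait_barrier(conf);
 *	... submit normal IO ...
 *	allow_barrier(conf);	  (once the IO has finished)
 *
 *	... resync/recovery path:
 *	raise_barrier(conf, 0);	  (force=0: wait for waiters to drain)
 *	... do background IO ...
 *	lower_barrier(conf);
 */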

static void raise_barrier(struct r10conf *conf, int force)
{
	BUG_ON(force && !conf->barrier);
	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting (unless 'force') */
	wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
			    conf->resync_lock);

	/* block any new IO from starting */
	conf->barrier++;

	/* Now wait for all pending IO to complete */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
			    conf->resync_lock);

	spin_unlock_irq(&conf->resync_lock);
}

static void lower_barrier(struct r10conf *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->barrier--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void wait_barrier(struct r10conf *conf)
{
	spin_lock_irq(&conf->resync_lock);
	if (conf->barrier) {
		conf->nr_waiting++;
		/* Wait for the barrier to drop.
		 * However if there are already pending
		 * requests (preventing the barrier from
		 * rising completely), and the
		 * pre-process bio queue isn't empty,
		 * then don't wait, as we need to empty
		 * that queue to get the nr_pending
		 * count down.
		 */
		wait_event_lock_irq(conf->wait_barrier,
				    !conf->barrier ||
				    (conf->nr_pending &&
				     current->bio_list &&
				     !bio_list_empty(current->bio_list)),
				    conf->resync_lock);
		conf->nr_waiting--;
	}
	conf->nr_pending++;
	spin_unlock_irq(&conf->resync_lock);
}

static void allow_barrier(struct r10conf *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->nr_pending--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void freeze_array(struct r10conf *conf)
{
	/* stop sync IO and normal IO and wait for everything to
	 * go quiet.
	 * We increment barrier and nr_waiting, and then
	 * wait until nr_pending matches nr_queued+1.
	 * This is called in the context of one normal IO request
	 * that has failed. Thus any sync request that might be pending
	 * will be blocked by nr_pending, and we need to wait for
	 * pending IO requests to complete or be queued for re-try.
	 * Thus the number queued (nr_queued) plus this request (1)
	 * must match the number of pending IOs (nr_pending) before
	 * we continue.
	 */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier++;
	conf->nr_waiting++;
	wait_event_lock_irq_cmd(conf->wait_barrier,
				conf->nr_pending == conf->nr_queued+1,
				conf->resync_lock,
				flush_pending_writes(conf));

	spin_unlock_irq(&conf->resync_lock);
}

static void unfreeze_array(struct r10conf *conf)
{
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier--;
	conf->nr_waiting--;
	wake_up(&conf->wait_barrier);
	spin_unlock_irq(&conf->resync_lock);
}

static sector_t choose_data_offset(struct r10bio *r10_bio,
				   struct md_rdev *rdev)
{
	if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) ||
	    test_bit(R10BIO_Previous, &r10_bio->state))
		return rdev->data_offset;
	else
		return rdev->new_data_offset;
}

struct raid10_plug_cb {
	struct blk_plug_cb	cb;
	struct bio_list		pending;
	int			pending_cnt;
};

static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct raid10_plug_cb *plug = container_of(cb, struct raid10_plug_cb,
						   cb);
	struct mddev *mddev = plug->cb.data;
	struct r10conf *conf = mddev->private;
	struct bio *bio;

	if (from_schedule || current->bio_list) {
		spin_lock_irq(&conf->device_lock);
		bio_list_merge(&conf->pending_bio_list, &plug->pending);
		conf->pending_count += plug->pending_cnt;
		spin_unlock_irq(&conf->device_lock);
		md_wakeup_thread(mddev->thread);
		kfree(plug);
		return;
	}

	/* we aren't scheduling, so we can do the write-out directly. */
	bio = bio_list_get(&plug->pending);
	bitmap_unplug(mddev->bitmap);
	wake_up(&conf->wait_barrier);

	while (bio) { /* submit pending writes */
		struct bio *next = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = next;
	}
	kfree(plug);
}

Linus Torvaldsb4fdcb02011-11-04 17:06:58 -07001094static void make_request(struct mddev *mddev, struct bio * bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001095{
NeilBrowne879a872011-10-11 16:49:02 +11001096 struct r10conf *conf = mddev->private;
NeilBrown9f2c9d12011-10-11 16:48:43 +11001097 struct r10bio *r10_bio;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001098 struct bio *read_bio;
1099 int i;
NeilBrownf8c9e742012-05-21 09:28:33 +10001100 sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
NeilBrown5cf00fc2012-05-21 09:28:20 +10001101 int chunk_sects = chunk_mask + 1;
Jens Axboea3623572005-11-01 09:26:16 +01001102 const int rw = bio_data_dir(bio);
NeilBrown2c7d46e2010-08-18 16:16:05 +10001103 const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
Tejun Heoe9c74692010-09-03 11:56:18 +02001104 const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
Shaohua Li532a2a32012-10-11 13:30:52 +11001105 const unsigned long do_discard = (bio->bi_rw
1106 & (REQ_DISCARD | REQ_SECURE));
Joe Lawrencec8dc9c62013-02-21 13:28:09 +11001107 const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
NeilBrown6cce3b232006-01-06 00:20:16 -08001108 unsigned long flags;
NeilBrown3cb03002011-10-11 16:45:26 +11001109 struct md_rdev *blocked_rdev;
NeilBrown57c67df2012-10-11 13:32:13 +11001110 struct blk_plug_cb *cb;
1111 struct raid10_plug_cb *plug = NULL;
NeilBrownd4432c22011-07-28 11:39:24 +10001112 int sectors_handled;
1113 int max_sectors;
NeilBrown3ea7daa2012-05-22 13:53:47 +10001114 int sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001115
Tejun Heoe9c74692010-09-03 11:56:18 +02001116 if (unlikely(bio->bi_rw & REQ_FLUSH)) {
1117 md_flush_request(mddev, bio);
Christoph Hellwig5a7bbad2011-09-12 12:12:01 +02001118 return;
NeilBrowne5dcdd82005-09-09 16:23:41 -07001119 }
1120
Linus Torvalds1da177e2005-04-16 15:20:36 -07001121 /* If this request crosses a chunk boundary, we need to
1122 * split it. This will only happen for 1 PAGE (or less) requests.
1123 */
NeilBrown5cf00fc2012-05-21 09:28:20 +10001124 if (unlikely((bio->bi_sector & chunk_mask) + (bio->bi_size >> 9)
1125 > chunk_sects
NeilBrownf8c9e742012-05-21 09:28:33 +10001126 && (conf->geo.near_copies < conf->geo.raid_disks
1127 || conf->prev.near_copies < conf->prev.raid_disks))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001128 struct bio_pair *bp;
1129 /* Sanity check -- queue functions should prevent this happening */
Shaohua Li532a2a32012-10-11 13:30:52 +11001130 if ((bio->bi_vcnt != 1 && bio->bi_vcnt != 0) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07001131 bio->bi_idx != 0)
1132 goto bad_map;
1133 /* This is a one page bio that upper layers
1134 * refuse to split for us, so we need to split it.
1135 */
Denis ChengRq6feef532008-10-09 08:57:05 +02001136 bp = bio_split(bio,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001137 chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
NeilBrown51e9ac72010-08-07 21:17:00 +10001138
1139 /* Each of these 'make_request' calls will call 'wait_barrier'.
1140 * If the first succeeds but the second blocks due to the resync
1141 * thread raising the barrier, we will deadlock because the
1142 * IO to the underlying device will be queued in generic_make_request
1143 * and will never complete, so will never reduce nr_pending.
1144 * So increment nr_waiting here so no new raise_barriers will
1145 * succeed, and so the second wait_barrier cannot block.
1146 */
1147 spin_lock_irq(&conf->resync_lock);
1148 conf->nr_waiting++;
1149 spin_unlock_irq(&conf->resync_lock);
1150
Christoph Hellwig5a7bbad2011-09-12 12:12:01 +02001151 make_request(mddev, &bp->bio1);
1152 make_request(mddev, &bp->bio2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001153
NeilBrown51e9ac72010-08-07 21:17:00 +10001154 spin_lock_irq(&conf->resync_lock);
1155 conf->nr_waiting--;
1156 wake_up(&conf->wait_barrier);
1157 spin_unlock_irq(&conf->resync_lock);
1158
Linus Torvalds1da177e2005-04-16 15:20:36 -07001159 bio_pair_release(bp);
Christoph Hellwig5a7bbad2011-09-12 12:12:01 +02001160 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001161 bad_map:
NeilBrown128595e2010-05-03 14:47:14 +10001162 printk("md/raid10:%s: make_request bug: can't convert block across chunks"
1163 " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001164 (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
1165
NeilBrown6712ecf2007-09-27 12:47:43 +02001166 bio_io_error(bio);
Christoph Hellwig5a7bbad2011-09-12 12:12:01 +02001167 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001168 }
1169
NeilBrown3d310eb2005-06-21 17:17:26 -07001170 md_write_start(mddev, bio);
NeilBrown06d91a52005-06-21 17:17:12 -07001171
Linus Torvalds1da177e2005-04-16 15:20:36 -07001172 /*
1173 * Register the new request and wait if the reconstruction
1174 * thread has put up a bar for new requests.
1175 * Continue immediately if no resync is active currently.
1176 */
NeilBrown0a27ec92006-01-06 00:20:13 -08001177 wait_barrier(conf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001178
NeilBrown3ea7daa2012-05-22 13:53:47 +10001179 sectors = bio->bi_size >> 9;
1180 while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1181 bio->bi_sector < conf->reshape_progress &&
1182 bio->bi_sector + sectors > conf->reshape_progress) {
1183 /* IO spans the reshape position. Need to wait for
1184 * reshape to pass
1185 */
1186 allow_barrier(conf);
1187 wait_event(conf->wait_barrier,
1188 conf->reshape_progress <= bio->bi_sector ||
1189 conf->reshape_progress >= bio->bi_sector + sectors);
1190 wait_barrier(conf);
1191 }
1192 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1193 bio_data_dir(bio) == WRITE &&
1194 (mddev->reshape_backwards
1195 ? (bio->bi_sector < conf->reshape_safe &&
1196 bio->bi_sector + sectors > conf->reshape_progress)
1197 : (bio->bi_sector + sectors > conf->reshape_safe &&
1198 bio->bi_sector < conf->reshape_progress))) {
1199 /* Need to update reshape_position in metadata */
1200 mddev->reshape_position = conf->reshape_progress;
1201 set_bit(MD_CHANGE_DEVS, &mddev->flags);
1202 set_bit(MD_CHANGE_PENDING, &mddev->flags);
1203 md_wakeup_thread(mddev->thread);
1204 wait_event(mddev->sb_wait,
1205 !test_bit(MD_CHANGE_PENDING, &mddev->flags));
1206
1207 conf->reshape_safe = mddev->reshape_position;
1208 }
1209
Linus Torvalds1da177e2005-04-16 15:20:36 -07001210 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1211
1212 r10_bio->master_bio = bio;
NeilBrown3ea7daa2012-05-22 13:53:47 +10001213 r10_bio->sectors = sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001214
1215 r10_bio->mddev = mddev;
1216 r10_bio->sector = bio->bi_sector;
NeilBrown6cce3b232006-01-06 00:20:16 -08001217 r10_bio->state = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001218
NeilBrown856e08e2011-07-28 11:39:23 +10001219 /* We might need to issue multiple reads to different
1220 * devices if there are bad blocks around, so we keep
1221 * track of the number of reads in bio->bi_phys_segments.
1222 * If this is 0, there is only one r10_bio and no locking
1223 * will be needed when the request completes. If it is
1224 * non-zero, then it is the number of not-completed requests.
1225 */
1226 bio->bi_phys_segments = 0;
1227 clear_bit(BIO_SEG_VALID, &bio->bi_flags);
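/* Example: a 64-sector read that hits a bad-block range after 32
 * sectors is issued as two r10_bios; the splitting code below then
 * sets bi_phys_segments to 2.
 */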
1228
Jens Axboea3623572005-11-01 09:26:16 +01001229 if (rw == READ) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001230 /*
1231 * read balancing logic:
1232 */
NeilBrown96c3fd12011-12-23 10:17:54 +11001233 struct md_rdev *rdev;
NeilBrown856e08e2011-07-28 11:39:23 +10001234 int slot;
1235
1236read_again:
NeilBrown96c3fd12011-12-23 10:17:54 +11001237 rdev = read_balance(conf, r10_bio, &max_sectors);
1238 if (!rdev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001239 raid_end_bio_io(r10_bio);
Christoph Hellwig5a7bbad2011-09-12 12:12:01 +02001240 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001241 }
NeilBrown96c3fd12011-12-23 10:17:54 +11001242 slot = r10_bio->read_slot;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001243
NeilBrowna167f662010-10-26 18:31:13 +11001244 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
NeilBrown856e08e2011-07-28 11:39:23 +10001245 md_trim_bio(read_bio, r10_bio->sector - bio->bi_sector,
1246 max_sectors);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001247
1248 r10_bio->devs[slot].bio = read_bio;
NeilBrownabbf0982011-12-23 10:17:54 +11001249 r10_bio->devs[slot].rdev = rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001250
1251 read_bio->bi_sector = r10_bio->devs[slot].addr +
NeilBrownf8c9e742012-05-21 09:28:33 +10001252 choose_data_offset(r10_bio, rdev);
NeilBrown96c3fd12011-12-23 10:17:54 +11001253 read_bio->bi_bdev = rdev->bdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001254 read_bio->bi_end_io = raid10_end_read_request;
Christoph Hellwig7b6d91d2010-08-07 18:20:39 +02001255 read_bio->bi_rw = READ | do_sync;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001256 read_bio->bi_private = r10_bio;
1257
NeilBrown856e08e2011-07-28 11:39:23 +10001258 if (max_sectors < r10_bio->sectors) {
1259 /* Could not read all from this device, so we will
1260 * need another r10_bio.
1261 */
NeilBrown856e08e2011-07-28 11:39:23 +10001262 sectors_handled = (r10_bio->sector + max_sectors
1263 - bio->bi_sector);
1264 r10_bio->sectors = max_sectors;
1265 spin_lock_irq(&conf->device_lock);
1266 if (bio->bi_phys_segments == 0)
1267 bio->bi_phys_segments = 2;
1268 else
1269 bio->bi_phys_segments++;
1270 spin_unlock_irq(&conf->device_lock);
1271 /* Cannot call generic_make_request directly
1272 * as that will be queued in __generic_make_request
1273 * and subsequent mempool_alloc might block
1274 * waiting for it. So hand the bio over to raid10d.
1275 */
1276 reschedule_retry(r10_bio);
1277
1278 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1279
1280 r10_bio->master_bio = bio;
1281 r10_bio->sectors = ((bio->bi_size >> 9)
1282 - sectors_handled);
1283 r10_bio->state = 0;
1284 r10_bio->mddev = mddev;
1285 r10_bio->sector = bio->bi_sector + sectors_handled;
1286 goto read_again;
1287 } else
1288 generic_make_request(read_bio);
Christoph Hellwig5a7bbad2011-09-12 12:12:01 +02001289 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001290 }
1291
1292 /*
1293 * WRITE:
1294 */
NeilBrown34db0cd2011-10-11 16:50:01 +11001295 if (conf->pending_count >= max_queued_requests) {
1296 md_wakeup_thread(mddev->thread);
1297 wait_event(conf->wait_barrier,
1298 conf->pending_count < max_queued_requests);
1299 }
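/* Simple backpressure: once raid10d has max_queued_requests (an md
 * module parameter) writes queued, new writers sleep here until the
 * pending list drains.
 */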
Dan Williams6bfe0b42008-04-30 00:52:32 -07001300 /* first select target devices under rcu_read_lock and
Linus Torvalds1da177e2005-04-16 15:20:36 -07001301 * inc refcount on their rdev. Record them by setting
1302 * bios[x] to bio.
NeilBrownd4432c22011-07-28 11:39:24 +10001303 * If there are known/acknowledged bad blocks on any device
1304 * on which we have seen a write error, we want to avoid
1305 * writing to those blocks. This potentially requires several
1306 * writes to write around the bad blocks. Each set of writes
1307 * gets its own r10_bio with a set of bios attached. The number
1308 * of r10_bios is recorded in bio->bi_phys_segments just as with
1309 * the read case.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001310 */
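/* Example: for a 64-sector write where one mirror has an acknowledged
 * bad block starting 16 sectors in, max_sectors is clamped to 16
 * below; the first r10_bio writes 16 sectors to every copy, and the
 * retry_write loop builds further r10_bios for the rest, skipping the
 * bad range on that one device.
 */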
NeilBrownc3b328a2011-04-18 18:25:43 +10001311
NeilBrown69335ef2011-12-23 10:17:54 +11001312 r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001313 raid10_find_phys(conf, r10_bio);
NeilBrownd4432c22011-07-28 11:39:24 +10001314retry_write:
Harvey Harrisoncb6969e2008-05-06 20:42:32 -07001315 blocked_rdev = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001316 rcu_read_lock();
NeilBrownd4432c22011-07-28 11:39:24 +10001317 max_sectors = r10_bio->sectors;
1318
Linus Torvalds1da177e2005-04-16 15:20:36 -07001319 for (i = 0; i < conf->copies; i++) {
1320 int d = r10_bio->devs[i].devnum;
NeilBrown3cb03002011-10-11 16:45:26 +11001321 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
NeilBrown475b0322011-12-23 10:17:55 +11001322 struct md_rdev *rrdev = rcu_dereference(
1323 conf->mirrors[d].replacement);
NeilBrown4ca40c22011-12-23 10:17:55 +11001324 if (rdev == rrdev)
1325 rrdev = NULL;
Dan Williams6bfe0b42008-04-30 00:52:32 -07001326 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1327 atomic_inc(&rdev->nr_pending);
1328 blocked_rdev = rdev;
1329 break;
1330 }
NeilBrown475b0322011-12-23 10:17:55 +11001331 if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) {
1332 atomic_inc(&rrdev->nr_pending);
1333 blocked_rdev = rrdev;
1334 break;
1335 }
NeilBrowne7c0c3f2012-11-22 14:42:49 +11001336 if (rdev && (test_bit(Faulty, &rdev->flags)
1337 || test_bit(Unmerged, &rdev->flags)))
1338 rdev = NULL;
NeilBrown050b6612012-03-19 12:46:39 +11001339 if (rrdev && (test_bit(Faulty, &rrdev->flags)
1340 || test_bit(Unmerged, &rrdev->flags)))
NeilBrown475b0322011-12-23 10:17:55 +11001341 rrdev = NULL;
1342
NeilBrownd4432c22011-07-28 11:39:24 +10001343 r10_bio->devs[i].bio = NULL;
NeilBrown475b0322011-12-23 10:17:55 +11001344 r10_bio->devs[i].repl_bio = NULL;
NeilBrowne7c0c3f2012-11-22 14:42:49 +11001345
1346 if (!rdev && !rrdev) {
NeilBrown6cce3b232006-01-06 00:20:16 -08001347 set_bit(R10BIO_Degraded, &r10_bio->state);
NeilBrownd4432c22011-07-28 11:39:24 +10001348 continue;
NeilBrown6cce3b232006-01-06 00:20:16 -08001349 }
NeilBrowne7c0c3f2012-11-22 14:42:49 +11001350 if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
NeilBrownd4432c22011-07-28 11:39:24 +10001351 sector_t first_bad;
1352 sector_t dev_sector = r10_bio->devs[i].addr;
1353 int bad_sectors;
1354 int is_bad;
1355
1356 is_bad = is_badblock(rdev, dev_sector,
1357 max_sectors,
1358 &first_bad, &bad_sectors);
1359 if (is_bad < 0) {
1360 /* Mustn't write here until the bad block
1361 * is acknowledged
1362 */
1363 atomic_inc(&rdev->nr_pending);
1364 set_bit(BlockedBadBlocks, &rdev->flags);
1365 blocked_rdev = rdev;
1366 break;
1367 }
1368 if (is_bad && first_bad <= dev_sector) {
1369 /* Cannot write here at all */
1370 bad_sectors -= (dev_sector - first_bad);
1371 if (bad_sectors < max_sectors)
1372 /* Mustn't write more than bad_sectors
1373 * to other devices yet
1374 */
1375 max_sectors = bad_sectors;
1376 /* We don't set R10BIO_Degraded as that
1377 * only applies if the disk is missing,
1378 * so it might be re-added, and we want to
1379 * know to recover this chunk.
1380 * In this case the device is here, and the
1381 * fact that this chunk is not in-sync is
1382 * recorded in the bad block log.
1383 */
1384 continue;
1385 }
1386 if (is_bad) {
1387 int good_sectors = first_bad - dev_sector;
1388 if (good_sectors < max_sectors)
1389 max_sectors = good_sectors;
1390 }
1391 }
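/* Worked example of the clamping above: with dev_sector == 100,
 * first_bad == 90 and bad_sectors == 20, the bad range still covers
 * sectors 100-109, so bad_sectors drops to 10, max_sectors is
 * limited to 10, and this device is skipped for that stretch.
 */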
NeilBrowne7c0c3f2012-11-22 14:42:49 +11001392 if (rdev) {
1393 r10_bio->devs[i].bio = bio;
1394 atomic_inc(&rdev->nr_pending);
1395 }
NeilBrown475b0322011-12-23 10:17:55 +11001396 if (rrdev) {
1397 r10_bio->devs[i].repl_bio = bio;
1398 atomic_inc(&rrdev->nr_pending);
1399 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001400 }
1401 rcu_read_unlock();
1402
Dan Williams6bfe0b42008-04-30 00:52:32 -07001403 if (unlikely(blocked_rdev)) {
1404 /* Have to wait for this device to get unblocked, then retry */
1405 int j;
1406 int d;
1407
NeilBrown475b0322011-12-23 10:17:55 +11001408 for (j = 0; j < i; j++) {
Dan Williams6bfe0b42008-04-30 00:52:32 -07001409 if (r10_bio->devs[j].bio) {
1410 d = r10_bio->devs[j].devnum;
1411 rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1412 }
NeilBrown475b0322011-12-23 10:17:55 +11001413 if (r10_bio->devs[j].repl_bio) {
NeilBrown4ca40c22011-12-23 10:17:55 +11001414 struct md_rdev *rdev;
NeilBrown475b0322011-12-23 10:17:55 +11001415 d = r10_bio->devs[j].devnum;
NeilBrown4ca40c22011-12-23 10:17:55 +11001416 rdev = conf->mirrors[d].replacement;
1417 if (!rdev) {
1418 /* Race with remove_disk */
1419 smp_mb();
1420 rdev = conf->mirrors[d].rdev;
1421 }
1422 rdev_dec_pending(rdev, mddev);
NeilBrown475b0322011-12-23 10:17:55 +11001423 }
1424 }
Dan Williams6bfe0b42008-04-30 00:52:32 -07001425 allow_barrier(conf);
1426 md_wait_for_blocked_rdev(blocked_rdev, mddev);
1427 wait_barrier(conf);
1428 goto retry_write;
1429 }
1430
NeilBrownd4432c22011-07-28 11:39:24 +10001431 if (max_sectors < r10_bio->sectors) {
1432 /* We are splitting this into multiple parts, so
1433 * we need to prepare for allocating another r10_bio.
1434 */
1435 r10_bio->sectors = max_sectors;
1436 spin_lock_irq(&conf->device_lock);
1437 if (bio->bi_phys_segments == 0)
1438 bio->bi_phys_segments = 2;
1439 else
1440 bio->bi_phys_segments++;
1441 spin_unlock_irq(&conf->device_lock);
1442 }
1443 sectors_handled = r10_bio->sector + max_sectors - bio->bi_sector;
1444
NeilBrown4e780642010-10-19 12:54:01 +11001445 atomic_set(&r10_bio->remaining, 1);
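/* 'remaining' starts biased at 1 so a fast end_io cannot complete the
 * r10_bio while bios are still being attached; one_write_done() drops
 * the bias once everything is queued.
 */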
NeilBrownd4432c22011-07-28 11:39:24 +10001446 bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
NeilBrown06d91a52005-06-21 17:17:12 -07001447
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448 for (i = 0; i < conf->copies; i++) {
1449 struct bio *mbio;
1450 int d = r10_bio->devs[i].devnum;
NeilBrowne7c0c3f2012-11-22 14:42:49 +11001451 if (r10_bio->devs[i].bio) {
1452 struct md_rdev *rdev = conf->mirrors[d].rdev;
1453 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1454 md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
1455 max_sectors);
1456 r10_bio->devs[i].bio = mbio;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457
NeilBrowne7c0c3f2012-11-22 14:42:49 +11001458 mbio->bi_sector = (r10_bio->devs[i].addr+
1459 choose_data_offset(r10_bio,
1460 rdev));
1461 mbio->bi_bdev = rdev->bdev;
1462 mbio->bi_end_io = raid10_end_write_request;
Joe Lawrencec8dc9c62013-02-21 13:28:09 +11001463 mbio->bi_rw =
1464 WRITE | do_sync | do_fua | do_discard | do_same;
NeilBrowne7c0c3f2012-11-22 14:42:49 +11001465 mbio->bi_private = r10_bio;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001466
NeilBrowne7c0c3f2012-11-22 14:42:49 +11001467 atomic_inc(&r10_bio->remaining);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001468
NeilBrowne7c0c3f2012-11-22 14:42:49 +11001469 cb = blk_check_plugged(raid10_unplug, mddev,
1470 sizeof(*plug));
1471 if (cb)
1472 plug = container_of(cb, struct raid10_plug_cb,
1473 cb);
1474 else
1475 plug = NULL;
1476 spin_lock_irqsave(&conf->device_lock, flags);
1477 if (plug) {
1478 bio_list_add(&plug->pending, mbio);
1479 plug->pending_cnt++;
1480 } else {
1481 bio_list_add(&conf->pending_bio_list, mbio);
1482 conf->pending_count++;
1483 }
1484 spin_unlock_irqrestore(&conf->device_lock, flags);
1485 if (!plug)
1486 md_wakeup_thread(mddev->thread);
1487 }
NeilBrown57c67df2012-10-11 13:32:13 +11001488
NeilBrowne7c0c3f2012-11-22 14:42:49 +11001489 if (r10_bio->devs[i].repl_bio) {
1490 struct md_rdev *rdev = conf->mirrors[d].replacement;
1491 if (rdev == NULL) {
1492 /* Replacement just got moved to main 'rdev' */
1493 smp_mb();
1494 rdev = conf->mirrors[d].rdev;
1495 }
1496 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1497 md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
1498 max_sectors);
1499 r10_bio->devs[i].repl_bio = mbio;
1500
1501 mbio->bi_sector = (r10_bio->devs[i].addr +
1502 choose_data_offset(
1503 r10_bio, rdev));
1504 mbio->bi_bdev = rdev->bdev;
1505 mbio->bi_end_io = raid10_end_write_request;
Joe Lawrencec8dc9c62013-02-21 13:28:09 +11001506 mbio->bi_rw =
1507 WRITE | do_sync | do_fua | do_discard | do_same;
NeilBrowne7c0c3f2012-11-22 14:42:49 +11001508 mbio->bi_private = r10_bio;
1509
1510 atomic_inc(&r10_bio->remaining);
1511 spin_lock_irqsave(&conf->device_lock, flags);
NeilBrown57c67df2012-10-11 13:32:13 +11001512 bio_list_add(&conf->pending_bio_list, mbio);
1513 conf->pending_count++;
NeilBrowne7c0c3f2012-11-22 14:42:49 +11001514 spin_unlock_irqrestore(&conf->device_lock, flags);
1515 if (!mddev_check_plugged(mddev))
1516 md_wakeup_thread(mddev->thread);
NeilBrown57c67df2012-10-11 13:32:13 +11001517 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001518 }
1519
NeilBrown079fa162011-09-10 17:21:23 +10001520 /* Don't remove the bias on 'remaining' (one_write_done) until
1521 * after checking if we need to go around again.
1522 */
NeilBrowna35e63e2008-03-04 14:29:29 -08001523
NeilBrownd4432c22011-07-28 11:39:24 +10001524 if (sectors_handled < (bio->bi_size >> 9)) {
NeilBrown079fa162011-09-10 17:21:23 +10001525 one_write_done(r10_bio);
NeilBrown5e570282011-07-28 11:39:25 +10001526 /* We need another r10_bio. It has already been counted
NeilBrownd4432c22011-07-28 11:39:24 +10001527 * in bio->bi_phys_segments.
1528 */
1529 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1530
1531 r10_bio->master_bio = bio;
1532 r10_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
1533
1534 r10_bio->mddev = mddev;
1535 r10_bio->sector = bio->bi_sector + sectors_handled;
1536 r10_bio->state = 0;
1537 goto retry_write;
1538 }
NeilBrown079fa162011-09-10 17:21:23 +10001539 one_write_done(r10_bio);
1540
1541 /* In case raid10d snuck in to freeze_array */
1542 wake_up(&conf->wait_barrier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543}
1544
NeilBrownfd01b882011-10-11 16:47:53 +11001545static void status(struct seq_file *seq, struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001546{
NeilBrowne879a872011-10-11 16:49:02 +11001547 struct r10conf *conf = mddev->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548 int i;
1549
NeilBrown5cf00fc2012-05-21 09:28:20 +10001550 if (conf->geo.near_copies < conf->geo.raid_disks)
Andre Noll9d8f0362009-06-18 08:45:01 +10001551 seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
NeilBrown5cf00fc2012-05-21 09:28:20 +10001552 if (conf->geo.near_copies > 1)
1553 seq_printf(seq, " %d near-copies", conf->geo.near_copies);
1554 if (conf->geo.far_copies > 1) {
1555 if (conf->geo.far_offset)
1556 seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
NeilBrownc93983b2006-06-26 00:27:41 -07001557 else
NeilBrown5cf00fc2012-05-21 09:28:20 +10001558 seq_printf(seq, " %d far-copies", conf->geo.far_copies);
NeilBrownc93983b2006-06-26 00:27:41 -07001559 }
NeilBrown5cf00fc2012-05-21 09:28:20 +10001560 seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
1561 conf->geo.raid_disks - mddev->degraded);
1562 for (i = 0; i < conf->geo.raid_disks; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001563 seq_printf(seq, "%s",
1564 conf->mirrors[i].rdev &&
NeilBrownb2d444d2005-11-08 21:39:31 -08001565 test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566 seq_printf(seq, "]");
1567}
1568
NeilBrown700c7212011-07-27 11:00:36 +10001569/* check if there are enough drives for
1570 * every block to appear on at least one.
1571 * Don't consider the device numbered 'ignore'
1572 * as we might be about to remove it.
1573 */
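/* Example: raid_disks == 4 with near_copies == 2 gives the copy sets
 * {0,1} and {2,3}; _enough() returns 0 only if some set has lost every
 * member (e.g. both disks 0 and 1), as no copy of those blocks remains.
 */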
NeilBrownf8c9e742012-05-21 09:28:33 +10001574static int _enough(struct r10conf *conf, struct geom *geo, int ignore)
NeilBrown700c7212011-07-27 11:00:36 +10001575{
1576 int first = 0;
1577
1578 do {
1579 int n = conf->copies;
1580 int cnt = 0;
NeilBrown80b48122012-09-27 12:35:21 +10001581 int this = first;
NeilBrown700c7212011-07-27 11:00:36 +10001582 while (n--) {
NeilBrown80b48122012-09-27 12:35:21 +10001583 if (conf->mirrors[this].rdev &&
1584 this != ignore)
NeilBrown700c7212011-07-27 11:00:36 +10001585 cnt++;
NeilBrown80b48122012-09-27 12:35:21 +10001586 this = (this+1) % geo->raid_disks;
NeilBrown700c7212011-07-27 11:00:36 +10001587 }
1588 if (cnt == 0)
1589 return 0;
NeilBrown80b48122012-09-27 12:35:21 +10001590 first = (first + geo->near_copies) % geo->raid_disks;
NeilBrown700c7212011-07-27 11:00:36 +10001591 } while (first != 0);
1592 return 1;
1593}
1594
NeilBrownf8c9e742012-05-21 09:28:33 +10001595static int enough(struct r10conf *conf, int ignore)
1596{
1597 return _enough(conf, &conf->geo, ignore) &&
1598 _enough(conf, &conf->prev, ignore);
1599}
1600
NeilBrownfd01b882011-10-11 16:47:53 +11001601static void error(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001602{
1603 char b[BDEVNAME_SIZE];
NeilBrowne879a872011-10-11 16:49:02 +11001604 struct r10conf *conf = mddev->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001605
1606 /*
1607 * If it is not operational, then we have already marked it as dead;
1608 * else if it is the last working disk, ignore the error and let the
1609 * next level up know;
1610 * else mark the drive as failed.
1611 */
NeilBrownb2d444d2005-11-08 21:39:31 -08001612 if (test_bit(In_sync, &rdev->flags)
NeilBrown700c7212011-07-27 11:00:36 +10001613 && !enough(conf, rdev->raid_disk))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001614 /*
1615 * Don't fail the drive, just return an IO error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616 */
1617 return;
NeilBrownc04be0a2006-10-03 01:15:53 -07001618 if (test_and_clear_bit(In_sync, &rdev->flags)) {
1619 unsigned long flags;
1620 spin_lock_irqsave(&conf->device_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001621 mddev->degraded++;
NeilBrownc04be0a2006-10-03 01:15:53 -07001622 spin_unlock_irqrestore(&conf->device_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001623 /*
1624 * if recovery is running, make sure it aborts.
1625 */
NeilBrowndfc70642008-05-23 13:04:39 -07001626 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001627 }
NeilBrownde393cd2011-07-28 11:31:48 +10001628 set_bit(Blocked, &rdev->flags);
NeilBrownb2d444d2005-11-08 21:39:31 -08001629 set_bit(Faulty, &rdev->flags);
NeilBrown850b2b42006-10-03 01:15:46 -07001630 set_bit(MD_CHANGE_DEVS, &mddev->flags);
Joe Perches067032b2011-01-14 09:14:33 +11001631 printk(KERN_ALERT
1632 "md/raid10:%s: Disk failure on %s, disabling device.\n"
1633 "md/raid10:%s: Operation continuing on %d devices.\n",
NeilBrown128595e2010-05-03 14:47:14 +10001634 mdname(mddev), bdevname(rdev->bdev, b),
NeilBrown5cf00fc2012-05-21 09:28:20 +10001635 mdname(mddev), conf->geo.raid_disks - mddev->degraded);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001636}
1637
NeilBrowne879a872011-10-11 16:49:02 +11001638static void print_conf(struct r10conf *conf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001639{
1640 int i;
Jonathan Brassowdc280d982012-07-31 10:03:52 +10001641 struct raid10_info *tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001642
NeilBrown128595e2010-05-03 14:47:14 +10001643 printk(KERN_DEBUG "RAID10 conf printout:\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644 if (!conf) {
NeilBrown128595e2010-05-03 14:47:14 +10001645 printk(KERN_DEBUG "(!conf)\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001646 return;
1647 }
NeilBrown5cf00fc2012-05-21 09:28:20 +10001648 printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
1649 conf->geo.raid_disks);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650
NeilBrown5cf00fc2012-05-21 09:28:20 +10001651 for (i = 0; i < conf->geo.raid_disks; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652 char b[BDEVNAME_SIZE];
1653 tmp = conf->mirrors + i;
1654 if (tmp->rdev)
NeilBrown128595e2010-05-03 14:47:14 +10001655 printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
NeilBrownb2d444d2005-11-08 21:39:31 -08001656 i, !test_bit(In_sync, &tmp->rdev->flags),
1657 !test_bit(Faulty, &tmp->rdev->flags),
Linus Torvalds1da177e2005-04-16 15:20:36 -07001658 bdevname(tmp->rdev->bdev,b));
1659 }
1660}
1661
NeilBrowne879a872011-10-11 16:49:02 +11001662static void close_sync(struct r10conf *conf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001663{
NeilBrown0a27ec92006-01-06 00:20:13 -08001664 wait_barrier(conf);
1665 allow_barrier(conf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001666
1667 mempool_destroy(conf->r10buf_pool);
1668 conf->r10buf_pool = NULL;
1669}
1670
NeilBrownfd01b882011-10-11 16:47:53 +11001671static int raid10_spare_active(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001672{
1673 int i;
NeilBrowne879a872011-10-11 16:49:02 +11001674 struct r10conf *conf = mddev->private;
Jonathan Brassowdc280d982012-07-31 10:03:52 +10001675 struct raid10_info *tmp;
NeilBrown6b965622010-08-18 11:56:59 +10001676 int count = 0;
1677 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678
1679 /*
1680 * Find all non-in_sync disks within the RAID10 configuration
1681 * and mark them in_sync
1682 */
NeilBrown5cf00fc2012-05-21 09:28:20 +10001683 for (i = 0; i < conf->geo.raid_disks; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001684 tmp = conf->mirrors + i;
NeilBrown4ca40c22011-12-23 10:17:55 +11001685 if (tmp->replacement
1686 && tmp->replacement->recovery_offset == MaxSector
1687 && !test_bit(Faulty, &tmp->replacement->flags)
1688 && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
1689 /* Replacement has just become active */
1690 if (!tmp->rdev
1691 || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
1692 count++;
1693 if (tmp->rdev) {
1694 /* Replaced device not technically faulty,
1695 * but we need to be sure it gets removed
1696 * and never re-added.
1697 */
1698 set_bit(Faulty, &tmp->rdev->flags);
1699 sysfs_notify_dirent_safe(
1700 tmp->rdev->sysfs_state);
1701 }
1702 sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
1703 } else if (tmp->rdev
1704 && !test_bit(Faulty, &tmp->rdev->flags)
1705 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
NeilBrown6b965622010-08-18 11:56:59 +10001706 count++;
Jonathan Brassow2863b9e2012-10-11 13:38:58 +11001707 sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001708 }
1709 }
NeilBrown6b965622010-08-18 11:56:59 +10001710 spin_lock_irqsave(&conf->device_lock, flags);
1711 mddev->degraded -= count;
1712 spin_unlock_irqrestore(&conf->device_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713
1714 print_conf(conf);
NeilBrown6b965622010-08-18 11:56:59 +10001715 return count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001716}
1717
1718
NeilBrownfd01b882011-10-11 16:47:53 +11001719static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720{
NeilBrowne879a872011-10-11 16:49:02 +11001721 struct r10conf *conf = mddev->private;
Neil Brown199050e2008-06-28 08:31:33 +10001722 int err = -EEXIST;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723 int mirror;
Neil Brown6c2fce22008-06-28 08:31:31 +10001724 int first = 0;
NeilBrown5cf00fc2012-05-21 09:28:20 +10001725 int last = conf->geo.raid_disks - 1;
NeilBrown050b6612012-03-19 12:46:39 +11001726 struct request_queue *q = bdev_get_queue(rdev->bdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001727
1728 if (mddev->recovery_cp < MaxSector)
1729 /* only hot-add to in-sync arrays, as recovery is
1730 * very different from resync
1731 */
Neil Brown199050e2008-06-28 08:31:33 +10001732 return -EBUSY;
NeilBrownf8c9e742012-05-21 09:28:33 +10001733 if (rdev->saved_raid_disk < 0 && !_enough(conf, &conf->prev, -1))
Neil Brown199050e2008-06-28 08:31:33 +10001734 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735
NeilBrowna53a6c82008-11-06 17:28:20 +11001736 if (rdev->raid_disk >= 0)
Neil Brown6c2fce22008-06-28 08:31:31 +10001737 first = last = rdev->raid_disk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738
NeilBrown050b6612012-03-19 12:46:39 +11001739 if (q->merge_bvec_fn) {
1740 set_bit(Unmerged, &rdev->flags);
1741 mddev->merge_check_needed = 1;
1742 }
1743
Namhyung Kim2c4193d2011-07-18 17:38:43 +10001744 if (rdev->saved_raid_disk >= first &&
NeilBrown6cce3b232006-01-06 00:20:16 -08001745 conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1746 mirror = rdev->saved_raid_disk;
1747 else
Neil Brown6c2fce22008-06-28 08:31:31 +10001748 mirror = first;
NeilBrown2bb77732011-07-27 11:00:36 +10001749 for ( ; mirror <= last ; mirror++) {
Jonathan Brassowdc280d982012-07-31 10:03:52 +10001750 struct raid10_info *p = &conf->mirrors[mirror];
NeilBrown2bb77732011-07-27 11:00:36 +10001751 if (p->recovery_disabled == mddev->recovery_disabled)
1752 continue;
NeilBrownb7044d42011-12-23 10:17:56 +11001753 if (p->rdev) {
1754 if (!test_bit(WantReplacement, &p->rdev->flags) ||
1755 p->replacement != NULL)
1756 continue;
1757 clear_bit(In_sync, &rdev->flags);
1758 set_bit(Replacement, &rdev->flags);
1759 rdev->raid_disk = mirror;
1760 err = 0;
1761 disk_stack_limits(mddev->gendisk, rdev->bdev,
1762 rdev->data_offset << 9);
NeilBrownb7044d42011-12-23 10:17:56 +11001763 conf->fullsync = 1;
1764 rcu_assign_pointer(p->replacement, rdev);
1765 break;
1766 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767
NeilBrown2bb77732011-07-27 11:00:36 +10001768 disk_stack_limits(mddev->gendisk, rdev->bdev,
1769 rdev->data_offset << 9);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770
NeilBrown2bb77732011-07-27 11:00:36 +10001771 p->head_position = 0;
NeilBrownd890fa22011-10-26 11:54:39 +11001772 p->recovery_disabled = mddev->recovery_disabled - 1;
NeilBrown2bb77732011-07-27 11:00:36 +10001773 rdev->raid_disk = mirror;
1774 err = 0;
1775 if (rdev->saved_raid_disk != mirror)
1776 conf->fullsync = 1;
1777 rcu_assign_pointer(p->rdev, rdev);
1778 break;
1779 }
NeilBrown050b6612012-03-19 12:46:39 +11001780 if (err == 0 && test_bit(Unmerged, &rdev->flags)) {
1781 /* Some requests might not have seen this new
1782 * merge_bvec_fn. We must wait for them to complete
1783 * before merging the device fully.
1784 * First we make sure any code which has tested
1785 * our function has submitted the request, then
1786 * we wait for all outstanding requests to complete.
1787 */
1788 synchronize_sched();
1789 raise_barrier(conf, 0);
1790 lower_barrier(conf);
1791 clear_bit(Unmerged, &rdev->flags);
1792 }
Andre Nollac5e7112009-08-03 10:59:47 +10001793 md_integrity_add_rdev(rdev, mddev);
Jonathan Brassowed30be02012-10-31 11:42:30 +11001794 if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
Shaohua Li532a2a32012-10-11 13:30:52 +11001795 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
1796
Linus Torvalds1da177e2005-04-16 15:20:36 -07001797 print_conf(conf);
Neil Brown199050e2008-06-28 08:31:33 +10001798 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799}
1800
NeilBrownb8321b62011-12-23 10:17:51 +11001801static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001802{
NeilBrowne879a872011-10-11 16:49:02 +11001803 struct r10conf *conf = mddev->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804 int err = 0;
NeilBrownb8321b62011-12-23 10:17:51 +11001805 int number = rdev->raid_disk;
NeilBrownc8ab9032011-12-23 10:17:54 +11001806 struct md_rdev **rdevp;
Jonathan Brassowdc280d982012-07-31 10:03:52 +10001807 struct raid10_info *p = conf->mirrors + number;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808
1809 print_conf(conf);
NeilBrownc8ab9032011-12-23 10:17:54 +11001810 if (rdev == p->rdev)
1811 rdevp = &p->rdev;
1812 else if (rdev == p->replacement)
1813 rdevp = &p->replacement;
1814 else
1815 return 0;
1816
1817 if (test_bit(In_sync, &rdev->flags) ||
1818 atomic_read(&rdev->nr_pending)) {
1819 err = -EBUSY;
1820 goto abort;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001821 }
NeilBrownc8ab9032011-12-23 10:17:54 +11001822 /* Only remove non-faulty devices if recovery
1823 * is not possible.
1824 */
1825 if (!test_bit(Faulty, &rdev->flags) &&
1826 mddev->recovery_disabled != p->recovery_disabled &&
NeilBrown4ca40c22011-12-23 10:17:55 +11001827 (!p->replacement || p->replacement == rdev) &&
NeilBrown63aced62012-05-22 13:55:33 +10001828 number < conf->geo.raid_disks &&
NeilBrownc8ab9032011-12-23 10:17:54 +11001829 enough(conf, -1)) {
1830 err = -EBUSY;
1831 goto abort;
1832 }
1833 *rdevp = NULL;
1834 synchronize_rcu();
1835 if (atomic_read(&rdev->nr_pending)) {
1836 /* lost the race, try later */
1837 err = -EBUSY;
1838 *rdevp = rdev;
1839 goto abort;
NeilBrown4ca40c22011-12-23 10:17:55 +11001840 } else if (p->replacement) {
1841 /* We must have just cleared 'rdev' */
1842 p->rdev = p->replacement;
1843 clear_bit(Replacement, &p->replacement->flags);
1844 smp_mb(); /* Make sure other CPUs may see both as identical
1845 * but will never see neither -- if they are careful.
1846 */
1847 p->replacement = NULL;
1848 clear_bit(WantReplacement, &rdev->flags);
1849 } else
1850 /* We might have just removed the Replacement as faulty.
1851 * Clear the flag just in case
1852 */
1853 clear_bit(WantReplacement, &rdev->flags);
1854
NeilBrownc8ab9032011-12-23 10:17:54 +11001855 err = md_integrity_register(mddev);
1856
Linus Torvalds1da177e2005-04-16 15:20:36 -07001857abort:
1858
1859 print_conf(conf);
1860 return err;
1861}
1862
1863
NeilBrown6712ecf2007-09-27 12:47:43 +02001864static void end_sync_read(struct bio *bio, int error)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001865{
NeilBrown9f2c9d12011-10-11 16:48:43 +11001866 struct r10bio *r10_bio = bio->bi_private;
NeilBrowne879a872011-10-11 16:49:02 +11001867 struct r10conf *conf = r10_bio->mddev->private;
Namhyung Kim778ca012011-07-18 17:38:47 +10001868 int d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001869
NeilBrown3ea7daa2012-05-22 13:53:47 +10001870 if (bio == r10_bio->master_bio) {
1871 /* this is a reshape read */
1872 d = r10_bio->read_slot; /* really the read dev */
1873 } else
1874 d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
NeilBrown0eb3ff12006-01-06 00:20:29 -08001875
1876 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
1877 set_bit(R10BIO_Uptodate, &r10_bio->state);
NeilBrowne684e412011-07-28 11:39:25 +10001878 else
1879 /* The write handler will notice the lack of
1880 * R10BIO_Uptodate and record any errors etc
1881 */
NeilBrown4dbcdc72006-01-06 00:20:52 -08001882 atomic_add(r10_bio->sectors,
1883 &conf->mirrors[d].rdev->corrected_errors);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884
1885 /* for reconstruct, we always reschedule after a read.
1886 * for resync, only after all reads
1887 */
NeilBrown73d5c382009-02-25 13:18:47 +11001888 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001889 if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
1890 atomic_dec_and_test(&r10_bio->remaining)) {
1891 /* we have read all the blocks,
1892 * do the comparison in process context in raid10d
1893 */
1894 reschedule_retry(r10_bio);
1895 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001896}
1897
NeilBrown9f2c9d12011-10-11 16:48:43 +11001898static void end_sync_request(struct r10bio *r10_bio)
NeilBrown5e570282011-07-28 11:39:25 +10001899{
NeilBrownfd01b882011-10-11 16:47:53 +11001900 struct mddev *mddev = r10_bio->mddev;
NeilBrown5e570282011-07-28 11:39:25 +10001901
1902 while (atomic_dec_and_test(&r10_bio->remaining)) {
1903 if (r10_bio->master_bio == NULL) {
1904 /* the primary of several recovery bios */
1905 sector_t s = r10_bio->sectors;
1906 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
1907 test_bit(R10BIO_WriteError, &r10_bio->state))
1908 reschedule_retry(r10_bio);
1909 else
1910 put_buf(r10_bio);
1911 md_done_sync(mddev, s, 1);
1912 break;
1913 } else {
NeilBrown9f2c9d12011-10-11 16:48:43 +11001914 struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio;
NeilBrown5e570282011-07-28 11:39:25 +10001915 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
1916 test_bit(R10BIO_WriteError, &r10_bio->state))
1917 reschedule_retry(r10_bio);
1918 else
1919 put_buf(r10_bio);
1920 r10_bio = r10_bio2;
1921 }
1922 }
1923}
1924
NeilBrown6712ecf2007-09-27 12:47:43 +02001925static void end_sync_write(struct bio *bio, int error)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926{
1927 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
NeilBrown9f2c9d12011-10-11 16:48:43 +11001928 struct r10bio *r10_bio = bio->bi_private;
NeilBrownfd01b882011-10-11 16:47:53 +11001929 struct mddev *mddev = r10_bio->mddev;
NeilBrowne879a872011-10-11 16:49:02 +11001930 struct r10conf *conf = mddev->private;
Namhyung Kim778ca012011-07-18 17:38:47 +10001931 int d;
NeilBrown749c55e2011-07-28 11:39:24 +10001932 sector_t first_bad;
1933 int bad_sectors;
1934 int slot;
NeilBrown9ad1aef2011-12-23 10:17:55 +11001935 int repl;
NeilBrown4ca40c22011-12-23 10:17:55 +11001936 struct md_rdev *rdev = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001937
NeilBrown9ad1aef2011-12-23 10:17:55 +11001938 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
1939 if (repl)
1940 rdev = conf->mirrors[d].replacement;
NeilBrown547414d2012-03-13 11:21:20 +11001941 else
NeilBrown9ad1aef2011-12-23 10:17:55 +11001942 rdev = conf->mirrors[d].rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001943
NeilBrown1a0b7cd2011-07-28 11:39:25 +10001944 if (!uptodate) {
NeilBrown9ad1aef2011-12-23 10:17:55 +11001945 if (repl)
1946 md_error(mddev, rdev);
1947 else {
1948 set_bit(WriteErrorSeen, &rdev->flags);
NeilBrownb7044d42011-12-23 10:17:56 +11001949 if (!test_and_set_bit(WantReplacement, &rdev->flags))
1950 set_bit(MD_RECOVERY_NEEDED,
1951 &rdev->mddev->recovery);
NeilBrown9ad1aef2011-12-23 10:17:55 +11001952 set_bit(R10BIO_WriteError, &r10_bio->state);
1953 }
1954 } else if (is_badblock(rdev,
NeilBrown749c55e2011-07-28 11:39:24 +10001955 r10_bio->devs[slot].addr,
1956 r10_bio->sectors,
1957 &first_bad, &bad_sectors))
1958 set_bit(R10BIO_MadeGood, &r10_bio->state);
NeilBrowndfc70642008-05-23 13:04:39 -07001959
NeilBrown9ad1aef2011-12-23 10:17:55 +11001960 rdev_dec_pending(rdev, mddev);
NeilBrown5e570282011-07-28 11:39:25 +10001961
1962 end_sync_request(r10_bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963}
1964
1965/*
1966 * Note: sync and recovery are handled very differently for raid10.
1967 * This code is for resync.
1968 * For resync, we read through virtual addresses and read all blocks.
1969 * If there is any error, we schedule a write. The lowest numbered
1970 * drive is authoritative.
1971 * However, requests come in for physical addresses, so we need to map.
1972 * For every physical address there are raid_disks/copies virtual addresses,
1973 * which is always at least one, but is not necessarily an integer.
1974 * This means that a physical address can span multiple chunks, so we may
1975 * have to submit multiple io requests for a single sync request.
1976 */
1977/*
1978 * We check if all blocks are in-sync and only write to blocks that
1979 * aren't in sync
1980 */
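/* Example: raid_disks == 3 with near_copies == 2 gives 3/2 == 1.5
 * virtual addresses per physical address, so a single physical range
 * can straddle a chunk boundary and require two separate reads.
 */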
NeilBrown9f2c9d12011-10-11 16:48:43 +11001981static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001982{
NeilBrowne879a872011-10-11 16:49:02 +11001983 struct r10conf *conf = mddev->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001984 int i, first;
1985 struct bio *tbio, *fbio;
majianpengf4380a92012-04-12 16:04:47 +10001986 int vcnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987
1988 atomic_set(&r10_bio->remaining, 1);
1989
1990 /* find the first device with a block */
1991 for (i=0; i<conf->copies; i++)
1992 if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags))
1993 break;
1994
1995 if (i == conf->copies)
1996 goto done;
1997
1998 first = i;
1999 fbio = r10_bio->devs[i].bio;
2000
majianpengf4380a92012-04-12 16:04:47 +10002001 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
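/* e.g. r10_bio->sectors == 1024 with 4K pages gives
 * vcnt == (1024 + 7) >> 3 == 128 pages to compare per bio.
 */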
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002 /* now find blocks with errors */
NeilBrown0eb3ff12006-01-06 00:20:29 -08002003 for (i=0 ; i < conf->copies ; i++) {
2004 int j, d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006 tbio = r10_bio->devs[i].bio;
NeilBrown0eb3ff12006-01-06 00:20:29 -08002007
2008 if (tbio->bi_end_io != end_sync_read)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002009 continue;
NeilBrown0eb3ff12006-01-06 00:20:29 -08002010 if (i == first)
2011 continue;
2012 if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) {
2013 /* We know that the bi_io_vec layout is the same for
2014 * both 'first' and 'i', so we just compare them.
2015 * All vec entries are PAGE_SIZE;
2016 */
2017 for (j = 0; j < vcnt; j++)
2018 if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
2019 page_address(tbio->bi_io_vec[j].bv_page),
NeilBrown5020ad72012-04-02 01:39:05 +10002020 fbio->bi_io_vec[j].bv_len))
NeilBrown0eb3ff12006-01-06 00:20:29 -08002021 break;
2022 if (j == vcnt)
2023 continue;
Jianpeng Ma7f7583d2012-10-11 14:17:59 +11002024 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
NeilBrownf84ee362011-07-28 11:39:25 +10002025 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
2026 /* Don't fix anything. */
2027 continue;
NeilBrown0eb3ff12006-01-06 00:20:29 -08002028 }
NeilBrownf84ee362011-07-28 11:39:25 +10002029 /* Ok, we need to write this bio, either to correct an
2030 * inconsistency or to correct an unreadable block.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002031 * First we need to fixup bv_offset, bv_len and
2032 * bi_vecs, as the read request might have corrupted these
2033 */
2034 tbio->bi_vcnt = vcnt;
2035 tbio->bi_size = r10_bio->sectors << 9;
2036 tbio->bi_idx = 0;
2037 tbio->bi_phys_segments = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002038 tbio->bi_flags &= ~(BIO_POOL_MASK - 1);
2039 tbio->bi_flags |= 1 << BIO_UPTODATE;
2040 tbio->bi_next = NULL;
2041 tbio->bi_rw = WRITE;
2042 tbio->bi_private = r10_bio;
2043 tbio->bi_sector = r10_bio->devs[i].addr;
2044
2045 for (j=0; j < vcnt ; j++) {
2046 tbio->bi_io_vec[j].bv_offset = 0;
2047 tbio->bi_io_vec[j].bv_len = PAGE_SIZE;
2048
2049 memcpy(page_address(tbio->bi_io_vec[j].bv_page),
2050 page_address(fbio->bi_io_vec[j].bv_page),
2051 PAGE_SIZE);
2052 }
2053 tbio->bi_end_io = end_sync_write;
2054
2055 d = r10_bio->devs[i].devnum;
2056 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2057 atomic_inc(&r10_bio->remaining);
2058 md_sync_acct(conf->mirrors[d].rdev->bdev, tbio->bi_size >> 9);
2059
2060 tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
2061 tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
2062 generic_make_request(tbio);
2063 }
2064
NeilBrown9ad1aef2011-12-23 10:17:55 +11002065 /* Now write out to any replacement devices
2066 * that are active
2067 */
2068 for (i = 0; i < conf->copies; i++) {
2069 int j, d;
NeilBrown9ad1aef2011-12-23 10:17:55 +11002070
2071 tbio = r10_bio->devs[i].repl_bio;
2072 if (!tbio || !tbio->bi_end_io)
2073 continue;
2074 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
2075 && r10_bio->devs[i].bio != fbio)
2076 for (j = 0; j < vcnt; j++)
2077 memcpy(page_address(tbio->bi_io_vec[j].bv_page),
2078 page_address(fbio->bi_io_vec[j].bv_page),
2079 PAGE_SIZE);
2080 d = r10_bio->devs[i].devnum;
2081 atomic_inc(&r10_bio->remaining);
2082 md_sync_acct(conf->mirrors[d].replacement->bdev,
2083 tbio->bi_size >> 9);
2084 generic_make_request(tbio);
2085 }
2086
Linus Torvalds1da177e2005-04-16 15:20:36 -07002087done:
2088 if (atomic_dec_and_test(&r10_bio->remaining)) {
2089 md_done_sync(mddev, r10_bio->sectors, 1);
2090 put_buf(r10_bio);
2091 }
2092}
2093
2094/*
2095 * Now for the recovery code.
2096 * Recovery happens across physical sectors.
2097 * We recover all non-is_sync drives by finding the virtual address of
2098 * each, and then choose a working drive that also has that virt address.
2099 * There is a separate r10_bio for each non-in_sync drive.
2100 * Only the first two slots are in use: the first for reading,
2101 * the second for writing.
2102 *
2103 */
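/* In each recovery r10_bio, devs[0] identifies the in-sync device the
 * data is read from and devs[1] the device (and, if present, its
 * replacement) being rebuilt; fix_recovery_read_error() below relies
 * on exactly this slot assignment.
 */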
NeilBrown9f2c9d12011-10-11 16:48:43 +11002104static void fix_recovery_read_error(struct r10bio *r10_bio)
NeilBrown5e570282011-07-28 11:39:25 +10002105{
2106 /* We got a read error during recovery.
2107 * We repeat the read in smaller page-sized sections.
2108 * If a read succeeds, write it to the new device or record
2109 * a bad block if we cannot.
2110 * If a read fails, record a bad block on both old and
2111 * new devices.
2112 */
NeilBrownfd01b882011-10-11 16:47:53 +11002113 struct mddev *mddev = r10_bio->mddev;
NeilBrowne879a872011-10-11 16:49:02 +11002114 struct r10conf *conf = mddev->private;
NeilBrown5e570282011-07-28 11:39:25 +10002115 struct bio *bio = r10_bio->devs[0].bio;
2116 sector_t sect = 0;
2117 int sectors = r10_bio->sectors;
2118 int idx = 0;
2119 int dr = r10_bio->devs[0].devnum;
2120 int dw = r10_bio->devs[1].devnum;
2121
2122 while (sectors) {
2123 int s = sectors;
NeilBrown3cb03002011-10-11 16:45:26 +11002124 struct md_rdev *rdev;
NeilBrown5e570282011-07-28 11:39:25 +10002125 sector_t addr;
2126 int ok;
2127
2128 if (s > (PAGE_SIZE>>9))
2129 s = PAGE_SIZE >> 9;
2130
2131 rdev = conf->mirrors[dr].rdev;
2132 addr = r10_bio->devs[0].addr + sect;
2133 ok = sync_page_io(rdev,
2134 addr,
2135 s << 9,
2136 bio->bi_io_vec[idx].bv_page,
2137 READ, false);
2138 if (ok) {
2139 rdev = conf->mirrors[dw].rdev;
2140 addr = r10_bio->devs[1].addr + sect;
2141 ok = sync_page_io(rdev,
2142 addr,
2143 s << 9,
2144 bio->bi_io_vec[idx].bv_page,
2145 WRITE, false);
NeilBrownb7044d42011-12-23 10:17:56 +11002146 if (!ok) {
NeilBrown5e570282011-07-28 11:39:25 +10002147 set_bit(WriteErrorSeen, &rdev->flags);
NeilBrownb7044d42011-12-23 10:17:56 +11002148 if (!test_and_set_bit(WantReplacement,
2149 &rdev->flags))
2150 set_bit(MD_RECOVERY_NEEDED,
2151 &rdev->mddev->recovery);
2152 }
NeilBrown5e570282011-07-28 11:39:25 +10002153 }
2154 if (!ok) {
2155 /* We don't worry if we cannot set a bad block -
2156 * it really is bad so there is no loss in not
2157 * recording it yet
2158 */
2159 rdev_set_badblocks(rdev, addr, s, 0);
2160
2161 if (rdev != conf->mirrors[dw].rdev) {
2162 /* need bad block on destination too */
NeilBrown3cb03002011-10-11 16:45:26 +11002163 struct md_rdev *rdev2 = conf->mirrors[dw].rdev;
NeilBrown5e570282011-07-28 11:39:25 +10002164 addr = r10_bio->devs[1].addr + sect;
2165 ok = rdev_set_badblocks(rdev2, addr, s, 0);
2166 if (!ok) {
2167 /* just abort the recovery */
2168 printk(KERN_NOTICE
2169 "md/raid10:%s: recovery aborted"
2170 " due to read error\n",
2171 mdname(mddev));
2172
2173 conf->mirrors[dw].recovery_disabled
2174 = mddev->recovery_disabled;
2175 set_bit(MD_RECOVERY_INTR,
2176 &mddev->recovery);
2177 break;
2178 }
2179 }
2180 }
2181
2182 sectors -= s;
2183 sect += s;
2184 idx++;
2185 }
2186}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187
NeilBrown9f2c9d12011-10-11 16:48:43 +11002188static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189{
NeilBrowne879a872011-10-11 16:49:02 +11002190 struct r10conf *conf = mddev->private;
Namhyung Kimc65060a2011-07-18 17:38:49 +10002191 int d;
NeilBrown24afd802011-12-23 10:17:55 +11002192 struct bio *wbio, *wbio2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002193
NeilBrown5e570282011-07-28 11:39:25 +10002194 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
2195 fix_recovery_read_error(r10_bio);
2196 end_sync_request(r10_bio);
2197 return;
2198 }
2199
Namhyung Kimc65060a2011-07-18 17:38:49 +10002200 /*
2201 * share the pages with the first bio
Linus Torvalds1da177e2005-04-16 15:20:36 -07002202 * and submit the write request
2203 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002204 d = r10_bio->devs[1].devnum;
NeilBrown24afd802011-12-23 10:17:55 +11002205 wbio = r10_bio->devs[1].bio;
2206 wbio2 = r10_bio->devs[1].repl_bio;
2207 if (wbio->bi_end_io) {
2208 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2209 md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
2210 generic_make_request(wbio);
2211 }
2212 if (wbio2 && wbio2->bi_end_io) {
2213 atomic_inc(&conf->mirrors[d].replacement->nr_pending);
2214 md_sync_acct(conf->mirrors[d].replacement->bdev,
2215 wbio2->bi_size >> 9);
2216 generic_make_request(wbio2);
2217 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002218}
2219
2220
2221/*
Robert Becker1e509152009-12-14 12:49:58 +11002222 * Used by fix_read_error() to decay the per rdev read_errors.
2223 * We halve the read error count for every hour that has elapsed
2224 * since the last recorded read error.
2225 *
2226 */
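/* e.g. an rdev with read_errors == 40 whose last error was three hours
 * ago decays to 40 >> 3 == 5 before the new error is counted.
 */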
NeilBrownfd01b882011-10-11 16:47:53 +11002227static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
Robert Becker1e509152009-12-14 12:49:58 +11002228{
2229 struct timespec cur_time_mon;
2230 unsigned long hours_since_last;
2231 unsigned int read_errors = atomic_read(&rdev->read_errors);
2232
2233 ktime_get_ts(&cur_time_mon);
2234
2235 if (rdev->last_read_error.tv_sec == 0 &&
2236 rdev->last_read_error.tv_nsec == 0) {
2237 /* first time we've seen a read error */
2238 rdev->last_read_error = cur_time_mon;
2239 return;
2240 }
2241
2242 hours_since_last = (cur_time_mon.tv_sec -
2243 rdev->last_read_error.tv_sec) / 3600;
2244
2245 rdev->last_read_error = cur_time_mon;
2246
2247 /*
2248 * if hours_since_last is > the number of bits in read_errors
2249 * just set read errors to 0. We do this to avoid
2250 * overflowing the shift of read_errors by hours_since_last.
2251 */
2252 if (hours_since_last >= 8 * sizeof(read_errors))
2253 atomic_set(&rdev->read_errors, 0);
2254 else
2255 atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
2256}
2257
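/* Returns 1 if the I/O succeeded, 0 if it failed (after recording a
 * bad block or failing the device), and -1 if the I/O was not
 * attempted because the range overlaps a known bad block.
 */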
NeilBrown3cb03002011-10-11 16:45:26 +11002258static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
NeilBrown58c54fc2011-07-28 11:39:25 +10002259 int sectors, struct page *page, int rw)
2260{
2261 sector_t first_bad;
2262 int bad_sectors;
2263
2264 if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
2265 && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
2266 return -1;
2267 if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
2268 /* success */
2269 return 1;
NeilBrownb7044d42011-12-23 10:17:56 +11002270 if (rw == WRITE) {
NeilBrown58c54fc2011-07-28 11:39:25 +10002271 set_bit(WriteErrorSeen, &rdev->flags);
NeilBrownb7044d42011-12-23 10:17:56 +11002272 if (!test_and_set_bit(WantReplacement, &rdev->flags))
2273 set_bit(MD_RECOVERY_NEEDED,
2274 &rdev->mddev->recovery);
2275 }
NeilBrown58c54fc2011-07-28 11:39:25 +10002276 /* need to record an error - either for the block or the device */
2277 if (!rdev_set_badblocks(rdev, sector, sectors, 0))
2278 md_error(rdev->mddev, rdev);
2279 return 0;
2280}
2281
Robert Becker1e509152009-12-14 12:49:58 +11002282/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002283 * This is a kernel thread which:
2284 *
2285 * 1. Retries failed read operations on working mirrors.
2286 * 2. Updates the raid superblock when problems are encountered.
NeilBrown6814d532006-10-03 01:15:45 -07002287 * 3. Performs writes following reads for array synchronising.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002288 */
2289
NeilBrowne879a872011-10-11 16:49:02 +11002290static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio)
NeilBrown6814d532006-10-03 01:15:45 -07002291{
2292 int sect = 0; /* Offset from r10_bio->sector */
2293 int sectors = r10_bio->sectors;
NeilBrown3cb03002011-10-11 16:45:26 +11002294 struct md_rdev *rdev;
Robert Becker1e509152009-12-14 12:49:58 +11002295 int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
Prasanna S. Panchamukhi0544a212010-06-24 13:31:03 +10002296 int d = r10_bio->devs[r10_bio->read_slot].devnum;
Robert Becker1e509152009-12-14 12:49:58 +11002297
NeilBrown7c4e06f2011-05-11 14:53:17 +10002298 /* still own a reference to this rdev, so it cannot
2299 * have been cleared recently.
2300 */
2301 rdev = conf->mirrors[d].rdev;
Robert Becker1e509152009-12-14 12:49:58 +11002302
NeilBrown7c4e06f2011-05-11 14:53:17 +10002303 if (test_bit(Faulty, &rdev->flags))
2304 /* drive has already been failed, just ignore any
2305 * more fix_read_error() attempts */
2306 return;
2307
2308 check_decay_read_errors(mddev, rdev);
2309 atomic_inc(&rdev->read_errors);
2310 if (atomic_read(&rdev->read_errors) > max_read_errors) {
2311 char b[BDEVNAME_SIZE];
Robert Becker1e509152009-12-14 12:49:58 +11002312 bdevname(rdev->bdev, b);
2313
NeilBrown7c4e06f2011-05-11 14:53:17 +10002314 printk(KERN_NOTICE
2315 "md/raid10:%s: %s: Raid device exceeded "
2316 "read_error threshold [cur %d:max %d]\n",
2317 mdname(mddev), b,
2318 atomic_read(&rdev->read_errors), max_read_errors);
2319 printk(KERN_NOTICE
2320 "md/raid10:%s: %s: Failing raid device\n",
2321 mdname(mddev), b);
2322 md_error(mddev, conf->mirrors[d].rdev);
NeilBrownfae8cc5e2012-02-14 11:10:10 +11002323 r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
NeilBrown7c4e06f2011-05-11 14:53:17 +10002324 return;
Robert Becker1e509152009-12-14 12:49:58 +11002325 }
Robert Becker1e509152009-12-14 12:49:58 +11002326
NeilBrown6814d532006-10-03 01:15:45 -07002327 while (sectors) {
2328 int s = sectors;
2329 int sl = r10_bio->read_slot;
2330 int success = 0;
2331 int start;
2332
2333 if (s > (PAGE_SIZE>>9))
2334 s = PAGE_SIZE >> 9;
2335
2336 rcu_read_lock();
2337 do {
NeilBrown8dbed5c2011-07-28 11:39:24 +10002338 sector_t first_bad;
2339 int bad_sectors;
2340
Prasanna S. Panchamukhi0544a212010-06-24 13:31:03 +10002341 d = r10_bio->devs[sl].devnum;
NeilBrown6814d532006-10-03 01:15:45 -07002342 rdev = rcu_dereference(conf->mirrors[d].rdev);
2343 if (rdev &&
NeilBrown050b6612012-03-19 12:46:39 +11002344 !test_bit(Unmerged, &rdev->flags) &&
NeilBrown8dbed5c2011-07-28 11:39:24 +10002345 test_bit(In_sync, &rdev->flags) &&
2346 is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
2347 &first_bad, &bad_sectors) == 0) {
NeilBrown6814d532006-10-03 01:15:45 -07002348 atomic_inc(&rdev->nr_pending);
2349 rcu_read_unlock();
NeilBrown2b193362010-10-27 15:16:40 +11002350 success = sync_page_io(rdev,
NeilBrown6814d532006-10-03 01:15:45 -07002351 r10_bio->devs[sl].addr +
Jonathan Brassowccebd4c2011-01-14 09:14:33 +11002352 sect,
NeilBrown6814d532006-10-03 01:15:45 -07002353 s<<9,
Jonathan Brassowccebd4c2011-01-14 09:14:33 +11002354 conf->tmppage, READ, false);
NeilBrown6814d532006-10-03 01:15:45 -07002355 rdev_dec_pending(rdev, mddev);
2356 rcu_read_lock();
2357 if (success)
2358 break;
2359 }
2360 sl++;
2361 if (sl == conf->copies)
2362 sl = 0;
2363 } while (!success && sl != r10_bio->read_slot);
2364 rcu_read_unlock();
2365
2366 if (!success) {
NeilBrown58c54fc2011-07-28 11:39:25 +10002367 /* Cannot read from anywhere, just mark the block
2368 * as bad on the first device to discourage future
2369 * reads.
2370 */
NeilBrown6814d532006-10-03 01:15:45 -07002371 int dn = r10_bio->devs[r10_bio->read_slot].devnum;
NeilBrown58c54fc2011-07-28 11:39:25 +10002372 rdev = conf->mirrors[dn].rdev;
2373
2374 if (!rdev_set_badblocks(
2375 rdev,
2376 r10_bio->devs[r10_bio->read_slot].addr
2377 + sect,
NeilBrownfae8cc5e2012-02-14 11:10:10 +11002378 s, 0)) {
NeilBrown58c54fc2011-07-28 11:39:25 +10002379 md_error(mddev, rdev);
NeilBrownfae8cc5e2012-02-14 11:10:10 +11002380 r10_bio->devs[r10_bio->read_slot].bio
2381 = IO_BLOCKED;
2382 }
NeilBrown6814d532006-10-03 01:15:45 -07002383 break;
2384 }
2385
2386 start = sl;
2387 /* write it back and re-read */
2388 rcu_read_lock();
2389 while (sl != r10_bio->read_slot) {
Robert Becker67b8dc42009-12-14 12:49:57 +11002390 char b[BDEVNAME_SIZE];
Prasanna S. Panchamukhi0544a212010-06-24 13:31:03 +10002391
NeilBrown6814d532006-10-03 01:15:45 -07002392 if (sl == 0)
2393 sl = conf->copies;
2394 sl--;
2395 d = r10_bio->devs[sl].devnum;
2396 rdev = rcu_dereference(conf->mirrors[d].rdev);
NeilBrown1294b9c2011-07-28 11:39:23 +10002397 if (!rdev ||
NeilBrown050b6612012-03-19 12:46:39 +11002398 test_bit(Unmerged, &rdev->flags) ||
NeilBrown1294b9c2011-07-28 11:39:23 +10002399 !test_bit(In_sync, &rdev->flags))
2400 continue;
2401
2402 atomic_inc(&rdev->nr_pending);
2403 rcu_read_unlock();
NeilBrown58c54fc2011-07-28 11:39:25 +10002404 if (r10_sync_page_io(rdev,
2405 r10_bio->devs[sl].addr +
2406 sect,
NeilBrown055d3742012-07-03 15:55:33 +10002407 s, conf->tmppage, WRITE)
NeilBrown1294b9c2011-07-28 11:39:23 +10002408 == 0) {
2409 /* Well, this device is dead */
2410 printk(KERN_NOTICE
2411 "md/raid10:%s: read correction "
2412 "write failed"
2413 " (%d sectors at %llu on %s)\n",
2414 mdname(mddev), s,
2415 (unsigned long long)(
NeilBrownf8c9e742012-05-21 09:28:33 +10002416 sect +
2417 choose_data_offset(r10_bio,
2418 rdev)),
NeilBrown1294b9c2011-07-28 11:39:23 +10002419 bdevname(rdev->bdev, b));
2420 printk(KERN_NOTICE "md/raid10:%s: %s: failing "
2421 "drive\n",
2422 mdname(mddev),
2423 bdevname(rdev->bdev, b));
NeilBrown6814d532006-10-03 01:15:45 -07002424 }
NeilBrown1294b9c2011-07-28 11:39:23 +10002425 rdev_dec_pending(rdev, mddev);
2426 rcu_read_lock();
NeilBrown6814d532006-10-03 01:15:45 -07002427 }
2428 sl = start;
2429 while (sl != r10_bio->read_slot) {
NeilBrown1294b9c2011-07-28 11:39:23 +10002430 char b[BDEVNAME_SIZE];
Prasanna S. Panchamukhi0544a212010-06-24 13:31:03 +10002431
NeilBrown6814d532006-10-03 01:15:45 -07002432 if (sl == 0)
2433 sl = conf->copies;
2434 sl--;
2435 d = r10_bio->devs[sl].devnum;
2436 rdev = rcu_dereference(conf->mirrors[d].rdev);
NeilBrown1294b9c2011-07-28 11:39:23 +10002437 if (!rdev ||
2438 !test_bit(In_sync, &rdev->flags))
2439 continue;
Robert Becker67b8dc42009-12-14 12:49:57 +11002440
NeilBrown1294b9c2011-07-28 11:39:23 +10002441 atomic_inc(&rdev->nr_pending);
2442 rcu_read_unlock();
NeilBrown58c54fc2011-07-28 11:39:25 +10002443 switch (r10_sync_page_io(rdev,
2444 r10_bio->devs[sl].addr +
2445 sect,
NeilBrown055d3742012-07-03 15:55:33 +10002446 s, conf->tmppage,
NeilBrown58c54fc2011-07-28 11:39:25 +10002447 READ)) {
2448 case 0:
NeilBrown1294b9c2011-07-28 11:39:23 +10002449 /* Well, this device is dead */
2450 printk(KERN_NOTICE
2451 "md/raid10:%s: unable to read back "
2452 "corrected sectors"
2453 " (%d sectors at %llu on %s)\n",
2454 mdname(mddev), s,
2455 (unsigned long long)(
NeilBrownf8c9e742012-05-21 09:28:33 +10002456 sect +
2457 choose_data_offset(r10_bio, rdev)),
NeilBrown1294b9c2011-07-28 11:39:23 +10002458 bdevname(rdev->bdev, b));
2459 printk(KERN_NOTICE "md/raid10:%s: %s: failing "
2460 "drive\n",
2461 mdname(mddev),
2462 bdevname(rdev->bdev, b));
NeilBrown58c54fc2011-07-28 11:39:25 +10002463 break;
2464 case 1:
NeilBrown1294b9c2011-07-28 11:39:23 +10002465 printk(KERN_INFO
2466 "md/raid10:%s: read error corrected"
2467 " (%d sectors at %llu on %s)\n",
2468 mdname(mddev), s,
2469 (unsigned long long)(
NeilBrownf8c9e742012-05-21 09:28:33 +10002470 sect +
2471 choose_data_offset(r10_bio, rdev)),
NeilBrown1294b9c2011-07-28 11:39:23 +10002472 bdevname(rdev->bdev, b));
2473 atomic_add(s, &rdev->corrected_errors);
NeilBrown6814d532006-10-03 01:15:45 -07002474 }
NeilBrown1294b9c2011-07-28 11:39:23 +10002475
2476 rdev_dec_pending(rdev, mddev);
2477 rcu_read_lock();
NeilBrown6814d532006-10-03 01:15:45 -07002478 }
2479 rcu_read_unlock();
2480
2481 sectors -= s;
2482 sect += s;
2483 }
2484}
2485
NeilBrownbd870a12011-07-28 11:39:24 +10002486static void bi_complete(struct bio *bio, int error)
2487{
2488 complete((struct completion *)bio->bi_private);
2489}
2490
2491static int submit_bio_wait(int rw, struct bio *bio)
2492{
2493 struct completion event;
2494 rw |= REQ_SYNC;
2495
2496 init_completion(&event);
2497 bio->bi_private = &event;
2498 bio->bi_end_io = bi_complete;
2499 submit_bio(rw, bio);
2500 wait_for_completion(&event);
2501
2502 return test_bit(BIO_UPTODATE, &bio->bi_flags);
2503}
2504
NeilBrown9f2c9d12011-10-11 16:48:43 +11002505static int narrow_write_error(struct r10bio *r10_bio, int i)
NeilBrownbd870a12011-07-28 11:39:24 +10002506{
2507 struct bio *bio = r10_bio->master_bio;
NeilBrownfd01b882011-10-11 16:47:53 +11002508 struct mddev *mddev = r10_bio->mddev;
NeilBrowne879a872011-10-11 16:49:02 +11002509 struct r10conf *conf = mddev->private;
NeilBrown3cb03002011-10-11 16:45:26 +11002510 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
NeilBrownbd870a12011-07-28 11:39:24 +10002511 /* bio has the data to be written to slot 'i' where
2512 * we just recently had a write error.
2513 * We repeatedly clone the bio and trim down to one block,
2514 * then try the write. Where the write fails we record
2515 * a bad block.
2516 * It is conceivable that the bio doesn't exactly align with
2517 * blocks. We must handle this.
2518 *
2519 * We currently own a reference to the rdev.
2520 */
2521
2522 int block_sectors;
2523 sector_t sector;
2524 int sectors;
2525 int sect_to_write = r10_bio->sectors;
2526 int ok = 1;
2527
2528 if (rdev->badblocks.shift < 0)
2529 return 0;
2530
2531 block_sectors = 1 << rdev->badblocks.shift;
2532 sector = r10_bio->sector;
2533 sectors = ((r10_bio->sector + block_sectors)
2534 & ~(sector_t)(block_sectors - 1))
2535 - sector;
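	/* Worked example (illustrative numbers): with badblocks.shift == 3,
	 * block_sectors == 8.  If r10_bio->sector == 13 then
	 * sectors = ((13 + 8) & ~7) - 13 = 16 - 13 = 3, so the first
	 * write is trimmed to end on the next 8-sector bad-block
	 * boundary; later iterations write whole blocks.
	 */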
2536
2537 while (sect_to_write) {
2538 struct bio *wbio;
2539 if (sectors > sect_to_write)
2540 sectors = sect_to_write;
2541 /* Write at 'sector' for 'sectors' */
2542 wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
2543 md_trim_bio(wbio, sector - bio->bi_sector, sectors);
2544 wbio->bi_sector = (r10_bio->devs[i].addr+
NeilBrownf8c9e742012-05-21 09:28:33 +10002545 choose_data_offset(r10_bio, rdev) +
NeilBrownbd870a12011-07-28 11:39:24 +10002546 (sector - r10_bio->sector));
2547 wbio->bi_bdev = rdev->bdev;
2548 if (submit_bio_wait(WRITE, wbio) == 0)
2549 /* Failure! */
2550 ok = rdev_set_badblocks(rdev, sector,
2551 sectors, 0)
2552 && ok;
2553
2554 bio_put(wbio);
2555 sect_to_write -= sectors;
2556 sector += sectors;
2557 sectors = block_sectors;
2558 }
2559 return ok;
2560}
2561
NeilBrown9f2c9d12011-10-11 16:48:43 +11002562static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
NeilBrown560f8e52011-07-28 11:39:23 +10002563{
2564 int slot = r10_bio->read_slot;
NeilBrown560f8e52011-07-28 11:39:23 +10002565 struct bio *bio;
NeilBrowne879a872011-10-11 16:49:02 +11002566 struct r10conf *conf = mddev->private;
NeilBrownabbf0982011-12-23 10:17:54 +11002567 struct md_rdev *rdev = r10_bio->devs[slot].rdev;
NeilBrown560f8e52011-07-28 11:39:23 +10002568 char b[BDEVNAME_SIZE];
2569 unsigned long do_sync;
NeilBrown856e08e2011-07-28 11:39:23 +10002570 int max_sectors;
NeilBrown560f8e52011-07-28 11:39:23 +10002571
2572 /* We got a read error. Maybe the drive is bad, or maybe just
2573 * this block, in which case we can fix it.
2574 * We freeze all other IO, and try reading the block from
2575 * other devices. When we find one, we re-write
2576 * the block and check that this fixes the read error.
2577 * This is all done synchronously while the array is
2578 * frozen.
2579 */
NeilBrownfae8cc5e2012-02-14 11:10:10 +11002580 bio = r10_bio->devs[slot].bio;
2581 bdevname(bio->bi_bdev, b);
2582 bio_put(bio);
2583 r10_bio->devs[slot].bio = NULL;
2584
NeilBrown560f8e52011-07-28 11:39:23 +10002585 if (mddev->ro == 0) {
2586 freeze_array(conf);
2587 fix_read_error(conf, mddev, r10_bio);
2588 unfreeze_array(conf);
NeilBrownfae8cc5e2012-02-14 11:10:10 +11002589 } else
2590 r10_bio->devs[slot].bio = IO_BLOCKED;
2591
NeilBrownabbf0982011-12-23 10:17:54 +11002592 rdev_dec_pending(rdev, mddev);
NeilBrown560f8e52011-07-28 11:39:23 +10002593
NeilBrown7399c312011-07-28 11:39:23 +10002594read_more:
NeilBrown96c3fd12011-12-23 10:17:54 +11002595 rdev = read_balance(conf, r10_bio, &max_sectors);
2596 if (rdev == NULL) {
NeilBrown560f8e52011-07-28 11:39:23 +10002597 printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
2598 " read error for block %llu\n",
NeilBrown7399c312011-07-28 11:39:23 +10002599 mdname(mddev), b,
NeilBrown560f8e52011-07-28 11:39:23 +10002600 (unsigned long long)r10_bio->sector);
2601 raid_end_bio_io(r10_bio);
NeilBrown560f8e52011-07-28 11:39:23 +10002602 return;
2603 }
2604
2605 do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
NeilBrown560f8e52011-07-28 11:39:23 +10002606 slot = r10_bio->read_slot;
NeilBrown560f8e52011-07-28 11:39:23 +10002607 printk_ratelimited(
2608 KERN_ERR
NeilBrown055d3742012-07-03 15:55:33 +10002609 "md/raid10:%s: %s: redirecting "
NeilBrown560f8e52011-07-28 11:39:23 +10002610 "sector %llu to another mirror\n",
2611 mdname(mddev),
2612 bdevname(rdev->bdev, b),
2613 (unsigned long long)r10_bio->sector);
2614 bio = bio_clone_mddev(r10_bio->master_bio,
2615 GFP_NOIO, mddev);
NeilBrown7399c312011-07-28 11:39:23 +10002616 md_trim_bio(bio,
2617 r10_bio->sector - bio->bi_sector,
2618 max_sectors);
NeilBrown560f8e52011-07-28 11:39:23 +10002619 r10_bio->devs[slot].bio = bio;
NeilBrownabbf0982011-12-23 10:17:54 +11002620 r10_bio->devs[slot].rdev = rdev;
NeilBrown560f8e52011-07-28 11:39:23 +10002621 bio->bi_sector = r10_bio->devs[slot].addr
NeilBrownf8c9e742012-05-21 09:28:33 +10002622 + choose_data_offset(r10_bio, rdev);
NeilBrown560f8e52011-07-28 11:39:23 +10002623 bio->bi_bdev = rdev->bdev;
2624 bio->bi_rw = READ | do_sync;
2625 bio->bi_private = r10_bio;
2626 bio->bi_end_io = raid10_end_read_request;
NeilBrown7399c312011-07-28 11:39:23 +10002627 if (max_sectors < r10_bio->sectors) {
2628 /* Drat - have to split this up more */
2629 struct bio *mbio = r10_bio->master_bio;
2630 int sectors_handled =
2631 r10_bio->sector + max_sectors
2632 - mbio->bi_sector;
2633 r10_bio->sectors = max_sectors;
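		/* bi_phys_segments is borrowed here to count the
		 * r10_bios outstanding against the master bio: 0 means
		 * a single implicit reference, so the first split sets
		 * it to 2 and each further split adds one.
		 */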
2634 spin_lock_irq(&conf->device_lock);
2635 if (mbio->bi_phys_segments == 0)
2636 mbio->bi_phys_segments = 2;
2637 else
2638 mbio->bi_phys_segments++;
2639 spin_unlock_irq(&conf->device_lock);
2640 generic_make_request(bio);
NeilBrown7399c312011-07-28 11:39:23 +10002641
2642 r10_bio = mempool_alloc(conf->r10bio_pool,
2643 GFP_NOIO);
2644 r10_bio->master_bio = mbio;
2645 r10_bio->sectors = (mbio->bi_size >> 9)
2646 - sectors_handled;
2647 r10_bio->state = 0;
2648 set_bit(R10BIO_ReadError,
2649 &r10_bio->state);
2650 r10_bio->mddev = mddev;
2651 r10_bio->sector = mbio->bi_sector
2652 + sectors_handled;
2653
2654 goto read_more;
2655 } else
2656 generic_make_request(bio);
NeilBrown560f8e52011-07-28 11:39:23 +10002657}
2658
NeilBrowne879a872011-10-11 16:49:02 +11002659static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
NeilBrown749c55e2011-07-28 11:39:24 +10002660{
2661 /* Some sort of write request has finished and it
2662 * succeeded in writing where we thought there was a
2663 * bad block. So forget the bad block.
NeilBrown1a0b7cd2011-07-28 11:39:25 +10002664 * Or possibly it failed, and we need to record
2665 * a bad block.
NeilBrown749c55e2011-07-28 11:39:24 +10002666 */
2667 int m;
NeilBrown3cb03002011-10-11 16:45:26 +11002668 struct md_rdev *rdev;
NeilBrown749c55e2011-07-28 11:39:24 +10002669
2670 if (test_bit(R10BIO_IsSync, &r10_bio->state) ||
2671 test_bit(R10BIO_IsRecover, &r10_bio->state)) {
NeilBrown1a0b7cd2011-07-28 11:39:25 +10002672 for (m = 0; m < conf->copies; m++) {
2673 int dev = r10_bio->devs[m].devnum;
2674 rdev = conf->mirrors[dev].rdev;
2675 if (r10_bio->devs[m].bio == NULL)
2676 continue;
2677 if (test_bit(BIO_UPTODATE,
NeilBrown749c55e2011-07-28 11:39:24 +10002678 &r10_bio->devs[m].bio->bi_flags)) {
NeilBrown749c55e2011-07-28 11:39:24 +10002679 rdev_clear_badblocks(
2680 rdev,
2681 r10_bio->devs[m].addr,
NeilBrownc6563a82012-05-21 09:27:00 +10002682 r10_bio->sectors, 0);
NeilBrown1a0b7cd2011-07-28 11:39:25 +10002683 } else {
2684 if (!rdev_set_badblocks(
2685 rdev,
2686 r10_bio->devs[m].addr,
2687 r10_bio->sectors, 0))
2688 md_error(conf->mddev, rdev);
NeilBrown749c55e2011-07-28 11:39:24 +10002689 }
NeilBrown9ad1aef2011-12-23 10:17:55 +11002690 rdev = conf->mirrors[dev].replacement;
2691 if (r10_bio->devs[m].repl_bio == NULL)
2692 continue;
2693 if (test_bit(BIO_UPTODATE,
2694 &r10_bio->devs[m].repl_bio->bi_flags)) {
2695 rdev_clear_badblocks(
2696 rdev,
2697 r10_bio->devs[m].addr,
NeilBrownc6563a82012-05-21 09:27:00 +10002698 r10_bio->sectors, 0);
NeilBrown9ad1aef2011-12-23 10:17:55 +11002699 } else {
2700 if (!rdev_set_badblocks(
2701 rdev,
2702 r10_bio->devs[m].addr,
2703 r10_bio->sectors, 0))
2704 md_error(conf->mddev, rdev);
2705 }
NeilBrown1a0b7cd2011-07-28 11:39:25 +10002706 }
NeilBrown749c55e2011-07-28 11:39:24 +10002707 put_buf(r10_bio);
2708 } else {
NeilBrownbd870a12011-07-28 11:39:24 +10002709 for (m = 0; m < conf->copies; m++) {
2710 int dev = r10_bio->devs[m].devnum;
2711 struct bio *bio = r10_bio->devs[m].bio;
2712 rdev = conf->mirrors[dev].rdev;
2713 if (bio == IO_MADE_GOOD) {
NeilBrown749c55e2011-07-28 11:39:24 +10002714 rdev_clear_badblocks(
2715 rdev,
2716 r10_bio->devs[m].addr,
NeilBrownc6563a82012-05-21 09:27:00 +10002717 r10_bio->sectors, 0);
NeilBrown749c55e2011-07-28 11:39:24 +10002718 rdev_dec_pending(rdev, conf->mddev);
NeilBrownbd870a12011-07-28 11:39:24 +10002719 } else if (bio != NULL &&
2720 !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
2721 if (!narrow_write_error(r10_bio, m)) {
2722 md_error(conf->mddev, rdev);
2723 set_bit(R10BIO_Degraded,
2724 &r10_bio->state);
2725 }
2726 rdev_dec_pending(rdev, conf->mddev);
NeilBrown749c55e2011-07-28 11:39:24 +10002727 }
NeilBrown475b0322011-12-23 10:17:55 +11002728 bio = r10_bio->devs[m].repl_bio;
2729 rdev = conf->mirrors[dev].replacement;
NeilBrown4ca40c22011-12-23 10:17:55 +11002730 if (rdev && bio == IO_MADE_GOOD) {
NeilBrown475b0322011-12-23 10:17:55 +11002731 rdev_clear_badblocks(
2732 rdev,
2733 r10_bio->devs[m].addr,
NeilBrownc6563a82012-05-21 09:27:00 +10002734 r10_bio->sectors, 0);
NeilBrown475b0322011-12-23 10:17:55 +11002735 rdev_dec_pending(rdev, conf->mddev);
2736 }
NeilBrownbd870a12011-07-28 11:39:24 +10002737 }
2738 if (test_bit(R10BIO_WriteError,
2739 &r10_bio->state))
2740 close_write(r10_bio);
NeilBrown749c55e2011-07-28 11:39:24 +10002741 raid_end_bio_io(r10_bio);
2742 }
2743}
2744
Shaohua Li4ed87312012-10-11 13:34:00 +11002745static void raid10d(struct md_thread *thread)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002746{
Shaohua Li4ed87312012-10-11 13:34:00 +11002747 struct mddev *mddev = thread->mddev;
NeilBrown9f2c9d12011-10-11 16:48:43 +11002748 struct r10bio *r10_bio;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002749 unsigned long flags;
NeilBrowne879a872011-10-11 16:49:02 +11002750 struct r10conf *conf = mddev->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002751 struct list_head *head = &conf->retry_list;
NeilBrowne1dfa0a2011-04-18 18:25:41 +10002752 struct blk_plug plug;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002753
2754 md_check_recovery(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002755
NeilBrowne1dfa0a2011-04-18 18:25:41 +10002756 blk_start_plug(&plug);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002757 for (;;) {
NeilBrowna35e63e2008-03-04 14:29:29 -08002758
NeilBrown0021b7b2012-07-31 09:08:14 +02002759 flush_pending_writes(conf);
NeilBrowna35e63e2008-03-04 14:29:29 -08002760
Linus Torvalds1da177e2005-04-16 15:20:36 -07002761 spin_lock_irqsave(&conf->device_lock, flags);
NeilBrowna35e63e2008-03-04 14:29:29 -08002762 if (list_empty(head)) {
NeilBrown6cce3b232006-01-06 00:20:16 -08002763 spin_unlock_irqrestore(&conf->device_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764 break;
NeilBrowna35e63e2008-03-04 14:29:29 -08002765 }
NeilBrown9f2c9d12011-10-11 16:48:43 +11002766 r10_bio = list_entry(head->prev, struct r10bio, retry_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002767 list_del(head->prev);
NeilBrown4443ae12006-01-06 00:20:28 -08002768 conf->nr_queued--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002769 spin_unlock_irqrestore(&conf->device_lock, flags);
2770
2771 mddev = r10_bio->mddev;
NeilBrown070ec552009-06-16 16:54:21 +10002772 conf = mddev->private;
NeilBrownbd870a12011-07-28 11:39:24 +10002773 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
2774 test_bit(R10BIO_WriteError, &r10_bio->state))
NeilBrown749c55e2011-07-28 11:39:24 +10002775 handle_write_completed(conf, r10_bio);
NeilBrown3ea7daa2012-05-22 13:53:47 +10002776 else if (test_bit(R10BIO_IsReshape, &r10_bio->state))
2777 reshape_request_write(mddev, r10_bio);
NeilBrown749c55e2011-07-28 11:39:24 +10002778 else if (test_bit(R10BIO_IsSync, &r10_bio->state))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002779 sync_request_write(mddev, r10_bio);
Jens Axboe7eaceac2011-03-10 08:52:07 +01002780 else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002781 recovery_request_write(mddev, r10_bio);
NeilBrown856e08e2011-07-28 11:39:23 +10002782 else if (test_bit(R10BIO_ReadError, &r10_bio->state))
NeilBrown560f8e52011-07-28 11:39:23 +10002783 handle_read_error(mddev, r10_bio);
NeilBrown856e08e2011-07-28 11:39:23 +10002784 else {
2785 /* just a partial read to be scheduled from a
2786 * separate context
2787 */
2788 int slot = r10_bio->read_slot;
2789 generic_make_request(r10_bio->devs[slot].bio);
2790 }
NeilBrown4443ae12006-01-06 00:20:28 -08002791
NeilBrown1d9d5242009-10-16 15:55:32 +11002792 cond_resched();
NeilBrownde393cd2011-07-28 11:31:48 +10002793 if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
2794 md_check_recovery(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002795 }
NeilBrowne1dfa0a2011-04-18 18:25:41 +10002796 blk_finish_plug(&plug);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002797}
2798
2799
NeilBrowne879a872011-10-11 16:49:02 +11002800static int init_resync(struct r10conf *conf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002801{
2802 int buffs;
NeilBrown69335ef2011-12-23 10:17:54 +11002803 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002804
2805 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
Eric Sesterhennb6385482006-04-02 13:34:29 +02002806 BUG_ON(conf->r10buf_pool);
NeilBrown69335ef2011-12-23 10:17:54 +11002807 conf->have_replacement = 0;
NeilBrown5cf00fc2012-05-21 09:28:20 +10002808 for (i = 0; i < conf->geo.raid_disks; i++)
NeilBrown69335ef2011-12-23 10:17:54 +11002809 if (conf->mirrors[i].replacement)
2810 conf->have_replacement = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002811 conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
2812 if (!conf->r10buf_pool)
2813 return -ENOMEM;
2814 conf->next_resync = 0;
2815 return 0;
2816}
2817
2818/*
2819 * perform a "sync" on one "block"
2820 *
2821 * We need to make sure that normal I/O requests - particularly write
2822 * requests - do not conflict with active sync requests.
2823 *
2824 * This is achieved by tracking pending requests and a 'barrier' concept
2825 * that can be installed to exclude normal IO requests.
2826 *
2827 * Resync and recovery are handled very differently.
2828 * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
2829 *
2830 * For resync, we iterate over virtual addresses, read all copies,
2831 * and update if there are differences. If only one copy is live,
2832 * skip it.
2833 * For recovery, we iterate over physical addresses, read a good
2834 * value for each non-in_sync drive, and over-write.
2835 *
2836 * So, for recovery we may have several outstanding complex requests for a
2837 * given address, one for each out-of-sync device. We model this by allocating
2838 * a number of r10_bio structures, one for each out-of-sync device.
2839 * As we set up these structures, we collect all bios together into a list
2840 * which we then process collectively to add pages, and then process again
2841 * to pass to generic_make_request.
2842 *
2843 * The r10_bio structures are linked using a borrowed master_bio pointer.
2844 * This link is counted in ->remaining. When the r10_bio that points to NULL
2845 * has its remaining count decremented to 0, the whole complex operation
2846 * is complete.
2847 *
2848 */
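/* Illustrative sketch (not driver code) of the chain described above,
 * for a recovery touching two out-of-sync devices; r10_bio_A/B are
 * assumed names:
 *
 *	r10_bio_A->master_bio = NULL;			end of chain
 *	r10_bio_B->master_bio = (struct bio *)r10_bio_A;
 *	atomic_inc(&r10_bio_A->remaining);		B holds a ref on A
 *
 * When r10_bio_A's 'remaining' count drops to 0, the whole complex
 * operation is complete.
 */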
2849
NeilBrownfd01b882011-10-11 16:47:53 +11002850static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
NeilBrownab9d47e2011-05-11 14:54:41 +10002851 int *skipped, int go_faster)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852{
NeilBrowne879a872011-10-11 16:49:02 +11002853 struct r10conf *conf = mddev->private;
NeilBrown9f2c9d12011-10-11 16:48:43 +11002854 struct r10bio *r10_bio;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002855 struct bio *biolist = NULL, *bio;
2856 sector_t max_sector, nr_sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002857 int i;
NeilBrown6cce3b232006-01-06 00:20:16 -08002858 int max_sync;
NeilBrown57dab0b2010-10-19 10:03:39 +11002859 sector_t sync_blocks;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002860 sector_t sectors_skipped = 0;
2861 int chunks_skipped = 0;
NeilBrown5cf00fc2012-05-21 09:28:20 +10002862 sector_t chunk_mask = conf->geo.chunk_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002863
2864 if (!conf->r10buf_pool)
2865 if (init_resync(conf))
NeilBrown57afd892005-06-21 17:17:13 -07002866 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002867
2868 skipped:
Andre Noll58c0fed2009-03-31 14:33:13 +11002869 max_sector = mddev->dev_sectors;
NeilBrown3ea7daa2012-05-22 13:53:47 +10002870 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
2871 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002872 max_sector = mddev->resync_max_sectors;
2873 if (sector_nr >= max_sector) {
NeilBrown6cce3b232006-01-06 00:20:16 -08002874 /* If we aborted, we need to abort the
2875 * sync on the 'current' bitmap chunks (there can
2876 * be several when recovering multiple devices),
2877 * as we may have started syncing them but not finished.
2878 * We can find the current address in
2879 * mddev->curr_resync, but for recovery,
2880 * we need to convert that to several
2881 * virtual addresses.
2882 */
NeilBrown3ea7daa2012-05-22 13:53:47 +10002883 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
2884 end_reshape(conf);
2885 return 0;
2886 }
2887
NeilBrown6cce3b232006-01-06 00:20:16 -08002888 if (mddev->curr_resync < max_sector) { /* aborted */
2889 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
2890 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2891 &sync_blocks, 1);
NeilBrown5cf00fc2012-05-21 09:28:20 +10002892 else for (i = 0; i < conf->geo.raid_disks; i++) {
NeilBrown6cce3b232006-01-06 00:20:16 -08002893 sector_t sect =
2894 raid10_find_virt(conf, mddev->curr_resync, i);
2895 bitmap_end_sync(mddev->bitmap, sect,
2896 &sync_blocks, 1);
2897 }
NeilBrown9ad1aef2011-12-23 10:17:55 +11002898 } else {
2899 /* completed sync */
2900 if ((!mddev->bitmap || conf->fullsync)
2901 && conf->have_replacement
2902 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2903 /* Completed a full sync so the replacements
2904 * are now fully recovered.
2905 */
NeilBrown5cf00fc2012-05-21 09:28:20 +10002906 for (i = 0; i < conf->geo.raid_disks; i++)
NeilBrown9ad1aef2011-12-23 10:17:55 +11002907 if (conf->mirrors[i].replacement)
2908 conf->mirrors[i].replacement
2909 ->recovery_offset
2910 = MaxSector;
2911 }
NeilBrown6cce3b232006-01-06 00:20:16 -08002912 conf->fullsync = 0;
NeilBrown9ad1aef2011-12-23 10:17:55 +11002913 }
NeilBrown6cce3b232006-01-06 00:20:16 -08002914 bitmap_close_sync(mddev->bitmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002915 close_sync(conf);
NeilBrown57afd892005-06-21 17:17:13 -07002916 *skipped = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002917 return sectors_skipped;
2918 }
NeilBrown3ea7daa2012-05-22 13:53:47 +10002919
2920 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
2921 return reshape_request(mddev, sector_nr, skipped);
2922
NeilBrown5cf00fc2012-05-21 09:28:20 +10002923 if (chunks_skipped >= conf->geo.raid_disks) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002924 /* if there has been nothing to do on any drive,
2925 * then there is nothing to do at all..
2926 */
NeilBrown57afd892005-06-21 17:17:13 -07002927 *skipped = 1;
2928 return (max_sector - sector_nr) + sectors_skipped;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002929 }
2930
NeilBrownc6207272008-02-06 01:39:52 -08002931 if (max_sector > mddev->resync_max)
2932 max_sector = mddev->resync_max; /* Don't do IO beyond here */
2933
Linus Torvalds1da177e2005-04-16 15:20:36 -07002934 /* make sure whole request will fit in a chunk - if chunks
2935 * are meaningful
2936 */
NeilBrown5cf00fc2012-05-21 09:28:20 +10002937 if (conf->geo.near_copies < conf->geo.raid_disks &&
2938 max_sector > (sector_nr | chunk_mask))
2939 max_sector = (sector_nr | chunk_mask) + 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002940 /*
2941 * If there is non-resync activity waiting for us then
2942 * put in a delay to throttle resync.
2943 */
NeilBrown0a27ec92006-01-06 00:20:13 -08002944 if (!go_faster && conf->nr_waiting)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002945 msleep_interruptible(1000);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002946
2947 /* Again, very different code for resync and recovery.
2948 * Both must result in an r10bio with a list of bios that
2949 * have bi_end_io, bi_sector, bi_bdev set,
2950 * and bi_private set to the r10bio.
2951 * For recovery, we may actually create several r10bios
2952 * with 2 bios in each, that correspond to the bios in the main one.
2953 * In this case, the subordinate r10bios link back through a
2954 * borrowed master_bio pointer, and the counter in the master
2955 * includes a ref from each subordinate.
2956 */
2957 /* First, we decide what to do and set ->bi_end_io
2958 * to end_sync_read if we want to read, and
2959 * end_sync_write if we want to write.
2960 */
2961
NeilBrown6cce3b232006-01-06 00:20:16 -08002962 max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002963 if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2964 /* recovery... the complicated one */
NeilBrowne875ece2011-07-28 11:39:24 +10002965 int j;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002966 r10_bio = NULL;
2967
NeilBrown5cf00fc2012-05-21 09:28:20 +10002968 for (i = 0 ; i < conf->geo.raid_disks; i++) {
NeilBrownab9d47e2011-05-11 14:54:41 +10002969 int still_degraded;
NeilBrown9f2c9d12011-10-11 16:48:43 +11002970 struct r10bio *rb2;
NeilBrownab9d47e2011-05-11 14:54:41 +10002971 sector_t sect;
2972 int must_sync;
NeilBrowne875ece2011-07-28 11:39:24 +10002973 int any_working;
Jonathan Brassowdc280d982012-07-31 10:03:52 +10002974 struct raid10_info *mirror = &conf->mirrors[i];
NeilBrownab9d47e2011-05-11 14:54:41 +10002975
NeilBrown24afd802011-12-23 10:17:55 +11002976 if ((mirror->rdev == NULL ||
2977 test_bit(In_sync, &mirror->rdev->flags))
2978 &&
2979 (mirror->replacement == NULL ||
2980 test_bit(Faulty,
2981 &mirror->replacement->flags)))
NeilBrownab9d47e2011-05-11 14:54:41 +10002982 continue;
2983
2984 still_degraded = 0;
2985 /* want to reconstruct this device */
2986 rb2 = r10_bio;
2987 sect = raid10_find_virt(conf, sector_nr, i);
NeilBrownfc448a12012-07-03 10:37:30 +10002988 if (sect >= mddev->resync_max_sectors) {
2989 /* last stripe is not complete - don't
2990 * try to recover this sector.
2991 */
2992 continue;
2993 }
NeilBrown24afd802011-12-23 10:17:55 +11002994 /* Unless we are doing a full sync, or a replacement
2995 * we only need to recover the block if it is set in
2996 * the bitmap
NeilBrownab9d47e2011-05-11 14:54:41 +10002997 */
2998 must_sync = bitmap_start_sync(mddev->bitmap, sect,
2999 &sync_blocks, 1);
3000 if (sync_blocks < max_sync)
3001 max_sync = sync_blocks;
3002 if (!must_sync &&
NeilBrown24afd802011-12-23 10:17:55 +11003003 mirror->replacement == NULL &&
NeilBrownab9d47e2011-05-11 14:54:41 +10003004 !conf->fullsync) {
3005 /* yep, skip the sync_blocks, but don't assume
3006 * that there will never be anything to do here
NeilBrown6cce3b232006-01-06 00:20:16 -08003007 */
NeilBrownab9d47e2011-05-11 14:54:41 +10003008 chunks_skipped = -1;
3009 continue;
3010 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003011
NeilBrownab9d47e2011-05-11 14:54:41 +10003012 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
3013 raise_barrier(conf, rb2 != NULL);
3014 atomic_set(&r10_bio->remaining, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003015
NeilBrownab9d47e2011-05-11 14:54:41 +10003016 r10_bio->master_bio = (struct bio*)rb2;
3017 if (rb2)
3018 atomic_inc(&rb2->remaining);
3019 r10_bio->mddev = mddev;
3020 set_bit(R10BIO_IsRecover, &r10_bio->state);
3021 r10_bio->sector = sect;
NeilBrown6cce3b232006-01-06 00:20:16 -08003022
NeilBrownab9d47e2011-05-11 14:54:41 +10003023 raid10_find_phys(conf, r10_bio);
NeilBrown18055562009-05-07 12:48:10 +10003024
NeilBrownab9d47e2011-05-11 14:54:41 +10003025 /* Need to check if the array will still be
3026 * degraded
3027 */
NeilBrown5cf00fc2012-05-21 09:28:20 +10003028 for (j = 0; j < conf->geo.raid_disks; j++)
NeilBrownab9d47e2011-05-11 14:54:41 +10003029 if (conf->mirrors[j].rdev == NULL ||
3030 test_bit(Faulty, &conf->mirrors[j].rdev->flags)) {
3031 still_degraded = 1;
NeilBrown87fc7672005-09-09 16:24:04 -07003032 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003033 }
NeilBrownab9d47e2011-05-11 14:54:41 +10003034
3035 must_sync = bitmap_start_sync(mddev->bitmap, sect,
3036 &sync_blocks, still_degraded);
3037
NeilBrowne875ece2011-07-28 11:39:24 +10003038 any_working = 0;
NeilBrownab9d47e2011-05-11 14:54:41 +10003039 for (j=0; j<conf->copies;j++) {
NeilBrowne875ece2011-07-28 11:39:24 +10003040 int k;
NeilBrownab9d47e2011-05-11 14:54:41 +10003041 int d = r10_bio->devs[j].devnum;
NeilBrown5e570282011-07-28 11:39:25 +10003042 sector_t from_addr, to_addr;
NeilBrown3cb03002011-10-11 16:45:26 +11003043 struct md_rdev *rdev;
NeilBrown40c356c2011-07-28 11:39:24 +10003044 sector_t sector, first_bad;
3045 int bad_sectors;
NeilBrownab9d47e2011-05-11 14:54:41 +10003046 if (!conf->mirrors[d].rdev ||
3047 !test_bit(In_sync, &conf->mirrors[d].rdev->flags))
3048 continue;
3049 /* This is where we read from */
NeilBrowne875ece2011-07-28 11:39:24 +10003050 any_working = 1;
NeilBrown40c356c2011-07-28 11:39:24 +10003051 rdev = conf->mirrors[d].rdev;
3052 sector = r10_bio->devs[j].addr;
3053
3054 if (is_badblock(rdev, sector, max_sync,
3055 &first_bad, &bad_sectors)) {
3056 if (first_bad > sector)
3057 max_sync = first_bad - sector;
3058 else {
3059 bad_sectors -= (sector
3060 - first_bad);
3061 if (max_sync > bad_sectors)
3062 max_sync = bad_sectors;
3063 continue;
3064 }
3065 }
NeilBrownab9d47e2011-05-11 14:54:41 +10003066 bio = r10_bio->devs[0].bio;
3067 bio->bi_next = biolist;
3068 biolist = bio;
3069 bio->bi_private = r10_bio;
3070 bio->bi_end_io = end_sync_read;
3071 bio->bi_rw = READ;
NeilBrown5e570282011-07-28 11:39:25 +10003072 from_addr = r10_bio->devs[j].addr;
NeilBrown24afd802011-12-23 10:17:55 +11003073 bio->bi_sector = from_addr + rdev->data_offset;
3074 bio->bi_bdev = rdev->bdev;
3075 atomic_inc(&rdev->nr_pending);
3076 /* and we write to 'i' (if not in_sync) */
NeilBrownab9d47e2011-05-11 14:54:41 +10003077
3078 for (k=0; k<conf->copies; k++)
3079 if (r10_bio->devs[k].devnum == i)
3080 break;
3081 BUG_ON(k == conf->copies);
NeilBrown5e570282011-07-28 11:39:25 +10003082 to_addr = r10_bio->devs[k].addr;
NeilBrownab9d47e2011-05-11 14:54:41 +10003083 r10_bio->devs[0].devnum = d;
NeilBrown5e570282011-07-28 11:39:25 +10003084 r10_bio->devs[0].addr = from_addr;
NeilBrownab9d47e2011-05-11 14:54:41 +10003085 r10_bio->devs[1].devnum = i;
NeilBrown5e570282011-07-28 11:39:25 +10003086 r10_bio->devs[1].addr = to_addr;
NeilBrownab9d47e2011-05-11 14:54:41 +10003087
NeilBrown24afd802011-12-23 10:17:55 +11003088 rdev = mirror->rdev;
3089 if (!test_bit(In_sync, &rdev->flags)) {
3090 bio = r10_bio->devs[1].bio;
3091 bio->bi_next = biolist;
3092 biolist = bio;
3093 bio->bi_private = r10_bio;
3094 bio->bi_end_io = end_sync_write;
3095 bio->bi_rw = WRITE;
3096 bio->bi_sector = to_addr
3097 + rdev->data_offset;
3098 bio->bi_bdev = rdev->bdev;
3099 atomic_inc(&r10_bio->remaining);
3100 } else
3101 r10_bio->devs[1].bio->bi_end_io = NULL;
3102
3103 /* and maybe write to replacement */
3104 bio = r10_bio->devs[1].repl_bio;
3105 if (bio)
3106 bio->bi_end_io = NULL;
3107 rdev = mirror->replacement;
3108 /* Note: if rdev != NULL, then bio
3109 * cannot be NULL as r10buf_pool_alloc will
3110 * have allocated it.
3111 * So the second test here is pointless.
3112 * But it keeps semantic-checkers happy, and
3113 * this comment keeps human reviewers
3114 * happy.
3115 */
3116 if (rdev == NULL || bio == NULL ||
3117 test_bit(Faulty, &rdev->flags))
3118 break;
3119 bio->bi_next = biolist;
3120 biolist = bio;
3121 bio->bi_private = r10_bio;
3122 bio->bi_end_io = end_sync_write;
3123 bio->bi_rw = WRITE;
3124 bio->bi_sector = to_addr + rdev->data_offset;
3125 bio->bi_bdev = rdev->bdev;
3126 atomic_inc(&r10_bio->remaining);
NeilBrownab9d47e2011-05-11 14:54:41 +10003127 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003128 }
NeilBrownab9d47e2011-05-11 14:54:41 +10003129 if (j == conf->copies) {
NeilBrowne875ece2011-07-28 11:39:24 +10003130 /* Cannot recover, so abort the recovery or
3131 * record a bad block */
NeilBrownab9d47e2011-05-11 14:54:41 +10003132 put_buf(r10_bio);
3133 if (rb2)
3134 atomic_dec(&rb2->remaining);
3135 r10_bio = rb2;
NeilBrowne875ece2011-07-28 11:39:24 +10003136 if (any_working) {
3137 /* problem is that there are bad blocks
3138 * on other device(s)
3139 */
3140 int k;
3141 for (k = 0; k < conf->copies; k++)
3142 if (r10_bio->devs[k].devnum == i)
3143 break;
NeilBrown24afd802011-12-23 10:17:55 +11003144 if (!test_bit(In_sync,
3145 &mirror->rdev->flags)
3146 && !rdev_set_badblocks(
3147 mirror->rdev,
3148 r10_bio->devs[k].addr,
3149 max_sync, 0))
3150 any_working = 0;
3151 if (mirror->replacement &&
3152 !rdev_set_badblocks(
3153 mirror->replacement,
NeilBrowne875ece2011-07-28 11:39:24 +10003154 r10_bio->devs[k].addr,
3155 max_sync, 0))
3156 any_working = 0;
3157 }
3158 if (!any_working) {
3159 if (!test_and_set_bit(MD_RECOVERY_INTR,
3160 &mddev->recovery))
3161 printk(KERN_INFO "md/raid10:%s: insufficient "
3162 "working devices for recovery.\n",
3163 mdname(mddev));
NeilBrown24afd802011-12-23 10:17:55 +11003164 mirror->recovery_disabled
NeilBrowne875ece2011-07-28 11:39:24 +10003165 = mddev->recovery_disabled;
3166 }
NeilBrownab9d47e2011-05-11 14:54:41 +10003167 break;
3168 }
3169 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003170 if (biolist == NULL) {
3171 while (r10_bio) {
NeilBrown9f2c9d12011-10-11 16:48:43 +11003172 struct r10bio *rb2 = r10_bio;
3173 r10_bio = (struct r10bio*) rb2->master_bio;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003174 rb2->master_bio = NULL;
3175 put_buf(rb2);
3176 }
3177 goto giveup;
3178 }
3179 } else {
3180 /* resync. Schedule a read for every block at this virt offset */
3181 int count = 0;
NeilBrown6cce3b232006-01-06 00:20:16 -08003182
NeilBrown78200d42009-02-25 13:18:47 +11003183 bitmap_cond_end_sync(mddev->bitmap, sector_nr);
3184
NeilBrown6cce3b232006-01-06 00:20:16 -08003185 if (!bitmap_start_sync(mddev->bitmap, sector_nr,
3186 &sync_blocks, mddev->degraded) &&
NeilBrownab9d47e2011-05-11 14:54:41 +10003187 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
3188 &mddev->recovery)) {
NeilBrown6cce3b232006-01-06 00:20:16 -08003189 /* We can skip this block */
3190 *skipped = 1;
3191 return sync_blocks + sectors_skipped;
3192 }
3193 if (sync_blocks < max_sync)
3194 max_sync = sync_blocks;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003195 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
3196
Linus Torvalds1da177e2005-04-16 15:20:36 -07003197 r10_bio->mddev = mddev;
3198 atomic_set(&r10_bio->remaining, 0);
NeilBrown6cce3b232006-01-06 00:20:16 -08003199 raise_barrier(conf, 0);
3200 conf->next_resync = sector_nr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003201
3202 r10_bio->master_bio = NULL;
3203 r10_bio->sector = sector_nr;
3204 set_bit(R10BIO_IsSync, &r10_bio->state);
3205 raid10_find_phys(conf, r10_bio);
NeilBrown5cf00fc2012-05-21 09:28:20 +10003206 r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003207
NeilBrown5cf00fc2012-05-21 09:28:20 +10003208 for (i = 0; i < conf->copies; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003209 int d = r10_bio->devs[i].devnum;
NeilBrown40c356c2011-07-28 11:39:24 +10003210 sector_t first_bad, sector;
3211 int bad_sectors;
3212
NeilBrown9ad1aef2011-12-23 10:17:55 +11003213 if (r10_bio->devs[i].repl_bio)
3214 r10_bio->devs[i].repl_bio->bi_end_io = NULL;
3215
Linus Torvalds1da177e2005-04-16 15:20:36 -07003216 bio = r10_bio->devs[i].bio;
3217 bio->bi_end_io = NULL;
NeilBrownaf03b8e2007-06-16 10:16:06 -07003218 clear_bit(BIO_UPTODATE, &bio->bi_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003219 if (conf->mirrors[d].rdev == NULL ||
NeilBrownb2d444d2005-11-08 21:39:31 -08003220 test_bit(Faulty, &conf->mirrors[d].rdev->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003221 continue;
NeilBrown40c356c2011-07-28 11:39:24 +10003222 sector = r10_bio->devs[i].addr;
3223 if (is_badblock(conf->mirrors[d].rdev,
3224 sector, max_sync,
3225 &first_bad, &bad_sectors)) {
3226 if (first_bad > sector)
3227 max_sync = first_bad - sector;
3228 else {
3229 bad_sectors -= (sector - first_bad);
3230 if (max_sync > bad_sectors)
Dan Carpenter91502f02012-10-11 14:20:58 +11003231 max_sync = bad_sectors;
NeilBrown40c356c2011-07-28 11:39:24 +10003232 continue;
3233 }
3234 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003235 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
3236 atomic_inc(&r10_bio->remaining);
3237 bio->bi_next = biolist;
3238 biolist = bio;
3239 bio->bi_private = r10_bio;
3240 bio->bi_end_io = end_sync_read;
NeilBrown802ba062006-12-13 00:34:13 -08003241 bio->bi_rw = READ;
NeilBrown40c356c2011-07-28 11:39:24 +10003242 bio->bi_sector = sector +
Linus Torvalds1da177e2005-04-16 15:20:36 -07003243 conf->mirrors[d].rdev->data_offset;
3244 bio->bi_bdev = conf->mirrors[d].rdev->bdev;
3245 count++;
NeilBrown9ad1aef2011-12-23 10:17:55 +11003246
3247 if (conf->mirrors[d].replacement == NULL ||
3248 test_bit(Faulty,
3249 &conf->mirrors[d].replacement->flags))
3250 continue;
3251
3252 /* Need to set up for writing to the replacement */
3253 bio = r10_bio->devs[i].repl_bio;
3254 clear_bit(BIO_UPTODATE, &bio->bi_flags);
3255
3256 sector = r10_bio->devs[i].addr;
3257 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
3258 bio->bi_next = biolist;
3259 biolist = bio;
3260 bio->bi_private = r10_bio;
3261 bio->bi_end_io = end_sync_write;
3262 bio->bi_rw = WRITE;
3263 bio->bi_sector = sector +
3264 conf->mirrors[d].replacement->data_offset;
3265 bio->bi_bdev = conf->mirrors[d].replacement->bdev;
3266 count++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003267 }
3268
3269 if (count < 2) {
3270 for (i=0; i<conf->copies; i++) {
3271 int d = r10_bio->devs[i].devnum;
3272 if (r10_bio->devs[i].bio->bi_end_io)
NeilBrownab9d47e2011-05-11 14:54:41 +10003273 rdev_dec_pending(conf->mirrors[d].rdev,
3274 mddev);
NeilBrown9ad1aef2011-12-23 10:17:55 +11003275 if (r10_bio->devs[i].repl_bio &&
3276 r10_bio->devs[i].repl_bio->bi_end_io)
3277 rdev_dec_pending(
3278 conf->mirrors[d].replacement,
3279 mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003280 }
3281 put_buf(r10_bio);
3282 biolist = NULL;
3283 goto giveup;
3284 }
3285 }
3286
3287 for (bio = biolist; bio ; bio=bio->bi_next) {
3288
3289 bio->bi_flags &= ~(BIO_POOL_MASK - 1);
3290 if (bio->bi_end_io)
3291 bio->bi_flags |= 1 << BIO_UPTODATE;
3292 bio->bi_vcnt = 0;
3293 bio->bi_idx = 0;
3294 bio->bi_phys_segments = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003295 bio->bi_size = 0;
3296 }
3297
3298 nr_sectors = 0;
NeilBrown6cce3b232006-01-06 00:20:16 -08003299 if (sector_nr + max_sync < max_sector)
3300 max_sector = sector_nr + max_sync;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003301 do {
3302 struct page *page;
3303 int len = PAGE_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003304 if (sector_nr + (len>>9) > max_sector)
3305 len = (max_sector - sector_nr) << 9;
3306 if (len == 0)
3307 break;
3308 for (bio= biolist ; bio ; bio=bio->bi_next) {
NeilBrownab9d47e2011-05-11 14:54:41 +10003309 struct bio *bio2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003310 page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
NeilBrownab9d47e2011-05-11 14:54:41 +10003311 if (bio_add_page(bio, page, len, 0))
3312 continue;
3313
3314 /* stop here */
3315 bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
3316 for (bio2 = biolist;
3317 bio2 && bio2 != bio;
3318 bio2 = bio2->bi_next) {
3319 /* remove last page from this bio */
3320 bio2->bi_vcnt--;
3321 bio2->bi_size -= len;
3322 bio2->bi_flags &= ~(1<< BIO_SEG_VALID);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003323 }
NeilBrownab9d47e2011-05-11 14:54:41 +10003324 goto bio_full;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003325 }
3326 nr_sectors += len>>9;
3327 sector_nr += len>>9;
3328 } while (biolist->bi_vcnt < RESYNC_PAGES);
3329 bio_full:
3330 r10_bio->sectors = nr_sectors;
3331
3332 while (biolist) {
3333 bio = biolist;
3334 biolist = biolist->bi_next;
3335
3336 bio->bi_next = NULL;
3337 r10_bio = bio->bi_private;
3338 r10_bio->sectors = nr_sectors;
3339
3340 if (bio->bi_end_io == end_sync_read) {
3341 md_sync_acct(bio->bi_bdev, nr_sectors);
3342 generic_make_request(bio);
3343 }
3344 }
3345
NeilBrown57afd892005-06-21 17:17:13 -07003346 if (sectors_skipped)
3347 /* pretend they weren't skipped; it makes
3348 * no important difference in this case
3349 */
3350 md_done_sync(mddev, sectors_skipped, 1);
3351
Linus Torvalds1da177e2005-04-16 15:20:36 -07003352 return sectors_skipped + nr_sectors;
3353 giveup:
3354 /* There is nowhere to write, so all non-sync
NeilBrowne875ece2011-07-28 11:39:24 +10003355 * drives must be failed or in resync, or all drives
3356 * have a bad block; so try the next chunk...
Linus Torvalds1da177e2005-04-16 15:20:36 -07003357 */
NeilBrown09b40682009-02-25 13:18:47 +11003358 if (sector_nr + max_sync < max_sector)
3359 max_sector = sector_nr + max_sync;
3360
3361 sectors_skipped += (max_sector - sector_nr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003362 chunks_skipped ++;
3363 sector_nr = max_sector;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003364 goto skipped;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003365}
3366
Dan Williams80c3a6c2009-03-17 18:10:40 -07003367static sector_t
NeilBrownfd01b882011-10-11 16:47:53 +11003368raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
Dan Williams80c3a6c2009-03-17 18:10:40 -07003369{
3370 sector_t size;
NeilBrowne879a872011-10-11 16:49:02 +11003371 struct r10conf *conf = mddev->private;
Dan Williams80c3a6c2009-03-17 18:10:40 -07003372
3373 if (!raid_disks)
NeilBrown3ea7daa2012-05-22 13:53:47 +10003374 raid_disks = min(conf->geo.raid_disks,
3375 conf->prev.raid_disks);
Dan Williams80c3a6c2009-03-17 18:10:40 -07003376 if (!sectors)
Trela, Maciejdab8b292010-03-08 16:02:45 +11003377 sectors = conf->dev_sectors;
Dan Williams80c3a6c2009-03-17 18:10:40 -07003378
NeilBrown5cf00fc2012-05-21 09:28:20 +10003379 size = sectors >> conf->geo.chunk_shift;
3380 sector_div(size, conf->geo.far_copies);
Dan Williams80c3a6c2009-03-17 18:10:40 -07003381 size = size * raid_disks;
NeilBrown5cf00fc2012-05-21 09:28:20 +10003382 sector_div(size, conf->geo.near_copies);
Dan Williams80c3a6c2009-03-17 18:10:40 -07003383
NeilBrown5cf00fc2012-05-21 09:28:20 +10003384 return size << conf->geo.chunk_shift;
Dan Williams80c3a6c2009-03-17 18:10:40 -07003385}
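/* Worked example (illustrative numbers): a near layout with
 * raid_disks = 4, near_copies = 2, far_copies = 1 and a 64KiB chunk
 * (chunk_shift = 7 in 512-byte sectors).  With 1024 sectors per
 * device: (1024 >> 7) / 1 * 4 / 2 << 7 = 2048 sectors of array
 * capacity - half the raw space, as expected with two copies of
 * every chunk.
 */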
3386
NeilBrown6508fdb2012-05-17 10:08:45 +10003387static void calc_sectors(struct r10conf *conf, sector_t size)
3388{
3389 /* Calculate the number of sectors-per-device that will
3390 * actually be used, and set conf->dev_sectors and
3391 * conf->stride
3392 */
3393
NeilBrown5cf00fc2012-05-21 09:28:20 +10003394 size = size >> conf->geo.chunk_shift;
3395 sector_div(size, conf->geo.far_copies);
3396 size = size * conf->geo.raid_disks;
3397 sector_div(size, conf->geo.near_copies);
NeilBrown6508fdb2012-05-17 10:08:45 +10003398 /* 'size' is now the number of chunks in the array */
3399 /* calculate "used chunks per device" */
3400 size = size * conf->copies;
3401
3402 /* We need to round up when dividing by raid_disks to
3403 * get the stride size.
3404 */
NeilBrown5cf00fc2012-05-21 09:28:20 +10003405 size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks);
NeilBrown6508fdb2012-05-17 10:08:45 +10003406
NeilBrown5cf00fc2012-05-21 09:28:20 +10003407 conf->dev_sectors = size << conf->geo.chunk_shift;
NeilBrown6508fdb2012-05-17 10:08:45 +10003408
NeilBrown5cf00fc2012-05-21 09:28:20 +10003409 if (conf->geo.far_offset)
3410 conf->geo.stride = 1 << conf->geo.chunk_shift;
NeilBrown6508fdb2012-05-17 10:08:45 +10003411 else {
NeilBrown5cf00fc2012-05-21 09:28:20 +10003412 sector_div(size, conf->geo.far_copies);
3413 conf->geo.stride = size << conf->geo.chunk_shift;
NeilBrown6508fdb2012-05-17 10:08:45 +10003414 }
3415}
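/* Worked example (illustrative, the inverse of the raid10_size()
 * numbers above): raid_disks = 4, near_copies = 2, far_copies = 1,
 * chunk_shift = 7, per-device size = 1024 sectors:
 * 1024 >> 7 = 8; / 1 * 4 / 2 = 16 chunks in the array;
 * * copies (2) = 32 used chunks; DIV_ROUND_UP(32, 4) = 8 chunks
 * per device; dev_sectors = 8 << 7 = 1024.  With far_offset == 0
 * the stride is dev_sectors divided by far_copies.
 */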
Trela, Maciejdab8b292010-03-08 16:02:45 +11003416
NeilBrowndeb200d2012-05-21 09:28:33 +10003417enum geo_type {geo_new, geo_old, geo_start};
3418static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
3419{
3420 int nc, fc, fo;
3421 int layout, chunk, disks;
3422 switch (new) {
3423 case geo_old:
3424 layout = mddev->layout;
3425 chunk = mddev->chunk_sectors;
3426 disks = mddev->raid_disks - mddev->delta_disks;
3427 break;
3428 case geo_new:
3429 layout = mddev->new_layout;
3430 chunk = mddev->new_chunk_sectors;
3431 disks = mddev->raid_disks;
3432 break;
3433 default: /* avoid 'may be unused' warnings */
3434 case geo_start: /* new when starting reshape - raid_disks not
3435 * updated yet. */
3436 layout = mddev->new_layout;
3437 chunk = mddev->new_chunk_sectors;
3438 disks = mddev->raid_disks + mddev->delta_disks;
3439 break;
3440 }
3441 if (layout >> 17)
3442 return -1;
3443 if (chunk < (PAGE_SIZE >> 9) ||
3444 !is_power_of_2(chunk))
3445 return -2;
3446 nc = layout & 255;
3447 fc = (layout >> 8) & 255;
3448 fo = layout & (1<<16);
3449 geo->raid_disks = disks;
3450 geo->near_copies = nc;
3451 geo->far_copies = fc;
3452 geo->far_offset = fo;
3453 geo->chunk_mask = chunk - 1;
3454 geo->chunk_shift = ffz(~chunk);
3455 return nc*fc;
3456}
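/* Layout decoding example (illustrative): the common "near=2" layout
 * is 0x102: nc = 0x102 & 255 = 2, fc = (0x102 >> 8) & 255 = 1, and
 * bit 16 (far_offset) is clear.  A far-offset layout with two far
 * copies would be (1 << 16) | (2 << 8) | 1 = 0x10201.  Note that
 * ffz(~chunk) is simply log2(chunk) for the power-of-2 chunk sizes
 * accepted above.
 */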
3457
NeilBrowne879a872011-10-11 16:49:02 +11003458static struct r10conf *setup_conf(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003459{
NeilBrowne879a872011-10-11 16:49:02 +11003460 struct r10conf *conf = NULL;
Trela, Maciejdab8b292010-03-08 16:02:45 +11003461 int err = -EINVAL;
NeilBrowndeb200d2012-05-21 09:28:33 +10003462 struct geom geo;
3463 int copies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003464
NeilBrowndeb200d2012-05-21 09:28:33 +10003465 copies = setup_geo(&geo, mddev, geo_new);
3466
3467 if (copies == -2) {
NeilBrown128595e2010-05-03 14:47:14 +10003468 printk(KERN_ERR "md/raid10:%s: chunk size must be "
3469 "at least PAGE_SIZE(%ld) and be a power of 2.\n",
3470 mdname(mddev), PAGE_SIZE);
Trela, Maciejdab8b292010-03-08 16:02:45 +11003471 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003472 }
NeilBrown2604b702006-01-06 00:20:36 -08003473
NeilBrowndeb200d2012-05-21 09:28:33 +10003474 if (copies < 2 || copies > mddev->raid_disks) {
NeilBrown128595e2010-05-03 14:47:14 +10003475 printk(KERN_ERR "md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
Maciej Trelaf73ea872010-06-16 11:46:29 +01003476 mdname(mddev), mddev->new_layout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003477 goto out;
3478 }
Trela, Maciejdab8b292010-03-08 16:02:45 +11003479
3480 err = -ENOMEM;
NeilBrowne879a872011-10-11 16:49:02 +11003481 conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL);
Trela, Maciejdab8b292010-03-08 16:02:45 +11003482 if (!conf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003483 goto out;
Trela, Maciejdab8b292010-03-08 16:02:45 +11003484
NeilBrown3ea7daa2012-05-22 13:53:47 +10003485 /* FIXME calc properly */
Jonathan Brassowdc280d982012-07-31 10:03:52 +10003486 conf->mirrors = kzalloc(sizeof(struct raid10_info)*(mddev->raid_disks +
NeilBrown3ea7daa2012-05-22 13:53:47 +10003487 max(0,mddev->delta_disks)),
Trela, Maciejdab8b292010-03-08 16:02:45 +11003488 GFP_KERNEL);
3489 if (!conf->mirrors)
3490 goto out;
NeilBrown4443ae12006-01-06 00:20:28 -08003491
3492 conf->tmppage = alloc_page(GFP_KERNEL);
3493 if (!conf->tmppage)
Trela, Maciejdab8b292010-03-08 16:02:45 +11003494 goto out;
3495
NeilBrowndeb200d2012-05-21 09:28:33 +10003496 conf->geo = geo;
3497 conf->copies = copies;
Trela, Maciejdab8b292010-03-08 16:02:45 +11003498 conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
3499 r10bio_pool_free, conf);
3500 if (!conf->r10bio_pool)
3501 goto out;
3502
NeilBrown6508fdb2012-05-17 10:08:45 +10003503 calc_sectors(conf, mddev->dev_sectors);
NeilBrown3ea7daa2012-05-22 13:53:47 +10003504 if (mddev->reshape_position == MaxSector) {
3505 conf->prev = conf->geo;
3506 conf->reshape_progress = MaxSector;
3507 } else {
3508 if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) {
3509 err = -EINVAL;
3510 goto out;
3511 }
3512 conf->reshape_progress = mddev->reshape_position;
3513 if (conf->prev.far_offset)
3514 conf->prev.stride = 1 << conf->prev.chunk_shift;
3515 else
3516 /* far_copies must be 1 */
3517 conf->prev.stride = conf->dev_sectors;
3518 }
Neil Browne7e72bf2008-05-14 16:05:54 -07003519 spin_lock_init(&conf->device_lock);
Trela, Maciejdab8b292010-03-08 16:02:45 +11003520 INIT_LIST_HEAD(&conf->retry_list);
3521
3522 spin_lock_init(&conf->resync_lock);
3523 init_waitqueue_head(&conf->wait_barrier);
3524
NeilBrown02326052012-07-03 15:56:52 +10003525 conf->thread = md_register_thread(raid10d, mddev, "raid10");
Trela, Maciejdab8b292010-03-08 16:02:45 +11003526 if (!conf->thread)
3527 goto out;
3528
Trela, Maciejdab8b292010-03-08 16:02:45 +11003529 conf->mddev = mddev;
3530 return conf;
3531
3532 out:
NeilBrown3ea7daa2012-05-22 13:53:47 +10003533 if (err == -ENOMEM)
3534 printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
3535 mdname(mddev));
Trela, Maciejdab8b292010-03-08 16:02:45 +11003536 if (conf) {
3537 if (conf->r10bio_pool)
3538 mempool_destroy(conf->r10bio_pool);
3539 kfree(conf->mirrors);
3540 safe_put_page(conf->tmppage);
3541 kfree(conf);
3542 }
3543 return ERR_PTR(err);
3544}
3545
NeilBrownfd01b882011-10-11 16:47:53 +11003546static int run(struct mddev *mddev)
Trela, Maciejdab8b292010-03-08 16:02:45 +11003547{
NeilBrowne879a872011-10-11 16:49:02 +11003548 struct r10conf *conf;
Trela, Maciejdab8b292010-03-08 16:02:45 +11003549 int i, disk_idx, chunk_size;
Jonathan Brassowdc280d982012-07-31 10:03:52 +10003550 struct raid10_info *disk;
NeilBrown3cb03002011-10-11 16:45:26 +11003551 struct md_rdev *rdev;
Trela, Maciejdab8b292010-03-08 16:02:45 +11003552 sector_t size;
NeilBrown3ea7daa2012-05-22 13:53:47 +10003553 sector_t min_offset_diff = 0;
3554 int first = 1;
Shaohua Li532a2a32012-10-11 13:30:52 +11003555 bool discard_supported = false;
Trela, Maciejdab8b292010-03-08 16:02:45 +11003556
3557 if (mddev->private == NULL) {
3558 conf = setup_conf(mddev);
3559 if (IS_ERR(conf))
3560 return PTR_ERR(conf);
3561 mddev->private = conf;
3562 }
3563 conf = mddev->private;
3564 if (!conf)
3565 goto out;
3566
Trela, Maciejdab8b292010-03-08 16:02:45 +11003567 mddev->thread = conf->thread;
3568 conf->thread = NULL;
3569
Martin K. Petersen8f6c2e42009-07-01 11:13:45 +10003570 chunk_size = mddev->chunk_sectors << 9;
Jonathan Brassowcc4d1ef2012-07-31 10:03:53 +10003571 if (mddev->queue) {
Shaohua Li532a2a32012-10-11 13:30:52 +11003572 blk_queue_max_discard_sectors(mddev->queue,
3573 mddev->chunk_sectors);
Joe Lawrencec8dc9c62013-02-21 13:28:09 +11003574 blk_queue_max_write_same_sectors(mddev->queue,
3575 mddev->chunk_sectors);
Jonathan Brassowcc4d1ef2012-07-31 10:03:53 +10003576 blk_queue_io_min(mddev->queue, chunk_size);
3577 if (conf->geo.raid_disks % conf->geo.near_copies)
3578 blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
3579 else
3580 blk_queue_io_opt(mddev->queue, chunk_size *
3581 (conf->geo.raid_disks / conf->geo.near_copies));
3582 }
Martin K. Petersen8f6c2e42009-07-01 11:13:45 +10003583
NeilBrowndafb20f2012-03-19 12:46:39 +11003584 rdev_for_each(rdev, mddev) {
NeilBrown3ea7daa2012-05-22 13:53:47 +10003585 long long diff;
NeilBrownaba336b2012-05-31 15:39:11 +10003586 struct request_queue *q;
NeilBrown34b343c2011-07-28 11:31:47 +10003587
Linus Torvalds1da177e2005-04-16 15:20:36 -07003588 disk_idx = rdev->raid_disk;
NeilBrownf8c9e742012-05-21 09:28:33 +10003589 if (disk_idx < 0)
3590 continue;
3591 if (disk_idx >= conf->geo.raid_disks &&
3592 disk_idx >= conf->prev.raid_disks)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003593 continue;
3594 disk = conf->mirrors + disk_idx;
3595
NeilBrown56a25592011-12-23 10:17:55 +11003596 if (test_bit(Replacement, &rdev->flags)) {
3597 if (disk->replacement)
3598 goto out_free_conf;
3599 disk->replacement = rdev;
3600 } else {
3601 if (disk->rdev)
3602 goto out_free_conf;
3603 disk->rdev = rdev;
3604 }
NeilBrownaba336b2012-05-31 15:39:11 +10003605 q = bdev_get_queue(rdev->bdev);
3606 if (q->merge_bvec_fn)
3607 mddev->merge_check_needed = 1;
NeilBrown3ea7daa2012-05-22 13:53:47 +10003608 diff = (rdev->new_data_offset - rdev->data_offset);
3609 if (!mddev->reshape_backwards)
3610 diff = -diff;
3611 if (diff < 0)
3612 diff = 0;
3613 if (first || diff < min_offset_diff)
3614 min_offset_diff = diff;
NeilBrown56a25592011-12-23 10:17:55 +11003615
Jonathan Brassowcc4d1ef2012-07-31 10:03:53 +10003616 if (mddev->gendisk)
3617 disk_stack_limits(mddev->gendisk, rdev->bdev,
3618 rdev->data_offset << 9);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003619
3620 disk->head_position = 0;
Shaohua Li532a2a32012-10-11 13:30:52 +11003621
3622 if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
3623 discard_supported = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003624 }
NeilBrown3ea7daa2012-05-22 13:53:47 +10003625
Jonathan Brassowed30be02012-10-31 11:42:30 +11003626 if (mddev->queue) {
3627 if (discard_supported)
3628 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
3629 mddev->queue);
3630 else
3631 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
3632 mddev->queue);
3633 }
NeilBrown6d508242005-09-09 16:24:03 -07003634 /* need to check that every block has at least one working mirror */
NeilBrown700c7212011-07-27 11:00:36 +10003635 if (!enough(conf, -1)) {
NeilBrown128595e2010-05-03 14:47:14 +10003636 printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
NeilBrown6d508242005-09-09 16:24:03 -07003637 mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003638 goto out_free_conf;
3639 }
3640
NeilBrown3ea7daa2012-05-22 13:53:47 +10003641 if (conf->reshape_progress != MaxSector) {
3642 /* must ensure that shape change is supported */
3643 if (conf->geo.far_copies != 1 &&
3644 conf->geo.far_offset == 0)
3645 goto out_free_conf;
3646 if (conf->prev.far_copies != 1 &&
3647 conf->geo.far_offset == 0)
3648 goto out_free_conf;
3649 }
3650
Linus Torvalds1da177e2005-04-16 15:20:36 -07003651 mddev->degraded = 0;
NeilBrownf8c9e742012-05-21 09:28:33 +10003652 for (i = 0;
3653 i < conf->geo.raid_disks
3654 || i < conf->prev.raid_disks;
3655 i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003656
3657 disk = conf->mirrors + i;
3658
NeilBrown56a25592011-12-23 10:17:55 +11003659 if (!disk->rdev && disk->replacement) {
3660 /* The replacement is all we have - use it */
3661 disk->rdev = disk->replacement;
3662 disk->replacement = NULL;
3663 clear_bit(Replacement, &disk->rdev->flags);
3664 }
3665
NeilBrown5fd6c1d2006-06-26 00:27:40 -07003666 if (!disk->rdev ||
NeilBrown2e333e82006-10-21 10:24:07 -07003667 !test_bit(In_sync, &disk->rdev->flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003668 disk->head_position = 0;
3669 mddev->degraded++;
Neil Brown8c2e8702008-06-28 08:30:52 +10003670 if (disk->rdev)
3671 conf->fullsync = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003672 }
NeilBrownd890fa22011-10-26 11:54:39 +11003673 disk->recovery_disabled = mddev->recovery_disabled - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003674 }
3675
Andre Noll8c6ac862009-06-18 08:48:06 +10003676 if (mddev->recovery_cp != MaxSector)
NeilBrown128595e2010-05-03 14:47:14 +10003677 printk(KERN_NOTICE "md/raid10:%s: not clean"
Andre Noll8c6ac862009-06-18 08:48:06 +10003678 " -- starting background reconstruction\n",
3679 mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003680 printk(KERN_INFO
NeilBrown128595e2010-05-03 14:47:14 +10003681 "md/raid10:%s: active with %d out of %d devices\n",
NeilBrown5cf00fc2012-05-21 09:28:20 +10003682 mdname(mddev), conf->geo.raid_disks - mddev->degraded,
3683 conf->geo.raid_disks);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003684 /*
3685 * Ok, everything is just fine now
3686 */
Trela, Maciejdab8b292010-03-08 16:02:45 +11003687 mddev->dev_sectors = conf->dev_sectors;
3688 size = raid10_size(mddev, 0, 0);
3689 md_set_array_sectors(mddev, size);
3690 mddev->resync_max_sectors = size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003691
Jonathan Brassowcc4d1ef2012-07-31 10:03:53 +10003692 if (mddev->queue) {
NeilBrown5cf00fc2012-05-21 09:28:20 +10003693 int stripe = conf->geo.raid_disks *
Andre Noll9d8f0362009-06-18 08:45:01 +10003694 ((mddev->chunk_sectors << 9) / PAGE_SIZE);
Jonathan Brassowcc4d1ef2012-07-31 10:03:53 +10003695 mddev->queue->backing_dev_info.congested_fn = raid10_congested;
3696 mddev->queue->backing_dev_info.congested_data = mddev;
3697
3698 /* Calculate max read-ahead size.
3699 * We need to read ahead at least twice a whole stripe...
3700 * maybe...
3701 */
NeilBrown5cf00fc2012-05-21 09:28:20 +10003702 stripe /= conf->geo.near_copies;
NeilBrown3ea7daa2012-05-22 13:53:47 +10003703 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
3704 mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
Jonathan Brassowcc4d1ef2012-07-31 10:03:53 +10003705 blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003706 }
3707
Martin K. Petersena91a2782011-03-17 11:11:05 +01003708
3709 if (md_integrity_register(mddev))
3710 goto out_free_conf;
3711
NeilBrown3ea7daa2012-05-22 13:53:47 +10003712 if (conf->reshape_progress != MaxSector) {
3713 unsigned long before_length, after_length;
3714
3715 before_length = ((1 << conf->prev.chunk_shift) *
3716 conf->prev.far_copies);
3717 after_length = ((1 << conf->geo.chunk_shift) *
3718 conf->geo.far_copies);
3719
3720 if (max(before_length, after_length) > min_offset_diff) {
3721 /* This cannot work */
3722 printk("md/raid10: offset difference not enough to continue reshape\n");
3723 goto out_free_conf;
3724 }
3725 conf->offset_diff = min_offset_diff;
3726
3727 conf->reshape_safe = conf->reshape_progress;
3728 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3729 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3730 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
3731 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
3732 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
3733 "reshape");
3734 }
3735
Linus Torvalds1da177e2005-04-16 15:20:36 -07003736 return 0;
3737
3738out_free_conf:
NeilBrown01f96c02011-09-21 15:30:20 +10003739 md_unregister_thread(&mddev->thread);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003740 if (conf->r10bio_pool)
3741 mempool_destroy(conf->r10bio_pool);
NeilBrown1345b1d2006-01-06 00:20:40 -08003742 safe_put_page(conf->tmppage);
Jesper Juhl990a8ba2005-06-21 17:17:30 -07003743 kfree(conf->mirrors);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003744 kfree(conf);
3745 mddev->private = NULL;
3746out:
3747 return -EIO;
3748}
3749
NeilBrownfd01b882011-10-11 16:47:53 +11003750static int stop(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003751{
NeilBrowne879a872011-10-11 16:49:02 +11003752 struct r10conf *conf = mddev->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003753
NeilBrown409c57f2009-03-31 14:39:39 +11003754 raise_barrier(conf, 0);
3755 lower_barrier(conf);
3756
NeilBrown01f96c02011-09-21 15:30:20 +10003757 md_unregister_thread(&mddev->thread);
Jonathan Brassowcc4d1ef2012-07-31 10:03:53 +10003758 if (mddev->queue)
3759 /* the unplug fn references 'conf'*/
3760 blk_sync_queue(mddev->queue);
3761
Linus Torvalds1da177e2005-04-16 15:20:36 -07003762 if (conf->r10bio_pool)
3763 mempool_destroy(conf->r10bio_pool);
Jesper Juhl990a8ba2005-06-21 17:17:30 -07003764 kfree(conf->mirrors);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003765 kfree(conf);
3766 mddev->private = NULL;
3767 return 0;
3768}
3769
NeilBrownfd01b882011-10-11 16:47:53 +11003770static void raid10_quiesce(struct mddev *mddev, int state)
NeilBrown6cce3b232006-01-06 00:20:16 -08003771{
NeilBrowne879a872011-10-11 16:49:02 +11003772 struct r10conf *conf = mddev->private;
NeilBrown6cce3b232006-01-06 00:20:16 -08003773
3774 switch(state) {
3775 case 1:
3776 raise_barrier(conf, 0);
3777 break;
3778 case 0:
3779 lower_barrier(conf);
3780 break;
3781 }
NeilBrown6cce3b232006-01-06 00:20:16 -08003782}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003783
NeilBrown006a09a2012-03-19 12:46:40 +11003784static int raid10_resize(struct mddev *mddev, sector_t sectors)
3785{
3786 /* Resize of 'far' arrays is not supported.
3787 * For 'near' and 'offset' arrays we can set the
3788 * number of sectors used to be an appropriate multiple
3789 * of the chunk size.
3790 * For 'offset', this is far_copies*chunksize.
3791 * For 'near' the multiplier is the LCM of
3792 * near_copies and raid_disks.
3793 * So if far_copies > 1 && !far_offset, fail.
3794 * Else find LCM(raid_disks, near_copies)*far_copies and
3795 * multiply by chunk_size. Then round to this number.
3796 * This is mostly done by raid10_size()
3797 */
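	/* Worked example (illustrative): a near layout with raid_disks = 3,
	 * near_copies = 2, far_copies = 1 repeats its data pattern every
	 * LCM(3, 2) = 6 chunks, so the usable size is rounded to a
	 * multiple of 6 chunks here.
	 */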
3798 struct r10conf *conf = mddev->private;
3799 sector_t oldsize, size;
3800
NeilBrownf8c9e742012-05-21 09:28:33 +10003801 if (mddev->reshape_position != MaxSector)
3802 return -EBUSY;
3803
NeilBrown5cf00fc2012-05-21 09:28:20 +10003804 if (conf->geo.far_copies > 1 && !conf->geo.far_offset)
NeilBrown006a09a2012-03-19 12:46:40 +11003805 return -EINVAL;
3806
3807 oldsize = raid10_size(mddev, 0, 0);
3808 size = raid10_size(mddev, sectors, 0);
NeilBrowna4a61252012-05-22 13:55:27 +10003809 if (mddev->external_size &&
3810 mddev->array_sectors > size)
NeilBrown006a09a2012-03-19 12:46:40 +11003811 return -EINVAL;
NeilBrowna4a61252012-05-22 13:55:27 +10003812 if (mddev->bitmap) {
3813 int ret = bitmap_resize(mddev->bitmap, size, 0, 0);
3814 if (ret)
3815 return ret;
3816 }
3817 md_set_array_sectors(mddev, size);
NeilBrown006a09a2012-03-19 12:46:40 +11003818 set_capacity(mddev->gendisk, mddev->array_sectors);
3819 revalidate_disk(mddev->gendisk);
3820 if (sectors > mddev->dev_sectors &&
3821 mddev->recovery_cp > oldsize) {
3822 mddev->recovery_cp = oldsize;
3823 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3824 }
NeilBrown6508fdb2012-05-17 10:08:45 +10003825 calc_sectors(conf, sectors);
3826 mddev->dev_sectors = conf->dev_sectors;
NeilBrown006a09a2012-03-19 12:46:40 +11003827 mddev->resync_max_sectors = size;
3828 return 0;
3829}
3830
NeilBrownfd01b882011-10-11 16:47:53 +11003831static void *raid10_takeover_raid0(struct mddev *mddev)
Trela, Maciejdab8b292010-03-08 16:02:45 +11003832{
NeilBrown3cb03002011-10-11 16:45:26 +11003833 struct md_rdev *rdev;
NeilBrowne879a872011-10-11 16:49:02 +11003834 struct r10conf *conf;
Trela, Maciejdab8b292010-03-08 16:02:45 +11003835
3836 if (mddev->degraded > 0) {
NeilBrown128595e2010-05-03 14:47:14 +10003837 printk(KERN_ERR "md/raid10:%s: Error: degraded raid0!\n",
3838 mdname(mddev));
Trela, Maciejdab8b292010-03-08 16:02:45 +11003839 return ERR_PTR(-EINVAL);
3840 }
3841
Trela, Maciejdab8b292010-03-08 16:02:45 +11003842 /* Set new parameters */
3843 mddev->new_level = 10;
3844 /* new layout: far_copies = 1, near_copies = 2 */
3845 mddev->new_layout = (1<<8) + 2;
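	/* Sketch of how that layout word decodes (illustrative only;
	 * setup_geo() does the real parsing): near_copies lives in the
	 * low byte, far_copies in the second byte, far_offset in bit 16,
	 * so (1<<8) + 2 == 0x102 means fc = 1, nc = 2, offset clear:
	 *     nc = layout & 255;         value 2
	 *     fc = (layout >> 8) & 255;  value 1
	 *     fo = layout & (1 << 16);   value 0
	 */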
3846 mddev->new_chunk_sectors = mddev->chunk_sectors;
3847 mddev->delta_disks = mddev->raid_disks;
Trela, Maciejdab8b292010-03-08 16:02:45 +11003848 mddev->raid_disks *= 2;
 3849 	/* make sure it will not be marked as dirty */
3850 mddev->recovery_cp = MaxSector;
3851
3852 conf = setup_conf(mddev);
Krzysztof Wojcik02214dc2011-02-04 14:18:26 +01003853 if (!IS_ERR(conf)) {
NeilBrowndafb20f2012-03-19 12:46:39 +11003854 rdev_for_each(rdev, mddev)
NeilBrowne93f68a2010-06-15 09:36:03 +01003855 if (rdev->raid_disk >= 0)
3856 rdev->new_raid_disk = rdev->raid_disk * 2;
Krzysztof Wojcik02214dc2011-02-04 14:18:26 +01003857 conf->barrier = 1;
3858 }
3859
Trela, Maciejdab8b292010-03-08 16:02:45 +11003860 return conf;
3861}
3862
NeilBrownfd01b882011-10-11 16:47:53 +11003863static void *raid10_takeover(struct mddev *mddev)
Trela, Maciejdab8b292010-03-08 16:02:45 +11003864{
NeilBrowne373ab12011-10-11 16:48:59 +11003865 struct r0conf *raid0_conf;
Trela, Maciejdab8b292010-03-08 16:02:45 +11003866
3867 /* raid10 can take over:
 3868 	 *  raid0 - provided it has only one zone (all devices the same size)
3869 */
3870 if (mddev->level == 0) {
3871 /* for raid0 takeover only one zone is supported */
NeilBrowne373ab12011-10-11 16:48:59 +11003872 raid0_conf = mddev->private;
3873 if (raid0_conf->nr_strip_zones > 1) {
NeilBrown128595e2010-05-03 14:47:14 +10003874 printk(KERN_ERR "md/raid10:%s: cannot takeover raid 0"
3875 " with more than one zone.\n",
3876 mdname(mddev));
Trela, Maciejdab8b292010-03-08 16:02:45 +11003877 return ERR_PTR(-EINVAL);
3878 }
3879 return raid10_takeover_raid0(mddev);
3880 }
3881 return ERR_PTR(-EINVAL);
3882}
3883
NeilBrown3ea7daa2012-05-22 13:53:47 +10003884static int raid10_check_reshape(struct mddev *mddev)
3885{
3886 /* Called when there is a request to change
3887 * - layout (to ->new_layout)
3888 * - chunk size (to ->new_chunk_sectors)
3889 * - raid_disks (by delta_disks)
3890 * or when trying to restart a reshape that was ongoing.
3891 *
3892 * We need to validate the request and possibly allocate
3893 * space if that might be an issue later.
3894 *
3895 * Currently we reject any reshape of a 'far' mode array,
3896 * allow chunk size to change if new is generally acceptable,
3897 * allow raid_disks to increase, and allow
3898 * a switch between 'near' mode and 'offset' mode.
3899 */
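	/* For example (informal reading of the rules above): near_copies=2
	 * -> far_offset with far_copies=2 keeps the copy count at 2 and is
	 * accepted, as is growing raid_disks in 'near' mode; any geometry
	 * that ends with far_copies > 1 and !far_offset, or that changes
	 * the total number of copies, gets -EINVAL below.
	 */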
3900 struct r10conf *conf = mddev->private;
3901 struct geom geo;
3902
3903 if (conf->geo.far_copies != 1 && !conf->geo.far_offset)
3904 return -EINVAL;
3905
3906 if (setup_geo(&geo, mddev, geo_start) != conf->copies)
3907 /* mustn't change number of copies */
3908 return -EINVAL;
3909 if (geo.far_copies > 1 && !geo.far_offset)
3910 /* Cannot switch to 'far' mode */
3911 return -EINVAL;
3912
3913 if (mddev->array_sectors & geo.chunk_mask)
3914 /* not factor of array size */
3915 return -EINVAL;
3916
NeilBrown3ea7daa2012-05-22 13:53:47 +10003917 if (!enough(conf, -1))
3918 return -EINVAL;
3919
3920 kfree(conf->mirrors_new);
3921 conf->mirrors_new = NULL;
3922 if (mddev->delta_disks > 0) {
3923 /* allocate new 'mirrors' list */
3924 conf->mirrors_new = kzalloc(
Jonathan Brassowdc280d982012-07-31 10:03:52 +10003925 sizeof(struct raid10_info)
NeilBrown3ea7daa2012-05-22 13:53:47 +10003926 *(mddev->raid_disks +
3927 mddev->delta_disks),
3928 GFP_KERNEL);
3929 if (!conf->mirrors_new)
3930 return -ENOMEM;
3931 }
3932 return 0;
3933}
3934
3935/*
3936 * Need to check if array has failed when deciding whether to:
3937 * - start an array
3938 * - remove non-faulty devices
3939 * - add a spare
3940 * - allow a reshape
3941 * This determination is simple when no reshape is happening.
3942 * However if there is a reshape, we need to carefully check
3943 * both the before and after sections.
3944 * This is because some failed devices may only affect one
3945 * of the two sections, and some non-in_sync devices may
 3946 * be in_sync in the section most affected by failed devices.
3947 */
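/* Worked example (illustrative): growing 4 disks to 6 while the two
 * new disks are still recovering.  The 'prev' pass over 4 disks finds
 * nothing faulty (degraded = 0); the 'geo' pass over 6 disks sees the
 * two !In_sync newcomers but skips them because geo.raid_disks >
 * prev.raid_disks, so the array still reports 0 degraded.
 */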
3948static int calc_degraded(struct r10conf *conf)
3949{
3950 int degraded, degraded2;
3951 int i;
3952
3953 rcu_read_lock();
3954 degraded = 0;
3955 /* 'prev' section first */
3956 for (i = 0; i < conf->prev.raid_disks; i++) {
3957 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
3958 if (!rdev || test_bit(Faulty, &rdev->flags))
3959 degraded++;
3960 else if (!test_bit(In_sync, &rdev->flags))
3961 /* When we can reduce the number of devices in
3962 * an array, this might not contribute to
3963 * 'degraded'. It does now.
3964 */
3965 degraded++;
3966 }
3967 rcu_read_unlock();
3968 if (conf->geo.raid_disks == conf->prev.raid_disks)
3969 return degraded;
3970 rcu_read_lock();
3971 degraded2 = 0;
3972 for (i = 0; i < conf->geo.raid_disks; i++) {
3973 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
3974 if (!rdev || test_bit(Faulty, &rdev->flags))
3975 degraded2++;
3976 else if (!test_bit(In_sync, &rdev->flags)) {
3977 /* If reshape is increasing the number of devices,
3978 * this section has already been recovered, so
3979 * it doesn't contribute to degraded.
3980 * else it does.
3981 */
3982 if (conf->geo.raid_disks <= conf->prev.raid_disks)
3983 degraded2++;
3984 }
3985 }
3986 rcu_read_unlock();
3987 if (degraded2 > degraded)
3988 return degraded2;
3989 return degraded;
3990}
3991
3992static int raid10_start_reshape(struct mddev *mddev)
3993{
3994 /* A 'reshape' has been requested. This commits
 3995 * the various 'new' fields and sets MD_RECOVERY_RESHAPE.
3996 * This also checks if there are enough spares and adds them
3997 * to the array.
3998 * We currently require enough spares to make the final
3999 * array non-degraded. We also require that the difference
4000 * between old and new data_offset - on each device - is
4001 * enough that we never risk over-writing.
4002 */
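	/* Numerical illustration (assumed geometry): with far_copies = 1
	 * and 1024-sector chunks in both layouts, before_length ==
	 * after_length == 1024 sectors.  A forward reshape then needs
	 * each device's new_data_offset at least 1024 sectors below its
	 * data_offset, giving min_offset_diff >= 1024, so the
	 * max(before_length, after_length) check below passes.
	 */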
4003
4004 unsigned long before_length, after_length;
4005 sector_t min_offset_diff = 0;
4006 int first = 1;
4007 struct geom new;
4008 struct r10conf *conf = mddev->private;
4009 struct md_rdev *rdev;
4010 int spares = 0;
NeilBrownbb63a702012-05-22 13:55:28 +10004011 int ret;
NeilBrown3ea7daa2012-05-22 13:53:47 +10004012
4013 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4014 return -EBUSY;
4015
4016 if (setup_geo(&new, mddev, geo_start) != conf->copies)
4017 return -EINVAL;
4018
4019 before_length = ((1 << conf->prev.chunk_shift) *
4020 conf->prev.far_copies);
4021 after_length = ((1 << conf->geo.chunk_shift) *
4022 conf->geo.far_copies);
4023
4024 rdev_for_each(rdev, mddev) {
4025 if (!test_bit(In_sync, &rdev->flags)
4026 && !test_bit(Faulty, &rdev->flags))
4027 spares++;
4028 if (rdev->raid_disk >= 0) {
4029 long long diff = (rdev->new_data_offset
4030 - rdev->data_offset);
4031 if (!mddev->reshape_backwards)
4032 diff = -diff;
4033 if (diff < 0)
4034 diff = 0;
4035 if (first || diff < min_offset_diff)
4036 min_offset_diff = diff;
4037 }
4038 }
4039
4040 if (max(before_length, after_length) > min_offset_diff)
4041 return -EINVAL;
4042
4043 if (spares < mddev->delta_disks)
4044 return -EINVAL;
4045
4046 conf->offset_diff = min_offset_diff;
4047 spin_lock_irq(&conf->device_lock);
4048 if (conf->mirrors_new) {
4049 memcpy(conf->mirrors_new, conf->mirrors,
Jonathan Brassowdc280d982012-07-31 10:03:52 +10004050 sizeof(struct raid10_info)*conf->prev.raid_disks);
NeilBrown3ea7daa2012-05-22 13:53:47 +10004051 smp_mb();
4052 kfree(conf->mirrors_old); /* FIXME and elsewhere */
4053 conf->mirrors_old = conf->mirrors;
4054 conf->mirrors = conf->mirrors_new;
4055 conf->mirrors_new = NULL;
4056 }
4057 setup_geo(&conf->geo, mddev, geo_start);
4058 smp_mb();
4059 if (mddev->reshape_backwards) {
4060 sector_t size = raid10_size(mddev, 0, 0);
4061 if (size < mddev->array_sectors) {
4062 spin_unlock_irq(&conf->device_lock);
 4063 			printk(KERN_ERR "md/raid10:%s: array size must be reduced before number of disks\n",
4064 mdname(mddev));
4065 return -EINVAL;
4066 }
4067 mddev->resync_max_sectors = size;
4068 conf->reshape_progress = size;
4069 } else
4070 conf->reshape_progress = 0;
4071 spin_unlock_irq(&conf->device_lock);
4072
NeilBrownbb63a702012-05-22 13:55:28 +10004073 if (mddev->delta_disks && mddev->bitmap) {
4074 ret = bitmap_resize(mddev->bitmap,
4075 raid10_size(mddev, 0,
4076 conf->geo.raid_disks),
4077 0, 0);
4078 if (ret)
4079 goto abort;
4080 }
NeilBrown3ea7daa2012-05-22 13:53:47 +10004081 if (mddev->delta_disks > 0) {
4082 rdev_for_each(rdev, mddev)
4083 if (rdev->raid_disk < 0 &&
4084 !test_bit(Faulty, &rdev->flags)) {
4085 if (raid10_add_disk(mddev, rdev) == 0) {
4086 if (rdev->raid_disk >=
4087 conf->prev.raid_disks)
4088 set_bit(In_sync, &rdev->flags);
4089 else
4090 rdev->recovery_offset = 0;
4091
4092 if (sysfs_link_rdev(mddev, rdev))
4093 /* Failure here is OK */;
4094 }
4095 } else if (rdev->raid_disk >= conf->prev.raid_disks
4096 && !test_bit(Faulty, &rdev->flags)) {
4097 /* This is a spare that was manually added */
4098 set_bit(In_sync, &rdev->flags);
4099 }
4100 }
4101 /* When a reshape changes the number of devices,
4102 * ->degraded is measured against the larger of the
4103 * pre and post numbers.
4104 */
4105 spin_lock_irq(&conf->device_lock);
4106 mddev->degraded = calc_degraded(conf);
4107 spin_unlock_irq(&conf->device_lock);
4108 mddev->raid_disks = conf->geo.raid_disks;
4109 mddev->reshape_position = conf->reshape_progress;
4110 set_bit(MD_CHANGE_DEVS, &mddev->flags);
4111
4112 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4113 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4114 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4115 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4116
4117 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
4118 "reshape");
4119 if (!mddev->sync_thread) {
NeilBrownbb63a702012-05-22 13:55:28 +10004120 ret = -EAGAIN;
4121 goto abort;
NeilBrown3ea7daa2012-05-22 13:53:47 +10004122 }
4123 conf->reshape_checkpoint = jiffies;
4124 md_wakeup_thread(mddev->sync_thread);
4125 md_new_event(mddev);
4126 return 0;
NeilBrownbb63a702012-05-22 13:55:28 +10004127
4128abort:
4129 mddev->recovery = 0;
4130 spin_lock_irq(&conf->device_lock);
4131 conf->geo = conf->prev;
4132 mddev->raid_disks = conf->geo.raid_disks;
4133 rdev_for_each(rdev, mddev)
4134 rdev->new_data_offset = rdev->data_offset;
4135 smp_wmb();
4136 conf->reshape_progress = MaxSector;
4137 mddev->reshape_position = MaxSector;
4138 spin_unlock_irq(&conf->device_lock);
4139 return ret;
NeilBrown3ea7daa2012-05-22 13:53:47 +10004140}
4141
4142/* Calculate the last device-address that could contain
4143 * any block from the chunk that includes the array-address 's'
4144 * and report the next address.
4145 * i.e. the address returned will be chunk-aligned and after
4146 * any data that is in the chunk containing 's'.
4147 */
4148static sector_t last_dev_address(sector_t s, struct geom *geo)
4149{
4150 s = (s | geo->chunk_mask) + 1;
4151 s >>= geo->chunk_shift;
4152 s *= geo->near_copies;
4153 s = DIV_ROUND_UP_SECTOR_T(s, geo->raid_disks);
4154 s *= geo->far_copies;
4155 s <<= geo->chunk_shift;
4156 return s;
4157}
4158
4159/* Calculate the first device-address that could contain
4160 * any block from the chunk that includes the array-address 's'.
4161 * This too will be the start of a chunk
4162 */
4163static sector_t first_dev_address(sector_t s, struct geom *geo)
4164{
4165 s >>= geo->chunk_shift;
4166 s *= geo->near_copies;
4167 sector_div(s, geo->raid_disks);
4168 s *= geo->far_copies;
4169 s <<= geo->chunk_shift;
4170 return s;
4171}
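/* Worked example for the two helpers above (assumed geometry:
 * raid_disks = 4, near_copies = 2, far_copies = 1, 128-sector chunks,
 * so chunk_shift = 7 and chunk_mask = 127).  For s = 1000:
 *
 *   last_dev_address:  (1000|127)+1 = 1024; >>7 = 8 chunks; *2 = 16
 *                      copy-slots; DIV_ROUND_UP(16, 4) = 4 stripes;
 *                      *1; <<7 = device address 512.
 *   first_dev_address: 1000>>7 = 7; *2 = 14; 14/4 = 3; *1; <<7 =
 *                      device address 384.
 *
 * So every block of the chunk containing sector 1000 lies between
 * device addresses 384 and 512 on each disk.
 */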
4172
4173static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
4174 int *skipped)
4175{
4176 /* We simply copy at most one chunk (smallest of old and new)
4177 * at a time, possibly less if that exceeds RESYNC_PAGES,
4178 * or we hit a bad block or something.
4179 * This might mean we pause for normal IO in the middle of
 4180 * a chunk, but that is not a problem as mddev->reshape_position
4181 * can record any location.
4182 *
4183 * If we will want to write to a location that isn't
4184 * yet recorded as 'safe' (i.e. in metadata on disk) then
4185 * we need to flush all reshape requests and update the metadata.
4186 *
4187 * When reshaping forwards (e.g. to more devices), we interpret
4188 * 'safe' as the earliest block which might not have been copied
4189 * down yet. We divide this by previous stripe size and multiply
4190 * by previous stripe length to get lowest device offset that we
4191 * cannot write to yet.
4192 * We interpret 'sector_nr' as an address that we want to write to.
 4193 * From this we use last_dev_address() to find where we might
 4194 * write to, and first_dev_address() on the 'safe' position.
4195 * If this 'next' write position is after the 'safe' position,
4196 * we must update the metadata to increase the 'safe' position.
4197 *
4198 * When reshaping backwards, we round in the opposite direction
4199 * and perform the reverse test: next write position must not be
4200 * less than current safe position.
4201 *
4202 * In all this the minimum difference in data offsets
4203 * (conf->offset_diff - always positive) allows a bit of slack,
 4204 * so next can be after 'safe', but not by more than offset_diff.
4205 *
4206 * We need to prepare all the bios here before we start any IO
4207 * to ensure the size we choose is acceptable to all devices.
 4208 * That means one for each copy for write-out and an extra one for
4209 * read-in.
4210 * We store the read-in bio in ->master_bio and the others in
4211 * ->devs[x].bio and ->devs[x].repl_bio.
4212 */
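	/* Condensed sketch of the checkpoint rule described above; the
	 * forward branch below implements exactly this (the backwards
	 * branch mirrors it with the comparison reversed):
	 *
	 *	next = last_dev_address(conf->reshape_progress, &conf->geo);
	 *	safe = first_dev_address(conf->reshape_safe, &conf->prev);
	 *	if (next > safe + conf->offset_diff)
	 *		need_flush = 1;    (update metadata first)
	 */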
4213 struct r10conf *conf = mddev->private;
4214 struct r10bio *r10_bio;
4215 sector_t next, safe, last;
4216 int max_sectors;
4217 int nr_sectors;
4218 int s;
4219 struct md_rdev *rdev;
4220 int need_flush = 0;
4221 struct bio *blist;
4222 struct bio *bio, *read_bio;
4223 int sectors_done = 0;
4224
4225 if (sector_nr == 0) {
4226 /* If restarting in the middle, skip the initial sectors */
4227 if (mddev->reshape_backwards &&
4228 conf->reshape_progress < raid10_size(mddev, 0, 0)) {
4229 sector_nr = (raid10_size(mddev, 0, 0)
4230 - conf->reshape_progress);
4231 } else if (!mddev->reshape_backwards &&
4232 conf->reshape_progress > 0)
4233 sector_nr = conf->reshape_progress;
4234 if (sector_nr) {
4235 mddev->curr_resync_completed = sector_nr;
4236 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4237 *skipped = 1;
4238 return sector_nr;
4239 }
4240 }
4241
4242 /* We don't use sector_nr to track where we are up to
4243 * as that doesn't work well for ->reshape_backwards.
4244 * So just use ->reshape_progress.
4245 */
4246 if (mddev->reshape_backwards) {
4247 /* 'next' is the earliest device address that we might
4248 * write to for this chunk in the new layout
4249 */
4250 next = first_dev_address(conf->reshape_progress - 1,
4251 &conf->geo);
4252
4253 /* 'safe' is the last device address that we might read from
4254 * in the old layout after a restart
4255 */
4256 safe = last_dev_address(conf->reshape_safe - 1,
4257 &conf->prev);
4258
4259 if (next + conf->offset_diff < safe)
4260 need_flush = 1;
4261
4262 last = conf->reshape_progress - 1;
4263 sector_nr = last & ~(sector_t)(conf->geo.chunk_mask
4264 & conf->prev.chunk_mask);
4265 if (sector_nr + RESYNC_BLOCK_SIZE/512 < last)
4266 sector_nr = last + 1 - RESYNC_BLOCK_SIZE/512;
4267 } else {
4268 /* 'next' is after the last device address that we
4269 * might write to for this chunk in the new layout
4270 */
4271 next = last_dev_address(conf->reshape_progress, &conf->geo);
4272
4273 /* 'safe' is the earliest device address that we might
4274 * read from in the old layout after a restart
4275 */
4276 safe = first_dev_address(conf->reshape_safe, &conf->prev);
4277
4278 /* Need to update metadata if 'next' might be beyond 'safe'
4279 * as that would possibly corrupt data
4280 */
4281 if (next > safe + conf->offset_diff)
4282 need_flush = 1;
4283
4284 sector_nr = conf->reshape_progress;
4285 last = sector_nr | (conf->geo.chunk_mask
4286 & conf->prev.chunk_mask);
4287
4288 if (sector_nr + RESYNC_BLOCK_SIZE/512 <= last)
4289 last = sector_nr + RESYNC_BLOCK_SIZE/512 - 1;
4290 }
4291
4292 if (need_flush ||
4293 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
4294 /* Need to update reshape_position in metadata */
4295 wait_barrier(conf);
4296 mddev->reshape_position = conf->reshape_progress;
4297 if (mddev->reshape_backwards)
4298 mddev->curr_resync_completed = raid10_size(mddev, 0, 0)
4299 - conf->reshape_progress;
4300 else
4301 mddev->curr_resync_completed = conf->reshape_progress;
4302 conf->reshape_checkpoint = jiffies;
4303 set_bit(MD_CHANGE_DEVS, &mddev->flags);
4304 md_wakeup_thread(mddev->thread);
4305 wait_event(mddev->sb_wait, mddev->flags == 0 ||
4306 kthread_should_stop());
4307 conf->reshape_safe = mddev->reshape_position;
4308 allow_barrier(conf);
4309 }
4310
4311read_more:
4312 /* Now schedule reads for blocks from sector_nr to last */
4313 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
4314 raise_barrier(conf, sectors_done != 0);
4315 atomic_set(&r10_bio->remaining, 0);
4316 r10_bio->mddev = mddev;
4317 r10_bio->sector = sector_nr;
4318 set_bit(R10BIO_IsReshape, &r10_bio->state);
4319 r10_bio->sectors = last - sector_nr + 1;
4320 rdev = read_balance(conf, r10_bio, &max_sectors);
4321 BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state));
4322
4323 if (!rdev) {
4324 /* Cannot read from here, so need to record bad blocks
4325 * on all the target devices.
4326 */
4327 // FIXME
4328 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4329 return sectors_done;
4330 }
4331
4332 read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);
4333
4334 read_bio->bi_bdev = rdev->bdev;
4335 read_bio->bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
4336 + rdev->data_offset);
4337 read_bio->bi_private = r10_bio;
4338 read_bio->bi_end_io = end_sync_read;
4339 read_bio->bi_rw = READ;
4340 read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
4341 read_bio->bi_flags |= 1 << BIO_UPTODATE;
4342 read_bio->bi_vcnt = 0;
4343 read_bio->bi_idx = 0;
4344 read_bio->bi_size = 0;
4345 r10_bio->master_bio = read_bio;
4346 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
4347
4348 /* Now find the locations in the new layout */
4349 __raid10_find_phys(&conf->geo, r10_bio);
4350
4351 blist = read_bio;
4352 read_bio->bi_next = NULL;
4353
4354 for (s = 0; s < conf->copies*2; s++) {
4355 struct bio *b;
4356 int d = r10_bio->devs[s/2].devnum;
4357 struct md_rdev *rdev2;
4358 if (s&1) {
4359 rdev2 = conf->mirrors[d].replacement;
4360 b = r10_bio->devs[s/2].repl_bio;
4361 } else {
4362 rdev2 = conf->mirrors[d].rdev;
4363 b = r10_bio->devs[s/2].bio;
4364 }
4365 if (!rdev2 || test_bit(Faulty, &rdev2->flags))
4366 continue;
4367 b->bi_bdev = rdev2->bdev;
4368 b->bi_sector = r10_bio->devs[s/2].addr + rdev2->new_data_offset;
4369 b->bi_private = r10_bio;
4370 b->bi_end_io = end_reshape_write;
4371 b->bi_rw = WRITE;
4372 b->bi_flags &= ~(BIO_POOL_MASK - 1);
4373 b->bi_flags |= 1 << BIO_UPTODATE;
4374 b->bi_next = blist;
4375 b->bi_vcnt = 0;
4376 b->bi_idx = 0;
4377 b->bi_size = 0;
4378 blist = b;
4379 }
4380
4381 /* Now add as many pages as possible to all of these bios. */
4382
4383 nr_sectors = 0;
4384 for (s = 0 ; s < max_sectors; s += PAGE_SIZE >> 9) {
4385 struct page *page = r10_bio->devs[0].bio->bi_io_vec[s/(PAGE_SIZE>>9)].bv_page;
4386 int len = (max_sectors - s) << 9;
4387 if (len > PAGE_SIZE)
4388 len = PAGE_SIZE;
4389 for (bio = blist; bio ; bio = bio->bi_next) {
4390 struct bio *bio2;
4391 if (bio_add_page(bio, page, len, 0))
4392 continue;
4393
4394 /* Didn't fit, must stop */
4395 for (bio2 = blist;
4396 bio2 && bio2 != bio;
4397 bio2 = bio2->bi_next) {
4398 /* Remove last page from this bio */
4399 bio2->bi_vcnt--;
4400 bio2->bi_size -= len;
4401 bio2->bi_flags &= ~(1<<BIO_SEG_VALID);
4402 }
4403 goto bio_full;
4404 }
4405 sector_nr += len >> 9;
4406 nr_sectors += len >> 9;
4407 }
4408bio_full:
4409 r10_bio->sectors = nr_sectors;
4410
4411 /* Now submit the read */
4412 md_sync_acct(read_bio->bi_bdev, r10_bio->sectors);
4413 atomic_inc(&r10_bio->remaining);
4414 read_bio->bi_next = NULL;
4415 generic_make_request(read_bio);
4416 sector_nr += nr_sectors;
4417 sectors_done += nr_sectors;
4418 if (sector_nr <= last)
4419 goto read_more;
4420
4421 /* Now that we have done the whole section we can
4422 * update reshape_progress
4423 */
4424 if (mddev->reshape_backwards)
4425 conf->reshape_progress -= sectors_done;
4426 else
4427 conf->reshape_progress += sectors_done;
4428
4429 return sectors_done;
4430}
4431
4432static void end_reshape_request(struct r10bio *r10_bio);
4433static int handle_reshape_read_error(struct mddev *mddev,
4434 struct r10bio *r10_bio);
4435static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
4436{
4437 /* Reshape read completed. Hopefully we have a block
4438 * to write out.
4439 * If we got a read error then we do sync 1-page reads from
4440 * elsewhere until we find the data - or give up.
4441 */
4442 struct r10conf *conf = mddev->private;
4443 int s;
4444
4445 if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
4446 if (handle_reshape_read_error(mddev, r10_bio) < 0) {
4447 /* Reshape has been aborted */
4448 md_done_sync(mddev, r10_bio->sectors, 0);
4449 return;
4450 }
4451
4452 /* We definitely have the data in the pages, schedule the
4453 * writes.
4454 */
4455 atomic_set(&r10_bio->remaining, 1);
4456 for (s = 0; s < conf->copies*2; s++) {
4457 struct bio *b;
4458 int d = r10_bio->devs[s/2].devnum;
4459 struct md_rdev *rdev;
4460 if (s&1) {
4461 rdev = conf->mirrors[d].replacement;
4462 b = r10_bio->devs[s/2].repl_bio;
4463 } else {
4464 rdev = conf->mirrors[d].rdev;
4465 b = r10_bio->devs[s/2].bio;
4466 }
4467 if (!rdev || test_bit(Faulty, &rdev->flags))
4468 continue;
4469 atomic_inc(&rdev->nr_pending);
4470 md_sync_acct(b->bi_bdev, r10_bio->sectors);
4471 atomic_inc(&r10_bio->remaining);
4472 b->bi_next = NULL;
4473 generic_make_request(b);
4474 }
4475 end_reshape_request(r10_bio);
4476}
4477
4478static void end_reshape(struct r10conf *conf)
4479{
4480 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery))
4481 return;
4482
4483 spin_lock_irq(&conf->device_lock);
4484 conf->prev = conf->geo;
4485 md_finish_reshape(conf->mddev);
4486 smp_wmb();
4487 conf->reshape_progress = MaxSector;
4488 spin_unlock_irq(&conf->device_lock);
4489
4490 /* read-ahead size must cover two whole stripes, which is
 4491 	 * 2 * (datadisks) * chunksize, where 'datadisks' = raid_disks / near_copies
4492 */
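	/* e.g. (illustrative numbers) 6 disks, near_copies = 2, 512KiB
	 * chunks, 4KiB pages: stripe = 6 * 128 / 2 = 384 pages, so
	 * ra_pages is raised to at least 768 pages (3MiB).
	 */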
4493 if (conf->mddev->queue) {
4494 int stripe = conf->geo.raid_disks *
4495 ((conf->mddev->chunk_sectors << 9) / PAGE_SIZE);
4496 stripe /= conf->geo.near_copies;
4497 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
4498 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
4499 }
4500 conf->fullsync = 0;
4501}
4502
4504static int handle_reshape_read_error(struct mddev *mddev,
4505 struct r10bio *r10_bio)
4506{
4507 /* Use sync reads to get the blocks from somewhere else */
4508 int sectors = r10_bio->sectors;
NeilBrown3ea7daa2012-05-22 13:53:47 +10004509 struct r10conf *conf = mddev->private;
NeilBrowne0ee7782012-08-18 09:51:42 +10004510 struct {
4511 struct r10bio r10_bio;
4512 struct r10dev devs[conf->copies];
4513 } on_stack;
4514 struct r10bio *r10b = &on_stack.r10_bio;
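	/* struct r10bio ends in a flexible array of struct r10dev, one
	 * entry per copy; the on_stack wrapper reserves conf->copies
	 * entries after it so __raid10_find_phys() below has room to
	 * fill in the per-device addresses.
	 */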
NeilBrown3ea7daa2012-05-22 13:53:47 +10004515 int slot = 0;
4516 int idx = 0;
4517 struct bio_vec *bvec = r10_bio->master_bio->bi_io_vec;
4518
NeilBrowne0ee7782012-08-18 09:51:42 +10004519 r10b->sector = r10_bio->sector;
4520 __raid10_find_phys(&conf->prev, r10b);
NeilBrown3ea7daa2012-05-22 13:53:47 +10004521
4522 while (sectors) {
4523 int s = sectors;
4524 int success = 0;
4525 int first_slot = slot;
4526
4527 if (s > (PAGE_SIZE >> 9))
4528 s = PAGE_SIZE >> 9;
4529
4530 while (!success) {
NeilBrowne0ee7782012-08-18 09:51:42 +10004531 int d = r10b->devs[slot].devnum;
NeilBrown3ea7daa2012-05-22 13:53:47 +10004532 struct md_rdev *rdev = conf->mirrors[d].rdev;
4533 sector_t addr;
4534 if (rdev == NULL ||
4535 test_bit(Faulty, &rdev->flags) ||
4536 !test_bit(In_sync, &rdev->flags))
4537 goto failed;
4538
NeilBrowne0ee7782012-08-18 09:51:42 +10004539 addr = r10b->devs[slot].addr + idx * PAGE_SIZE;
NeilBrown3ea7daa2012-05-22 13:53:47 +10004540 success = sync_page_io(rdev,
4541 addr,
4542 s << 9,
4543 bvec[idx].bv_page,
4544 READ, false);
4545 if (success)
4546 break;
4547 failed:
4548 slot++;
4549 if (slot >= conf->copies)
4550 slot = 0;
4551 if (slot == first_slot)
4552 break;
4553 }
4554 if (!success) {
4555 /* couldn't read this block, must give up */
4556 set_bit(MD_RECOVERY_INTR,
4557 &mddev->recovery);
4558 return -EIO;
4559 }
4560 sectors -= s;
4561 idx++;
4562 }
4563 return 0;
4564}
4565
4566static void end_reshape_write(struct bio *bio, int error)
4567{
4568 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
4569 struct r10bio *r10_bio = bio->bi_private;
4570 struct mddev *mddev = r10_bio->mddev;
4571 struct r10conf *conf = mddev->private;
4572 int d;
4573 int slot;
4574 int repl;
4575 struct md_rdev *rdev = NULL;
4576
4577 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
4578 if (repl)
4579 rdev = conf->mirrors[d].replacement;
4580 if (!rdev) {
4581 smp_mb();
4582 rdev = conf->mirrors[d].rdev;
4583 }
4584
4585 if (!uptodate) {
4586 /* FIXME should record badblock */
4587 md_error(mddev, rdev);
4588 }
4589
4590 rdev_dec_pending(rdev, mddev);
4591 end_reshape_request(r10_bio);
4592}
4593
4594static void end_reshape_request(struct r10bio *r10_bio)
4595{
4596 if (!atomic_dec_and_test(&r10_bio->remaining))
4597 return;
4598 md_done_sync(r10_bio->mddev, r10_bio->sectors, 1);
4599 bio_put(r10_bio->master_bio);
4600 put_buf(r10_bio);
4601}
4602
4603static void raid10_finish_reshape(struct mddev *mddev)
4604{
4605 struct r10conf *conf = mddev->private;
4606
4607 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
4608 return;
4609
4610 if (mddev->delta_disks > 0) {
4611 sector_t size = raid10_size(mddev, 0, 0);
4612 md_set_array_sectors(mddev, size);
4613 if (mddev->recovery_cp > mddev->resync_max_sectors) {
4614 mddev->recovery_cp = mddev->resync_max_sectors;
4615 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4616 }
4617 mddev->resync_max_sectors = size;
4618 set_capacity(mddev->gendisk, mddev->array_sectors);
4619 revalidate_disk(mddev->gendisk);
NeilBrown63aced62012-05-22 13:55:33 +10004620 } else {
4621 int d;
4622 for (d = conf->geo.raid_disks ;
4623 d < conf->geo.raid_disks - mddev->delta_disks;
4624 d++) {
4625 struct md_rdev *rdev = conf->mirrors[d].rdev;
4626 if (rdev)
4627 clear_bit(In_sync, &rdev->flags);
4628 rdev = conf->mirrors[d].replacement;
4629 if (rdev)
4630 clear_bit(In_sync, &rdev->flags);
4631 }
NeilBrown3ea7daa2012-05-22 13:53:47 +10004632 }
4633 mddev->layout = mddev->new_layout;
4634 mddev->chunk_sectors = 1 << conf->geo.chunk_shift;
4635 mddev->reshape_position = MaxSector;
4636 mddev->delta_disks = 0;
4637 mddev->reshape_backwards = 0;
4638}
4639
NeilBrown84fc4b52011-10-11 16:49:58 +11004640static struct md_personality raid10_personality =
Linus Torvalds1da177e2005-04-16 15:20:36 -07004641{
4642 .name = "raid10",
NeilBrown2604b702006-01-06 00:20:36 -08004643 .level = 10,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004644 .owner = THIS_MODULE,
4645 .make_request = make_request,
4646 .run = run,
4647 .stop = stop,
4648 .status = status,
4649 .error_handler = error,
4650 .hot_add_disk = raid10_add_disk,
4651 .hot_remove_disk= raid10_remove_disk,
4652 .spare_active = raid10_spare_active,
4653 .sync_request = sync_request,
NeilBrown6cce3b232006-01-06 00:20:16 -08004654 .quiesce = raid10_quiesce,
Dan Williams80c3a6c2009-03-17 18:10:40 -07004655 .size = raid10_size,
NeilBrown006a09a2012-03-19 12:46:40 +11004656 .resize = raid10_resize,
Trela, Maciejdab8b292010-03-08 16:02:45 +11004657 .takeover = raid10_takeover,
NeilBrown3ea7daa2012-05-22 13:53:47 +10004658 .check_reshape = raid10_check_reshape,
4659 .start_reshape = raid10_start_reshape,
4660 .finish_reshape = raid10_finish_reshape,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004661};
4662
4663static int __init raid_init(void)
4664{
NeilBrown2604b702006-01-06 00:20:36 -08004665 return register_md_personality(&raid10_personality);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004666}
4667
4668static void raid_exit(void)
4669{
NeilBrown2604b702006-01-06 00:20:36 -08004670 unregister_md_personality(&raid10_personality);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004671}
4672
4673module_init(raid_init);
4674module_exit(raid_exit);
4675MODULE_LICENSE("GPL");
NeilBrown0efb9e62009-12-14 12:49:58 +11004676MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004677MODULE_ALIAS("md-personality-9"); /* RAID10 */
NeilBrownd9d166c2006-01-06 00:20:51 -08004678MODULE_ALIAS("md-raid10");
NeilBrown2604b702006-01-06 00:20:36 -08004679MODULE_ALIAS("md-level-10");
NeilBrown34db0cd2011-10-11 16:50:01 +11004680
4681module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);