/*
 * Copyright (C) 2015 Shaohua Li <shli@fb.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/raid/md_p.h>
#include <linux/crc32c.h>
#include <linux/random.h>
#include "md.h"
#include "raid5.h"

/*
 * metadata/data are stored on disk in 4k units (blocks) regardless of the
 * underlying hardware sector size. This only works with PAGE_SIZE == 4096.
 */
#define BLOCK_SECTORS (8)

/*
 * reclaim runs every 1/4 disk size or 10G of reclaimable space, whichever is
 * smaller. This prevents recovery from having to scan a very long log.
 */
#define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
#define RECLAIM_MAX_FREE_SPACE_SHIFT (2)

struct r5l_log {
	struct md_rdev *rdev;

	u32 uuid_checksum;

	sector_t device_size;		/* log device size, rounded down to
					 * BLOCK_SECTORS */
	sector_t max_free_space;	/* reclaim runs once reclaimable
					 * space reaches this size */

	sector_t last_checkpoint;	/* log tail. where recovery scan
					 * starts from */
	u64 last_cp_seq;		/* log tail sequence */

	sector_t log_start;		/* log head. where new data appends */
	u64 seq;			/* log head sequence */

	sector_t next_checkpoint;
	u64 next_cp_seq;

	struct mutex io_mutex;
	struct r5l_io_unit *current_io;	/* current io_unit accepting new data */

	spinlock_t io_list_lock;
	struct list_head running_ios;	/* io_units which are still running,
					 * and have not yet been completely
					 * written to the log */
	struct list_head io_end_ios;	/* io_units which have been completely
					 * written to the log but not yet written
					 * to the RAID */
	struct list_head flushing_ios;	/* io_units which are waiting for log
					 * cache flush */
	struct list_head finished_ios;	/* io_units which settle down in log disk */
	struct bio flush_bio;

	struct kmem_cache *io_kc;

	struct md_thread *reclaim_thread;
	unsigned long reclaim_target;	/* amount of space that needs to be
					 * reclaimed. If it's 0, reclaim the
					 * space used by io_units which are in
					 * IO_UNIT_STRIPE_END state (i.e. reclaim
					 * doesn't wait for a specific io_unit
					 * to switch to IO_UNIT_STRIPE_END
					 * state) */
	wait_queue_head_t iounit_wait;

	struct list_head no_space_stripes; /* pending stripes, log has no space */
	spinlock_t no_space_stripes_lock;

	bool need_cache_flush;
	bool in_teardown;
};

/*
 * An IO range starts at a metadata block and ends at the next metadata
 * block. The io_unit's metadata block tracks the data/parity that follows
 * it. An io_unit is written to the log disk with a normal write; since we
 * always flush the log disk first and only then start moving data to the
 * raid disks, there is no need to write the io_unit with FLUSH/FUA.
 */
struct r5l_io_unit {
	struct r5l_log *log;

	struct page *meta_page;	/* store meta block */
	int meta_offset;	/* current offset in meta_page */

	struct bio *current_bio;/* current_bio accepting new data */

	atomic_t pending_stripe;/* how many stripes not flushed to raid */
	u64 seq;		/* seq number of the metablock */
	sector_t log_start;	/* where the io_unit starts */
	sector_t log_end;	/* where the io_unit ends */
	struct list_head log_sibling; /* log->running_ios */
	struct list_head stripe_list; /* stripes added to the io_unit */

	int state;
	bool need_split_bio;
};

/* r5l_io_unit state */
enum r5l_io_unit_state {
	IO_UNIT_RUNNING = 0,	/* accepting new IO */
	IO_UNIT_IO_START = 1,	/* io_unit bio started writing to the log,
				 * not accepting new bios */
	IO_UNIT_IO_END = 2,	/* io_unit bio finished writing to the log */
	IO_UNIT_STRIPE_END = 3,	/* stripe data finished writing to the raid disks */
};

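/*
 * The log is treated as a circular buffer: advance @start by @inc sectors,
 * wrapping around at the end of the log device.
 */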
static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
{
	start += inc;
	if (start >= log->device_size)
		start = start - log->device_size;
	return start;
}

static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
				  sector_t end)
{
	if (end >= start)
		return end - start;
	else
		return end + log->device_size - start;
}

static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
{
	sector_t used_size;

	used_size = r5l_ring_distance(log, log->last_checkpoint,
				      log->log_start);

	return log->device_size > used_size + size;
}

static void r5l_free_io_unit(struct r5l_log *log, struct r5l_io_unit *io)
{
	__free_page(io->meta_page);
	kmem_cache_free(log->io_kc, io);
}

static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
				    enum r5l_io_unit_state state)
{
	if (WARN_ON(io->state >= state))
		return;
	io->state = state;
}

static void r5l_io_run_stripes(struct r5l_io_unit *io)
{
	struct stripe_head *sh, *next;

	list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
		list_del_init(&sh->log_list);
		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
}

static void r5l_log_run_stripes(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;

	assert_spin_locked(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_IO_END)
			break;

		list_move_tail(&io->log_sibling, &log->finished_ios);
		r5l_io_run_stripes(io);
	}
}

static void r5l_move_to_end_ios(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;

	assert_spin_locked(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_IO_END)
			break;
		list_move_tail(&io->log_sibling, &log->io_end_ios);
	}
}

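/*
 * Called when an io_unit's log write completes. If the log device has a
 * volatile cache, finished io_units are only moved to io_end_ios and raid5d
 * is woken up to issue a cache flush; otherwise their stripes can be handed
 * to the raid disks right away.
 */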
static void r5l_log_endio(struct bio *bio)
{
	struct r5l_io_unit *io = bio->bi_private;
	struct r5l_log *log = io->log;
	unsigned long flags;

	if (bio->bi_error)
		md_error(log->rdev->mddev, log->rdev);

	bio_put(bio);

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_IO_END);
	if (log->need_cache_flush)
		r5l_move_to_end_ios(log);
	else
		r5l_log_run_stripes(log);
	spin_unlock_irqrestore(&log->io_list_lock, flags);

	if (log->need_cache_flush)
		md_wakeup_thread(log->rdev->mddev->thread);
}

static void r5l_submit_current_io(struct r5l_log *log)
{
	struct r5l_io_unit *io = log->current_io;
	struct r5l_meta_block *block;
	unsigned long flags;
	u32 crc;

	if (!io)
		return;

	block = page_address(io->meta_page);
	block->meta_size = cpu_to_le32(io->meta_offset);
	crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
	block->checksum = cpu_to_le32(crc);

	log->current_io = NULL;
	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_IO_START);
	spin_unlock_irqrestore(&log->io_list_lock, flags);

	submit_bio(WRITE, io->current_bio);
}

static struct bio *r5l_bio_alloc(struct r5l_log *log)
{
	struct bio *bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL, BIO_MAX_PAGES);

	bio->bi_rw = WRITE;
	bio->bi_bdev = log->rdev->bdev;
	bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;

	return bio;
}

static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io)
{
	log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);

	/*
	 * If we filled up the log device, start from the beginning again,
	 * which will require a new bio.
	 *
	 * Note: for this to work properly the log size needs to be a multiple
	 * of BLOCK_SECTORS.
	 */
	if (log->log_start == 0)
		io->need_split_bio = true;

	io->log_end = log->log_start;
}

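/*
 * Start a new io_unit: allocate the unit and its meta page, fill in the meta
 * block header (magic, version, sequence, position), make the meta page the
 * first page of the bio and reserve its block in the log.
 */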
static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
{
	struct r5l_io_unit *io;
	struct r5l_meta_block *block;

	/* We can't handle memory allocation failure so far */
	io = kmem_cache_zalloc(log->io_kc, GFP_NOIO | __GFP_NOFAIL);
	io->log = log;
	INIT_LIST_HEAD(&io->log_sibling);
	INIT_LIST_HEAD(&io->stripe_list);
	io->state = IO_UNIT_RUNNING;

	io->meta_page = alloc_page(GFP_NOIO | __GFP_NOFAIL | __GFP_ZERO);
	block = page_address(io->meta_page);
	block->magic = cpu_to_le32(R5LOG_MAGIC);
	block->version = R5LOG_VERSION;
	block->seq = cpu_to_le64(log->seq);
	block->position = cpu_to_le64(log->log_start);

	io->log_start = log->log_start;
	io->meta_offset = sizeof(struct r5l_meta_block);
	io->seq = log->seq++;

	io->current_bio = r5l_bio_alloc(log);
	io->current_bio->bi_end_io = r5l_log_endio;
	io->current_bio->bi_private = io;
	bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0);

	r5_reserve_log_entry(log, io);

	spin_lock_irq(&log->io_list_lock);
	list_add_tail(&io->log_sibling, &log->running_ios);
	spin_unlock_irq(&log->io_list_lock);

	return io;
}

static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size)
{
	if (log->current_io &&
	    log->current_io->meta_offset + payload_size > PAGE_SIZE)
		r5l_submit_current_io(log);

	if (!log->current_io)
		log->current_io = r5l_new_meta(log);
	return 0;
}

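/*
 * Append one payload descriptor (type, location and one or two page
 * checksums) to the current io_unit's meta block.
 */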
static void r5l_append_payload_meta(struct r5l_log *log, u16 type,
				    sector_t location,
				    u32 checksum1, u32 checksum2,
				    bool checksum2_valid)
{
	struct r5l_io_unit *io = log->current_io;
	struct r5l_payload_data_parity *payload;

	payload = page_address(io->meta_page) + io->meta_offset;
	payload->header.type = cpu_to_le16(type);
	payload->header.flags = cpu_to_le16(0);
	payload->size = cpu_to_le32((1 + !!checksum2_valid) <<
				    (PAGE_SHIFT - 9));
	payload->location = cpu_to_le64(location);
	payload->checksum[0] = cpu_to_le32(checksum1);
	if (checksum2_valid)
		payload->checksum[1] = cpu_to_le32(checksum2);

	io->meta_offset += sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * (1 + !!checksum2_valid);
}

static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
{
	struct r5l_io_unit *io = log->current_io;

	if (io->need_split_bio) {
		struct bio *prev = io->current_bio;

		io->current_bio = r5l_bio_alloc(log);
		bio_chain(io->current_bio, prev);

		submit_bio(WRITE, prev);
	}

	if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0))
		BUG();

	r5_reserve_log_entry(log, io);
}

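/*
 * Append a stripe to the current io_unit: one data payload plus page per
 * data block that will be written, followed by a single parity payload
 * covering P (and Q for RAID6).
 */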
static void r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
			   int data_pages, int parity_pages)
{
	int i;
	int meta_size;
	struct r5l_io_unit *io;

	meta_size =
		((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
		 * data_pages) +
		sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * parity_pages;

	r5l_get_meta(log, meta_size);
	io = log->current_io;

	for (i = 0; i < sh->disks; i++) {
		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
			continue;
		if (i == sh->pd_idx || i == sh->qd_idx)
			continue;
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
					raid5_compute_blocknr(sh, i, 0),
					sh->dev[i].log_checksum, 0, false);
		r5l_append_payload_page(log, sh->dev[i].page);
	}

	if (sh->qd_idx >= 0) {
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
					sh->sector, sh->dev[sh->pd_idx].log_checksum,
					sh->dev[sh->qd_idx].log_checksum, true);
		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
		r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
	} else {
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
					sh->sector, sh->dev[sh->pd_idx].log_checksum,
					0, false);
		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
	}

	list_add_tail(&sh->log_list, &io->stripe_list);
	atomic_inc(&io->pending_stripe);
	sh->log_io = io;
}

static void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
/*
 * running in raid5d, where reclaim could wait for raid5d too (when it flushes
 * data from log to raid disks), so we shouldn't wait for reclaim here
 */
int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
{
	int write_disks = 0;
	int data_pages, parity_pages;
	int meta_size;
	int reserve;
	int i;

	if (!log)
		return -EAGAIN;
	/* Don't support stripe batch */
	if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
	    test_bit(STRIPE_SYNCING, &sh->state)) {
		/* the stripe is written to log, we start writing it to raid */
		clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
		return -EAGAIN;
	}

	for (i = 0; i < sh->disks; i++) {
		void *addr;

		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
			continue;
		write_disks++;
		/* checksum is already calculated in last run */
		if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
			continue;
		addr = kmap_atomic(sh->dev[i].page);
		sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
						    addr, PAGE_SIZE);
		kunmap_atomic(addr);
	}
	parity_pages = 1 + !!(sh->qd_idx >= 0);
	data_pages = write_disks - parity_pages;

	meta_size =
		((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
		 * data_pages) +
		sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * parity_pages;
	/* Doesn't work with very big raid array */
	if (meta_size + sizeof(struct r5l_meta_block) > PAGE_SIZE)
		return -EINVAL;

	set_bit(STRIPE_LOG_TRAPPED, &sh->state);
	/*
	 * The stripe must enter state machine again to finish the write, so
	 * don't delay.
	 */
	clear_bit(STRIPE_DELAYED, &sh->state);
	atomic_inc(&sh->count);

	mutex_lock(&log->io_mutex);
	/* meta + data */
	reserve = (1 + write_disks) << (PAGE_SHIFT - 9);
	if (r5l_has_free_space(log, reserve))
		r5l_log_stripe(log, sh, data_pages, parity_pages);
	else {
		spin_lock(&log->no_space_stripes_lock);
		list_add_tail(&sh->log_list, &log->no_space_stripes);
		spin_unlock(&log->no_space_stripes_lock);

		r5l_wake_reclaim(log, reserve);
	}
	mutex_unlock(&log->io_mutex);

	return 0;
}

void r5l_write_stripe_run(struct r5l_log *log)
{
	if (!log)
		return;
	mutex_lock(&log->io_mutex);
	r5l_submit_current_io(log);
	mutex_unlock(&log->io_mutex);
}

int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
{
	if (!log)
		return -ENODEV;
	/*
	 * we flush log disk cache first, then write stripe data to raid disks.
	 * So if bio is finished, the log disk cache is flushed already. The
	 * recovery guarantees we can recover the bio from the log disk, so we
	 * don't need to flush again.
	 */
	if (bio->bi_iter.bi_size == 0) {
		bio_endio(bio);
		return 0;
	}
	bio->bi_rw &= ~REQ_FLUSH;
	return -EAGAIN;
}

/* This will run after log space is reclaimed */
static void r5l_run_no_space_stripes(struct r5l_log *log)
{
	struct stripe_head *sh;

	spin_lock(&log->no_space_stripes_lock);
	while (!list_empty(&log->no_space_stripes)) {
		sh = list_first_entry(&log->no_space_stripes,
				      struct stripe_head, log_list);
		list_del_init(&sh->log_list);
		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
	spin_unlock(&log->no_space_stripes_lock);
}

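/*
 * Space between the log tail (last_checkpoint) and the next checkpoint
 * candidate can be freed once the checkpoint is written out.
 */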
static sector_t r5l_reclaimable_space(struct r5l_log *log)
{
	return r5l_ring_distance(log, log->last_checkpoint,
				 log->next_checkpoint);
}

static bool r5l_complete_finished_ios(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;
	bool found = false;

	assert_spin_locked(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_STRIPE_END)
			break;

		log->next_checkpoint = io->log_start;
		log->next_cp_seq = io->seq;

		list_del(&io->log_sibling);
		r5l_free_io_unit(log, io);

		found = true;
	}

	return found;
}

static void __r5l_stripe_write_finished(struct r5l_io_unit *io)
{
	struct r5l_log *log = io->log;
	unsigned long flags;

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END);

	if (!r5l_complete_finished_ios(log)) {
		spin_unlock_irqrestore(&log->io_list_lock, flags);
		return;
	}

	if (r5l_reclaimable_space(log) > log->max_free_space)
		r5l_wake_reclaim(log, 0);

	spin_unlock_irqrestore(&log->io_list_lock, flags);
	wake_up(&log->iounit_wait);
}

void r5l_stripe_write_finished(struct stripe_head *sh)
{
	struct r5l_io_unit *io;

	io = sh->log_io;
	sh->log_io = NULL;

	if (io && atomic_dec_and_test(&io->pending_stripe))
		__r5l_stripe_write_finished(io);
}

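/*
 * The log device cache flush has completed: every io_unit on flushing_ios is
 * now stable on the log media, so its stripes can be written to the raid
 * disks and the io_unit moved to finished_ios.
 */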
static void r5l_log_flush_endio(struct bio *bio)
{
	struct r5l_log *log = container_of(bio, struct r5l_log,
		flush_bio);
	unsigned long flags;
	struct r5l_io_unit *io;

	if (bio->bi_error)
		md_error(log->rdev->mddev, log->rdev);

	spin_lock_irqsave(&log->io_list_lock, flags);
	list_for_each_entry(io, &log->flushing_ios, log_sibling)
		r5l_io_run_stripes(io);
	list_splice_tail_init(&log->flushing_ios, &log->finished_ios);
	spin_unlock_irqrestore(&log->io_list_lock, flags);
}

/*
 * Starting to dispatch IO to raid.
 * The log consists of io_units, each starting with a meta block. There is one
 * situation we want to avoid: a broken meta block in the middle of the log
 * makes recovery unable to find the meta blocks after it (toward the head of
 * the log). So if an operation requires a meta block near the head to be
 * persistent in the log, we must make sure the meta blocks before it are
 * persistent in the log too. A case is:
 *
 * stripe data/parity is in the log and we start writing the stripe to the
 * raid disks. The stripe data/parity must be persistent in the log before we
 * do the write to the raid disks.
 *
 * The solution is to strictly maintain io_unit list order. In this case, we
 * only write stripes of an io_unit to the raid disks once the io_unit is the
 * first one whose data/parity is in the log.
 */
void r5l_flush_stripe_to_raid(struct r5l_log *log)
{
	bool do_flush;

	if (!log || !log->need_cache_flush)
		return;

	spin_lock_irq(&log->io_list_lock);
	/* flush bio is running */
	if (!list_empty(&log->flushing_ios)) {
		spin_unlock_irq(&log->io_list_lock);
		return;
	}
	list_splice_tail_init(&log->io_end_ios, &log->flushing_ios);
	do_flush = !list_empty(&log->flushing_ios);
	spin_unlock_irq(&log->io_list_lock);

	if (!do_flush)
		return;
	bio_reset(&log->flush_bio);
	log->flush_bio.bi_bdev = log->rdev->bdev;
	log->flush_bio.bi_end_io = r5l_log_flush_endio;
	submit_bio(WRITE_FLUSH, &log->flush_bio);
}


static void r5l_write_super(struct r5l_log *log, sector_t cp);
static void r5l_write_super_and_discard_space(struct r5l_log *log,
	sector_t end)
{
	struct block_device *bdev = log->rdev->bdev;
	struct mddev *mddev;

	r5l_write_super(log, end);

	if (!blk_queue_discard(bdev_get_queue(bdev)))
		return;

	mddev = log->rdev->mddev;
	/*
	 * This is to avoid a deadlock. r5l_quiesce holds reconfig_mutex and
	 * waits for this thread to finish. This thread waits for
	 * MD_CHANGE_PENDING to be cleared, which is supposed to be done in
	 * md_check_recovery(). md_check_recovery() tries to get
	 * reconfig_mutex. Since r5l_quiesce already holds the mutex,
	 * md_check_recovery() fails, so the PENDING flag never gets cleared.
	 * The in_teardown check works around this issue.
	 */
	if (!log->in_teardown) {
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		set_bit(MD_CHANGE_PENDING, &mddev->flags);
		md_wakeup_thread(mddev->thread);
		wait_event(mddev->sb_wait,
			!test_bit(MD_CHANGE_PENDING, &mddev->flags) ||
			log->in_teardown);
		/*
		 * r5l_quiesce could run after the in_teardown check and take
		 * the mutex first. The superblock might get updated twice.
		 */
		if (log->in_teardown)
			md_update_sb(mddev, 1);
	} else {
		WARN_ON(!mddev_is_locked(mddev));
		md_update_sb(mddev, 1);
	}

	/* discard IO error really doesn't matter, ignore it */
	if (log->last_checkpoint < end) {
		blkdev_issue_discard(bdev,
				log->last_checkpoint + log->rdev->data_offset,
				end - log->last_checkpoint, GFP_NOIO, 0);
	} else {
		blkdev_issue_discard(bdev,
				log->last_checkpoint + log->rdev->data_offset,
				log->device_size - log->last_checkpoint,
				GFP_NOIO, 0);
		blkdev_issue_discard(bdev, log->rdev->data_offset, end,
				GFP_NOIO, 0);
	}
}

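/*
 * Wait until enough io_units at the log tail have reached
 * IO_UNIT_STRIPE_END, then write the superblock, discard the freed log
 * range, advance last_checkpoint/last_cp_seq and rerun stripes that were
 * stalled for lack of log space.
 */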
static void r5l_do_reclaim(struct r5l_log *log)
{
	sector_t reclaim_target = xchg(&log->reclaim_target, 0);
	sector_t reclaimable;
	sector_t next_checkpoint;
	u64 next_cp_seq;

	spin_lock_irq(&log->io_list_lock);
	/*
	 * move proper io_unit to reclaim list. We should not change the order.
	 * reclaimable/unreclaimable io_unit can be mixed in the list, we
	 * shouldn't reuse space of an unreclaimable io_unit
	 */
	while (1) {
		reclaimable = r5l_reclaimable_space(log);
		if (reclaimable >= reclaim_target ||
		    (list_empty(&log->running_ios) &&
		     list_empty(&log->io_end_ios) &&
		     list_empty(&log->flushing_ios) &&
		     list_empty(&log->finished_ios)))
			break;

		md_wakeup_thread(log->rdev->mddev->thread);
		wait_event_lock_irq(log->iounit_wait,
				    r5l_reclaimable_space(log) > reclaimable,
				    log->io_list_lock);
	}

	next_checkpoint = log->next_checkpoint;
	next_cp_seq = log->next_cp_seq;
	spin_unlock_irq(&log->io_list_lock);

	BUG_ON(reclaimable < 0);
	if (reclaimable == 0)
		return;

	/*
	 * write_super will flush cache of each raid disk. We must write super
	 * here, because the log area might be reused soon and we don't want to
	 * confuse recovery
	 */
	r5l_write_super_and_discard_space(log, next_checkpoint);

	mutex_lock(&log->io_mutex);
	log->last_checkpoint = next_checkpoint;
	log->last_cp_seq = next_cp_seq;
	mutex_unlock(&log->io_mutex);

	r5l_run_no_space_stripes(log);
}

static void r5l_reclaim_thread(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	struct r5conf *conf = mddev->private;
	struct r5l_log *log = conf->log;

	if (!log)
		return;
	r5l_do_reclaim(log);
}

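/*
 * Raise the reclaim target (lockless via cmpxchg; the target is only ever
 * increased here) and kick the reclaim thread.
 */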
static void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
{
	unsigned long target;
	unsigned long new = (unsigned long)space; /* overflow in theory */

	do {
		target = log->reclaim_target;
		if (new < target)
			return;
	} while (cmpxchg(&log->reclaim_target, target, new) != target);
	md_wakeup_thread(log->reclaim_thread);
}

void r5l_quiesce(struct r5l_log *log, int state)
{
	struct mddev *mddev;
	if (!log || state == 2)
		return;
	if (state == 0) {
		log->in_teardown = 0;
		log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
					log->rdev->mddev, "reclaim");
	} else if (state == 1) {
		/*
		 * at this point all stripes are finished, so io_unit is at
		 * least in STRIPE_END state
		 */
		log->in_teardown = 1;
		/* make sure r5l_write_super_and_discard_space exits */
		mddev = log->rdev->mddev;
		wake_up(&mddev->sb_wait);
		r5l_wake_reclaim(log, -1L);
		md_unregister_thread(&log->reclaim_thread);
		r5l_do_reclaim(log);
	}
}

bool r5l_log_disk_error(struct r5conf *conf)
{
	/* don't allow write if journal disk is missing */
	if (!conf->log)
		return test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
	return test_bit(Faulty, &conf->log->rdev->flags);
}

struct r5l_recovery_ctx {
	struct page *meta_page;		/* current meta */
	sector_t meta_total_blocks;	/* total size of current meta and data */
	sector_t pos;			/* recovery position */
	u64 seq;			/* recovery position seq */
};

static int r5l_read_meta_block(struct r5l_log *log,
			       struct r5l_recovery_ctx *ctx)
{
	struct page *page = ctx->meta_page;
	struct r5l_meta_block *mb;
	u32 crc, stored_crc;

	if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, READ, false))
		return -EIO;

	mb = page_address(page);
	stored_crc = le32_to_cpu(mb->checksum);
	mb->checksum = 0;

	if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
	    le64_to_cpu(mb->seq) != ctx->seq ||
	    mb->version != R5LOG_VERSION ||
	    le64_to_cpu(mb->position) != ctx->pos)
		return -EINVAL;

	crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
	if (stored_crc != crc)
		return -EINVAL;

	if (le32_to_cpu(mb->meta_size) > PAGE_SIZE)
		return -EINVAL;

	ctx->meta_total_blocks = BLOCK_SECTORS;

	return 0;
}

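/*
 * Replay one stripe from the log: read its data and parity pages back from
 * the log, verify the per-page checksums recorded in the meta block, and
 * write the pages to the raid disks (and any replacement devices).
 */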
static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
					 struct r5l_recovery_ctx *ctx,
					 sector_t stripe_sect,
					 int *offset, sector_t *log_offset)
{
	struct r5conf *conf = log->rdev->mddev->private;
	struct stripe_head *sh;
	struct r5l_payload_data_parity *payload;
	int disk_index;

	sh = raid5_get_active_stripe(conf, stripe_sect, 0, 0, 0);
	while (1) {
		payload = page_address(ctx->meta_page) + *offset;

		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
			raid5_compute_sector(conf,
					     le64_to_cpu(payload->location), 0,
					     &disk_index, sh);

			sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
				     sh->dev[disk_index].page, READ, false);
			sh->dev[disk_index].log_checksum =
				le32_to_cpu(payload->checksum[0]);
			set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
			ctx->meta_total_blocks += BLOCK_SECTORS;
		} else {
			disk_index = sh->pd_idx;
			sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
				     sh->dev[disk_index].page, READ, false);
			sh->dev[disk_index].log_checksum =
				le32_to_cpu(payload->checksum[0]);
			set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);

			if (sh->qd_idx >= 0) {
				disk_index = sh->qd_idx;
				sync_page_io(log->rdev,
					r5l_ring_add(log, *log_offset, BLOCK_SECTORS),
					PAGE_SIZE, sh->dev[disk_index].page,
					READ, false);
				sh->dev[disk_index].log_checksum =
					le32_to_cpu(payload->checksum[1]);
				set_bit(R5_Wantwrite,
					&sh->dev[disk_index].flags);
			}
			ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
		}

		*log_offset = r5l_ring_add(log, *log_offset,
					   le32_to_cpu(payload->size));
		*offset += sizeof(struct r5l_payload_data_parity) +
			sizeof(__le32) *
			(le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
			break;
	}

	for (disk_index = 0; disk_index < sh->disks; disk_index++) {
		void *addr;
		u32 checksum;

		if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
			continue;
		addr = kmap_atomic(sh->dev[disk_index].page);
		checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
		kunmap_atomic(addr);
		if (checksum != sh->dev[disk_index].log_checksum)
			goto error;
	}

	for (disk_index = 0; disk_index < sh->disks; disk_index++) {
		struct md_rdev *rdev, *rrdev;

		if (!test_and_clear_bit(R5_Wantwrite,
					&sh->dev[disk_index].flags))
			continue;

		/* in case device is broken */
		rdev = rcu_dereference(conf->disks[disk_index].rdev);
		if (rdev)
			sync_page_io(rdev, stripe_sect, PAGE_SIZE,
				     sh->dev[disk_index].page, WRITE, false);
		rrdev = rcu_dereference(conf->disks[disk_index].replacement);
		if (rrdev)
			sync_page_io(rrdev, stripe_sect, PAGE_SIZE,
				     sh->dev[disk_index].page, WRITE, false);
	}
	raid5_release_stripe(sh);
	return 0;

error:
	for (disk_index = 0; disk_index < sh->disks; disk_index++)
		sh->dev[disk_index].flags = 0;
	raid5_release_stripe(sh);
	return -EINVAL;
}

static int r5l_recovery_flush_one_meta(struct r5l_log *log,
				       struct r5l_recovery_ctx *ctx)
{
	struct r5conf *conf = log->rdev->mddev->private;
	struct r5l_payload_data_parity *payload;
	struct r5l_meta_block *mb;
	int offset;
	sector_t log_offset;
	sector_t stripe_sector;

	mb = page_address(ctx->meta_page);
	offset = sizeof(struct r5l_meta_block);
	log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);

	while (offset < le32_to_cpu(mb->meta_size)) {
		int dd;

		payload = (void *)mb + offset;
		stripe_sector = raid5_compute_sector(conf,
						     le64_to_cpu(payload->location), 0, &dd, NULL);
		if (r5l_recovery_flush_one_stripe(log, ctx, stripe_sector,
						  &offset, &log_offset))
			return -EINVAL;
	}
	return 0;
}

/* copy data/parity from log to raid disks */
static void r5l_recovery_flush_log(struct r5l_log *log,
				   struct r5l_recovery_ctx *ctx)
{
	while (1) {
		if (r5l_read_meta_block(log, ctx))
			return;
		if (r5l_recovery_flush_one_meta(log, ctx))
			return;
		ctx->seq++;
		ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
	}
}

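/*
 * Write a valid but empty meta block (with FUA) at @pos with sequence @seq.
 * Used after recovery to terminate the log at a known point.
 */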
static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
					  u64 seq)
{
	struct page *page;
	struct r5l_meta_block *mb;
	u32 crc;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return -ENOMEM;
	mb = page_address(page);
	mb->magic = cpu_to_le32(R5LOG_MAGIC);
	mb->version = R5LOG_VERSION;
	mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
	mb->seq = cpu_to_le64(seq);
	mb->position = cpu_to_le64(pos);
	crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
	mb->checksum = cpu_to_le32(crc);

	if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, WRITE_FUA, false)) {
		__free_page(page);
		return -EIO;
	}
	__free_page(page);
	return 0;
}

static int r5l_recovery_log(struct r5l_log *log)
{
	struct r5l_recovery_ctx ctx;

	ctx.pos = log->last_checkpoint;
	ctx.seq = log->last_cp_seq;
	ctx.meta_page = alloc_page(GFP_KERNEL);
	if (!ctx.meta_page)
		return -ENOMEM;

	r5l_recovery_flush_log(log, &ctx);
	__free_page(ctx.meta_page);

	/*
	 * we did a recovery. Now ctx.pos points to an invalid meta block. New
	 * log entries will start there, but we can't let the superblock point
	 * to the last valid meta block. The log might look like:
	 * | meta 1| meta 2| meta 3|
	 * meta 1 is valid, meta 2 is invalid and meta 3 could be valid. If the
	 * superblock points to meta 1 and we write a new valid meta 2n, and a
	 * crash happens again, the new recovery will start from meta 1. Since
	 * meta 2n is valid now, recovery will think meta 3 is valid too, which
	 * is wrong. The solution is to create a new meta at meta 2's position
	 * with seq == meta 1's seq + 10 and let the superblock point to it.
	 * The same recovery will then not treat meta 3 as a valid meta,
	 * because its seq doesn't match.
	 */
	if (ctx.seq > log->last_cp_seq + 1) {
		int ret;

		ret = r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq + 10);
		if (ret)
			return ret;
		log->seq = ctx.seq + 11;
		log->log_start = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
		r5l_write_super(log, ctx.pos);
	} else {
		log->log_start = ctx.pos;
		log->seq = ctx.seq;
	}
	return 0;
}

static void r5l_write_super(struct r5l_log *log, sector_t cp)
{
	struct mddev *mddev = log->rdev->mddev;

	log->rdev->journal_tail = cp;
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
}

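/*
 * Read and validate the meta block at the recorded journal tail. If it is
 * missing or invalid, start a fresh log; otherwise pick up the old tail.
 * Then replay the log contents onto the raid disks.
 */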
static int r5l_load_log(struct r5l_log *log)
{
	struct md_rdev *rdev = log->rdev;
	struct page *page;
	struct r5l_meta_block *mb;
	sector_t cp = log->rdev->journal_tail;
	u32 stored_crc, expected_crc;
	bool create_super = false;
	int ret;

	/* Make sure it's valid */
	if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
		cp = 0;
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	if (!sync_page_io(rdev, cp, PAGE_SIZE, page, READ, false)) {
		ret = -EIO;
		goto ioerr;
	}
	mb = page_address(page);

	if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
	    mb->version != R5LOG_VERSION) {
		create_super = true;
		goto create;
	}
	stored_crc = le32_to_cpu(mb->checksum);
	mb->checksum = 0;
	expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
	if (stored_crc != expected_crc) {
		create_super = true;
		goto create;
	}
	if (le64_to_cpu(mb->position) != cp) {
		create_super = true;
		goto create;
	}
create:
	if (create_super) {
		log->last_cp_seq = prandom_u32();
		cp = 0;
		/*
		 * Make sure the super points to a correct address. The log
		 * might have data very soon. If the super doesn't have the
		 * correct log tail address, recovery can't find the log.
		 */
		r5l_write_super(log, cp);
	} else
		log->last_cp_seq = le64_to_cpu(mb->seq);

	log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
	log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
	if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
		log->max_free_space = RECLAIM_MAX_FREE_SPACE;
	log->last_checkpoint = cp;

	__free_page(page);

	return r5l_recovery_log(log);
ioerr:
	__free_page(page);
	return ret;
}

int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
{
	struct r5l_log *log;

	if (PAGE_SIZE != 4096)
		return -EINVAL;
	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		return -ENOMEM;
	log->rdev = rdev;

	log->need_cache_flush = (rdev->bdev->bd_disk->queue->flush_flags != 0);

	log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
				       sizeof(rdev->mddev->uuid));

	mutex_init(&log->io_mutex);

	spin_lock_init(&log->io_list_lock);
	INIT_LIST_HEAD(&log->running_ios);
	INIT_LIST_HEAD(&log->io_end_ios);
	INIT_LIST_HEAD(&log->flushing_ios);
	INIT_LIST_HEAD(&log->finished_ios);
	bio_init(&log->flush_bio);

	log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
	if (!log->io_kc)
		goto io_kc;

	log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
						 log->rdev->mddev, "reclaim");
	if (!log->reclaim_thread)
		goto reclaim_thread;
	init_waitqueue_head(&log->iounit_wait);

	INIT_LIST_HEAD(&log->no_space_stripes);
	spin_lock_init(&log->no_space_stripes_lock);

	if (r5l_load_log(log))
		goto error;

	conf->log = log;
	return 0;
error:
	md_unregister_thread(&log->reclaim_thread);
reclaim_thread:
	kmem_cache_destroy(log->io_kc);
io_kc:
	kfree(log);
	return -EINVAL;
}

void r5l_exit_log(struct r5l_log *log)
{
	md_unregister_thread(&log->reclaim_thread);
	kmem_cache_destroy(log->io_kc);
	kfree(log);
}