/*
 * Copyright (C) 2015 Shaohua Li <shli@fb.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/raid/md_p.h>
#include <linux/crc32c.h>
#include <linux/random.h>
#include "md.h"
#include "raid5.h"

/*
 * Metadata and data are stored on disk in 4k units (blocks), regardless of the
 * underlying hardware sector size. This only works with PAGE_SIZE == 4096, so
 * one block is 8 sectors (8 * 512 bytes).
 */
#define BLOCK_SECTORS (8)

/*
 * Reclaim runs once the reclaimable space reaches 1/4 of the disk size or
 * 10GiB (10 * 1024 * 1024 * 2 sectors), whichever is smaller. This keeps
 * recovery from having to scan a very long log.
 */
#define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
#define RECLAIM_MAX_FREE_SPACE_SHIFT (2)

struct r5l_log {
        struct md_rdev *rdev;

        u32 uuid_checksum;

        sector_t device_size;           /* log device size, rounded to
                                         * BLOCK_SECTORS */
        sector_t max_free_space;        /* reclaim runs if free space is at
                                         * this size */

        sector_t last_checkpoint;       /* log tail. where recovery scan
                                         * starts from */
        u64 last_cp_seq;                /* log tail sequence */

        sector_t log_start;             /* log head. where new data appends */
        u64 seq;                        /* log head sequence */

        sector_t next_checkpoint;
        u64 next_cp_seq;

        struct mutex io_mutex;
        struct r5l_io_unit *current_io; /* current io_unit accepting new data */

        spinlock_t io_list_lock;
        struct list_head running_ios;   /* io_units which are still running,
                                         * and have not yet been completely
                                         * written to the log */
        struct list_head io_end_ios;    /* io_units which have been completely
                                         * written to the log but not yet
                                         * written to the RAID */
        struct list_head flushing_ios;  /* io_units which are waiting for log
                                         * cache flush */
        struct list_head finished_ios;  /* io_units which have settled down in
                                         * the log disk */
        struct bio flush_bio;

        struct kmem_cache *io_kc;

        struct md_thread *reclaim_thread;
        unsigned long reclaim_target;   /* amount of space (in sectors) that
                                         * needs to be reclaimed. if it's 0,
                                         * reclaim the space used by io_units
                                         * which are in IO_UNIT_STRIPE_END
                                         * state (i.e. reclaim doesn't wait for
                                         * a specific io_unit to switch to the
                                         * IO_UNIT_STRIPE_END state) */
        wait_queue_head_t iounit_wait;

        struct list_head no_space_stripes; /* pending stripes, log has no space */
        spinlock_t no_space_stripes_lock;

        bool need_cache_flush;
};

/*
 * An IO range starts from a meta data block and ends at the next meta data
 * block. The io unit's meta data block tracks the data/parity that follows it.
 * An io unit is written to the log disk with normal writes; as we always flush
 * the log disk first and only then start moving data to the raid disks, there
 * is no requirement to write the io unit with FLUSH/FUA.
 */
struct r5l_io_unit {
        struct r5l_log *log;

        struct page *meta_page; /* store meta block */
        int meta_offset;        /* current offset in meta_page */

        struct bio_list bios;
        atomic_t pending_io;    /* pending bios not written to log yet */
        struct bio *current_bio;/* current_bio accepting new data */

        atomic_t pending_stripe;/* how many stripes not flushed to raid */
        u64 seq;                /* seq number of the metablock */
        sector_t log_start;     /* where the io_unit starts */
        sector_t log_end;       /* where the io_unit ends */
        struct list_head log_sibling; /* log->running_ios */
        struct list_head stripe_list; /* stripes added to the io_unit */

        int state;
};

/* r5l_io_unit state */
enum r5l_io_unit_state {
        IO_UNIT_RUNNING = 0,    /* accepting new IO */
        IO_UNIT_IO_START = 1,   /* io_unit bios have started writing to the
                                 * log; no new bio is accepted */
        IO_UNIT_IO_END = 2,     /* io_unit bios finished writing to the log */
        IO_UNIT_STRIPE_END = 3, /* stripes' data finished writing to raid */
};
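
/*
 * io_unit state transitions, as driven by the code below:
 *   IO_UNIT_RUNNING  -> IO_UNIT_IO_START   in r5l_submit_current_io()
 *   IO_UNIT_IO_START -> IO_UNIT_IO_END     in r5l_log_endio()
 *   IO_UNIT_IO_END   -> IO_UNIT_STRIPE_END in __r5l_stripe_write_finished()
 */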

static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
{
        start += inc;
        if (start >= log->device_size)
                start = start - log->device_size;
        return start;
}

static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
                                  sector_t end)
{
        if (end >= start)
                return end - start;
        else
                return end + log->device_size - start;
}
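
/*
 * Worked example with hypothetical numbers: for device_size = 1000 sectors,
 * r5l_ring_add(log, 990, 30) wraps around to sector 20, and
 * r5l_ring_distance(log, 990, 20) = 20 + 1000 - 990 = 30 sectors.
 */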

static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
{
        sector_t used_size;

        used_size = r5l_ring_distance(log, log->last_checkpoint,
                                      log->log_start);

        return log->device_size > used_size + size;
}

static struct r5l_io_unit *r5l_alloc_io_unit(struct r5l_log *log)
{
        struct r5l_io_unit *io;
        /* We can't handle memory allocation failure so far */
        gfp_t gfp = GFP_NOIO | __GFP_NOFAIL;

        io = kmem_cache_zalloc(log->io_kc, gfp);
        io->log = log;
        io->meta_page = alloc_page(gfp | __GFP_ZERO);

        bio_list_init(&io->bios);
        INIT_LIST_HEAD(&io->log_sibling);
        INIT_LIST_HEAD(&io->stripe_list);
        io->state = IO_UNIT_RUNNING;
        return io;
}

static void r5l_free_io_unit(struct r5l_log *log, struct r5l_io_unit *io)
{
        __free_page(io->meta_page);
        kmem_cache_free(log->io_kc, io);
}

static void r5l_move_io_unit_list(struct list_head *from, struct list_head *to,
                                  enum r5l_io_unit_state state)
{
        struct r5l_io_unit *io;

        while (!list_empty(from)) {
                io = list_first_entry(from, struct r5l_io_unit, log_sibling);
                /* don't change list order */
                if (io->state >= state)
                        list_move_tail(&io->log_sibling, to);
                else
                        break;
        }
}

static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
                                    enum r5l_io_unit_state state)
{
        if (WARN_ON(io->state >= state))
                return;
        io->state = state;
}

static void r5l_io_run_stripes(struct r5l_io_unit *io)
{
        struct stripe_head *sh, *next;

        list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
                list_del_init(&sh->log_list);
                set_bit(STRIPE_HANDLE, &sh->state);
                raid5_release_stripe(sh);
        }
}

/* XXX: totally ignores I/O errors */
static void r5l_log_run_stripes(struct r5l_log *log)
{
        struct r5l_io_unit *io, *next;

        assert_spin_locked(&log->io_list_lock);

        list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
                /* don't change list order */
                if (io->state < IO_UNIT_IO_END)
                        break;

                list_move_tail(&io->log_sibling, &log->finished_ios);
                r5l_io_run_stripes(io);
        }
}

static void r5l_log_endio(struct bio *bio)
{
        struct r5l_io_unit *io = bio->bi_private;
        struct r5l_log *log = io->log;
        unsigned long flags;

        bio_put(bio);

        if (!atomic_dec_and_test(&io->pending_io))
                return;

        spin_lock_irqsave(&log->io_list_lock, flags);
        __r5l_set_io_unit_state(io, IO_UNIT_IO_END);
        if (log->need_cache_flush)
                r5l_move_io_unit_list(&log->running_ios, &log->io_end_ios,
                                      IO_UNIT_IO_END);
        else
                r5l_log_run_stripes(log);
        spin_unlock_irqrestore(&log->io_list_lock, flags);

        if (log->need_cache_flush)
                md_wakeup_thread(log->rdev->mddev->thread);
}

static void r5l_submit_current_io(struct r5l_log *log)
{
        struct r5l_io_unit *io = log->current_io;
        struct r5l_meta_block *block;
        struct bio *bio;
        unsigned long flags;
        u32 crc;

        if (!io)
                return;

        block = page_address(io->meta_page);
        block->meta_size = cpu_to_le32(io->meta_offset);
        crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
        block->checksum = cpu_to_le32(crc);

        log->current_io = NULL;
        spin_lock_irqsave(&log->io_list_lock, flags);
        __r5l_set_io_unit_state(io, IO_UNIT_IO_START);
        spin_unlock_irqrestore(&log->io_list_lock, flags);

        while ((bio = bio_list_pop(&io->bios))) {
                /* all IO must start from rdev->data_offset */
                bio->bi_iter.bi_sector += log->rdev->data_offset;
                submit_bio(WRITE, bio);
        }
}

static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
{
        struct r5l_io_unit *io;
        struct r5l_meta_block *block;
        struct bio *bio;

        io = r5l_alloc_io_unit(log);

        block = page_address(io->meta_page);
        block->magic = cpu_to_le32(R5LOG_MAGIC);
        block->version = R5LOG_VERSION;
        block->seq = cpu_to_le64(log->seq);
        block->position = cpu_to_le64(log->log_start);

        io->log_start = log->log_start;
        io->meta_offset = sizeof(struct r5l_meta_block);
        io->seq = log->seq;

        bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL, BIO_MAX_PAGES);
        io->current_bio = bio;
        bio->bi_rw = WRITE;
        bio->bi_bdev = log->rdev->bdev;
        bio->bi_iter.bi_sector = log->log_start;
        bio_add_page(bio, io->meta_page, PAGE_SIZE, 0);
        bio->bi_end_io = r5l_log_endio;
        bio->bi_private = io;

        bio_list_add(&io->bios, bio);
        atomic_inc(&io->pending_io);

        log->seq++;
        log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);
        io->log_end = log->log_start;
        /* current bio hit the disk end */
        if (log->log_start == 0)
                io->current_bio = NULL;

        spin_lock_irq(&log->io_list_lock);
        list_add_tail(&io->log_sibling, &log->running_ios);
        spin_unlock_irq(&log->io_list_lock);

        return io;
}

static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size)
{
        if (log->current_io &&
            log->current_io->meta_offset + payload_size > PAGE_SIZE)
                r5l_submit_current_io(log);

        if (!log->current_io)
                log->current_io = r5l_new_meta(log);
        return 0;
}

static void r5l_append_payload_meta(struct r5l_log *log, u16 type,
                                    sector_t location,
                                    u32 checksum1, u32 checksum2,
                                    bool checksum2_valid)
{
        struct r5l_io_unit *io = log->current_io;
        struct r5l_payload_data_parity *payload;

        payload = page_address(io->meta_page) + io->meta_offset;
        payload->header.type = cpu_to_le16(type);
        payload->header.flags = cpu_to_le16(0);
        payload->size = cpu_to_le32((1 + !!checksum2_valid) <<
                                    (PAGE_SHIFT - 9));
        payload->location = cpu_to_le64(location);
        payload->checksum[0] = cpu_to_le32(checksum1);
        if (checksum2_valid)
                payload->checksum[1] = cpu_to_le32(checksum2);

        io->meta_offset += sizeof(struct r5l_payload_data_parity) +
                sizeof(__le32) * (1 + !!checksum2_valid);
}

static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
{
        struct r5l_io_unit *io = log->current_io;

alloc_bio:
        if (!io->current_bio) {
                struct bio *bio;

                bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL, BIO_MAX_PAGES);
                bio->bi_rw = WRITE;
                bio->bi_bdev = log->rdev->bdev;
                bio->bi_iter.bi_sector = log->log_start;
                bio->bi_end_io = r5l_log_endio;
                bio->bi_private = io;
                bio_list_add(&io->bios, bio);
                atomic_inc(&io->pending_io);
                io->current_bio = bio;
        }
        if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0)) {
                io->current_bio = NULL;
                goto alloc_bio;
        }
        log->log_start = r5l_ring_add(log, log->log_start,
                                      BLOCK_SECTORS);
        /* current bio hit the disk end */
        if (log->log_start == 0)
                io->current_bio = NULL;

        io->log_end = log->log_start;
}

static void r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
                           int data_pages, int parity_pages)
{
        int i;
        int meta_size;
        struct r5l_io_unit *io;

        meta_size =
                ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
                 * data_pages) +
                sizeof(struct r5l_payload_data_parity) +
                sizeof(__le32) * parity_pages;

        r5l_get_meta(log, meta_size);
        io = log->current_io;

        for (i = 0; i < sh->disks; i++) {
                if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
                        continue;
                if (i == sh->pd_idx || i == sh->qd_idx)
                        continue;
                r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
                                        raid5_compute_blocknr(sh, i, 0),
                                        sh->dev[i].log_checksum, 0, false);
                r5l_append_payload_page(log, sh->dev[i].page);
        }

        if (sh->qd_idx >= 0) {
                r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
                                        sh->sector, sh->dev[sh->pd_idx].log_checksum,
                                        sh->dev[sh->qd_idx].log_checksum, true);
                r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
                r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
        } else {
                r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
                                        sh->sector, sh->dev[sh->pd_idx].log_checksum,
                                        0, false);
                r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
        }

        list_add_tail(&sh->log_list, &io->stripe_list);
        atomic_inc(&io->pending_stripe);
        sh->log_io = io;
}
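
/*
 * Resulting on-disk layout for one logged stripe, one 4k block per line
 * (illustrative; the exact counts depend on how many devices are written):
 *   meta block:    r5l_meta_block header, then one r5l_payload_data_parity
 *                  descriptor (plus its checksum word(s)) per appended page
 *   data blocks:   one block per data page appended above
 *   parity blocks: the P page, plus the Q page when qd_idx >= 0 (RAID6)
 */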

static void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
/*
 * Running in raid5d, where reclaim could wait for raid5d too (when it flushes
 * data from the log to the raid disks), so we shouldn't wait for reclaim here.
 */
int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
{
        int write_disks = 0;
        int data_pages, parity_pages;
        int meta_size;
        int reserve;
        int i;

        if (!log)
                return -EAGAIN;
        /* Don't support stripe batch */
        if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
            test_bit(STRIPE_SYNCING, &sh->state)) {
                /* the stripe is already written to the log, so start writing it to raid */
                clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
                return -EAGAIN;
        }

        for (i = 0; i < sh->disks; i++) {
                void *addr;

                if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
                        continue;
                write_disks++;
                /* checksums were already calculated in the last run */
                if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
                        continue;
                addr = kmap_atomic(sh->dev[i].page);
                sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
                                                    addr, PAGE_SIZE);
                kunmap_atomic(addr);
        }
        parity_pages = 1 + !!(sh->qd_idx >= 0);
        data_pages = write_disks - parity_pages;

        meta_size =
                ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
                 * data_pages) +
                sizeof(struct r5l_payload_data_parity) +
                sizeof(__le32) * parity_pages;
        /* Doesn't work with a very big raid array */
        if (meta_size + sizeof(struct r5l_meta_block) > PAGE_SIZE)
                return -EINVAL;

        set_bit(STRIPE_LOG_TRAPPED, &sh->state);
        /*
         * The stripe must enter the state machine again to finish the write,
         * so don't delay.
         */
        clear_bit(STRIPE_DELAYED, &sh->state);
        atomic_inc(&sh->count);

        mutex_lock(&log->io_mutex);
        /* meta + data */
        reserve = (1 + write_disks) << (PAGE_SHIFT - 9);
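        /*
         * Hypothetical example: a 6-device RAID6 stripe with every device
         * written has write_disks = 6 (4 data pages + 2 parity pages), so the
         * reservation is (1 meta + 6 data/parity) * 8 sectors = 56 sectors.
         */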
        if (r5l_has_free_space(log, reserve))
                r5l_log_stripe(log, sh, data_pages, parity_pages);
        else {
                spin_lock(&log->no_space_stripes_lock);
                list_add_tail(&sh->log_list, &log->no_space_stripes);
                spin_unlock(&log->no_space_stripes_lock);

                r5l_wake_reclaim(log, reserve);
        }
        mutex_unlock(&log->io_mutex);

        return 0;
}

void r5l_write_stripe_run(struct r5l_log *log)
{
        if (!log)
                return;
        mutex_lock(&log->io_mutex);
        r5l_submit_current_io(log);
        mutex_unlock(&log->io_mutex);
}

int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
{
        if (!log)
                return -ENODEV;
        /*
         * we flush the log disk cache first, then write stripe data to the
         * raid disks. So if bio is finished, the log disk cache is flushed
         * already. The recovery guarantees we can recover the bio from the
         * log disk, so we don't need to flush again.
         */
        if (bio->bi_iter.bi_size == 0) {
                bio_endio(bio);
                return 0;
        }
        bio->bi_rw &= ~REQ_FLUSH;
        return -EAGAIN;
}

/* This will run after log space is reclaimed */
static void r5l_run_no_space_stripes(struct r5l_log *log)
{
        struct stripe_head *sh;

        spin_lock(&log->no_space_stripes_lock);
        while (!list_empty(&log->no_space_stripes)) {
                sh = list_first_entry(&log->no_space_stripes,
                                      struct stripe_head, log_list);
                list_del_init(&sh->log_list);
                set_bit(STRIPE_HANDLE, &sh->state);
                raid5_release_stripe(sh);
        }
        spin_unlock(&log->no_space_stripes_lock);
}

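/*
 * Space that can be reclaimed right now: the distance from the current log
 * tail (last_checkpoint) to the tail candidate (next_checkpoint) recorded by
 * the last io_unit whose stripes reached IO_UNIT_STRIPE_END.
 */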
static sector_t r5l_reclaimable_space(struct r5l_log *log)
{
        return r5l_ring_distance(log, log->last_checkpoint,
                                 log->next_checkpoint);
}

static bool r5l_complete_finished_ios(struct r5l_log *log)
{
        struct r5l_io_unit *io, *next;
        bool found = false;

        assert_spin_locked(&log->io_list_lock);

        list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) {
                /* don't change list order */
                if (io->state < IO_UNIT_STRIPE_END)
                        break;

                log->next_checkpoint = io->log_start;
                log->next_cp_seq = io->seq;

                list_del(&io->log_sibling);
                r5l_free_io_unit(log, io);

                found = true;
        }

        return found;
}

static void __r5l_stripe_write_finished(struct r5l_io_unit *io)
{
        struct r5l_log *log = io->log;
        unsigned long flags;

        spin_lock_irqsave(&log->io_list_lock, flags);
        __r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END);

        if (!r5l_complete_finished_ios(log)) {
                spin_unlock_irqrestore(&log->io_list_lock, flags);
                return;
        }

        if (r5l_reclaimable_space(log) > log->max_free_space)
                r5l_wake_reclaim(log, 0);

        spin_unlock_irqrestore(&log->io_list_lock, flags);
        wake_up(&log->iounit_wait);
}

void r5l_stripe_write_finished(struct stripe_head *sh)
{
        struct r5l_io_unit *io;

        io = sh->log_io;
        sh->log_io = NULL;

        if (io && atomic_dec_and_test(&io->pending_stripe))
                __r5l_stripe_write_finished(io);
}

static void r5l_log_flush_endio(struct bio *bio)
{
        struct r5l_log *log = container_of(bio, struct r5l_log,
                                           flush_bio);
        unsigned long flags;
        struct r5l_io_unit *io;

        spin_lock_irqsave(&log->io_list_lock, flags);
        list_for_each_entry(io, &log->flushing_ios, log_sibling)
                r5l_io_run_stripes(io);
        list_splice_tail_init(&log->flushing_ios, &log->finished_ios);
        spin_unlock_irqrestore(&log->io_list_lock, flags);
}

/*
 * Starting to dispatch IO to raid.
 * An io_unit (identified by its meta block) covers one range of the log.
 * There is one situation we want to avoid: a broken meta block in the middle
 * of the log means recovery can't find the meta blocks after it, towards the
 * head of the log. If an operation requires the meta at the head to be
 * persistent in the log, we must make sure the meta blocks before it are
 * persistent in the log too. A case is:
 *
 * stripe data/parity is in the log and we start writing the stripe to the
 * raid disks. The stripe's data/parity must be persistent in the log before
 * we do the write to the raid disks.
 *
 * The solution is that we strictly maintain io_unit list order. In this case,
 * we only write stripes of an io_unit to the raid disks until the io_unit is
 * the first one whose data/parity is in the log.
 */
void r5l_flush_stripe_to_raid(struct r5l_log *log)
{
        bool do_flush;

        if (!log || !log->need_cache_flush)
                return;

        spin_lock_irq(&log->io_list_lock);
        /* flush bio is running */
        if (!list_empty(&log->flushing_ios)) {
                spin_unlock_irq(&log->io_list_lock);
                return;
        }
        list_splice_tail_init(&log->io_end_ios, &log->flushing_ios);
        do_flush = !list_empty(&log->flushing_ios);
        spin_unlock_irq(&log->io_list_lock);

        if (!do_flush)
                return;
        bio_reset(&log->flush_bio);
        log->flush_bio.bi_bdev = log->rdev->bdev;
        log->flush_bio.bi_end_io = r5l_log_flush_endio;
        submit_bio(WRITE_FLUSH, &log->flush_bio);
}

static void r5l_write_super(struct r5l_log *log, sector_t cp);
static void r5l_do_reclaim(struct r5l_log *log)
{
        sector_t reclaim_target = xchg(&log->reclaim_target, 0);
        sector_t reclaimable;
        sector_t next_checkpoint;
        u64 next_cp_seq;

        spin_lock_irq(&log->io_list_lock);
        /*
         * Move the proper io_units to the reclaim list. We should not change
         * the order. Reclaimable/unreclaimable io_units can be mixed in the
         * list, and we shouldn't reuse the space of an unreclaimable io_unit.
         */
        while (1) {
                reclaimable = r5l_reclaimable_space(log);
                if (reclaimable >= reclaim_target ||
                    (list_empty(&log->running_ios) &&
                     list_empty(&log->io_end_ios) &&
                     list_empty(&log->flushing_ios) &&
                     list_empty(&log->finished_ios)))
                        break;

                md_wakeup_thread(log->rdev->mddev->thread);
                wait_event_lock_irq(log->iounit_wait,
                                    r5l_reclaimable_space(log) > reclaimable,
                                    log->io_list_lock);
        }

        next_checkpoint = log->next_checkpoint;
        next_cp_seq = log->next_cp_seq;
        spin_unlock_irq(&log->io_list_lock);

        BUG_ON(reclaimable < 0);
        if (reclaimable == 0)
                return;

        /*
         * write_super will flush the cache of each raid disk. We must write
         * the super here, because the log area might be reused soon and we
         * don't want to confuse recovery.
         */
        r5l_write_super(log, next_checkpoint);

        mutex_lock(&log->io_mutex);
        log->last_checkpoint = next_checkpoint;
        log->last_cp_seq = next_cp_seq;
        mutex_unlock(&log->io_mutex);

        r5l_run_no_space_stripes(log);
}

static void r5l_reclaim_thread(struct md_thread *thread)
{
        struct mddev *mddev = thread->mddev;
        struct r5conf *conf = mddev->private;
        struct r5l_log *log = conf->log;

        if (!log)
                return;
        r5l_do_reclaim(log);
}

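/*
 * Raise the reclaim target to at least 'space' sectors and kick the reclaim
 * thread. The cmpxchg() loop only ever raises the target, so a concurrent
 * caller asking for less space does not shrink a larger pending request.
 */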
static void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
{
        unsigned long target;
        unsigned long new = (unsigned long)space; /* overflow in theory */

        do {
                target = log->reclaim_target;
                if (new < target)
                        return;
        } while (cmpxchg(&log->reclaim_target, target, new) != target);
        md_wakeup_thread(log->reclaim_thread);
}

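/*
 * Called on array quiesce state changes: state 0 resumes the log (the reclaim
 * thread is re-registered), state 1 quiesces it (reclaim everything and stop
 * the thread), and state 2 is ignored here.
 */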
void r5l_quiesce(struct r5l_log *log, int state)
{
        if (!log || state == 2)
                return;
        if (state == 0) {
                log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
                                        log->rdev->mddev, "reclaim");
        } else if (state == 1) {
                /*
                 * at this point all stripes are finished, so every io_unit is
                 * at least in the STRIPE_END state
                 */
                r5l_wake_reclaim(log, -1L);
                md_unregister_thread(&log->reclaim_thread);
                r5l_do_reclaim(log);
        }
}

struct r5l_recovery_ctx {
        struct page *meta_page;         /* current meta */
        sector_t meta_total_blocks;     /* total size of current meta and data */
        sector_t pos;                   /* recovery position */
        u64 seq;                        /* recovery position seq */
};

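/*
 * Read the meta block at ctx->pos and validate it: the magic, version,
 * sequence number, position and crc32c checksum must all match, otherwise
 * recovery stops at this block.
 */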
static int r5l_read_meta_block(struct r5l_log *log,
                               struct r5l_recovery_ctx *ctx)
{
        struct page *page = ctx->meta_page;
        struct r5l_meta_block *mb;
        u32 crc, stored_crc;

        if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, READ, false))
                return -EIO;

        mb = page_address(page);
        stored_crc = le32_to_cpu(mb->checksum);
        mb->checksum = 0;

        if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
            le64_to_cpu(mb->seq) != ctx->seq ||
            mb->version != R5LOG_VERSION ||
            le64_to_cpu(mb->position) != ctx->pos)
                return -EINVAL;

        crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
        if (stored_crc != crc)
                return -EINVAL;

        if (le32_to_cpu(mb->meta_size) > PAGE_SIZE)
                return -EINVAL;

        ctx->meta_total_blocks = BLOCK_SECTORS;

        return 0;
}

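/*
 * Rebuild one stripe from the log during recovery: read the data (and parity)
 * pages referenced by the payloads back from the log, verify their checksums
 * against the recorded ones, and write the pages out to the raid disks (and
 * any replacement devices).
 */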
static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
                                         struct r5l_recovery_ctx *ctx,
                                         sector_t stripe_sect,
                                         int *offset, sector_t *log_offset)
{
        struct r5conf *conf = log->rdev->mddev->private;
        struct stripe_head *sh;
        struct r5l_payload_data_parity *payload;
        int disk_index;

        sh = raid5_get_active_stripe(conf, stripe_sect, 0, 0, 0);
        while (1) {
                payload = page_address(ctx->meta_page) + *offset;

                if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
                        raid5_compute_sector(conf,
                                             le64_to_cpu(payload->location), 0,
                                             &disk_index, sh);

                        sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
                                     sh->dev[disk_index].page, READ, false);
                        sh->dev[disk_index].log_checksum =
                                le32_to_cpu(payload->checksum[0]);
                        set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
                        ctx->meta_total_blocks += BLOCK_SECTORS;
                } else {
                        disk_index = sh->pd_idx;
                        sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
                                     sh->dev[disk_index].page, READ, false);
                        sh->dev[disk_index].log_checksum =
                                le32_to_cpu(payload->checksum[0]);
                        set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);

                        if (sh->qd_idx >= 0) {
                                disk_index = sh->qd_idx;
                                sync_page_io(log->rdev,
                                             r5l_ring_add(log, *log_offset, BLOCK_SECTORS),
                                             PAGE_SIZE, sh->dev[disk_index].page,
                                             READ, false);
                                sh->dev[disk_index].log_checksum =
                                        le32_to_cpu(payload->checksum[1]);
                                set_bit(R5_Wantwrite,
                                        &sh->dev[disk_index].flags);
                        }
                        ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
                }

                *log_offset = r5l_ring_add(log, *log_offset,
                                           le32_to_cpu(payload->size));
                *offset += sizeof(struct r5l_payload_data_parity) +
                        sizeof(__le32) *
                        (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
                if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
                        break;
        }

        for (disk_index = 0; disk_index < sh->disks; disk_index++) {
                void *addr;
                u32 checksum;

                if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
                        continue;
                addr = kmap_atomic(sh->dev[disk_index].page);
                checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
                kunmap_atomic(addr);
                if (checksum != sh->dev[disk_index].log_checksum)
                        goto error;
        }

        for (disk_index = 0; disk_index < sh->disks; disk_index++) {
                struct md_rdev *rdev, *rrdev;

                if (!test_and_clear_bit(R5_Wantwrite,
                                        &sh->dev[disk_index].flags))
                        continue;

                /* in case device is broken */
                rdev = rcu_dereference(conf->disks[disk_index].rdev);
                if (rdev)
                        sync_page_io(rdev, stripe_sect, PAGE_SIZE,
                                     sh->dev[disk_index].page, WRITE, false);
                rrdev = rcu_dereference(conf->disks[disk_index].replacement);
                if (rrdev)
                        sync_page_io(rrdev, stripe_sect, PAGE_SIZE,
                                     sh->dev[disk_index].page, WRITE, false);
        }
        raid5_release_stripe(sh);
        return 0;

error:
        for (disk_index = 0; disk_index < sh->disks; disk_index++)
                sh->dev[disk_index].flags = 0;
        raid5_release_stripe(sh);
        return -EINVAL;
}

static int r5l_recovery_flush_one_meta(struct r5l_log *log,
                                       struct r5l_recovery_ctx *ctx)
{
        struct r5conf *conf = log->rdev->mddev->private;
        struct r5l_payload_data_parity *payload;
        struct r5l_meta_block *mb;
        int offset;
        sector_t log_offset;
        sector_t stripe_sector;

        mb = page_address(ctx->meta_page);
        offset = sizeof(struct r5l_meta_block);
        log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);

        while (offset < le32_to_cpu(mb->meta_size)) {
                int dd;

                payload = (void *)mb + offset;
                stripe_sector = raid5_compute_sector(conf,
                                                     le64_to_cpu(payload->location), 0, &dd, NULL);
                if (r5l_recovery_flush_one_stripe(log, ctx, stripe_sector,
                                                  &offset, &log_offset))
                        return -EINVAL;
        }
        return 0;
}

/* copy data/parity from log to raid disks */
static void r5l_recovery_flush_log(struct r5l_log *log,
                                   struct r5l_recovery_ctx *ctx)
{
        while (1) {
                if (r5l_read_meta_block(log, ctx))
                        return;
                if (r5l_recovery_flush_one_meta(log, ctx))
                        return;
                ctx->seq++;
                ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
        }
}

static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
                                          u64 seq)
{
        struct page *page;
        struct r5l_meta_block *mb;
        u32 crc;

        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page)
                return -ENOMEM;
        mb = page_address(page);
        mb->magic = cpu_to_le32(R5LOG_MAGIC);
        mb->version = R5LOG_VERSION;
        mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
        mb->seq = cpu_to_le64(seq);
        mb->position = cpu_to_le64(pos);
        crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
        mb->checksum = cpu_to_le32(crc);

        if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, WRITE_FUA, false)) {
                __free_page(page);
                return -EIO;
        }
        __free_page(page);
        return 0;
}

static int r5l_recovery_log(struct r5l_log *log)
{
        struct r5l_recovery_ctx ctx;

        ctx.pos = log->last_checkpoint;
        ctx.seq = log->last_cp_seq;
        ctx.meta_page = alloc_page(GFP_KERNEL);
        if (!ctx.meta_page)
                return -ENOMEM;

        r5l_recovery_flush_log(log, &ctx);
        __free_page(ctx.meta_page);

        /*
         * We did a recovery. Now ctx.pos points to an invalid meta block. The
         * new log will start here, but we can't let the superblock point to
         * the last valid meta block. The log might look like:
         * | meta 1| meta 2| meta 3|
         * meta 1 is valid, meta 2 is invalid, and meta 3 could be valid. If
         * the superblock points to meta 1 and we write a new valid meta 2n,
         * then after another crash the new recovery will start from meta 1.
         * Since meta 2n is valid now, recovery will think meta 3 is valid,
         * which is wrong.
         * The solution is that we create a new meta in meta2 with its seq ==
         * meta 1's seq + 10 and let the superblock point to meta2. Then the
         * same recovery will not consider meta 3 a valid meta, because its
         * seq doesn't match.
         */
        if (ctx.seq > log->last_cp_seq + 1) {
                int ret;

                ret = r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq + 10);
                if (ret)
                        return ret;
                log->seq = ctx.seq + 11;
                log->log_start = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
                r5l_write_super(log, ctx.pos);
        } else {
                log->log_start = ctx.pos;
                log->seq = ctx.seq;
        }
        return 0;
}

static void r5l_write_super(struct r5l_log *log, sector_t cp)
{
        struct mddev *mddev = log->rdev->mddev;

        log->rdev->journal_tail = cp;
        set_bit(MD_CHANGE_DEVS, &mddev->flags);
}

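/*
 * Load the log: read the meta block at the journal tail recorded in the rdev
 * superblock. If it doesn't validate (bad magic, version, checksum or
 * position), start a fresh log at sector 0 with a random sequence number and
 * update the superblock. In both cases, recovery is then run from the chosen
 * checkpoint.
 */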
static int r5l_load_log(struct r5l_log *log)
{
        struct md_rdev *rdev = log->rdev;
        struct page *page;
        struct r5l_meta_block *mb;
        sector_t cp = log->rdev->journal_tail;
        u32 stored_crc, expected_crc;
        bool create_super = false;
        int ret;

        /* Make sure it's valid */
        if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
                cp = 0;
        page = alloc_page(GFP_KERNEL);
        if (!page)
                return -ENOMEM;

        if (!sync_page_io(rdev, cp, PAGE_SIZE, page, READ, false)) {
                ret = -EIO;
                goto ioerr;
        }
        mb = page_address(page);

        if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
            mb->version != R5LOG_VERSION) {
                create_super = true;
                goto create;
        }
        stored_crc = le32_to_cpu(mb->checksum);
        mb->checksum = 0;
        expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
        if (stored_crc != expected_crc) {
                create_super = true;
                goto create;
        }
        if (le64_to_cpu(mb->position) != cp) {
                create_super = true;
                goto create;
        }
create:
        if (create_super) {
                log->last_cp_seq = prandom_u32();
                cp = 0;
                /*
                 * Make sure the super points to the correct address. The log
                 * might have data very soon. If the super doesn't have the
                 * correct log tail address, recovery can't find the log.
                 */
                r5l_write_super(log, cp);
        } else
                log->last_cp_seq = le64_to_cpu(mb->seq);

        log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
        log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
        if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
                log->max_free_space = RECLAIM_MAX_FREE_SPACE;
        log->last_checkpoint = cp;

        __free_page(page);

        return r5l_recovery_log(log);
ioerr:
        __free_page(page);
        return ret;
}

int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
{
        struct r5l_log *log;

        if (PAGE_SIZE != 4096)
                return -EINVAL;
        log = kzalloc(sizeof(*log), GFP_KERNEL);
        if (!log)
                return -ENOMEM;
        log->rdev = rdev;

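        /*
         * Cache flushes are only needed when the log device reports a
         * volatile write cache (non-zero flush_flags); otherwise completed
         * writes are already persistent and the flush stage is skipped.
         */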
        log->need_cache_flush = (rdev->bdev->bd_disk->queue->flush_flags != 0);

        log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
                                       sizeof(rdev->mddev->uuid));

        mutex_init(&log->io_mutex);

        spin_lock_init(&log->io_list_lock);
        INIT_LIST_HEAD(&log->running_ios);
        INIT_LIST_HEAD(&log->io_end_ios);
        INIT_LIST_HEAD(&log->flushing_ios);
        INIT_LIST_HEAD(&log->finished_ios);
        bio_init(&log->flush_bio);

        log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
        if (!log->io_kc)
                goto io_kc;

        log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
                                                 log->rdev->mddev, "reclaim");
        if (!log->reclaim_thread)
                goto reclaim_thread;
        init_waitqueue_head(&log->iounit_wait);

        INIT_LIST_HEAD(&log->no_space_stripes);
        spin_lock_init(&log->no_space_stripes_lock);

        if (r5l_load_log(log))
                goto error;

        conf->log = log;
        return 0;
error:
        md_unregister_thread(&log->reclaim_thread);
reclaim_thread:
        kmem_cache_destroy(log->io_kc);
io_kc:
        kfree(log);
        return -EINVAL;
}

void r5l_exit_log(struct r5l_log *log)
{
        md_unregister_thread(&log->reclaim_thread);
        kmem_cache_destroy(log->io_kc);
        kfree(log);
}