/*
2 * Copyright (C) 2012 Red Hat. All rights reserved.
3 *
4 * This file is released under the GPL.
5 */
6
7#include "dm.h"
8#include "dm-bio-prison.h"
#include "dm-bio-record.h"
#include "dm-cache-metadata.h"
11
12#include <linux/dm-io.h>
13#include <linux/dm-kcopyd.h>
14#include <linux/init.h>
15#include <linux/mempool.h>
16#include <linux/module.h>
17#include <linux/slab.h>
18#include <linux/vmalloc.h>
19
20#define DM_MSG_PREFIX "cache"
21
22DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(cache_copy_throttle,
23 "A percentage of time allocated for copying to and/or from cache");
24
25/*----------------------------------------------------------------*/
26
27/*
28 * Glossary:
29 *
30 * oblock: index of an origin block
31 * cblock: index of a cache block
32 * promotion: movement of a block from origin to cache
33 * demotion: movement of a block from cache to origin
34 * migration: movement of a block between the origin and cache device,
35 * either direction
36 */
37
38/*----------------------------------------------------------------*/
39
40static size_t bitset_size_in_bytes(unsigned nr_entries)
41{
42 return sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);
43}
44
45static unsigned long *alloc_bitset(unsigned nr_entries)
46{
47 size_t s = bitset_size_in_bytes(nr_entries);
48 return vzalloc(s);
49}
50
51static void clear_bitset(void *bitset, unsigned nr_entries)
52{
53 size_t s = bitset_size_in_bytes(nr_entries);
54 memset(bitset, 0, s);
55}
56
57static void free_bitset(unsigned long *bits)
58{
59 vfree(bits);
60}
61
62/*----------------------------------------------------------------*/
63
64#define PRISON_CELLS 1024
65#define MIGRATION_POOL_SIZE 128
66#define COMMIT_PERIOD HZ
67#define MIGRATION_COUNT_WINDOW 10
68
69/*
70 * The block size of the device holding cache data must be >= 32KB
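 * (with 512-byte sectors this is 64 sectors, as encoded in the expression below)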
71 */
72#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
73
74/*
75 * FIXME: the cache is read/write for the time being.
76 */
77enum cache_mode {
78 CM_WRITE, /* metadata may be changed */
79 CM_READ_ONLY, /* metadata may not be changed */
80};
81
82struct cache_features {
83 enum cache_mode mode;
84 bool write_through:1;
85};
86
87struct cache_stats {
88 atomic_t read_hit;
89 atomic_t read_miss;
90 atomic_t write_hit;
91 atomic_t write_miss;
92 atomic_t demotion;
93 atomic_t promotion;
94 atomic_t copies_avoided;
95 atomic_t cache_cell_clash;
96 atomic_t commit_count;
97 atomic_t discard_count;
98};
99
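/*
 * The main per-target object.  It ties together the metadata, origin and
 * cache devices, the replacement policy and the worker state.
 */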
100struct cache {
101 struct dm_target *ti;
102 struct dm_target_callbacks callbacks;
103
104 /*
105 * Metadata is written to this device.
106 */
107 struct dm_dev *metadata_dev;
108
109 /*
110 * The slower of the two data devices. Typically a spindle.
111 */
112 struct dm_dev *origin_dev;
113
114 /*
115 * The faster of the two data devices. Typically an SSD.
116 */
117 struct dm_dev *cache_dev;
118
119 /*
120 * Cache features such as write-through.
121 */
122 struct cache_features features;
123
124 /*
125 * Size of the origin device in _complete_ blocks and native sectors.
126 */
127 dm_oblock_t origin_blocks;
128 sector_t origin_sectors;
129
130 /*
131 * Size of the cache device in blocks.
132 */
133 dm_cblock_t cache_size;
134
135 /*
136 * Fields for converting from sectors to blocks.
137 */
138 uint32_t sectors_per_block;
139 int sectors_per_block_shift;
140
141 struct dm_cache_metadata *cmd;
142
143 spinlock_t lock;
144 struct bio_list deferred_bios;
145 struct bio_list deferred_flush_bios;
	struct bio_list deferred_writethrough_bios;
	struct list_head quiesced_migrations;
148 struct list_head completed_migrations;
149 struct list_head need_commit_migrations;
150 sector_t migration_threshold;
151 atomic_t nr_migrations;
152 wait_queue_head_t migration_wait;
153
	wait_queue_head_t quiescing_wait;
155 atomic_t quiescing_ack;
156
	/*
158 * cache_size entries, dirty if set
159 */
160 dm_cblock_t nr_dirty;
161 unsigned long *dirty_bitset;
162
163 /*
164 * origin_blocks entries, discarded if set.
165 */
	uint32_t discard_block_size; /* a power of 2 times sectors per block */
	dm_dblock_t discard_nr_blocks;
168 unsigned long *discard_bitset;
169
170 struct dm_kcopyd_client *copier;
171 struct workqueue_struct *wq;
172 struct work_struct worker;
173
174 struct delayed_work waker;
175 unsigned long last_commit_jiffies;
176
177 struct dm_bio_prison *prison;
178 struct dm_deferred_set *all_io_ds;
179
180 mempool_t *migration_pool;
181 struct dm_cache_migration *next_migration;
182
183 struct dm_cache_policy *policy;
184 unsigned policy_nr_args;
185
186 bool need_tick_bio:1;
187 bool sized:1;
188 bool quiescing:1;
189 bool commit_requested:1;
190 bool loaded_mappings:1;
191 bool loaded_discards:1;
192
193 struct cache_stats stats;
194
195 /*
	 * Rather than reconstructing the table line for status output, we just
	 * save it and regurgitate it.
198 */
199 unsigned nr_ctr_args;
200 const char **ctr_args;
201};
202
203struct per_bio_data {
204 bool tick:1;
205 unsigned req_nr:2;
206 struct dm_deferred_entry *all_io_entry;

	/*
	 * writethrough fields. These MUST remain at the end of this
	 * structure and the 'cache' member must be the first as it
	 * is used to determine the offset of the writethrough fields.
	 */
	struct cache *cache;
214 dm_cblock_t cblock;
215 bio_end_io_t *saved_bi_end_io;
	struct dm_bio_details bio_details;
};
218
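/*
 * A migration describes one block movement: a writeback of a dirty cache
 * block, a demotion, a promotion, or a combined demotion then promotion.
 */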
219struct dm_cache_migration {
220 struct list_head list;
221 struct cache *cache;
222
223 unsigned long start_jiffies;
224 dm_oblock_t old_oblock;
225 dm_oblock_t new_oblock;
226 dm_cblock_t cblock;
227
228 bool err:1;
229 bool writeback:1;
230 bool demote:1;
231 bool promote:1;
232
233 struct dm_bio_prison_cell *old_ocell;
234 struct dm_bio_prison_cell *new_ocell;
235};
236
237/*
238 * Processing a bio in the worker thread may require these memory
239 * allocations. We prealloc to avoid deadlocks (the same worker thread
240 * frees them back to the mempool).
241 */
242struct prealloc {
243 struct dm_cache_migration *mg;
244 struct dm_bio_prison_cell *cell1;
245 struct dm_bio_prison_cell *cell2;
246};
247
248static void wake_worker(struct cache *cache)
249{
250 queue_work(cache->wq, &cache->worker);
251}
252
253/*----------------------------------------------------------------*/
254
255static struct dm_bio_prison_cell *alloc_prison_cell(struct cache *cache)
256{
257 /* FIXME: change to use a local slab. */
258 return dm_bio_prison_alloc_cell(cache->prison, GFP_NOWAIT);
259}
260
261static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cell)
262{
263 dm_bio_prison_free_cell(cache->prison, cell);
264}
265
266static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
267{
268 if (!p->mg) {
269 p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
270 if (!p->mg)
271 return -ENOMEM;
272 }
273
274 if (!p->cell1) {
275 p->cell1 = alloc_prison_cell(cache);
276 if (!p->cell1)
277 return -ENOMEM;
278 }
279
280 if (!p->cell2) {
281 p->cell2 = alloc_prison_cell(cache);
282 if (!p->cell2)
283 return -ENOMEM;
284 }
285
286 return 0;
287}
288
289static void prealloc_free_structs(struct cache *cache, struct prealloc *p)
290{
291 if (p->cell2)
292 free_prison_cell(cache, p->cell2);
293
294 if (p->cell1)
295 free_prison_cell(cache, p->cell1);
296
297 if (p->mg)
298 mempool_free(p->mg, cache->migration_pool);
299}
300
301static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
302{
303 struct dm_cache_migration *mg = p->mg;
304
305 BUG_ON(!mg);
306 p->mg = NULL;
307
308 return mg;
309}
310
311/*
 * You must have a cell within the prealloc struct to return. If not, this
 * function will BUG() rather than returning NULL.
314 */
315static struct dm_bio_prison_cell *prealloc_get_cell(struct prealloc *p)
316{
317 struct dm_bio_prison_cell *r = NULL;
318
319 if (p->cell1) {
320 r = p->cell1;
321 p->cell1 = NULL;
322
323 } else if (p->cell2) {
324 r = p->cell2;
325 p->cell2 = NULL;
326 } else
327 BUG();
328
329 return r;
330}
331
332/*
 * You can't have more than two cells in a prealloc struct. BUG() will be
 * called if you try to overfill it.
335 */
336static void prealloc_put_cell(struct prealloc *p, struct dm_bio_prison_cell *cell)
337{
338 if (!p->cell2)
339 p->cell2 = cell;
340
341 else if (!p->cell1)
342 p->cell1 = cell;
343
344 else
345 BUG();
346}
347
348/*----------------------------------------------------------------*/
349
350static void build_key(dm_oblock_t oblock, struct dm_cell_key *key)
351{
352 key->virtual = 0;
353 key->dev = 0;
354 key->block = from_oblock(oblock);
355}
356
357/*
358 * The caller hands in a preallocated cell, and a free function for it.
359 * The cell will be freed if there's an error, or if it wasn't used because
360 * a cell with that key already exists.
361 */
362typedef void (*cell_free_fn)(void *context, struct dm_bio_prison_cell *cell);
363
364static int bio_detain(struct cache *cache, dm_oblock_t oblock,
365 struct bio *bio, struct dm_bio_prison_cell *cell_prealloc,
366 cell_free_fn free_fn, void *free_context,
367 struct dm_bio_prison_cell **cell_result)
368{
369 int r;
370 struct dm_cell_key key;
371
372 build_key(oblock, &key);
373 r = dm_bio_detain(cache->prison, &key, bio, cell_prealloc, cell_result);
374 if (r)
375 free_fn(free_context, cell_prealloc);
376
377 return r;
378}
379
380static int get_cell(struct cache *cache,
381 dm_oblock_t oblock,
382 struct prealloc *structs,
383 struct dm_bio_prison_cell **cell_result)
384{
385 int r;
386 struct dm_cell_key key;
387 struct dm_bio_prison_cell *cell_prealloc;
388
389 cell_prealloc = prealloc_get_cell(structs);
390
391 build_key(oblock, &key);
392 r = dm_get_cell(cache->prison, &key, cell_prealloc, cell_result);
393 if (r)
394 prealloc_put_cell(structs, cell_prealloc);
395
396 return r;
397}
398
/*----------------------------------------------------------------*/

401static bool is_dirty(struct cache *cache, dm_cblock_t b)
402{
403 return test_bit(from_cblock(b), cache->dirty_bitset);
404}
405
406static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
407{
408 if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
409 cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) + 1);
410 policy_set_dirty(cache->policy, oblock);
411 }
412}
413
414static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
415{
416 if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
417 policy_clear_dirty(cache->policy, oblock);
418 cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) - 1);
419 if (!from_cblock(cache->nr_dirty))
420 dm_table_event(cache->ti->table);
421 }
422}
423
424/*----------------------------------------------------------------*/

static bool block_size_is_power_of_two(struct cache *cache)
427{
428 return cache->sectors_per_block_shift >= 0;
429}
430
static dm_block_t block_div(dm_block_t b, uint32_t n)
432{
433 do_div(b, n);
434
435 return b;
436}
437
static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
439{
	uint32_t discard_blocks = cache->discard_block_size;
	dm_block_t b = from_oblock(oblock);
442
443 if (!block_size_is_power_of_two(cache))
		discard_blocks = discard_blocks / cache->sectors_per_block;
	else
446 discard_blocks >>= cache->sectors_per_block_shift;
447
	b = block_div(b, discard_blocks);

450 return to_dblock(b);
451}
452
453static void set_discard(struct cache *cache, dm_dblock_t b)
454{
455 unsigned long flags;
456
457 atomic_inc(&cache->stats.discard_count);
458
459 spin_lock_irqsave(&cache->lock, flags);
460 set_bit(from_dblock(b), cache->discard_bitset);
461 spin_unlock_irqrestore(&cache->lock, flags);
462}
463
464static void clear_discard(struct cache *cache, dm_dblock_t b)
465{
466 unsigned long flags;
467
468 spin_lock_irqsave(&cache->lock, flags);
469 clear_bit(from_dblock(b), cache->discard_bitset);
470 spin_unlock_irqrestore(&cache->lock, flags);
471}
472
473static bool is_discarded(struct cache *cache, dm_dblock_t b)
474{
475 int r;
476 unsigned long flags;
477
478 spin_lock_irqsave(&cache->lock, flags);
479 r = test_bit(from_dblock(b), cache->discard_bitset);
480 spin_unlock_irqrestore(&cache->lock, flags);
481
482 return r;
483}
484
485static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
486{
487 int r;
488 unsigned long flags;
489
490 spin_lock_irqsave(&cache->lock, flags);
491 r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
492 cache->discard_bitset);
493 spin_unlock_irqrestore(&cache->lock, flags);
494
495 return r;
496}
497
498/*----------------------------------------------------------------*/
499
500static void load_stats(struct cache *cache)
501{
502 struct dm_cache_statistics stats;
503
504 dm_cache_metadata_get_stats(cache->cmd, &stats);
505 atomic_set(&cache->stats.read_hit, stats.read_hits);
506 atomic_set(&cache->stats.read_miss, stats.read_misses);
507 atomic_set(&cache->stats.write_hit, stats.write_hits);
508 atomic_set(&cache->stats.write_miss, stats.write_misses);
509}
510
511static void save_stats(struct cache *cache)
512{
513 struct dm_cache_statistics stats;
514
515 stats.read_hits = atomic_read(&cache->stats.read_hit);
516 stats.read_misses = atomic_read(&cache->stats.read_miss);
517 stats.write_hits = atomic_read(&cache->stats.write_hit);
518 stats.write_misses = atomic_read(&cache->stats.write_miss);
519
520 dm_cache_metadata_set_stats(cache->cmd, &stats);
521}
522
523/*----------------------------------------------------------------
524 * Per bio data
525 *--------------------------------------------------------------*/

/*
 * If using writeback, leave out struct per_bio_data's writethrough fields.
 */
#define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
#define PB_DATA_SIZE_WT (sizeof(struct per_bio_data))
532
533static size_t get_per_bio_data_size(struct cache *cache)
{
	return cache->features.write_through ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
536}
537
538static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
539{
540 struct per_bio_data *pb = dm_per_bio_data(bio, data_size);
	BUG_ON(!pb);
542 return pb;
543}
544
static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size)
{
	struct per_bio_data *pb = get_per_bio_data(bio, data_size);

549 pb->tick = false;
550 pb->req_nr = dm_bio_get_target_bio_nr(bio);
551 pb->all_io_entry = NULL;
552
553 return pb;
554}
555
556/*----------------------------------------------------------------
557 * Remapping
558 *--------------------------------------------------------------*/
559static void remap_to_origin(struct cache *cache, struct bio *bio)
560{
561 bio->bi_bdev = cache->origin_dev->bdev;
562}
563
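/*
 * Remap a bio to the cache device: the cache block supplies the base
 * sector, and the bio keeps its offset within the block.
 */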
564static void remap_to_cache(struct cache *cache, struct bio *bio,
565 dm_cblock_t cblock)
566{
567 sector_t bi_sector = bio->bi_sector;
568
569 bio->bi_bdev = cache->cache_dev->bdev;
570 if (!block_size_is_power_of_two(cache))
571 bio->bi_sector = (from_cblock(cblock) * cache->sectors_per_block) +
572 sector_div(bi_sector, cache->sectors_per_block);
573 else
574 bio->bi_sector = (from_cblock(cblock) << cache->sectors_per_block_shift) |
575 (bi_sector & (cache->sectors_per_block - 1));
576}
577
578static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
579{
580 unsigned long flags;
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

584 spin_lock_irqsave(&cache->lock, flags);
585 if (cache->need_tick_bio &&
586 !(bio->bi_rw & (REQ_FUA | REQ_FLUSH | REQ_DISCARD))) {
587 pb->tick = true;
588 cache->need_tick_bio = false;
589 }
590 spin_unlock_irqrestore(&cache->lock, flags);
591}
592
593static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
594 dm_oblock_t oblock)
595{
596 check_if_tick_bio_needed(cache, bio);
597 remap_to_origin(cache, bio);
598 if (bio_data_dir(bio) == WRITE)
599 clear_discard(cache, oblock_to_dblock(cache, oblock));
600}
601
602static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
603 dm_oblock_t oblock, dm_cblock_t cblock)
604{
605 remap_to_cache(cache, bio, cblock);
606 if (bio_data_dir(bio) == WRITE) {
607 set_dirty(cache, oblock, cblock);
608 clear_discard(cache, oblock_to_dblock(cache, oblock));
609 }
610}
611
612static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
613{
614 sector_t block_nr = bio->bi_sector;
615
616 if (!block_size_is_power_of_two(cache))
617 (void) sector_div(block_nr, cache->sectors_per_block);
618 else
619 block_nr >>= cache->sectors_per_block_shift;
620
621 return to_oblock(block_nr);
622}
623
624static int bio_triggers_commit(struct cache *cache, struct bio *bio)
625{
626 return bio->bi_rw & (REQ_FLUSH | REQ_FUA);
627}
628
629static void issue(struct cache *cache, struct bio *bio)
630{
631 unsigned long flags;
632
633 if (!bio_triggers_commit(cache, bio)) {
634 generic_make_request(bio);
635 return;
636 }
637
638 /*
639 * Batch together any bios that trigger commits and then issue a
640 * single commit for them in do_worker().
641 */
642 spin_lock_irqsave(&cache->lock, flags);
643 cache->commit_requested = true;
644 bio_list_add(&cache->deferred_flush_bios, bio);
645 spin_unlock_irqrestore(&cache->lock, flags);
646}
647
static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
649{
650 unsigned long flags;
651
652 spin_lock_irqsave(&cache->lock, flags);
653 bio_list_add(&cache->deferred_writethrough_bios, bio);
654 spin_unlock_irqrestore(&cache->lock, flags);
655
656 wake_worker(cache);
657}
658
659static void writethrough_endio(struct bio *bio, int err)
660{
	struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
	bio->bi_end_io = pb->saved_bi_end_io;
663
664 if (err) {
665 bio_endio(bio, err);
666 return;
667 }
668
	dm_bio_restore(&pb->bio_details, bio);
	remap_to_cache(pb->cache, bio, pb->cblock);
671
672 /*
673 * We can't issue this bio directly, since we're in interrupt
	 * context. So it gets put on a bio list for processing by the
	 * worker thread.
676 */
677 defer_writethrough_bio(pb->cache, bio);
678}
679
680/*
681 * When running in writethrough mode we need to send writes to clean blocks
 * to both the cache and origin devices. In the future we'd like to clone the
 * bio and send the two copies in parallel, but for now we issue them in
 * series, as this is easier.
685 */
686static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
687 dm_oblock_t oblock, dm_cblock_t cblock)
688{
	struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);

691 pb->cache = cache;
692 pb->cblock = cblock;
693 pb->saved_bi_end_io = bio->bi_end_io;
	dm_bio_record(&pb->bio_details, bio);
	bio->bi_end_io = writethrough_endio;
696
697 remap_to_origin_clear_discard(pb->cache, bio, oblock);
698}
699
/*----------------------------------------------------------------
701 * Migration processing
702 *
703 * Migration covers moving data from the origin device to the cache, or
704 * vice versa.
705 *--------------------------------------------------------------*/
706static void free_migration(struct dm_cache_migration *mg)
707{
708 mempool_free(mg, mg->cache->migration_pool);
709}
710
711static void inc_nr_migrations(struct cache *cache)
712{
713 atomic_inc(&cache->nr_migrations);
714}
715
716static void dec_nr_migrations(struct cache *cache)
717{
718 atomic_dec(&cache->nr_migrations);
719
720 /*
721 * Wake the worker in case we're suspending the target.
722 */
723 wake_up(&cache->migration_wait);
724}
725
726static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
727 bool holder)
728{
729 (holder ? dm_cell_release : dm_cell_release_no_holder)
730 (cache->prison, cell, &cache->deferred_bios);
731 free_prison_cell(cache, cell);
732}
733
734static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
735 bool holder)
736{
737 unsigned long flags;
738
739 spin_lock_irqsave(&cache->lock, flags);
740 __cell_defer(cache, cell, holder);
741 spin_unlock_irqrestore(&cache->lock, flags);
742
743 wake_worker(cache);
744}
745
746static void cleanup_migration(struct dm_cache_migration *mg)
747{
	struct cache *cache = mg->cache;
	free_migration(mg);
	dec_nr_migrations(cache);
}
752
753static void migration_failure(struct dm_cache_migration *mg)
754{
755 struct cache *cache = mg->cache;
756
757 if (mg->writeback) {
758 DMWARN_LIMIT("writeback failed; couldn't copy block");
759 set_dirty(cache, mg->old_oblock, mg->cblock);
760 cell_defer(cache, mg->old_ocell, false);
761
762 } else if (mg->demote) {
763 DMWARN_LIMIT("demotion failed; couldn't copy block");
764 policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock);
765
766 cell_defer(cache, mg->old_ocell, mg->promote ? 0 : 1);
767 if (mg->promote)
768 cell_defer(cache, mg->new_ocell, 1);
769 } else {
770 DMWARN_LIMIT("promotion failed; couldn't copy block");
771 policy_remove_mapping(cache->policy, mg->new_oblock);
772 cell_defer(cache, mg->new_ocell, 1);
773 }
774
775 cleanup_migration(mg);
776}
777
778static void migration_success_pre_commit(struct dm_cache_migration *mg)
779{
780 unsigned long flags;
781 struct cache *cache = mg->cache;
782
783 if (mg->writeback) {
784 cell_defer(cache, mg->old_ocell, false);
785 clear_dirty(cache, mg->old_oblock, mg->cblock);
786 cleanup_migration(mg);
787 return;
788
789 } else if (mg->demote) {
790 if (dm_cache_remove_mapping(cache->cmd, mg->cblock)) {
791 DMWARN_LIMIT("demotion failed; couldn't update on disk metadata");
792 policy_force_mapping(cache->policy, mg->new_oblock,
793 mg->old_oblock);
794 if (mg->promote)
795 cell_defer(cache, mg->new_ocell, true);
796 cleanup_migration(mg);
797 return;
798 }
799 } else {
800 if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) {
801 DMWARN_LIMIT("promotion failed; couldn't update on disk metadata");
802 policy_remove_mapping(cache->policy, mg->new_oblock);
803 cleanup_migration(mg);
804 return;
805 }
806 }
807
808 spin_lock_irqsave(&cache->lock, flags);
809 list_add_tail(&mg->list, &cache->need_commit_migrations);
810 cache->commit_requested = true;
811 spin_unlock_irqrestore(&cache->lock, flags);
812}
813
814static void migration_success_post_commit(struct dm_cache_migration *mg)
815{
816 unsigned long flags;
817 struct cache *cache = mg->cache;
818
819 if (mg->writeback) {
820 DMWARN("writeback unexpectedly triggered commit");
821 return;
822
823 } else if (mg->demote) {
824 cell_defer(cache, mg->old_ocell, mg->promote ? 0 : 1);
825
826 if (mg->promote) {
827 mg->demote = false;
828
829 spin_lock_irqsave(&cache->lock, flags);
830 list_add_tail(&mg->list, &cache->quiesced_migrations);
831 spin_unlock_irqrestore(&cache->lock, flags);
832
833 } else
834 cleanup_migration(mg);
835
836 } else {
837 cell_defer(cache, mg->new_ocell, true);
838 clear_dirty(cache, mg->new_oblock, mg->cblock);
839 cleanup_migration(mg);
840 }
841}
842
843static void copy_complete(int read_err, unsigned long write_err, void *context)
844{
845 unsigned long flags;
846 struct dm_cache_migration *mg = (struct dm_cache_migration *) context;
847 struct cache *cache = mg->cache;
848
849 if (read_err || write_err)
850 mg->err = true;
851
852 spin_lock_irqsave(&cache->lock, flags);
853 list_add_tail(&mg->list, &cache->completed_migrations);
854 spin_unlock_irqrestore(&cache->lock, flags);
855
856 wake_worker(cache);
857}
858
859static void issue_copy_real(struct dm_cache_migration *mg)
860{
861 int r;
862 struct dm_io_region o_region, c_region;
863 struct cache *cache = mg->cache;
	sector_t cblock = from_cblock(mg->cblock);

866 o_region.bdev = cache->origin_dev->bdev;
867 o_region.count = cache->sectors_per_block;
868
869 c_region.bdev = cache->cache_dev->bdev;
	c_region.bdev = cache->cache_dev->bdev;
	c_region.sector = cblock * cache->sectors_per_block;
872
873 if (mg->writeback || mg->demote) {
874 /* demote */
875 o_region.sector = from_oblock(mg->old_oblock) * cache->sectors_per_block;
876 r = dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, mg);
877 } else {
878 /* promote */
879 o_region.sector = from_oblock(mg->new_oblock) * cache->sectors_per_block;
880 r = dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, mg);
881 }
882
883 if (r < 0)
884 migration_failure(mg);
885}
886
887static void avoid_copy(struct dm_cache_migration *mg)
888{
889 atomic_inc(&mg->cache->stats.copies_avoided);
890 migration_success_pre_commit(mg);
891}
892
893static void issue_copy(struct dm_cache_migration *mg)
894{
895 bool avoid;
896 struct cache *cache = mg->cache;
897
898 if (mg->writeback || mg->demote)
899 avoid = !is_dirty(cache, mg->cblock) ||
900 is_discarded_oblock(cache, mg->old_oblock);
901 else
902 avoid = is_discarded_oblock(cache, mg->new_oblock);
903
904 avoid ? avoid_copy(mg) : issue_copy_real(mg);
905}
906
907static void complete_migration(struct dm_cache_migration *mg)
908{
909 if (mg->err)
910 migration_failure(mg);
911 else
912 migration_success_pre_commit(mg);
913}
914
915static void process_migrations(struct cache *cache, struct list_head *head,
916 void (*fn)(struct dm_cache_migration *))
917{
918 unsigned long flags;
919 struct list_head list;
920 struct dm_cache_migration *mg, *tmp;
921
922 INIT_LIST_HEAD(&list);
923 spin_lock_irqsave(&cache->lock, flags);
924 list_splice_init(head, &list);
925 spin_unlock_irqrestore(&cache->lock, flags);
926
927 list_for_each_entry_safe(mg, tmp, &list, list)
928 fn(mg);
929}
930
931static void __queue_quiesced_migration(struct dm_cache_migration *mg)
932{
933 list_add_tail(&mg->list, &mg->cache->quiesced_migrations);
934}
935
936static void queue_quiesced_migration(struct dm_cache_migration *mg)
937{
938 unsigned long flags;
939 struct cache *cache = mg->cache;
940
941 spin_lock_irqsave(&cache->lock, flags);
942 __queue_quiesced_migration(mg);
943 spin_unlock_irqrestore(&cache->lock, flags);
944
945 wake_worker(cache);
946}
947
948static void queue_quiesced_migrations(struct cache *cache, struct list_head *work)
949{
950 unsigned long flags;
951 struct dm_cache_migration *mg, *tmp;
952
953 spin_lock_irqsave(&cache->lock, flags);
954 list_for_each_entry_safe(mg, tmp, work, list)
955 __queue_quiesced_migration(mg);
956 spin_unlock_irqrestore(&cache->lock, flags);
957
958 wake_worker(cache);
959}
960
961static void check_for_quiesced_migrations(struct cache *cache,
962 struct per_bio_data *pb)
963{
964 struct list_head work;
965
966 if (!pb->all_io_entry)
967 return;
968
969 INIT_LIST_HEAD(&work);
970 if (pb->all_io_entry)
971 dm_deferred_entry_dec(pb->all_io_entry, &work);
972
973 if (!list_empty(&work))
974 queue_quiesced_migrations(cache, &work);
975}
976
977static void quiesce_migration(struct dm_cache_migration *mg)
978{
979 if (!dm_deferred_set_add_work(mg->cache->all_io_ds, &mg->list))
980 queue_quiesced_migration(mg);
981}
982
983static void promote(struct cache *cache, struct prealloc *structs,
984 dm_oblock_t oblock, dm_cblock_t cblock,
985 struct dm_bio_prison_cell *cell)
986{
987 struct dm_cache_migration *mg = prealloc_get_migration(structs);
988
989 mg->err = false;
990 mg->writeback = false;
991 mg->demote = false;
992 mg->promote = true;
993 mg->cache = cache;
994 mg->new_oblock = oblock;
995 mg->cblock = cblock;
996 mg->old_ocell = NULL;
997 mg->new_ocell = cell;
998 mg->start_jiffies = jiffies;
999
1000 inc_nr_migrations(cache);
1001 quiesce_migration(mg);
1002}
1003
1004static void writeback(struct cache *cache, struct prealloc *structs,
1005 dm_oblock_t oblock, dm_cblock_t cblock,
1006 struct dm_bio_prison_cell *cell)
1007{
1008 struct dm_cache_migration *mg = prealloc_get_migration(structs);
1009
1010 mg->err = false;
1011 mg->writeback = true;
1012 mg->demote = false;
1013 mg->promote = false;
1014 mg->cache = cache;
1015 mg->old_oblock = oblock;
1016 mg->cblock = cblock;
1017 mg->old_ocell = cell;
1018 mg->new_ocell = NULL;
1019 mg->start_jiffies = jiffies;
1020
1021 inc_nr_migrations(cache);
1022 quiesce_migration(mg);
1023}
1024
1025static void demote_then_promote(struct cache *cache, struct prealloc *structs,
1026 dm_oblock_t old_oblock, dm_oblock_t new_oblock,
1027 dm_cblock_t cblock,
1028 struct dm_bio_prison_cell *old_ocell,
1029 struct dm_bio_prison_cell *new_ocell)
1030{
1031 struct dm_cache_migration *mg = prealloc_get_migration(structs);
1032
1033 mg->err = false;
1034 mg->writeback = false;
1035 mg->demote = true;
1036 mg->promote = true;
1037 mg->cache = cache;
1038 mg->old_oblock = old_oblock;
1039 mg->new_oblock = new_oblock;
1040 mg->cblock = cblock;
1041 mg->old_ocell = old_ocell;
1042 mg->new_ocell = new_ocell;
1043 mg->start_jiffies = jiffies;
1044
1045 inc_nr_migrations(cache);
1046 quiesce_migration(mg);
1047}
1048
1049/*----------------------------------------------------------------
1050 * bio processing
1051 *--------------------------------------------------------------*/
1052static void defer_bio(struct cache *cache, struct bio *bio)
1053{
1054 unsigned long flags;
1055
1056 spin_lock_irqsave(&cache->lock, flags);
1057 bio_list_add(&cache->deferred_bios, bio);
1058 spin_unlock_irqrestore(&cache->lock, flags);
1059
1060 wake_worker(cache);
1061}
1062
1063static void process_flush_bio(struct cache *cache, struct bio *bio)
1064{
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

1068 BUG_ON(bio->bi_size);
1069 if (!pb->req_nr)
1070 remap_to_origin(cache, bio);
1071 else
1072 remap_to_cache(cache, bio, 0);
1073
1074 issue(cache, bio);
1075}
1076
/*
 * People generally discard large parts of a device, e.g. the whole device
 * when formatting. Splitting these large discards up into cache block
 * sized I/Os and then quiescing (always necessary for discard) takes too
 * long.
 *
 * We keep it simple, and allow any size of discard to come in, and just
 * mark off blocks on the discard bitset. No passdown occurs!
 *
 * To implement passdown we need to change the bio_prison such that a cell
 * can have a key that spans many blocks.
 */
1089static void process_discard_bio(struct cache *cache, struct bio *bio)
1090{
1091 dm_block_t start_block = dm_sector_div_up(bio->bi_sector,
1092 cache->discard_block_size);
1093 dm_block_t end_block = bio->bi_sector + bio_sectors(bio);
1094 dm_block_t b;
1095
	end_block = block_div(end_block, cache->discard_block_size);

1098 for (b = start_block; b < end_block; b++)
1099 set_discard(cache, to_dblock(b));
1100
1101 bio_endio(bio, 0);
1102}
1103
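/*
 * Throttle migrations: only allow a new one if the volume of data
 * currently being copied (in sectors) is below migration_threshold.
 */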
1104static bool spare_migration_bandwidth(struct cache *cache)
1105{
1106 sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) *
1107 cache->sectors_per_block;
1108 return current_volume < cache->migration_threshold;
1109}
1110
1111static bool is_writethrough_io(struct cache *cache, struct bio *bio,
1112 dm_cblock_t cblock)
1113{
1114 return bio_data_dir(bio) == WRITE &&
1115 cache->features.write_through && !is_dirty(cache, cblock);
1116}
1117
1118static void inc_hit_counter(struct cache *cache, struct bio *bio)
1119{
1120 atomic_inc(bio_data_dir(bio) == READ ?
1121 &cache->stats.read_hit : &cache->stats.write_hit);
1122}
1123
1124static void inc_miss_counter(struct cache *cache, struct bio *bio)
1125{
1126 atomic_inc(bio_data_dir(bio) == READ ?
1127 &cache->stats.read_miss : &cache->stats.write_miss);
1128}
1129
1130static void process_bio(struct cache *cache, struct prealloc *structs,
1131 struct bio *bio)
1132{
1133 int r;
1134 bool release_cell = true;
1135 dm_oblock_t block = get_bio_block(cache, bio);
1136 struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
1137 struct policy_result lookup_result;
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
	bool discarded_block = is_discarded_oblock(cache, block);
1141 bool can_migrate = discarded_block || spare_migration_bandwidth(cache);
1142
1143 /*
1144 * Check to see if that block is currently migrating.
1145 */
1146 cell_prealloc = prealloc_get_cell(structs);
1147 r = bio_detain(cache, block, bio, cell_prealloc,
1148 (cell_free_fn) prealloc_put_cell,
1149 structs, &new_ocell);
1150 if (r > 0)
1151 return;
1152
1153 r = policy_map(cache->policy, block, true, can_migrate, discarded_block,
1154 bio, &lookup_result);
1155
1156 if (r == -EWOULDBLOCK)
1157 /* migration has been denied */
1158 lookup_result.op = POLICY_MISS;
1159
1160 switch (lookup_result.op) {
1161 case POLICY_HIT:
1162 inc_hit_counter(cache, bio);
1163 pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
1164
		if (is_writethrough_io(cache, bio, lookup_result.cblock))
			remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
		else
			remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
1169
1170 issue(cache, bio);
1171 break;
1172
1173 case POLICY_MISS:
1174 inc_miss_counter(cache, bio);
1175 pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
		remap_to_origin_clear_discard(cache, bio, block);
		issue(cache, bio);
		break;
1179
1180 case POLICY_NEW:
1181 atomic_inc(&cache->stats.promotion);
1182 promote(cache, structs, block, lookup_result.cblock, new_ocell);
1183 release_cell = false;
1184 break;
1185
1186 case POLICY_REPLACE:
1187 cell_prealloc = prealloc_get_cell(structs);
1188 r = bio_detain(cache, lookup_result.old_oblock, bio, cell_prealloc,
1189 (cell_free_fn) prealloc_put_cell,
1190 structs, &old_ocell);
1191 if (r > 0) {
1192 /*
1193 * We have to be careful to avoid lock inversion of
1194 * the cells. So we back off, and wait for the
1195 * old_ocell to become free.
1196 */
1197 policy_force_mapping(cache->policy, block,
1198 lookup_result.old_oblock);
1199 atomic_inc(&cache->stats.cache_cell_clash);
1200 break;
1201 }
1202 atomic_inc(&cache->stats.demotion);
1203 atomic_inc(&cache->stats.promotion);
1204
1205 demote_then_promote(cache, structs, lookup_result.old_oblock,
1206 block, lookup_result.cblock,
1207 old_ocell, new_ocell);
1208 release_cell = false;
1209 break;
1210
1211 default:
1212 DMERR_LIMIT("%s: erroring bio, unknown policy op: %u", __func__,
1213 (unsigned) lookup_result.op);
1214 bio_io_error(bio);
1215 }
1216
1217 if (release_cell)
1218 cell_defer(cache, new_ocell, false);
1219}
1220
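/*
 * A commit is due once COMMIT_PERIOD has passed since the last one, or
 * if jiffies has wrapped around since then.
 */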
1221static int need_commit_due_to_time(struct cache *cache)
1222{
1223 return jiffies < cache->last_commit_jiffies ||
1224 jiffies > cache->last_commit_jiffies + COMMIT_PERIOD;
1225}
1226
1227static int commit_if_needed(struct cache *cache)
1228{
1229 if (dm_cache_changed_this_transaction(cache->cmd) &&
1230 (cache->commit_requested || need_commit_due_to_time(cache))) {
1231 atomic_inc(&cache->stats.commit_count);
1232 cache->last_commit_jiffies = jiffies;
1233 cache->commit_requested = false;
1234 return dm_cache_commit(cache->cmd, false);
1235 }
1236
1237 return 0;
1238}
1239
1240static void process_deferred_bios(struct cache *cache)
1241{
1242 unsigned long flags;
1243 struct bio_list bios;
1244 struct bio *bio;
1245 struct prealloc structs;
1246
1247 memset(&structs, 0, sizeof(structs));
1248 bio_list_init(&bios);
1249
1250 spin_lock_irqsave(&cache->lock, flags);
1251 bio_list_merge(&bios, &cache->deferred_bios);
1252 bio_list_init(&cache->deferred_bios);
1253 spin_unlock_irqrestore(&cache->lock, flags);
1254
1255 while (!bio_list_empty(&bios)) {
1256 /*
1257 * If we've got no free migration structs, and processing
1258 * this bio might require one, we pause until there are some
1259 * prepared mappings to process.
1260 */
1261 if (prealloc_data_structs(cache, &structs)) {
1262 spin_lock_irqsave(&cache->lock, flags);
1263 bio_list_merge(&cache->deferred_bios, &bios);
1264 spin_unlock_irqrestore(&cache->lock, flags);
1265 break;
1266 }
1267
1268 bio = bio_list_pop(&bios);
1269
1270 if (bio->bi_rw & REQ_FLUSH)
1271 process_flush_bio(cache, bio);
1272 else if (bio->bi_rw & REQ_DISCARD)
1273 process_discard_bio(cache, bio);
1274 else
1275 process_bio(cache, &structs, bio);
1276 }
1277
1278 prealloc_free_structs(cache, &structs);
1279}
1280
1281static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
1282{
1283 unsigned long flags;
1284 struct bio_list bios;
1285 struct bio *bio;
1286
1287 bio_list_init(&bios);
1288
1289 spin_lock_irqsave(&cache->lock, flags);
1290 bio_list_merge(&bios, &cache->deferred_flush_bios);
1291 bio_list_init(&cache->deferred_flush_bios);
1292 spin_unlock_irqrestore(&cache->lock, flags);
1293
1294 while ((bio = bio_list_pop(&bios)))
1295 submit_bios ? generic_make_request(bio) : bio_io_error(bio);
1296}
1297
static void process_deferred_writethrough_bios(struct cache *cache)
1299{
1300 unsigned long flags;
1301 struct bio_list bios;
1302 struct bio *bio;
1303
1304 bio_list_init(&bios);
1305
1306 spin_lock_irqsave(&cache->lock, flags);
1307 bio_list_merge(&bios, &cache->deferred_writethrough_bios);
1308 bio_list_init(&cache->deferred_writethrough_bios);
1309 spin_unlock_irqrestore(&cache->lock, flags);
1310
1311 while ((bio = bio_list_pop(&bios)))
1312 generic_make_request(bio);
1313}
1314
static void writeback_some_dirty_blocks(struct cache *cache)
1316{
1317 int r = 0;
1318 dm_oblock_t oblock;
1319 dm_cblock_t cblock;
1320 struct prealloc structs;
1321 struct dm_bio_prison_cell *old_ocell;
1322
1323 memset(&structs, 0, sizeof(structs));
1324
1325 while (spare_migration_bandwidth(cache)) {
1326 if (prealloc_data_structs(cache, &structs))
1327 break;
1328
1329 r = policy_writeback_work(cache->policy, &oblock, &cblock);
1330 if (r)
1331 break;
1332
1333 r = get_cell(cache, oblock, &structs, &old_ocell);
1334 if (r) {
1335 policy_set_dirty(cache->policy, oblock);
1336 break;
1337 }
1338
1339 writeback(cache, &structs, oblock, cblock, old_ocell);
1340 }
1341
1342 prealloc_free_structs(cache, &structs);
1343}
1344
1345/*----------------------------------------------------------------
1346 * Main worker loop
1347 *--------------------------------------------------------------*/
static bool is_quiescing(struct cache *cache)
1349{
1350 int r;
1351 unsigned long flags;
1352
1353 spin_lock_irqsave(&cache->lock, flags);
1354 r = cache->quiescing;
1355 spin_unlock_irqrestore(&cache->lock, flags);
1356
1357 return r;
1358}
1359
static void ack_quiescing(struct cache *cache)
1361{
1362 if (is_quiescing(cache)) {
1363 atomic_inc(&cache->quiescing_ack);
1364 wake_up(&cache->quiescing_wait);
1365 }
1366}
1367
1368static void wait_for_quiescing_ack(struct cache *cache)
1369{
1370 wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack));
1371}
1372
1373static void start_quiescing(struct cache *cache)
1374{
1375 unsigned long flags;
1376
1377 spin_lock_irqsave(&cache->lock, flags);
1378 cache->quiescing = true;
1379 spin_unlock_irqrestore(&cache->lock, flags);
1380
1381 wait_for_quiescing_ack(cache);
1382}
1383
1384static void stop_quiescing(struct cache *cache)
1385{
1386 unsigned long flags;
1387
1388 spin_lock_irqsave(&cache->lock, flags);
1389 cache->quiescing = false;
1390 spin_unlock_irqrestore(&cache->lock, flags);
1391
1392 atomic_set(&cache->quiescing_ack, 0);
1393}
1394
static void wait_for_migrations(struct cache *cache)
1396{
1397 wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations));
1398}
1399
1400static void stop_worker(struct cache *cache)
1401{
1402 cancel_delayed_work(&cache->waker);
1403 flush_workqueue(cache->wq);
1404}
1405
1406static void requeue_deferred_io(struct cache *cache)
1407{
1408 struct bio *bio;
1409 struct bio_list bios;
1410
1411 bio_list_init(&bios);
1412 bio_list_merge(&bios, &cache->deferred_bios);
1413 bio_list_init(&cache->deferred_bios);
1414
1415 while ((bio = bio_list_pop(&bios)))
1416 bio_endio(bio, DM_ENDIO_REQUEUE);
1417}
1418
1419static int more_work(struct cache *cache)
1420{
1421 if (is_quiescing(cache))
1422 return !list_empty(&cache->quiesced_migrations) ||
1423 !list_empty(&cache->completed_migrations) ||
1424 !list_empty(&cache->need_commit_migrations);
1425 else
1426 return !bio_list_empty(&cache->deferred_bios) ||
1427 !bio_list_empty(&cache->deferred_flush_bios) ||
			!bio_list_empty(&cache->deferred_writethrough_bios) ||
			!list_empty(&cache->quiesced_migrations) ||
1430 !list_empty(&cache->completed_migrations) ||
1431 !list_empty(&cache->need_commit_migrations);
1432}
1433
1434static void do_worker(struct work_struct *ws)
1435{
1436 struct cache *cache = container_of(ws, struct cache, worker);
1437
1438 do {
		if (!is_quiescing(cache)) {
			writeback_some_dirty_blocks(cache);
			process_deferred_writethrough_bios(cache);
			process_deferred_bios(cache);
		}

		process_migrations(cache, &cache->quiesced_migrations, issue_copy);
		process_migrations(cache, &cache->completed_migrations, complete_migration);

		if (commit_if_needed(cache)) {
			process_deferred_flush_bios(cache, false);

			/*
			 * FIXME: rollback metadata or just go into a
			 * failure mode and error everything
			 */
		} else {
			process_deferred_flush_bios(cache, true);
			process_migrations(cache, &cache->need_commit_migrations,
					   migration_success_post_commit);
		}

		ack_quiescing(cache);

	} while (more_work(cache));
1464}
1465
1466/*
1467 * We want to commit periodically so that not too much
1468 * unwritten metadata builds up.
1469 */
1470static void do_waker(struct work_struct *ws)
1471{
1472 struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
	policy_tick(cache->policy);
	wake_worker(cache);
1475 queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
1476}
1477
1478/*----------------------------------------------------------------*/
1479
1480static int is_congested(struct dm_dev *dev, int bdi_bits)
1481{
1482 struct request_queue *q = bdev_get_queue(dev->bdev);
1483 return bdi_congested(&q->backing_dev_info, bdi_bits);
1484}
1485
1486static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
1487{
1488 struct cache *cache = container_of(cb, struct cache, callbacks);
1489
1490 return is_congested(cache->origin_dev, bdi_bits) ||
1491 is_congested(cache->cache_dev, bdi_bits);
1492}
1493
1494/*----------------------------------------------------------------
1495 * Target methods
1496 *--------------------------------------------------------------*/
1497
1498/*
1499 * This function gets called on the error paths of the constructor, so we
1500 * have to cope with a partially initialised struct.
1501 */
1502static void destroy(struct cache *cache)
1503{
1504 unsigned i;
1505
1506 if (cache->next_migration)
1507 mempool_free(cache->next_migration, cache->migration_pool);
1508
1509 if (cache->migration_pool)
1510 mempool_destroy(cache->migration_pool);
1511
1512 if (cache->all_io_ds)
1513 dm_deferred_set_destroy(cache->all_io_ds);
1514
1515 if (cache->prison)
1516 dm_bio_prison_destroy(cache->prison);
1517
1518 if (cache->wq)
1519 destroy_workqueue(cache->wq);
1520
1521 if (cache->dirty_bitset)
1522 free_bitset(cache->dirty_bitset);
1523
1524 if (cache->discard_bitset)
1525 free_bitset(cache->discard_bitset);
1526
1527 if (cache->copier)
1528 dm_kcopyd_client_destroy(cache->copier);
1529
1530 if (cache->cmd)
1531 dm_cache_metadata_close(cache->cmd);
1532
1533 if (cache->metadata_dev)
1534 dm_put_device(cache->ti, cache->metadata_dev);
1535
1536 if (cache->origin_dev)
1537 dm_put_device(cache->ti, cache->origin_dev);
1538
1539 if (cache->cache_dev)
1540 dm_put_device(cache->ti, cache->cache_dev);
1541
1542 if (cache->policy)
1543 dm_cache_policy_destroy(cache->policy);
1544
1545 for (i = 0; i < cache->nr_ctr_args ; i++)
1546 kfree(cache->ctr_args[i]);
1547 kfree(cache->ctr_args);
1548
1549 kfree(cache);
1550}
1551
1552static void cache_dtr(struct dm_target *ti)
1553{
1554 struct cache *cache = ti->private;
1555
1556 destroy(cache);
1557}
1558
1559static sector_t get_dev_size(struct dm_dev *dev)
1560{
1561 return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
1562}
1563
1564/*----------------------------------------------------------------*/
1565
1566/*
1567 * Construct a cache device mapping.
1568 *
1569 * cache <metadata dev> <cache dev> <origin dev> <block size>
1570 * <#feature args> [<feature arg>]*
1571 * <policy> <#policy args> [<policy arg>]*
1572 *
1573 * metadata dev : fast device holding the persistent metadata
1574 * cache dev : fast device holding cached data blocks
1575 * origin dev : slow device holding original data blocks
1576 * block size : cache unit size in sectors
1577 *
1578 * #feature args : number of feature arguments passed
1579 * feature args : writethrough. (The default is writeback.)
1580 *
1581 * policy : the replacement policy to use
1582 * #policy args : an even number of policy arguments corresponding
1583 * to key/value pairs passed to the policy
1584 * policy args : key/value pairs passed to the policy
1585 * E.g. 'sequential_threshold 1024'
1586 * See cache-policies.txt for details.
1587 *
1588 * Optional feature arguments are:
1589 * writethrough : write through caching that prohibits cache block
1590 * content from being different from origin block content.
1591 * Without this argument, the default behaviour is to write
1592 * back cache block contents later for performance reasons,
1593 * so they may differ from the corresponding origin blocks.
1594 */
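/*
 * Example of a complete target line (device names are illustrative):
 *
 *   cache /dev/mapper/fast-meta /dev/mapper/fast /dev/mapper/slow 512 \
 *         1 writethrough default 2 sequential_threshold 1024
 *
 * i.e. 256KB cache blocks, writethrough mode, and the default policy with
 * its sequential_threshold set to 1024.
 */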
1595struct cache_args {
1596 struct dm_target *ti;
1597
1598 struct dm_dev *metadata_dev;
1599
1600 struct dm_dev *cache_dev;
1601 sector_t cache_sectors;
1602
1603 struct dm_dev *origin_dev;
1604 sector_t origin_sectors;
1605
1606 uint32_t block_size;
1607
1608 const char *policy_name;
1609 int policy_argc;
1610 const char **policy_argv;
1611
1612 struct cache_features features;
1613};
1614
1615static void destroy_cache_args(struct cache_args *ca)
1616{
1617 if (ca->metadata_dev)
1618 dm_put_device(ca->ti, ca->metadata_dev);
1619
1620 if (ca->cache_dev)
1621 dm_put_device(ca->ti, ca->cache_dev);
1622
1623 if (ca->origin_dev)
1624 dm_put_device(ca->ti, ca->origin_dev);
1625
1626 kfree(ca);
1627}
1628
1629static bool at_least_one_arg(struct dm_arg_set *as, char **error)
1630{
1631 if (!as->argc) {
1632 *error = "Insufficient args";
1633 return false;
1634 }
1635
1636 return true;
1637}
1638
1639static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
1640 char **error)
1641{
1642 int r;
1643 sector_t metadata_dev_size;
1644 char b[BDEVNAME_SIZE];
1645
1646 if (!at_least_one_arg(as, error))
1647 return -EINVAL;
1648
1649 r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
1650 &ca->metadata_dev);
1651 if (r) {
1652 *error = "Error opening metadata device";
1653 return r;
1654 }
1655
1656 metadata_dev_size = get_dev_size(ca->metadata_dev);
1657 if (metadata_dev_size > DM_CACHE_METADATA_MAX_SECTORS_WARNING)
1658 DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
		       bdevname(ca->metadata_dev->bdev, b), DM_CACHE_METADATA_MAX_SECTORS);
1660
1661 return 0;
1662}
1663
1664static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as,
1665 char **error)
1666{
1667 int r;
1668
1669 if (!at_least_one_arg(as, error))
1670 return -EINVAL;
1671
1672 r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
1673 &ca->cache_dev);
1674 if (r) {
1675 *error = "Error opening cache device";
1676 return r;
1677 }
1678 ca->cache_sectors = get_dev_size(ca->cache_dev);
1679
1680 return 0;
1681}
1682
1683static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
1684 char **error)
1685{
1686 int r;
1687
1688 if (!at_least_one_arg(as, error))
1689 return -EINVAL;
1690
1691 r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
1692 &ca->origin_dev);
1693 if (r) {
1694 *error = "Error opening origin device";
1695 return r;
1696 }
1697
1698 ca->origin_sectors = get_dev_size(ca->origin_dev);
1699 if (ca->ti->len > ca->origin_sectors) {
1700 *error = "Device size larger than cached device";
1701 return -EINVAL;
1702 }
1703
1704 return 0;
1705}
1706
1707static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
1708 char **error)
1709{
1710 unsigned long tmp;
1711
1712 if (!at_least_one_arg(as, error))
1713 return -EINVAL;
1714
1715 if (kstrtoul(dm_shift_arg(as), 10, &tmp) || !tmp ||
1716 tmp < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
1717 tmp & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
1718 *error = "Invalid data block size";
1719 return -EINVAL;
1720 }
1721
1722 if (tmp > ca->cache_sectors) {
1723 *error = "Data block size is larger than the cache device";
1724 return -EINVAL;
1725 }
1726
1727 ca->block_size = tmp;
1728
1729 return 0;
1730}
1731
1732static void init_features(struct cache_features *cf)
1733{
1734 cf->mode = CM_WRITE;
1735 cf->write_through = false;
1736}
1737
1738static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
1739 char **error)
1740{
1741 static struct dm_arg _args[] = {
1742 {0, 1, "Invalid number of cache feature arguments"},
1743 };
1744
1745 int r;
1746 unsigned argc;
1747 const char *arg;
1748 struct cache_features *cf = &ca->features;
1749
1750 init_features(cf);
1751
1752 r = dm_read_arg_group(_args, as, &argc, error);
1753 if (r)
1754 return -EINVAL;
1755
1756 while (argc--) {
1757 arg = dm_shift_arg(as);
1758
1759 if (!strcasecmp(arg, "writeback"))
1760 cf->write_through = false;
1761
1762 else if (!strcasecmp(arg, "writethrough"))
1763 cf->write_through = true;
1764
1765 else {
1766 *error = "Unrecognised cache feature requested";
1767 return -EINVAL;
1768 }
1769 }
1770
1771 return 0;
1772}
1773
1774static int parse_policy(struct cache_args *ca, struct dm_arg_set *as,
1775 char **error)
1776{
1777 static struct dm_arg _args[] = {
1778 {0, 1024, "Invalid number of policy arguments"},
1779 };
1780
1781 int r;
1782
1783 if (!at_least_one_arg(as, error))
1784 return -EINVAL;
1785
1786 ca->policy_name = dm_shift_arg(as);
1787
1788 r = dm_read_arg_group(_args, as, &ca->policy_argc, error);
1789 if (r)
1790 return -EINVAL;
1791
1792 ca->policy_argv = (const char **)as->argv;
1793 dm_consume_args(as, ca->policy_argc);
1794
1795 return 0;
1796}
1797
1798static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
1799 char **error)
1800{
1801 int r;
1802 struct dm_arg_set as;
1803
1804 as.argc = argc;
1805 as.argv = argv;
1806
1807 r = parse_metadata_dev(ca, &as, error);
1808 if (r)
1809 return r;
1810
1811 r = parse_cache_dev(ca, &as, error);
1812 if (r)
1813 return r;
1814
1815 r = parse_origin_dev(ca, &as, error);
1816 if (r)
1817 return r;
1818
1819 r = parse_block_size(ca, &as, error);
1820 if (r)
1821 return r;
1822
1823 r = parse_features(ca, &as, error);
1824 if (r)
1825 return r;
1826
1827 r = parse_policy(ca, &as, error);
1828 if (r)
1829 return r;
1830
1831 return 0;
1832}
1833
1834/*----------------------------------------------------------------*/
1835
1836static struct kmem_cache *migration_cache;
1837
#define NOT_CORE_OPTION 1
1839
static int process_config_option(struct cache *cache, const char *key, const char *value)
{
	unsigned long tmp;

	if (!strcasecmp(key, "migration_threshold")) {
		if (kstrtoul(value, 10, &tmp))
			return -EINVAL;
1847
1848 cache->migration_threshold = tmp;
1849 return 0;
1850 }
1851
1852 return NOT_CORE_OPTION;
1853}
1854
static int set_config_value(struct cache *cache, const char *key, const char *value)
1856{
1857 int r = process_config_option(cache, key, value);
1858
1859 if (r == NOT_CORE_OPTION)
1860 r = policy_set_config_value(cache->policy, key, value);
1861
1862 if (r)
1863 DMWARN("bad config value for %s: %s", key, value);
1864
1865 return r;
1866}
1867
1868static int set_config_values(struct cache *cache, int argc, const char **argv)
{
1870 int r = 0;
1871
1872 if (argc & 1) {
1873 DMWARN("Odd number of policy arguments given but they should be <key> <value> pairs.");
1874 return -EINVAL;
1875 }
1876
1877 while (argc) {
		r = set_config_value(cache, argv[0], argv[1]);
		if (r)
			break;

1882 argc -= 2;
1883 argv += 2;
1884 }
1885
1886 return r;
1887}
1888
1889static int create_cache_policy(struct cache *cache, struct cache_args *ca,
1890 char **error)
1891{
	cache->policy = dm_cache_policy_create(ca->policy_name,
1893 cache->cache_size,
1894 cache->origin_sectors,
1895 cache->sectors_per_block);
1896 if (!cache->policy) {
1897 *error = "Error creating cache's policy";
1898 return -ENOMEM;
1899 }
1900
	return 0;
}
1903
1904/*
 1905 * We want the discard block size to be a power of two, at least as
 1906 * large as the cache block size, and to give no more than 2^14 discard
 1907 * blocks across the origin.
1908 */
1909#define MAX_DISCARD_BLOCKS (1 << 14)
1910
1911static bool too_many_discard_blocks(sector_t discard_block_size,
1912 sector_t origin_size)
1913{
1914 (void) sector_div(origin_size, discard_block_size);
1915
1916 return origin_size > MAX_DISCARD_BLOCKS;
1917}
1918
1919static sector_t calculate_discard_block_size(sector_t cache_block_size,
1920 sector_t origin_size)
1921{
1922 sector_t discard_block_size;
1923
1924 discard_block_size = roundup_pow_of_two(cache_block_size);
1925
1926 if (origin_size)
1927 while (too_many_discard_blocks(discard_block_size, origin_size))
1928 discard_block_size *= 2;
1929
1930 return discard_block_size;
1931}
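/*
 * Worked example (figures are illustrative): with 512 sector (256KB)
 * cache blocks on a 2TB (2^32 sector) origin, we start at a discard
 * block size of 512 sectors, which would give 2^23 discard blocks.
 * Doubling until too_many_discard_blocks() no longer triggers ends at
 * 2^18 sectors (128MB), i.e. exactly 2^14 discard blocks.
 */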
1932
Joe Thornberf8350da2013-05-10 14:37:16 +01001933#define DEFAULT_MIGRATION_THRESHOLD 2048
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001934
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001935static int cache_create(struct cache_args *ca, struct cache **result)
1936{
1937 int r = 0;
1938 char **error = &ca->ti->error;
1939 struct cache *cache;
1940 struct dm_target *ti = ca->ti;
1941 dm_block_t origin_blocks;
1942 struct dm_cache_metadata *cmd;
1943 bool may_format = ca->features.mode == CM_WRITE;
1944
1945 cache = kzalloc(sizeof(*cache), GFP_KERNEL);
1946 if (!cache)
1947 return -ENOMEM;
1948
1949 cache->ti = ca->ti;
1950 ti->private = cache;
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001951 ti->num_flush_bios = 2;
1952 ti->flush_supported = true;
1953
1954 ti->num_discard_bios = 1;
1955 ti->discards_supported = true;
1956 ti->discard_zeroes_data_unsupported = true;
Heinz Mauelshagen50faf932014-05-23 14:10:01 -04001957 /* Discard bios must be split on a block boundary */
1958 ti->split_discard_bios = true;
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001959
Joe Thornber8c5008f2013-05-10 14:37:18 +01001960 cache->features = ca->features;
Mike Snitzer19b00922013-04-05 15:36:34 +01001961 ti->per_bio_data_size = get_per_bio_data_size(cache);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001962
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001963 cache->callbacks.congested_fn = cache_is_congested;
1964 dm_table_add_target_callbacks(ti->table, &cache->callbacks);
1965
1966 cache->metadata_dev = ca->metadata_dev;
1967 cache->origin_dev = ca->origin_dev;
1968 cache->cache_dev = ca->cache_dev;
1969
1970 ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL;
1971
1972 /* FIXME: factor out this whole section */
1973 origin_blocks = cache->origin_sectors = ca->origin_sectors;
Joe Thornber414dd672013-03-20 17:21:25 +00001974 origin_blocks = block_div(origin_blocks, ca->block_size);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001975 cache->origin_blocks = to_oblock(origin_blocks);
1976
1977 cache->sectors_per_block = ca->block_size;
1978 if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) {
1979 r = -EINVAL;
1980 goto bad;
1981 }
1982
1983 if (ca->block_size & (ca->block_size - 1)) {
1984 dm_block_t cache_size = ca->cache_sectors;
1985
1986 cache->sectors_per_block_shift = -1;
Joe Thornber414dd672013-03-20 17:21:25 +00001987 cache_size = block_div(cache_size, ca->block_size);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001988 cache->cache_size = to_cblock(cache_size);
1989 } else {
1990 cache->sectors_per_block_shift = __ffs(ca->block_size);
1991 cache->cache_size = to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift);
1992 }
1993
1994 r = create_cache_policy(cache, ca, error);
1995 if (r)
1996 goto bad;
Joe Thornber2f14f4b2013-05-10 14:37:21 +01001997
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001998 cache->policy_nr_args = ca->policy_argc;
Joe Thornber2f14f4b2013-05-10 14:37:21 +01001999 cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
2000
2001 r = set_config_values(cache, ca->policy_argc, ca->policy_argv);
2002 if (r) {
2003 *error = "Error setting cache policy's config values";
2004 goto bad;
2005 }
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002006
2007 cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
2008 ca->block_size, may_format,
2009 dm_cache_policy_get_hint_size(cache->policy));
2010 if (IS_ERR(cmd)) {
2011 *error = "Error creating metadata object";
2012 r = PTR_ERR(cmd);
2013 goto bad;
2014 }
2015 cache->cmd = cmd;
2016
2017 spin_lock_init(&cache->lock);
2018 bio_list_init(&cache->deferred_bios);
2019 bio_list_init(&cache->deferred_flush_bios);
Joe Thornbere2e74d62013-03-20 17:21:27 +00002020 bio_list_init(&cache->deferred_writethrough_bios);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002021 INIT_LIST_HEAD(&cache->quiesced_migrations);
2022 INIT_LIST_HEAD(&cache->completed_migrations);
2023 INIT_LIST_HEAD(&cache->need_commit_migrations);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002024 atomic_set(&cache->nr_migrations, 0);
2025 init_waitqueue_head(&cache->migration_wait);
2026
Joe Thornber8fafee92013-10-30 17:11:58 +00002027 init_waitqueue_head(&cache->quiescing_wait);
2028 atomic_set(&cache->quiescing_ack, 0);
2029
Wei Yongjunfa4d6832013-05-10 14:37:14 +01002030 r = -ENOMEM;
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002031 cache->nr_dirty = 0;
2032 cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
2033 if (!cache->dirty_bitset) {
2034 *error = "could not allocate dirty bitset";
2035 goto bad;
2036 }
2037 clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));
2038
2039 cache->discard_block_size =
2040 calculate_discard_block_size(cache->sectors_per_block,
2041 cache->origin_sectors);
2042 cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks);
2043 cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
2044 if (!cache->discard_bitset) {
2045 *error = "could not allocate discard bitset";
2046 goto bad;
2047 }
2048 clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
2049
2050 cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
2051 if (IS_ERR(cache->copier)) {
2052 *error = "could not create kcopyd client";
2053 r = PTR_ERR(cache->copier);
2054 goto bad;
2055 }
2056
2057 cache->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
2058 if (!cache->wq) {
2059 *error = "could not create workqueue for metadata object";
2060 goto bad;
2061 }
2062 INIT_WORK(&cache->worker, do_worker);
2063 INIT_DELAYED_WORK(&cache->waker, do_waker);
2064 cache->last_commit_jiffies = jiffies;
2065
2066 cache->prison = dm_bio_prison_create(PRISON_CELLS);
2067 if (!cache->prison) {
2068 *error = "could not create bio prison";
2069 goto bad;
2070 }
2071
2072 cache->all_io_ds = dm_deferred_set_create();
2073 if (!cache->all_io_ds) {
2074 *error = "could not create all_io deferred set";
2075 goto bad;
2076 }
2077
2078 cache->migration_pool = mempool_create_slab_pool(MIGRATION_POOL_SIZE,
2079 migration_cache);
2080 if (!cache->migration_pool) {
2081 *error = "Error creating cache's migration mempool";
2082 goto bad;
2083 }
2084
2085 cache->next_migration = NULL;
2086
2087 cache->need_tick_bio = true;
2088 cache->sized = false;
2089 cache->quiescing = false;
2090 cache->commit_requested = false;
2091 cache->loaded_mappings = false;
2092 cache->loaded_discards = false;
2093
2094 load_stats(cache);
2095
2096 atomic_set(&cache->stats.demotion, 0);
2097 atomic_set(&cache->stats.promotion, 0);
2098 atomic_set(&cache->stats.copies_avoided, 0);
2099 atomic_set(&cache->stats.cache_cell_clash, 0);
2100 atomic_set(&cache->stats.commit_count, 0);
2101 atomic_set(&cache->stats.discard_count, 0);
2102
2103 *result = cache;
2104 return 0;
2105
2106bad:
2107 destroy(cache);
2108 return r;
2109}
2110
2111static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
2112{
2113 unsigned i;
2114 const char **copy;
2115
2116 copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
2117 if (!copy)
2118 return -ENOMEM;
2119 for (i = 0; i < argc; i++) {
2120 copy[i] = kstrdup(argv[i], GFP_KERNEL);
2121 if (!copy[i]) {
2122 while (i--)
2123 kfree(copy[i]);
2124 kfree(copy);
2125 return -ENOMEM;
2126 }
2127 }
2128
2129 cache->nr_ctr_args = argc;
2130 cache->ctr_args = copy;
2131
2132 return 0;
2133}
2134
2135static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
2136{
2137 int r = -EINVAL;
2138 struct cache_args *ca;
2139 struct cache *cache = NULL;
2140
2141 ca = kzalloc(sizeof(*ca), GFP_KERNEL);
2142 if (!ca) {
2143 ti->error = "Error allocating memory for cache";
2144 return -ENOMEM;
2145 }
2146 ca->ti = ti;
2147
2148 r = parse_cache_args(ca, argc, argv, &ti->error);
2149 if (r)
2150 goto out;
2151
2152 r = cache_create(ca, &cache);
Heinz Mauelshagen617a0b82013-03-20 17:21:26 +00002153 if (r)
2154 goto out;
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002155
2156 r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
2157 if (r) {
2158 destroy(cache);
2159 goto out;
2160 }
2161
2162 ti->private = cache;
2163
2164out:
2165 destroy_cache_args(ca);
2166 return r;
2167}
2168
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002169static int cache_map(struct dm_target *ti, struct bio *bio)
2170{
2171 struct cache *cache = ti->private;
2172
2173 int r;
2174 dm_oblock_t block = get_bio_block(cache, bio);
Mike Snitzer19b00922013-04-05 15:36:34 +01002175 size_t pb_data_size = get_per_bio_data_size(cache);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002176 bool can_migrate = false;
2177 bool discarded_block;
2178 struct dm_bio_prison_cell *cell;
2179 struct policy_result lookup_result;
Heinz Mauelshagenbd86e7c2014-03-12 16:13:39 +01002180 struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002181
Heinz Mauelshagenbd86e7c2014-03-12 16:13:39 +01002182 if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002183 /*
2184 * This can only occur if the io goes to a partial block at
2185 * the end of the origin device. We don't cache these.
2186 * Just remap to the origin and carry on.
2187 */
Heinz Mauelshagenbd86e7c2014-03-12 16:13:39 +01002188 remap_to_origin(cache, bio);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002189 return DM_MAPIO_REMAPPED;
2190 }
2191
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002192 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
2193 defer_bio(cache, bio);
2194 return DM_MAPIO_SUBMITTED;
2195 }
2196
2197 /*
2198 * Check to see if that block is currently migrating.
2199 */
2200 cell = alloc_prison_cell(cache);
2201 if (!cell) {
2202 defer_bio(cache, bio);
2203 return DM_MAPIO_SUBMITTED;
2204 }
2205
2206 r = bio_detain(cache, block, bio, cell,
2207 (cell_free_fn) free_prison_cell,
2208 cache, &cell);
2209 if (r) {
2210 if (r < 0)
2211 defer_bio(cache, bio);
2212
2213 return DM_MAPIO_SUBMITTED;
2214 }
2215
2216 discarded_block = is_discarded_oblock(cache, block);
2217
2218 r = policy_map(cache->policy, block, false, can_migrate, discarded_block,
2219 bio, &lookup_result);
2220 if (r == -EWOULDBLOCK) {
2221 cell_defer(cache, cell, true);
2222 return DM_MAPIO_SUBMITTED;
2223
2224 } else if (r) {
2225 DMERR_LIMIT("Unexpected return from cache replacement policy: %d", r);
2226 bio_io_error(bio);
2227 return DM_MAPIO_SUBMITTED;
2228 }
2229
2230 switch (lookup_result.op) {
2231 case POLICY_HIT:
2232 inc_hit_counter(cache, bio);
2233 pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
2234
Joe Thornbere2e74d62013-03-20 17:21:27 +00002235 if (is_writethrough_io(cache, bio, lookup_result.cblock))
2236 remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
2237 else
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002238 remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
Joe Thornbere2e74d62013-03-20 17:21:27 +00002239
2240 cell_defer(cache, cell, false);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002241 break;
2242
2243 case POLICY_MISS:
2244 inc_miss_counter(cache, bio);
2245 pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
2246
2247 if (pb->req_nr != 0) {
2248 /*
2249 * This is a duplicate writethrough io that is no
2250 * longer needed because the block has been demoted.
2251 */
2252 bio_endio(bio, 0);
2253 cell_defer(cache, cell, false);
2254 return DM_MAPIO_SUBMITTED;
2255 } else {
2256 remap_to_origin_clear_discard(cache, bio, block);
2257 cell_defer(cache, cell, false);
2258 }
2259 break;
2260
2261 default:
2262 DMERR_LIMIT("%s: erroring bio: unknown policy op: %u", __func__,
2263 (unsigned) lookup_result.op);
2264 bio_io_error(bio);
2265 return DM_MAPIO_SUBMITTED;
2266 }
2267
2268 return DM_MAPIO_REMAPPED;
2269}
2270
2271static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
2272{
2273 struct cache *cache = ti->private;
2274 unsigned long flags;
Mike Snitzer19b00922013-04-05 15:36:34 +01002275 size_t pb_data_size = get_per_bio_data_size(cache);
2276 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002277
2278 if (pb->tick) {
2279 policy_tick(cache->policy);
2280
2281 spin_lock_irqsave(&cache->lock, flags);
2282 cache->need_tick_bio = true;
2283 spin_unlock_irqrestore(&cache->lock, flags);
2284 }
2285
2286 check_for_quiesced_migrations(cache, pb);
2287
2288 return 0;
2289}
2290
2291static int write_dirty_bitset(struct cache *cache)
2292{
2293 unsigned i, r;
2294
2295 for (i = 0; i < from_cblock(cache->cache_size); i++) {
2296 r = dm_cache_set_dirty(cache->cmd, to_cblock(i),
2297 is_dirty(cache, to_cblock(i)));
2298 if (r)
2299 return r;
2300 }
2301
2302 return 0;
2303}
2304
2305static int write_discard_bitset(struct cache *cache)
2306{
2307 unsigned i, r;
2308
2309 r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size,
2310 cache->discard_nr_blocks);
2311 if (r) {
2312 DMERR("could not resize on-disk discard bitset");
2313 return r;
2314 }
2315
2316 for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) {
2317 r = dm_cache_set_discard(cache->cmd, to_dblock(i),
2318 is_discarded(cache, to_dblock(i)));
2319 if (r)
2320 return r;
2321 }
2322
2323 return 0;
2324}
2325
2326static int save_hint(void *context, dm_cblock_t cblock, dm_oblock_t oblock,
2327 uint32_t hint)
2328{
2329 struct cache *cache = context;
2330 return dm_cache_save_hint(cache->cmd, cblock, hint);
2331}
2332
2333static int write_hints(struct cache *cache)
2334{
2335 int r;
2336
2337 r = dm_cache_begin_hints(cache->cmd, cache->policy);
2338 if (r) {
2339 DMERR("dm_cache_begin_hints failed");
2340 return r;
2341 }
2342
2343 r = policy_walk_mappings(cache->policy, save_hint, cache);
2344 if (r)
2345 DMERR("policy_walk_mappings failed");
2346
2347 return r;
2348}
2349
2350/*
 2351 * Returns true if all the metadata was written and committed successfully.
2352 */
2353static bool sync_metadata(struct cache *cache)
2354{
2355 int r1, r2, r3, r4;
2356
2357 r1 = write_dirty_bitset(cache);
2358 if (r1)
2359 DMERR("could not write dirty bitset");
2360
2361 r2 = write_discard_bitset(cache);
2362 if (r2)
2363 DMERR("could not write discard bitset");
2364
2365 save_stats(cache);
2366
2367 r3 = write_hints(cache);
2368 if (r3)
2369 DMERR("could not write hints");
2370
2371 /*
2372 * If writing the above metadata failed, we still commit, but don't
2373 * set the clean shutdown flag. This will effectively force every
2374 * dirty bit to be set on reload.
2375 */
2376 r4 = dm_cache_commit(cache->cmd, !r1 && !r2 && !r3);
2377 if (r4)
2378 DMERR("could not write cache metadata. Data loss may occur.");
2379
2380 return !r1 && !r2 && !r3 && !r4;
2381}
2382
2383static void cache_postsuspend(struct dm_target *ti)
2384{
2385 struct cache *cache = ti->private;
2386
2387 start_quiescing(cache);
2388 wait_for_migrations(cache);
2389 stop_worker(cache);
2390 requeue_deferred_io(cache);
2391 stop_quiescing(cache);
2392
2393 (void) sync_metadata(cache);
2394}
2395
2396static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
2397 bool dirty, uint32_t hint, bool hint_valid)
2398{
2399 int r;
2400 struct cache *cache = context;
2401
2402 r = policy_load_mapping(cache->policy, oblock, cblock, hint, hint_valid);
2403 if (r)
2404 return r;
2405
2406 if (dirty)
2407 set_dirty(cache, oblock, cblock);
2408 else
2409 clear_dirty(cache, oblock, cblock);
2410
2411 return 0;
2412}
2413
2414static int load_discard(void *context, sector_t discard_block_size,
2415 dm_dblock_t dblock, bool discard)
2416{
2417 struct cache *cache = context;
2418
2419 /* FIXME: handle mis-matched block size */
2420
2421 if (discard)
2422 set_discard(cache, dblock);
2423 else
2424 clear_discard(cache, dblock);
2425
2426 return 0;
2427}
2428
2429static int cache_preresume(struct dm_target *ti)
2430{
2431 int r = 0;
2432 struct cache *cache = ti->private;
2433 sector_t actual_cache_size = get_dev_size(cache->cache_dev);
2434 (void) sector_div(actual_cache_size, cache->sectors_per_block);
2435
2436 /*
2437 * Check to see if the cache has resized.
2438 */
2439 if (from_cblock(cache->cache_size) != actual_cache_size || !cache->sized) {
2440 cache->cache_size = to_cblock(actual_cache_size);
2441
2442 r = dm_cache_resize(cache->cmd, cache->cache_size);
2443 if (r) {
2444 DMERR("could not resize cache metadata");
2445 return r;
2446 }
2447
2448 cache->sized = true;
2449 }
2450
2451 if (!cache->loaded_mappings) {
Mike Snitzerea2dd8c2013-03-20 17:21:28 +00002452 r = dm_cache_load_mappings(cache->cmd, cache->policy,
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002453 load_mapping, cache);
2454 if (r) {
2455 DMERR("could not load cache mappings");
2456 return r;
2457 }
2458
2459 cache->loaded_mappings = true;
2460 }
2461
2462 if (!cache->loaded_discards) {
2463 r = dm_cache_load_discards(cache->cmd, load_discard, cache);
2464 if (r) {
2465 DMERR("could not load origin discards");
2466 return r;
2467 }
2468
2469 cache->loaded_discards = true;
2470 }
2471
2472 return r;
2473}
2474
2475static void cache_resume(struct dm_target *ti)
2476{
2477 struct cache *cache = ti->private;
2478
2479 cache->need_tick_bio = true;
2480 do_waker(&cache->waker.work);
2481}
2482
2483/*
2484 * Status format:
2485 *
2486 * <#used metadata blocks>/<#total metadata blocks>
2487 * <#read hits> <#read misses> <#write hits> <#write misses>
2488 * <#demotions> <#promotions> <#blocks in cache> <#dirty>
2489 * <#features> <features>*
2490 * <#core args> <core args>
2491 * <#policy args> <policy args>*
2492 */
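/*
 * An illustrative (made up) INFO line; the trailing key/value pairs are
 * whatever the policy in use chooses to emit:
 *
 *   89/1024 2031 1124 1963 362 81 242 4096 47 1 writethrough \
 *   2 migration_threshold 2048 <policy config pairs>
 */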
2493static void cache_status(struct dm_target *ti, status_type_t type,
2494 unsigned status_flags, char *result, unsigned maxlen)
2495{
2496 int r = 0;
2497 unsigned i;
2498 ssize_t sz = 0;
2499 dm_block_t nr_free_blocks_metadata = 0;
2500 dm_block_t nr_blocks_metadata = 0;
2501 char buf[BDEVNAME_SIZE];
2502 struct cache *cache = ti->private;
2503 dm_cblock_t residency;
2504
2505 switch (type) {
2506 case STATUSTYPE_INFO:
2507 /* Commit to ensure statistics aren't out-of-date */
2508 if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) {
2509 r = dm_cache_commit(cache->cmd, false);
2510 if (r)
2511 DMERR("could not commit metadata for accurate status");
2512 }
2513
2514 r = dm_cache_get_free_metadata_block_count(cache->cmd,
2515 &nr_free_blocks_metadata);
2516 if (r) {
2517 DMERR("could not get metadata free block count");
2518 goto err;
2519 }
2520
2521 r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata);
2522 if (r) {
2523 DMERR("could not get metadata device size");
2524 goto err;
2525 }
2526
2527 residency = policy_residency(cache->policy);
2528
2529 DMEMIT("%llu/%llu %u %u %u %u %u %u %llu %u ",
2530 (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
2531 (unsigned long long)nr_blocks_metadata,
2532 (unsigned) atomic_read(&cache->stats.read_hit),
2533 (unsigned) atomic_read(&cache->stats.read_miss),
2534 (unsigned) atomic_read(&cache->stats.write_hit),
2535 (unsigned) atomic_read(&cache->stats.write_miss),
2536 (unsigned) atomic_read(&cache->stats.demotion),
2537 (unsigned) atomic_read(&cache->stats.promotion),
2538 (unsigned long long) from_cblock(residency),
2539 cache->nr_dirty);
2540
2541 if (cache->features.write_through)
2542 DMEMIT("1 writethrough ");
2543 else
2544 DMEMIT("0 ");
2545
2546 DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);
2547 if (sz < maxlen) {
2548 r = policy_emit_config_values(cache->policy, result + sz, maxlen - sz);
2549 if (r)
2550 DMERR("policy_emit_config_values returned %d", r);
2551 }
2552
2553 break;
2554
2555 case STATUSTYPE_TABLE:
2556 format_dev_t(buf, cache->metadata_dev->bdev->bd_dev);
2557 DMEMIT("%s ", buf);
2558 format_dev_t(buf, cache->cache_dev->bdev->bd_dev);
2559 DMEMIT("%s ", buf);
2560 format_dev_t(buf, cache->origin_dev->bdev->bd_dev);
2561 DMEMIT("%s", buf);
2562
2563 for (i = 0; i < cache->nr_ctr_args - 1; i++)
2564 DMEMIT(" %s", cache->ctr_args[i]);
2565 if (cache->nr_ctr_args)
2566 DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]);
2567 }
2568
2569 return;
2570
2571err:
2572 DMEMIT("Error");
2573}
2574
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002575/*
2576 * Supports <key> <value>.
2577 *
2578 * The key migration_threshold is supported by the cache target core.
2579 */
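/*
 * For example (the device name is illustrative):
 *
 *   dmsetup message my-cache 0 migration_threshold 4096
 *
 * Keys the core doesn't recognise are passed through to the policy via
 * policy_set_config_value().
 */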
2580static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
2581{
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002582 struct cache *cache = ti->private;
2583
2584 if (argc != 2)
2585 return -EINVAL;
2586
Joe Thornber2f14f4b2013-05-10 14:37:21 +01002587 return set_config_value(cache, argv[0], argv[1]);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002588}
2589
2590static int cache_iterate_devices(struct dm_target *ti,
2591 iterate_devices_callout_fn fn, void *data)
2592{
2593 int r = 0;
2594 struct cache *cache = ti->private;
2595
2596 r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data);
2597 if (!r)
2598 r = fn(ti, cache->origin_dev, 0, ti->len, data);
2599
2600 return r;
2601}
2602
2603/*
2604 * We assume I/O is going to the origin (which is the volume
 2605 * more likely to have restrictions, e.g. by being striped).
2606 * (Looking up the exact location of the data would be expensive
2607 * and could always be out of date by the time the bio is submitted.)
2608 */
2609static int cache_bvec_merge(struct dm_target *ti,
2610 struct bvec_merge_data *bvm,
2611 struct bio_vec *biovec, int max_size)
2612{
2613 struct cache *cache = ti->private;
2614 struct request_queue *q = bdev_get_queue(cache->origin_dev->bdev);
2615
2616 if (!q->merge_bvec_fn)
2617 return max_size;
2618
2619 bvm->bi_bdev = cache->origin_dev->bdev;
2620 return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
2621}
2622
2623static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
2624{
2625 /*
2626 * FIXME: these limits may be incompatible with the cache device
2627 */
2628 limits->max_discard_sectors = cache->discard_block_size * 1024;
2629 limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
2630}
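/*
 * Continuing the discard block size example above: a 2^18 sector
 * discard block gives a discard_granularity of 128MB and a
 * max_discard_sectors of 2^28 sectors (128GB).  Figures are
 * illustrative only.
 */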
2631
2632static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
2633{
2634 struct cache *cache = ti->private;
2635
2636 blk_limits_io_min(limits, 0);
2637 blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
2638 set_discard_limits(cache, limits);
2639}
2640
2641/*----------------------------------------------------------------*/
2642
2643static struct target_type cache_target = {
2644 .name = "cache",
Joe Thornber2f14f4b2013-05-10 14:37:21 +01002645 .version = {1, 1, 1},
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002646 .module = THIS_MODULE,
2647 .ctr = cache_ctr,
2648 .dtr = cache_dtr,
2649 .map = cache_map,
2650 .end_io = cache_end_io,
2651 .postsuspend = cache_postsuspend,
2652 .preresume = cache_preresume,
2653 .resume = cache_resume,
2654 .status = cache_status,
2655 .message = cache_message,
2656 .iterate_devices = cache_iterate_devices,
2657 .merge = cache_bvec_merge,
2658 .io_hints = cache_io_hints,
2659};
2660
2661static int __init dm_cache_init(void)
2662{
2663 int r;
2664
2665 r = dm_register_target(&cache_target);
2666 if (r) {
2667 DMERR("cache target registration failed: %d", r);
2668 return r;
2669 }
2670
2671 migration_cache = KMEM_CACHE(dm_cache_migration, 0);
2672 if (!migration_cache) {
2673 dm_unregister_target(&cache_target);
2674 return -ENOMEM;
2675 }
2676
2677 return 0;
2678}
2679
2680static void __exit dm_cache_exit(void)
2681{
2682 dm_unregister_target(&cache_target);
2683 kmem_cache_destroy(migration_cache);
2684}
2685
2686module_init(dm_cache_init);
2687module_exit(dm_cache_exit);
2688
2689MODULE_DESCRIPTION(DM_NAME " cache target");
2690MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>");
2691MODULE_LICENSE("GPL");