/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-prison.h"
#include "dm-bio-record.h"
#include "dm-cache-metadata.h"

#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define DM_MSG_PREFIX "cache"

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(cache_copy_throttle,
	"A percentage of time allocated for copying to and/or from cache");

/*----------------------------------------------------------------*/

/*
 * Glossary:
 *
 * oblock: index of an origin block
 * cblock: index of a cache block
 * promotion: movement of a block from origin to cache
 * demotion: movement of a block from cache to origin
 * migration: movement of a block between the origin and cache device,
 *	      either direction
 */

/*----------------------------------------------------------------*/

static size_t bitset_size_in_bytes(unsigned nr_entries)
{
	return sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);
}

static unsigned long *alloc_bitset(unsigned nr_entries)
{
	size_t s = bitset_size_in_bytes(nr_entries);
	return vzalloc(s);
}

static void clear_bitset(void *bitset, unsigned nr_entries)
{
	size_t s = bitset_size_in_bytes(nr_entries);
	memset(bitset, 0, s);
}

static void free_bitset(unsigned long *bits)
{
	vfree(bits);
}

/*----------------------------------------------------------------*/

#define PRISON_CELLS 1024
#define MIGRATION_POOL_SIZE 128
#define COMMIT_PERIOD HZ
#define MIGRATION_COUNT_WINDOW 10

/*
 * The block size of the device holding cache data must be >= 32KB
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)

/*
 * FIXME: the cache is read/write for the time being.
 */
enum cache_mode {
	CM_WRITE,		/* metadata may be changed */
	CM_READ_ONLY,		/* metadata may not be changed */
};

struct cache_features {
	enum cache_mode mode;
	bool write_through:1;
};

struct cache_stats {
	atomic_t read_hit;
	atomic_t read_miss;
	atomic_t write_hit;
	atomic_t write_miss;
	atomic_t demotion;
	atomic_t promotion;
	atomic_t copies_avoided;
	atomic_t cache_cell_clash;
	atomic_t commit_count;
	atomic_t discard_count;
};

struct cache {
	struct dm_target *ti;
	struct dm_target_callbacks callbacks;

	/*
	 * Metadata is written to this device.
	 */
	struct dm_dev *metadata_dev;

	/*
	 * The slower of the two data devices.  Typically a spindle.
	 */
	struct dm_dev *origin_dev;

	/*
	 * The faster of the two data devices.  Typically an SSD.
	 */
	struct dm_dev *cache_dev;

	/*
	 * Cache features such as write-through.
	 */
	struct cache_features features;

	/*
	 * Size of the origin device in _complete_ blocks and native sectors.
	 */
	dm_oblock_t origin_blocks;
	sector_t origin_sectors;

	/*
	 * Size of the cache device in blocks.
	 */
	dm_cblock_t cache_size;

	/*
	 * Fields for converting from sectors to blocks.
	 */
	uint32_t sectors_per_block;
	int sectors_per_block_shift;

	struct dm_cache_metadata *cmd;

	spinlock_t lock;
	struct bio_list deferred_bios;
	struct bio_list deferred_flush_bios;
	struct bio_list deferred_writethrough_bios;
	struct list_head quiesced_migrations;
	struct list_head completed_migrations;
	struct list_head need_commit_migrations;
	sector_t migration_threshold;
	atomic_t nr_migrations;
	wait_queue_head_t migration_wait;

	wait_queue_head_t quiescing_wait;
	atomic_t quiescing_ack;

	/*
	 * cache_size entries, dirty if set
	 */
	dm_cblock_t nr_dirty;
	unsigned long *dirty_bitset;

	/*
	 * origin_blocks entries, discarded if set.
	 */
	uint32_t discard_block_size; /* a power of 2 times sectors per block */
	dm_dblock_t discard_nr_blocks;
	unsigned long *discard_bitset;

	struct dm_kcopyd_client *copier;
	struct workqueue_struct *wq;
	struct work_struct worker;

	struct delayed_work waker;
	unsigned long last_commit_jiffies;

	struct dm_bio_prison *prison;
	struct dm_deferred_set *all_io_ds;

	mempool_t *migration_pool;
	struct dm_cache_migration *next_migration;

	struct dm_cache_policy *policy;
	unsigned policy_nr_args;

	bool need_tick_bio:1;
	bool sized:1;
	bool quiescing:1;
	bool commit_requested:1;
	bool loaded_mappings:1;
	bool loaded_discards:1;

	struct cache_stats stats;

	/*
	 * Rather than reconstructing the table line for the status we just
	 * save it and regurgitate.
	 */
	unsigned nr_ctr_args;
	const char **ctr_args;
};

struct per_bio_data {
	bool tick:1;
	unsigned req_nr:2;
	struct dm_deferred_entry *all_io_entry;

	/*
	 * writethrough fields.  These MUST remain at the end of this
	 * structure and the 'cache' member must be the first as it
	 * is used to determine the offset of the writethrough fields.
	 */
	struct cache *cache;
	dm_cblock_t cblock;
	bio_end_io_t *saved_bi_end_io;
	struct dm_bio_details bio_details;
};

struct dm_cache_migration {
	struct list_head list;
	struct cache *cache;

	unsigned long start_jiffies;
	dm_oblock_t old_oblock;
	dm_oblock_t new_oblock;
	dm_cblock_t cblock;

	bool err:1;
	bool writeback:1;
	bool demote:1;
	bool promote:1;

	struct dm_bio_prison_cell *old_ocell;
	struct dm_bio_prison_cell *new_ocell;
};

/*
 * Processing a bio in the worker thread may require these memory
 * allocations.  We prealloc to avoid deadlocks (the same worker thread
 * frees them back to the mempool).
 */
struct prealloc {
	struct dm_cache_migration *mg;
	struct dm_bio_prison_cell *cell1;
	struct dm_bio_prison_cell *cell2;
};

static void wake_worker(struct cache *cache)
{
	queue_work(cache->wq, &cache->worker);
}

/*----------------------------------------------------------------*/

static struct dm_bio_prison_cell *alloc_prison_cell(struct cache *cache)
{
	/* FIXME: change to use a local slab. */
	return dm_bio_prison_alloc_cell(cache->prison, GFP_NOWAIT);
}

static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cell)
{
	dm_bio_prison_free_cell(cache->prison, cell);
}

static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
{
	if (!p->mg) {
		p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
		if (!p->mg)
			return -ENOMEM;
	}

	if (!p->cell1) {
		p->cell1 = alloc_prison_cell(cache);
		if (!p->cell1)
			return -ENOMEM;
	}

	if (!p->cell2) {
		p->cell2 = alloc_prison_cell(cache);
		if (!p->cell2)
			return -ENOMEM;
	}

	return 0;
}

static void prealloc_free_structs(struct cache *cache, struct prealloc *p)
{
	if (p->cell2)
		free_prison_cell(cache, p->cell2);

	if (p->cell1)
		free_prison_cell(cache, p->cell1);

	if (p->mg)
		mempool_free(p->mg, cache->migration_pool);
}

static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
{
	struct dm_cache_migration *mg = p->mg;

	BUG_ON(!mg);
	p->mg = NULL;

	return mg;
}

/*
 * You must have a cell within the prealloc struct to return.  If not this
 * function will BUG() rather than returning NULL.
 */
static struct dm_bio_prison_cell *prealloc_get_cell(struct prealloc *p)
{
	struct dm_bio_prison_cell *r = NULL;

	if (p->cell1) {
		r = p->cell1;
		p->cell1 = NULL;

	} else if (p->cell2) {
		r = p->cell2;
		p->cell2 = NULL;
	} else
		BUG();

	return r;
}

/*
 * You can't have more than two cells in a prealloc struct.  BUG() will be
 * called if you try and overfill.
 */
static void prealloc_put_cell(struct prealloc *p, struct dm_bio_prison_cell *cell)
{
	if (!p->cell2)
		p->cell2 = cell;

	else if (!p->cell1)
		p->cell1 = cell;

	else
		BUG();
}

/*----------------------------------------------------------------*/

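/*
 * Bio prison cells are keyed by origin block.  While a block is being
 * migrated, any bio for that block gets detained in the cell and is
 * only released (onto deferred_bios) once the migration completes.
 */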
static void build_key(dm_oblock_t oblock, struct dm_cell_key *key)
{
	key->virtual = 0;
	key->dev = 0;
	key->block = from_oblock(oblock);
}

/*
 * The caller hands in a preallocated cell, and a free function for it.
 * The cell will be freed if there's an error, or if it wasn't used because
 * a cell with that key already exists.
 */
typedef void (*cell_free_fn)(void *context, struct dm_bio_prison_cell *cell);

static int bio_detain(struct cache *cache, dm_oblock_t oblock,
		      struct bio *bio, struct dm_bio_prison_cell *cell_prealloc,
		      cell_free_fn free_fn, void *free_context,
		      struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_cell_key key;

	build_key(oblock, &key);
	r = dm_bio_detain(cache->prison, &key, bio, cell_prealloc, cell_result);
	if (r)
		free_fn(free_context, cell_prealloc);

	return r;
}

static int get_cell(struct cache *cache,
		    dm_oblock_t oblock,
		    struct prealloc *structs,
		    struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_cell_key key;
	struct dm_bio_prison_cell *cell_prealloc;

	cell_prealloc = prealloc_get_cell(structs);

	build_key(oblock, &key);
	r = dm_get_cell(cache->prison, &key, cell_prealloc, cell_result);
	if (r)
		prealloc_put_cell(structs, cell_prealloc);

	return r;
}

/*----------------------------------------------------------------*/

static bool is_dirty(struct cache *cache, dm_cblock_t b)
{
	return test_bit(from_cblock(b), cache->dirty_bitset);
}

static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
{
	if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
		cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) + 1);
		policy_set_dirty(cache->policy, oblock);
	}
}

static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
{
	if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
		policy_clear_dirty(cache->policy, oblock);
		cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) - 1);
		if (!from_cblock(cache->nr_dirty))
			dm_table_event(cache->ti->table);
	}
}

/*----------------------------------------------------------------*/

static bool block_size_is_power_of_two(struct cache *cache)
{
	return cache->sectors_per_block_shift >= 0;
}

static dm_block_t block_div(dm_block_t b, uint32_t n)
{
	do_div(b, n);

	return b;
}

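/*
 * Discards are recorded at discard_block_size granularity, which is at
 * least as coarse as the cache block size; oblock_to_dblock() maps an
 * origin block to the discard block that contains it.
 */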
static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
{
	uint32_t discard_blocks = cache->discard_block_size;
	dm_block_t b = from_oblock(oblock);

	if (!block_size_is_power_of_two(cache))
		discard_blocks = discard_blocks / cache->sectors_per_block;
	else
		discard_blocks >>= cache->sectors_per_block_shift;

	b = block_div(b, discard_blocks);

	return to_dblock(b);
}

static void set_discard(struct cache *cache, dm_dblock_t b)
{
	unsigned long flags;

	atomic_inc(&cache->stats.discard_count);

	spin_lock_irqsave(&cache->lock, flags);
	set_bit(from_dblock(b), cache->discard_bitset);
	spin_unlock_irqrestore(&cache->lock, flags);
}

static void clear_discard(struct cache *cache, dm_dblock_t b)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	clear_bit(from_dblock(b), cache->discard_bitset);
	spin_unlock_irqrestore(&cache->lock, flags);
}

static bool is_discarded(struct cache *cache, dm_dblock_t b)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	r = test_bit(from_dblock(b), cache->discard_bitset);
	spin_unlock_irqrestore(&cache->lock, flags);

	return r;
}

static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
		     cache->discard_bitset);
	spin_unlock_irqrestore(&cache->lock, flags);

	return r;
}

/*----------------------------------------------------------------*/

static void load_stats(struct cache *cache)
{
	struct dm_cache_statistics stats;

	dm_cache_metadata_get_stats(cache->cmd, &stats);
	atomic_set(&cache->stats.read_hit, stats.read_hits);
	atomic_set(&cache->stats.read_miss, stats.read_misses);
	atomic_set(&cache->stats.write_hit, stats.write_hits);
	atomic_set(&cache->stats.write_miss, stats.write_misses);
}

static void save_stats(struct cache *cache)
{
	struct dm_cache_statistics stats;

	stats.read_hits = atomic_read(&cache->stats.read_hit);
	stats.read_misses = atomic_read(&cache->stats.read_miss);
	stats.write_hits = atomic_read(&cache->stats.write_hit);
	stats.write_misses = atomic_read(&cache->stats.write_miss);

	dm_cache_metadata_set_stats(cache->cmd, &stats);
}

/*----------------------------------------------------------------
 * Per bio data
 *--------------------------------------------------------------*/

/*
 * If using writeback, leave out struct per_bio_data's writethrough fields.
 */
#define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
#define PB_DATA_SIZE_WT (sizeof(struct per_bio_data))

static size_t get_per_bio_data_size(struct cache *cache)
{
	return cache->features.write_through ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
}

static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
{
	struct per_bio_data *pb = dm_per_bio_data(bio, data_size);
	BUG_ON(!pb);
	return pb;
}

static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size)
{
	struct per_bio_data *pb = get_per_bio_data(bio, data_size);

	pb->tick = false;
	pb->req_nr = dm_bio_get_target_bio_nr(bio);
	pb->all_io_entry = NULL;

	return pb;
}

/*----------------------------------------------------------------
 * Remapping
 *--------------------------------------------------------------*/
static void remap_to_origin(struct cache *cache, struct bio *bio)
{
	bio->bi_bdev = cache->origin_dev->bdev;
}

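/*
 * Remap a bio to the equivalent offset within a cache block.  With a
 * power-of-two block size this is a shift and mask; otherwise a
 * multiply plus sector_div() is needed.
 */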
static void remap_to_cache(struct cache *cache, struct bio *bio,
			   dm_cblock_t cblock)
{
	sector_t bi_sector = bio->bi_sector;

	bio->bi_bdev = cache->cache_dev->bdev;
	if (!block_size_is_power_of_two(cache))
		bio->bi_sector = (from_cblock(cblock) * cache->sectors_per_block) +
				sector_div(bi_sector, cache->sectors_per_block);
	else
		bio->bi_sector = (from_cblock(cblock) << cache->sectors_per_block_shift) |
				(bi_sector & (cache->sectors_per_block - 1));
}

static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
{
	unsigned long flags;
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	spin_lock_irqsave(&cache->lock, flags);
	if (cache->need_tick_bio &&
	    !(bio->bi_rw & (REQ_FUA | REQ_FLUSH | REQ_DISCARD))) {
		pb->tick = true;
		cache->need_tick_bio = false;
	}
	spin_unlock_irqrestore(&cache->lock, flags);
}

static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
					  dm_oblock_t oblock)
{
	check_if_tick_bio_needed(cache, bio);
	remap_to_origin(cache, bio);
	if (bio_data_dir(bio) == WRITE)
		clear_discard(cache, oblock_to_dblock(cache, oblock));
}

static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
				 dm_oblock_t oblock, dm_cblock_t cblock)
{
	remap_to_cache(cache, bio, cblock);
	if (bio_data_dir(bio) == WRITE) {
		set_dirty(cache, oblock, cblock);
		clear_discard(cache, oblock_to_dblock(cache, oblock));
	}
}

static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
{
	sector_t block_nr = bio->bi_sector;

	if (!block_size_is_power_of_two(cache))
		(void) sector_div(block_nr, cache->sectors_per_block);
	else
		block_nr >>= cache->sectors_per_block_shift;

	return to_oblock(block_nr);
}

static int bio_triggers_commit(struct cache *cache, struct bio *bio)
{
	return bio->bi_rw & (REQ_FLUSH | REQ_FUA);
}

static void issue(struct cache *cache, struct bio *bio)
{
	unsigned long flags;

	if (!bio_triggers_commit(cache, bio)) {
		generic_make_request(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in do_worker().
	 */
	spin_lock_irqsave(&cache->lock, flags);
	cache->commit_requested = true;
	bio_list_add(&cache->deferred_flush_bios, bio);
	spin_unlock_irqrestore(&cache->lock, flags);
}

static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_add(&cache->deferred_writethrough_bios, bio);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void writethrough_endio(struct bio *bio, int err)
{
	struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
	bio->bi_end_io = pb->saved_bi_end_io;

	if (err) {
		bio_endio(bio, err);
		return;
	}

	dm_bio_restore(&pb->bio_details, bio);
	remap_to_cache(pb->cache, bio, pb->cblock);

	/*
	 * We can't issue this bio directly, since we're in interrupt
	 * context.  So it gets put on a bio list for processing by the
	 * worker thread.
	 */
	defer_writethrough_bio(pb->cache, bio);
}

/*
 * When running in writethrough mode we need to send writes to clean blocks
 * to both the cache and origin devices.  In future we'd like to clone the
 * bio and send them in parallel, but for now we're doing them in
 * series as this is easier.
 */
static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
				       dm_oblock_t oblock, dm_cblock_t cblock)
{
	struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);

	pb->cache = cache;
	pb->cblock = cblock;
	pb->saved_bi_end_io = bio->bi_end_io;
	dm_bio_record(&pb->bio_details, bio);
	bio->bi_end_io = writethrough_endio;

	remap_to_origin_clear_discard(pb->cache, bio, oblock);
}

/*----------------------------------------------------------------
 * Migration processing
 *
 * Migration covers moving data from the origin device to the cache, or
 * vice versa.
 *--------------------------------------------------------------*/
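/*
 * Lifecycle, as driven by do_worker(): a new migration first quiesces
 * (waits via the all_io deferred set for in-flight io to drain), then
 * sits on quiesced_migrations until the worker issues the kcopyd copy.
 * Completed copies land on completed_migrations; successful demotions
 * and promotions then wait on need_commit_migrations for a metadata
 * commit before the mapping change becomes visible.
 */
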
static void free_migration(struct dm_cache_migration *mg)
{
	mempool_free(mg, mg->cache->migration_pool);
}

static void inc_nr_migrations(struct cache *cache)
{
	atomic_inc(&cache->nr_migrations);
}

static void dec_nr_migrations(struct cache *cache)
{
	atomic_dec(&cache->nr_migrations);

	/*
	 * Wake the worker in case we're suspending the target.
	 */
	wake_up(&cache->migration_wait);
}

static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
			 bool holder)
{
	(holder ? dm_cell_release : dm_cell_release_no_holder)
		(cache->prison, cell, &cache->deferred_bios);
	free_prison_cell(cache, cell);
}

static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
		       bool holder)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	__cell_defer(cache, cell, holder);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void cleanup_migration(struct dm_cache_migration *mg)
{
	struct cache *cache = mg->cache;
	free_migration(mg);
	dec_nr_migrations(cache);
}

static void migration_failure(struct dm_cache_migration *mg)
{
	struct cache *cache = mg->cache;

	if (mg->writeback) {
		DMWARN_LIMIT("writeback failed; couldn't copy block");
		set_dirty(cache, mg->old_oblock, mg->cblock);
		cell_defer(cache, mg->old_ocell, false);

	} else if (mg->demote) {
		DMWARN_LIMIT("demotion failed; couldn't copy block");
		policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock);

		cell_defer(cache, mg->old_ocell, mg->promote ? 0 : 1);
		if (mg->promote)
			cell_defer(cache, mg->new_ocell, 1);
	} else {
		DMWARN_LIMIT("promotion failed; couldn't copy block");
		policy_remove_mapping(cache->policy, mg->new_oblock);
		cell_defer(cache, mg->new_ocell, 1);
	}

	cleanup_migration(mg);
}

static void migration_success_pre_commit(struct dm_cache_migration *mg)
{
	unsigned long flags;
	struct cache *cache = mg->cache;

	if (mg->writeback) {
		cell_defer(cache, mg->old_ocell, false);
		clear_dirty(cache, mg->old_oblock, mg->cblock);
		cleanup_migration(mg);
		return;

	} else if (mg->demote) {
		if (dm_cache_remove_mapping(cache->cmd, mg->cblock)) {
			DMWARN_LIMIT("demotion failed; couldn't update on disk metadata");
			policy_force_mapping(cache->policy, mg->new_oblock,
					     mg->old_oblock);
			if (mg->promote)
				cell_defer(cache, mg->new_ocell, true);
			cleanup_migration(mg);
			return;
		}
	} else {
		if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) {
			DMWARN_LIMIT("promotion failed; couldn't update on disk metadata");
			policy_remove_mapping(cache->policy, mg->new_oblock);
			cleanup_migration(mg);
			return;
		}
	}

	spin_lock_irqsave(&cache->lock, flags);
	list_add_tail(&mg->list, &cache->need_commit_migrations);
	cache->commit_requested = true;
	spin_unlock_irqrestore(&cache->lock, flags);
}

static void migration_success_post_commit(struct dm_cache_migration *mg)
{
	unsigned long flags;
	struct cache *cache = mg->cache;

	if (mg->writeback) {
		DMWARN("writeback unexpectedly triggered commit");
		return;

	} else if (mg->demote) {
		cell_defer(cache, mg->old_ocell, mg->promote ? 0 : 1);

		if (mg->promote) {
			mg->demote = false;

			spin_lock_irqsave(&cache->lock, flags);
			list_add_tail(&mg->list, &cache->quiesced_migrations);
			spin_unlock_irqrestore(&cache->lock, flags);

		} else
			cleanup_migration(mg);

	} else {
		cell_defer(cache, mg->new_ocell, true);
		clear_dirty(cache, mg->new_oblock, mg->cblock);
		cleanup_migration(mg);
	}
}

static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	unsigned long flags;
	struct dm_cache_migration *mg = (struct dm_cache_migration *) context;
	struct cache *cache = mg->cache;

	if (read_err || write_err)
		mg->err = true;

	spin_lock_irqsave(&cache->lock, flags);
	list_add_tail(&mg->list, &cache->completed_migrations);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void issue_copy_real(struct dm_cache_migration *mg)
{
	int r;
	struct dm_io_region o_region, c_region;
	struct cache *cache = mg->cache;

	o_region.bdev = cache->origin_dev->bdev;
	o_region.count = cache->sectors_per_block;

	c_region.bdev = cache->cache_dev->bdev;
	c_region.sector = from_cblock(mg->cblock) * cache->sectors_per_block;
	c_region.count = cache->sectors_per_block;

	if (mg->writeback || mg->demote) {
		/* demote */
		o_region.sector = from_oblock(mg->old_oblock) * cache->sectors_per_block;
		r = dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, mg);
	} else {
		/* promote */
		o_region.sector = from_oblock(mg->new_oblock) * cache->sectors_per_block;
		r = dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, mg);
	}

	if (r < 0)
		migration_failure(mg);
}

static void avoid_copy(struct dm_cache_migration *mg)
{
	atomic_inc(&mg->cache->stats.copies_avoided);
	migration_success_pre_commit(mg);
}

static void issue_copy(struct dm_cache_migration *mg)
{
	bool avoid;
	struct cache *cache = mg->cache;

	if (mg->writeback || mg->demote)
		avoid = !is_dirty(cache, mg->cblock) ||
			is_discarded_oblock(cache, mg->old_oblock);
	else
		avoid = is_discarded_oblock(cache, mg->new_oblock);

	avoid ? avoid_copy(mg) : issue_copy_real(mg);
}

static void complete_migration(struct dm_cache_migration *mg)
{
	if (mg->err)
		migration_failure(mg);
	else
		migration_success_pre_commit(mg);
}

static void process_migrations(struct cache *cache, struct list_head *head,
			       void (*fn)(struct dm_cache_migration *))
{
	unsigned long flags;
	struct list_head list;
	struct dm_cache_migration *mg, *tmp;

	INIT_LIST_HEAD(&list);
	spin_lock_irqsave(&cache->lock, flags);
	list_splice_init(head, &list);
	spin_unlock_irqrestore(&cache->lock, flags);

	list_for_each_entry_safe(mg, tmp, &list, list)
		fn(mg);
}

static void __queue_quiesced_migration(struct dm_cache_migration *mg)
{
	list_add_tail(&mg->list, &mg->cache->quiesced_migrations);
}

static void queue_quiesced_migration(struct dm_cache_migration *mg)
{
	unsigned long flags;
	struct cache *cache = mg->cache;

	spin_lock_irqsave(&cache->lock, flags);
	__queue_quiesced_migration(mg);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void queue_quiesced_migrations(struct cache *cache, struct list_head *work)
{
	unsigned long flags;
	struct dm_cache_migration *mg, *tmp;

	spin_lock_irqsave(&cache->lock, flags);
	list_for_each_entry_safe(mg, tmp, work, list)
		__queue_quiesced_migration(mg);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void check_for_quiesced_migrations(struct cache *cache,
					  struct per_bio_data *pb)
{
	struct list_head work;

	if (!pb->all_io_entry)
		return;

	INIT_LIST_HEAD(&work);
	if (pb->all_io_entry)
		dm_deferred_entry_dec(pb->all_io_entry, &work);

	if (!list_empty(&work))
		queue_quiesced_migrations(cache, &work);
}

static void quiesce_migration(struct dm_cache_migration *mg)
{
	if (!dm_deferred_set_add_work(mg->cache->all_io_ds, &mg->list))
		queue_quiesced_migration(mg);
}

static void promote(struct cache *cache, struct prealloc *structs,
		    dm_oblock_t oblock, dm_cblock_t cblock,
		    struct dm_bio_prison_cell *cell)
{
	struct dm_cache_migration *mg = prealloc_get_migration(structs);

	mg->err = false;
	mg->writeback = false;
	mg->demote = false;
	mg->promote = true;
	mg->cache = cache;
	mg->new_oblock = oblock;
	mg->cblock = cblock;
	mg->old_ocell = NULL;
	mg->new_ocell = cell;
	mg->start_jiffies = jiffies;

	inc_nr_migrations(cache);
	quiesce_migration(mg);
}

static void writeback(struct cache *cache, struct prealloc *structs,
		      dm_oblock_t oblock, dm_cblock_t cblock,
		      struct dm_bio_prison_cell *cell)
{
	struct dm_cache_migration *mg = prealloc_get_migration(structs);

	mg->err = false;
	mg->writeback = true;
	mg->demote = false;
	mg->promote = false;
	mg->cache = cache;
	mg->old_oblock = oblock;
	mg->cblock = cblock;
	mg->old_ocell = cell;
	mg->new_ocell = NULL;
	mg->start_jiffies = jiffies;

	inc_nr_migrations(cache);
	quiesce_migration(mg);
}

static void demote_then_promote(struct cache *cache, struct prealloc *structs,
				dm_oblock_t old_oblock, dm_oblock_t new_oblock,
				dm_cblock_t cblock,
				struct dm_bio_prison_cell *old_ocell,
				struct dm_bio_prison_cell *new_ocell)
{
	struct dm_cache_migration *mg = prealloc_get_migration(structs);

	mg->err = false;
	mg->writeback = false;
	mg->demote = true;
	mg->promote = true;
	mg->cache = cache;
	mg->old_oblock = old_oblock;
	mg->new_oblock = new_oblock;
	mg->cblock = cblock;
	mg->old_ocell = old_ocell;
	mg->new_ocell = new_ocell;
	mg->start_jiffies = jiffies;

	inc_nr_migrations(cache);
	quiesce_migration(mg);
}

/*----------------------------------------------------------------
 * bio processing
 *--------------------------------------------------------------*/
static void defer_bio(struct cache *cache, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_add(&cache->deferred_bios, bio);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void process_flush_bio(struct cache *cache, struct bio *bio)
{
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	BUG_ON(bio->bi_size);
	if (!pb->req_nr)
		remap_to_origin(cache, bio);
	else
		remap_to_cache(cache, bio, 0);

	issue(cache, bio);
}

/*
 * People generally discard large parts of a device, e.g. the whole device
 * when formatting.  Splitting these large discards up into cache block
 * sized ios and then quiescing (always necessary for discard) takes too
 * long.
 *
 * We keep it simple, and allow any size of discard to come in, and just
 * mark off blocks on the discard bitset.  No passdown occurs!
 *
 * To implement passdown we need to change the bio_prison such that a cell
 * can have a key that spans many blocks.
 */
static void process_discard_bio(struct cache *cache, struct bio *bio)
{
	dm_block_t start_block = dm_sector_div_up(bio->bi_sector,
						  cache->discard_block_size);
	dm_block_t end_block = bio->bi_sector + bio_sectors(bio);
	dm_block_t b;

	end_block = block_div(end_block, cache->discard_block_size);

	for (b = start_block; b < end_block; b++)
		set_discard(cache, to_dblock(b));

	bio_endio(bio, 0);
}

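/*
 * Migration throttling: a new migration may only start while the
 * volume of in-flight copies (in sectors) stays below the configured
 * migration_threshold.
 */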
static bool spare_migration_bandwidth(struct cache *cache)
{
	sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) *
		cache->sectors_per_block;
	return current_volume < cache->migration_threshold;
}

static bool is_writethrough_io(struct cache *cache, struct bio *bio,
			       dm_cblock_t cblock)
{
	return bio_data_dir(bio) == WRITE &&
		cache->features.write_through && !is_dirty(cache, cblock);
}

static void inc_hit_counter(struct cache *cache, struct bio *bio)
{
	atomic_inc(bio_data_dir(bio) == READ ?
		   &cache->stats.read_hit : &cache->stats.write_hit);
}

static void inc_miss_counter(struct cache *cache, struct bio *bio)
{
	atomic_inc(bio_data_dir(bio) == READ ?
		   &cache->stats.read_miss : &cache->stats.write_miss);
}

static void process_bio(struct cache *cache, struct prealloc *structs,
			struct bio *bio)
{
	int r;
	bool release_cell = true;
	dm_oblock_t block = get_bio_block(cache, bio);
	struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
	struct policy_result lookup_result;
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
	bool discarded_block = is_discarded_oblock(cache, block);
	bool can_migrate = discarded_block || spare_migration_bandwidth(cache);

	/*
	 * Check to see if that block is currently migrating.
	 */
	cell_prealloc = prealloc_get_cell(structs);
	r = bio_detain(cache, block, bio, cell_prealloc,
		       (cell_free_fn) prealloc_put_cell,
		       structs, &new_ocell);
	if (r > 0)
		return;

	r = policy_map(cache->policy, block, true, can_migrate, discarded_block,
		       bio, &lookup_result);

	if (r == -EWOULDBLOCK)
		/* migration has been denied */
		lookup_result.op = POLICY_MISS;

	switch (lookup_result.op) {
	case POLICY_HIT:
		inc_hit_counter(cache, bio);
		pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);

		if (is_writethrough_io(cache, bio, lookup_result.cblock))
			remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
		else
			remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);

		issue(cache, bio);
		break;

	case POLICY_MISS:
		inc_miss_counter(cache, bio);
		pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
		remap_to_origin_clear_discard(cache, bio, block);
		issue(cache, bio);
		break;

	case POLICY_NEW:
		atomic_inc(&cache->stats.promotion);
		promote(cache, structs, block, lookup_result.cblock, new_ocell);
		release_cell = false;
		break;

	case POLICY_REPLACE:
		cell_prealloc = prealloc_get_cell(structs);
		r = bio_detain(cache, lookup_result.old_oblock, bio, cell_prealloc,
			       (cell_free_fn) prealloc_put_cell,
			       structs, &old_ocell);
		if (r > 0) {
			/*
			 * We have to be careful to avoid lock inversion of
			 * the cells.  So we back off, and wait for the
			 * old_ocell to become free.
			 */
			policy_force_mapping(cache->policy, block,
					     lookup_result.old_oblock);
			atomic_inc(&cache->stats.cache_cell_clash);
			break;
		}
		atomic_inc(&cache->stats.demotion);
		atomic_inc(&cache->stats.promotion);

		demote_then_promote(cache, structs, lookup_result.old_oblock,
				    block, lookup_result.cblock,
				    old_ocell, new_ocell);
		release_cell = false;
		break;

	default:
		DMERR_LIMIT("%s: erroring bio, unknown policy op: %u", __func__,
			    (unsigned) lookup_result.op);
		bio_io_error(bio);
	}

	if (release_cell)
		cell_defer(cache, new_ocell, false);
}

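/*
 * A periodic commit is due once COMMIT_PERIOD has elapsed; the first
 * comparison handles jiffies wrapping around.
 */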
static int need_commit_due_to_time(struct cache *cache)
{
	return jiffies < cache->last_commit_jiffies ||
	       jiffies > cache->last_commit_jiffies + COMMIT_PERIOD;
}

static int commit_if_needed(struct cache *cache)
{
	if (dm_cache_changed_this_transaction(cache->cmd) &&
	    (cache->commit_requested || need_commit_due_to_time(cache))) {
		atomic_inc(&cache->stats.commit_count);
		cache->last_commit_jiffies = jiffies;
		cache->commit_requested = false;
		return dm_cache_commit(cache->cmd, false);
	}

	return 0;
}

static void process_deferred_bios(struct cache *cache)
{
	unsigned long flags;
	struct bio_list bios;
	struct bio *bio;
	struct prealloc structs;

	memset(&structs, 0, sizeof(structs));
	bio_list_init(&bios);

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_merge(&bios, &cache->deferred_bios);
	bio_list_init(&cache->deferred_bios);
	spin_unlock_irqrestore(&cache->lock, flags);

	while (!bio_list_empty(&bios)) {
		/*
		 * If we've got no free migration structs, and processing
		 * this bio might require one, we pause until there are some
		 * prepared mappings to process.
		 */
		if (prealloc_data_structs(cache, &structs)) {
			spin_lock_irqsave(&cache->lock, flags);
			bio_list_merge(&cache->deferred_bios, &bios);
			spin_unlock_irqrestore(&cache->lock, flags);
			break;
		}

		bio = bio_list_pop(&bios);

		if (bio->bi_rw & REQ_FLUSH)
			process_flush_bio(cache, bio);
		else if (bio->bi_rw & REQ_DISCARD)
			process_discard_bio(cache, bio);
		else
			process_bio(cache, &structs, bio);
	}

	prealloc_free_structs(cache, &structs);
}

static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
{
	unsigned long flags;
	struct bio_list bios;
	struct bio *bio;

	bio_list_init(&bios);

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_merge(&bios, &cache->deferred_flush_bios);
	bio_list_init(&cache->deferred_flush_bios);
	spin_unlock_irqrestore(&cache->lock, flags);

	while ((bio = bio_list_pop(&bios)))
		submit_bios ? generic_make_request(bio) : bio_io_error(bio);
}

static void process_deferred_writethrough_bios(struct cache *cache)
{
	unsigned long flags;
	struct bio_list bios;
	struct bio *bio;

	bio_list_init(&bios);

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_merge(&bios, &cache->deferred_writethrough_bios);
	bio_list_init(&cache->deferred_writethrough_bios);
	spin_unlock_irqrestore(&cache->lock, flags);

	while ((bio = bio_list_pop(&bios)))
		generic_make_request(bio);
}

static void writeback_some_dirty_blocks(struct cache *cache)
{
	int r = 0;
	dm_oblock_t oblock;
	dm_cblock_t cblock;
	struct prealloc structs;
	struct dm_bio_prison_cell *old_ocell;

	memset(&structs, 0, sizeof(structs));

	while (spare_migration_bandwidth(cache)) {
		if (prealloc_data_structs(cache, &structs))
			break;

		r = policy_writeback_work(cache->policy, &oblock, &cblock);
		if (r)
			break;

		r = get_cell(cache, oblock, &structs, &old_ocell);
		if (r) {
			policy_set_dirty(cache->policy, oblock);
			break;
		}

		writeback(cache, &structs, oblock, cblock, old_ocell);
	}

	prealloc_free_structs(cache, &structs);
}

/*----------------------------------------------------------------
 * Main worker loop
 *--------------------------------------------------------------*/
static bool is_quiescing(struct cache *cache)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	r = cache->quiescing;
	spin_unlock_irqrestore(&cache->lock, flags);

	return r;
}

static void ack_quiescing(struct cache *cache)
{
	if (is_quiescing(cache)) {
		atomic_inc(&cache->quiescing_ack);
		wake_up(&cache->quiescing_wait);
	}
}

static void wait_for_quiescing_ack(struct cache *cache)
{
	wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack));
}

static void start_quiescing(struct cache *cache)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	cache->quiescing = true;
	spin_unlock_irqrestore(&cache->lock, flags);

	wait_for_quiescing_ack(cache);
}

static void stop_quiescing(struct cache *cache)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	cache->quiescing = false;
	spin_unlock_irqrestore(&cache->lock, flags);

	atomic_set(&cache->quiescing_ack, 0);
}

static void wait_for_migrations(struct cache *cache)
{
	wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations));
}

static void stop_worker(struct cache *cache)
{
	cancel_delayed_work(&cache->waker);
	flush_workqueue(cache->wq);
}

static void requeue_deferred_io(struct cache *cache)
{
	struct bio *bio;
	struct bio_list bios;

	bio_list_init(&bios);
	bio_list_merge(&bios, &cache->deferred_bios);
	bio_list_init(&cache->deferred_bios);

	while ((bio = bio_list_pop(&bios)))
		bio_endio(bio, DM_ENDIO_REQUEUE);
}

static int more_work(struct cache *cache)
{
	if (is_quiescing(cache))
		return !list_empty(&cache->quiesced_migrations) ||
			!list_empty(&cache->completed_migrations) ||
			!list_empty(&cache->need_commit_migrations);
	else
		return !bio_list_empty(&cache->deferred_bios) ||
			!bio_list_empty(&cache->deferred_flush_bios) ||
			!bio_list_empty(&cache->deferred_writethrough_bios) ||
			!list_empty(&cache->quiesced_migrations) ||
			!list_empty(&cache->completed_migrations) ||
			!list_empty(&cache->need_commit_migrations);
}

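/*
 * The worker does all the heavy lifting: remapping deferred bios,
 * writing back dirty blocks, stepping migrations through their stages
 * and batching up metadata commits.
 */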
static void do_worker(struct work_struct *ws)
{
	struct cache *cache = container_of(ws, struct cache, worker);

	do {
		if (!is_quiescing(cache)) {
			writeback_some_dirty_blocks(cache);
			process_deferred_writethrough_bios(cache);
			process_deferred_bios(cache);
		}

		process_migrations(cache, &cache->quiesced_migrations, issue_copy);
		process_migrations(cache, &cache->completed_migrations, complete_migration);

		if (commit_if_needed(cache)) {
			process_deferred_flush_bios(cache, false);

			/*
			 * FIXME: rollback metadata or just go into a
			 * failure mode and error everything
			 */
		} else {
			process_deferred_flush_bios(cache, true);
			process_migrations(cache, &cache->need_commit_migrations,
					   migration_success_post_commit);
		}

		ack_quiescing(cache);

	} while (more_work(cache));
}

/*
 * We want to commit periodically so that not too much
 * unwritten metadata builds up.
 */
static void do_waker(struct work_struct *ws)
{
	struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
	policy_tick(cache->policy);
	wake_worker(cache);
	queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
}

/*----------------------------------------------------------------*/

static int is_congested(struct dm_dev *dev, int bdi_bits)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);
	return bdi_congested(&q->backing_dev_info, bdi_bits);
}

static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
{
	struct cache *cache = container_of(cb, struct cache, callbacks);

	return is_congested(cache->origin_dev, bdi_bits) ||
		is_congested(cache->cache_dev, bdi_bits);
}

/*----------------------------------------------------------------
 * Target methods
 *--------------------------------------------------------------*/

/*
 * This function gets called on the error paths of the constructor, so we
 * have to cope with a partially initialised struct.
 */
static void destroy(struct cache *cache)
{
	unsigned i;

	if (cache->next_migration)
		mempool_free(cache->next_migration, cache->migration_pool);

	if (cache->migration_pool)
		mempool_destroy(cache->migration_pool);

	if (cache->all_io_ds)
		dm_deferred_set_destroy(cache->all_io_ds);

	if (cache->prison)
		dm_bio_prison_destroy(cache->prison);

	if (cache->wq)
		destroy_workqueue(cache->wq);

	if (cache->dirty_bitset)
		free_bitset(cache->dirty_bitset);

	if (cache->discard_bitset)
		free_bitset(cache->discard_bitset);

	if (cache->copier)
		dm_kcopyd_client_destroy(cache->copier);

	if (cache->cmd)
		dm_cache_metadata_close(cache->cmd);

	if (cache->metadata_dev)
		dm_put_device(cache->ti, cache->metadata_dev);

	if (cache->origin_dev)
		dm_put_device(cache->ti, cache->origin_dev);

	if (cache->cache_dev)
		dm_put_device(cache->ti, cache->cache_dev);

	if (cache->policy)
		dm_cache_policy_destroy(cache->policy);

	for (i = 0; i < cache->nr_ctr_args; i++)
		kfree(cache->ctr_args[i]);
	kfree(cache->ctr_args);

	kfree(cache);
}

static void cache_dtr(struct dm_target *ti)
{
	struct cache *cache = ti->private;

	destroy(cache);
}

static sector_t get_dev_size(struct dm_dev *dev)
{
	return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
}

/*----------------------------------------------------------------*/

/*
 * Construct a cache device mapping.
 *
 * cache <metadata dev> <cache dev> <origin dev> <block size>
 *       <#feature args> [<feature arg>]*
 *       <policy> <#policy args> [<policy arg>]*
 *
 * metadata dev	  : fast device holding the persistent metadata
 * cache dev	  : fast device holding cached data blocks
 * origin dev	  : slow device holding original data blocks
 * block size	  : cache unit size in sectors
 *
 * #feature args  : number of feature arguments passed
 * feature args	  : writethrough.  (The default is writeback.)
 *
 * policy	  : the replacement policy to use
 * #policy args	  : an even number of policy arguments corresponding
 *		    to key/value pairs passed to the policy
 * policy args	  : key/value pairs passed to the policy
 *		    E.g. 'sequential_threshold 1024'
 *		    See cache-policies.txt for details.
 *
 * Optional feature arguments are:
 *   writethrough : write through caching that prohibits cache block
 *		    content from being different from origin block content.
 *		    Without this argument, the default behaviour is to write
 *		    back cache block contents later for performance reasons,
 *		    so they may differ from the corresponding origin blocks.
 */
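
/*
 * An example table line (illustrative only; the device names and sizes
 * here are made up):
 *
 *   0 41943040 cache /dev/mapper/fast-meta /dev/mapper/fast-data
 *       /dev/sdb 512 1 writeback default 0
 *
 * i.e. cache a 20GiB origin (/dev/sdb) in 512-sector (256KiB) blocks,
 * in writeback mode, using the 'default' policy with no policy args.
 */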
struct cache_args {
	struct dm_target *ti;

	struct dm_dev *metadata_dev;

	struct dm_dev *cache_dev;
	sector_t cache_sectors;

	struct dm_dev *origin_dev;
	sector_t origin_sectors;

	uint32_t block_size;

	const char *policy_name;
	int policy_argc;
	const char **policy_argv;

	struct cache_features features;
};

static void destroy_cache_args(struct cache_args *ca)
{
	if (ca->metadata_dev)
		dm_put_device(ca->ti, ca->metadata_dev);

	if (ca->cache_dev)
		dm_put_device(ca->ti, ca->cache_dev);

	if (ca->origin_dev)
		dm_put_device(ca->ti, ca->origin_dev);

	kfree(ca);
}

static bool at_least_one_arg(struct dm_arg_set *as, char **error)
{
	if (!as->argc) {
		*error = "Insufficient args";
		return false;
	}

	return true;
}

static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
			      char **error)
{
	int r;
	sector_t metadata_dev_size;
	char b[BDEVNAME_SIZE];

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &ca->metadata_dev);
	if (r) {
		*error = "Error opening metadata device";
		return r;
	}

	metadata_dev_size = get_dev_size(ca->metadata_dev);
	if (metadata_dev_size > DM_CACHE_METADATA_MAX_SECTORS_WARNING)
		DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
		       bdevname(ca->metadata_dev->bdev, b), DM_CACHE_METADATA_MAX_SECTORS_WARNING);
1659
1660 return 0;
1661}
1662
1663static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as,
1664 char **error)
1665{
1666 int r;
1667
1668 if (!at_least_one_arg(as, error))
1669 return -EINVAL;
1670
1671 r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
1672 &ca->cache_dev);
1673 if (r) {
1674 *error = "Error opening cache device";
1675 return r;
1676 }
1677 ca->cache_sectors = get_dev_size(ca->cache_dev);
1678
1679 return 0;
1680}
1681
1682static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
1683 char **error)
1684{
1685 int r;
1686
1687 if (!at_least_one_arg(as, error))
1688 return -EINVAL;
1689
1690 r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
1691 &ca->origin_dev);
1692 if (r) {
1693 *error = "Error opening origin device";
1694 return r;
1695 }
1696
1697 ca->origin_sectors = get_dev_size(ca->origin_dev);
1698 if (ca->ti->len > ca->origin_sectors) {
1699 *error = "Device size larger than cached device";
1700 return -EINVAL;
1701 }
1702
1703 return 0;
1704}
1705
1706static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
1707 char **error)
1708{
1709 unsigned long tmp;
1710
1711 if (!at_least_one_arg(as, error))
1712 return -EINVAL;
1713
1714 if (kstrtoul(dm_shift_arg(as), 10, &tmp) || !tmp ||
1715 tmp < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
1716 tmp & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
1717 *error = "Invalid data block size";
1718 return -EINVAL;
1719 }
1720
1721 if (tmp > ca->cache_sectors) {
1722 *error = "Data block size is larger than the cache device";
1723 return -EINVAL;
1724 }
1725
1726 ca->block_size = tmp;
1727
1728 return 0;
1729}
1730
1731static void init_features(struct cache_features *cf)
1732{
1733 cf->mode = CM_WRITE;
1734 cf->write_through = false;
1735}
1736
1737static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
1738 char **error)
1739{
1740 static struct dm_arg _args[] = {
1741 {0, 1, "Invalid number of cache feature arguments"},
1742 };
1743
1744 int r;
1745 unsigned argc;
1746 const char *arg;
1747 struct cache_features *cf = &ca->features;
1748
1749 init_features(cf);
1750
1751 r = dm_read_arg_group(_args, as, &argc, error);
1752 if (r)
1753 return -EINVAL;
1754
1755 while (argc--) {
1756 arg = dm_shift_arg(as);
1757
1758 if (!strcasecmp(arg, "writeback"))
1759 cf->write_through = false;
1760
1761 else if (!strcasecmp(arg, "writethrough"))
1762 cf->write_through = true;
1763
1764 else {
1765 *error = "Unrecognised cache feature requested";
1766 return -EINVAL;
1767 }
1768 }
1769
1770 return 0;
1771}
1772
1773static int parse_policy(struct cache_args *ca, struct dm_arg_set *as,
1774 char **error)
1775{
1776 static struct dm_arg _args[] = {
1777 {0, 1024, "Invalid number of policy arguments"},
1778 };
1779
1780 int r;
1781
1782 if (!at_least_one_arg(as, error))
1783 return -EINVAL;
1784
1785 ca->policy_name = dm_shift_arg(as);
1786
1787 r = dm_read_arg_group(_args, as, &ca->policy_argc, error);
1788 if (r)
1789 return -EINVAL;
1790
1791 ca->policy_argv = (const char **)as->argv;
1792 dm_consume_args(as, ca->policy_argc);
1793
1794 return 0;
1795}
1796
1797static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
1798 char **error)
1799{
1800 int r;
1801 struct dm_arg_set as;
1802
1803 as.argc = argc;
1804 as.argv = argv;
1805
1806 r = parse_metadata_dev(ca, &as, error);
1807 if (r)
1808 return r;
1809
1810 r = parse_cache_dev(ca, &as, error);
1811 if (r)
1812 return r;
1813
1814 r = parse_origin_dev(ca, &as, error);
1815 if (r)
1816 return r;
1817
1818 r = parse_block_size(ca, &as, error);
1819 if (r)
1820 return r;
1821
1822 r = parse_features(ca, &as, error);
1823 if (r)
1824 return r;
1825
1826 r = parse_policy(ca, &as, error);
1827 if (r)
1828 return r;
1829
1830 return 0;
1831}
1832
1833/*----------------------------------------------------------------*/
1834
1835static struct kmem_cache *migration_cache;
1836
Alasdair G Kergon2c73c472013-05-10 14:37:21 +01001837#define NOT_CORE_OPTION 1
1838
Joe Thornber2f14f4b2013-05-10 14:37:21 +01001839static int process_config_option(struct cache *cache, const char *key, const char *value)
Alasdair G Kergon2c73c472013-05-10 14:37:21 +01001840{
1841 unsigned long tmp;
1842
Joe Thornber2f14f4b2013-05-10 14:37:21 +01001843 if (!strcasecmp(key, "migration_threshold")) {
1844 if (kstrtoul(value, 10, &tmp))
Alasdair G Kergon2c73c472013-05-10 14:37:21 +01001845 return -EINVAL;
1846
1847 cache->migration_threshold = tmp;
1848 return 0;
1849 }
1850
1851 return NOT_CORE_OPTION;
1852}
1853
Joe Thornber2f14f4b2013-05-10 14:37:21 +01001854static int set_config_value(struct cache *cache, const char *key, const char *value)
1855{
1856 int r = process_config_option(cache, key, value);
1857
1858 if (r == NOT_CORE_OPTION)
1859 r = policy_set_config_value(cache->policy, key, value);
1860
1861 if (r)
1862 DMWARN("bad config value for %s: %s", key, value);
1863
1864 return r;
1865}
1866
1867static int set_config_values(struct cache *cache, int argc, const char **argv)
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001868{
1869 int r = 0;
1870
1871 if (argc & 1) {
1872 DMWARN("Odd number of policy arguments given but they should be <key> <value> pairs.");
1873 return -EINVAL;
1874 }
1875
1876 while (argc) {
Joe Thornber2f14f4b2013-05-10 14:37:21 +01001877 r = set_config_value(cache, argv[0], argv[1]);
1878 if (r)
1879 break;
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001880
1881 argc -= 2;
1882 argv += 2;
1883 }
1884
1885 return r;
1886}
1887
1888static int create_cache_policy(struct cache *cache, struct cache_args *ca,
1889 char **error)
1890{
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001891 cache->policy = dm_cache_policy_create(ca->policy_name,
1892 cache->cache_size,
1893 cache->origin_sectors,
1894 cache->sectors_per_block);
1895 if (!cache->policy) {
1896 *error = "Error creating cache's policy";
1897 return -ENOMEM;
1898 }
1899
Joe Thornber2f14f4b2013-05-10 14:37:21 +01001900 return 0;
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001901}
1902
1903/*
1904 * We want the discard block size to be a power of two, at least the size
1905 * of the cache block size, and have no more than 2^14 discard blocks
1906 * across the origin.
1907 */
1908#define MAX_DISCARD_BLOCKS (1 << 14)
1909
1910static bool too_many_discard_blocks(sector_t discard_block_size,
1911 sector_t origin_size)
1912{
1913 (void) sector_div(origin_size, discard_block_size);
1914
1915 return origin_size > MAX_DISCARD_BLOCKS;
1916}
1917
1918static sector_t calculate_discard_block_size(sector_t cache_block_size,
1919 sector_t origin_size)
1920{
1921 sector_t discard_block_size;
1922
1923 discard_block_size = roundup_pow_of_two(cache_block_size);
1924
1925 if (origin_size)
1926 while (too_many_discard_blocks(discard_block_size, origin_size))
1927 discard_block_size *= 2;
1928
1929 return discard_block_size;
1930}
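/*
 * Worked example with hypothetical sizes: for a 256KiB cache block
 * (512 sectors) on a 1TiB origin (2^31 sectors),
 * roundup_pow_of_two(512) = 512 would give 2^31 / 2^9 = 2^22 discard
 * blocks.  That exceeds MAX_DISCARD_BLOCKS (2^14), so the loop doubles
 * the size until 2^31 / discard_block_size <= 2^14, settling on a
 * 2^17 sector (64MiB) discard block.
 */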
1931
1932#define DEFAULT_MIGRATION_THRESHOLD 2048
1933
1934static int cache_create(struct cache_args *ca, struct cache **result)
1935{
1936 int r = 0;
1937 char **error = &ca->ti->error;
1938 struct cache *cache;
1939 struct dm_target *ti = ca->ti;
1940 dm_block_t origin_blocks;
1941 struct dm_cache_metadata *cmd;
1942 bool may_format = ca->features.mode == CM_WRITE;
1943
1944 cache = kzalloc(sizeof(*cache), GFP_KERNEL);
1945 if (!cache)
1946 return -ENOMEM;
1947
1948 cache->ti = ca->ti;
1949 ti->private = cache;
1950 ti->num_flush_bios = 2;
1951 ti->flush_supported = true;
1952
1953 ti->num_discard_bios = 1;
1954 ti->discards_supported = true;
1955 ti->discard_zeroes_data_unsupported = true;
1956
1957 cache->features = ca->features;
1958 ti->per_bio_data_size = get_per_bio_data_size(cache);
1959
1960 cache->callbacks.congested_fn = cache_is_congested;
1961 dm_table_add_target_callbacks(ti->table, &cache->callbacks);
1962
1963 cache->metadata_dev = ca->metadata_dev;
1964 cache->origin_dev = ca->origin_dev;
1965 cache->cache_dev = ca->cache_dev;
1966
1967 ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL;
1968
1969 /* FIXME: factor out this whole section */
1970 origin_blocks = cache->origin_sectors = ca->origin_sectors;
1971 origin_blocks = block_div(origin_blocks, ca->block_size);
1972 cache->origin_blocks = to_oblock(origin_blocks);
1973
1974 cache->sectors_per_block = ca->block_size;
1975 if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) {
1976 r = -EINVAL;
1977 goto bad;
1978 }
1979
1980 if (ca->block_size & (ca->block_size - 1)) {
1981 dm_block_t cache_size = ca->cache_sectors;
1982
1983 cache->sectors_per_block_shift = -1;
1984 cache_size = block_div(cache_size, ca->block_size);
1985 cache->cache_size = to_cblock(cache_size);
1986 } else {
1987 cache->sectors_per_block_shift = __ffs(ca->block_size);
1988 cache->cache_size = to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift);
1989 }
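/*
 * Illustrative arithmetic for the two branches above (block sizes are
 * hypothetical): 1024 sectors is a power of two, so
 * sectors_per_block_shift = __ffs(1024) = 10 and block numbers can be
 * computed with cheap shifts; 768 sectors is not, so the shift is set
 * to -1 and the slower block_div() path is taken instead.
 */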
1990
1991 r = create_cache_policy(cache, ca, error);
1992 if (r)
1993 goto bad;
1994
1995 cache->policy_nr_args = ca->policy_argc;
1996 cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
1997
1998 r = set_config_values(cache, ca->policy_argc, ca->policy_argv);
1999 if (r) {
2000 *error = "Error setting cache policy's config values";
2001 goto bad;
2002 }
2003
2004 cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
2005 ca->block_size, may_format,
2006 dm_cache_policy_get_hint_size(cache->policy));
2007 if (IS_ERR(cmd)) {
2008 *error = "Error creating metadata object";
2009 r = PTR_ERR(cmd);
2010 goto bad;
2011 }
2012 cache->cmd = cmd;
2013
2014 spin_lock_init(&cache->lock);
2015 bio_list_init(&cache->deferred_bios);
2016 bio_list_init(&cache->deferred_flush_bios);
2017 bio_list_init(&cache->deferred_writethrough_bios);
2018 INIT_LIST_HEAD(&cache->quiesced_migrations);
2019 INIT_LIST_HEAD(&cache->completed_migrations);
2020 INIT_LIST_HEAD(&cache->need_commit_migrations);
2021 atomic_set(&cache->nr_migrations, 0);
2022 init_waitqueue_head(&cache->migration_wait);
2023
2024 init_waitqueue_head(&cache->quiescing_wait);
2025 atomic_set(&cache->quiescing_ack, 0);
2026
2027 r = -ENOMEM;
2028 cache->nr_dirty = 0;
2029 cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
2030 if (!cache->dirty_bitset) {
2031 *error = "could not allocate dirty bitset";
2032 goto bad;
2033 }
2034 clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));
2035
2036 cache->discard_block_size =
2037 calculate_discard_block_size(cache->sectors_per_block,
2038 cache->origin_sectors);
2039 cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks);
2040 cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
2041 if (!cache->discard_bitset) {
2042 *error = "could not allocate discard bitset";
2043 goto bad;
2044 }
2045 clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
2046
2047 cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
2048 if (IS_ERR(cache->copier)) {
2049 *error = "could not create kcopyd client";
2050 r = PTR_ERR(cache->copier);
2051 goto bad;
2052 }
2053
2054 cache->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
2055 if (!cache->wq) {
2056 *error = "could not create workqueue for metadata object";
2057 goto bad;
2058 }
2059 INIT_WORK(&cache->worker, do_worker);
2060 INIT_DELAYED_WORK(&cache->waker, do_waker);
2061 cache->last_commit_jiffies = jiffies;
2062
2063 cache->prison = dm_bio_prison_create(PRISON_CELLS);
2064 if (!cache->prison) {
2065 *error = "could not create bio prison";
2066 goto bad;
2067 }
2068
2069 cache->all_io_ds = dm_deferred_set_create();
2070 if (!cache->all_io_ds) {
2071 *error = "could not create all_io deferred set";
2072 goto bad;
2073 }
2074
2075 cache->migration_pool = mempool_create_slab_pool(MIGRATION_POOL_SIZE,
2076 migration_cache);
2077 if (!cache->migration_pool) {
2078 *error = "Error creating cache's migration mempool";
2079 goto bad;
2080 }
2081
2082 cache->next_migration = NULL;
2083
2084 cache->need_tick_bio = true;
2085 cache->sized = false;
2086 cache->quiescing = false;
2087 cache->commit_requested = false;
2088 cache->loaded_mappings = false;
2089 cache->loaded_discards = false;
2090
2091 load_stats(cache);
2092
2093 atomic_set(&cache->stats.demotion, 0);
2094 atomic_set(&cache->stats.promotion, 0);
2095 atomic_set(&cache->stats.copies_avoided, 0);
2096 atomic_set(&cache->stats.cache_cell_clash, 0);
2097 atomic_set(&cache->stats.commit_count, 0);
2098 atomic_set(&cache->stats.discard_count, 0);
2099
2100 *result = cache;
2101 return 0;
2102
2103bad:
2104 destroy(cache);
2105 return r;
2106}
2107
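/*
 * The constructor arguments are duplicated so that cache_status() can
 * replay them verbatim for STATUSTYPE_TABLE.
 */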
2108static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
2109{
2110 unsigned i;
2111 const char **copy;
2112
2113 copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
2114 if (!copy)
2115 return -ENOMEM;
2116 for (i = 0; i < argc; i++) {
2117 copy[i] = kstrdup(argv[i], GFP_KERNEL);
2118 if (!copy[i]) {
2119 while (i--)
2120 kfree(copy[i]);
2121 kfree(copy);
2122 return -ENOMEM;
2123 }
2124 }
2125
2126 cache->nr_ctr_args = argc;
2127 cache->ctr_args = copy;
2128
2129 return 0;
2130}
2131
2132static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
2133{
2134 int r = -EINVAL;
2135 struct cache_args *ca;
2136 struct cache *cache = NULL;
2137
2138 ca = kzalloc(sizeof(*ca), GFP_KERNEL);
2139 if (!ca) {
2140 ti->error = "Error allocating memory for cache";
2141 return -ENOMEM;
2142 }
2143 ca->ti = ti;
2144
2145 r = parse_cache_args(ca, argc, argv, &ti->error);
2146 if (r)
2147 goto out;
2148
2149 r = cache_create(ca, &cache);
2150 if (r)
2151 goto out;
2152
2153 r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
2154 if (r) {
2155 destroy(cache);
2156 goto out;
2157 }
2158
2159 ti->private = cache;
2160
2161out:
2162 destroy_cache_args(ca);
2163 return r;
2164}
2165
2166static int cache_map(struct dm_target *ti, struct bio *bio)
2167{
2168 struct cache *cache = ti->private;
2169
2170 int r;
2171 dm_oblock_t block = get_bio_block(cache, bio);
2172 size_t pb_data_size = get_per_bio_data_size(cache);
2173 bool can_migrate = false;
2174 bool discarded_block;
2175 struct dm_bio_prison_cell *cell;
2176 struct policy_result lookup_result;
2177 struct per_bio_data *pb;
2178
2179 if (from_oblock(block) >= from_oblock(cache->origin_blocks)) {
2180 /*
2181 * This can only occur if the io goes to a partial block at
2182 * the end of the origin device. We don't cache these.
2183 * Just remap to the origin and carry on.
2184 */
2185 remap_to_origin_clear_discard(cache, bio, block);
2186 return DM_MAPIO_REMAPPED;
2187 }
2188
2189 pb = init_per_bio_data(bio, pb_data_size);
2190
2191 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
2192 defer_bio(cache, bio);
2193 return DM_MAPIO_SUBMITTED;
2194 }
2195
2196 /*
2197 * Check to see if that block is currently migrating.
2198 */
2199 cell = alloc_prison_cell(cache);
2200 if (!cell) {
2201 defer_bio(cache, bio);
2202 return DM_MAPIO_SUBMITTED;
2203 }
2204
2205 r = bio_detain(cache, block, bio, cell,
2206 (cell_free_fn) free_prison_cell,
2207 cache, &cell);
2208 if (r) {
2209 if (r < 0)
2210 defer_bio(cache, bio);
2211
2212 return DM_MAPIO_SUBMITTED;
2213 }
2214
2215 discarded_block = is_discarded_oblock(cache, block);
2216
2217 r = policy_map(cache->policy, block, false, can_migrate, discarded_block,
2218 bio, &lookup_result);
2219 if (r == -EWOULDBLOCK) {
2220 cell_defer(cache, cell, true);
2221 return DM_MAPIO_SUBMITTED;
2222
2223 } else if (r) {
2224 DMERR_LIMIT("Unexpected return from cache replacement policy: %d", r);
2225 bio_io_error(bio);
2226 return DM_MAPIO_SUBMITTED;
2227 }
2228
2229 switch (lookup_result.op) {
2230 case POLICY_HIT:
2231 inc_hit_counter(cache, bio);
2232 pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
2233
2234 if (is_writethrough_io(cache, bio, lookup_result.cblock))
2235 remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
2236 else
2237 remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
2238
2239 cell_defer(cache, cell, false);
2240 break;
2241
2242 case POLICY_MISS:
2243 inc_miss_counter(cache, bio);
2244 pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
2245
2246 if (pb->req_nr != 0) {
2247 /*
2248 * This is a duplicate writethrough io that is no
2249 * longer needed because the block has been demoted.
2250 */
2251 bio_endio(bio, 0);
2252 cell_defer(cache, cell, false);
2253 return DM_MAPIO_SUBMITTED;
2254 } else {
2255 remap_to_origin_clear_discard(cache, bio, block);
2256 cell_defer(cache, cell, false);
2257 }
2258 break;
2259
2260 default:
2261 DMERR_LIMIT("%s: erroring bio: unknown policy op: %u", __func__,
2262 (unsigned) lookup_result.op);
2263 bio_io_error(bio);
2264 return DM_MAPIO_SUBMITTED;
2265 }
2266
2267 return DM_MAPIO_REMAPPED;
2268}
2269
2270static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
2271{
2272 struct cache *cache = ti->private;
2273 unsigned long flags;
2274 size_t pb_data_size = get_per_bio_data_size(cache);
2275 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
2276
2277 if (pb->tick) {
2278 policy_tick(cache->policy);
2279
2280 spin_lock_irqsave(&cache->lock, flags);
2281 cache->need_tick_bio = true;
2282 spin_unlock_irqrestore(&cache->lock, flags);
2283 }
2284
2285 check_for_quiesced_migrations(cache, pb);
2286
2287 return 0;
2288}
2289
2290static int write_dirty_bitset(struct cache *cache)
2291{
2292 unsigned i, r;
2293
2294 for (i = 0; i < from_cblock(cache->cache_size); i++) {
2295 r = dm_cache_set_dirty(cache->cmd, to_cblock(i),
2296 is_dirty(cache, to_cblock(i)));
2297 if (r)
2298 return r;
2299 }
2300
2301 return 0;
2302}
2303
2304static int write_discard_bitset(struct cache *cache)
2305{
2306 unsigned i, r;
2307
2308 r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size,
2309 cache->discard_nr_blocks);
2310 if (r) {
2311 DMERR("could not resize on-disk discard bitset");
2312 return r;
2313 }
2314
2315 for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) {
2316 r = dm_cache_set_discard(cache->cmd, to_dblock(i),
2317 is_discarded(cache, to_dblock(i)));
2318 if (r)
2319 return r;
2320 }
2321
2322 return 0;
2323}
2324
2325static int save_hint(void *context, dm_cblock_t cblock, dm_oblock_t oblock,
2326 uint32_t hint)
2327{
2328 struct cache *cache = context;
2329 return dm_cache_save_hint(cache->cmd, cblock, hint);
2330}
2331
2332static int write_hints(struct cache *cache)
2333{
2334 int r;
2335
2336 r = dm_cache_begin_hints(cache->cmd, cache->policy);
2337 if (r) {
2338 DMERR("dm_cache_begin_hints failed");
2339 return r;
2340 }
2341
2342 r = policy_walk_mappings(cache->policy, save_hint, cache);
2343 if (r)
2344 DMERR("policy_walk_mappings failed");
2345
2346 return r;
2347}
2348
2349/*
2350 * Returns true on success.
2351 */
2352static bool sync_metadata(struct cache *cache)
2353{
2354 int r1, r2, r3, r4;
2355
2356 r1 = write_dirty_bitset(cache);
2357 if (r1)
2358 DMERR("could not write dirty bitset");
2359
2360 r2 = write_discard_bitset(cache);
2361 if (r2)
2362 DMERR("could not write discard bitset");
2363
2364 save_stats(cache);
2365
2366 r3 = write_hints(cache);
2367 if (r3)
2368 DMERR("could not write hints");
2369
2370 /*
2371 * If writing the above metadata failed, we still commit, but don't
2372 * set the clean shutdown flag. This will effectively force every
2373 * dirty bit to be set on reload.
2374 */
2375 r4 = dm_cache_commit(cache->cmd, !r1 && !r2 && !r3);
2376 if (r4)
2377 DMERR("could not write cache metadata. Data loss may occur.");
2378
2379 return !r1 && !r2 && !r3 && !r4;
2380}
2381
2382static void cache_postsuspend(struct dm_target *ti)
2383{
2384 struct cache *cache = ti->private;
2385
2386 start_quiescing(cache);
2387 wait_for_migrations(cache);
2388 stop_worker(cache);
2389 requeue_deferred_io(cache);
2390 stop_quiescing(cache);
2391
2392 (void) sync_metadata(cache);
2393}
2394
2395static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
2396 bool dirty, uint32_t hint, bool hint_valid)
2397{
2398 int r;
2399 struct cache *cache = context;
2400
2401 r = policy_load_mapping(cache->policy, oblock, cblock, hint, hint_valid);
2402 if (r)
2403 return r;
2404
2405 if (dirty)
2406 set_dirty(cache, oblock, cblock);
2407 else
2408 clear_dirty(cache, oblock, cblock);
2409
2410 return 0;
2411}
2412
2413static int load_discard(void *context, sector_t discard_block_size,
2414 dm_dblock_t dblock, bool discard)
2415{
2416 struct cache *cache = context;
2417
2418 /* FIXME: handle mismatched block size */
2419
2420 if (discard)
2421 set_discard(cache, dblock);
2422 else
2423 clear_discard(cache, dblock);
2424
2425 return 0;
2426}
2427
2428static int cache_preresume(struct dm_target *ti)
2429{
2430 int r = 0;
2431 struct cache *cache = ti->private;
2432 sector_t actual_cache_size = get_dev_size(cache->cache_dev);
2433 (void) sector_div(actual_cache_size, cache->sectors_per_block);
2434
2435 /*
2436 * Check to see if the cache device has been resized.
2437 */
2438 if (from_cblock(cache->cache_size) != actual_cache_size || !cache->sized) {
2439 cache->cache_size = to_cblock(actual_cache_size);
2440
2441 r = dm_cache_resize(cache->cmd, cache->cache_size);
2442 if (r) {
2443 DMERR("could not resize cache metadata");
2444 return r;
2445 }
2446
2447 cache->sized = true;
2448 }
2449
2450 if (!cache->loaded_mappings) {
2451 r = dm_cache_load_mappings(cache->cmd, cache->policy,
2452 load_mapping, cache);
2453 if (r) {
2454 DMERR("could not load cache mappings");
2455 return r;
2456 }
2457
2458 cache->loaded_mappings = true;
2459 }
2460
2461 if (!cache->loaded_discards) {
2462 r = dm_cache_load_discards(cache->cmd, load_discard, cache);
2463 if (r) {
2464 DMERR("could not load origin discards");
2465 return r;
2466 }
2467
2468 cache->loaded_discards = true;
2469 }
2470
2471 return r;
2472}
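/*
 * For example (sizes hypothetical): growing the cache device from 1GiB
 * to 2GiB with 256KiB blocks takes actual_cache_size above from 4096
 * to 8192 blocks, so dm_cache_resize() is asked to extend the metadata
 * on the next preresume.
 */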
2473
2474static void cache_resume(struct dm_target *ti)
2475{
2476 struct cache *cache = ti->private;
2477
2478 cache->need_tick_bio = true;
2479 do_waker(&cache->waker.work);
2480}
2481
2482/*
2483 * Status format:
2484 *
2485 * <#used metadata blocks>/<#total metadata blocks>
2486 * <#read hits> <#read misses> <#write hits> <#write misses>
2487 * <#demotions> <#promotions> <#blocks in cache> <#dirty>
2488 * <#features> <features>*
2489 * <#core args> <core args>
2490 * <#policy args> <policy args>*
2491 */
2492static void cache_status(struct dm_target *ti, status_type_t type,
2493 unsigned status_flags, char *result, unsigned maxlen)
2494{
2495 int r = 0;
2496 unsigned i;
2497 ssize_t sz = 0;
2498 dm_block_t nr_free_blocks_metadata = 0;
2499 dm_block_t nr_blocks_metadata = 0;
2500 char buf[BDEVNAME_SIZE];
2501 struct cache *cache = ti->private;
2502 dm_cblock_t residency;
2503
2504 switch (type) {
2505 case STATUSTYPE_INFO:
2506 /* Commit to ensure statistics aren't out of date */
2507 if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) {
2508 r = dm_cache_commit(cache->cmd, false);
2509 if (r)
2510 DMERR("could not commit metadata for accurate status");
2511 }
2512
2513 r = dm_cache_get_free_metadata_block_count(cache->cmd,
2514 &nr_free_blocks_metadata);
2515 if (r) {
2516 DMERR("could not get metadata free block count");
2517 goto err;
2518 }
2519
2520 r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata);
2521 if (r) {
2522 DMERR("could not get metadata device size");
2523 goto err;
2524 }
2525
2526 residency = policy_residency(cache->policy);
2527
2528 DMEMIT("%llu/%llu %u %u %u %u %u %u %llu %u ",
2529 (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
2530 (unsigned long long)nr_blocks_metadata,
2531 (unsigned) atomic_read(&cache->stats.read_hit),
2532 (unsigned) atomic_read(&cache->stats.read_miss),
2533 (unsigned) atomic_read(&cache->stats.write_hit),
2534 (unsigned) atomic_read(&cache->stats.write_miss),
2535 (unsigned) atomic_read(&cache->stats.demotion),
2536 (unsigned) atomic_read(&cache->stats.promotion),
2537 (unsigned long long) from_cblock(residency),
2538 cache->nr_dirty);
2539
2540 if (cache->features.write_through)
2541 DMEMIT("1 writethrough ");
2542 else
2543 DMEMIT("0 ");
2544
2545 DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);
2546 if (sz < maxlen) {
2547 r = policy_emit_config_values(cache->policy, result + sz, maxlen - sz);
2548 if (r)
2549 DMERR("policy_emit_config_values returned %d", r);
2550 }
2551
2552 break;
2553
2554 case STATUSTYPE_TABLE:
2555 format_dev_t(buf, cache->metadata_dev->bdev->bd_dev);
2556 DMEMIT("%s ", buf);
2557 format_dev_t(buf, cache->cache_dev->bdev->bd_dev);
2558 DMEMIT("%s ", buf);
2559 format_dev_t(buf, cache->origin_dev->bdev->bd_dev);
2560 DMEMIT("%s", buf);
2561
2562 for (i = 0; i < cache->nr_ctr_args - 1; i++)
2563 DMEMIT(" %s", cache->ctr_args[i]);
2564 if (cache->nr_ctr_args)
2565 DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]);
2566 }
2567
2568 return;
2569
2570err:
2571 DMEMIT("Error");
2572}
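/*
 * A hypothetical STATUSTYPE_INFO line in the format documented above
 * cache_status() (all values are illustrative):
 *
 *   23/4096 156 11 240 32 1 13 4096 0 1 writethrough 2 migration_threshold 2048 0
 *
 * i.e. 23 of 4096 metadata blocks used; 156 read hits, 11 read misses,
 * 240 write hits, 32 write misses; 1 demotion, 13 promotions; 4096
 * blocks resident in the cache, none dirty; the writethrough feature
 * enabled; the core's migration_threshold at its default of 2048; and
 * a trailing 0 standing in for a policy that emits no config values.
 */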
2573
2574/*
2575 * Supports <key> <value>.
2576 *
2577 * The key migration_threshold is supported by the cache target core.
2578 */
2579static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
2580{
2581 struct cache *cache = ti->private;
2582
2583 if (argc != 2)
2584 return -EINVAL;
2585
2586 return set_config_value(cache, argv[0], argv[1]);
2587}
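/*
 * Userspace reaches this through the message ioctl, e.g. (device name
 * hypothetical):
 *
 *   dmsetup message my-cache 0 migration_threshold 4096
 *
 * As at construction time, keys the core does not recognise fall
 * through to the policy.
 */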
2588
2589static int cache_iterate_devices(struct dm_target *ti,
2590 iterate_devices_callout_fn fn, void *data)
2591{
2592 int r = 0;
2593 struct cache *cache = ti->private;
2594
2595 r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data);
2596 if (!r)
2597 r = fn(ti, cache->origin_dev, 0, ti->len, data);
2598
2599 return r;
2600}
2601
2602/*
2603 * We assume I/O is going to the origin, which is the volume more
2604 * likely to have restrictions (e.g. by being striped). Looking up
2605 * the exact location of the data would be expensive, and could be
2606 * out of date by the time the bio is submitted anyway.
2607 */
2608static int cache_bvec_merge(struct dm_target *ti,
2609 struct bvec_merge_data *bvm,
2610 struct bio_vec *biovec, int max_size)
2611{
2612 struct cache *cache = ti->private;
2613 struct request_queue *q = bdev_get_queue(cache->origin_dev->bdev);
2614
2615 if (!q->merge_bvec_fn)
2616 return max_size;
2617
2618 bvm->bi_bdev = cache->origin_dev->bdev;
2619 return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
2620}
2621
2622static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
2623{
2624 /*
2625 * FIXME: these limits may be incompatible with the cache device
2626 */
2627 limits->max_discard_sectors = cache->discard_block_size * 1024;
2628 limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
2629}
2630
2631static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
2632{
2633 struct cache *cache = ti->private;
2634
2635 blk_limits_io_min(limits, 0);
2636 blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
2637 set_discard_limits(cache, limits);
2638}
2639
2640/*----------------------------------------------------------------*/
2641
2642static struct target_type cache_target = {
2643 .name = "cache",
2644 .version = {1, 1, 1},
2645 .module = THIS_MODULE,
2646 .ctr = cache_ctr,
2647 .dtr = cache_dtr,
2648 .map = cache_map,
2649 .end_io = cache_end_io,
2650 .postsuspend = cache_postsuspend,
2651 .preresume = cache_preresume,
2652 .resume = cache_resume,
2653 .status = cache_status,
2654 .message = cache_message,
2655 .iterate_devices = cache_iterate_devices,
2656 .merge = cache_bvec_merge,
2657 .io_hints = cache_io_hints,
2658};
2659
2660static int __init dm_cache_init(void)
2661{
2662 int r;
2663
2664 r = dm_register_target(&cache_target);
2665 if (r) {
2666 DMERR("cache target registration failed: %d", r);
2667 return r;
2668 }
2669
2670 migration_cache = KMEM_CACHE(dm_cache_migration, 0);
2671 if (!migration_cache) {
2672 dm_unregister_target(&cache_target);
2673 return -ENOMEM;
2674 }
2675
2676 return 0;
2677}
2678
2679static void __exit dm_cache_exit(void)
2680{
2681 dm_unregister_target(&cache_target);
2682 kmem_cache_destroy(migration_cache);
2683}
2684
2685module_init(dm_cache_init);
2686module_exit(dm_cache_exit);
2687
2688MODULE_DESCRIPTION(DM_NAME " cache target");
2689MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>");
2690MODULE_LICENSE("GPL");