Diffstat (limited to 'drivers/md/dm-cache-target.c')
-rw-r--r--  drivers/md/dm-cache-target.c | 214
1 file changed, 144 insertions(+), 70 deletions(-)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 0f4e84b15c3..10744091e6c 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -6,6 +6,7 @@
#include "dm.h"
#include "dm-bio-prison.h"
+#include "dm-bio-record.h"
#include "dm-cache-metadata.h"
#include <linux/dm-io.h>
@@ -142,6 +143,7 @@ struct cache {
spinlock_t lock;
struct bio_list deferred_bios;
struct bio_list deferred_flush_bios;
+ struct bio_list deferred_writethrough_bios;
struct list_head quiesced_migrations;
struct list_head completed_migrations;
struct list_head need_commit_migrations;
@@ -158,7 +160,7 @@ struct cache {
/*
* origin_blocks entries, discarded if set.
*/
- sector_t discard_block_size; /* a power of 2 times sectors per block */
+ uint32_t discard_block_size; /* a power of 2 times sectors per block */
dm_dblock_t discard_nr_blocks;
unsigned long *discard_bitset;
@@ -199,6 +201,16 @@ struct per_bio_data {
bool tick:1;
unsigned req_nr:2;
struct dm_deferred_entry *all_io_entry;
+
+ /*
+ * writethrough fields. These MUST remain at the end of this
+ * structure and the 'cache' member must be the first as it
+ * is used to determine the offset of the writethrough fields.
+ */
+ struct cache *cache;
+ dm_cblock_t cblock;
+ bio_end_io_t *saved_bi_end_io;
+ struct dm_bio_details bio_details;
};
struct dm_cache_migration {
@@ -412,17 +424,24 @@ static bool block_size_is_power_of_two(struct cache *cache)
return cache->sectors_per_block_shift >= 0;
}
+static dm_block_t block_div(dm_block_t b, uint32_t n)
+{
+ do_div(b, n);
+
+ return b;
+}
+
static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
{
- sector_t discard_blocks = cache->discard_block_size;
+ uint32_t discard_blocks = cache->discard_block_size;
dm_block_t b = from_oblock(oblock);
if (!block_size_is_power_of_two(cache))
- (void) sector_div(discard_blocks, cache->sectors_per_block);
+ discard_blocks = discard_blocks / cache->sectors_per_block;
else
discard_blocks >>= cache->sectors_per_block_shift;
- (void) sector_div(b, discard_blocks);
+ b = block_div(b, discard_blocks);
return to_dblock(b);
}
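
A note on the new helper: the kernel's do_div(b, n) macro divides the 64-bit value b by the 32-bit n in place and evaluates to the remainder, not the quotient, which is why block_div() must return b itself. A minimal userspace sketch of the equivalent arithmetic (illustrative only; block_div_sketch is a hypothetical stand-in, not kernel code):

#include <stdint.h>

static uint64_t block_div_sketch(uint64_t b, uint32_t n)
{
	uint32_t remainder = b % n;	/* what do_div() evaluates to */

	b /= n;				/* what do_div() stores back into b */
	(void) remainder;		/* block_div() discards the remainder */
	return b;
}
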
@@ -500,16 +519,28 @@ static void save_stats(struct cache *cache)
/*----------------------------------------------------------------
* Per bio data
*--------------------------------------------------------------*/
-static struct per_bio_data *get_per_bio_data(struct bio *bio)
+
+/*
+ * If using writeback, leave out struct per_bio_data's writethrough fields.
+ */
+#define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
+#define PB_DATA_SIZE_WT (sizeof(struct per_bio_data))
+
+static size_t get_per_bio_data_size(struct cache *cache)
{
- struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
+ return cache->features.write_through ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
+}
+
+static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
+{
+ struct per_bio_data *pb = dm_per_bio_data(bio, data_size);
BUG_ON(!pb);
return pb;
}
-static struct per_bio_data *init_per_bio_data(struct bio *bio)
+static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size)
{
- struct per_bio_data *pb = get_per_bio_data(bio);
+ struct per_bio_data *pb = get_per_bio_data(bio, data_size);
pb->tick = false;
pb->req_nr = dm_bio_get_target_bio_nr(bio);
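
The two size macros implement the patch's central trick: when the cache runs in writeback mode, only the leading portion of struct per_bio_data, up to but not including the 'cache' member, is allocated per bio, so offsetof() doubles as the truncated allocation size. This is why the struct comment insists that 'cache' stay the first of the writethrough fields. A standalone sketch of the sizing (hypothetical struct with stand-in types, not the kernel's):

#include <stddef.h>
#include <stdio.h>

struct pb_sketch {
	unsigned tick:1;
	unsigned req_nr:2;
	void *all_io_entry;
	/* writethrough fields: keep at the end, 'cache' first */
	void *cache;
	unsigned cblock;
};

int main(void)
{
	printf("writeback size:    %zu\n", offsetof(struct pb_sketch, cache));
	printf("writethrough size: %zu\n", sizeof(struct pb_sketch));
	return 0;
}
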
@@ -543,7 +574,8 @@ static void remap_to_cache(struct cache *cache, struct bio *bio,
static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
{
unsigned long flags;
- struct per_bio_data *pb = get_per_bio_data(bio);
+ size_t pb_data_size = get_per_bio_data_size(cache);
+ struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
spin_lock_irqsave(&cache->lock, flags);
if (cache->need_tick_bio &&
@@ -609,6 +641,58 @@ static void issue(struct cache *cache, struct bio *bio)
spin_unlock_irqrestore(&cache->lock, flags);
}
+static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ bio_list_add(&cache->deferred_writethrough_bios, bio);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ wake_worker(cache);
+}
+
+static void writethrough_endio(struct bio *bio, int err)
+{
+ struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
+ bio->bi_end_io = pb->saved_bi_end_io;
+
+ if (err) {
+ bio_endio(bio, err);
+ return;
+ }
+
+ dm_bio_restore(&pb->bio_details, bio);
+ remap_to_cache(pb->cache, bio, pb->cblock);
+
+ /*
+ * We can't issue this bio directly, since we're in interrupt
+ * context. So it get's put on a bio list for processing by the
+ * worker thread.
+ */
+ defer_writethrough_bio(pb->cache, bio);
+}
+
+/*
+ * When running in writethrough mode we need to send writes to clean blocks
+ * to both the cache and origin devices. In future we'd like to clone each
+ * bio and submit the copies in parallel, but for now we issue them in
+ * series as this is easier.
+ */
+static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
+ dm_oblock_t oblock, dm_cblock_t cblock)
+{
+ struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
+
+ pb->cache = cache;
+ pb->cblock = cblock;
+ pb->saved_bi_end_io = bio->bi_end_io;
+ dm_bio_record(&pb->bio_details, bio);
+ bio->bi_end_io = writethrough_endio;
+
+ remap_to_origin_clear_discard(pb->cache, bio, oblock);
+}
+
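
Taken together, these additions replace the old duplicate-bio scheme with a two-leg submission: the write goes to the origin first, and on success writethrough_endio() restores the bio, remaps it to the cache device, and punts resubmission to the worker thread because bi_end_io runs in interrupt context. A userspace sketch of this endio-hijack pattern (all names hypothetical; the worker deferral is collapsed into a direct call):

#include <stdio.h>

struct fake_bio {
	void (*end_io)(struct fake_bio *b, int err);
	const char *dev;
};

static void (*saved_end_io)(struct fake_bio *b, int err);

static void submit(struct fake_bio *b)
{
	printf("submit to %s\n", b->dev);
	b->end_io(b, 0);		/* pretend the I/O completed OK */
}

static void final_end_io(struct fake_bio *b, int err)
{
	printf("both legs done on %s, err=%d\n", b->dev, err);
}

static void wt_end_io(struct fake_bio *b, int err)
{
	b->end_io = saved_end_io;	/* restore, like saved_bi_end_io */
	if (err) {
		b->end_io(b, err);	/* propagate an origin failure */
		return;
	}
	b->dev = "cache";		/* remap the second leg */
	submit(b);			/* the real code defers this to
					 * the worker thread instead */
}

int main(void)
{
	struct fake_bio b = { final_end_io, "origin" };

	saved_end_io = b.end_io;
	b.end_io = wt_end_io;		/* hijack completion */
	submit(&b);			/* first leg: origin device */
	return 0;
}
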
/*----------------------------------------------------------------
* Migration processing
*
@@ -972,7 +1056,8 @@ static void defer_bio(struct cache *cache, struct bio *bio)
static void process_flush_bio(struct cache *cache, struct bio *bio)
{
- struct per_bio_data *pb = get_per_bio_data(bio);
+ size_t pb_data_size = get_per_bio_data_size(cache);
+ struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
BUG_ON(bio->bi_size);
if (!pb->req_nr)
@@ -1002,7 +1087,7 @@ static void process_discard_bio(struct cache *cache, struct bio *bio)
dm_block_t end_block = bio->bi_sector + bio_sectors(bio);
dm_block_t b;
- (void) sector_div(end_block, cache->discard_block_size);
+ end_block = block_div(end_block, cache->discard_block_size);
for (b = start_block; b < end_block; b++)
set_discard(cache, to_dblock(b));
@@ -1044,7 +1129,8 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
dm_oblock_t block = get_bio_block(cache, bio);
struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
struct policy_result lookup_result;
- struct per_bio_data *pb = get_per_bio_data(bio);
+ size_t pb_data_size = get_per_bio_data_size(cache);
+ struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
bool discarded_block = is_discarded_oblock(cache, block);
bool can_migrate = discarded_block || spare_migration_bandwidth(cache);
@@ -1070,14 +1156,9 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
inc_hit_counter(cache, bio);
pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
- if (is_writethrough_io(cache, bio, lookup_result.cblock)) {
- /*
- * No need to mark anything dirty in write through mode.
- */
- pb->req_nr == 0 ?
- remap_to_cache(cache, bio, lookup_result.cblock) :
- remap_to_origin_clear_discard(cache, bio, block);
- } else
+ if (is_writethrough_io(cache, bio, lookup_result.cblock))
+ remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
+ else
remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
issue(cache, bio);
@@ -1086,17 +1167,8 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
case POLICY_MISS:
inc_miss_counter(cache, bio);
pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
-
- if (pb->req_nr != 0) {
- /*
- * This is a duplicate writethrough io that is no
- * longer needed because the block has been demoted.
- */
- bio_endio(bio, 0);
- } else {
- remap_to_origin_clear_discard(cache, bio, block);
- issue(cache, bio);
- }
+ remap_to_origin_clear_discard(cache, bio, block);
+ issue(cache, bio);
break;
case POLICY_NEW:
@@ -1217,6 +1289,23 @@ static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
submit_bios ? generic_make_request(bio) : bio_io_error(bio);
}
+static void process_deferred_writethrough_bios(struct cache *cache)
+{
+ unsigned long flags;
+ struct bio_list bios;
+ struct bio *bio;
+
+ bio_list_init(&bios);
+
+ spin_lock_irqsave(&cache->lock, flags);
+ bio_list_merge(&bios, &cache->deferred_writethrough_bios);
+ bio_list_init(&cache->deferred_writethrough_bios);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ while ((bio = bio_list_pop(&bios)))
+ generic_make_request(bio);
+}
+
static void writeback_some_dirty_blocks(struct cache *cache)
{
int r = 0;
@@ -1313,6 +1402,7 @@ static int more_work(struct cache *cache)
else
return !bio_list_empty(&cache->deferred_bios) ||
!bio_list_empty(&cache->deferred_flush_bios) ||
+ !bio_list_empty(&cache->deferred_writethrough_bios) ||
!list_empty(&cache->quiesced_migrations) ||
!list_empty(&cache->completed_migrations) ||
!list_empty(&cache->need_commit_migrations);
@@ -1331,6 +1421,8 @@ static void do_worker(struct work_struct *ws)
writeback_some_dirty_blocks(cache);
+ process_deferred_writethrough_bios(cache);
+
if (commit_if_needed(cache)) {
process_deferred_flush_bios(cache, false);
@@ -1756,8 +1848,11 @@ static int create_cache_policy(struct cache *cache, struct cache_args *ca,
}
r = set_config_values(cache->policy, ca->policy_argc, ca->policy_argv);
- if (r)
+ if (r) {
+ *error = "Error setting cache policy's config values";
dm_cache_policy_destroy(cache->policy);
+ cache->policy = NULL;
+ }
return r;
}
@@ -1793,8 +1888,6 @@ static sector_t calculate_discard_block_size(sector_t cache_block_size,
#define DEFAULT_MIGRATION_THRESHOLD (2048 * 100)
-static unsigned cache_num_write_bios(struct dm_target *ti, struct bio *bio);
-
static int cache_create(struct cache_args *ca, struct cache **result)
{
int r = 0;
@@ -1811,7 +1904,6 @@ static int cache_create(struct cache_args *ca, struct cache **result)
cache->ti = ca->ti;
ti->private = cache;
- ti->per_bio_data_size = sizeof(struct per_bio_data);
ti->num_flush_bios = 2;
ti->flush_supported = true;
@@ -1820,9 +1912,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
ti->discard_zeroes_data_unsupported = true;
memcpy(&cache->features, &ca->features, sizeof(cache->features));
-
- if (cache->features.write_through)
- ti->num_write_bios = cache_num_write_bios;
+ ti->per_bio_data_size = get_per_bio_data_size(cache);
cache->callbacks.congested_fn = cache_is_congested;
dm_table_add_target_callbacks(ti->table, &cache->callbacks);
@@ -1835,7 +1925,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
/* FIXME: factor out this whole section */
origin_blocks = cache->origin_sectors = ca->origin_sectors;
- (void) sector_div(origin_blocks, ca->block_size);
+ origin_blocks = block_div(origin_blocks, ca->block_size);
cache->origin_blocks = to_oblock(origin_blocks);
cache->sectors_per_block = ca->block_size;
@@ -1848,7 +1938,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
dm_block_t cache_size = ca->cache_sectors;
cache->sectors_per_block_shift = -1;
- (void) sector_div(cache_size, ca->block_size);
+ cache_size = block_div(cache_size, ca->block_size);
cache->cache_size = to_cblock(cache_size);
} else {
cache->sectors_per_block_shift = __ffs(ca->block_size);
@@ -1873,6 +1963,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
spin_lock_init(&cache->lock);
bio_list_init(&cache->deferred_bios);
bio_list_init(&cache->deferred_flush_bios);
+ bio_list_init(&cache->deferred_writethrough_bios);
INIT_LIST_HEAD(&cache->quiesced_migrations);
INIT_LIST_HEAD(&cache->completed_migrations);
INIT_LIST_HEAD(&cache->need_commit_migrations);
@@ -2002,6 +2093,8 @@ static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto out;
r = cache_create(ca, &cache);
+ if (r)
+ goto out;
r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
if (r) {
@@ -2016,26 +2109,13 @@ out:
return r;
}
-static unsigned cache_num_write_bios(struct dm_target *ti, struct bio *bio)
-{
- int r;
- struct cache *cache = ti->private;
- dm_oblock_t block = get_bio_block(cache, bio);
- dm_cblock_t cblock;
-
- r = policy_lookup(cache->policy, block, &cblock);
- if (r < 0)
- return 2; /* assume the worst */
-
- return (!r && !is_dirty(cache, cblock)) ? 2 : 1;
-}
-
static int cache_map(struct dm_target *ti, struct bio *bio)
{
struct cache *cache = ti->private;
int r;
dm_oblock_t block = get_bio_block(cache, bio);
+ size_t pb_data_size = get_per_bio_data_size(cache);
bool can_migrate = false;
bool discarded_block;
struct dm_bio_prison_cell *cell;
@@ -2052,7 +2132,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
}
- pb = init_per_bio_data(bio);
+ pb = init_per_bio_data(bio, pb_data_size);
if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
defer_bio(cache, bio);
@@ -2097,18 +2177,12 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
inc_hit_counter(cache, bio);
pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
- if (is_writethrough_io(cache, bio, lookup_result.cblock)) {
- /*
- * No need to mark anything dirty in write through mode.
- */
- pb->req_nr == 0 ?
- remap_to_cache(cache, bio, lookup_result.cblock) :
- remap_to_origin_clear_discard(cache, bio, block);
- cell_defer(cache, cell, false);
- } else {
+ if (is_writethrough_io(cache, bio, lookup_result.cblock))
+ remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
+ else
remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
- cell_defer(cache, cell, false);
- }
+
+ cell_defer(cache, cell, false);
break;
case POLICY_MISS:
@@ -2143,7 +2217,8 @@ static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
{
struct cache *cache = ti->private;
unsigned long flags;
- struct per_bio_data *pb = get_per_bio_data(bio);
+ size_t pb_data_size = get_per_bio_data_size(cache);
+ struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
if (pb->tick) {
policy_tick(cache->policy);
@@ -2319,8 +2394,7 @@ static int cache_preresume(struct dm_target *ti)
}
if (!cache->loaded_mappings) {
- r = dm_cache_load_mappings(cache->cmd,
- dm_cache_policy_get_name(cache->policy),
+ r = dm_cache_load_mappings(cache->cmd, cache->policy,
load_mapping, cache);
if (r) {
DMERR("could not load cache mappings");
@@ -2535,7 +2609,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type cache_target = {
.name = "cache",
- .version = {1, 0, 0},
+ .version = {1, 1, 0},
.module = THIS_MODULE,
.ctr = cache_ctr,
.dtr = cache_dtr,