/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-cache-policy.h"
#include "dm.h"

#include <linux/hash.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define DM_MSG_PREFIX "cache-policy-mq"

static struct kmem_cache *mq_entry_cache;

/*----------------------------------------------------------------*/

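/*
 * Round n up to a power of two, but never below min.  For example,
 * next_power(1000, 16) == 1024 and next_power(3, 16) == 16.  Used later
 * to size the hash table bucket array.
 */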
static unsigned next_power(unsigned n, unsigned min)
{
	return roundup_pow_of_two(max(n, min));
}

/*----------------------------------------------------------------*/

/*
 * Large, sequential ios are probably better left on the origin device since
 * spindles tend to have good bandwidth.
 *
 * The io_tracker tries to spot when the io is in one of these sequential
 * modes.
 *
 * The two thresholds used to switch between random and sequential io mode
 * default as follows and can be adjusted via the constructor and message
 * interfaces.
 */
#define RANDOM_THRESHOLD_DEFAULT 4
#define SEQUENTIAL_THRESHOLD_DEFAULT 512

enum io_pattern {
	PATTERN_SEQUENTIAL,
	PATTERN_RANDOM
};

struct io_tracker {
	enum io_pattern pattern;

	unsigned nr_seq_samples;
	unsigned nr_rand_samples;
	unsigned thresholds[2];

	dm_oblock_t last_end_oblock;
};

static void iot_init(struct io_tracker *t,
		     int sequential_threshold, int random_threshold)
{
	t->pattern = PATTERN_RANDOM;
	t->nr_seq_samples = 0;
	t->nr_rand_samples = 0;
	t->last_end_oblock = 0;
	t->thresholds[PATTERN_RANDOM] = random_threshold;
	t->thresholds[PATTERN_SEQUENTIAL] = sequential_threshold;
}

static enum io_pattern iot_pattern(struct io_tracker *t)
{
	return t->pattern;
}

static void iot_update_stats(struct io_tracker *t, struct bio *bio)
{
	if (bio->bi_iter.bi_sector == from_oblock(t->last_end_oblock) + 1)
		t->nr_seq_samples++;
	else {
		/*
		 * Just one non-sequential IO is enough to reset the
		 * counters.
		 */
		if (t->nr_seq_samples) {
			t->nr_seq_samples = 0;
			t->nr_rand_samples = 0;
		}

		t->nr_rand_samples++;
	}

	t->last_end_oblock = to_oblock(bio_end_sector(bio) - 1);
}

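/*
 * Note that iot_update_stats() above resets the sequential count on any
 * non-contiguous io, so the switch below to PATTERN_SEQUENTIAL only
 * happens after a run of at least sequential_threshold contiguous ios.
 */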
static void iot_check_for_pattern_switch(struct io_tracker *t)
{
	switch (t->pattern) {
	case PATTERN_SEQUENTIAL:
		if (t->nr_rand_samples >= t->thresholds[PATTERN_RANDOM]) {
			t->pattern = PATTERN_RANDOM;
			t->nr_seq_samples = t->nr_rand_samples = 0;
		}
		break;

	case PATTERN_RANDOM:
		if (t->nr_seq_samples >= t->thresholds[PATTERN_SEQUENTIAL]) {
			t->pattern = PATTERN_SEQUENTIAL;
			t->nr_seq_samples = t->nr_rand_samples = 0;
		}
		break;
	}
}

static void iot_examine_bio(struct io_tracker *t, struct bio *bio)
{
	iot_update_stats(t, bio);
	iot_check_for_pattern_switch(t);
}

/*----------------------------------------------------------------*/


/*
 * This queue is divided up into different levels, allowing us to push
 * entries to the back of any of the levels. Think of it as a partially
 * sorted queue.
 */
#define NR_QUEUE_LEVELS 16u

struct queue {
	unsigned nr_elts;
	struct list_head qs[NR_QUEUE_LEVELS];
};

static void queue_init(struct queue *q)
{
	unsigned i;

	q->nr_elts = 0;
	for (i = 0; i < NR_QUEUE_LEVELS; i++)
		INIT_LIST_HEAD(q->qs + i);
}

static bool queue_empty(struct queue *q)
{
	return q->nr_elts == 0;
}

/*
 * Insert an entry to the back of the given level.
 */
static void queue_push(struct queue *q, unsigned level, struct list_head *elt)
{
	q->nr_elts++;
	list_add_tail(elt, q->qs + level);
}

static void queue_remove(struct queue *q, struct list_head *elt)
{
	q->nr_elts--;
	list_del(elt);
}

/*
 * Gives us the oldest entry of the lowest populated level. If the first
 * level is emptied then we shift down one level.
 */
static struct list_head *queue_peek(struct queue *q)
{
	unsigned level;

	for (level = 0; level < NR_QUEUE_LEVELS; level++)
		if (!list_empty(q->qs + level))
			return q->qs[level].next;

	return NULL;
}

static struct list_head *queue_pop(struct queue *q)
{
	struct list_head *r = queue_peek(q);

	if (r) {
		q->nr_elts--;
		list_del(r);
	}

	return r;
}

static struct list_head *list_pop(struct list_head *lh)
{
	struct list_head *r = lh->next;

	BUG_ON(!r);
	list_del_init(r);

	return r;
}

/*----------------------------------------------------------------*/

/*
 * Describes a cache entry. Used in both the cache and the pre_cache.
 */
struct entry {
	struct hlist_node hlist;
	struct list_head list;
	dm_oblock_t oblock;

	/*
	 * FIXME: pack these better
	 */
	bool dirty:1;
	unsigned hit_count;
	unsigned generation;
	unsigned tick;
};

/*
 * Rather than storing the cblock in an entry, we allocate all entries in
 * an array, and infer the cblock from the entry position.
 *
 * Free entries are linked together into a list.
 */
struct entry_pool {
	struct entry *entries, *entries_end;
	struct list_head free;
	unsigned nr_allocated;
};

static int epool_init(struct entry_pool *ep, unsigned nr_entries)
{
	unsigned i;

	ep->entries = vzalloc(sizeof(struct entry) * nr_entries);
	if (!ep->entries)
		return -ENOMEM;

	ep->entries_end = ep->entries + nr_entries;

	INIT_LIST_HEAD(&ep->free);
	for (i = 0; i < nr_entries; i++)
		list_add(&ep->entries[i].list, &ep->free);

	ep->nr_allocated = 0;

	return 0;
}

static void epool_exit(struct entry_pool *ep)
{
	vfree(ep->entries);
}

static struct entry *alloc_entry(struct entry_pool *ep)
{
	struct entry *e;

	if (list_empty(&ep->free))
		return NULL;

	e = list_entry(list_pop(&ep->free), struct entry, list);
	INIT_LIST_HEAD(&e->list);
	INIT_HLIST_NODE(&e->hlist);
	ep->nr_allocated++;

	return e;
}

/*
 * This assumes the cblock hasn't already been allocated.
 */
static struct entry *alloc_particular_entry(struct entry_pool *ep, dm_cblock_t cblock)
{
	struct entry *e = ep->entries + from_cblock(cblock);

	list_del_init(&e->list);
	INIT_HLIST_NODE(&e->hlist);
	ep->nr_allocated++;

	return e;
}

static void free_entry(struct entry_pool *ep, struct entry *e)
{
	BUG_ON(!ep->nr_allocated);
	ep->nr_allocated--;
	INIT_HLIST_NODE(&e->hlist);
	list_add(&e->list, &ep->free);
}

/*
 * Returns NULL if the entry is free.
 */
static struct entry *epool_find(struct entry_pool *ep, dm_cblock_t cblock)
{
	struct entry *e = ep->entries + from_cblock(cblock);
	return !hlist_unhashed(&e->hlist) ? e : NULL;
}

static bool epool_empty(struct entry_pool *ep)
{
	return list_empty(&ep->free);
}

static bool in_pool(struct entry_pool *ep, struct entry *e)
{
	return e >= ep->entries && e < ep->entries_end;
}

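/*
 * The cblock is just the entry's index within the pool's array, so
 * alloc_particular_entry() above and infer_cblock() below are inverses
 * of each other.
 */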
static dm_cblock_t infer_cblock(struct entry_pool *ep, struct entry *e)
{
	return to_cblock(e - ep->entries);
}

/*----------------------------------------------------------------*/

struct mq_policy {
	struct dm_cache_policy policy;

	/* protects everything */
	struct mutex lock;
	dm_cblock_t cache_size;
	struct io_tracker tracker;

	/*
	 * Entries come from two pools, one of pre-cache entries, and one
	 * for the cache proper.
	 */
	struct entry_pool pre_cache_pool;
	struct entry_pool cache_pool;

	/*
	 * We maintain three queues of entries. The cache proper,
	 * consisting of a clean and a dirty queue, contains the currently
	 * active mappings, whereas the pre_cache tracks blocks that are
	 * being hit frequently and are potential candidates for promotion
	 * to the cache.
	 */
	struct queue pre_cache;
	struct queue cache_clean;
	struct queue cache_dirty;

	/*
	 * Keeps track of time, incremented by the core. We use this to
	 * avoid attributing multiple hits within the same tick.
	 *
	 * Access to tick_protected should be done with the spin lock held.
	 * It's copied to tick at the start of the map function (within the
	 * mutex).
	 */
	spinlock_t tick_lock;
	unsigned tick_protected;
	unsigned tick;

	/*
	 * A count of the number of times the map function has been called
	 * and found an entry in the pre_cache or cache. Currently used to
	 * calculate the generation.
	 */
	unsigned hit_count;

	/*
	 * A generation is a longish period that is used to trigger some
	 * bookkeeping effects, e.g. decrementing hit counts on entries.
	 * This is needed to allow the cache to evolve as io patterns
	 * change.
	 */
	unsigned generation;
	unsigned generation_period; /* in lookups (will probably change) */

	unsigned discard_promote_adjustment;
	unsigned read_promote_adjustment;
	unsigned write_promote_adjustment;

	/*
	 * The hash table allows us to quickly find an entry by origin
	 * block. Both pre_cache and cache entries are in here.
	 */
	unsigned nr_buckets;
	dm_block_t hash_bits;
	struct hlist_head *table;
};

#define DEFAULT_DISCARD_PROMOTE_ADJUSTMENT 1
#define DEFAULT_READ_PROMOTE_ADJUSTMENT 4
#define DEFAULT_WRITE_PROMOTE_ADJUSTMENT 8
#define DISCOURAGE_DEMOTING_DIRTY_THRESHOLD 128

/*----------------------------------------------------------------*/

/*
 * Simple hash table implementation. Should replace with the standard hash
 * table that's making its way upstream.
 */
static void hash_insert(struct mq_policy *mq, struct entry *e)
{
	unsigned h = hash_64(from_oblock(e->oblock), mq->hash_bits);

	hlist_add_head(&e->hlist, mq->table + h);
}

static struct entry *hash_lookup(struct mq_policy *mq, dm_oblock_t oblock)
{
	unsigned h = hash_64(from_oblock(oblock), mq->hash_bits);
	struct hlist_head *bucket = mq->table + h;
	struct entry *e;

	hlist_for_each_entry(e, bucket, hlist)
		if (e->oblock == oblock) {
			hlist_del(&e->hlist);
			hlist_add_head(&e->hlist, bucket);
			return e;
		}

	return NULL;
}

static void hash_remove(struct entry *e)
{
	hlist_del(&e->hlist);
}

/*----------------------------------------------------------------*/

static bool any_free_cblocks(struct mq_policy *mq)
{
	return !epool_empty(&mq->cache_pool);
}

static bool any_clean_cblocks(struct mq_policy *mq)
{
	return !queue_empty(&mq->cache_clean);
}

/*----------------------------------------------------------------*/

/*
 * Now we get to the meat of the policy. This section deals with deciding
 * when to add entries to the pre_cache and cache, and move between
 * them.
 */

/*
 * The queue level is based on the log2 of the hit count.
 */
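/*
 * For illustration: a hit count of 1 maps to level 0, 8 maps to level 3,
 * and anything of 2^15 or above saturates at the top level
 * (NR_QUEUE_LEVELS - 1).
 */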
static unsigned queue_level(struct entry *e)
{
	return min((unsigned) ilog2(e->hit_count), NR_QUEUE_LEVELS - 1u);
}

static bool in_cache(struct mq_policy *mq, struct entry *e)
{
	return in_pool(&mq->cache_pool, e);
}

/*
 * Inserts the entry into the pre_cache or the cache. Ensures the cache
 * block is marked as allocated if necessary. Inserts into the hash table.
 * Sets the tick which records when the entry was last moved about.
 */
static void push(struct mq_policy *mq, struct entry *e)
{
	e->tick = mq->tick;
	hash_insert(mq, e);

	if (in_cache(mq, e))
		queue_push(e->dirty ? &mq->cache_dirty : &mq->cache_clean,
			   queue_level(e), &e->list);
	else
		queue_push(&mq->pre_cache, queue_level(e), &e->list);
}

/*
 * Removes an entry from pre_cache or cache. Removes from the hash table.
 */
static void del(struct mq_policy *mq, struct entry *e)
{
	if (in_cache(mq, e))
		queue_remove(e->dirty ? &mq->cache_dirty : &mq->cache_clean, &e->list);
	else
		queue_remove(&mq->pre_cache, &e->list);

	hash_remove(e);
}

/*
 * Like del, except it removes the first entry in the queue (ie. the least
 * recently used).
 */
static struct entry *pop(struct mq_policy *mq, struct queue *q)
{
	struct entry *e;
	struct list_head *h = queue_pop(q);

	if (!h)
		return NULL;

	e = container_of(h, struct entry, list);
	hash_remove(e);

	return e;
}

static struct entry *peek(struct queue *q)
{
	struct list_head *h = queue_peek(q);
	return h ? container_of(h, struct entry, list) : NULL;
}

/*
 * Has this entry already been updated?
 */
static bool updated_this_tick(struct mq_policy *mq, struct entry *e)
{
	return mq->tick == e->tick;
}

/*
 * The promotion threshold is adjusted every generation, as are the counts
 * of the entries.
 *
 * At the moment the threshold is taken by averaging the hit counts of some
 * of the entries in the cache (the first 20 entries across all levels in
 * ascending order, giving preference to the clean entries at each level).
 *
 * We can be much cleverer than this though. For example, each promotion
 * could bump up the threshold helping to prevent churn. Much more to do
 * here.
 */

#define MAX_TO_AVERAGE 20

static void check_generation(struct mq_policy *mq)
{
	unsigned total = 0, nr = 0, count = 0, level;
	struct list_head *head;
	struct entry *e;

	if ((mq->hit_count >= mq->generation_period) && (epool_empty(&mq->cache_pool))) {
		mq->hit_count = 0;
		mq->generation++;

		for (level = 0; level < NR_QUEUE_LEVELS && count < MAX_TO_AVERAGE; level++) {
			head = mq->cache_clean.qs + level;
			list_for_each_entry(e, head, list) {
				nr++;
				total += e->hit_count;

				if (++count >= MAX_TO_AVERAGE)
					break;
			}

			head = mq->cache_dirty.qs + level;
			list_for_each_entry(e, head, list) {
				nr++;
				total += e->hit_count;

				if (++count >= MAX_TO_AVERAGE)
					break;
			}
		}
	}
}

/*
 * Whenever we use an entry we bump up its hit counter, and push it to the
 * back of its current level.
 */
static void requeue_and_update_tick(struct mq_policy *mq, struct entry *e)
{
	if (updated_this_tick(mq, e))
		return;

	e->hit_count++;
	mq->hit_count++;
	check_generation(mq);

	/* generation adjustment, to stop the counts increasing forever. */
	/* FIXME: divide? */
	/* e->hit_count -= min(e->hit_count - 1, mq->generation - e->generation); */
	e->generation = mq->generation;

	del(mq, e);
	push(mq, e);
}

/*
 * Demote the least recently used entry from the cache to the pre_cache.
 * Returns the new cache entry to use, and the old origin block it was
 * mapped to.
 *
 * We drop the hit count on the demoted entry back to 1 to stop it bouncing
 * straight back into the cache if it's subsequently hit. There are
 * various options here, and more experimentation would be good:
 *
 * - just forget about the demoted entry completely (ie. don't insert it
 *   into the pre_cache).
 * - divide the hit count rather than setting to some hard coded value.
 * - set the hit count to a hard coded value other than 1, eg, is it better
 *   if it goes in at level 2?
 */
static int demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
{
	struct entry *demoted = pop(mq, &mq->cache_clean);

	if (!demoted)
		/*
		 * We could get a block from mq->cache_dirty, but that
		 * would add extra latency to the triggering bio as it
		 * waits for the writeback. Better to not promote this
		 * time and hope there's a clean block next time this block
		 * is hit.
		 */
		return -ENOSPC;

	*oblock = demoted->oblock;
	free_entry(&mq->cache_pool, demoted);

	/*
	 * We used to put the demoted block into the pre-cache, but I think
	 * it's simpler to just let it work its way up from zero again.
	 * Stops blocks flickering in and out of the cache.
	 */

	return 0;
}

/*
 * Entries in the pre_cache whose hit count passes the promotion
 * threshold move to the cache proper. Working out the correct
 * value for the promotion_threshold is crucial to this policy.
 */
static unsigned promote_threshold(struct mq_policy *mq)
{
	struct entry *e;

	if (any_free_cblocks(mq))
		return 0;

	e = peek(&mq->cache_clean);
	if (e)
		return e->hit_count;

	e = peek(&mq->cache_dirty);
	if (e)
		return e->hit_count + DISCOURAGE_DEMOTING_DIRTY_THRESHOLD;

	/* This should never happen */
	return 0;
}

/*
 * We modify the basic promotion_threshold depending on the specific io.
 *
 * If the origin block has been discarded then there's no cost to copy it
 * to the cache.
 *
 * We bias towards reads, since they can be demoted at no cost if they
 * haven't been dirtied.
 */
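/*
 * With the default adjustments this means: a read needs hit_count >=
 * promote_threshold() + 4, a write needs promote_threshold() + 8, and a
 * write to a discarded block (when a free or clean cblock is available)
 * only needs to reach the discard adjustment of 1.
 */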
static unsigned adjusted_promote_threshold(struct mq_policy *mq,
					   bool discarded_oblock, int data_dir)
{
	if (data_dir == READ)
		return promote_threshold(mq) + mq->read_promote_adjustment;

	if (discarded_oblock && (any_free_cblocks(mq) || any_clean_cblocks(mq))) {
		/*
		 * We don't need to do any copying at all, so give this a
		 * very low threshold.
		 */
		return mq->discard_promote_adjustment;
	}

	return promote_threshold(mq) + mq->write_promote_adjustment;
}

static bool should_promote(struct mq_policy *mq, struct entry *e,
			   bool discarded_oblock, int data_dir)
{
	return e->hit_count >=
		adjusted_promote_threshold(mq, discarded_oblock, data_dir);
}

static int cache_entry_found(struct mq_policy *mq,
			     struct entry *e,
			     struct policy_result *result)
{
	requeue_and_update_tick(mq, e);

	if (in_cache(mq, e)) {
		result->op = POLICY_HIT;
		result->cblock = infer_cblock(&mq->cache_pool, e);
	}

	return 0;
}

/*
 * Moves an entry from the pre_cache to the cache. The main work is
 * finding which cache block to use.
 */
static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
			      struct policy_result *result)
{
	int r;
	struct entry *new_e;

	/* Ensure there's a free cblock in the cache */
	if (epool_empty(&mq->cache_pool)) {
		result->op = POLICY_REPLACE;
		r = demote_cblock(mq, &result->old_oblock);
		if (r) {
			result->op = POLICY_MISS;
			return 0;
		}
	} else
		result->op = POLICY_NEW;

	new_e = alloc_entry(&mq->cache_pool);
	BUG_ON(!new_e);

	new_e->oblock = e->oblock;
	new_e->dirty = false;
	new_e->hit_count = e->hit_count;
	new_e->generation = e->generation;
	new_e->tick = e->tick;

	del(mq, e);
	free_entry(&mq->pre_cache_pool, e);
	push(mq, new_e);

	result->cblock = infer_cblock(&mq->cache_pool, new_e);

	return 0;
}

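/*
 * A hit in the pre_cache has one of three outcomes: the entry is simply
 * requeued (POLICY_MISS), it is promoted to the cache proper via
 * pre_cache_to_cache(), or -EWOULDBLOCK is returned because a migration
 * is needed but can_migrate is false.
 */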
static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
				 bool can_migrate, bool discarded_oblock,
				 int data_dir, struct policy_result *result)
{
	int r = 0;
	bool updated = updated_this_tick(mq, e);

	if ((!discarded_oblock && updated) ||
	    !should_promote(mq, e, discarded_oblock, data_dir)) {
		requeue_and_update_tick(mq, e);
		result->op = POLICY_MISS;

	} else if (!can_migrate)
		r = -EWOULDBLOCK;

	else {
		requeue_and_update_tick(mq, e);
		r = pre_cache_to_cache(mq, e, result);
	}

	return r;
}

static void insert_in_pre_cache(struct mq_policy *mq,
				dm_oblock_t oblock)
{
	struct entry *e = alloc_entry(&mq->pre_cache_pool);

	if (!e)
		/*
		 * There's no spare entry structure, so we grab the least
		 * used one from the pre_cache.
		 */
		e = pop(mq, &mq->pre_cache);

	if (unlikely(!e)) {
		DMWARN("couldn't pop from pre cache");
		return;
	}

	e->dirty = false;
	e->oblock = oblock;
	e->hit_count = 1;
	e->generation = mq->generation;
	push(mq, e);
}

static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
			    struct policy_result *result)
{
	int r;
	struct entry *e;

	if (epool_empty(&mq->cache_pool)) {
		result->op = POLICY_REPLACE;
		r = demote_cblock(mq, &result->old_oblock);
		if (unlikely(r)) {
			result->op = POLICY_MISS;
			insert_in_pre_cache(mq, oblock);
			return;
		}

		/*
		 * This will always succeed, since we've just demoted.
		 */
		e = alloc_entry(&mq->cache_pool);
		BUG_ON(!e);

	} else {
		e = alloc_entry(&mq->cache_pool);
		result->op = POLICY_NEW;
	}

	e->oblock = oblock;
	e->dirty = false;
	e->hit_count = 1;
	e->generation = mq->generation;
	push(mq, e);

	result->cblock = infer_cblock(&mq->cache_pool, e);
}

static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock,
			  bool can_migrate, bool discarded_oblock,
			  int data_dir, struct policy_result *result)
{
	if (adjusted_promote_threshold(mq, discarded_oblock, data_dir) <= 1) {
		if (can_migrate)
			insert_in_cache(mq, oblock, result);
		else
			return -EWOULDBLOCK;
	} else {
		insert_in_pre_cache(mq, oblock);
		result->op = POLICY_MISS;
	}

	return 0;
}

/*
 * Looks the oblock up in the hash table, then decides whether to put it in
 * the pre_cache, the cache, etc.
 */
static int map(struct mq_policy *mq, dm_oblock_t oblock,
	       bool can_migrate, bool discarded_oblock,
	       int data_dir, struct policy_result *result)
{
	int r = 0;
	struct entry *e = hash_lookup(mq, oblock);

	if (e && in_cache(mq, e))
		r = cache_entry_found(mq, e, result);

	else if (mq->tracker.thresholds[PATTERN_SEQUENTIAL] &&
		 iot_pattern(&mq->tracker) == PATTERN_SEQUENTIAL)
		result->op = POLICY_MISS;

	else if (e)
		r = pre_cache_entry_found(mq, e, can_migrate, discarded_oblock,
					  data_dir, result);

	else
		r = no_entry_found(mq, oblock, can_migrate, discarded_oblock,
				   data_dir, result);

	if (r == -EWOULDBLOCK)
		result->op = POLICY_MISS;

	return r;
}

/*----------------------------------------------------------------*/

/*
 * Public interface, via the policy struct. See dm-cache-policy.h for a
 * description of these.
 */

static struct mq_policy *to_mq_policy(struct dm_cache_policy *p)
{
	return container_of(p, struct mq_policy, policy);
}

static void mq_destroy(struct dm_cache_policy *p)
{
	struct mq_policy *mq = to_mq_policy(p);

	vfree(mq->table);
	epool_exit(&mq->cache_pool);
	epool_exit(&mq->pre_cache_pool);
	kfree(mq);
}

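/*
 * Fold the externally incremented tick (see mq_tick() below) into
 * mq->tick.  Called at the start of mq_map() so that updated_this_tick()
 * can avoid counting multiple hits within the same tick.
 */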
static void copy_tick(struct mq_policy *mq)
{
	unsigned long flags;

	spin_lock_irqsave(&mq->tick_lock, flags);
	mq->tick = mq->tick_protected;
	spin_unlock_irqrestore(&mq->tick_lock, flags);
}

static int mq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
		  bool can_block, bool can_migrate, bool discarded_oblock,
		  struct bio *bio, struct policy_result *result)
{
	int r;
	struct mq_policy *mq = to_mq_policy(p);

	result->op = POLICY_MISS;

	if (can_block)
		mutex_lock(&mq->lock);
	else if (!mutex_trylock(&mq->lock))
		return -EWOULDBLOCK;

	copy_tick(mq);

	iot_examine_bio(&mq->tracker, bio);
	r = map(mq, oblock, can_migrate, discarded_oblock,
		bio_data_dir(bio), result);

	mutex_unlock(&mq->lock);

	return r;
}

static int mq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
{
	int r;
	struct mq_policy *mq = to_mq_policy(p);
	struct entry *e;

	if (!mutex_trylock(&mq->lock))
		return -EWOULDBLOCK;

	e = hash_lookup(mq, oblock);
	if (e && in_cache(mq, e)) {
		*cblock = infer_cblock(&mq->cache_pool, e);
		r = 0;
	} else
		r = -ENOENT;

	mutex_unlock(&mq->lock);

	return r;
}

static void __mq_set_clear_dirty(struct mq_policy *mq, dm_oblock_t oblock, bool set)
{
	struct entry *e;

	e = hash_lookup(mq, oblock);
	BUG_ON(!e || !in_cache(mq, e));

	del(mq, e);
	e->dirty = set;
	push(mq, e);
}

static void mq_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	__mq_set_clear_dirty(mq, oblock, true);
	mutex_unlock(&mq->lock);
}

static void mq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	__mq_set_clear_dirty(mq, oblock, false);
	mutex_unlock(&mq->lock);
}

static int mq_load_mapping(struct dm_cache_policy *p,
			   dm_oblock_t oblock, dm_cblock_t cblock,
			   uint32_t hint, bool hint_valid)
{
	struct mq_policy *mq = to_mq_policy(p);
	struct entry *e;

	e = alloc_particular_entry(&mq->cache_pool, cblock);
	e->oblock = oblock;
	e->dirty = false;	/* this gets corrected in a minute */
	e->hit_count = hint_valid ? hint : 1;
	e->generation = mq->generation;
	push(mq, e);

	return 0;
}

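/*
 * The per-block hint persisted by the core target is simply the entry's
 * hit count (hence hint_size = 4 in the policy types below); it is
 * restored in mq_load_mapping() above.
 */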
static int mq_save_hints(struct mq_policy *mq, struct queue *q,
			 policy_walk_fn fn, void *context)
{
	int r;
	unsigned level;
	struct entry *e;

	for (level = 0; level < NR_QUEUE_LEVELS; level++)
		list_for_each_entry(e, q->qs + level, list) {
			r = fn(context, infer_cblock(&mq->cache_pool, e),
			       e->oblock, e->hit_count);
			if (r)
				return r;
		}

	return 0;
}

static int mq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
			    void *context)
{
	struct mq_policy *mq = to_mq_policy(p);
	int r = 0;

	mutex_lock(&mq->lock);

	r = mq_save_hints(mq, &mq->cache_clean, fn, context);
	if (!r)
		r = mq_save_hints(mq, &mq->cache_dirty, fn, context);

	mutex_unlock(&mq->lock);

	return r;
}

static void __remove_mapping(struct mq_policy *mq, dm_oblock_t oblock)
{
	struct entry *e;

	e = hash_lookup(mq, oblock);
	BUG_ON(!e || !in_cache(mq, e));

	del(mq, e);
	free_entry(&mq->cache_pool, e);
}

static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	__remove_mapping(mq, oblock);
	mutex_unlock(&mq->lock);
}

static int __remove_cblock(struct mq_policy *mq, dm_cblock_t cblock)
{
	struct entry *e = epool_find(&mq->cache_pool, cblock);

	if (!e)
		return -ENODATA;

	del(mq, e);
	free_entry(&mq->cache_pool, e);

	return 0;
}

static int mq_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock)
{
	int r;
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	r = __remove_cblock(mq, cblock);
	mutex_unlock(&mq->lock);

	return r;
}

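/*
 * Hand the least recently used dirty block back to the core target for
 * writeback, and requeue it as clean.
 */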
static int __mq_writeback_work(struct mq_policy *mq, dm_oblock_t *oblock,
			       dm_cblock_t *cblock)
{
	struct entry *e = pop(mq, &mq->cache_dirty);

	if (!e)
		return -ENODATA;

	*oblock = e->oblock;
	*cblock = infer_cblock(&mq->cache_pool, e);
	e->dirty = false;
	push(mq, e);

	return 0;
}

static int mq_writeback_work(struct dm_cache_policy *p, dm_oblock_t *oblock,
			     dm_cblock_t *cblock)
{
	int r;
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	r = __mq_writeback_work(mq, oblock, cblock);
	mutex_unlock(&mq->lock);

	return r;
}

static void __force_mapping(struct mq_policy *mq,
			    dm_oblock_t current_oblock, dm_oblock_t new_oblock)
{
	struct entry *e = hash_lookup(mq, current_oblock);

	if (e && in_cache(mq, e)) {
		del(mq, e);
		e->oblock = new_oblock;
		e->dirty = true;
		push(mq, e);
	}
}

static void mq_force_mapping(struct dm_cache_policy *p,
			     dm_oblock_t current_oblock, dm_oblock_t new_oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	__force_mapping(mq, current_oblock, new_oblock);
	mutex_unlock(&mq->lock);
}

static dm_cblock_t mq_residency(struct dm_cache_policy *p)
{
	dm_cblock_t r;
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	r = to_cblock(mq->cache_pool.nr_allocated);
	mutex_unlock(&mq->lock);

	return r;
}

static void mq_tick(struct dm_cache_policy *p)
{
	struct mq_policy *mq = to_mq_policy(p);
	unsigned long flags;

	spin_lock_irqsave(&mq->tick_lock, flags);
	mq->tick_protected++;
	spin_unlock_irqrestore(&mq->tick_lock, flags);
}

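/*
 * These keys can be tuned at run time through the target's message
 * interface, e.g. (illustrative syntax):
 *
 *	dmsetup message <cache-device> 0 sequential_threshold 1024
 */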
static int mq_set_config_value(struct dm_cache_policy *p,
			       const char *key, const char *value)
{
	struct mq_policy *mq = to_mq_policy(p);
	unsigned long tmp;

	if (kstrtoul(value, 10, &tmp))
		return -EINVAL;

	if (!strcasecmp(key, "random_threshold")) {
		mq->tracker.thresholds[PATTERN_RANDOM] = tmp;

	} else if (!strcasecmp(key, "sequential_threshold")) {
		mq->tracker.thresholds[PATTERN_SEQUENTIAL] = tmp;

	} else if (!strcasecmp(key, "discard_promote_adjustment"))
		mq->discard_promote_adjustment = tmp;

	else if (!strcasecmp(key, "read_promote_adjustment"))
		mq->read_promote_adjustment = tmp;

	else if (!strcasecmp(key, "write_promote_adjustment"))
		mq->write_promote_adjustment = tmp;

	else
		return -EINVAL;

	return 0;
}

static int mq_emit_config_values(struct dm_cache_policy *p, char *result, unsigned maxlen)
{
	ssize_t sz = 0;
	struct mq_policy *mq = to_mq_policy(p);

	DMEMIT("10 random_threshold %u "
	       "sequential_threshold %u "
	       "discard_promote_adjustment %u "
	       "read_promote_adjustment %u "
	       "write_promote_adjustment %u",
	       mq->tracker.thresholds[PATTERN_RANDOM],
	       mq->tracker.thresholds[PATTERN_SEQUENTIAL],
	       mq->discard_promote_adjustment,
	       mq->read_promote_adjustment,
	       mq->write_promote_adjustment);

	return 0;
}

/* Init the policy plugin interface function pointers. */
static void init_policy_functions(struct mq_policy *mq)
{
	mq->policy.destroy = mq_destroy;
	mq->policy.map = mq_map;
	mq->policy.lookup = mq_lookup;
	mq->policy.set_dirty = mq_set_dirty;
	mq->policy.clear_dirty = mq_clear_dirty;
	mq->policy.load_mapping = mq_load_mapping;
	mq->policy.walk_mappings = mq_walk_mappings;
	mq->policy.remove_mapping = mq_remove_mapping;
	mq->policy.remove_cblock = mq_remove_cblock;
	mq->policy.writeback_work = mq_writeback_work;
	mq->policy.force_mapping = mq_force_mapping;
	mq->policy.residency = mq_residency;
	mq->policy.tick = mq_tick;
	mq->policy.emit_config_values = mq_emit_config_values;
	mq->policy.set_config_value = mq_set_config_value;
}

static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
					 sector_t origin_size,
					 sector_t cache_block_size)
{
	struct mq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);

	if (!mq)
		return NULL;

	init_policy_functions(mq);
	iot_init(&mq->tracker, SEQUENTIAL_THRESHOLD_DEFAULT, RANDOM_THRESHOLD_DEFAULT);
	mq->cache_size = cache_size;

	if (epool_init(&mq->pre_cache_pool, from_cblock(cache_size))) {
		DMERR("couldn't initialize pool of pre-cache entries");
		goto bad_pre_cache_init;
	}

	if (epool_init(&mq->cache_pool, from_cblock(cache_size))) {
		DMERR("couldn't initialize pool of cache entries");
		goto bad_cache_init;
	}

	mq->tick_protected = 0;
	mq->tick = 0;
	mq->hit_count = 0;
	mq->generation = 0;
	mq->discard_promote_adjustment = DEFAULT_DISCARD_PROMOTE_ADJUSTMENT;
	mq->read_promote_adjustment = DEFAULT_READ_PROMOTE_ADJUSTMENT;
	mq->write_promote_adjustment = DEFAULT_WRITE_PROMOTE_ADJUSTMENT;
	mutex_init(&mq->lock);
	spin_lock_init(&mq->tick_lock);

	queue_init(&mq->pre_cache);
	queue_init(&mq->cache_clean);
	queue_init(&mq->cache_dirty);

	mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U);

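	/*
	 * Roughly one hash bucket per two cache blocks, with a floor of 16
	 * buckets.  nr_buckets is a power of two, so ffs() - 1 gives its
	 * log2, which hash_64() uses as the number of significant bits.
	 */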
	mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
	mq->hash_bits = ffs(mq->nr_buckets) - 1;
	mq->table = vzalloc(sizeof(*mq->table) * mq->nr_buckets);
	if (!mq->table)
		goto bad_alloc_table;

	return &mq->policy;

bad_alloc_table:
	epool_exit(&mq->cache_pool);
bad_cache_init:
	epool_exit(&mq->pre_cache_pool);
bad_pre_cache_init:
	kfree(mq);

	return NULL;
}

/*----------------------------------------------------------------*/

static struct dm_cache_policy_type mq_policy_type = {
	.name = "mq",
	.version = {1, 3, 0},
	.hint_size = 4,
	.owner = THIS_MODULE,
	.create = mq_create
};

static struct dm_cache_policy_type default_policy_type = {
	.name = "default",
	.version = {1, 3, 0},
	.hint_size = 4,
	.owner = THIS_MODULE,
	.create = mq_create,
	.real = &mq_policy_type
};

static int __init mq_init(void)
{
	int r;

	mq_entry_cache = kmem_cache_create("dm_mq_policy_cache_entry",
					   sizeof(struct entry),
					   __alignof__(struct entry),
					   0, NULL);
	if (!mq_entry_cache)
		goto bad;

	r = dm_cache_policy_register(&mq_policy_type);
	if (r) {
		DMERR("register failed %d", r);
		goto bad_register_mq;
	}

	r = dm_cache_policy_register(&default_policy_type);
	if (!r) {
		DMINFO("version %u.%u.%u loaded",
		       mq_policy_type.version[0],
		       mq_policy_type.version[1],
		       mq_policy_type.version[2]);
		return 0;
	}

	DMERR("register failed (as default) %d", r);

	dm_cache_policy_unregister(&mq_policy_type);
bad_register_mq:
	kmem_cache_destroy(mq_entry_cache);
bad:
	return -ENOMEM;
}

static void __exit mq_exit(void)
{
	dm_cache_policy_unregister(&mq_policy_type);
	dm_cache_policy_unregister(&default_policy_type);

	kmem_cache_destroy(mq_entry_cache);
}

module_init(mq_init);
module_exit(mq_exit);

MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("mq cache policy");

MODULE_ALIAS("dm-cache-default");