/*
 * Copyright (C) 2009-2011 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * This file is released under the GPL.
 */

#include "dm-bufio.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/shrinker.h>
#include <linux/module.h>
#include <linux/rbtree.h>

#define DM_MSG_PREFIX "bufio"

/*
 * Memory management policy:
 *	Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
 *	or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
 *	Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
 *	Start background writeback when there are DM_BUFIO_WRITEBACK_PERCENT
 *	dirty buffers.
 */
#define DM_BUFIO_MIN_BUFFERS		8

#define DM_BUFIO_MEMORY_PERCENT		2
#define DM_BUFIO_VMALLOC_PERCENT	25
#define DM_BUFIO_WRITEBACK_PERCENT	75
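
/*
 * Worked example of the policy above (illustrative numbers, not taken from
 * the code): on a machine with 4 GiB of low memory, DM_BUFIO_MEMORY_PERCENT
 * yields 4 GiB * 2 / 100 = ~82 MiB as the default cache size; if the vmalloc
 * arena is 128 MiB, DM_BUFIO_VMALLOC_PERCENT caps that at
 * 128 MiB * 25 / 100 = 32 MiB, and the lower value (32 MiB) wins. The limit
 * is then split evenly between clients, but each client may always hold at
 * least DM_BUFIO_MIN_BUFFERS buffers regardless of the split.
 */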

/*
 * Check buffer ages in this interval (seconds)
 */
#define DM_BUFIO_WORK_TIMER_SECS	10

/*
 * Free buffers when they are older than this (seconds)
 */
#define DM_BUFIO_DEFAULT_AGE_SECS	60

/*
 * The number of bvec entries that are embedded directly in the buffer.
 * If the chunk size is larger, dm-io is used to do the io.
 */
#define DM_BUFIO_INLINE_VECS		16

/*
 * Don't try to use kmem_cache_alloc for blocks larger than this.
 * For explanation, see alloc_buffer_data below.
 */
#define DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT	(PAGE_SIZE >> 1)
#define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT	(PAGE_SIZE << (MAX_ORDER - 1))

/*
 * dm_buffer->list_mode
 */
#define LIST_CLEAN	0
#define LIST_DIRTY	1
#define LIST_SIZE	2

/*
 * Linking of buffers:
 *	All buffers are linked into the buffer tree with their node field.
 *
 *	Clean buffers that are not being written (B_WRITING not set)
 *	are linked to lru[LIST_CLEAN] with their lru_list field.
 *
 *	Dirty and clean buffers that are being written are linked to
 *	lru[LIST_DIRTY] with their lru_list field. When the write
 *	finishes, the buffer cannot be relinked immediately (because we
 *	are in an interrupt context and relinking requires process
 *	context), so some clean-not-writing buffers can be held on the
 *	dirty list too. They are later moved to the clean list in
 *	process context.
 */
struct dm_bufio_client {
	struct mutex lock;

	struct list_head lru[LIST_SIZE];
	unsigned long n_buffers[LIST_SIZE];

	struct block_device *bdev;
	unsigned block_size;
	unsigned char sectors_per_block_bits;
	unsigned char pages_per_block_bits;
	unsigned char blocks_per_page_bits;
	unsigned aux_size;
	void (*alloc_callback)(struct dm_buffer *);
	void (*write_callback)(struct dm_buffer *);

	struct dm_io_client *dm_io;

	struct list_head reserved_buffers;
	unsigned need_reserved_buffers;

	unsigned minimum_buffers;

	struct rb_root buffer_tree;
	wait_queue_head_t free_buffer_wait;

	int async_write_error;

	struct list_head client_list;
	struct shrinker shrinker;
};

/*
 * Buffer state bits.
 */
#define B_READING	0
#define B_WRITING	1
#define B_DIRTY		2

/*
 * Describes how the block was allocated:
 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
 * See the comment at alloc_buffer_data.
 */
enum data_mode {
	DATA_MODE_SLAB = 0,
	DATA_MODE_GET_FREE_PAGES = 1,
	DATA_MODE_VMALLOC = 2,
	DATA_MODE_LIMIT = 3
};

struct dm_buffer {
	struct rb_node node;
	struct list_head lru_list;
	sector_t block;
	void *data;
	enum data_mode data_mode;
	unsigned char list_mode;		/* LIST_* */
	unsigned hold_count;
	int read_error;
	int write_error;
	unsigned long state;
	unsigned long last_accessed;
	struct dm_bufio_client *c;
	struct list_head write_list;
	struct bio bio;
	struct bio_vec bio_vec[DM_BUFIO_INLINE_VECS];
};

/*----------------------------------------------------------------*/

static struct kmem_cache *dm_bufio_caches[PAGE_SHIFT - SECTOR_SHIFT];
static char *dm_bufio_cache_names[PAGE_SHIFT - SECTOR_SHIFT];

static inline int dm_bufio_cache_index(struct dm_bufio_client *c)
{
	unsigned ret = c->blocks_per_page_bits - 1;

	BUG_ON(ret >= ARRAY_SIZE(dm_bufio_caches));

	return ret;
}

#define DM_BUFIO_CACHE(c)	(dm_bufio_caches[dm_bufio_cache_index(c)])
#define DM_BUFIO_CACHE_NAME(c)	(dm_bufio_cache_names[dm_bufio_cache_index(c)])

#define dm_bufio_in_request()	(!!current->bio_list)

static void dm_bufio_lock(struct dm_bufio_client *c)
{
	mutex_lock_nested(&c->lock, dm_bufio_in_request());
}

static int dm_bufio_trylock(struct dm_bufio_client *c)
{
	return mutex_trylock(&c->lock);
}

static void dm_bufio_unlock(struct dm_bufio_client *c)
{
	mutex_unlock(&c->lock);
}

/*
 * FIXME Move to sched.h?
 */
#ifdef CONFIG_PREEMPT_VOLUNTARY
#  define dm_bufio_cond_resched()		\
do {						\
	if (unlikely(need_resched()))		\
		_cond_resched();		\
} while (0)
#else
#  define dm_bufio_cond_resched()	do { } while (0)
#endif

/*----------------------------------------------------------------*/

/*
 * Default cache size: available memory divided by the ratio.
 */
static unsigned long dm_bufio_default_cache_size;

/*
 * Total cache size set by the user.
 */
static unsigned long dm_bufio_cache_size;

/*
 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
 * at any time. If it disagrees, the user has changed the cache size.
 */
static unsigned long dm_bufio_cache_size_latch;

static DEFINE_SPINLOCK(param_spinlock);

/*
 * Buffers are freed after this timeout
 */
static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;

static unsigned long dm_bufio_peak_allocated;
static unsigned long dm_bufio_allocated_kmem_cache;
static unsigned long dm_bufio_allocated_get_free_pages;
static unsigned long dm_bufio_allocated_vmalloc;
static unsigned long dm_bufio_current_allocated;

/*----------------------------------------------------------------*/

/*
 * Per-client cache: dm_bufio_cache_size / dm_bufio_client_count
 */
static unsigned long dm_bufio_cache_size_per_client;

/*
 * The current number of clients.
 */
static int dm_bufio_client_count;

/*
 * The list of all clients.
 */
static LIST_HEAD(dm_bufio_all_clients);

/*
 * This mutex protects dm_bufio_cache_size_latch,
 * dm_bufio_cache_size_per_client and dm_bufio_client_count
 */
static DEFINE_MUTEX(dm_bufio_clients_lock);

/*----------------------------------------------------------------
 * A red/black tree acts as an index for all the buffers.
 *--------------------------------------------------------------*/
static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
{
	struct rb_node *n = c->buffer_tree.rb_node;
	struct dm_buffer *b;

	while (n) {
		b = container_of(n, struct dm_buffer, node);

		if (b->block == block)
			return b;

		n = (b->block < block) ? n->rb_left : n->rb_right;
	}

	return NULL;
}

static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
{
	struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
	struct dm_buffer *found;

	while (*new) {
		found = container_of(*new, struct dm_buffer, node);

		if (found->block == b->block) {
			BUG_ON(found != b);
			return;
		}

		parent = *new;
		new = (found->block < b->block) ?
			&((*new)->rb_left) : &((*new)->rb_right);
	}

	rb_link_node(&b->node, parent, new);
	rb_insert_color(&b->node, &c->buffer_tree);
}

static void __remove(struct dm_bufio_client *c, struct dm_buffer *b)
{
	rb_erase(&b->node, &c->buffer_tree);
}

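/*
 * A minimal sketch of how these helpers fit together (illustrative only,
 * not compiled): the tree is keyed by block number and every access must
 * happen under the client mutex, because rb_insert_color() and rb_erase()
 * rebalance the tree. Note that the comparison is inverted relative to the
 * usual convention (larger block numbers descend to the left), but __find
 * and __insert use the same test, so lookups stay consistent.
 */
#if 0
static struct dm_buffer *example_lookup(struct dm_bufio_client *c,
					sector_t block)
{
	struct dm_buffer *b;

	dm_bufio_lock(c);
	b = __find(c, block);	/* NULL if the block is not cached */
	dm_bufio_unlock(c);

	return b;
}
#endif
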
/*----------------------------------------------------------------*/

static void adjust_total_allocated(enum data_mode data_mode, long diff)
{
	static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
		&dm_bufio_allocated_kmem_cache,
		&dm_bufio_allocated_get_free_pages,
		&dm_bufio_allocated_vmalloc,
	};

	spin_lock(&param_spinlock);

	*class_ptr[data_mode] += diff;

	dm_bufio_current_allocated += diff;

	if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
		dm_bufio_peak_allocated = dm_bufio_current_allocated;

	spin_unlock(&param_spinlock);
}

/*
 * Change the number of clients and recalculate per-client limit.
 */
static void __cache_size_refresh(void)
{
	BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
	BUG_ON(dm_bufio_client_count < 0);

	dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size);

	/*
	 * Use default if set to 0 and report the actual cache size used.
	 */
	if (!dm_bufio_cache_size_latch) {
		(void)cmpxchg(&dm_bufio_cache_size, 0,
			      dm_bufio_default_cache_size);
		dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
	}

	dm_bufio_cache_size_per_client = dm_bufio_cache_size_latch /
					 (dm_bufio_client_count ? : 1);
}

/*
 * Allocating buffer data.
 *
 * Small buffers are allocated with kmem_cache, to use space optimally.
 *
 * For large buffers, we choose between get_free_pages and vmalloc.
 * Each has advantages and disadvantages.
 *
 * __get_free_pages can randomly fail if the memory is fragmented.
 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
 * as low as 128M) so using it for caching is not appropriate.
 *
 * If the allocation may fail, we use __get_free_pages. Memory fragmentation
 * won't have a fatal effect here; it merely causes flushes of some other
 * buffers and more I/O to be performed. Don't use __get_free_pages if it
 * always fails (i.e. order >= MAX_ORDER).
 *
 * If the allocation shouldn't fail, we use __vmalloc. This is only for the
 * initial reserve allocation, so there's no risk of wasting all vmalloc
 * space.
 */
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
			       enum data_mode *data_mode)
{
	unsigned noio_flag;
	void *ptr;

	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
		*data_mode = DATA_MODE_SLAB;
		return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
	}

	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_GFP_LIMIT &&
	    gfp_mask & __GFP_NORETRY) {
		*data_mode = DATA_MODE_GET_FREE_PAGES;
		return (void *)__get_free_pages(gfp_mask,
						c->pages_per_block_bits);
	}

	*data_mode = DATA_MODE_VMALLOC;

	/*
	 * __vmalloc allocates the data pages and auxiliary structures with
	 * gfp_flags that were specified, but pagetables are always allocated
	 * with GFP_KERNEL, no matter what was specified as gfp_mask.
	 *
	 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
	 * all allocations done by this process (including pagetables) are done
	 * as if GFP_NOIO was specified.
	 */

	if (gfp_mask & __GFP_NORETRY)
		noio_flag = memalloc_noio_save();

	ptr = __vmalloc(c->block_size, gfp_mask | __GFP_HIGHMEM, PAGE_KERNEL);

	if (gfp_mask & __GFP_NORETRY)
		memalloc_noio_restore(noio_flag);

	return ptr;
}
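
/*
 * To make the decision above concrete (illustrative figures, assuming
 * 4 KiB pages and MAX_ORDER = 11): a 512-byte block is at most
 * PAGE_SIZE / 2 = 2 KiB, so it comes from the slab cache (DATA_MODE_SLAB);
 * a 64 KiB block requested with __GFP_NORETRY is served by
 * __get_free_pages (DATA_MODE_GET_FREE_PAGES); a block larger than
 * PAGE_SIZE << (MAX_ORDER - 1) = 4 MiB, or any block above the slab limit
 * whose allocation must not fail (no __GFP_NORETRY), falls through to
 * __vmalloc (DATA_MODE_VMALLOC).
 */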

/*
 * Free buffer's data.
 */
static void free_buffer_data(struct dm_bufio_client *c,
			     void *data, enum data_mode data_mode)
{
	switch (data_mode) {
	case DATA_MODE_SLAB:
		kmem_cache_free(DM_BUFIO_CACHE(c), data);
		break;

	case DATA_MODE_GET_FREE_PAGES:
		free_pages((unsigned long)data, c->pages_per_block_bits);
		break;

	case DATA_MODE_VMALLOC:
		vfree(data);
		break;

	default:
		DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
		       data_mode);
		BUG();
	}
}

/*
 * Allocate buffer and its data.
 */
static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
{
	struct dm_buffer *b = kmalloc(sizeof(struct dm_buffer) + c->aux_size,
				      gfp_mask);

	if (!b)
		return NULL;

	b->c = c;

	b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
	if (!b->data) {
		kfree(b);
		return NULL;
	}

	adjust_total_allocated(b->data_mode, (long)c->block_size);

	return b;
}

/*
 * Free buffer and its data.
 */
static void free_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	adjust_total_allocated(b->data_mode, -(long)c->block_size);

	free_buffer_data(c, b->data, b->data_mode);
	kfree(b);
}

/*
 * Link buffer to the buffer tree and clean or dirty queue.
 */
static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
{
	struct dm_bufio_client *c = b->c;

	c->n_buffers[dirty]++;
	b->block = block;
	b->list_mode = dirty;
	list_add(&b->lru_list, &c->lru[dirty]);
	__insert(b->c, b);
	b->last_accessed = jiffies;
}

/*
 * Unlink buffer from the buffer tree and dirty or clean queue.
 */
static void __unlink_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	__remove(b->c, b);
	list_del(&b->lru_list);
}

/*
 * Place the buffer at the head of the dirty or clean LRU queue.
 */
static void __relink_lru(struct dm_buffer *b, int dirty)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	c->n_buffers[dirty]++;
	b->list_mode = dirty;
	list_move(&b->lru_list, &c->lru[dirty]);
	b->last_accessed = jiffies;
}

/*----------------------------------------------------------------
 * Submit I/O on the buffer.
 *
 * The bio interface is faster but it has some problems:
 *	the vector list is limited (increasing this limit increases
 *	memory consumption per buffer, so it is not viable);
 *
 *	the memory must be direct-mapped, not vmalloced;
 *
 *	the I/O driver can reject requests spuriously if it thinks that
 *	the requests are too big for the device or if they cross a
 *	controller-defined memory boundary.
 *
 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
 * it is not vmalloced, try using the bio interface.
 *
 * If the buffer is big, if it is vmalloced or if the underlying device
 * rejects the bio because it is too large, use the dm-io layer to do the
 * I/O. The dm-io layer splits the I/O into multiple requests, avoiding
 * the above shortcomings.
 *--------------------------------------------------------------*/

/*
 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
 * that the request was handled directly with the bio interface.
 */
static void dmio_complete(unsigned long error, void *context)
{
	struct dm_buffer *b = context;

	b->bio.bi_end_io(&b->bio, error ? -EIO : 0);
}

static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
		     bio_end_io_t *end_io)
{
	int r;
	struct dm_io_request io_req = {
		.bi_rw = rw,
		.notify.fn = dmio_complete,
		.notify.context = b,
		.client = b->c->dm_io,
	};
	struct dm_io_region region = {
		.bdev = b->c->bdev,
		.sector = block << b->c->sectors_per_block_bits,
		.count = b->c->block_size >> SECTOR_SHIFT,
	};

	if (b->data_mode != DATA_MODE_VMALLOC) {
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = b->data;
	} else {
		io_req.mem.type = DM_IO_VMA;
		io_req.mem.ptr.vma = b->data;
	}

	b->bio.bi_end_io = end_io;

	r = dm_io(&io_req, 1, &region, NULL);
	if (r)
		end_io(&b->bio, r);
}

static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
			   bio_end_io_t *end_io)
{
	char *ptr;
	int len;

	bio_init(&b->bio);
	b->bio.bi_io_vec = b->bio_vec;
	b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
	b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
	b->bio.bi_bdev = b->c->bdev;
	b->bio.bi_end_io = end_io;

	/*
	 * We assume that if len >= PAGE_SIZE, ptr is page-aligned.
	 * If len < PAGE_SIZE, the buffer doesn't cross a page boundary.
	 */
	ptr = b->data;
	len = b->c->block_size;

	if (len >= PAGE_SIZE)
		BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1));
	else
		BUG_ON((unsigned long)ptr & (len - 1));

	do {
		if (!bio_add_page(&b->bio, virt_to_page(ptr),
				  len < PAGE_SIZE ? len : PAGE_SIZE,
				  virt_to_phys(ptr) & (PAGE_SIZE - 1))) {
			BUG_ON(b->c->block_size <= PAGE_SIZE);
			use_dmio(b, rw, block, end_io);
			return;
		}

		len -= PAGE_SIZE;
		ptr += PAGE_SIZE;
	} while (len > 0);

	submit_bio(rw, &b->bio);
}

static void submit_io(struct dm_buffer *b, int rw, sector_t block,
		      bio_end_io_t *end_io)
{
	if (rw == WRITE && b->c->write_callback)
		b->c->write_callback(b);

	if (b->c->block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE &&
	    b->data_mode != DATA_MODE_VMALLOC)
		use_inline_bio(b, rw, block, end_io);
	else
		use_dmio(b, rw, block, end_io);
}

/*----------------------------------------------------------------
 * Writing dirty buffers
 *--------------------------------------------------------------*/

/*
 * The endio routine for write.
 *
 * Set the error, clear the B_WRITING bit and wake anyone who was waiting
 * on it.
 */
static void write_endio(struct bio *bio, int error)
{
	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);

	b->write_error = error;
	if (unlikely(error)) {
		struct dm_bufio_client *c = b->c;
		(void)cmpxchg(&c->async_write_error, 0, error);
	}

	BUG_ON(!test_bit(B_WRITING, &b->state));

	smp_mb__before_atomic();
	clear_bit(B_WRITING, &b->state);
	smp_mb__after_atomic();

	wake_up_bit(&b->state, B_WRITING);
}

/*
 * Initiate a write on a dirty buffer, but don't wait for it.
 *
 * - If the buffer is not dirty, exit.
 * - If there is some previous write going on, wait for it to finish (we
 *   can't have two writes on the same buffer simultaneously).
 * - Submit our write and don't wait on it. We set B_WRITING indicating
 *   that there is a write in progress.
 */
static void __write_dirty_buffer(struct dm_buffer *b,
				 struct list_head *write_list)
{
	if (!test_bit(B_DIRTY, &b->state))
		return;

	clear_bit(B_DIRTY, &b->state);
	wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);

	if (!write_list)
		submit_io(b, WRITE, b->block, write_endio);
	else
		list_add_tail(&b->write_list, write_list);
}

static void __flush_write_list(struct list_head *write_list)
{
	struct blk_plug plug;
	blk_start_plug(&plug);
	while (!list_empty(write_list)) {
		struct dm_buffer *b =
			list_entry(write_list->next, struct dm_buffer, write_list);
		list_del(&b->write_list);
		submit_io(b, WRITE, b->block, write_endio);
		dm_bufio_cond_resched();
	}
	blk_finish_plug(&plug);
}
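
/*
 * A minimal sketch of the deferred-write pattern used throughout this file
 * (illustrative only, not compiled): callers queue writes on a local list
 * while holding the client mutex, then drop the mutex and submit everything
 * at once under a block plug so the block layer can merge the requests.
 */
#if 0
static void example_writeback(struct dm_bufio_client *c)
{
	LIST_HEAD(write_list);

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0, &write_list);	/* queue only */
	dm_bufio_unlock(c);
	__flush_write_list(&write_list);	/* submit without the lock */
}
#endif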

/*
 * Wait until any activity on the buffer finishes. Possibly write the
 * buffer if it is dirty. When this function finishes, there is no I/O
 * running on the buffer and the buffer is not dirty.
 */
static void __make_buffer_clean(struct dm_buffer *b)
{
	BUG_ON(b->hold_count);

	if (!b->state)	/* fast case */
		return;

	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
	__write_dirty_buffer(b, NULL);
	wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
}

/*
 * Find some buffer that is not held by anybody, clean it, unlink it and
 * return it.
 */
static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
{
	struct dm_buffer *b;

	list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
		BUG_ON(test_bit(B_WRITING, &b->state));
		BUG_ON(test_bit(B_DIRTY, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		dm_bufio_cond_resched();
	}

	list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		dm_bufio_cond_resched();
	}

	return NULL;
}

/*
 * Wait until some other thread frees a buffer or releases its hold count
 * on one.
 *
 * This function is entered with c->lock held, drops it and regains it
 * before exiting.
 */
static void __wait_for_free_buffer(struct dm_bufio_client *c)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&c->free_buffer_wait, &wait);
	set_task_state(current, TASK_UNINTERRUPTIBLE);
	dm_bufio_unlock(c);

	io_schedule();

	set_task_state(current, TASK_RUNNING);
	remove_wait_queue(&c->free_buffer_wait, &wait);

	dm_bufio_lock(c);
}

enum new_flag {
	NF_FRESH = 0,
	NF_READ = 1,
	NF_GET = 2,
	NF_PREFETCH = 3
};

/*
 * Allocate a new buffer. If the allocation is not possible, wait until
 * some other thread frees a buffer.
 *
 * May drop the lock and regain it.
 */
static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b;

	/*
	 * dm-bufio is resistant to allocation failures (it just keeps
	 * one buffer reserved in case all the allocations fail).
	 * So set flags to not try too hard:
	 *	GFP_NOIO: don't recurse into the I/O layer
	 *	__GFP_NORETRY: don't retry and rather return failure
	 *	__GFP_NOMEMALLOC: don't use emergency reserves
	 *	__GFP_NOWARN: don't print a warning in case of failure
	 *
	 * For debugging, if we set the cache size to 1, no new buffers will
	 * be allocated.
	 */
	while (1) {
		if (dm_bufio_cache_size_latch != 1) {
			b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (b)
				return b;
		}

		if (nf == NF_PREFETCH)
			return NULL;

		if (!list_empty(&c->reserved_buffers)) {
			b = list_entry(c->reserved_buffers.next,
				       struct dm_buffer, lru_list);
			list_del(&b->lru_list);
			c->need_reserved_buffers++;

			return b;
		}

		b = __get_unclaimed_buffer(c);
		if (b)
			return b;

		__wait_for_free_buffer(c);
	}
}

static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);

	if (!b)
		return NULL;

	if (c->alloc_callback)
		c->alloc_callback(b);

	return b;
}

/*
 * Free a buffer and wake other threads waiting for free buffers.
 */
static void __free_buffer_wake(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	if (!c->need_reserved_buffers)
		free_buffer(b);
	else {
		list_add(&b->lru_list, &c->reserved_buffers);
		c->need_reserved_buffers--;
	}

	wake_up(&c->free_buffer_wait);
}

static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
					struct list_head *write_list)
{
	struct dm_buffer *b, *tmp;

	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state)) {
			__relink_lru(b, LIST_CLEAN);
			continue;
		}

		if (no_wait && test_bit(B_WRITING, &b->state))
			return;

		__write_dirty_buffer(b, write_list);
		dm_bufio_cond_resched();
	}
}

/*
 * Get the writeback threshold and buffer limit for a given client.
 */
static void __get_memory_limit(struct dm_bufio_client *c,
			       unsigned long *threshold_buffers,
			       unsigned long *limit_buffers)
{
	unsigned long buffers;

	if (ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch) {
		mutex_lock(&dm_bufio_clients_lock);
		__cache_size_refresh();
		mutex_unlock(&dm_bufio_clients_lock);
	}

	buffers = dm_bufio_cache_size_per_client >>
		  (c->sectors_per_block_bits + SECTOR_SHIFT);

	if (buffers < c->minimum_buffers)
		buffers = c->minimum_buffers;

	*limit_buffers = buffers;
	*threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
}

/*
 * Check if we're over the watermark.
 * If we are over threshold_buffers, start freeing buffers.
 * If we're over "limit_buffers", block until we get under the limit.
 */
static void __check_watermark(struct dm_bufio_client *c,
			      struct list_head *write_list)
{
	unsigned long threshold_buffers, limit_buffers;

	__get_memory_limit(c, &threshold_buffers, &limit_buffers);

	while (c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY] >
	       limit_buffers) {

		struct dm_buffer *b = __get_unclaimed_buffer(c);

		if (!b)
			return;

		__free_buffer_wake(b);
		dm_bufio_cond_resched();
	}

	if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
		__write_dirty_buffers_async(c, 1, write_list);
}

/*----------------------------------------------------------------
 * Getting a buffer
 *--------------------------------------------------------------*/

static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
				     enum new_flag nf, int *need_submit,
				     struct list_head *write_list)
{
	struct dm_buffer *b, *new_b = NULL;

	*need_submit = 0;

	b = __find(c, block);
	if (b)
		goto found_buffer;

	if (nf == NF_GET)
		return NULL;

	new_b = __alloc_buffer_wait(c, nf);
	if (!new_b)
		return NULL;

	/*
	 * We've had a period where the mutex was unlocked, so need to
	 * recheck the buffer tree.
	 */
	b = __find(c, block);
	if (b) {
		__free_buffer_wake(new_b);
		goto found_buffer;
	}

	__check_watermark(c, write_list);

	b = new_b;
	b->hold_count = 1;
	b->read_error = 0;
	b->write_error = 0;
	__link_buffer(b, block, LIST_CLEAN);

	if (nf == NF_FRESH) {
		b->state = 0;
		return b;
	}

	b->state = 1 << B_READING;
	*need_submit = 1;

	return b;

found_buffer:
	if (nf == NF_PREFETCH)
		return NULL;
	/*
	 * Note: it is essential that we don't wait for the buffer to be
	 * read if the dm_bufio_get function is used. Both dm_bufio_get and
	 * dm_bufio_prefetch can be used in the driver request routine.
	 * If the user called both dm_bufio_prefetch and dm_bufio_get on
	 * the same buffer, it would deadlock if we waited.
	 */
	if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
		return NULL;

	b->hold_count++;
	__relink_lru(b, test_bit(B_DIRTY, &b->state) ||
		     test_bit(B_WRITING, &b->state));
	return b;
}

/*
 * The endio routine for reading: set the error, clear the bit and wake up
 * anyone waiting on the buffer.
 */
static void read_endio(struct bio *bio, int error)
{
	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);

	b->read_error = error;

	BUG_ON(!test_bit(B_READING, &b->state));

	smp_mb__before_atomic();
	clear_bit(B_READING, &b->state);
	smp_mb__after_atomic();

	wake_up_bit(&b->state, B_READING);
}

/*
 * A common routine for dm_bufio_new and dm_bufio_read. Operation of these
 * functions is similar except that dm_bufio_new doesn't read the
 * buffer from the disk (assuming that the caller overwrites all the data
 * and uses dm_bufio_mark_buffer_dirty to write new data back).
 */
static void *new_read(struct dm_bufio_client *c, sector_t block,
		      enum new_flag nf, struct dm_buffer **bp)
{
	int need_submit;
	struct dm_buffer *b;

	LIST_HEAD(write_list);

	dm_bufio_lock(c);
	b = __bufio_new(c, block, nf, &need_submit, &write_list);
	dm_bufio_unlock(c);

	__flush_write_list(&write_list);

	if (!b)
		return b;

	if (need_submit)
		submit_io(b, READ, b->block, read_endio);

	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);

	if (b->read_error) {
		int error = b->read_error;

		dm_bufio_release(b);

		return ERR_PTR(error);
	}

	*bp = b;

	return b->data;
}

void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	return new_read(c, block, NF_GET, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_get);

void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
		    struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_READ, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_read);

void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_FRESH, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_new);
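
/*
 * A minimal sketch of the read-modify-write pattern a dm-bufio client
 * typically follows (illustrative only, not compiled; block 0 and the
 * memset update are arbitrary choices). dm_bufio_read() returns the data
 * pointer and sets *bp, which must later be handed back with
 * dm_bufio_release().
 */
#if 0
static int example_update_block(struct dm_bufio_client *c)
{
	struct dm_buffer *bp;
	void *data;

	data = dm_bufio_read(c, 0, &bp);
	if (IS_ERR(data))
		return PTR_ERR(data);

	memset(data, 0, dm_bufio_get_block_size(c));	/* modify the block */
	dm_bufio_mark_buffer_dirty(bp);		/* schedule writeback */
	dm_bufio_release(bp);			/* drop the hold count */

	return dm_bufio_write_dirty_buffers(c);	/* write back + flush */
}
#endif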

void dm_bufio_prefetch(struct dm_bufio_client *c,
		       sector_t block, unsigned n_blocks)
{
	struct blk_plug plug;

	LIST_HEAD(write_list);

	BUG_ON(dm_bufio_in_request());

	blk_start_plug(&plug);
	dm_bufio_lock(c);

	for (; n_blocks--; block++) {
		int need_submit;
		struct dm_buffer *b;
		b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
				&write_list);
		if (unlikely(!list_empty(&write_list))) {
			dm_bufio_unlock(c);
			blk_finish_plug(&plug);
			__flush_write_list(&write_list);
			blk_start_plug(&plug);
			dm_bufio_lock(c);
		}
		if (unlikely(b != NULL)) {
			dm_bufio_unlock(c);

			if (need_submit)
				submit_io(b, READ, b->block, read_endio);
			dm_bufio_release(b);

			dm_bufio_cond_resched();

			if (!n_blocks)
				goto flush_plug;
			dm_bufio_lock(c);
		}
	}

	dm_bufio_unlock(c);

flush_plug:
	blk_finish_plug(&plug);
}
EXPORT_SYMBOL_GPL(dm_bufio_prefetch);

void dm_bufio_release(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	dm_bufio_lock(c);

	BUG_ON(!b->hold_count);

	b->hold_count--;
	if (!b->hold_count) {
		wake_up(&c->free_buffer_wait);

		/*
		 * If there were errors on the buffer, and the buffer is not
		 * to be written, free the buffer. There is no point in caching
		 * an invalid buffer.
		 */
		if ((b->read_error || b->write_error) &&
		    !test_bit(B_READING, &b->state) &&
		    !test_bit(B_WRITING, &b->state) &&
		    !test_bit(B_DIRTY, &b->state)) {
			__unlink_buffer(b);
			__free_buffer_wake(b);
		}
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_release);

void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	dm_bufio_lock(c);

	BUG_ON(test_bit(B_READING, &b->state));

	if (!test_and_set_bit(B_DIRTY, &b->state))
		__relink_lru(b, LIST_DIRTY);

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);

void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
{
	LIST_HEAD(write_list);

	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0, &write_list);
	dm_bufio_unlock(c);
	__flush_write_list(&write_list);
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);

/*
 * For performance, it is essential that the buffers are written asynchronously
 * and simultaneously (so that the block layer can merge the writes) and then
 * waited upon.
 *
 * Finally, we flush the hardware disk cache.
 */
int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
{
	int a, f;
	unsigned long buffers_processed = 0;
	struct dm_buffer *b, *tmp;

	LIST_HEAD(write_list);

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0, &write_list);
	dm_bufio_unlock(c);
	__flush_write_list(&write_list);
	dm_bufio_lock(c);

again:
	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		int dropped_lock = 0;

		if (buffers_processed < c->n_buffers[LIST_DIRTY])
			buffers_processed++;

		BUG_ON(test_bit(B_READING, &b->state));

		if (test_bit(B_WRITING, &b->state)) {
			if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
				dropped_lock = 1;
				b->hold_count++;
				dm_bufio_unlock(c);
				wait_on_bit_io(&b->state, B_WRITING,
					       TASK_UNINTERRUPTIBLE);
				dm_bufio_lock(c);
				b->hold_count--;
			} else
				wait_on_bit_io(&b->state, B_WRITING,
					       TASK_UNINTERRUPTIBLE);
		}

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state))
			__relink_lru(b, LIST_CLEAN);

		dm_bufio_cond_resched();

		/*
		 * If we dropped the lock, the list is no longer consistent,
		 * so we must restart the search.
		 *
		 * In the most common case, the buffer just processed is
		 * relinked to the clean list, so we won't loop scanning the
		 * same buffer again and again.
		 *
		 * This may livelock if there is another thread simultaneously
		 * dirtying buffers, so we count the number of buffers walked
		 * and if it exceeds the total number of buffers, it means that
		 * someone is doing some writes simultaneously with us. In
		 * this case, stop, dropping the lock.
		 */
		if (dropped_lock)
			goto again;
	}
	wake_up(&c->free_buffer_wait);
	dm_bufio_unlock(c);

	a = xchg(&c->async_write_error, 0);
	f = dm_bufio_issue_flush(c);
	if (a)
		return a;

	return f;
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
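
/*
 * Commit-ordering sketch (illustrative only, not compiled): a client that
 * journals metadata would typically start the writes asynchronously first
 * and wait at commit time. dm_bufio_write_dirty_buffers() both waits for
 * the outstanding writes and returns any error latched earlier in
 * c->async_write_error, so the failure of a write submitted long ago is
 * still reported at the commit point.
 */
#if 0
static int example_commit(struct dm_bufio_client *c)
{
	dm_bufio_write_dirty_buffers_async(c);	/* start the writes early */
	/* ... prepare more state while the I/O is in flight ... */
	return dm_bufio_write_dirty_buffers(c);	/* wait, then flush cache */
}
#endif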

/*
 * Use dm-io to send an empty barrier to flush the device.
 */
int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
	struct dm_io_request io_req = {
		.bi_rw = WRITE_FLUSH,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = c->dm_io,
	};
	struct dm_io_region io_reg = {
		.bdev = c->bdev,
		.sector = 0,
		.count = 0,
	};

	BUG_ON(dm_bufio_in_request());

	return dm_io(&io_req, 1, &io_reg, NULL);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);

/*
 * We first delete any other buffer that may be at that new location.
 *
 * Then, we write the buffer to the original location if it was dirty.
 *
 * Then, if we are the only one who is holding the buffer, relink the buffer
 * in the buffer tree for the new location.
 *
 * If there was someone else holding the buffer, we write it to the new
 * location but not relink it, because that other user needs to have the buffer
 * at the same place.
 */
void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
{
	struct dm_bufio_client *c = b->c;
	struct dm_buffer *new;

	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);

retry:
	new = __find(c, new_block);
	if (new) {
		if (new->hold_count) {
			__wait_for_free_buffer(c);
			goto retry;
		}

		/*
		 * FIXME: Is there any point waiting for a write that's going
		 * to be overwritten in a bit?
		 */
		__make_buffer_clean(new);
		__unlink_buffer(new);
		__free_buffer_wake(new);
	}

	BUG_ON(!b->hold_count);
	BUG_ON(test_bit(B_READING, &b->state));

	__write_dirty_buffer(b, NULL);
	if (b->hold_count == 1) {
		wait_on_bit_io(&b->state, B_WRITING,
			       TASK_UNINTERRUPTIBLE);
		set_bit(B_DIRTY, &b->state);
		__unlink_buffer(b);
		__link_buffer(b, new_block, LIST_DIRTY);
	} else {
		sector_t old_block;
		wait_on_bit_lock_io(&b->state, B_WRITING,
				    TASK_UNINTERRUPTIBLE);
		/*
		 * Relink buffer to "new_block" so that write_callback
		 * sees "new_block" as a block number.
		 * After the write, link the buffer back to old_block.
		 * All this must be done in bufio lock, so that block number
		 * change isn't visible to other threads.
		 */
		old_block = b->block;
		__unlink_buffer(b);
		__link_buffer(b, new_block, b->list_mode);
		submit_io(b, WRITE, new_block, write_endio);
		wait_on_bit_io(&b->state, B_WRITING,
			       TASK_UNINTERRUPTIBLE);
		__unlink_buffer(b);
		__link_buffer(b, old_block, b->list_mode);
	}

	dm_bufio_unlock(c);
	dm_bufio_release(b);
}
EXPORT_SYMBOL_GPL(dm_bufio_release_move);

/*
 * Free the given buffer.
 *
 * This is just a hint: if the buffer is in use or dirty, this function
 * does nothing.
 */
void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
{
	struct dm_buffer *b;

	dm_bufio_lock(c);

	b = __find(c, block);
	if (b && likely(!b->hold_count) && likely(!b->state)) {
		__unlink_buffer(b);
		__free_buffer_wake(b);
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL(dm_bufio_forget);

void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
{
	c->minimum_buffers = n;
}
EXPORT_SYMBOL(dm_bufio_set_minimum_buffers);

unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
{
	return c->block_size;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);

sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
{
	return i_size_read(c->bdev->bd_inode) >>
			   (SECTOR_SHIFT + c->sectors_per_block_bits);
}
EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);

sector_t dm_bufio_get_block_number(struct dm_buffer *b)
{
	return b->block;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);

void *dm_bufio_get_block_data(struct dm_buffer *b)
{
	return b->data;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);

void *dm_bufio_get_aux_data(struct dm_buffer *b)
{
	return b + 1;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);

struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
{
	return b->c;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_client);

static void drop_buffers(struct dm_bufio_client *c)
{
	struct dm_buffer *b;
	int i;

	BUG_ON(dm_bufio_in_request());

	/*
	 * An optimization so that the buffers are not written one-by-one.
	 */
	dm_bufio_write_dirty_buffers_async(c);

	dm_bufio_lock(c);

	while ((b = __get_unclaimed_buffer(c)))
		__free_buffer_wake(b);

	for (i = 0; i < LIST_SIZE; i++)
		list_for_each_entry(b, &c->lru[i], lru_list)
			DMERR("leaked buffer %llx, hold count %u, list %d",
			      (unsigned long long)b->block, b->hold_count, i);

	for (i = 0; i < LIST_SIZE; i++)
		BUG_ON(!list_empty(&c->lru[i]));

	dm_bufio_unlock(c);
}

/*
 * Test if the buffer is unused and too old, and reclaim it if so.
 * If GFP_NOFS is used, we must not do any I/O because we hold
 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
 * rerouted to a different bufio client.
 */
static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
				unsigned long max_jiffies)
{
	if (jiffies - b->last_accessed < max_jiffies)
		return 0;

	if (!(gfp & __GFP_FS)) {
		if (test_bit(B_READING, &b->state) ||
		    test_bit(B_WRITING, &b->state) ||
		    test_bit(B_DIRTY, &b->state))
			return 0;
	}

	if (b->hold_count)
		return 0;

	__make_buffer_clean(b);
	__unlink_buffer(b);
	__free_buffer_wake(b);

	return 1;
}

static long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
		   gfp_t gfp_mask)
{
	int l;
	struct dm_buffer *b, *tmp;
	long freed = 0;

	for (l = 0; l < LIST_SIZE; l++) {
		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
			freed += __cleanup_old_buffer(b, gfp_mask, 0);
			if (!--nr_to_scan)
				return freed;
			dm_bufio_cond_resched();
		}
	}
	return freed;
}

static unsigned long
dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dm_bufio_client *c;
	unsigned long freed;

	c = container_of(shrink, struct dm_bufio_client, shrinker);
	if (sc->gfp_mask & __GFP_FS)
		dm_bufio_lock(c);
	else if (!dm_bufio_trylock(c))
		return SHRINK_STOP;

	freed  = __scan(c, sc->nr_to_scan, sc->gfp_mask);
	dm_bufio_unlock(c);
	return freed;
}

static unsigned long
dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dm_bufio_client *c;
	unsigned long count;

	c = container_of(shrink, struct dm_bufio_client, shrinker);
	if (sc->gfp_mask & __GFP_FS)
		dm_bufio_lock(c);
	else if (!dm_bufio_trylock(c))
		return 0;

	count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
	dm_bufio_unlock(c);
	return count;
}

/*
 * Create the buffering interface
 */
struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
					       unsigned reserved_buffers, unsigned aux_size,
					       void (*alloc_callback)(struct dm_buffer *),
					       void (*write_callback)(struct dm_buffer *))
{
	int r;
	struct dm_bufio_client *c;
	unsigned i;

	BUG_ON(block_size < 1 << SECTOR_SHIFT ||
	       (block_size & (block_size - 1)));

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c) {
		r = -ENOMEM;
		goto bad_client;
	}
	c->buffer_tree = RB_ROOT;

	c->bdev = bdev;
	c->block_size = block_size;
	c->sectors_per_block_bits = ffs(block_size) - 1 - SECTOR_SHIFT;
	c->pages_per_block_bits = (ffs(block_size) - 1 >= PAGE_SHIFT) ?
				  ffs(block_size) - 1 - PAGE_SHIFT : 0;
	c->blocks_per_page_bits = (ffs(block_size) - 1 < PAGE_SHIFT ?
				  PAGE_SHIFT - (ffs(block_size) - 1) : 0);

	c->aux_size = aux_size;
	c->alloc_callback = alloc_callback;
	c->write_callback = write_callback;

	for (i = 0; i < LIST_SIZE; i++) {
		INIT_LIST_HEAD(&c->lru[i]);
		c->n_buffers[i] = 0;
	}

	mutex_init(&c->lock);
	INIT_LIST_HEAD(&c->reserved_buffers);
	c->need_reserved_buffers = reserved_buffers;

	c->minimum_buffers = DM_BUFIO_MIN_BUFFERS;

	init_waitqueue_head(&c->free_buffer_wait);
	c->async_write_error = 0;

	c->dm_io = dm_io_client_create();
	if (IS_ERR(c->dm_io)) {
		r = PTR_ERR(c->dm_io);
		goto bad_dm_io;
	}

	mutex_lock(&dm_bufio_clients_lock);
	if (c->blocks_per_page_bits) {
		if (!DM_BUFIO_CACHE_NAME(c)) {
			DM_BUFIO_CACHE_NAME(c) = kasprintf(GFP_KERNEL, "dm_bufio_cache-%u", c->block_size);
			if (!DM_BUFIO_CACHE_NAME(c)) {
				r = -ENOMEM;
				mutex_unlock(&dm_bufio_clients_lock);
				goto bad_cache;
			}
		}

		if (!DM_BUFIO_CACHE(c)) {
			DM_BUFIO_CACHE(c) = kmem_cache_create(DM_BUFIO_CACHE_NAME(c),
							      c->block_size,
							      c->block_size, 0, NULL);
			if (!DM_BUFIO_CACHE(c)) {
				r = -ENOMEM;
				mutex_unlock(&dm_bufio_clients_lock);
				goto bad_cache;
			}
		}
	}
	mutex_unlock(&dm_bufio_clients_lock);

	while (c->need_reserved_buffers) {
		struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);

		if (!b) {
			r = -ENOMEM;
			goto bad_buffer;
		}
		__free_buffer_wake(b);
	}

	mutex_lock(&dm_bufio_clients_lock);
	dm_bufio_client_count++;
	list_add(&c->client_list, &dm_bufio_all_clients);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	c->shrinker.count_objects = dm_bufio_shrink_count;
	c->shrinker.scan_objects = dm_bufio_shrink_scan;
	c->shrinker.seeks = 1;
	c->shrinker.batch = 0;
	register_shrinker(&c->shrinker);

	return c;

bad_buffer:
bad_cache:
	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);
		list_del(&b->lru_list);
		free_buffer(b);
	}
	dm_io_client_destroy(c->dm_io);
bad_dm_io:
	kfree(c);
bad_client:
	return ERR_PTR(r);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_create);
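
/*
 * A minimal sketch of client creation and teardown (illustrative only,
 * not compiled; the 4096-byte block size, single reserved buffer, zero aux
 * size and absent callbacks are arbitrary choices). dm_bufio_client_create()
 * returns an ERR_PTR on failure, and all buffers must be released before
 * dm_bufio_client_destroy() is called.
 */
#if 0
static struct dm_bufio_client *example_create(struct block_device *bdev)
{
	/* 4 KiB blocks, 1 reserved buffer, no aux data, no callbacks */
	return dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL);
}
#endif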

/*
 * Free the buffering interface.
 * It is required that there are no references on any buffers.
 */
void dm_bufio_client_destroy(struct dm_bufio_client *c)
{
	unsigned i;

	drop_buffers(c);

	unregister_shrinker(&c->shrinker);

	mutex_lock(&dm_bufio_clients_lock);

	list_del(&c->client_list);
	dm_bufio_client_count--;
	__cache_size_refresh();

	mutex_unlock(&dm_bufio_clients_lock);

	BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree));
	BUG_ON(c->need_reserved_buffers);

	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);
		list_del(&b->lru_list);
		free_buffer(b);
	}

	for (i = 0; i < LIST_SIZE; i++)
		if (c->n_buffers[i])
			DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);

	for (i = 0; i < LIST_SIZE; i++)
		BUG_ON(c->n_buffers[i]);

	dm_io_client_destroy(c->dm_io);
	kfree(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);

static void cleanup_old_buffers(void)
{
	unsigned long max_age = ACCESS_ONCE(dm_bufio_max_age);
	struct dm_bufio_client *c;

	if (max_age > ULONG_MAX / HZ)
		max_age = ULONG_MAX / HZ;

	mutex_lock(&dm_bufio_clients_lock);
	list_for_each_entry(c, &dm_bufio_all_clients, client_list) {
		if (!dm_bufio_trylock(c))
			continue;

		while (!list_empty(&c->lru[LIST_CLEAN])) {
			struct dm_buffer *b;
			b = list_entry(c->lru[LIST_CLEAN].prev,
				       struct dm_buffer, lru_list);
			if (!__cleanup_old_buffer(b, 0, max_age * HZ))
				break;
			dm_bufio_cond_resched();
		}

		dm_bufio_unlock(c);
		dm_bufio_cond_resched();
	}
	mutex_unlock(&dm_bufio_clients_lock);
}

static struct workqueue_struct *dm_bufio_wq;
static struct delayed_work dm_bufio_work;

static void work_fn(struct work_struct *w)
{
	cleanup_old_buffers();

	queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);
}

/*----------------------------------------------------------------
 * Module setup
 *--------------------------------------------------------------*/

/*
 * This is called only once for the whole dm_bufio module.
 * It initializes the memory limit.
 */
static int __init dm_bufio_init(void)
{
	__u64 mem;

	dm_bufio_allocated_kmem_cache = 0;
	dm_bufio_allocated_get_free_pages = 0;
	dm_bufio_allocated_vmalloc = 0;
	dm_bufio_current_allocated = 0;

	memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
	memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);

	mem = (__u64)((totalram_pages - totalhigh_pages) *
		      DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;

	if (mem > ULONG_MAX)
		mem = ULONG_MAX;

#ifdef CONFIG_MMU
	/*
	 * Get the size of vmalloc space the same way as VMALLOC_TOTAL
	 * in fs/proc/internal.h
	 */
	if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
		mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
#endif

	dm_bufio_default_cache_size = mem;

	mutex_lock(&dm_bufio_clients_lock);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	dm_bufio_wq = create_singlethread_workqueue("dm_bufio_cache");
	if (!dm_bufio_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&dm_bufio_work, work_fn);
	queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);

	return 0;
}

/*
 * This is called once when unloading the dm_bufio module.
 */
static void __exit dm_bufio_exit(void)
{
	int bug = 0;
	int i;

	cancel_delayed_work_sync(&dm_bufio_work);
	destroy_workqueue(dm_bufio_wq);

	for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++) {
		struct kmem_cache *kc = dm_bufio_caches[i];

		if (kc)
			kmem_cache_destroy(kc);
	}

	for (i = 0; i < ARRAY_SIZE(dm_bufio_cache_names); i++)
		kfree(dm_bufio_cache_names[i]);

	if (dm_bufio_client_count) {
		DMCRIT("%s: dm_bufio_client_count leaked: %d",
		       __func__, dm_bufio_client_count);
		bug = 1;
	}

	if (dm_bufio_current_allocated) {
		DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
		       __func__, dm_bufio_current_allocated);
		bug = 1;
	}

	if (dm_bufio_allocated_get_free_pages) {
		DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
		       __func__, dm_bufio_allocated_get_free_pages);
		bug = 1;
	}

	if (dm_bufio_allocated_vmalloc) {
		DMCRIT("%s: dm_bufio_allocated_vmalloc leaked: %lu",
		       __func__, dm_bufio_allocated_vmalloc);
		bug = 1;
	}

	if (bug)
		BUG();
}

module_init(dm_bufio_init)
module_exit(dm_bufio_exit)

module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");

module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");

module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");

module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");

module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");

module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");

module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");

MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
MODULE_LICENSE("GPL");