/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "bset.h"
#include "debug.h"

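/*
 * Some lower layers can't cope with a bio whose bi_idx has already been
 * advanced, so bch_generic_make_request_hack() copies the remaining segments
 * of such a bio into a freshly allocated clone that starts at index 0;
 * bch_bi_idx_hack_endio() then completes the original bio when the clone
 * finishes. Like the bi_max_vecs workaround below, this goes away once
 * immutable bvecs are in.
 */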
static void bch_bi_idx_hack_endio(struct bio *bio, int error)
{
	struct bio *p = bio->bi_private;

	bio_endio(p, error);
	bio_put(bio);
}

static void bch_generic_make_request_hack(struct bio *bio)
{
	if (bio->bi_idx) {
		struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio));

		memcpy(clone->bi_io_vec,
		       bio_iovec(bio),
		       bio_segments(bio) * sizeof(struct bio_vec));

		clone->bi_sector = bio->bi_sector;
		clone->bi_bdev = bio->bi_bdev;
		clone->bi_rw = bio->bi_rw;
		clone->bi_vcnt = bio_segments(bio);
		clone->bi_size = bio->bi_size;

		clone->bi_private = bio;
		clone->bi_end_io = bch_bi_idx_hack_endio;

		bio = clone;
	}

	/*
	 * Hack, since drivers that clone bios clone up to bi_max_vecs, but our
	 * bios might have had more than that (before we split them per device
	 * limitations).
	 *
	 * To be taken out once immutable bvec stuff is in.
	 */
	bio->bi_max_vecs = bio->bi_vcnt;

	generic_make_request(bio);
}

/**
 * bch_bio_split - split a bio
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Allocates and returns a new bio which represents @sectors from the start of
 * @bio, and updates @bio to represent the remaining sectors.
 *
 * If bio_sectors(@bio) was less than or equal to @sectors, returns @bio
 * unchanged.
 *
 * The newly allocated bio's bio_vecs are copied from @bio's, so they reference
 * the same pages; it is the caller's responsibility to ensure that @bio is not
 * freed while the split bio is in flight.
 *
 * If bch_bio_split() is running under generic_make_request(), it's not safe to
 * allocate more than one bio from the same bio set. Therefore, if it is running
 * under generic_make_request() it masks out __GFP_WAIT when doing the
 * allocation. The caller must check for failure if there's any possibility of
 * it being called from under generic_make_request(); it is then the caller's
 * responsibility to retry from a safe context (e.g. by punting to a
 * workqueue).
 */
struct bio *bch_bio_split(struct bio *bio, int sectors,
			  gfp_t gfp, struct bio_set *bs)
{
	unsigned idx = bio->bi_idx, vcnt = 0, nbytes = sectors << 9;
	struct bio_vec *bv;
	struct bio *ret = NULL;

	BUG_ON(sectors <= 0);

	/*
	 * If we're being called from underneath generic_make_request() and we
	 * already allocated any bios from this bio set, we risk deadlock if we
	 * use the mempool. So instead, we possibly fail and let the caller punt
	 * to workqueue or somesuch and retry in a safe context.
	 */
	if (current->bio_list)
		gfp &= ~__GFP_WAIT;

	if (sectors >= bio_sectors(bio))
		return bio;

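	/*
	 * Discards carry no data, so the split bio only needs a single
	 * (unused) bio_vec; its sector range is filled in at "out" below.
	 */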
	if (bio->bi_rw & REQ_DISCARD) {
		ret = bio_alloc_bioset(gfp, 1, bs);
		if (!ret)
			return NULL;
		idx = 0;
		goto out;
	}

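	/*
	 * Walk the segments until @sectors worth of data is covered. If the
	 * split point lands exactly on a bvec boundary, copy whole bvecs into
	 * the new bio; otherwise also copy the straddling bvec, trim the copy
	 * to the bytes that belong to the new bio, and advance the original
	 * bvec past them.
	 */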
	bio_for_each_segment(bv, bio, idx) {
		vcnt = idx - bio->bi_idx;

		if (!nbytes) {
			ret = bio_alloc_bioset(gfp, vcnt, bs);
			if (!ret)
				return NULL;

			memcpy(ret->bi_io_vec, bio_iovec(bio),
			       sizeof(struct bio_vec) * vcnt);

			break;
		} else if (nbytes < bv->bv_len) {
			ret = bio_alloc_bioset(gfp, ++vcnt, bs);
			if (!ret)
				return NULL;

			memcpy(ret->bi_io_vec, bio_iovec(bio),
			       sizeof(struct bio_vec) * vcnt);

			ret->bi_io_vec[vcnt - 1].bv_len = nbytes;
			bv->bv_offset += nbytes;
			bv->bv_len -= nbytes;
			break;
		}

		nbytes -= bv->bv_len;
	}
out:
	ret->bi_bdev = bio->bi_bdev;
	ret->bi_sector = bio->bi_sector;
	ret->bi_size = sectors << 9;
	ret->bi_rw = bio->bi_rw;
	ret->bi_vcnt = vcnt;
	ret->bi_max_vecs = vcnt;

	bio->bi_sector += sectors;
	bio->bi_size -= sectors << 9;
	bio->bi_idx = idx;

	if (bio_integrity(bio)) {
		if (bio_integrity_clone(ret, bio, gfp)) {
			bio_put(ret);
			return NULL;
		}

		bio_integrity_trim(ret, 0, bio_sectors(ret));
		bio_integrity_trim(bio, bio_sectors(ret), bio_sectors(bio));
	}

	return ret;
}

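/*
 * Return how many sectors from the front of @bio can be sent to the underlying
 * device as a single bio: for discards this is bounded by max_discard_sectors;
 * for everything else by the queue's segment and sector limits and, if
 * present, by the device's merge_bvec_fn. Always allows at least the first
 * bvec so the caller can make forward progress.
 */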
static unsigned bch_bio_max_sectors(struct bio *bio)
{
	unsigned ret = bio_sectors(bio);
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	unsigned max_segments = min_t(unsigned, BIO_MAX_PAGES,
				      queue_max_segments(q));
	struct bio_vec *bv, *end = bio_iovec(bio) +
		min_t(int, bio_segments(bio), max_segments);

	struct bvec_merge_data bvm = {
		.bi_bdev	= bio->bi_bdev,
		.bi_sector	= bio->bi_sector,
		.bi_size	= 0,
		.bi_rw		= bio->bi_rw,
	};

	if (bio->bi_rw & REQ_DISCARD)
		return min(ret, q->limits.max_discard_sectors);

	if (bio_segments(bio) > max_segments ||
	    q->merge_bvec_fn) {
		ret = 0;

		for (bv = bio_iovec(bio); bv < end; bv++) {
			if (q->merge_bvec_fn &&
			    q->merge_bvec_fn(q, &bvm, bv) < (int) bv->bv_len)
				break;

			ret += bv->bv_len >> 9;
			bvm.bi_size += bv->bv_len;
		}
	}

	ret = min(ret, queue_max_sectors(q));

	WARN_ON(!ret);
	ret = max_t(int, ret, bio_iovec(bio)->bv_len >> 9);

	return ret;
}

static void bch_bio_submit_split_done(struct closure *cl)
{
	struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);

	s->bio->bi_end_io = s->bi_end_io;
	s->bio->bi_private = s->bi_private;
	bio_endio(s->bio, 0);

	closure_debug_destroy(&s->cl);
	mempool_free(s, s->p->bio_split_hook);
}

static void bch_bio_submit_split_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);

	if (error)
		clear_bit(BIO_UPTODATE, &s->bio->bi_flags);

	bio_put(bio);
	closure_put(cl);
}

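/*
 * Repeatedly carve off the largest chunk the device will accept (per
 * bch_bio_max_sectors()) and submit it, until the whole bio has been consumed.
 * Each chunk holds a ref on the closure. If bch_bio_split() fails (it masks
 * out __GFP_WAIT while we're under generic_make_request()), we punt to
 * system_wq and retry from process context. Once every chunk has completed,
 * bch_bio_submit_split_done() restores the saved endio/private and completes
 * the original bio.
 */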
static void __bch_bio_submit_split(struct closure *cl)
{
	struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);
	struct bio *bio = s->bio, *n;

	do {
		n = bch_bio_split(bio, bch_bio_max_sectors(bio),
				  GFP_NOIO, s->p->bio_split);
		if (!n)
			continue_at(cl, __bch_bio_submit_split, system_wq);

		n->bi_end_io = bch_bio_submit_split_endio;
		n->bi_private = cl;

		closure_get(cl);
		bch_generic_make_request_hack(n);
	} while (n != bio);

	continue_at(cl, bch_bio_submit_split_done, NULL);
}

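/*
 * bcache's replacement for generic_make_request(): bios that carry no data
 * (e.g. flushes) are passed straight through, as are bios that already fit
 * within bch_bio_max_sectors(). Anything larger is handed to the split
 * machinery above, which stashes the original bi_end_io/bi_private in a
 * bio_split_hook so the bio completes only after every fragment does.
 */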
void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
{
	struct bio_split_hook *s;

	if (!bio_has_data(bio) && !(bio->bi_rw & REQ_DISCARD))
		goto submit;

	if (bio_sectors(bio) <= bch_bio_max_sectors(bio))
		goto submit;

	s = mempool_alloc(p->bio_split_hook, GFP_NOIO);

	s->bio = bio;
	s->p = p;
	s->bi_end_io = bio->bi_end_io;
	s->bi_private = bio->bi_private;
	bio_get(bio);

	closure_call(&s->cl, __bch_bio_submit_split, NULL, NULL);
	return;
submit:
	bch_generic_make_request_hack(bio);
}

/* Bios with headers */
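/*
 * A bbio wraps a bio together with the bkey it is doing IO for: the device and
 * starting sector are filled in from the key's pointer at submit time, and the
 * submit timestamp is recorded for the congestion accounting below.
 */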
void bch_bbio_free(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	mempool_free(b, c->bio_meta);
}

struct bio *bch_bbio_alloc(struct cache_set *c)
{
	struct bbio *b = mempool_alloc(c->bio_meta, GFP_NOIO);
	struct bio *bio = &b->bio;

	bio_init(bio);
	bio->bi_flags |= BIO_POOL_NONE << BIO_POOL_OFFSET;
	bio->bi_max_vecs = bucket_pages(c);
	bio->bi_io_vec = bio->bi_inline_vecs;

	return bio;
}

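/* Fill in the device and sector from the key's first pointer, then submit. */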
void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	bio->bi_sector = PTR_OFFSET(&b->key, 0);
	bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev;

	b->submit_time_us = local_clock_us();
	closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0));
}

void bch_submit_bbio(struct bio *bio, struct cache_set *c,
		     struct bkey *k, unsigned ptr)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	bch_bkey_copy_single_ptr(&b->key, k, ptr);
	__bch_submit_bbio(bio, c);
}

/* IO errors */

void bch_count_io_errors(struct cache *ca, int error, const char *m)
{
	/*
	 * The halflife of an error is:
	 * log2(1/2)/log2(127/128) * refresh ~= 88 * refresh
	 */
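	/*
	 * (Each decay multiplies the accumulated error count by 127/128, so
	 * it takes n decays to halve it, where (127/128)^n = 1/2, i.e.
	 * n = log(2) / log(128/127) ~= 88.4.)
	 */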
	if (ca->set->error_decay) {
		unsigned count = atomic_inc_return(&ca->io_count);

		while (count > ca->set->error_decay) {
			unsigned errors;
			unsigned old = count;
			unsigned new = count - ca->set->error_decay;

			/*
			 * First we subtract refresh from count; each time we
			 * successfully do so, we rescale the errors once:
			 */

			count = atomic_cmpxchg(&ca->io_count, old, new);

			if (count == old) {
				count = new;

				errors = atomic_read(&ca->io_errors);
				do {
					old = errors;
					new = ((uint64_t) errors * 127) / 128;
					errors = atomic_cmpxchg(&ca->io_errors,
								old, new);
				} while (old != errors);
			}
		}
	}

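	/*
	 * Errors are accumulated scaled up by 1 << IO_ERROR_SHIFT so that the
	 * 127/128 decay above has sub-integer resolution; the shift is undone
	 * before comparing against the error limit.
	 */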
	if (error) {
		char buf[BDEVNAME_SIZE];
		unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
						    &ca->io_errors);
		errors >>= IO_ERROR_SHIFT;

		if (errors < ca->set->error_limit)
			pr_err("%s: IO error on %s, recovering",
			       bdevname(ca->bdev, buf), m);
		else
			bch_cache_set_error(ca->set,
					    "%s: too many IO errors %s",
					    bdevname(ca->bdev, buf), m);
	}
}

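/*
 * In addition to counting errors, track how congested the device is: if an IO
 * took longer than the configured read/write threshold, drive c->congested
 * negative by roughly the IO's duration in milliseconds (clamped so it never
 * falls below -CONGESTED_MAX); fast IOs let it creep back toward zero.
 */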
void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
			      int error, const char *m)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct cache *ca = PTR_CACHE(c, &b->key, 0);

	unsigned threshold = bio->bi_rw & REQ_WRITE
		? c->congested_write_threshold_us
		: c->congested_read_threshold_us;

	if (threshold) {
		unsigned t = local_clock_us();

		int us = t - b->submit_time_us;
		int congested = atomic_read(&c->congested);

		if (us > (int) threshold) {
			int ms = us / 1024;
			c->congested_last_us = t;

			ms = min(ms, CONGESTED_MAX + congested);
			atomic_sub(ms, &c->congested);
		} else if (congested < 0)
			atomic_inc(&c->congested);
	}

	bch_count_io_errors(ca, error, m);
}

void bch_bbio_endio(struct cache_set *c, struct bio *bio,
		    int error, const char *m)
{
	struct closure *cl = bio->bi_private;

	bch_bbio_count_io_errors(c, bio, error, m);
	bio_put(bio);
	closure_put(cl);
}