/*
 * Functions related to generic block layer helpers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

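/*
 * A bio_batch tracks a set of bios submitted as one operation: @done
 * counts outstanding bios (plus one reference held by the submitter),
 * @flags carries the BIO_UPTODATE bit across completions, and @wait is
 * completed when the last bio finishes.
 */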
struct bio_batch {
	atomic_t		done;
	unsigned long		flags;
	struct completion	*wait;
};

static void bio_batch_end_io(struct bio *bio, int err)
{
	struct bio_batch *bb = bio->bi_private;

	if (err && (err != -EOPNOTSUPP))
		clear_bit(BIO_UPTODATE, &bb->flags);
	if (atomic_dec_and_test(&bb->done))
		complete(bb->wait);
	bio_put(bio);
}

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = REQ_WRITE | REQ_DISCARD;
	sector_t max_discard_sectors;
	sector_t granularity, alignment;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;
	struct blk_plug plug;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = bdev_discard_alignment(bdev) >> 9;
	alignment = sector_div(alignment, granularity);

	/*
	 * Ensure that max_discard_sectors is of the proper
	 * granularity, so that requests stay aligned after a split.
	 */
	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	sector_div(max_discard_sectors, granularity);
	max_discard_sectors *= granularity;
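	/*
	 * Illustrative example (not from the original source): with a
	 * 512 KiB discard granularity (granularity = 1024 sectors) and
	 * q->limits.max_discard_sectors = 65535, the division and
	 * multiplication above round max_discard_sectors down to 64512,
	 * a whole multiple of the granularity.
	 */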
	if (unlikely(!max_discard_sectors)) {
		/* Avoid infinite loop below. Being cautious never hurts. */
		return -EOPNOTSUPP;
	}

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secdiscard(q))
			return -EOPNOTSUPP;
		type |= REQ_SECURE;
	}

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

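	/*
	 * Submit the discard bios under a plug so that consecutive bios
	 * can be merged into larger requests before they reach the driver.
	 */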
	blk_start_plug(&plug);
	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		req_sects = min_t(sector_t, nr_sects, max_discard_sectors);

		/*
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
		 */
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}
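		/*
		 * Illustrative example (not from the original source):
		 * with granularity = 1024, alignment = 0, sector = 488,
		 * req_sects = 64512 and more sectors still remaining,
		 * end_sect = 65000 is misaligned (65000 % 1024 == 488),
		 * so end_sect is rounded down to 64512 and req_sects
		 * becomes 64024; the following bio then starts at the
		 * aligned sector 64512.
		 */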

		bio->bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;

		bio->bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		atomic_inc(&bb.done);
		submit_bio(type, bio);

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}
	blk_finish_plug(&plug);

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		ret = -EIO;

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
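
/*
 * Illustrative caller (not part of this file): discard the first
 * 1 MiB (2048 512-byte sectors) of a device, treating lack of
 * discard support as non-fatal:
 *
 *	int err = blkdev_issue_discard(bdev, 0, 2048, GFP_KERNEL, 0);
 *	if (err && err != -EOPNOTSUPP)
 *		return err;
 */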

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
			    sector_t nr_sects, gfp_t gfp_mask,
			    struct page *page)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	max_write_same_sectors = q->limits.max_write_same_sectors;

	if (max_write_same_sectors == 0)
		return -EOPNOTSUPP;

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	while (nr_sects) {
		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
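		/*
		 * A WRITE SAME payload is a single logical block; bi_size
		 * (set below) covers the whole target range, and the
		 * device replicates the block across it.
		 */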

		if (nr_sects > max_write_same_sectors) {
			bio->bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_size = nr_sects << 9;
			nr_sects = 0;
		}

		atomic_inc(&bb.done);
		submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		ret = -EOPNOTSUPP;

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Generate and issue a number of bios with zero-filled pages.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			sector_t nr_sects, gfp_t gfp_mask)
{
	int ret;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz;
	DECLARE_COMPLETION_ONSTACK(wait);

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	ret = 0;
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_private = &bb;

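		/*
		 * Pack as many zero pages as will fit into this bio;
		 * bio_add_page() returns the number of bytes actually
		 * added and may stop short of the request when a queue
		 * limit is reached, in which case the remainder is
		 * carried into the next bio.
		 */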
		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
		ret = 0;
		atomic_inc(&bb.done);
		submit_bio(WRITE, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		/* One of the bios in the batch completed with an error. */
		ret = -EIO;

	return ret;
}

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Generate and issue a number of bios with zero-filled pages.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			 sector_t nr_sects, gfp_t gfp_mask)
{
	if (bdev_write_same(bdev)) {
		unsigned char bdn[BDEVNAME_SIZE];

		if (!blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
					     ZERO_PAGE(0)))
			return 0;

		bdevname(bdev, bdn);
		pr_err("%s: WRITE SAME failed. Manually zeroing.\n", bdn);
	}

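	/*
	 * WRITE SAME is unsupported or failed above; fall back to
	 * submitting zero-filled write bios.
	 */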
	return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
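
/*
 * Illustrative caller (not part of this file): zero the first 1 MiB
 * (2048 512-byte sectors) of a device, using WRITE SAME when the
 * target supports it and falling back to plain writes otherwise:
 *
 *	int err = blkdev_issue_zeroout(bdev, 0, 2048, GFP_KERNEL);
 */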