/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "config-host.h"
#include "qemu-common.h"
#include "trace.h"
#include "monitor.h"
#include "block_int.h"
#include "module.h"
#include "qjson.h"
#include "qemu-coroutine.h"
#include "qmp-commands.h"
#include "qemu-timer.h"

#ifdef CONFIG_BSD
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/queue.h>
#ifndef __DragonFly__
#include <sys/disk.h>
#endif
#endif

#ifdef _WIN32
#include <windows.h>
#endif

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

typedef enum {
    BDRV_REQ_COPY_ON_READ = 0x1,
    BDRV_REQ_ZERO_WRITE   = 0x2,
} BdrvRequestFlags;

static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                               int64_t sector_num,
                                               QEMUIOVector *qiov,
                                               int nb_sectors,
                                               BlockDriverCompletionFunc *cb,
                                               void *opaque,
                                               bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors);

static bool bdrv_exceed_bps_limits(BlockDriverState *bs, int nb_sectors,
        bool is_write, double elapsed_time, uint64_t *wait);
static bool bdrv_exceed_iops_limits(BlockDriverState *bs, bool is_write,
        double elapsed_time, uint64_t *wait);
static bool bdrv_exceed_io_limits(BlockDriverState *bs, int nb_sectors,
        bool is_write, int64_t *wait);

static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
    QTAILQ_HEAD_INITIALIZER(bdrv_states);

static QLIST_HEAD(, BlockDriver) bdrv_drivers =
    QLIST_HEAD_INITIALIZER(bdrv_drivers);

/* The device to use for VM snapshots */
static BlockDriverState *bs_snapshots;

/* If non-zero, use only whitelisted block drivers */
static int use_bdrv_whitelist;

#ifdef _WIN32
static int is_windows_drive_prefix(const char *filename)
{
    return (((filename[0] >= 'a' && filename[0] <= 'z') ||
             (filename[0] >= 'A' && filename[0] <= 'Z')) &&
            filename[1] == ':');
}

int is_windows_drive(const char *filename)
{
    if (is_windows_drive_prefix(filename) &&
        filename[2] == '\0')
        return 1;
    if (strstart(filename, "\\\\.\\", NULL) ||
        strstart(filename, "//./", NULL))
        return 1;
    return 0;
}
#endif

/* throttling disk I/O limits */
void bdrv_io_limits_disable(BlockDriverState *bs)
{
    bs->io_limits_enabled = false;

    while (qemu_co_queue_next(&bs->throttled_reqs));

    if (bs->block_timer) {
        qemu_del_timer(bs->block_timer);
        qemu_free_timer(bs->block_timer);
        bs->block_timer = NULL;
    }

    bs->slice_start = 0;
    bs->slice_end   = 0;
    bs->slice_time  = 0;
    memset(&bs->io_base, 0, sizeof(bs->io_base));
}

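/* Timer callback: wake up the next request waiting in the throttle queue */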
static void bdrv_block_timer(void *opaque)
{
    BlockDriverState *bs = opaque;

    qemu_co_queue_next(&bs->throttled_reqs);
}

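/* Initialize throttling state: the queue of throttled requests, the wakeup
 * timer and the first accounting slice. */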
void bdrv_io_limits_enable(BlockDriverState *bs)
{
    qemu_co_queue_init(&bs->throttled_reqs);
    bs->block_timer = qemu_new_timer_ns(vm_clock, bdrv_block_timer, bs);
    bs->slice_time  = 5 * BLOCK_IO_SLICE_TIME;
    bs->slice_start = qemu_get_clock_ns(vm_clock);
    bs->slice_end   = bs->slice_start + bs->slice_time;
    memset(&bs->io_base, 0, sizeof(bs->io_base));
    bs->io_limits_enabled = true;
}

bool bdrv_io_limits_enabled(BlockDriverState *bs)
{
    BlockIOLimit *io_limits = &bs->io_limits;
    return io_limits->bps[BLOCK_IO_LIMIT_READ]
         || io_limits->bps[BLOCK_IO_LIMIT_WRITE]
         || io_limits->bps[BLOCK_IO_LIMIT_TOTAL]
         || io_limits->iops[BLOCK_IO_LIMIT_READ]
         || io_limits->iops[BLOCK_IO_LIMIT_WRITE]
         || io_limits->iops[BLOCK_IO_LIMIT_TOTAL];
}

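/* Delay the calling coroutine until the request fits within the configured
 * I/O limits, then let the next throttled request run. */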
static void bdrv_io_limits_intercept(BlockDriverState *bs,
                                     bool is_write, int nb_sectors)
{
    int64_t wait_time = -1;

    if (!qemu_co_queue_empty(&bs->throttled_reqs)) {
        qemu_co_queue_wait(&bs->throttled_reqs);
    }

    /* We try to preserve the ordering of requests (FIFO).  The next throttled
     * request is not dequeued until the current request has been allowed to
     * be serviced.  So if the current request still exceeds the limits, it is
     * re-inserted at the head of the queue and all requests that follow it
     * remain in throttled_reqs.
     */

    while (bdrv_exceed_io_limits(bs, nb_sectors, is_write, &wait_time)) {
        qemu_mod_timer(bs->block_timer,
                       wait_time + qemu_get_clock_ns(vm_clock));
        qemu_co_queue_wait_insert_head(&bs->throttled_reqs);
    }

    qemu_co_queue_next(&bs->throttled_reqs);
}

/* check if the path starts with "<protocol>:" */
static int path_has_protocol(const char *path)
{
    const char *p;

#ifdef _WIN32
    if (is_windows_drive(path) ||
        is_windows_drive_prefix(path)) {
        return 0;
    }
    p = path + strcspn(path, ":/\\");
#else
    p = path + strcspn(path, ":/");
#endif

    return *p == ':';
}

int path_is_absolute(const char *path)
{
#ifdef _WIN32
    /* specific case for names like: "\\.\d:" */
    if (is_windows_drive(path) || is_windows_drive_prefix(path)) {
        return 1;
    }
    return (*path == '/' || *path == '\\');
#else
    return (*path == '/');
#endif
}

/* If filename is absolute, just copy it to dest.  Otherwise, build a path
   to it, treating it as relative to base_path.  URLs are supported. */
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename)
{
    const char *p, *p1;
    int len;

    if (dest_size <= 0)
        return;
    if (path_is_absolute(filename)) {
        pstrcpy(dest, dest_size, filename);
    } else {
        p = strchr(base_path, ':');
        if (p)
            p++;
        else
            p = base_path;
        p1 = strrchr(base_path, '/');
#ifdef _WIN32
        {
            const char *p2;
            p2 = strrchr(base_path, '\\');
            if (!p1 || p2 > p1)
                p1 = p2;
        }
#endif
        if (p1)
            p1++;
        else
            p1 = base_path;
        if (p1 > p)
            p = p1;
        len = p - base_path;
        if (len > dest_size - 1)
            len = dest_size - 1;
        memcpy(dest, base_path, len);
        dest[len] = '\0';
        pstrcat(dest, dest_size, filename);
    }
}

void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz)
{
    if (bs->backing_file[0] == '\0' || path_has_protocol(bs->backing_file)) {
        pstrcpy(dest, sz, bs->backing_file);
    } else {
        path_combine(dest, sz, bs->filename, bs->backing_file);
    }
}

void bdrv_register(BlockDriver *bdrv)
{
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
         */
        if (!bdrv->bdrv_aio_readv) {
            /* add AIO emulation layer */
            bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
            bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
        }
    }

    QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
}

/* create a new block device (by default it is empty) */
BlockDriverState *bdrv_new(const char *device_name)
{
    BlockDriverState *bs;

    bs = g_malloc0(sizeof(BlockDriverState));
    pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);
    if (device_name[0] != '\0') {
        QTAILQ_INSERT_TAIL(&bdrv_states, bs, list);
    }
    bdrv_iostatus_disable(bs);
    return bs;
}

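/* Look up a registered block driver by format name */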
BlockDriver *bdrv_find_format(const char *format_name)
{
    BlockDriver *drv1;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (!strcmp(drv1->format_name, format_name)) {
            return drv1;
        }
    }
    return NULL;
}

static int bdrv_is_whitelisted(BlockDriver *drv)
{
    static const char *whitelist[] = {
        CONFIG_BDRV_WHITELIST
    };
    const char **p;

    if (!whitelist[0])
        return 1;               /* no whitelist, anything goes */

    for (p = whitelist; *p; p++) {
        if (!strcmp(drv->format_name, *p)) {
            return 1;
        }
    }
    return 0;
}

BlockDriver *bdrv_find_whitelisted_format(const char *format_name)
{
    BlockDriver *drv = bdrv_find_format(format_name);
    return drv && bdrv_is_whitelisted(drv) ? drv : NULL;
}

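/* State shared with bdrv_create_co_entry() so that bdrv_create() can run the
 * driver's create callback inside a coroutine. */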
typedef struct CreateCo {
    BlockDriver *drv;
    char *filename;
    QEMUOptionParameter *options;
    int ret;
} CreateCo;

static void coroutine_fn bdrv_create_co_entry(void *opaque)
{
    CreateCo *cco = opaque;
    assert(cco->drv);

    cco->ret = cco->drv->bdrv_create(cco->filename, cco->options);
}

int bdrv_create(BlockDriver *drv, const char* filename,
    QEMUOptionParameter *options)
{
    int ret;

    Coroutine *co;
    CreateCo cco = {
        .drv = drv,
        .filename = g_strdup(filename),
        .options = options,
        .ret = NOT_DONE,
    };

    if (!drv->bdrv_create) {
        return -ENOTSUP;
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_create_co_entry(&cco);
    } else {
        co = qemu_coroutine_create(bdrv_create_co_entry);
        qemu_coroutine_enter(co, &cco);
        while (cco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    ret = cco.ret;
    g_free(cco.filename);

    return ret;
}

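/* Create an image using the protocol driver that matches filename */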
int bdrv_create_file(const char* filename, QEMUOptionParameter *options)
{
    BlockDriver *drv;

    drv = bdrv_find_protocol(filename);
    if (drv == NULL) {
        return -ENOENT;
    }

    return bdrv_create(drv, filename, options);
}

/*
 * Create a uniquely-named empty temporary file.
 * Return 0 upon success, otherwise a negative errno value.
 */
int get_tmp_filename(char *filename, int size)
{
#ifdef _WIN32
    char temp_dir[MAX_PATH];
    /* GetTempFileName requires that its output buffer (4th param)
       have length MAX_PATH or greater. */
    assert(size >= MAX_PATH);
    return (GetTempPath(MAX_PATH, temp_dir)
            && GetTempFileName(temp_dir, "qem", 0, filename)
            ? 0 : -GetLastError());
#else
    int fd;
    const char *tmpdir;
    tmpdir = getenv("TMPDIR");
    if (!tmpdir)
        tmpdir = "/tmp";
    if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) {
        return -EOVERFLOW;
    }
    fd = mkstemp(filename);
    if (fd < 0 || close(fd)) {
        return -errno;
    }
    return 0;
#endif
}

/*
 * Detect host devices. By convention, /dev/cdrom[N] is always
 * recognized as a host CDROM.
 */
static BlockDriver *find_hdev_driver(const char *filename)
{
    int score_max = 0, score;
    BlockDriver *drv = NULL, *d;

    QLIST_FOREACH(d, &bdrv_drivers, list) {
        if (d->bdrv_probe_device) {
            score = d->bdrv_probe_device(filename);
            if (score > score_max) {
                score_max = score;
                drv = d;
            }
        }
    }

    return drv;
}

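/* Return the protocol (or host device) driver for filename; images without a
 * "<protocol>:" prefix fall back to the "file" driver. */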
BlockDriver *bdrv_find_protocol(const char *filename)
{
    BlockDriver *drv1;
    char protocol[128];
    int len;
    const char *p;

    /* TODO Drivers without bdrv_file_open must be specified explicitly */

    /*
     * XXX(hch): we really should not let host device detection
     * override an explicit protocol specification, but moving this
     * later breaks access to device names with colons in them.
     * Thanks to the brain-dead persistent naming schemes on udev-
     * based Linux systems those actually are quite common.
     */
    drv1 = find_hdev_driver(filename);
    if (drv1) {
        return drv1;
    }

    if (!path_has_protocol(filename)) {
        return bdrv_find_format("file");
    }
    p = strchr(filename, ':');
    assert(p != NULL);
    len = p - filename;
    if (len > sizeof(protocol) - 1)
        len = sizeof(protocol) - 1;
    memcpy(protocol, filename, len);
    protocol[len] = '\0';
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->protocol_name &&
            !strcmp(drv1->protocol_name, protocol)) {
            return drv1;
        }
    }
    return NULL;
}

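/* Probe the image content to choose a format driver; scsi-generic devices and
 * empty drives always get the "raw" driver.  *pdrv is NULL on failure. */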
static int find_image_format(const char *filename, BlockDriver **pdrv)
{
    int ret, score, score_max;
    BlockDriver *drv1, *drv;
    uint8_t buf[2048];
    BlockDriverState *bs;

    ret = bdrv_file_open(&bs, filename, 0);
    if (ret < 0) {
        *pdrv = NULL;
        return ret;
    }

    /* Return the raw BlockDriver * to scsi-generic devices or empty drives */
    if (bs->sg || !bdrv_is_inserted(bs)) {
        bdrv_delete(bs);
        drv = bdrv_find_format("raw");
        if (!drv) {
            ret = -ENOENT;
        }
        *pdrv = drv;
        return ret;
    }

    ret = bdrv_pread(bs, 0, buf, sizeof(buf));
    bdrv_delete(bs);
    if (ret < 0) {
        *pdrv = NULL;
        return ret;
    }

    score_max = 0;
    drv = NULL;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->bdrv_probe) {
            score = drv1->bdrv_probe(buf, ret, filename);
            if (score > score_max) {
                score_max = score;
                drv = drv1;
            }
        }
    }
    if (!drv) {
        ret = -ENOENT;
    }
    *pdrv = drv;
    return ret;
}

/**
 * Set the current 'total_sectors' value
 */
static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
{
    BlockDriver *drv = bs->drv;

    /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
    if (bs->sg)
        return 0;

    /* query actual device if possible, otherwise just trust the hint */
    if (drv->bdrv_getlength) {
        int64_t length = drv->bdrv_getlength(bs);
        if (length < 0) {
            return length;
        }
        hint = length >> BDRV_SECTOR_BITS;
    }

    bs->total_sectors = hint;
    return 0;
}

/**
 * Set open flags for a given cache mode
 *
 * Return 0 on success, -1 if the cache mode was invalid.
 */
int bdrv_parse_cache_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_CACHE_MASK;

    if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
        *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "directsync")) {
        *flags |= BDRV_O_NOCACHE;
    } else if (!strcmp(mode, "writeback")) {
        *flags |= BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "unsafe")) {
        *flags |= BDRV_O_CACHE_WB;
        *flags |= BDRV_O_NO_FLUSH;
    } else if (!strcmp(mode, "writethrough")) {
        /* this is the default */
    } else {
        return -1;
    }

    return 0;
}

/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}

/*
 * Common part for opening disk images and files
 */
static int bdrv_open_common(BlockDriverState *bs, const char *filename,
    int flags, BlockDriver *drv)
{
    int ret, open_flags;

    assert(drv != NULL);
    assert(bs->file == NULL);

    trace_bdrv_open_common(bs, filename, flags, drv->format_name);

    bs->open_flags = flags;
    bs->buffer_alignment = 512;

    assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
    if ((flags & BDRV_O_RDWR) && (flags & BDRV_O_COPY_ON_READ)) {
        bdrv_enable_copy_on_read(bs);
    }

    pstrcpy(bs->filename, sizeof(bs->filename), filename);

    if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv)) {
        return -ENOTSUP;
    }

    bs->drv = drv;
    bs->opaque = g_malloc0(drv->instance_size);

    bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);
    open_flags = flags | BDRV_O_CACHE_WB;

    /*
     * Clear flags that are internal to the block layer before opening the
     * image.
     */
    open_flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

    /*
     * Snapshots should be writable.
     */
    if (bs->is_temporary) {
        open_flags |= BDRV_O_RDWR;
    }

    bs->keep_read_only = bs->read_only = !(open_flags & BDRV_O_RDWR);

    /* Open the image, either directly or using a protocol */
    if (drv->bdrv_file_open) {
        ret = drv->bdrv_file_open(bs, filename, open_flags);
    } else {
        ret = bdrv_file_open(&bs->file, filename, open_flags);
        if (ret >= 0) {
            ret = drv->bdrv_open(bs, open_flags);
        }
    }

    if (ret < 0) {
        goto free_and_fail;
    }

    ret = refresh_total_sectors(bs, bs->total_sectors);
    if (ret < 0) {
        goto free_and_fail;
    }

#ifndef _WIN32
    if (bs->is_temporary) {
        unlink(filename);
    }
#endif
    return 0;

free_and_fail:
    if (bs->file) {
        bdrv_delete(bs->file);
        bs->file = NULL;
    }
    g_free(bs->opaque);
    bs->opaque = NULL;
    bs->drv = NULL;
    return ret;
}

/*
 * Opens a file using a protocol (file, host_device, nbd, ...)
 */
int bdrv_file_open(BlockDriverState **pbs, const char *filename, int flags)
{
    BlockDriverState *bs;
    BlockDriver *drv;
    int ret;

    drv = bdrv_find_protocol(filename);
    if (!drv) {
        return -ENOENT;
    }

    bs = bdrv_new("");
    ret = bdrv_open_common(bs, filename, flags, drv);
    if (ret < 0) {
        bdrv_delete(bs);
        return ret;
    }
    bs->growable = 1;
    *pbs = bs;
    return 0;
}

/*
 * Opens a disk image (raw, qcow2, vmdk, ...)
 */
int bdrv_open(BlockDriverState *bs, const char *filename, int flags,
              BlockDriver *drv)
{
    int ret;
    char tmp_filename[PATH_MAX];

    if (flags & BDRV_O_SNAPSHOT) {
        BlockDriverState *bs1;
        int64_t total_size;
        int is_protocol = 0;
        BlockDriver *bdrv_qcow2;
        QEMUOptionParameter *options;
        char backing_filename[PATH_MAX];

        /* if snapshot, we create a temporary backing file and open it
           instead of opening 'filename' directly */

        /* if there is a backing file, use it */
        bs1 = bdrv_new("");
        ret = bdrv_open(bs1, filename, 0, drv);
        if (ret < 0) {
            bdrv_delete(bs1);
            return ret;
        }
        total_size = bdrv_getlength(bs1) & BDRV_SECTOR_MASK;

        if (bs1->drv && bs1->drv->protocol_name)
            is_protocol = 1;

        bdrv_delete(bs1);

        ret = get_tmp_filename(tmp_filename, sizeof(tmp_filename));
        if (ret < 0) {
            return ret;
        }

        /* Real path is meaningless for protocols */
        if (is_protocol)
            snprintf(backing_filename, sizeof(backing_filename),
                     "%s", filename);
        else if (!realpath(filename, backing_filename))
            return -errno;

        bdrv_qcow2 = bdrv_find_format("qcow2");
        options = parse_option_parameters("", bdrv_qcow2->create_options, NULL);

        set_option_parameter_int(options, BLOCK_OPT_SIZE, total_size);
        set_option_parameter(options, BLOCK_OPT_BACKING_FILE, backing_filename);
        if (drv) {
            set_option_parameter(options, BLOCK_OPT_BACKING_FMT,
                drv->format_name);
        }

        ret = bdrv_create(bdrv_qcow2, tmp_filename, options);
        free_option_parameters(options);
        if (ret < 0) {
            return ret;
        }

        filename = tmp_filename;
        drv = bdrv_qcow2;
        bs->is_temporary = 1;
    }

    /* Find the right image format driver */
    if (!drv) {
        ret = find_image_format(filename, &drv);
    }

    if (!drv) {
        goto unlink_and_fail;
    }

    /* Open the image */
    ret = bdrv_open_common(bs, filename, flags, drv);
    if (ret < 0) {
        goto unlink_and_fail;
    }

    /* If there is a backing file, use it */
    if ((flags & BDRV_O_NO_BACKING) == 0 && bs->backing_file[0] != '\0') {
        char backing_filename[PATH_MAX];
        int back_flags;
        BlockDriver *back_drv = NULL;

        bs->backing_hd = bdrv_new("");
        bdrv_get_full_backing_filename(bs, backing_filename,
                                       sizeof(backing_filename));

        if (bs->backing_format[0] != '\0') {
            back_drv = bdrv_find_format(bs->backing_format);
        }

        /* backing files always opened read-only */
        back_flags =
            flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

        ret = bdrv_open(bs->backing_hd, backing_filename, back_flags, back_drv);
        if (ret < 0) {
            bdrv_close(bs);
            return ret;
        }
        if (bs->is_temporary) {
            bs->backing_hd->keep_read_only = !(flags & BDRV_O_RDWR);
        } else {
            /* base image inherits from "parent" */
            bs->backing_hd->keep_read_only = bs->keep_read_only;
        }
    }

    if (!bdrv_key_required(bs)) {
        bdrv_dev_change_media_cb(bs, true);
    }

    /* throttling disk I/O limits */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_enable(bs);
    }

    return 0;

unlink_and_fail:
    if (bs->is_temporary) {
        unlink(filename);
    }
    return ret;
}

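/* Flush and close the image, cancel any running block job, and reset the
 * BlockDriverState fields so that bs can be reused or deleted. */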
void bdrv_close(BlockDriverState *bs)
{
    bdrv_flush(bs);
    if (bs->drv) {
        if (bs->job) {
            block_job_cancel_sync(bs->job);
        }
        bdrv_drain_all();

        if (bs == bs_snapshots) {
            bs_snapshots = NULL;
        }
        if (bs->backing_hd) {
            bdrv_delete(bs->backing_hd);
            bs->backing_hd = NULL;
        }
        bs->drv->bdrv_close(bs);
        g_free(bs->opaque);
#ifdef _WIN32
        if (bs->is_temporary) {
            unlink(bs->filename);
        }
#endif
        bs->opaque = NULL;
        bs->drv = NULL;
        bs->copy_on_read = 0;
        bs->backing_file[0] = '\0';
        bs->backing_format[0] = '\0';
        bs->total_sectors = 0;
        bs->encrypted = 0;
        bs->valid_key = 0;
        bs->sg = 0;
        bs->growable = 0;

        if (bs->file != NULL) {
            bdrv_delete(bs->file);
            bs->file = NULL;
        }

        bdrv_dev_change_media_cb(bs, false);
    }

    /* throttling disk I/O limits */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_disable(bs);
    }
}

void bdrv_close_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        bdrv_close(bs);
    }
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * Note that completion of an asynchronous I/O operation can trigger any
 * number of other I/O operations on other devices---for example a coroutine
 * can be arbitrarily complex and a constant flow of I/O can come until the
 * coroutine is complete. Because of this, it is not possible to have a
 * function to drain a single device's I/O queue.
 */
void bdrv_drain_all(void)
{
    BlockDriverState *bs;
    bool busy;

    do {
        busy = qemu_aio_wait();

        /* FIXME: We do not have timer support here, so this is effectively
         * a busy wait.
         */
        QTAILQ_FOREACH(bs, &bdrv_states, list) {
            if (!qemu_co_queue_empty(&bs->throttled_reqs)) {
                qemu_co_queue_restart_all(&bs->throttled_reqs);
                busy = true;
            }
        }
    } while (busy);

    /* If requests are still pending there is a bug somewhere */
    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        assert(QLIST_EMPTY(&bs->tracked_requests));
        assert(qemu_co_queue_empty(&bs->throttled_reqs));
    }
}

/* Make a BlockDriverState anonymous by removing it from the bdrv_states list.
   Also clear device_name so that a second remove is a no-op. */
void bdrv_make_anon(BlockDriverState *bs)
{
    if (bs->device_name[0] != '\0') {
        QTAILQ_REMOVE(&bdrv_states, bs, list);
    }
    bs->device_name[0] = '\0';
}

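/* Give the driver a chance to fix up driver-internal pointers after the
 * BlockDriverState contents have been swapped (see bdrv_append()). */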
static void bdrv_rebind(BlockDriverState *bs)
{
    if (bs->drv && bs->drv->bdrv_rebind) {
        bs->drv->bdrv_rebind(bs);
    }
}

/*
 * Add new bs contents at the top of an image chain while the chain is live,
 * keeping the required fields on the top layer.
 *
 * This will modify the BlockDriverState fields, and swap the contents of
 * bs_new and bs_top. Both bs_new and bs_top are modified.
 *
 * bs_new is required to be anonymous.
 *
 * This function does not create any image files.
 */
void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top)
{
    BlockDriverState tmp;

    /* bs_new must be anonymous */
    assert(bs_new->device_name[0] == '\0');

    tmp = *bs_new;

    /* there are some fields that need to stay on the top layer: */
    tmp.open_flags = bs_top->open_flags;

    /* dev info */
    tmp.dev_ops = bs_top->dev_ops;
    tmp.dev_opaque = bs_top->dev_opaque;
    tmp.dev = bs_top->dev;
    tmp.buffer_alignment = bs_top->buffer_alignment;
    tmp.copy_on_read = bs_top->copy_on_read;

    tmp.enable_write_cache = bs_top->enable_write_cache;

    /* i/o timing parameters */
    tmp.slice_time = bs_top->slice_time;
    tmp.slice_start = bs_top->slice_start;
    tmp.slice_end = bs_top->slice_end;
    tmp.io_limits = bs_top->io_limits;
    tmp.io_base = bs_top->io_base;
    tmp.throttled_reqs = bs_top->throttled_reqs;
    tmp.block_timer = bs_top->block_timer;
    tmp.io_limits_enabled = bs_top->io_limits_enabled;

    /* geometry */
    tmp.cyls = bs_top->cyls;
    tmp.heads = bs_top->heads;
    tmp.secs = bs_top->secs;
    tmp.translation = bs_top->translation;

    /* r/w error */
    tmp.on_read_error = bs_top->on_read_error;
    tmp.on_write_error = bs_top->on_write_error;

    /* i/o status */
    tmp.iostatus_enabled = bs_top->iostatus_enabled;
    tmp.iostatus = bs_top->iostatus;

    /* keep the same entry in bdrv_states */
    pstrcpy(tmp.device_name, sizeof(tmp.device_name), bs_top->device_name);
    tmp.list = bs_top->list;

    /* The contents of 'tmp' will become bs_top, as we are
     * swapping bs_new and bs_top contents. */
    tmp.backing_hd = bs_new;
    pstrcpy(tmp.backing_file, sizeof(tmp.backing_file), bs_top->filename);
    bdrv_get_format(bs_top, tmp.backing_format, sizeof(tmp.backing_format));

    /* swap contents of the fixed new bs and the current top */
    *bs_new = *bs_top;
    *bs_top = tmp;

    /* device_name[] was carried over from the old bs_top.  bs_new
     * shouldn't be in bdrv_states, so we need to make device_name[]
     * reflect the anonymity of bs_new
     */
    bs_new->device_name[0] = '\0';

    /* clear the copied fields in the new backing file */
    bdrv_detach_dev(bs_new, bs_new->dev);

    qemu_co_queue_init(&bs_new->throttled_reqs);
    memset(&bs_new->io_base,   0, sizeof(bs_new->io_base));
    memset(&bs_new->io_limits, 0, sizeof(bs_new->io_limits));
    bdrv_iostatus_disable(bs_new);

    /* we don't use bdrv_io_limits_disable() for this, because we don't want
     * to affect or delete the block_timer, as it has been moved to bs_top */
    bs_new->io_limits_enabled = false;
    bs_new->block_timer = NULL;
    bs_new->slice_time = 0;
    bs_new->slice_start = 0;
    bs_new->slice_end = 0;

    bdrv_rebind(bs_new);
    bdrv_rebind(bs_top);
}

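/* Free a BlockDriverState that is no longer attached to a device, has no
 * running job and is not in use; it is closed and removed from bdrv_states. */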
void bdrv_delete(BlockDriverState *bs)
{
    assert(!bs->dev);
    assert(!bs->job);
    assert(!bs->in_use);

    /* remove from list, if necessary */
    bdrv_make_anon(bs);

    bdrv_close(bs);

    assert(bs != bs_snapshots);
    g_free(bs);
}

int bdrv_attach_dev(BlockDriverState *bs, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    if (bs->dev) {
        return -EBUSY;
    }
    bs->dev = dev;
    bdrv_iostatus_reset(bs);
    return 0;
}

/* TODO qdevified devices don't use this, remove when devices are qdevified */
void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev)
{
    if (bdrv_attach_dev(bs, dev) < 0) {
        abort();
    }
}

void bdrv_detach_dev(BlockDriverState *bs, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    assert(bs->dev == dev);
    bs->dev = NULL;
    bs->dev_ops = NULL;
    bs->dev_opaque = NULL;
    bs->buffer_alignment = 512;
}

/* TODO change to return DeviceState * when all users are qdevified */
void *bdrv_get_attached_dev(BlockDriverState *bs)
{
    return bs->dev;
}

void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops,
                      void *opaque)
{
    bs->dev_ops = ops;
    bs->dev_opaque = opaque;
    if (bdrv_dev_has_removable_media(bs) && bs == bs_snapshots) {
        bs_snapshots = NULL;
    }
}

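/* Emit a QMP BLOCK_IO_ERROR event describing the device, the chosen action
 * and whether the failed operation was a read or a write. */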
void bdrv_emit_qmp_error_event(const BlockDriverState *bdrv,
                               BlockQMPEventAction action, int is_read)
{
    QObject *data;
    const char *action_str;

    switch (action) {
    case BDRV_ACTION_REPORT:
        action_str = "report";
        break;
    case BDRV_ACTION_IGNORE:
        action_str = "ignore";
        break;
    case BDRV_ACTION_STOP:
        action_str = "stop";
        break;
    default:
        abort();
    }

    data = qobject_from_jsonf("{ 'device': %s, 'action': %s, 'operation': %s }",
                              bdrv->device_name,
                              action_str,
                              is_read ? "read" : "write");
    monitor_protocol_event(QEVENT_BLOCK_IO_ERROR, data);

    qobject_decref(data);
}

static void bdrv_emit_qmp_eject_event(BlockDriverState *bs, bool ejected)
{
    QObject *data;

    data = qobject_from_jsonf("{ 'device': %s, 'tray-open': %i }",
                              bdrv_get_device_name(bs), ejected);
    monitor_protocol_event(QEVENT_DEVICE_TRAY_MOVED, data);

    qobject_decref(data);
}

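/* Notify the attached device model of a medium change and emit the
 * corresponding QMP tray-open/tray-close events. */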
static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load)
{
    if (bs->dev_ops && bs->dev_ops->change_media_cb) {
        bool tray_was_closed = !bdrv_dev_is_tray_open(bs);
        bs->dev_ops->change_media_cb(bs->dev_opaque, load);
        if (tray_was_closed) {
            /* tray open */
            bdrv_emit_qmp_eject_event(bs, true);
        }
        if (load) {
            /* tray close */
            bdrv_emit_qmp_eject_event(bs, false);
        }
    }
}

bool bdrv_dev_has_removable_media(BlockDriverState *bs)
{
    return !bs->dev || (bs->dev_ops && bs->dev_ops->change_media_cb);
}

void bdrv_dev_eject_request(BlockDriverState *bs, bool force)
{
    if (bs->dev_ops && bs->dev_ops->eject_request_cb) {
        bs->dev_ops->eject_request_cb(bs->dev_opaque, force);
    }
}

bool bdrv_dev_is_tray_open(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->is_tray_open) {
        return bs->dev_ops->is_tray_open(bs->dev_opaque);
    }
    return false;
}

static void bdrv_dev_resize_cb(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->resize_cb) {
        bs->dev_ops->resize_cb(bs->dev_opaque);
    }
}

bool bdrv_dev_is_medium_locked(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->is_medium_locked) {
        return bs->dev_ops->is_medium_locked(bs->dev_opaque);
    }
    return false;
}

/*
 * Run consistency checks on an image
 *
 * Returns 0 if the check could be completed (it doesn't mean that the image is
 * free of errors) or -errno when an internal error occurred. The results of
 * the check are stored in res.
 */
int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix)
{
    if (bs->drv->bdrv_check == NULL) {
        return -ENOTSUP;
    }

    memset(res, 0, sizeof(*res));
    return bs->drv->bdrv_check(bs, res, fix);
}

#define COMMIT_BUF_SECTORS 2048

/* commit COW file into the raw image */
int bdrv_commit(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    BlockDriver *backing_drv;
    int64_t sector, total_sectors;
    int n, ro, open_flags;
    int ret = 0, rw_ret = 0;
    uint8_t *buf;
    char filename[1024];
    BlockDriverState *bs_rw, *bs_ro;

    if (!drv)
        return -ENOMEDIUM;

    if (!bs->backing_hd) {
        return -ENOTSUP;
    }

    if (bs->backing_hd->keep_read_only) {
        return -EACCES;
    }

    if (bdrv_in_use(bs) || bdrv_in_use(bs->backing_hd)) {
        return -EBUSY;
    }

    backing_drv = bs->backing_hd->drv;
    ro = bs->backing_hd->read_only;
    strncpy(filename, bs->backing_hd->filename, sizeof(filename));
    open_flags = bs->backing_hd->open_flags;

    if (ro) {
        /* re-open as RW */
        bdrv_delete(bs->backing_hd);
        bs->backing_hd = NULL;
        bs_rw = bdrv_new("");
        rw_ret = bdrv_open(bs_rw, filename, open_flags | BDRV_O_RDWR,
            backing_drv);
        if (rw_ret < 0) {
            bdrv_delete(bs_rw);
            /* try to re-open read-only */
            bs_ro = bdrv_new("");
            ret = bdrv_open(bs_ro, filename, open_flags & ~BDRV_O_RDWR,
                backing_drv);
            if (ret < 0) {
                bdrv_delete(bs_ro);
                /* drive not functional anymore */
                bs->drv = NULL;
                return ret;
            }
            bs->backing_hd = bs_ro;
            return rw_ret;
        }
        bs->backing_hd = bs_rw;
    }

    total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
    buf = g_malloc(COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);

    for (sector = 0; sector < total_sectors; sector += n) {
        if (bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n)) {

            if (bdrv_read(bs, sector, buf, n) != 0) {
                ret = -EIO;
                goto ro_cleanup;
            }

            if (bdrv_write(bs->backing_hd, sector, buf, n) != 0) {
                ret = -EIO;
                goto ro_cleanup;
            }
        }
    }

    if (drv->bdrv_make_empty) {
        ret = drv->bdrv_make_empty(bs);
        bdrv_flush(bs);
    }

    /*
     * Make sure all data we wrote to the backing device is actually
     * stable on disk.
     */
    if (bs->backing_hd)
        bdrv_flush(bs->backing_hd);

ro_cleanup:
    g_free(buf);

    if (ro) {
        /* re-open as RO */
        bdrv_delete(bs->backing_hd);
        bs->backing_hd = NULL;
        bs_ro = bdrv_new("");
        ret = bdrv_open(bs_ro, filename, open_flags & ~BDRV_O_RDWR,
            backing_drv);
        if (ret < 0) {
            bdrv_delete(bs_ro);
            /* drive not functional anymore */
            bs->drv = NULL;
            return ret;
        }
        bs->backing_hd = bs_ro;
        bs->backing_hd->keep_read_only = 0;
    }

    return ret;
}

int bdrv_commit_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        int ret = bdrv_commit(bs);
        if (ret < 0) {
            return ret;
        }
    }
    return 0;
}

Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00001363struct BdrvTrackedRequest {
1364 BlockDriverState *bs;
1365 int64_t sector_num;
1366 int nb_sectors;
1367 bool is_write;
1368 QLIST_ENTRY(BdrvTrackedRequest) list;
Stefan Hajnoczi5f8b6492011-11-30 12:23:42 +00001369 Coroutine *co; /* owner, used for deadlock detection */
Stefan Hajnoczif4658282011-11-17 13:40:29 +00001370 CoQueue wait_queue; /* coroutines blocked on this request */
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00001371};
1372
1373/**
1374 * Remove an active request from the tracked requests list
1375 *
1376 * This function should be called when a tracked request is completing.
1377 */
1378static void tracked_request_end(BdrvTrackedRequest *req)
1379{
1380 QLIST_REMOVE(req, list);
Stefan Hajnoczif4658282011-11-17 13:40:29 +00001381 qemu_co_queue_restart_all(&req->wait_queue);
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00001382}
1383
1384/**
1385 * Add an active request to the tracked requests list
1386 */
1387static void tracked_request_begin(BdrvTrackedRequest *req,
1388 BlockDriverState *bs,
1389 int64_t sector_num,
1390 int nb_sectors, bool is_write)
1391{
1392 *req = (BdrvTrackedRequest){
1393 .bs = bs,
1394 .sector_num = sector_num,
1395 .nb_sectors = nb_sectors,
1396 .is_write = is_write,
Stefan Hajnoczi5f8b6492011-11-30 12:23:42 +00001397 .co = qemu_coroutine_self(),
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00001398 };
1399
Stefan Hajnoczif4658282011-11-17 13:40:29 +00001400 qemu_co_queue_init(&req->wait_queue);
1401
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00001402 QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
1403}
1404
Stefan Hajnoczid83947a2011-11-23 11:47:56 +00001405/**
1406 * Round a region to cluster boundaries
1407 */
1408static void round_to_clusters(BlockDriverState *bs,
1409 int64_t sector_num, int nb_sectors,
1410 int64_t *cluster_sector_num,
1411 int *cluster_nb_sectors)
1412{
1413 BlockDriverInfo bdi;
1414
1415 if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
1416 *cluster_sector_num = sector_num;
1417 *cluster_nb_sectors = nb_sectors;
1418 } else {
1419 int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
1420 *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
1421 *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
1422 nb_sectors, c);
1423 }
1424}
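/*
 * Illustrative example, not part of the original file: assuming the driver
 * reports bdi.cluster_size == 4096 (eight 512-byte sectors), a request for
 * sectors [5, 15) is widened as follows:
 *
 *   c                   = 4096 / BDRV_SECTOR_SIZE      = 8
 *   *cluster_sector_num = QEMU_ALIGN_DOWN(5, 8)        = 0
 *   *cluster_nb_sectors = QEMU_ALIGN_UP(5 - 0 + 10, 8) = 16
 *
 * i.e. the rounded region covers sectors [0, 16), two whole clusters.
 */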
1425
Stefan Hajnoczif4658282011-11-17 13:40:29 +00001426static bool tracked_request_overlaps(BdrvTrackedRequest *req,
1427 int64_t sector_num, int nb_sectors) {
Stefan Hajnoczid83947a2011-11-23 11:47:56 +00001428 /* aaaa bbbb */
1429 if (sector_num >= req->sector_num + req->nb_sectors) {
1430 return false;
1431 }
1432 /* bbbb aaaa */
1433 if (req->sector_num >= sector_num + nb_sectors) {
1434 return false;
1435 }
1436 return true;
Stefan Hajnoczif4658282011-11-17 13:40:29 +00001437}
1438
1439static void coroutine_fn wait_for_overlapping_requests(BlockDriverState *bs,
1440 int64_t sector_num, int nb_sectors)
1441{
1442 BdrvTrackedRequest *req;
Stefan Hajnoczid83947a2011-11-23 11:47:56 +00001443 int64_t cluster_sector_num;
1444 int cluster_nb_sectors;
Stefan Hajnoczif4658282011-11-17 13:40:29 +00001445 bool retry;
1446
Stefan Hajnoczid83947a2011-11-23 11:47:56 +00001447 /* If we touch the same cluster it counts as an overlap. This guarantees
1448 * that allocating writes will be serialized and not race with each other
1449 * for the same cluster. For example, in copy-on-read it ensures that the
1450 * CoR read and write operations are atomic and guest writes cannot
1451 * interleave between them.
1452 */
1453 round_to_clusters(bs, sector_num, nb_sectors,
1454 &cluster_sector_num, &cluster_nb_sectors);
1455
Stefan Hajnoczif4658282011-11-17 13:40:29 +00001456 do {
1457 retry = false;
1458 QLIST_FOREACH(req, &bs->tracked_requests, list) {
Stefan Hajnoczid83947a2011-11-23 11:47:56 +00001459 if (tracked_request_overlaps(req, cluster_sector_num,
1460 cluster_nb_sectors)) {
Stefan Hajnoczi5f8b6492011-11-30 12:23:42 +00001461 /* Hitting this means there was a reentrant request, for
1462 * example, a block driver issuing nested requests. This must
1463 * never happen since it means deadlock.
1464 */
1465 assert(qemu_coroutine_self() != req->co);
1466
Stefan Hajnoczif4658282011-11-17 13:40:29 +00001467 qemu_co_queue_wait(&req->wait_queue);
1468 retry = true;
1469 break;
1470 }
1471 }
1472 } while (retry);
1473}
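/*
 * Illustrative sketch, not part of the original file: how the tracking
 * helpers above are intended to wrap an I/O operation.  It mirrors what
 * bdrv_co_do_readv()/bdrv_co_do_writev() do further down; the function
 * name and the do_io() step are hypothetical placeholders.
 */
#if 0
static int coroutine_fn example_tracked_io(BlockDriverState *bs,
                                           int64_t sector_num, int nb_sectors)
{
    BdrvTrackedRequest req;
    int ret;

    /* Serialize against overlapping in-flight requests (the real callers
     * only do this while copy-on-read requests are in flight). */
    wait_for_overlapping_requests(bs, sector_num, nb_sectors);

    tracked_request_begin(&req, bs, sector_num, nb_sectors, false);
    ret = do_io(bs, sector_num, nb_sectors);   /* hypothetical I/O step */
    tracked_request_end(&req);                 /* wakes coroutines queued on us */

    return ret;
}
#endif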
1474
Kevin Wolf756e6732010-01-12 12:55:17 +01001475/*
1476 * Return values:
1477 * 0 - success
1478 * -EINVAL - backing format specified, but no file
1479 * -ENOSPC - can't update the backing file because no space is left in the
1480 * image file header
1481 * -ENOTSUP - format driver doesn't support changing the backing file
1482 */
1483int bdrv_change_backing_file(BlockDriverState *bs,
1484 const char *backing_file, const char *backing_fmt)
1485{
1486 BlockDriver *drv = bs->drv;
Paolo Bonzini469ef352012-04-12 14:01:02 +02001487 int ret;
Kevin Wolf756e6732010-01-12 12:55:17 +01001488
Paolo Bonzini5f377792012-04-12 14:01:01 +02001489 /* Backing file format doesn't make sense without a backing file */
1490 if (backing_fmt && !backing_file) {
1491 return -EINVAL;
1492 }
1493
Kevin Wolf756e6732010-01-12 12:55:17 +01001494 if (drv->bdrv_change_backing_file != NULL) {
Paolo Bonzini469ef352012-04-12 14:01:02 +02001495 ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
Kevin Wolf756e6732010-01-12 12:55:17 +01001496 } else {
Paolo Bonzini469ef352012-04-12 14:01:02 +02001497 ret = -ENOTSUP;
Kevin Wolf756e6732010-01-12 12:55:17 +01001498 }
Paolo Bonzini469ef352012-04-12 14:01:02 +02001499
1500 if (ret == 0) {
1501 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
1502 pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
1503 }
1504 return ret;
Kevin Wolf756e6732010-01-12 12:55:17 +01001505}
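/*
 * Illustrative sketch, not part of the original file: repointing an image
 * at a new backing file and handling the error codes documented above.
 * The device name "ide0-hd0" and the file/format names are made up.
 */
#if 0
static void example_rebase(void)
{
    BlockDriverState *bs = bdrv_find("ide0-hd0");
    int ret;

    if (!bs) {
        return;
    }
    ret = bdrv_change_backing_file(bs, "new-base.qcow2", "qcow2");
    if (ret == -ENOTSUP) {
        /* the format driver cannot rewrite its header */
    } else if (ret < 0) {
        /* -EINVAL, -ENOSPC or another error reported by the driver */
    }
}
#endif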
1506
aliguori71d07702009-03-03 17:37:16 +00001507static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
1508 size_t size)
1509{
1510 int64_t len;
1511
1512 if (!bdrv_is_inserted(bs))
1513 return -ENOMEDIUM;
1514
1515 if (bs->growable)
1516 return 0;
1517
1518 len = bdrv_getlength(bs);
1519
Kevin Wolffbb7b4e2009-05-08 14:47:24 +02001520 if (offset < 0)
1521 return -EIO;
1522
1523 if ((offset > len) || (len - offset < size))
aliguori71d07702009-03-03 17:37:16 +00001524 return -EIO;
1525
1526 return 0;
1527}
1528
1529static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
1530 int nb_sectors)
1531{
Jes Sorenseneb5a3162010-05-27 16:20:31 +02001532 return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
1533 nb_sectors * BDRV_SECTOR_SIZE);
aliguori71d07702009-03-03 17:37:16 +00001534}
1535
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01001536typedef struct RwCo {
1537 BlockDriverState *bs;
1538 int64_t sector_num;
1539 int nb_sectors;
1540 QEMUIOVector *qiov;
1541 bool is_write;
1542 int ret;
1543} RwCo;
1544
1545static void coroutine_fn bdrv_rw_co_entry(void *opaque)
1546{
1547 RwCo *rwco = opaque;
1548
1549 if (!rwco->is_write) {
1550 rwco->ret = bdrv_co_do_readv(rwco->bs, rwco->sector_num,
Stefan Hajnoczi470c0502012-01-18 14:40:42 +00001551 rwco->nb_sectors, rwco->qiov, 0);
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01001552 } else {
1553 rwco->ret = bdrv_co_do_writev(rwco->bs, rwco->sector_num,
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00001554 rwco->nb_sectors, rwco->qiov, 0);
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01001555 }
1556}
1557
1558/*
1559 * Process a synchronous request using coroutines
1560 */
1561static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
1562 int nb_sectors, bool is_write)
1563{
1564 QEMUIOVector qiov;
1565 struct iovec iov = {
1566 .iov_base = (void *)buf,
1567 .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
1568 };
1569 Coroutine *co;
1570 RwCo rwco = {
1571 .bs = bs,
1572 .sector_num = sector_num,
1573 .nb_sectors = nb_sectors,
1574 .qiov = &qiov,
1575 .is_write = is_write,
1576 .ret = NOT_DONE,
1577 };
1578
1579 qemu_iovec_init_external(&qiov, &iov, 1);
1580
Zhi Yong Wu498e3862012-04-02 18:59:34 +08001581 /**
1582 * In a synchronous call context the vcpu is blocked, so this throttling
1583 * timer would never fire; the I/O throttling function therefore has to
1584 * be disabled here if it has been enabled.
1585 */
1586 if (bs->io_limits_enabled) {
1587 fprintf(stderr, "Disabling I/O throttling on '%s' due "
1588 "to synchronous I/O.\n", bdrv_get_device_name(bs));
1589 bdrv_io_limits_disable(bs);
1590 }
1591
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01001592 if (qemu_in_coroutine()) {
1593 /* Fast-path if already in coroutine context */
1594 bdrv_rw_co_entry(&rwco);
1595 } else {
1596 co = qemu_coroutine_create(bdrv_rw_co_entry);
1597 qemu_coroutine_enter(co, &rwco);
1598 while (rwco.ret == NOT_DONE) {
1599 qemu_aio_wait();
1600 }
1601 }
1602 return rwco.ret;
1603}
1604
bellard19cb3732006-08-19 11:45:59 +00001605/* return < 0 if error. See bdrv_write() for the return codes */
ths5fafdf22007-09-16 21:08:06 +00001606int bdrv_read(BlockDriverState *bs, int64_t sector_num,
bellardfc01f7e2003-06-30 10:03:06 +00001607 uint8_t *buf, int nb_sectors)
1608{
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01001609 return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false);
bellardfc01f7e2003-06-30 10:03:06 +00001610}
1611
Paolo Bonzini71df14f2012-04-12 14:01:04 +02001612#define BITS_PER_LONG (sizeof(unsigned long) * 8)
1613
lirans@il.ibm.com7cd1e322009-11-02 15:40:41 +02001614static void set_dirty_bitmap(BlockDriverState *bs, int64_t sector_num,
Jan Kiszkaa55eb922009-11-30 18:21:19 +01001615 int nb_sectors, int dirty)
lirans@il.ibm.com7cd1e322009-11-02 15:40:41 +02001616{
1617 int64_t start, end;
Jan Kiszkac6d22832009-11-30 18:21:20 +01001618 unsigned long val, idx, bit;
Jan Kiszkaa55eb922009-11-30 18:21:19 +01001619
Jan Kiszka6ea44302009-11-30 18:21:19 +01001620 start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
Jan Kiszkac6d22832009-11-30 18:21:20 +01001621 end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;
Jan Kiszkaa55eb922009-11-30 18:21:19 +01001622
1623 for (; start <= end; start++) {
Paolo Bonzini71df14f2012-04-12 14:01:04 +02001624 idx = start / BITS_PER_LONG;
1625 bit = start % BITS_PER_LONG;
Jan Kiszkac6d22832009-11-30 18:21:20 +01001626 val = bs->dirty_bitmap[idx];
1627 if (dirty) {
Marcelo Tosatti6d59fec2010-11-08 17:02:54 -02001628 if (!(val & (1UL << bit))) {
Liran Schouraaa0eb72010-01-26 10:31:48 +02001629 bs->dirty_count++;
Marcelo Tosatti6d59fec2010-11-08 17:02:54 -02001630 val |= 1UL << bit;
Liran Schouraaa0eb72010-01-26 10:31:48 +02001631 }
Jan Kiszkac6d22832009-11-30 18:21:20 +01001632 } else {
Marcelo Tosatti6d59fec2010-11-08 17:02:54 -02001633 if (val & (1UL << bit)) {
Liran Schouraaa0eb72010-01-26 10:31:48 +02001634 bs->dirty_count--;
Marcelo Tosatti6d59fec2010-11-08 17:02:54 -02001635 val &= ~(1UL << bit);
Liran Schouraaa0eb72010-01-26 10:31:48 +02001636 }
Jan Kiszkac6d22832009-11-30 18:21:20 +01001637 }
1638 bs->dirty_bitmap[idx] = val;
lirans@il.ibm.com7cd1e322009-11-02 15:40:41 +02001639 }
1640}
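/*
 * Illustrative example, not part of the original file: each bit of the
 * dirty bitmap stands for one chunk of BDRV_SECTORS_PER_DIRTY_CHUNK
 * sectors.  With 64-bit longs (BITS_PER_LONG == 64), dirtying a sector in
 * chunk 130 sets bit 130 % 64 == 2 of word 130 / 64 == 2, and dirty_count
 * is only adjusted when the bit actually changes.
 */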
1641
ths5fafdf22007-09-16 21:08:06 +00001642/* Return < 0 if error. Important errors are:
bellard19cb3732006-08-19 11:45:59 +00001643 -EIO generic I/O error (may happen for all errors)
1644 -ENOMEDIUM No media inserted.
1645 -EINVAL Invalid sector number or nb_sectors
1646 -EACCES Trying to write a read-only device
1647*/
ths5fafdf22007-09-16 21:08:06 +00001648int bdrv_write(BlockDriverState *bs, int64_t sector_num,
bellardfc01f7e2003-06-30 10:03:06 +00001649 const uint8_t *buf, int nb_sectors)
1650{
Stefan Hajnoczi1c9805a2011-10-13 13:08:22 +01001651 return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true);
bellard83f64092006-08-01 16:21:11 +00001652}
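/*
 * Illustrative sketch, not part of the original file: synchronous
 * sector-level access through the wrappers above.  Because bdrv_rw_co()
 * may disable I/O throttling on the device, these helpers suit setup and
 * bootstrap paths rather than guest I/O.
 */
#if 0
static int example_copy_first_sector(BlockDriverState *src,
                                     BlockDriverState *dst)
{
    uint8_t buf[BDRV_SECTOR_SIZE];
    int ret;

    ret = bdrv_read(src, 0, buf, 1);      /* read sector 0 of the source */
    if (ret < 0) {
        return ret;
    }
    return bdrv_write(dst, 0, buf, 1);    /* write it to the destination */
}
#endif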
1653
aliguorieda578e2009-03-12 19:57:16 +00001654int bdrv_pread(BlockDriverState *bs, int64_t offset,
1655 void *buf, int count1)
bellard83f64092006-08-01 16:21:11 +00001656{
Jan Kiszka6ea44302009-11-30 18:21:19 +01001657 uint8_t tmp_buf[BDRV_SECTOR_SIZE];
bellard83f64092006-08-01 16:21:11 +00001658 int len, nb_sectors, count;
1659 int64_t sector_num;
Kevin Wolf9a8c4cc2010-01-20 15:03:02 +01001660 int ret;
bellard83f64092006-08-01 16:21:11 +00001661
1662 count = count1;
1663 /* first read to align to sector start */
Jan Kiszka6ea44302009-11-30 18:21:19 +01001664 len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
bellard83f64092006-08-01 16:21:11 +00001665 if (len > count)
1666 len = count;
Jan Kiszka6ea44302009-11-30 18:21:19 +01001667 sector_num = offset >> BDRV_SECTOR_BITS;
bellard83f64092006-08-01 16:21:11 +00001668 if (len > 0) {
Kevin Wolf9a8c4cc2010-01-20 15:03:02 +01001669 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
1670 return ret;
Jan Kiszka6ea44302009-11-30 18:21:19 +01001671 memcpy(buf, tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)), len);
bellard83f64092006-08-01 16:21:11 +00001672 count -= len;
1673 if (count == 0)
1674 return count1;
1675 sector_num++;
1676 buf += len;
1677 }
1678
1679 /* read the sectors "in place" */
Jan Kiszka6ea44302009-11-30 18:21:19 +01001680 nb_sectors = count >> BDRV_SECTOR_BITS;
bellard83f64092006-08-01 16:21:11 +00001681 if (nb_sectors > 0) {
Kevin Wolf9a8c4cc2010-01-20 15:03:02 +01001682 if ((ret = bdrv_read(bs, sector_num, buf, nb_sectors)) < 0)
1683 return ret;
bellard83f64092006-08-01 16:21:11 +00001684 sector_num += nb_sectors;
Jan Kiszka6ea44302009-11-30 18:21:19 +01001685 len = nb_sectors << BDRV_SECTOR_BITS;
bellard83f64092006-08-01 16:21:11 +00001686 buf += len;
1687 count -= len;
1688 }
1689
1690 /* add data from the last sector */
1691 if (count > 0) {
Kevin Wolf9a8c4cc2010-01-20 15:03:02 +01001692 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
1693 return ret;
bellard83f64092006-08-01 16:21:11 +00001694 memcpy(buf, tmp_buf, count);
1695 }
1696 return count1;
1697}
1698
aliguorieda578e2009-03-12 19:57:16 +00001699int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
1700 const void *buf, int count1)
bellard83f64092006-08-01 16:21:11 +00001701{
Jan Kiszka6ea44302009-11-30 18:21:19 +01001702 uint8_t tmp_buf[BDRV_SECTOR_SIZE];
bellard83f64092006-08-01 16:21:11 +00001703 int len, nb_sectors, count;
1704 int64_t sector_num;
Kevin Wolf9a8c4cc2010-01-20 15:03:02 +01001705 int ret;
bellard83f64092006-08-01 16:21:11 +00001706
1707 count = count1;
1708 /* first write to align to sector start */
Jan Kiszka6ea44302009-11-30 18:21:19 +01001709 len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
bellard83f64092006-08-01 16:21:11 +00001710 if (len > count)
1711 len = count;
Jan Kiszka6ea44302009-11-30 18:21:19 +01001712 sector_num = offset >> BDRV_SECTOR_BITS;
bellard83f64092006-08-01 16:21:11 +00001713 if (len > 0) {
Kevin Wolf9a8c4cc2010-01-20 15:03:02 +01001714 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
1715 return ret;
Jan Kiszka6ea44302009-11-30 18:21:19 +01001716 memcpy(tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)), buf, len);
Kevin Wolf9a8c4cc2010-01-20 15:03:02 +01001717 if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
1718 return ret;
bellard83f64092006-08-01 16:21:11 +00001719 count -= len;
1720 if (count == 0)
1721 return count1;
1722 sector_num++;
1723 buf += len;
1724 }
1725
1726 /* write the sectors "in place" */
Jan Kiszka6ea44302009-11-30 18:21:19 +01001727 nb_sectors = count >> BDRV_SECTOR_BITS;
bellard83f64092006-08-01 16:21:11 +00001728 if (nb_sectors > 0) {
Kevin Wolf9a8c4cc2010-01-20 15:03:02 +01001729 if ((ret = bdrv_write(bs, sector_num, buf, nb_sectors)) < 0)
1730 return ret;
bellard83f64092006-08-01 16:21:11 +00001731 sector_num += nb_sectors;
Jan Kiszka6ea44302009-11-30 18:21:19 +01001732 len = nb_sectors << BDRV_SECTOR_BITS;
bellard83f64092006-08-01 16:21:11 +00001733 buf += len;
1734 count -= len;
1735 }
1736
1737 /* add data from the last sector */
1738 if (count > 0) {
Kevin Wolf9a8c4cc2010-01-20 15:03:02 +01001739 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
1740 return ret;
bellard83f64092006-08-01 16:21:11 +00001741 memcpy(tmp_buf, buf, count);
Kevin Wolf9a8c4cc2010-01-20 15:03:02 +01001742 if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
1743 return ret;
bellard83f64092006-08-01 16:21:11 +00001744 }
1745 return count1;
1746}
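/*
 * Illustrative sketch, not part of the original file: byte-granularity
 * access built on the helpers above.  Offsets and sizes need not be
 * sector-aligned; partial head/tail sectors are emulated with
 * read-modify-write.  The 1030-byte offset is arbitrary and deliberately
 * unaligned.
 */
#if 0
static int example_patch_bytes(BlockDriverState *bs)
{
    uint32_t magic = 0xcafebabe;

    /* overwrite 4 bytes at byte offset 1030; returns 4 on success */
    return bdrv_pwrite(bs, 1030, &magic, sizeof(magic));
}
#endif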
bellard83f64092006-08-01 16:21:11 +00001747
Kevin Wolff08145f2010-06-16 16:38:15 +02001748/*
1749 * Writes to the file and ensures that no writes are reordered across this
1750 * request (acts as a barrier)
1751 *
1752 * Returns 0 on success, -errno in error cases.
1753 */
1754int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
1755 const void *buf, int count)
1756{
1757 int ret;
1758
1759 ret = bdrv_pwrite(bs, offset, buf, count);
1760 if (ret < 0) {
1761 return ret;
1762 }
1763
Paolo Bonzinif05fa4a2012-06-06 00:04:49 +02001764 /* No flush needed for cache modes that already do it */
1765 if (bs->enable_write_cache) {
Kevin Wolff08145f2010-06-16 16:38:15 +02001766 bdrv_flush(bs);
1767 }
1768
1769 return 0;
1770}
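/*
 * Illustrative sketch, not part of the original file: a metadata update
 * that must not be reordered against later writes can go through
 * bdrv_pwrite_sync(), which flushes unless the cache mode already makes
 * writes stable.  The "table entry" framing here is hypothetical.
 */
#if 0
static int example_update_table_entry(BlockDriverState *bs, int64_t offset,
                                      uint64_t entry)
{
    return bdrv_pwrite_sync(bs, offset, &entry, sizeof(entry));
}
#endif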
1771
Stefan Hajnoczi470c0502012-01-18 14:40:42 +00001772static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
Stefan Hajnocziab185922011-11-17 13:40:31 +00001773 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
1774{
1775 /* Perform I/O through a temporary buffer so that users who scribble over
1776 * their read buffer while the operation is in progress do not end up
1777 * modifying the image file. This is critical for zero-copy guest I/O
1778 * where anything might happen inside guest memory.
1779 */
1780 void *bounce_buffer;
1781
Stefan Hajnoczi79c053b2012-02-07 13:27:26 +00001782 BlockDriver *drv = bs->drv;
Stefan Hajnocziab185922011-11-17 13:40:31 +00001783 struct iovec iov;
1784 QEMUIOVector bounce_qiov;
1785 int64_t cluster_sector_num;
1786 int cluster_nb_sectors;
1787 size_t skip_bytes;
1788 int ret;
1789
1790 /* Cover the entire cluster so no additional backing file I/O is required
1791 * when allocating a cluster in the image file.
1792 */
1793 round_to_clusters(bs, sector_num, nb_sectors,
1794 &cluster_sector_num, &cluster_nb_sectors);
1795
Stefan Hajnoczi470c0502012-01-18 14:40:42 +00001796 trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
1797 cluster_sector_num, cluster_nb_sectors);
Stefan Hajnocziab185922011-11-17 13:40:31 +00001798
1799 iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
1800 iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
1801 qemu_iovec_init_external(&bounce_qiov, &iov, 1);
1802
Stefan Hajnoczi79c053b2012-02-07 13:27:26 +00001803 ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
1804 &bounce_qiov);
Stefan Hajnocziab185922011-11-17 13:40:31 +00001805 if (ret < 0) {
1806 goto err;
1807 }
1808
Stefan Hajnoczi79c053b2012-02-07 13:27:26 +00001809 if (drv->bdrv_co_write_zeroes &&
1810 buffer_is_zero(bounce_buffer, iov.iov_len)) {
Kevin Wolf621f0582012-03-20 15:12:58 +01001811 ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
1812 cluster_nb_sectors);
Stefan Hajnoczi79c053b2012-02-07 13:27:26 +00001813 } else {
Paolo Bonzinif05fa4a2012-06-06 00:04:49 +02001814 /* This does not change the data on the disk, so it is not necessary
1815 * to flush even in cache=writethrough mode.
1816 */
Stefan Hajnoczi79c053b2012-02-07 13:27:26 +00001817 ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
Stefan Hajnocziab185922011-11-17 13:40:31 +00001818 &bounce_qiov);
Stefan Hajnoczi79c053b2012-02-07 13:27:26 +00001819 }
1820
Stefan Hajnocziab185922011-11-17 13:40:31 +00001821 if (ret < 0) {
1822 /* It might be okay to ignore write errors for guest requests. If this
1823 * is a deliberate copy-on-read then we don't want to ignore the error.
1824 * Simply report it in all cases.
1825 */
1826 goto err;
1827 }
1828
1829 skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
1830 qemu_iovec_from_buffer(qiov, bounce_buffer + skip_bytes,
1831 nb_sectors * BDRV_SECTOR_SIZE);
1832
1833err:
1834 qemu_vfree(bounce_buffer);
1835 return ret;
1836}
1837
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01001838/*
1839 * Handle a read request in coroutine context
1840 */
1841static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
Stefan Hajnoczi470c0502012-01-18 14:40:42 +00001842 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
1843 BdrvRequestFlags flags)
Kevin Wolfda1fa912011-07-14 17:27:13 +02001844{
1845 BlockDriver *drv = bs->drv;
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00001846 BdrvTrackedRequest req;
1847 int ret;
Kevin Wolfda1fa912011-07-14 17:27:13 +02001848
Kevin Wolfda1fa912011-07-14 17:27:13 +02001849 if (!drv) {
1850 return -ENOMEDIUM;
1851 }
1852 if (bdrv_check_request(bs, sector_num, nb_sectors)) {
1853 return -EIO;
1854 }
1855
Zhi Yong Wu98f90db2011-11-08 13:00:14 +08001856 /* throttling disk read I/O */
1857 if (bs->io_limits_enabled) {
1858 bdrv_io_limits_intercept(bs, false, nb_sectors);
1859 }
1860
Stefan Hajnoczif4658282011-11-17 13:40:29 +00001861 if (bs->copy_on_read) {
Stefan Hajnoczi470c0502012-01-18 14:40:42 +00001862 flags |= BDRV_REQ_COPY_ON_READ;
1863 }
1864 if (flags & BDRV_REQ_COPY_ON_READ) {
1865 bs->copy_on_read_in_flight++;
1866 }
1867
1868 if (bs->copy_on_read_in_flight) {
Stefan Hajnoczif4658282011-11-17 13:40:29 +00001869 wait_for_overlapping_requests(bs, sector_num, nb_sectors);
1870 }
1871
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00001872 tracked_request_begin(&req, bs, sector_num, nb_sectors, false);
Stefan Hajnocziab185922011-11-17 13:40:31 +00001873
Stefan Hajnoczi470c0502012-01-18 14:40:42 +00001874 if (flags & BDRV_REQ_COPY_ON_READ) {
Stefan Hajnocziab185922011-11-17 13:40:31 +00001875 int pnum;
1876
1877 ret = bdrv_co_is_allocated(bs, sector_num, nb_sectors, &pnum);
1878 if (ret < 0) {
1879 goto out;
1880 }
1881
1882 if (!ret || pnum != nb_sectors) {
Stefan Hajnoczi470c0502012-01-18 14:40:42 +00001883 ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
Stefan Hajnocziab185922011-11-17 13:40:31 +00001884 goto out;
1885 }
1886 }
1887
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00001888 ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
Stefan Hajnocziab185922011-11-17 13:40:31 +00001889
1890out:
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00001891 tracked_request_end(&req);
Stefan Hajnoczi470c0502012-01-18 14:40:42 +00001892
1893 if (flags & BDRV_REQ_COPY_ON_READ) {
1894 bs->copy_on_read_in_flight--;
1895 }
1896
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00001897 return ret;
Kevin Wolfda1fa912011-07-14 17:27:13 +02001898}
1899
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01001900int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
Kevin Wolfda1fa912011-07-14 17:27:13 +02001901 int nb_sectors, QEMUIOVector *qiov)
1902{
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01001903 trace_bdrv_co_readv(bs, sector_num, nb_sectors);
Kevin Wolfda1fa912011-07-14 17:27:13 +02001904
Stefan Hajnoczi470c0502012-01-18 14:40:42 +00001905 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
1906}
1907
1908int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
1909 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
1910{
1911 trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);
1912
1913 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
1914 BDRV_REQ_COPY_ON_READ);
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01001915}
1916
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00001917static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
1918 int64_t sector_num, int nb_sectors)
1919{
1920 BlockDriver *drv = bs->drv;
1921 QEMUIOVector qiov;
1922 struct iovec iov;
1923 int ret;
1924
Kevin Wolf621f0582012-03-20 15:12:58 +01001925 /* TODO Emulate only part of misaligned requests instead of letting block
1926 * drivers return -ENOTSUP and emulate everything */
1927
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00001928 /* First try the efficient write zeroes operation */
1929 if (drv->bdrv_co_write_zeroes) {
Kevin Wolf621f0582012-03-20 15:12:58 +01001930 ret = drv->bdrv_co_write_zeroes(bs, sector_num, nb_sectors);
1931 if (ret != -ENOTSUP) {
1932 return ret;
1933 }
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00001934 }
1935
1936 /* Fall back to bounce buffer if write zeroes is unsupported */
1937 iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE;
1938 iov.iov_base = qemu_blockalign(bs, iov.iov_len);
1939 memset(iov.iov_base, 0, iov.iov_len);
1940 qemu_iovec_init_external(&qiov, &iov, 1);
1941
1942 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, &qiov);
1943
1944 qemu_vfree(iov.iov_base);
1945 return ret;
1946}
1947
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01001948/*
1949 * Handle a write request in coroutine context
1950 */
1951static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00001952 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
1953 BdrvRequestFlags flags)
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01001954{
1955 BlockDriver *drv = bs->drv;
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00001956 BdrvTrackedRequest req;
Stefan Hajnoczi6b7cb242011-10-13 13:08:24 +01001957 int ret;
Kevin Wolfda1fa912011-07-14 17:27:13 +02001958
1959 if (!bs->drv) {
1960 return -ENOMEDIUM;
1961 }
1962 if (bs->read_only) {
1963 return -EACCES;
1964 }
1965 if (bdrv_check_request(bs, sector_num, nb_sectors)) {
1966 return -EIO;
1967 }
1968
Zhi Yong Wu98f90db2011-11-08 13:00:14 +08001969 /* throttling disk write I/O */
1970 if (bs->io_limits_enabled) {
1971 bdrv_io_limits_intercept(bs, true, nb_sectors);
1972 }
1973
Stefan Hajnoczi470c0502012-01-18 14:40:42 +00001974 if (bs->copy_on_read_in_flight) {
Stefan Hajnoczif4658282011-11-17 13:40:29 +00001975 wait_for_overlapping_requests(bs, sector_num, nb_sectors);
1976 }
1977
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00001978 tracked_request_begin(&req, bs, sector_num, nb_sectors, true);
1979
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00001980 if (flags & BDRV_REQ_ZERO_WRITE) {
1981 ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors);
1982 } else {
1983 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
1984 }
Stefan Hajnoczi6b7cb242011-10-13 13:08:24 +01001985
Paolo Bonzinif05fa4a2012-06-06 00:04:49 +02001986 if (ret == 0 && !bs->enable_write_cache) {
1987 ret = bdrv_co_flush(bs);
1988 }
1989
Kevin Wolfda1fa912011-07-14 17:27:13 +02001990 if (bs->dirty_bitmap) {
1991 set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
1992 }
1993
1994 if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
1995 bs->wr_highest_sector = sector_num + nb_sectors - 1;
1996 }
1997
Stefan Hajnoczidbffbdc2011-11-17 13:40:27 +00001998 tracked_request_end(&req);
1999
Stefan Hajnoczi6b7cb242011-10-13 13:08:24 +01002000 return ret;
Kevin Wolfda1fa912011-07-14 17:27:13 +02002001}
2002
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01002003int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
2004 int nb_sectors, QEMUIOVector *qiov)
2005{
2006 trace_bdrv_co_writev(bs, sector_num, nb_sectors);
2007
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00002008 return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
2009}
2010
2011int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
2012 int64_t sector_num, int nb_sectors)
2013{
2014 trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors);
2015
2016 return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
2017 BDRV_REQ_ZERO_WRITE);
Stefan Hajnoczic5fbe572011-10-05 17:17:03 +01002018}
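/*
 * Illustrative sketch, not part of the original file: zeroing a region
 * from coroutine context.  bdrv_co_write_zeroes() is a coroutine_fn, so
 * it may only be called from within a coroutine.
 */
#if 0
static int coroutine_fn example_zero_region(BlockDriverState *bs,
                                            int64_t sector_num,
                                            int nb_sectors)
{
    /* Falls back to writing an explicit buffer of zeros when the driver
     * has no efficient bdrv_co_write_zeroes implementation. */
    return bdrv_co_write_zeroes(bs, sector_num, nb_sectors);
}
#endif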
2019
bellard83f64092006-08-01 16:21:11 +00002020/**
bellard83f64092006-08-01 16:21:11 +00002021 * Truncate file to 'offset' bytes (needed only for file protocols)
2022 */
2023int bdrv_truncate(BlockDriverState *bs, int64_t offset)
2024{
2025 BlockDriver *drv = bs->drv;
Stefan Hajnoczi51762282010-04-19 16:56:41 +01002026 int ret;
bellard83f64092006-08-01 16:21:11 +00002027 if (!drv)
bellard19cb3732006-08-19 11:45:59 +00002028 return -ENOMEDIUM;
bellard83f64092006-08-01 16:21:11 +00002029 if (!drv->bdrv_truncate)
2030 return -ENOTSUP;
Naphtali Sprei59f26892009-10-26 16:25:16 +02002031 if (bs->read_only)
2032 return -EACCES;
Marcelo Tosatti85916752011-01-26 12:12:35 -02002033 if (bdrv_in_use(bs))
2034 return -EBUSY;
Stefan Hajnoczi51762282010-04-19 16:56:41 +01002035 ret = drv->bdrv_truncate(bs, offset);
2036 if (ret == 0) {
2037 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
Markus Armbruster145feb12011-08-03 15:07:42 +02002038 bdrv_dev_resize_cb(bs);
Stefan Hajnoczi51762282010-04-19 16:56:41 +01002039 }
2040 return ret;
bellard83f64092006-08-01 16:21:11 +00002041}
2042
2043/**
Fam Zheng4a1d5e12011-07-12 19:56:39 +08002044 * Length of an allocated file in bytes. Sparse files are counted by actual
2045 * allocated space. Return < 0 if error or unknown.
2046 */
2047int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
2048{
2049 BlockDriver *drv = bs->drv;
2050 if (!drv) {
2051 return -ENOMEDIUM;
2052 }
2053 if (drv->bdrv_get_allocated_file_size) {
2054 return drv->bdrv_get_allocated_file_size(bs);
2055 }
2056 if (bs->file) {
2057 return bdrv_get_allocated_file_size(bs->file);
2058 }
2059 return -ENOTSUP;
2060}
2061
2062/**
bellard83f64092006-08-01 16:21:11 +00002063 * Length of a file in bytes. Return < 0 if error or unknown.
2064 */
2065int64_t bdrv_getlength(BlockDriverState *bs)
2066{
2067 BlockDriver *drv = bs->drv;
2068 if (!drv)
bellard19cb3732006-08-19 11:45:59 +00002069 return -ENOMEDIUM;
Stefan Hajnoczi51762282010-04-19 16:56:41 +01002070
Markus Armbruster2c6942f2011-09-06 18:58:51 +02002071 if (bs->growable || bdrv_dev_has_removable_media(bs)) {
Stefan Hajnoczi46a4e4e2011-03-29 20:04:41 +01002072 if (drv->bdrv_getlength) {
2073 return drv->bdrv_getlength(bs);
2074 }
bellard83f64092006-08-01 16:21:11 +00002075 }
Stefan Hajnoczi46a4e4e2011-03-29 20:04:41 +01002076 return bs->total_sectors * BDRV_SECTOR_SIZE;
bellardfc01f7e2003-06-30 10:03:06 +00002077}
2078
bellard19cb3732006-08-19 11:45:59 +00002079/* return 0 as number of sectors if no device present or error */
ths96b8f132007-12-17 01:35:20 +00002080void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
bellardfc01f7e2003-06-30 10:03:06 +00002081{
bellard19cb3732006-08-19 11:45:59 +00002082 int64_t length;
2083 length = bdrv_getlength(bs);
2084 if (length < 0)
2085 length = 0;
2086 else
Jan Kiszka6ea44302009-11-30 18:21:19 +01002087 length = length >> BDRV_SECTOR_BITS;
bellard19cb3732006-08-19 11:45:59 +00002088 *nb_sectors_ptr = length;
bellardfc01f7e2003-06-30 10:03:06 +00002089}
bellardcf989512004-02-16 21:56:36 +00002090
aliguorif3d54fc2008-11-25 21:50:24 +00002091struct partition {
2092 uint8_t boot_ind; /* 0x80 - active */
2093 uint8_t head; /* starting head */
2094 uint8_t sector; /* starting sector */
2095 uint8_t cyl; /* starting cylinder */
2096 uint8_t sys_ind; /* What partition type */
2097 uint8_t end_head; /* end head */
2098 uint8_t end_sector; /* end sector */
2099 uint8_t end_cyl; /* end cylinder */
2100 uint32_t start_sect; /* starting sector counting from 0 */
2101 uint32_t nr_sects; /* nr of sectors in partition */
Stefan Weil541dc0d2011-08-31 12:38:01 +02002102} QEMU_PACKED;
aliguorif3d54fc2008-11-25 21:50:24 +00002103
2104/* Try to guess the disk logical geometry from the MSDOS partition table. Return 0 if OK, -1 if the geometry could not be guessed */
2105static int guess_disk_lchs(BlockDriverState *bs,
2106 int *pcylinders, int *pheads, int *psectors)
2107{
Jes Sorenseneb5a3162010-05-27 16:20:31 +02002108 uint8_t buf[BDRV_SECTOR_SIZE];
aliguorif3d54fc2008-11-25 21:50:24 +00002109 int ret, i, heads, sectors, cylinders;
2110 struct partition *p;
2111 uint32_t nr_sects;
blueswir1a38131b2008-12-05 17:56:40 +00002112 uint64_t nb_sectors;
Zhi Yong Wu498e3862012-04-02 18:59:34 +08002113 bool enabled;
aliguorif3d54fc2008-11-25 21:50:24 +00002114
2115 bdrv_get_geometry(bs, &nb_sectors);
2116
Zhi Yong Wu498e3862012-04-02 18:59:34 +08002117 /**
2118 * This function is invoked during startup not only in sync I/O mode but
2119 * also in async I/O mode, so the I/O throttling function only has to be
2120 * disabled temporarily here, not permanently.
2121 */
2122 enabled = bs->io_limits_enabled;
2123 bs->io_limits_enabled = false;
aliguorif3d54fc2008-11-25 21:50:24 +00002124 ret = bdrv_read(bs, 0, buf, 1);
Zhi Yong Wu498e3862012-04-02 18:59:34 +08002125 bs->io_limits_enabled = enabled;
aliguorif3d54fc2008-11-25 21:50:24 +00002126 if (ret < 0)
2127 return -1;
2128 /* test msdos magic */
2129 if (buf[510] != 0x55 || buf[511] != 0xaa)
2130 return -1;
2131 for(i = 0; i < 4; i++) {
2132 p = ((struct partition *)(buf + 0x1be)) + i;
2133 nr_sects = le32_to_cpu(p->nr_sects);
2134 if (nr_sects && p->end_head) {
2135 /* We make the assumption that the partition terminates on
2136 a cylinder boundary */
2137 heads = p->end_head + 1;
2138 sectors = p->end_sector & 63;
2139 if (sectors == 0)
2140 continue;
2141 cylinders = nb_sectors / (heads * sectors);
2142 if (cylinders < 1 || cylinders > 16383)
2143 continue;
2144 *pheads = heads;
2145 *psectors = sectors;
2146 *pcylinders = cylinders;
2147#if 0
2148 printf("guessed geometry: LCHS=%d %d %d\n",
2149 cylinders, heads, sectors);
2150#endif
2151 return 0;
2152 }
2153 }
2154 return -1;
2155}
2156
2157void bdrv_guess_geometry(BlockDriverState *bs, int *pcyls, int *pheads, int *psecs)
2158{
2159 int translation, lba_detected = 0;
2160 int cylinders, heads, secs;
blueswir1a38131b2008-12-05 17:56:40 +00002161 uint64_t nb_sectors;
aliguorif3d54fc2008-11-25 21:50:24 +00002162
2163 /* if a geometry hint is available, use it */
2164 bdrv_get_geometry(bs, &nb_sectors);
2165 bdrv_get_geometry_hint(bs, &cylinders, &heads, &secs);
2166 translation = bdrv_get_translation_hint(bs);
2167 if (cylinders != 0) {
2168 *pcyls = cylinders;
2169 *pheads = heads;
2170 *psecs = secs;
2171 } else {
2172 if (guess_disk_lchs(bs, &cylinders, &heads, &secs) == 0) {
2173 if (heads > 16) {
2174 /* if heads > 16, it means that a BIOS LBA
2175 translation was active, so the default
2176 hardware geometry is OK */
2177 lba_detected = 1;
2178 goto default_geometry;
2179 } else {
2180 *pcyls = cylinders;
2181 *pheads = heads;
2182 *psecs = secs;
2183 /* disable any translation to be in sync with
2184 the logical geometry */
2185 if (translation == BIOS_ATA_TRANSLATION_AUTO) {
2186 bdrv_set_translation_hint(bs,
2187 BIOS_ATA_TRANSLATION_NONE);
2188 }
2189 }
2190 } else {
2191 default_geometry:
2192 /* if no geometry, use a standard physical disk geometry */
2193 cylinders = nb_sectors / (16 * 63);
2194
2195 if (cylinders > 16383)
2196 cylinders = 16383;
2197 else if (cylinders < 2)
2198 cylinders = 2;
2199 *pcyls = cylinders;
2200 *pheads = 16;
2201 *psecs = 63;
2202 if ((lba_detected == 1) && (translation == BIOS_ATA_TRANSLATION_AUTO)) {
2203 if ((*pcyls * *pheads) <= 131072) {
2204 bdrv_set_translation_hint(bs,
2205 BIOS_ATA_TRANSLATION_LARGE);
2206 } else {
2207 bdrv_set_translation_hint(bs,
2208 BIOS_ATA_TRANSLATION_LBA);
2209 }
2210 }
2211 }
2212 bdrv_set_geometry_hint(bs, *pcyls, *pheads, *psecs);
2213 }
2214}
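/*
 * Illustrative example, not part of the original file: for a 1 GiB image
 * (nb_sectors == 2097152) whose partition table reports more than 16
 * heads, the code above falls through to default_geometry with
 * lba_detected == 1 and yields
 *
 *   cylinders = 2097152 / (16 * 63) = 2080, heads = 16, secs = 63
 *
 * and, because 2080 * 16 <= 131072, BIOS_ATA_TRANSLATION_LARGE is chosen
 * when the translation hint was still AUTO.
 */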
2215
ths5fafdf22007-09-16 21:08:06 +00002216void bdrv_set_geometry_hint(BlockDriverState *bs,
bellardb3380822004-03-14 21:38:54 +00002217 int cyls, int heads, int secs)
2218{
2219 bs->cyls = cyls;
2220 bs->heads = heads;
2221 bs->secs = secs;
2222}
2223
bellard46d47672004-11-16 01:45:27 +00002224void bdrv_set_translation_hint(BlockDriverState *bs, int translation)
2225{
2226 bs->translation = translation;
2227}
2228
ths5fafdf22007-09-16 21:08:06 +00002229void bdrv_get_geometry_hint(BlockDriverState *bs,
bellardb3380822004-03-14 21:38:54 +00002230 int *pcyls, int *pheads, int *psecs)
2231{
2232 *pcyls = bs->cyls;
2233 *pheads = bs->heads;
2234 *psecs = bs->secs;
2235}
2236
Zhi Yong Wu0563e192011-11-03 16:57:25 +08002237/* throttling disk io limits */
2238void bdrv_set_io_limits(BlockDriverState *bs,
2239 BlockIOLimit *io_limits)
2240{
2241 bs->io_limits = *io_limits;
2242 bs->io_limits_enabled = bdrv_io_limits_enabled(bs);
2243}
2244
Blue Swirl5bbdbb42011-02-12 20:43:32 +00002245/* Recognize floppy formats */
2246typedef struct FDFormat {
2247 FDriveType drive;
2248 uint8_t last_sect;
2249 uint8_t max_track;
2250 uint8_t max_head;
Hervé Poussineauf8d3d122012-02-06 22:29:07 +01002251 FDriveRate rate;
Blue Swirl5bbdbb42011-02-12 20:43:32 +00002252} FDFormat;
2253
2254static const FDFormat fd_formats[] = {
2255 /* First entry is default format */
2256 /* 1.44 MB 3"1/2 floppy disks */
Hervé Poussineauf8d3d122012-02-06 22:29:07 +01002257 { FDRIVE_DRV_144, 18, 80, 1, FDRIVE_RATE_500K, },
2258 { FDRIVE_DRV_144, 20, 80, 1, FDRIVE_RATE_500K, },
2259 { FDRIVE_DRV_144, 21, 80, 1, FDRIVE_RATE_500K, },
2260 { FDRIVE_DRV_144, 21, 82, 1, FDRIVE_RATE_500K, },
2261 { FDRIVE_DRV_144, 21, 83, 1, FDRIVE_RATE_500K, },
2262 { FDRIVE_DRV_144, 22, 80, 1, FDRIVE_RATE_500K, },
2263 { FDRIVE_DRV_144, 23, 80, 1, FDRIVE_RATE_500K, },
2264 { FDRIVE_DRV_144, 24, 80, 1, FDRIVE_RATE_500K, },
Blue Swirl5bbdbb42011-02-12 20:43:32 +00002265 /* 2.88 MB 3"1/2 floppy disks */
Hervé Poussineauf8d3d122012-02-06 22:29:07 +01002266 { FDRIVE_DRV_288, 36, 80, 1, FDRIVE_RATE_1M, },
2267 { FDRIVE_DRV_288, 39, 80, 1, FDRIVE_RATE_1M, },
2268 { FDRIVE_DRV_288, 40, 80, 1, FDRIVE_RATE_1M, },
2269 { FDRIVE_DRV_288, 44, 80, 1, FDRIVE_RATE_1M, },
2270 { FDRIVE_DRV_288, 48, 80, 1, FDRIVE_RATE_1M, },
Blue Swirl5bbdbb42011-02-12 20:43:32 +00002271 /* 720 kB 3"1/2 floppy disks */
Hervé Poussineauf8d3d122012-02-06 22:29:07 +01002272 { FDRIVE_DRV_144, 9, 80, 1, FDRIVE_RATE_250K, },
2273 { FDRIVE_DRV_144, 10, 80, 1, FDRIVE_RATE_250K, },
2274 { FDRIVE_DRV_144, 10, 82, 1, FDRIVE_RATE_250K, },
2275 { FDRIVE_DRV_144, 10, 83, 1, FDRIVE_RATE_250K, },
2276 { FDRIVE_DRV_144, 13, 80, 1, FDRIVE_RATE_250K, },
2277 { FDRIVE_DRV_144, 14, 80, 1, FDRIVE_RATE_250K, },
Blue Swirl5bbdbb42011-02-12 20:43:32 +00002278 /* 1.2 MB 5"1/4 floppy disks */
Hervé Poussineauf8d3d122012-02-06 22:29:07 +01002279 { FDRIVE_DRV_120, 15, 80, 1, FDRIVE_RATE_500K, },
2280 { FDRIVE_DRV_120, 18, 80, 1, FDRIVE_RATE_500K, },
2281 { FDRIVE_DRV_120, 18, 82, 1, FDRIVE_RATE_500K, },
2282 { FDRIVE_DRV_120, 18, 83, 1, FDRIVE_RATE_500K, },
2283 { FDRIVE_DRV_120, 20, 80, 1, FDRIVE_RATE_500K, },
Blue Swirl5bbdbb42011-02-12 20:43:32 +00002284 /* 720 kB 5"1/4 floppy disks */
Hervé Poussineauf8d3d122012-02-06 22:29:07 +01002285 { FDRIVE_DRV_120, 9, 80, 1, FDRIVE_RATE_250K, },
2286 { FDRIVE_DRV_120, 11, 80, 1, FDRIVE_RATE_250K, },
Blue Swirl5bbdbb42011-02-12 20:43:32 +00002287 /* 360 kB 5"1/4 floppy disks */
Hervé Poussineauf8d3d122012-02-06 22:29:07 +01002288 { FDRIVE_DRV_120, 9, 40, 1, FDRIVE_RATE_300K, },
2289 { FDRIVE_DRV_120, 9, 40, 0, FDRIVE_RATE_300K, },
2290 { FDRIVE_DRV_120, 10, 41, 1, FDRIVE_RATE_300K, },
2291 { FDRIVE_DRV_120, 10, 42, 1, FDRIVE_RATE_300K, },
Blue Swirl5bbdbb42011-02-12 20:43:32 +00002292 /* 320 kB 5"1/4 floppy disks */
Hervé Poussineauf8d3d122012-02-06 22:29:07 +01002293 { FDRIVE_DRV_120, 8, 40, 1, FDRIVE_RATE_250K, },
2294 { FDRIVE_DRV_120, 8, 40, 0, FDRIVE_RATE_250K, },
Blue Swirl5bbdbb42011-02-12 20:43:32 +00002295 /* 360 kB must match 5"1/4 better than 3"1/2... */
Hervé Poussineauf8d3d122012-02-06 22:29:07 +01002296 { FDRIVE_DRV_144, 9, 80, 0, FDRIVE_RATE_250K, },
Blue Swirl5bbdbb42011-02-12 20:43:32 +00002297 /* end */
Hervé Poussineauf8d3d122012-02-06 22:29:07 +01002298 { FDRIVE_DRV_NONE, -1, -1, 0, 0, },
Blue Swirl5bbdbb42011-02-12 20:43:32 +00002299};
2300
2301void bdrv_get_floppy_geometry_hint(BlockDriverState *bs, int *nb_heads,
2302 int *max_track, int *last_sect,
Hervé Poussineauf8d3d122012-02-06 22:29:07 +01002303 FDriveType drive_in, FDriveType *drive,
2304 FDriveRate *rate)
Blue Swirl5bbdbb42011-02-12 20:43:32 +00002305{
2306 const FDFormat *parse;
2307 uint64_t nb_sectors, size;
2308 int i, first_match, match;
2309
2310 bdrv_get_geometry_hint(bs, nb_heads, max_track, last_sect);
2311 if (*nb_heads != 0 && *max_track != 0 && *last_sect != 0) {
2312 /* User defined disk */
Hervé Poussineauf8d3d122012-02-06 22:29:07 +01002313 *rate = FDRIVE_RATE_500K;
Blue Swirl5bbdbb42011-02-12 20:43:32 +00002314 } else {
2315 bdrv_get_geometry(bs, &nb_sectors);
2316 match = -1;
2317 first_match = -1;
2318 for (i = 0; ; i++) {
2319 parse = &fd_formats[i];
2320 if (parse->drive == FDRIVE_DRV_NONE) {
2321 break;
2322 }
2323 if (drive_in == parse->drive ||
2324 drive_in == FDRIVE_DRV_NONE) {
2325 size = (parse->max_head + 1) * parse->max_track *
2326 parse->last_sect;
2327 if (nb_sectors == size) {
2328 match = i;
2329 break;
2330 }
2331 if (first_match == -1) {
2332 first_match = i;
2333 }
2334 }
2335 }
2336 if (match == -1) {
2337 if (first_match == -1) {
2338 match = 1;
2339 } else {
2340 match = first_match;
2341 }
2342 parse = &fd_formats[match];
2343 }
2344 *nb_heads = parse->max_head + 1;
2345 *max_track = parse->max_track;
2346 *last_sect = parse->last_sect;
2347 *drive = parse->drive;
Hervé Poussineauf8d3d122012-02-06 22:29:07 +01002348 *rate = parse->rate;
Blue Swirl5bbdbb42011-02-12 20:43:32 +00002349 }
2350}
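/*
 * Illustrative example, not part of the original file: a standard 1.44 MB
 * image is 1474560 bytes, i.e. 2880 sectors.  It matches the first table
 * entry, { FDRIVE_DRV_144, 18, 80, 1, FDRIVE_RATE_500K }, because
 *
 *   size = (max_head + 1) * max_track * last_sect = 2 * 80 * 18 = 2880
 *
 * so the guessed geometry is 2 heads, 80 tracks and 18 sectors per track
 * at the FDRIVE_RATE_500K data rate.
 */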
2351
bellard46d47672004-11-16 01:45:27 +00002352int bdrv_get_translation_hint(BlockDriverState *bs)
2353{
2354 return bs->translation;
2355}
2356
Markus Armbrusterabd7f682010-06-02 18:55:17 +02002357void bdrv_set_on_error(BlockDriverState *bs, BlockErrorAction on_read_error,
2358 BlockErrorAction on_write_error)
2359{
2360 bs->on_read_error = on_read_error;
2361 bs->on_write_error = on_write_error;
2362}
2363
2364BlockErrorAction bdrv_get_on_error(BlockDriverState *bs, int is_read)
2365{
2366 return is_read ? bs->on_read_error : bs->on_write_error;
2367}
2368
bellardb3380822004-03-14 21:38:54 +00002369int bdrv_is_read_only(BlockDriverState *bs)
2370{
2371 return bs->read_only;
2372}
2373
ths985a03b2007-12-24 16:10:43 +00002374int bdrv_is_sg(BlockDriverState *bs)
2375{
2376 return bs->sg;
2377}
2378
Christoph Hellwige900a7b2009-09-04 19:01:15 +02002379int bdrv_enable_write_cache(BlockDriverState *bs)
2380{
2381 return bs->enable_write_cache;
2382}
2383
Paolo Bonzini425b0142012-06-06 00:04:52 +02002384void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce)
2385{
2386 bs->enable_write_cache = wce;
2387}
2388
bellardea2384d2004-08-01 21:59:26 +00002389int bdrv_is_encrypted(BlockDriverState *bs)
2390{
2391 if (bs->backing_hd && bs->backing_hd->encrypted)
2392 return 1;
2393 return bs->encrypted;
2394}
2395
aliguoric0f4ce72009-03-05 23:01:01 +00002396int bdrv_key_required(BlockDriverState *bs)
2397{
2398 BlockDriverState *backing_hd = bs->backing_hd;
2399
2400 if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key)
2401 return 1;
2402 return (bs->encrypted && !bs->valid_key);
2403}
2404
bellardea2384d2004-08-01 21:59:26 +00002405int bdrv_set_key(BlockDriverState *bs, const char *key)
2406{
2407 int ret;
2408 if (bs->backing_hd && bs->backing_hd->encrypted) {
2409 ret = bdrv_set_key(bs->backing_hd, key);
2410 if (ret < 0)
2411 return ret;
2412 if (!bs->encrypted)
2413 return 0;
2414 }
Shahar Havivifd04a2a2010-03-06 00:26:13 +02002415 if (!bs->encrypted) {
2416 return -EINVAL;
2417 } else if (!bs->drv || !bs->drv->bdrv_set_key) {
2418 return -ENOMEDIUM;
2419 }
aliguoric0f4ce72009-03-05 23:01:01 +00002420 ret = bs->drv->bdrv_set_key(bs, key);
aliguoribb5fc202009-03-05 23:01:15 +00002421 if (ret < 0) {
2422 bs->valid_key = 0;
2423 } else if (!bs->valid_key) {
2424 bs->valid_key = 1;
2425 /* call the change callback now, we skipped it on open */
Markus Armbruster7d4b4ba2011-09-06 18:58:59 +02002426 bdrv_dev_change_media_cb(bs, true);
aliguoribb5fc202009-03-05 23:01:15 +00002427 }
aliguoric0f4ce72009-03-05 23:01:01 +00002428 return ret;
bellardea2384d2004-08-01 21:59:26 +00002429}
2430
2431void bdrv_get_format(BlockDriverState *bs, char *buf, int buf_size)
2432{
bellard19cb3732006-08-19 11:45:59 +00002433 if (!bs->drv) {
bellardea2384d2004-08-01 21:59:26 +00002434 buf[0] = '\0';
2435 } else {
2436 pstrcpy(buf, buf_size, bs->drv->format_name);
2437 }
2438}
2439
ths5fafdf22007-09-16 21:08:06 +00002440void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
bellardea2384d2004-08-01 21:59:26 +00002441 void *opaque)
2442{
2443 BlockDriver *drv;
2444
Stefan Hajnoczi8a22f022010-04-13 10:29:33 +01002445 QLIST_FOREACH(drv, &bdrv_drivers, list) {
bellardea2384d2004-08-01 21:59:26 +00002446 it(opaque, drv->format_name);
2447 }
2448}
2449
bellardb3380822004-03-14 21:38:54 +00002450BlockDriverState *bdrv_find(const char *name)
2451{
2452 BlockDriverState *bs;
2453
Stefan Hajnoczi1b7bdbc2010-04-10 07:02:42 +01002454 QTAILQ_FOREACH(bs, &bdrv_states, list) {
2455 if (!strcmp(name, bs->device_name)) {
bellardb3380822004-03-14 21:38:54 +00002456 return bs;
Stefan Hajnoczi1b7bdbc2010-04-10 07:02:42 +01002457 }
bellardb3380822004-03-14 21:38:54 +00002458 }
2459 return NULL;
2460}
2461
Markus Armbruster2f399b02010-06-02 18:55:20 +02002462BlockDriverState *bdrv_next(BlockDriverState *bs)
2463{
2464 if (!bs) {
2465 return QTAILQ_FIRST(&bdrv_states);
2466 }
2467 return QTAILQ_NEXT(bs, list);
2468}
2469
aliguori51de9762009-03-05 23:00:43 +00002470void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), void *opaque)
bellard81d09122004-07-14 17:21:37 +00002471{
2472 BlockDriverState *bs;
2473
Stefan Hajnoczi1b7bdbc2010-04-10 07:02:42 +01002474 QTAILQ_FOREACH(bs, &bdrv_states, list) {
aliguori51de9762009-03-05 23:00:43 +00002475 it(opaque, bs);
bellard81d09122004-07-14 17:21:37 +00002476 }
2477}
2478
bellardea2384d2004-08-01 21:59:26 +00002479const char *bdrv_get_device_name(BlockDriverState *bs)
2480{
2481 return bs->device_name;
2482}
2483
Markus Armbrusterc8433282012-06-05 16:49:24 +02002484int bdrv_get_flags(BlockDriverState *bs)
2485{
2486 return bs->open_flags;
2487}
2488
aliguoric6ca28d2008-10-06 13:55:43 +00002489void bdrv_flush_all(void)
2490{
2491 BlockDriverState *bs;
2492
Stefan Hajnoczi1b7bdbc2010-04-10 07:02:42 +01002493 QTAILQ_FOREACH(bs, &bdrv_states, list) {
Paolo Bonzini29cdb252012-03-12 18:26:01 +01002494 bdrv_flush(bs);
Stefan Hajnoczi1b7bdbc2010-04-10 07:02:42 +01002495 }
aliguoric6ca28d2008-10-06 13:55:43 +00002496}
2497
Kevin Wolff2feebb2010-04-14 17:30:35 +02002498int bdrv_has_zero_init(BlockDriverState *bs)
2499{
2500 assert(bs->drv);
2501
Kevin Wolf336c1c12010-07-28 11:26:29 +02002502 if (bs->drv->bdrv_has_zero_init) {
2503 return bs->drv->bdrv_has_zero_init(bs);
Kevin Wolff2feebb2010-04-14 17:30:35 +02002504 }
2505
2506 return 1;
2507}
2508
Stefan Hajnoczi376ae3f2011-11-14 12:44:19 +00002509typedef struct BdrvCoIsAllocatedData {
2510 BlockDriverState *bs;
2511 int64_t sector_num;
2512 int nb_sectors;
2513 int *pnum;
2514 int ret;
2515 bool done;
2516} BdrvCoIsAllocatedData;
2517
thsf58c7b32008-06-05 21:53:49 +00002518/*
2519 * Returns true iff the specified sector is present in the disk image. Drivers
2520 * not implementing the functionality are assumed to not support backing files,
2521 * hence all their sectors are reported as allocated.
2522 *
Stefan Hajnoczibd9533e2011-11-29 13:49:51 +00002523 * If 'sector_num' is beyond the end of the disk image the return value is 0
2524 * and 'pnum' is set to 0.
2525 *
thsf58c7b32008-06-05 21:53:49 +00002526 * 'pnum' is set to the number of sectors (including and immediately following
2527 * the specified sector) that are known to be in the same
2528 * allocated/unallocated state.
2529 *
Stefan Hajnoczibd9533e2011-11-29 13:49:51 +00002530 * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes
2531 * beyond the end of the disk image it will be clamped.
thsf58c7b32008-06-05 21:53:49 +00002532 */
Stefan Hajnoczi060f51c2011-11-14 12:44:26 +00002533int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t sector_num,
2534 int nb_sectors, int *pnum)
thsf58c7b32008-06-05 21:53:49 +00002535{
Stefan Hajnoczibd9533e2011-11-29 13:49:51 +00002536 int64_t n;
2537
2538 if (sector_num >= bs->total_sectors) {
2539 *pnum = 0;
2540 return 0;
2541 }
2542
2543 n = bs->total_sectors - sector_num;
2544 if (n < nb_sectors) {
2545 nb_sectors = n;
2546 }
2547
Stefan Hajnoczi6aebab12011-11-14 12:44:25 +00002548 if (!bs->drv->bdrv_co_is_allocated) {
Stefan Hajnoczibd9533e2011-11-29 13:49:51 +00002549 *pnum = nb_sectors;
thsf58c7b32008-06-05 21:53:49 +00002550 return 1;
2551 }
Stefan Hajnoczi6aebab12011-11-14 12:44:25 +00002552
Stefan Hajnoczi060f51c2011-11-14 12:44:26 +00002553 return bs->drv->bdrv_co_is_allocated(bs, sector_num, nb_sectors, pnum);
2554}
2555
2556/* Coroutine wrapper for bdrv_is_allocated() */
2557static void coroutine_fn bdrv_is_allocated_co_entry(void *opaque)
2558{
2559 BdrvCoIsAllocatedData *data = opaque;
2560 BlockDriverState *bs = data->bs;
2561
2562 data->ret = bdrv_co_is_allocated(bs, data->sector_num, data->nb_sectors,
2563 data->pnum);
2564 data->done = true;
2565}
2566
2567/*
2568 * Synchronous wrapper around bdrv_co_is_allocated().
2569 *
2570 * See bdrv_co_is_allocated() for details.
2571 */
2572int bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
2573 int *pnum)
2574{
Stefan Hajnoczi6aebab12011-11-14 12:44:25 +00002575 Coroutine *co;
2576 BdrvCoIsAllocatedData data = {
2577 .bs = bs,
2578 .sector_num = sector_num,
2579 .nb_sectors = nb_sectors,
2580 .pnum = pnum,
2581 .done = false,
2582 };
2583
2584 co = qemu_coroutine_create(bdrv_is_allocated_co_entry);
2585 qemu_coroutine_enter(co, &data);
2586 while (!data.done) {
2587 qemu_aio_wait();
2588 }
2589 return data.ret;
thsf58c7b32008-06-05 21:53:49 +00002590}
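/*
 * Illustrative sketch, not part of the original file: walking the
 * allocation map of an image with the synchronous wrapper above.  The
 * printf() output is only for demonstration.
 */
#if 0
static void example_dump_allocation(BlockDriverState *bs)
{
    int64_t total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
    int64_t sector_num = 0;
    int pnum, allocated;

    while (sector_num < total_sectors) {
        int nb_sectors = MIN(total_sectors - sector_num, (int64_t)INT_MAX);

        allocated = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
        if (allocated < 0) {
            return;             /* error reported by the driver */
        }
        printf("[%" PRId64 ", +%d) %s\n", sector_num, pnum,
               allocated ? "allocated" : "unallocated");
        sector_num += pnum;
    }
}
#endif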
2591
Paolo Bonzini188a7bb2012-05-08 16:52:01 +02002592/*
2593 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
2594 *
2595 * Return true if the given sector is allocated in any image between
2596 * BASE and TOP (inclusive). BASE can be NULL to check if the given
2597 * sector is allocated in any image of the chain. Return false otherwise.
2598 *
2599 * 'pnum' is set to the number of sectors (including and immediately following
2600 * the specified sector) that are known to be in the same
2601 * allocated/unallocated state.
2602 *
2603 */
2604int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *top,
2605 BlockDriverState *base,
2606 int64_t sector_num,
2607 int nb_sectors, int *pnum)
2608{
2609 BlockDriverState *intermediate;
2610 int ret, n = nb_sectors;
2611
2612 intermediate = top;
2613 while (intermediate && intermediate != base) {
2614 int pnum_inter;
2615 ret = bdrv_co_is_allocated(intermediate, sector_num, nb_sectors,
2616 &pnum_inter);
2617 if (ret < 0) {
2618 return ret;
2619 } else if (ret) {
2620 *pnum = pnum_inter;
2621 return 1;
2622 }
2623
2624 /*
2625 * [sector_num, nb_sectors] is unallocated on top but intermediate
2626 * might have
2627 *
2628 * [sector_num+x, nb_sectors] allocated.
2629 */
2630 if (n > pnum_inter) {
2631 n = pnum_inter;
2632 }
2633
2634 intermediate = intermediate->backing_hd;
2635 }
2636
2637 *pnum = n;
2638 return 0;
2639}
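/*
 * Illustrative example, not part of the original file: given the chain
 * base.img <- mid.qcow2 <- top.qcow2, bdrv_co_is_allocated_above(top, base,
 * ...) reports a sector as allocated if top.qcow2 or mid.qcow2 allocates it
 * (the walk stops once it reaches base), while passing base == NULL walks
 * the whole chain down to and including base.img.
 */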
2640
Luiz Capitulinob2023812011-09-21 17:16:47 -03002641BlockInfoList *qmp_query_block(Error **errp)
bellardb3380822004-03-14 21:38:54 +00002642{
Luiz Capitulinob2023812011-09-21 17:16:47 -03002643 BlockInfoList *head = NULL, *cur_item = NULL;
bellardb3380822004-03-14 21:38:54 +00002644 BlockDriverState *bs;
2645
Stefan Hajnoczi1b7bdbc2010-04-10 07:02:42 +01002646 QTAILQ_FOREACH(bs, &bdrv_states, list) {
Luiz Capitulinob2023812011-09-21 17:16:47 -03002647 BlockInfoList *info = g_malloc0(sizeof(*info));
Luiz Capitulinod15e5462009-12-10 17:16:06 -02002648
Luiz Capitulinob2023812011-09-21 17:16:47 -03002649 info->value = g_malloc0(sizeof(*info->value));
2650 info->value->device = g_strdup(bs->device_name);
2651 info->value->type = g_strdup("unknown");
2652 info->value->locked = bdrv_dev_is_medium_locked(bs);
2653 info->value->removable = bdrv_dev_has_removable_media(bs);
Luiz Capitulinod15e5462009-12-10 17:16:06 -02002654
Markus Armbrustere4def802011-09-06 18:58:53 +02002655 if (bdrv_dev_has_removable_media(bs)) {
Luiz Capitulinob2023812011-09-21 17:16:47 -03002656 info->value->has_tray_open = true;
2657 info->value->tray_open = bdrv_dev_is_tray_open(bs);
Markus Armbrustere4def802011-09-06 18:58:53 +02002658 }
Luiz Capitulinof04ef602011-09-26 17:43:54 -03002659
2660 if (bdrv_iostatus_is_enabled(bs)) {
Luiz Capitulinob2023812011-09-21 17:16:47 -03002661 info->value->has_io_status = true;
2662 info->value->io_status = bs->iostatus;
Luiz Capitulinof04ef602011-09-26 17:43:54 -03002663 }
2664
bellard19cb3732006-08-19 11:45:59 +00002665 if (bs->drv) {
Luiz Capitulinob2023812011-09-21 17:16:47 -03002666 info->value->has_inserted = true;
2667 info->value->inserted = g_malloc0(sizeof(*info->value->inserted));
2668 info->value->inserted->file = g_strdup(bs->filename);
2669 info->value->inserted->ro = bs->read_only;
2670 info->value->inserted->drv = g_strdup(bs->drv->format_name);
2671 info->value->inserted->encrypted = bs->encrypted;
2672 if (bs->backing_file[0]) {
2673 info->value->inserted->has_backing_file = true;
2674 info->value->inserted->backing_file = g_strdup(bs->backing_file);
aliguori376253e2009-03-05 23:01:23 +00002675 }
Zhi Yong Wu727f0052011-11-08 13:00:31 +08002676
2677 if (bs->io_limits_enabled) {
2678 info->value->inserted->bps =
2679 bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL];
2680 info->value->inserted->bps_rd =
2681 bs->io_limits.bps[BLOCK_IO_LIMIT_READ];
2682 info->value->inserted->bps_wr =
2683 bs->io_limits.bps[BLOCK_IO_LIMIT_WRITE];
2684 info->value->inserted->iops =
2685 bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL];
2686 info->value->inserted->iops_rd =
2687 bs->io_limits.iops[BLOCK_IO_LIMIT_READ];
2688 info->value->inserted->iops_wr =
2689 bs->io_limits.iops[BLOCK_IO_LIMIT_WRITE];
2690 }
bellardb3380822004-03-14 21:38:54 +00002691 }
Luiz Capitulinob2023812011-09-21 17:16:47 -03002692
2693 /* XXX: waiting for the qapi to support GSList */
2694 if (!cur_item) {
2695 head = cur_item = info;
2696 } else {
2697 cur_item->next = info;
2698 cur_item = info;
2699 }
bellardb3380822004-03-14 21:38:54 +00002700 }
Luiz Capitulinod15e5462009-12-10 17:16:06 -02002701
Luiz Capitulinob2023812011-09-21 17:16:47 -03002702 return head;
bellardb3380822004-03-14 21:38:54 +00002703}
thsa36e69d2007-12-02 05:18:19 +00002704
Luiz Capitulinof11f57e2011-09-22 15:56:36 -03002705/* Consider exposing this as a full fledged QMP command */
2706static BlockStats *qmp_query_blockstat(const BlockDriverState *bs, Error **errp)
thsa36e69d2007-12-02 05:18:19 +00002707{
Luiz Capitulinof11f57e2011-09-22 15:56:36 -03002708 BlockStats *s;
Luiz Capitulino218a5362009-12-10 17:16:07 -02002709
Luiz Capitulinof11f57e2011-09-22 15:56:36 -03002710 s = g_malloc0(sizeof(*s));
Luiz Capitulino218a5362009-12-10 17:16:07 -02002711
Luiz Capitulinof11f57e2011-09-22 15:56:36 -03002712 if (bs->device_name[0]) {
2713 s->has_device = true;
2714 s->device = g_strdup(bs->device_name);
Kevin Wolf294cc352010-04-28 14:34:01 +02002715 }
2716
Luiz Capitulinof11f57e2011-09-22 15:56:36 -03002717 s->stats = g_malloc0(sizeof(*s->stats));
2718 s->stats->rd_bytes = bs->nr_bytes[BDRV_ACCT_READ];
2719 s->stats->wr_bytes = bs->nr_bytes[BDRV_ACCT_WRITE];
2720 s->stats->rd_operations = bs->nr_ops[BDRV_ACCT_READ];
2721 s->stats->wr_operations = bs->nr_ops[BDRV_ACCT_WRITE];
2722 s->stats->wr_highest_offset = bs->wr_highest_sector * BDRV_SECTOR_SIZE;
2723 s->stats->flush_operations = bs->nr_ops[BDRV_ACCT_FLUSH];
2724 s->stats->wr_total_time_ns = bs->total_time_ns[BDRV_ACCT_WRITE];
2725 s->stats->rd_total_time_ns = bs->total_time_ns[BDRV_ACCT_READ];
2726 s->stats->flush_total_time_ns = bs->total_time_ns[BDRV_ACCT_FLUSH];
2727
Kevin Wolf294cc352010-04-28 14:34:01 +02002728 if (bs->file) {
Luiz Capitulinof11f57e2011-09-22 15:56:36 -03002729 s->has_parent = true;
2730 s->parent = qmp_query_blockstat(bs->file, NULL);
Kevin Wolf294cc352010-04-28 14:34:01 +02002731 }
2732
Luiz Capitulinof11f57e2011-09-22 15:56:36 -03002733 return s;
Kevin Wolf294cc352010-04-28 14:34:01 +02002734}
2735
Luiz Capitulinof11f57e2011-09-22 15:56:36 -03002736BlockStatsList *qmp_query_blockstats(Error **errp)
Luiz Capitulino218a5362009-12-10 17:16:07 -02002737{
Luiz Capitulinof11f57e2011-09-22 15:56:36 -03002738 BlockStatsList *head = NULL, *cur_item = NULL;
thsa36e69d2007-12-02 05:18:19 +00002739 BlockDriverState *bs;
2740
Stefan Hajnoczi1b7bdbc2010-04-10 07:02:42 +01002741 QTAILQ_FOREACH(bs, &bdrv_states, list) {
Luiz Capitulinof11f57e2011-09-22 15:56:36 -03002742 BlockStatsList *info = g_malloc0(sizeof(*info));
2743 info->value = qmp_query_blockstat(bs, NULL);
2744
2745 /* XXX: waiting for the qapi to support GSList */
2746 if (!cur_item) {
2747 head = cur_item = info;
2748 } else {
2749 cur_item->next = info;
2750 cur_item = info;
2751 }
thsa36e69d2007-12-02 05:18:19 +00002752 }
Luiz Capitulino218a5362009-12-10 17:16:07 -02002753
Luiz Capitulinof11f57e2011-09-22 15:56:36 -03002754 return head;
thsa36e69d2007-12-02 05:18:19 +00002755}
bellardea2384d2004-08-01 21:59:26 +00002756
aliguori045df332009-03-05 23:00:48 +00002757const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
2758{
2759 if (bs->backing_hd && bs->backing_hd->encrypted)
2760 return bs->backing_file;
2761 else if (bs->encrypted)
2762 return bs->filename;
2763 else
2764 return NULL;
2765}
2766
ths5fafdf22007-09-16 21:08:06 +00002767void bdrv_get_backing_filename(BlockDriverState *bs,
bellard83f64092006-08-01 16:21:11 +00002768 char *filename, int filename_size)
bellardea2384d2004-08-01 21:59:26 +00002769{
Kevin Wolf3574c602011-10-26 11:02:11 +02002770 pstrcpy(filename, filename_size, bs->backing_file);
bellardea2384d2004-08-01 21:59:26 +00002771}
2772
ths5fafdf22007-09-16 21:08:06 +00002773int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
bellardfaea38e2006-08-05 21:31:00 +00002774 const uint8_t *buf, int nb_sectors)
2775{
2776 BlockDriver *drv = bs->drv;
2777 if (!drv)
bellard19cb3732006-08-19 11:45:59 +00002778 return -ENOMEDIUM;
bellardfaea38e2006-08-05 21:31:00 +00002779 if (!drv->bdrv_write_compressed)
2780 return -ENOTSUP;
Kevin Wolffbb7b4e2009-05-08 14:47:24 +02002781 if (bdrv_check_request(bs, sector_num, nb_sectors))
2782 return -EIO;
Jan Kiszkaa55eb922009-11-30 18:21:19 +01002783
Jan Kiszkac6d22832009-11-30 18:21:20 +01002784 if (bs->dirty_bitmap) {
lirans@il.ibm.com7cd1e322009-11-02 15:40:41 +02002785 set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
2786 }
Jan Kiszkaa55eb922009-11-30 18:21:19 +01002787
bellardfaea38e2006-08-05 21:31:00 +00002788 return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
2789}
ths3b46e622007-09-17 08:09:54 +00002790
bellardfaea38e2006-08-05 21:31:00 +00002791int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
2792{
2793 BlockDriver *drv = bs->drv;
2794 if (!drv)
bellard19cb3732006-08-19 11:45:59 +00002795 return -ENOMEDIUM;
bellardfaea38e2006-08-05 21:31:00 +00002796 if (!drv->bdrv_get_info)
2797 return -ENOTSUP;
2798 memset(bdi, 0, sizeof(*bdi));
2799 return drv->bdrv_get_info(bs, bdi);
2800}
2801
Christoph Hellwig45566e92009-07-10 23:11:57 +02002802int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
2803 int64_t pos, int size)
aliguori178e08a2009-04-05 19:10:55 +00002804{
2805 BlockDriver *drv = bs->drv;
2806 if (!drv)
2807 return -ENOMEDIUM;
MORITA Kazutaka7cdb1f62010-05-28 11:44:58 +09002808 if (drv->bdrv_save_vmstate)
2809 return drv->bdrv_save_vmstate(bs, buf, pos, size);
2810 if (bs->file)
2811 return bdrv_save_vmstate(bs->file, buf, pos, size);
2812 return -ENOTSUP;
aliguori178e08a2009-04-05 19:10:55 +00002813}
2814
Christoph Hellwig45566e92009-07-10 23:11:57 +02002815int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
2816 int64_t pos, int size)
aliguori178e08a2009-04-05 19:10:55 +00002817{
2818 BlockDriver *drv = bs->drv;
2819 if (!drv)
2820 return -ENOMEDIUM;
MORITA Kazutaka7cdb1f62010-05-28 11:44:58 +09002821 if (drv->bdrv_load_vmstate)
2822 return drv->bdrv_load_vmstate(bs, buf, pos, size);
2823 if (bs->file)
2824 return bdrv_load_vmstate(bs->file, buf, pos, size);
2825 return -ENOTSUP;
aliguori178e08a2009-04-05 19:10:55 +00002826}
2827
Kevin Wolf8b9b0cc2010-03-15 17:27:00 +01002828void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
2829{
2830 BlockDriver *drv = bs->drv;
2831
2832 if (!drv || !drv->bdrv_debug_event) {
2833 return;
2834 }
2835
2836 return drv->bdrv_debug_event(bs, event);
2837
2838}
2839
bellardfaea38e2006-08-05 21:31:00 +00002840/**************************************************************/
2841/* handling of snapshots */
2842
Miguel Di Ciurcio Filhofeeee5a2010-06-08 10:40:55 -03002843int bdrv_can_snapshot(BlockDriverState *bs)
2844{
2845 BlockDriver *drv = bs->drv;
Markus Armbruster07b70bf2011-08-03 15:08:11 +02002846 if (!drv || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
Miguel Di Ciurcio Filhofeeee5a2010-06-08 10:40:55 -03002847 return 0;
2848 }
2849
2850 if (!drv->bdrv_snapshot_create) {
2851 if (bs->file != NULL) {
2852 return bdrv_can_snapshot(bs->file);
2853 }
2854 return 0;
2855 }
2856
2857 return 1;
2858}
2859
Blue Swirl199630b2010-07-25 20:49:34 +00002860int bdrv_is_snapshot(BlockDriverState *bs)
2861{
2862 return !!(bs->open_flags & BDRV_O_SNAPSHOT);
2863}
2864
Markus Armbrusterf9092b12010-06-25 10:33:39 +02002865BlockDriverState *bdrv_snapshots(void)
2866{
2867 BlockDriverState *bs;
2868
Markus Armbruster3ac906f2010-07-01 09:30:38 +02002869 if (bs_snapshots) {
Markus Armbrusterf9092b12010-06-25 10:33:39 +02002870 return bs_snapshots;
Markus Armbruster3ac906f2010-07-01 09:30:38 +02002871 }
Markus Armbrusterf9092b12010-06-25 10:33:39 +02002872
2873 bs = NULL;
2874 while ((bs = bdrv_next(bs))) {
2875 if (bdrv_can_snapshot(bs)) {
Markus Armbruster3ac906f2010-07-01 09:30:38 +02002876 bs_snapshots = bs;
2877 return bs;
Markus Armbrusterf9092b12010-06-25 10:33:39 +02002878 }
2879 }
2880 return NULL;
Markus Armbrusterf9092b12010-06-25 10:33:39 +02002881}
2882
ths5fafdf22007-09-16 21:08:06 +00002883int bdrv_snapshot_create(BlockDriverState *bs,
bellardfaea38e2006-08-05 21:31:00 +00002884 QEMUSnapshotInfo *sn_info)
2885{
2886 BlockDriver *drv = bs->drv;
2887 if (!drv)
bellard19cb3732006-08-19 11:45:59 +00002888 return -ENOMEDIUM;
MORITA Kazutaka7cdb1f62010-05-28 11:44:58 +09002889 if (drv->bdrv_snapshot_create)
2890 return drv->bdrv_snapshot_create(bs, sn_info);
2891 if (bs->file)
2892 return bdrv_snapshot_create(bs->file, sn_info);
2893 return -ENOTSUP;
bellardfaea38e2006-08-05 21:31:00 +00002894}
2895
ths5fafdf22007-09-16 21:08:06 +00002896int bdrv_snapshot_goto(BlockDriverState *bs,
bellardfaea38e2006-08-05 21:31:00 +00002897 const char *snapshot_id)
2898{
2899 BlockDriver *drv = bs->drv;
MORITA Kazutaka7cdb1f62010-05-28 11:44:58 +09002900 int ret, open_ret;
2901
bellardfaea38e2006-08-05 21:31:00 +00002902 if (!drv)
bellard19cb3732006-08-19 11:45:59 +00002903 return -ENOMEDIUM;
MORITA Kazutaka7cdb1f62010-05-28 11:44:58 +09002904 if (drv->bdrv_snapshot_goto)
2905 return drv->bdrv_snapshot_goto(bs, snapshot_id);
2906
2907 if (bs->file) {
2908 drv->bdrv_close(bs);
2909 ret = bdrv_snapshot_goto(bs->file, snapshot_id);
2910 open_ret = drv->bdrv_open(bs, bs->open_flags);
2911 if (open_ret < 0) {
2912 bdrv_delete(bs->file);
2913 bs->drv = NULL;
2914 return open_ret;
2915 }
2916 return ret;
2917 }
2918
2919 return -ENOTSUP;
bellardfaea38e2006-08-05 21:31:00 +00002920}
2921
2922int bdrv_snapshot_delete(BlockDriverState *bs, const char *snapshot_id)
2923{
2924 BlockDriver *drv = bs->drv;
2925 if (!drv)
bellard19cb3732006-08-19 11:45:59 +00002926 return -ENOMEDIUM;
MORITA Kazutaka7cdb1f62010-05-28 11:44:58 +09002927 if (drv->bdrv_snapshot_delete)
2928 return drv->bdrv_snapshot_delete(bs, snapshot_id);
2929 if (bs->file)
2930 return bdrv_snapshot_delete(bs->file, snapshot_id);
2931 return -ENOTSUP;
bellardfaea38e2006-08-05 21:31:00 +00002932}
2933
ths5fafdf22007-09-16 21:08:06 +00002934int bdrv_snapshot_list(BlockDriverState *bs,
bellardfaea38e2006-08-05 21:31:00 +00002935 QEMUSnapshotInfo **psn_info)
2936{
2937 BlockDriver *drv = bs->drv;
2938 if (!drv)
bellard19cb3732006-08-19 11:45:59 +00002939 return -ENOMEDIUM;
MORITA Kazutaka7cdb1f62010-05-28 11:44:58 +09002940 if (drv->bdrv_snapshot_list)
2941 return drv->bdrv_snapshot_list(bs, psn_info);
2942 if (bs->file)
2943 return bdrv_snapshot_list(bs->file, psn_info);
2944 return -ENOTSUP;
bellardfaea38e2006-08-05 21:31:00 +00002945}
2946
edison51ef6722010-09-21 19:58:41 -07002947int bdrv_snapshot_load_tmp(BlockDriverState *bs,
2948 const char *snapshot_name)
2949{
2950 BlockDriver *drv = bs->drv;
2951 if (!drv) {
2952 return -ENOMEDIUM;
2953 }
2954 if (!bs->read_only) {
2955 return -EINVAL;
2956 }
2957 if (drv->bdrv_snapshot_load_tmp) {
2958 return drv->bdrv_snapshot_load_tmp(bs, snapshot_name);
2959 }
2960 return -ENOTSUP;
2961}
2962
Marcelo Tosattie8a6bb92012-01-18 14:40:51 +00002963BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
2964 const char *backing_file)
2965{
2966 if (!bs->drv) {
2967 return NULL;
2968 }
2969
2970 if (bs->backing_hd) {
2971 if (strcmp(bs->backing_file, backing_file) == 0) {
2972 return bs->backing_hd;
2973 } else {
2974 return bdrv_find_backing_image(bs->backing_hd, backing_file);
2975 }
2976 }
2977
2978 return NULL;
2979}
2980
bellardfaea38e2006-08-05 21:31:00 +00002981#define NB_SUFFIXES 4
2982
2983char *get_human_readable_size(char *buf, int buf_size, int64_t size)
2984{
2985 static const char suffixes[NB_SUFFIXES] = "KMGT";
2986 int64_t base;
2987 int i;
2988
2989 if (size <= 999) {
2990 snprintf(buf, buf_size, "%" PRId64, size);
2991 } else {
2992 base = 1024;
2993 for(i = 0; i < NB_SUFFIXES; i++) {
2994 if (size < (10 * base)) {
ths5fafdf22007-09-16 21:08:06 +00002995 snprintf(buf, buf_size, "%0.1f%c",
bellardfaea38e2006-08-05 21:31:00 +00002996 (double)size / base,
2997 suffixes[i]);
2998 break;
2999 } else if (size < (1000 * base) || i == (NB_SUFFIXES - 1)) {
ths5fafdf22007-09-16 21:08:06 +00003000 snprintf(buf, buf_size, "%" PRId64 "%c",
bellardfaea38e2006-08-05 21:31:00 +00003001 ((size + (base >> 1)) / base),
3002 suffixes[i]);
3003 break;
3004 }
3005 base = base * 1024;
3006 }
3007 }
3008 return buf;
3009}
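
/*
 * Illustrative examples of the conversion above (made-up inputs): with the
 * 1024-based suffixes, 999 is printed verbatim as "999", 1536 becomes
 * "1.5K", 1048576 becomes "1.0M" and 10 * 1024 * 1024 rounds to "10M".
 */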
3010
3011char *bdrv_snapshot_dump(char *buf, int buf_size, QEMUSnapshotInfo *sn)
3012{
3013 char buf1[128], date_buf[128], clock_buf[128];
bellard3b9f94e2007-01-07 17:27:07 +00003014#ifdef _WIN32
3015 struct tm *ptm;
3016#else
bellardfaea38e2006-08-05 21:31:00 +00003017 struct tm tm;
bellard3b9f94e2007-01-07 17:27:07 +00003018#endif
bellardfaea38e2006-08-05 21:31:00 +00003019 time_t ti;
3020 int64_t secs;
3021
3022 if (!sn) {
ths5fafdf22007-09-16 21:08:06 +00003023 snprintf(buf, buf_size,
3024 "%-10s%-20s%7s%20s%15s",
bellardfaea38e2006-08-05 21:31:00 +00003025 "ID", "TAG", "VM SIZE", "DATE", "VM CLOCK");
3026 } else {
3027 ti = sn->date_sec;
bellard3b9f94e2007-01-07 17:27:07 +00003028#ifdef _WIN32
3029 ptm = localtime(&ti);
3030 strftime(date_buf, sizeof(date_buf),
3031 "%Y-%m-%d %H:%M:%S", ptm);
3032#else
bellardfaea38e2006-08-05 21:31:00 +00003033 localtime_r(&ti, &tm);
3034 strftime(date_buf, sizeof(date_buf),
3035 "%Y-%m-%d %H:%M:%S", &tm);
bellard3b9f94e2007-01-07 17:27:07 +00003036#endif
bellardfaea38e2006-08-05 21:31:00 +00003037 secs = sn->vm_clock_nsec / 1000000000;
3038 snprintf(clock_buf, sizeof(clock_buf),
3039 "%02d:%02d:%02d.%03d",
3040 (int)(secs / 3600),
3041 (int)((secs / 60) % 60),
ths5fafdf22007-09-16 21:08:06 +00003042 (int)(secs % 60),
bellardfaea38e2006-08-05 21:31:00 +00003043 (int)((sn->vm_clock_nsec / 1000000) % 1000));
3044 snprintf(buf, buf_size,
ths5fafdf22007-09-16 21:08:06 +00003045 "%-10s%-20s%7s%20s%15s",
bellardfaea38e2006-08-05 21:31:00 +00003046 sn->id_str, sn->name,
3047 get_human_readable_size(buf1, sizeof(buf1), sn->vm_state_size),
3048 date_buf,
3049 clock_buf);
3050 }
3051 return buf;
3052}
3053
bellard83f64092006-08-01 16:21:11 +00003054/**************************************************************/
3055/* async I/Os */
3056
aliguori3b69e4b2009-01-22 16:59:24 +00003057BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
aliguorif141eaf2009-04-07 18:43:24 +00003058 QEMUIOVector *qiov, int nb_sectors,
aliguori3b69e4b2009-01-22 16:59:24 +00003059 BlockDriverCompletionFunc *cb, void *opaque)
3060{
Stefan Hajnoczibbf0a442010-10-05 14:28:53 +01003061 trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);
3062
Stefan Hajnoczib2a61372011-10-13 13:08:23 +01003063 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors,
Stefan Hajnoczi8c5873d2011-10-13 21:09:28 +01003064 cb, opaque, false);
bellard83f64092006-08-01 16:21:11 +00003065}
3066
aliguorif141eaf2009-04-07 18:43:24 +00003067BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
3068 QEMUIOVector *qiov, int nb_sectors,
3069 BlockDriverCompletionFunc *cb, void *opaque)
bellard83f64092006-08-01 16:21:11 +00003070{
Stefan Hajnoczibbf0a442010-10-05 14:28:53 +01003071 trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);
3072
Stefan Hajnoczi1a6e1152011-10-13 13:08:25 +01003073 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors,
Stefan Hajnoczi8c5873d2011-10-13 21:09:28 +01003074 cb, opaque, true);
bellard83f64092006-08-01 16:21:11 +00003075}
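
/*
 * Minimal usage sketch for the AIO interface (illustrative only; the
 * ExampleReadReq structure and the example_* names are made up here and do
 * not exist elsewhere in the tree).  The key rule it shows: the
 * QEMUIOVector and its buffers must stay alive until the completion
 * callback has run.
 */
typedef struct ExampleReadReq {
    QEMUIOVector qiov;
    uint8_t *buf;
} ExampleReadReq;

static void example_read_done(void *opaque, int ret)
{
    ExampleReadReq *req = opaque;

    if (ret < 0) {
        error_report("example read failed: %s", strerror(-ret));
    }
    qemu_iovec_destroy(&req->qiov);
    qemu_vfree(req->buf);
    g_free(req);
}

void example_submit_read(BlockDriverState *bs)
{
    ExampleReadReq *req = g_malloc0(sizeof(*req));

    /* Aligned bounce buffer wrapped in a single-element iovec */
    req->buf = qemu_blockalign(bs, 8 * BDRV_SECTOR_SIZE);
    qemu_iovec_init(&req->qiov, 1);
    qemu_iovec_add(&req->qiov, req->buf, 8 * BDRV_SECTOR_SIZE);

    /* Read 8 sectors starting at sector 0; example_read_done() is invoked
     * from the event loop once the request completes. */
    bdrv_aio_readv(bs, 0, &req->qiov, 8, example_read_done, req);
}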
3076
Kevin Wolf40b4f532009-09-09 17:53:37 +02003077
3078typedef struct MultiwriteCB {
3079 int error;
3080 int num_requests;
3081 int num_callbacks;
3082 struct {
3083 BlockDriverCompletionFunc *cb;
3084 void *opaque;
3085 QEMUIOVector *free_qiov;
Kevin Wolf40b4f532009-09-09 17:53:37 +02003086 } callbacks[];
3087} MultiwriteCB;
3088
3089static void multiwrite_user_cb(MultiwriteCB *mcb)
3090{
3091 int i;
3092
3093 for (i = 0; i < mcb->num_callbacks; i++) {
3094 mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
Stefan Hajnoczi1e1ea482010-04-21 20:35:45 +01003095 if (mcb->callbacks[i].free_qiov) {
3096 qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
3097 }
Anthony Liguori7267c092011-08-20 22:09:37 -05003098 g_free(mcb->callbacks[i].free_qiov);
Kevin Wolf40b4f532009-09-09 17:53:37 +02003099 }
3100}
3101
3102static void multiwrite_cb(void *opaque, int ret)
3103{
3104 MultiwriteCB *mcb = opaque;
3105
Stefan Hajnoczi6d519a52010-05-22 18:15:08 +01003106 trace_multiwrite_cb(mcb, ret);
3107
Kevin Wolfcb6d3ca2010-04-01 22:48:44 +02003108 if (ret < 0 && !mcb->error) {
Kevin Wolf40b4f532009-09-09 17:53:37 +02003109 mcb->error = ret;
Kevin Wolf40b4f532009-09-09 17:53:37 +02003110 }
3111
3112 mcb->num_requests--;
3113 if (mcb->num_requests == 0) {
Kevin Wolfde189a12010-07-01 16:08:51 +02003114 multiwrite_user_cb(mcb);
Anthony Liguori7267c092011-08-20 22:09:37 -05003115 g_free(mcb);
Kevin Wolf40b4f532009-09-09 17:53:37 +02003116 }
3117}
3118
3119static int multiwrite_req_compare(const void *a, const void *b)
3120{
Christoph Hellwig77be4362010-05-19 20:53:10 +02003121 const BlockRequest *req1 = a, *req2 = b;
3122
3123 /*
3124 * Note that we can't simply subtract req2->sector from req1->sector
3125 * here as that could overflow the return value.
3126 */
3127 if (req1->sector > req2->sector) {
3128 return 1;
3129 } else if (req1->sector < req2->sector) {
3130 return -1;
3131 } else {
3132 return 0;
3133 }
Kevin Wolf40b4f532009-09-09 17:53:37 +02003134}
3135
3136/*
3137 * Takes a bunch of requests and tries to merge them. Returns the number of
3138 * requests that remain after merging.
3139 */
3140static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
3141 int num_reqs, MultiwriteCB *mcb)
3142{
3143 int i, outidx;
3144
3145 // Sort requests by start sector
3146 qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);
3147
3148 // Check if adjacent requests touch the same clusters. If so, combine
3149 // them into a single, larger request.
3150 outidx = 0;
3151 for (i = 1; i < num_reqs; i++) {
3152 int merge = 0;
3153 int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;
3154
Paolo Bonzinib6a127a2012-02-21 16:43:52 +01003155 // Handle exactly sequential writes and overlapping writes.
Kevin Wolf40b4f532009-09-09 17:53:37 +02003156 if (reqs[i].sector <= oldreq_last) {
3157 merge = 1;
3158 }
3159
Christoph Hellwige2a305f2010-01-26 14:49:08 +01003160 if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
3161 merge = 0;
3162 }
3163
Kevin Wolf40b4f532009-09-09 17:53:37 +02003164 if (merge) {
3165 size_t size;
Anthony Liguori7267c092011-08-20 22:09:37 -05003166 QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
Kevin Wolf40b4f532009-09-09 17:53:37 +02003167 qemu_iovec_init(qiov,
3168 reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);
3169
3170 // Add the first request to the merged one. If the requests are
3171 // overlapping, drop the last sectors of the first request.
3172 size = (reqs[i].sector - reqs[outidx].sector) << 9;
3173 qemu_iovec_concat(qiov, reqs[outidx].qiov, size);
3174
Paolo Bonzinib6a127a2012-02-21 16:43:52 +01003175 // We should not need to add any zeros between the two requests
3176 assert (reqs[i].sector <= oldreq_last);
Kevin Wolf40b4f532009-09-09 17:53:37 +02003177
3178 // Add the second request
3179 qemu_iovec_concat(qiov, reqs[i].qiov, reqs[i].qiov->size);
3180
Kevin Wolfcbf1dff2010-05-21 11:09:42 +02003181 reqs[outidx].nb_sectors = qiov->size >> 9;
Kevin Wolf40b4f532009-09-09 17:53:37 +02003182 reqs[outidx].qiov = qiov;
3183
3184 mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
3185 } else {
3186 outidx++;
3187 reqs[outidx].sector = reqs[i].sector;
3188 reqs[outidx].nb_sectors = reqs[i].nb_sectors;
3189 reqs[outidx].qiov = reqs[i].qiov;
3190 }
3191 }
3192
3193 return outidx + 1;
3194}
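
/*
 * Illustrative example of the merge (made-up numbers): given two write
 * requests A = { sector 0, 8 sectors } and B = { sector 4, 12 sectors },
 * the loop above keeps the first 4 sectors of A's qiov, appends all of
 * B's qiov, and ends up with a single request covering sectors 0..15 in
 * which the overlapping sectors 4..7 are taken from B.
 */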
3195
3196/*
3197 * Submit multiple AIO write requests at once.
3198 *
3199 * On success, the function returns 0 and all requests in the reqs array have
3200 * been submitted. On error, this function returns -1, and each of the
3201 * requests may or may not have been submitted. In particular, this means that the
3202 * callback will be called for some of the requests, for others it won't. The
3203 * caller must check the error field of the BlockRequest to wait for the right
3204 * callbacks (if error != 0, no callback will be called).
3205 *
3206 * The implementation may modify the contents of the reqs array, e.g. to merge
3207 * requests. However, the fields opaque and error are left unmodified as they
3208 * are used to signal failure for a single request to the caller.
3209 */
3210int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
3211{
Kevin Wolf40b4f532009-09-09 17:53:37 +02003212 MultiwriteCB *mcb;
3213 int i;
3214
Ryan Harper301db7c2011-03-07 10:01:04 -06003215 /* don't submit writes if we don't have a medium */
3216 if (bs->drv == NULL) {
3217 for (i = 0; i < num_reqs; i++) {
3218 reqs[i].error = -ENOMEDIUM;
3219 }
3220 return -1;
3221 }
3222
Kevin Wolf40b4f532009-09-09 17:53:37 +02003223 if (num_reqs == 0) {
3224 return 0;
3225 }
3226
3227 // Create MultiwriteCB structure
Anthony Liguori7267c092011-08-20 22:09:37 -05003228 mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
Kevin Wolf40b4f532009-09-09 17:53:37 +02003229 mcb->num_requests = 0;
3230 mcb->num_callbacks = num_reqs;
3231
3232 for (i = 0; i < num_reqs; i++) {
3233 mcb->callbacks[i].cb = reqs[i].cb;
3234 mcb->callbacks[i].opaque = reqs[i].opaque;
3235 }
3236
3237 // Check for mergeable requests
3238 num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);
3239
Stefan Hajnoczi6d519a52010-05-22 18:15:08 +01003240 trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);
3241
Paolo Bonzinidf9309f2011-11-14 17:50:50 +01003242 /* Run the aio requests. */
3243 mcb->num_requests = num_reqs;
Kevin Wolf40b4f532009-09-09 17:53:37 +02003244 for (i = 0; i < num_reqs; i++) {
Paolo Bonziniad54ae82011-11-30 09:12:30 +01003245 bdrv_aio_writev(bs, reqs[i].sector, reqs[i].qiov,
Kevin Wolf40b4f532009-09-09 17:53:37 +02003246 reqs[i].nb_sectors, multiwrite_cb, mcb);
Kevin Wolf40b4f532009-09-09 17:53:37 +02003247 }
3248
3249 return 0;
Kevin Wolf40b4f532009-09-09 17:53:37 +02003250}
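
/*
 * Usage sketch for bdrv_aio_multiwrite() (illustrative only; the example_*
 * names and the fixed sector numbers are made up).  Real callers such as
 * virtio-blk collect queued guest writes into a BlockRequest array and
 * submit them in one go; each request keeps its own completion callback.
 */
static void example_multiwrite_cb(void *opaque, int ret)
{
    /* per-request completion; 'opaque' identifies the original request */
}

int example_submit_two_writes(BlockDriverState *bs,
                              QEMUIOVector *qiov0, QEMUIOVector *qiov1)
{
    BlockRequest reqs[2] = {
        {
            .sector     = 0,
            .nb_sectors = qiov0->size >> 9,
            .qiov       = qiov0,
            .cb         = example_multiwrite_cb,
        }, {
            .sector     = 128,
            .nb_sectors = qiov1->size >> 9,
            .qiov       = qiov1,
            .cb         = example_multiwrite_cb,
        },
    };

    if (bdrv_aio_multiwrite(bs, reqs, 2) < 0) {
        /* Requests whose 'error' field is non-zero were rejected and will
         * never get a callback; the others complete asynchronously. */
        return -1;
    }
    return 0;
}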
3251
bellard83f64092006-08-01 16:21:11 +00003252void bdrv_aio_cancel(BlockDriverAIOCB *acb)
pbrookce1a14d2006-08-07 02:38:06 +00003253{
aliguori6bbff9a2009-03-20 18:25:59 +00003254 acb->pool->cancel(acb);
bellard83f64092006-08-01 16:21:11 +00003255}
3256
Zhi Yong Wu98f90db2011-11-08 13:00:14 +08003257/* block I/O throttling */
3258static bool bdrv_exceed_bps_limits(BlockDriverState *bs, int nb_sectors,
3259 bool is_write, double elapsed_time, uint64_t *wait)
3260{
3261 uint64_t bps_limit = 0;
3262 double bytes_limit, bytes_base, bytes_res;
3263 double slice_time, wait_time;
3264
3265 if (bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]) {
3266 bps_limit = bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL];
3267 } else if (bs->io_limits.bps[is_write]) {
3268 bps_limit = bs->io_limits.bps[is_write];
3269 } else {
3270 if (wait) {
3271 *wait = 0;
3272 }
3273
3274 return false;
3275 }
3276
3277 slice_time = bs->slice_end - bs->slice_start;
3278 slice_time /= (NANOSECONDS_PER_SECOND);
3279 bytes_limit = bps_limit * slice_time;
3280 bytes_base = bs->nr_bytes[is_write] - bs->io_base.bytes[is_write];
3281 if (bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]) {
3282 bytes_base += bs->nr_bytes[!is_write] - bs->io_base.bytes[!is_write];
3283 }
3284
3285 /* bytes_base: the number of bytes that have already been read/written,
3286 * obtained from the accumulated statistics.
3287 * bytes_res: the remaining bytes of data which need to be read/written.
3288 * (bytes_base + bytes_res) / bps_limit: used to calculate
3289 * the total time for completing reading/writing all the data.
3290 */
3291 bytes_res = (unsigned) nb_sectors * BDRV_SECTOR_SIZE;
3292
3293 if (bytes_base + bytes_res <= bytes_limit) {
3294 if (wait) {
3295 *wait = 0;
3296 }
3297
3298 return false;
3299 }
3300
3301 /* Calc approx time to dispatch */
3302 wait_time = (bytes_base + bytes_res) / bps_limit - elapsed_time;
3303
3304 /* When the I/O rate at runtime exceeds the limits,
3305 * bs->slice_end needs to be extended so that the current statistics
3306 * can be kept until the timer fires; the amount of the extension was
3307 * tuned based on experimental results.
3308 */
3309 bs->slice_time = wait_time * BLOCK_IO_SLICE_TIME * 10;
3310 bs->slice_end += bs->slice_time - 3 * BLOCK_IO_SLICE_TIME;
3311 if (wait) {
3312 *wait = wait_time * BLOCK_IO_SLICE_TIME * 10;
3313 }
3314
3315 return true;
3316}
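
/*
 * Worked example with made-up numbers: with bps_limit = 1000000 bytes/s
 * and a 100 ms slice, bytes_limit is 100000 bytes.  If 90000 bytes have
 * already been transferred in this slice and a new 32-sector request
 * arrives (bytes_res = 16384 bytes), the total of 106384 bytes exceeds
 * the limit, so the request is delayed by 106384 / 1000000 s minus the
 * time already elapsed in the slice, i.e. roughly 16 ms when elapsed_time
 * is 90 ms.
 */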
3317
3318static bool bdrv_exceed_iops_limits(BlockDriverState *bs, bool is_write,
3319 double elapsed_time, uint64_t *wait)
3320{
3321 uint64_t iops_limit = 0;
3322 double ios_limit, ios_base;
3323 double slice_time, wait_time;
3324
3325 if (bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]) {
3326 iops_limit = bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL];
3327 } else if (bs->io_limits.iops[is_write]) {
3328 iops_limit = bs->io_limits.iops[is_write];
3329 } else {
3330 if (wait) {
3331 *wait = 0;
3332 }
3333
3334 return false;
3335 }
3336
3337 slice_time = bs->slice_end - bs->slice_start;
3338 slice_time /= (NANOSECONDS_PER_SECOND);
3339 ios_limit = iops_limit * slice_time;
3340 ios_base = bs->nr_ops[is_write] - bs->io_base.ios[is_write];
3341 if (bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]) {
3342 ios_base += bs->nr_ops[!is_write] - bs->io_base.ios[!is_write];
3343 }
3344
3345 if (ios_base + 1 <= ios_limit) {
3346 if (wait) {
3347 *wait = 0;
3348 }
3349
3350 return false;
3351 }
3352
3353 /* Calc approx time to dispatch */
3354 wait_time = (ios_base + 1) / iops_limit;
3355 if (wait_time > elapsed_time) {
3356 wait_time = wait_time - elapsed_time;
3357 } else {
3358 wait_time = 0;
3359 }
3360
3361 bs->slice_time = wait_time * BLOCK_IO_SLICE_TIME * 10;
3362 bs->slice_end += bs->slice_time - 3 * BLOCK_IO_SLICE_TIME;
3363 if (wait) {
3364 *wait = wait_time * BLOCK_IO_SLICE_TIME * 10;
3365 }
3366
3367 return true;
3368}
3369
3370static bool bdrv_exceed_io_limits(BlockDriverState *bs, int nb_sectors,
3371 bool is_write, int64_t *wait)
3372{
3373 int64_t now, max_wait;
3374 uint64_t bps_wait = 0, iops_wait = 0;
3375 double elapsed_time;
3376 int bps_ret, iops_ret;
3377
3378 now = qemu_get_clock_ns(vm_clock);
3379 if ((bs->slice_start < now)
3380 && (bs->slice_end > now)) {
3381 bs->slice_end = now + bs->slice_time;
3382 } else {
3383 bs->slice_time = 5 * BLOCK_IO_SLICE_TIME;
3384 bs->slice_start = now;
3385 bs->slice_end = now + bs->slice_time;
3386
3387 bs->io_base.bytes[is_write] = bs->nr_bytes[is_write];
3388 bs->io_base.bytes[!is_write] = bs->nr_bytes[!is_write];
3389
3390 bs->io_base.ios[is_write] = bs->nr_ops[is_write];
3391 bs->io_base.ios[!is_write] = bs->nr_ops[!is_write];
3392 }
3393
3394 elapsed_time = now - bs->slice_start;
3395 elapsed_time /= (NANOSECONDS_PER_SECOND);
3396
3397 bps_ret = bdrv_exceed_bps_limits(bs, nb_sectors,
3398 is_write, elapsed_time, &bps_wait);
3399 iops_ret = bdrv_exceed_iops_limits(bs, is_write,
3400 elapsed_time, &iops_wait);
3401 if (bps_ret || iops_ret) {
3402 max_wait = bps_wait > iops_wait ? bps_wait : iops_wait;
3403 if (wait) {
3404 *wait = max_wait;
3405 }
3406
3407 now = qemu_get_clock_ns(vm_clock);
3408 if (bs->slice_end < now + max_wait) {
3409 bs->slice_end = now + max_wait;
3410 }
3411
3412 return true;
3413 }
3414
3415 if (wait) {
3416 *wait = 0;
3417 }
3418
3419 return false;
3420}
pbrookce1a14d2006-08-07 02:38:06 +00003421
bellard83f64092006-08-01 16:21:11 +00003422/**************************************************************/
3423/* async block device emulation */
3424
Christoph Hellwigc16b5a22009-05-25 12:37:32 +02003425typedef struct BlockDriverAIOCBSync {
3426 BlockDriverAIOCB common;
3427 QEMUBH *bh;
3428 int ret;
3429 /* vector translation state */
3430 QEMUIOVector *qiov;
3431 uint8_t *bounce;
3432 int is_write;
3433} BlockDriverAIOCBSync;
3434
3435static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
3436{
Kevin Wolfb666d232010-05-05 11:44:39 +02003437 BlockDriverAIOCBSync *acb =
3438 container_of(blockacb, BlockDriverAIOCBSync, common);
Dor Laor6a7ad292009-06-01 12:07:23 +03003439 qemu_bh_delete(acb->bh);
Avi Kivity36afc452009-06-23 16:20:36 +03003440 acb->bh = NULL;
Christoph Hellwigc16b5a22009-05-25 12:37:32 +02003441 qemu_aio_release(acb);
3442}
3443
3444static AIOPool bdrv_em_aio_pool = {
3445 .aiocb_size = sizeof(BlockDriverAIOCBSync),
3446 .cancel = bdrv_aio_cancel_em,
3447};
3448
bellard83f64092006-08-01 16:21:11 +00003449static void bdrv_aio_bh_cb(void *opaque)
bellardbeac80c2006-06-26 20:08:57 +00003450{
pbrookce1a14d2006-08-07 02:38:06 +00003451 BlockDriverAIOCBSync *acb = opaque;
aliguorif141eaf2009-04-07 18:43:24 +00003452
aliguorif141eaf2009-04-07 18:43:24 +00003453 if (!acb->is_write)
3454 qemu_iovec_from_buffer(acb->qiov, acb->bounce, acb->qiov->size);
aliguoriceb42de2009-04-07 18:43:28 +00003455 qemu_vfree(acb->bounce);
pbrookce1a14d2006-08-07 02:38:06 +00003456 acb->common.cb(acb->common.opaque, acb->ret);
Dor Laor6a7ad292009-06-01 12:07:23 +03003457 qemu_bh_delete(acb->bh);
Avi Kivity36afc452009-06-23 16:20:36 +03003458 acb->bh = NULL;
pbrookce1a14d2006-08-07 02:38:06 +00003459 qemu_aio_release(acb);
bellardbeac80c2006-06-26 20:08:57 +00003460}
bellardbeac80c2006-06-26 20:08:57 +00003461
aliguorif141eaf2009-04-07 18:43:24 +00003462static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
3463 int64_t sector_num,
3464 QEMUIOVector *qiov,
3465 int nb_sectors,
3466 BlockDriverCompletionFunc *cb,
3467 void *opaque,
3468 int is_write)
3469
bellardea2384d2004-08-01 21:59:26 +00003470{
pbrookce1a14d2006-08-07 02:38:06 +00003471 BlockDriverAIOCBSync *acb;
pbrookce1a14d2006-08-07 02:38:06 +00003472
Christoph Hellwigc16b5a22009-05-25 12:37:32 +02003473 acb = qemu_aio_get(&bdrv_em_aio_pool, bs, cb, opaque);
aliguorif141eaf2009-04-07 18:43:24 +00003474 acb->is_write = is_write;
3475 acb->qiov = qiov;
aliguorie268ca52009-04-22 20:20:00 +00003476 acb->bounce = qemu_blockalign(bs, qiov->size);
Paolo Bonzini3f3aace2011-11-14 17:50:54 +01003477 acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);
aliguorif141eaf2009-04-07 18:43:24 +00003478
3479 if (is_write) {
3480 qemu_iovec_to_buffer(acb->qiov, acb->bounce);
Stefan Hajnoczi1ed20ac2011-10-13 13:08:21 +01003481 acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
aliguorif141eaf2009-04-07 18:43:24 +00003482 } else {
Stefan Hajnoczi1ed20ac2011-10-13 13:08:21 +01003483 acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
aliguorif141eaf2009-04-07 18:43:24 +00003484 }
3485
pbrookce1a14d2006-08-07 02:38:06 +00003486 qemu_bh_schedule(acb->bh);
aliguorif141eaf2009-04-07 18:43:24 +00003487
pbrookce1a14d2006-08-07 02:38:06 +00003488 return &acb->common;
pbrook7a6cba62006-06-04 11:39:07 +00003489}
3490
aliguorif141eaf2009-04-07 18:43:24 +00003491static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
3492 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
pbrookce1a14d2006-08-07 02:38:06 +00003493 BlockDriverCompletionFunc *cb, void *opaque)
bellard83f64092006-08-01 16:21:11 +00003494{
aliguorif141eaf2009-04-07 18:43:24 +00003495 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
bellard83f64092006-08-01 16:21:11 +00003496}
3497
aliguorif141eaf2009-04-07 18:43:24 +00003498static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
3499 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
3500 BlockDriverCompletionFunc *cb, void *opaque)
3501{
3502 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
3503}
3504
Kevin Wolf68485422011-06-30 10:05:46 +02003505
3506typedef struct BlockDriverAIOCBCoroutine {
3507 BlockDriverAIOCB common;
3508 BlockRequest req;
3509 bool is_write;
3510 QEMUBH* bh;
3511} BlockDriverAIOCBCoroutine;
3512
3513static void bdrv_aio_co_cancel_em(BlockDriverAIOCB *blockacb)
3514{
3515 qemu_aio_flush();
3516}
3517
3518static AIOPool bdrv_em_co_aio_pool = {
3519 .aiocb_size = sizeof(BlockDriverAIOCBCoroutine),
3520 .cancel = bdrv_aio_co_cancel_em,
3521};
3522
Paolo Bonzini35246a62011-10-14 10:41:29 +02003523static void bdrv_co_em_bh(void *opaque)
Kevin Wolf68485422011-06-30 10:05:46 +02003524{
3525 BlockDriverAIOCBCoroutine *acb = opaque;
3526
3527 acb->common.cb(acb->common.opaque, acb->req.error);
3528 qemu_bh_delete(acb->bh);
3529 qemu_aio_release(acb);
3530}
3531
Stefan Hajnoczib2a61372011-10-13 13:08:23 +01003532/* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
3533static void coroutine_fn bdrv_co_do_rw(void *opaque)
3534{
3535 BlockDriverAIOCBCoroutine *acb = opaque;
3536 BlockDriverState *bs = acb->common.bs;
3537
3538 if (!acb->is_write) {
3539 acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
Stefan Hajnoczi470c0502012-01-18 14:40:42 +00003540 acb->req.nb_sectors, acb->req.qiov, 0);
Stefan Hajnoczib2a61372011-10-13 13:08:23 +01003541 } else {
3542 acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
Stefan Hajnoczif08f2dd2012-02-07 13:27:25 +00003543 acb->req.nb_sectors, acb->req.qiov, 0);
Stefan Hajnoczib2a61372011-10-13 13:08:23 +01003544 }
3545
Paolo Bonzini35246a62011-10-14 10:41:29 +02003546 acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
Stefan Hajnoczib2a61372011-10-13 13:08:23 +01003547 qemu_bh_schedule(acb->bh);
3548}
3549
Kevin Wolf68485422011-06-30 10:05:46 +02003550static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
3551 int64_t sector_num,
3552 QEMUIOVector *qiov,
3553 int nb_sectors,
3554 BlockDriverCompletionFunc *cb,
3555 void *opaque,
Stefan Hajnoczi8c5873d2011-10-13 21:09:28 +01003556 bool is_write)
Kevin Wolf68485422011-06-30 10:05:46 +02003557{
3558 Coroutine *co;
3559 BlockDriverAIOCBCoroutine *acb;
3560
3561 acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque);
3562 acb->req.sector = sector_num;
3563 acb->req.nb_sectors = nb_sectors;
3564 acb->req.qiov = qiov;
3565 acb->is_write = is_write;
3566
Stefan Hajnoczi8c5873d2011-10-13 21:09:28 +01003567 co = qemu_coroutine_create(bdrv_co_do_rw);
Kevin Wolf68485422011-06-30 10:05:46 +02003568 qemu_coroutine_enter(co, acb);
3569
3570 return &acb->common;
3571}
3572
Paolo Bonzini07f07612011-10-17 12:32:12 +02003573static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
Christoph Hellwigb2e12bc2009-09-04 19:01:49 +02003574{
Paolo Bonzini07f07612011-10-17 12:32:12 +02003575 BlockDriverAIOCBCoroutine *acb = opaque;
3576 BlockDriverState *bs = acb->common.bs;
Christoph Hellwigb2e12bc2009-09-04 19:01:49 +02003577
Paolo Bonzini07f07612011-10-17 12:32:12 +02003578 acb->req.error = bdrv_co_flush(bs);
3579 acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
Christoph Hellwigb2e12bc2009-09-04 19:01:49 +02003580 qemu_bh_schedule(acb->bh);
Christoph Hellwigb2e12bc2009-09-04 19:01:49 +02003581}
3582
Paolo Bonzini07f07612011-10-17 12:32:12 +02003583BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
Alexander Graf016f5cf2010-05-26 17:51:49 +02003584 BlockDriverCompletionFunc *cb, void *opaque)
3585{
Paolo Bonzini07f07612011-10-17 12:32:12 +02003586 trace_bdrv_aio_flush(bs, opaque);
Alexander Graf016f5cf2010-05-26 17:51:49 +02003587
Paolo Bonzini07f07612011-10-17 12:32:12 +02003588 Coroutine *co;
3589 BlockDriverAIOCBCoroutine *acb;
Alexander Graf016f5cf2010-05-26 17:51:49 +02003590
Paolo Bonzini07f07612011-10-17 12:32:12 +02003591 acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque);
3592 co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
3593 qemu_coroutine_enter(co, acb);
Alexander Graf016f5cf2010-05-26 17:51:49 +02003594
Alexander Graf016f5cf2010-05-26 17:51:49 +02003595 return &acb->common;
3596}
3597
Paolo Bonzini4265d622011-10-17 12:32:14 +02003598static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
3599{
3600 BlockDriverAIOCBCoroutine *acb = opaque;
3601 BlockDriverState *bs = acb->common.bs;
3602
3603 acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
3604 acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
3605 qemu_bh_schedule(acb->bh);
3606}
3607
3608BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
3609 int64_t sector_num, int nb_sectors,
3610 BlockDriverCompletionFunc *cb, void *opaque)
3611{
3612 Coroutine *co;
3613 BlockDriverAIOCBCoroutine *acb;
3614
3615 trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);
3616
3617 acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque);
3618 acb->req.sector = sector_num;
3619 acb->req.nb_sectors = nb_sectors;
3620 co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
3621 qemu_coroutine_enter(co, acb);
3622
3623 return &acb->common;
3624}
3625
bellardea2384d2004-08-01 21:59:26 +00003626void bdrv_init(void)
3627{
Anthony Liguori5efa9d52009-05-09 17:03:42 -05003628 module_call_init(MODULE_INIT_BLOCK);
bellardea2384d2004-08-01 21:59:26 +00003629}
pbrookce1a14d2006-08-07 02:38:06 +00003630
Markus Armbrustereb852012009-10-27 18:41:44 +01003631void bdrv_init_with_whitelist(void)
3632{
3633 use_bdrv_whitelist = 1;
3634 bdrv_init();
3635}
3636
Christoph Hellwigc16b5a22009-05-25 12:37:32 +02003637void *qemu_aio_get(AIOPool *pool, BlockDriverState *bs,
3638 BlockDriverCompletionFunc *cb, void *opaque)
aliguori6bbff9a2009-03-20 18:25:59 +00003639{
pbrookce1a14d2006-08-07 02:38:06 +00003640 BlockDriverAIOCB *acb;
3641
aliguori6bbff9a2009-03-20 18:25:59 +00003642 if (pool->free_aiocb) {
3643 acb = pool->free_aiocb;
3644 pool->free_aiocb = acb->next;
pbrookce1a14d2006-08-07 02:38:06 +00003645 } else {
Anthony Liguori7267c092011-08-20 22:09:37 -05003646 acb = g_malloc0(pool->aiocb_size);
aliguori6bbff9a2009-03-20 18:25:59 +00003647 acb->pool = pool;
pbrookce1a14d2006-08-07 02:38:06 +00003648 }
3649 acb->bs = bs;
3650 acb->cb = cb;
3651 acb->opaque = opaque;
3652 return acb;
3653}
3654
3655void qemu_aio_release(void *p)
3656{
aliguori6bbff9a2009-03-20 18:25:59 +00003657 BlockDriverAIOCB *acb = (BlockDriverAIOCB *)p;
3658 AIOPool *pool = acb->pool;
3659 acb->next = pool->free_aiocb;
3660 pool->free_aiocb = acb;
pbrookce1a14d2006-08-07 02:38:06 +00003661}
bellard19cb3732006-08-19 11:45:59 +00003662
3663/**************************************************************/
Kevin Wolff9f05dc2011-07-15 13:50:26 +02003664/* Coroutine block device emulation */
3665
3666typedef struct CoroutineIOCompletion {
3667 Coroutine *coroutine;
3668 int ret;
3669} CoroutineIOCompletion;
3670
3671static void bdrv_co_io_em_complete(void *opaque, int ret)
3672{
3673 CoroutineIOCompletion *co = opaque;
3674
3675 co->ret = ret;
3676 qemu_coroutine_enter(co->coroutine, NULL);
3677}
3678
3679static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
3680 int nb_sectors, QEMUIOVector *iov,
3681 bool is_write)
3682{
3683 CoroutineIOCompletion co = {
3684 .coroutine = qemu_coroutine_self(),
3685 };
3686 BlockDriverAIOCB *acb;
3687
3688 if (is_write) {
Stefan Hajnoczia652d162011-10-05 17:17:02 +01003689 acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
3690 bdrv_co_io_em_complete, &co);
Kevin Wolff9f05dc2011-07-15 13:50:26 +02003691 } else {
Stefan Hajnoczia652d162011-10-05 17:17:02 +01003692 acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
3693 bdrv_co_io_em_complete, &co);
Kevin Wolff9f05dc2011-07-15 13:50:26 +02003694 }
3695
Stefan Hajnoczi59370aa2011-09-30 17:34:58 +01003696 trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
Kevin Wolff9f05dc2011-07-15 13:50:26 +02003697 if (!acb) {
3698 return -EIO;
3699 }
3700 qemu_coroutine_yield();
3701
3702 return co.ret;
3703}
3704
3705static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
3706 int64_t sector_num, int nb_sectors,
3707 QEMUIOVector *iov)
3708{
3709 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
3710}
3711
3712static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
3713 int64_t sector_num, int nb_sectors,
3714 QEMUIOVector *iov)
3715{
3716 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
3717}
3718
Paolo Bonzini07f07612011-10-17 12:32:12 +02003719static void coroutine_fn bdrv_flush_co_entry(void *opaque)
Kevin Wolfe7a8a782011-07-15 16:05:00 +02003720{
Paolo Bonzini07f07612011-10-17 12:32:12 +02003721 RwCo *rwco = opaque;
Kevin Wolfe7a8a782011-07-15 16:05:00 +02003722
Paolo Bonzini07f07612011-10-17 12:32:12 +02003723 rwco->ret = bdrv_co_flush(rwco->bs);
3724}
3725
3726int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
3727{
Kevin Wolfeb489bb2011-11-10 18:10:11 +01003728 int ret;
3729
Paolo Bonzini29cdb252012-03-12 18:26:01 +01003730 if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
Paolo Bonzini07f07612011-10-17 12:32:12 +02003731 return 0;
Kevin Wolfeb489bb2011-11-10 18:10:11 +01003732 }
3733
Kevin Wolfca716362011-11-10 18:13:59 +01003734 /* Write back cached data to the OS even with cache=unsafe */
Kevin Wolfeb489bb2011-11-10 18:10:11 +01003735 if (bs->drv->bdrv_co_flush_to_os) {
3736 ret = bs->drv->bdrv_co_flush_to_os(bs);
3737 if (ret < 0) {
3738 return ret;
3739 }
3740 }
3741
Kevin Wolfca716362011-11-10 18:13:59 +01003742 /* But don't actually force it to the disk with cache=unsafe */
3743 if (bs->open_flags & BDRV_O_NO_FLUSH) {
3744 return 0;
3745 }
3746
Kevin Wolfeb489bb2011-11-10 18:10:11 +01003747 if (bs->drv->bdrv_co_flush_to_disk) {
Paolo Bonzini29cdb252012-03-12 18:26:01 +01003748 ret = bs->drv->bdrv_co_flush_to_disk(bs);
Paolo Bonzini07f07612011-10-17 12:32:12 +02003749 } else if (bs->drv->bdrv_aio_flush) {
3750 BlockDriverAIOCB *acb;
3751 CoroutineIOCompletion co = {
3752 .coroutine = qemu_coroutine_self(),
3753 };
3754
3755 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
3756 if (acb == NULL) {
Paolo Bonzini29cdb252012-03-12 18:26:01 +01003757 ret = -EIO;
Paolo Bonzini07f07612011-10-17 12:32:12 +02003758 } else {
3759 qemu_coroutine_yield();
Paolo Bonzini29cdb252012-03-12 18:26:01 +01003760 ret = co.ret;
Paolo Bonzini07f07612011-10-17 12:32:12 +02003761 }
Paolo Bonzini07f07612011-10-17 12:32:12 +02003762 } else {
3763 /*
3764 * Some block drivers always operate in either writethrough or unsafe
3765 * mode and therefore don't support bdrv_flush. Usually qemu doesn't
3766 * know how the server works (because the behaviour is hardcoded or
3767 * depends on server-side configuration), so we can't ensure that
3768 * everything is safe on disk. Returning an error doesn't work because
3769 * that would break guests even if the server operates in writethrough
3770 * mode.
3771 *
3772 * Let's hope the user knows what he's doing.
3773 */
Paolo Bonzini29cdb252012-03-12 18:26:01 +01003774 ret = 0;
Kevin Wolfe7a8a782011-07-15 16:05:00 +02003775 }
Paolo Bonzini29cdb252012-03-12 18:26:01 +01003776 if (ret < 0) {
3777 return ret;
3778 }
3779
3780 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
3781 * in the case of cache=unsafe, so there are no useless flushes.
3782 */
3783 return bdrv_co_flush(bs->file);
Paolo Bonzini07f07612011-10-17 12:32:12 +02003784}
3785
Anthony Liguori0f154232011-11-14 15:09:45 -06003786void bdrv_invalidate_cache(BlockDriverState *bs)
3787{
3788 if (bs->drv && bs->drv->bdrv_invalidate_cache) {
3789 bs->drv->bdrv_invalidate_cache(bs);
3790 }
3791}
3792
3793void bdrv_invalidate_cache_all(void)
3794{
3795 BlockDriverState *bs;
3796
3797 QTAILQ_FOREACH(bs, &bdrv_states, list) {
3798 bdrv_invalidate_cache(bs);
3799 }
3800}
3801
Benoît Canet07789262012-03-23 08:36:49 +01003802void bdrv_clear_incoming_migration_all(void)
3803{
3804 BlockDriverState *bs;
3805
3806 QTAILQ_FOREACH(bs, &bdrv_states, list) {
3807 bs->open_flags = bs->open_flags & ~(BDRV_O_INCOMING);
3808 }
3809}
3810
Paolo Bonzini07f07612011-10-17 12:32:12 +02003811int bdrv_flush(BlockDriverState *bs)
3812{
3813 Coroutine *co;
3814 RwCo rwco = {
3815 .bs = bs,
3816 .ret = NOT_DONE,
3817 };
3818
3819 if (qemu_in_coroutine()) {
3820 /* Fast-path if already in coroutine context */
3821 bdrv_flush_co_entry(&rwco);
3822 } else {
3823 co = qemu_coroutine_create(bdrv_flush_co_entry);
3824 qemu_coroutine_enter(co, &rwco);
3825 while (rwco.ret == NOT_DONE) {
3826 qemu_aio_wait();
3827 }
3828 }
3829
3830 return rwco.ret;
Kevin Wolfe7a8a782011-07-15 16:05:00 +02003831}
3832
Paolo Bonzini4265d622011-10-17 12:32:14 +02003833static void coroutine_fn bdrv_discard_co_entry(void *opaque)
3834{
3835 RwCo *rwco = opaque;
3836
3837 rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
3838}
3839
3840int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
3841 int nb_sectors)
3842{
3843 if (!bs->drv) {
3844 return -ENOMEDIUM;
3845 } else if (bdrv_check_request(bs, sector_num, nb_sectors)) {
3846 return -EIO;
3847 } else if (bs->read_only) {
3848 return -EROFS;
3849 } else if (bs->drv->bdrv_co_discard) {
3850 return bs->drv->bdrv_co_discard(bs, sector_num, nb_sectors);
3851 } else if (bs->drv->bdrv_aio_discard) {
3852 BlockDriverAIOCB *acb;
3853 CoroutineIOCompletion co = {
3854 .coroutine = qemu_coroutine_self(),
3855 };
3856
3857 acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors,
3858 bdrv_co_io_em_complete, &co);
3859 if (acb == NULL) {
3860 return -EIO;
3861 } else {
3862 qemu_coroutine_yield();
3863 return co.ret;
3864 }
Paolo Bonzini4265d622011-10-17 12:32:14 +02003865 } else {
3866 return 0;
3867 }
3868}
3869
3870int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
3871{
3872 Coroutine *co;
3873 RwCo rwco = {
3874 .bs = bs,
3875 .sector_num = sector_num,
3876 .nb_sectors = nb_sectors,
3877 .ret = NOT_DONE,
3878 };
3879
3880 if (qemu_in_coroutine()) {
3881 /* Fast-path if already in coroutine context */
3882 bdrv_discard_co_entry(&rwco);
3883 } else {
3884 co = qemu_coroutine_create(bdrv_discard_co_entry);
3885 qemu_coroutine_enter(co, &rwco);
3886 while (rwco.ret == NOT_DONE) {
3887 qemu_aio_wait();
3888 }
3889 }
3890
3891 return rwco.ret;
3892}
3893
Kevin Wolff9f05dc2011-07-15 13:50:26 +02003894/**************************************************************/
bellard19cb3732006-08-19 11:45:59 +00003895/* removable device support */
3896
3897/**
3898 * Return TRUE if the media is present
3899 */
3900int bdrv_is_inserted(BlockDriverState *bs)
3901{
3902 BlockDriver *drv = bs->drv;
Markus Armbrustera1aff5b2011-09-06 18:58:41 +02003903
bellard19cb3732006-08-19 11:45:59 +00003904 if (!drv)
3905 return 0;
3906 if (!drv->bdrv_is_inserted)
Markus Armbrustera1aff5b2011-09-06 18:58:41 +02003907 return 1;
3908 return drv->bdrv_is_inserted(bs);
bellard19cb3732006-08-19 11:45:59 +00003909}
3910
3911/**
Markus Armbruster8e49ca42011-08-03 15:08:08 +02003912 * Return whether the media changed since the last call to this
3913 * function, or -ENOTSUP if we don't know. Most drivers don't know.
bellard19cb3732006-08-19 11:45:59 +00003914 */
3915int bdrv_media_changed(BlockDriverState *bs)
3916{
3917 BlockDriver *drv = bs->drv;
bellard19cb3732006-08-19 11:45:59 +00003918
Markus Armbruster8e49ca42011-08-03 15:08:08 +02003919 if (drv && drv->bdrv_media_changed) {
3920 return drv->bdrv_media_changed(bs);
3921 }
3922 return -ENOTSUP;
bellard19cb3732006-08-19 11:45:59 +00003923}
3924
3925/**
3926 * If eject_flag is TRUE, eject the media. Otherwise, close the tray
3927 */
Luiz Capitulinof36f3942012-02-03 16:24:53 -02003928void bdrv_eject(BlockDriverState *bs, bool eject_flag)
bellard19cb3732006-08-19 11:45:59 +00003929{
3930 BlockDriver *drv = bs->drv;
bellard19cb3732006-08-19 11:45:59 +00003931
Markus Armbruster822e1cd2011-07-20 18:23:42 +02003932 if (drv && drv->bdrv_eject) {
3933 drv->bdrv_eject(bs, eject_flag);
bellard19cb3732006-08-19 11:45:59 +00003934 }
Luiz Capitulino6f382ed2012-02-14 13:41:13 -02003935
3936 if (bs->device_name[0] != '\0') {
3937 bdrv_emit_qmp_eject_event(bs, eject_flag);
3938 }
bellard19cb3732006-08-19 11:45:59 +00003939}
3940
bellard19cb3732006-08-19 11:45:59 +00003941/**
3942 * Lock or unlock the media (if it is locked, the user won't be able
3943 * to eject it manually).
3944 */
Markus Armbruster025e8492011-09-06 18:58:47 +02003945void bdrv_lock_medium(BlockDriverState *bs, bool locked)
bellard19cb3732006-08-19 11:45:59 +00003946{
3947 BlockDriver *drv = bs->drv;
3948
Markus Armbruster025e8492011-09-06 18:58:47 +02003949 trace_bdrv_lock_medium(bs, locked);
Stefan Hajnoczib8c6d092011-03-29 20:04:40 +01003950
Markus Armbruster025e8492011-09-06 18:58:47 +02003951 if (drv && drv->bdrv_lock_medium) {
3952 drv->bdrv_lock_medium(bs, locked);
bellard19cb3732006-08-19 11:45:59 +00003953 }
3954}
ths985a03b2007-12-24 16:10:43 +00003955
3956/* needed for generic scsi interface */
3957
3958int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
3959{
3960 BlockDriver *drv = bs->drv;
3961
3962 if (drv && drv->bdrv_ioctl)
3963 return drv->bdrv_ioctl(bs, req, buf);
3964 return -ENOTSUP;
3965}
aliguori7d780662009-03-12 19:57:08 +00003966
aliguori221f7152009-03-28 17:28:41 +00003967BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
3968 unsigned long int req, void *buf,
3969 BlockDriverCompletionFunc *cb, void *opaque)
aliguori7d780662009-03-12 19:57:08 +00003970{
aliguori221f7152009-03-28 17:28:41 +00003971 BlockDriver *drv = bs->drv;
aliguori7d780662009-03-12 19:57:08 +00003972
aliguori221f7152009-03-28 17:28:41 +00003973 if (drv && drv->bdrv_aio_ioctl)
3974 return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
3975 return NULL;
aliguori7d780662009-03-12 19:57:08 +00003976}
aliguorie268ca52009-04-22 20:20:00 +00003977
Markus Armbruster7b6f9302011-09-06 18:58:56 +02003978void bdrv_set_buffer_alignment(BlockDriverState *bs, int align)
3979{
3980 bs->buffer_alignment = align;
3981}
lirans@il.ibm.com7cd1e322009-11-02 15:40:41 +02003982
aliguorie268ca52009-04-22 20:20:00 +00003983void *qemu_blockalign(BlockDriverState *bs, size_t size)
3984{
3985 return qemu_memalign((bs && bs->buffer_alignment) ? bs->buffer_alignment : 512, size);
3986}
lirans@il.ibm.com7cd1e322009-11-02 15:40:41 +02003987
3988void bdrv_set_dirty_tracking(BlockDriverState *bs, int enable)
3989{
3990 int64_t bitmap_size;
Jan Kiszkaa55eb922009-11-30 18:21:19 +01003991
Liran Schouraaa0eb72010-01-26 10:31:48 +02003992 bs->dirty_count = 0;
Jan Kiszkaa55eb922009-11-30 18:21:19 +01003993 if (enable) {
Jan Kiszkac6d22832009-11-30 18:21:20 +01003994 if (!bs->dirty_bitmap) {
3995 bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS) +
Paolo Bonzini71df14f2012-04-12 14:01:04 +02003996 BDRV_SECTORS_PER_DIRTY_CHUNK * BITS_PER_LONG - 1;
3997 bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * BITS_PER_LONG;
Jan Kiszkaa55eb922009-11-30 18:21:19 +01003998
Paolo Bonzini71df14f2012-04-12 14:01:04 +02003999 bs->dirty_bitmap = g_new0(unsigned long, bitmap_size);
Jan Kiszkaa55eb922009-11-30 18:21:19 +01004000 }
lirans@il.ibm.com7cd1e322009-11-02 15:40:41 +02004001 } else {
Jan Kiszkac6d22832009-11-30 18:21:20 +01004002 if (bs->dirty_bitmap) {
Anthony Liguori7267c092011-08-20 22:09:37 -05004003 g_free(bs->dirty_bitmap);
Jan Kiszkac6d22832009-11-30 18:21:20 +01004004 bs->dirty_bitmap = NULL;
Jan Kiszkaa55eb922009-11-30 18:21:19 +01004005 }
lirans@il.ibm.com7cd1e322009-11-02 15:40:41 +02004006 }
4007}
4008
4009int bdrv_get_dirty(BlockDriverState *bs, int64_t sector)
4010{
Jan Kiszka6ea44302009-11-30 18:21:19 +01004011 int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;
Jan Kiszkaa55eb922009-11-30 18:21:19 +01004012
Jan Kiszkac6d22832009-11-30 18:21:20 +01004013 if (bs->dirty_bitmap &&
4014 (sector << BDRV_SECTOR_BITS) < bdrv_getlength(bs)) {
Marcelo Tosatti6d59fec2010-11-08 17:02:54 -02004015 return !!(bs->dirty_bitmap[chunk / (sizeof(unsigned long) * 8)] &
4016 (1UL << (chunk % (sizeof(unsigned long) * 8))));
lirans@il.ibm.com7cd1e322009-11-02 15:40:41 +02004017 } else {
4018 return 0;
4019 }
4020}
4021
Jan Kiszkaa55eb922009-11-30 18:21:19 +01004022void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector,
4023 int nr_sectors)
lirans@il.ibm.com7cd1e322009-11-02 15:40:41 +02004024{
4025 set_dirty_bitmap(bs, cur_sector, nr_sectors, 0);
4026}
Liran Schouraaa0eb72010-01-26 10:31:48 +02004027
4028int64_t bdrv_get_dirty_count(BlockDriverState *bs)
4029{
4030 return bs->dirty_count;
4031}
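
/*
 * Usage sketch for the dirty tracking interface (illustrative only; the
 * example_scan_dirty name is made up).  Block migration follows this
 * pattern: enable tracking, then repeatedly look for dirty chunks, copy
 * them out and clear the corresponding bits so bdrv_get_dirty_count()
 * converges towards zero.
 */
void example_scan_dirty(BlockDriverState *bs)
{
    int64_t total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
    int64_t sector;

    bdrv_set_dirty_tracking(bs, 1);

    /* ... guest keeps writing while the copy loop runs ... */

    for (sector = 0; sector < total_sectors;
         sector += BDRV_SECTORS_PER_DIRTY_CHUNK) {
        if (bdrv_get_dirty(bs, sector)) {
            /* copy BDRV_SECTORS_PER_DIRTY_CHUNK sectors starting at
             * 'sector', then mark the chunk clean again */
            bdrv_reset_dirty(bs, sector, BDRV_SECTORS_PER_DIRTY_CHUNK);
        }
    }

    bdrv_set_dirty_tracking(bs, 0);
}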
Jes Sorensenf88e1a42010-12-16 13:52:15 +01004032
Marcelo Tosattidb593f22011-01-26 12:12:34 -02004033void bdrv_set_in_use(BlockDriverState *bs, int in_use)
4034{
4035 assert(bs->in_use != in_use);
4036 bs->in_use = in_use;
4037}
4038
4039int bdrv_in_use(BlockDriverState *bs)
4040{
4041 return bs->in_use;
4042}
4043
Luiz Capitulino28a72822011-09-26 17:43:50 -03004044void bdrv_iostatus_enable(BlockDriverState *bs)
4045{
Luiz Capitulinod6bf2792011-10-14 17:11:23 -03004046 bs->iostatus_enabled = true;
Luiz Capitulino58e21ef2011-10-14 17:22:24 -03004047 bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
Luiz Capitulino28a72822011-09-26 17:43:50 -03004048}
4049
4050/* The I/O status is only enabled if the drive explicitly
4051 * enables it _and_ the VM is configured to stop on errors */
4052bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
4053{
Luiz Capitulinod6bf2792011-10-14 17:11:23 -03004054 return (bs->iostatus_enabled &&
Luiz Capitulino28a72822011-09-26 17:43:50 -03004055 (bs->on_write_error == BLOCK_ERR_STOP_ENOSPC ||
4056 bs->on_write_error == BLOCK_ERR_STOP_ANY ||
4057 bs->on_read_error == BLOCK_ERR_STOP_ANY));
4058}
4059
4060void bdrv_iostatus_disable(BlockDriverState *bs)
4061{
Luiz Capitulinod6bf2792011-10-14 17:11:23 -03004062 bs->iostatus_enabled = false;
Luiz Capitulino28a72822011-09-26 17:43:50 -03004063}
4064
4065void bdrv_iostatus_reset(BlockDriverState *bs)
4066{
4067 if (bdrv_iostatus_is_enabled(bs)) {
Luiz Capitulino58e21ef2011-10-14 17:22:24 -03004068 bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
Luiz Capitulino28a72822011-09-26 17:43:50 -03004069 }
4070}
4071
4072/* XXX: Today this is set by device models because it makes the implementation
4073 quite simple. However, the block layer knows about the error, so it's
4074 possible to implement this without device models being involved */
4075void bdrv_iostatus_set_err(BlockDriverState *bs, int error)
4076{
Luiz Capitulino58e21ef2011-10-14 17:22:24 -03004077 if (bdrv_iostatus_is_enabled(bs) &&
4078 bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
Luiz Capitulino28a72822011-09-26 17:43:50 -03004079 assert(error >= 0);
Luiz Capitulino58e21ef2011-10-14 17:22:24 -03004080 bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
4081 BLOCK_DEVICE_IO_STATUS_FAILED;
Luiz Capitulino28a72822011-09-26 17:43:50 -03004082 }
4083}
4084
Christoph Hellwiga597e792011-08-25 08:26:01 +02004085void
4086bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes,
4087 enum BlockAcctType type)
4088{
4089 assert(type < BDRV_MAX_IOTYPE);
4090
4091 cookie->bytes = bytes;
Christoph Hellwigc488c7f2011-08-25 08:26:10 +02004092 cookie->start_time_ns = get_clock();
Christoph Hellwiga597e792011-08-25 08:26:01 +02004093 cookie->type = type;
4094}
4095
4096void
4097bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie)
4098{
4099 assert(cookie->type < BDRV_MAX_IOTYPE);
4100
4101 bs->nr_bytes[cookie->type] += cookie->bytes;
4102 bs->nr_ops[cookie->type]++;
Christoph Hellwigc488c7f2011-08-25 08:26:10 +02004103 bs->total_time_ns[cookie->type] += get_clock() - cookie->start_time_ns;
Christoph Hellwiga597e792011-08-25 08:26:01 +02004104}
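
/*
 * Usage sketch for the accounting hooks (illustrative only; the example_*
 * names are made up).  A device model brackets each guest request with
 * bdrv_acct_start()/bdrv_acct_done() so that query-blockstats can report
 * byte counts, operation counts and total service times.
 */
typedef struct ExampleAcctReq {
    BlockAcctCookie acct;
} ExampleAcctReq;

void example_accounted_write(BlockDriverState *bs, ExampleAcctReq *req,
                             int64_t sector_num, QEMUIOVector *qiov,
                             BlockDriverCompletionFunc *cb)
{
    /* Start the clock before issuing the request... */
    bdrv_acct_start(bs, &req->acct, qiov->size, BDRV_ACCT_WRITE);
    bdrv_aio_writev(bs, sector_num, qiov, qiov->size >> 9, cb, req);
    /* ...and have 'cb' call bdrv_acct_done(bs, &req->acct) when the
     * request completes. */
}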
4105
Jes Sorensenf88e1a42010-12-16 13:52:15 +01004106int bdrv_img_create(const char *filename, const char *fmt,
4107 const char *base_filename, const char *base_fmt,
4108 char *options, uint64_t img_size, int flags)
4109{
4110 QEMUOptionParameter *param = NULL, *create_options = NULL;
Kevin Wolfd2208942011-06-01 14:03:31 +02004111 QEMUOptionParameter *backing_fmt, *backing_file, *size;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01004112 BlockDriverState *bs = NULL;
4113 BlockDriver *drv, *proto_drv;
Stefan Hajnoczi96df67d2011-01-24 09:32:20 +00004114 BlockDriver *backing_drv = NULL;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01004115 int ret = 0;
4116
4117 /* Find driver and parse its options */
4118 drv = bdrv_find_format(fmt);
4119 if (!drv) {
4120 error_report("Unknown file format '%s'", fmt);
Jes Sorensen4f70f242010-12-16 13:52:18 +01004121 ret = -EINVAL;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01004122 goto out;
4123 }
4124
4125 proto_drv = bdrv_find_protocol(filename);
4126 if (!proto_drv) {
4127 error_report("Unknown protocol '%s'", filename);
Jes Sorensen4f70f242010-12-16 13:52:18 +01004128 ret = -EINVAL;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01004129 goto out;
4130 }
4131
4132 create_options = append_option_parameters(create_options,
4133 drv->create_options);
4134 create_options = append_option_parameters(create_options,
4135 proto_drv->create_options);
4136
4137 /* Create parameter list with default values */
4138 param = parse_option_parameters("", create_options, param);
4139
4140 set_option_parameter_int(param, BLOCK_OPT_SIZE, img_size);
4141
4142 /* Parse -o options */
4143 if (options) {
4144 param = parse_option_parameters(options, create_options, param);
4145 if (param == NULL) {
4146 error_report("Invalid options for file format '%s'.", fmt);
Jes Sorensen4f70f242010-12-16 13:52:18 +01004147 ret = -EINVAL;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01004148 goto out;
4149 }
4150 }
4151
4152 if (base_filename) {
4153 if (set_option_parameter(param, BLOCK_OPT_BACKING_FILE,
4154 base_filename)) {
4155 error_report("Backing file not supported for file format '%s'",
4156 fmt);
Jes Sorensen4f70f242010-12-16 13:52:18 +01004157 ret = -EINVAL;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01004158 goto out;
4159 }
4160 }
4161
4162 if (base_fmt) {
4163 if (set_option_parameter(param, BLOCK_OPT_BACKING_FMT, base_fmt)) {
4164 error_report("Backing file format not supported for file "
4165 "format '%s'", fmt);
Jes Sorensen4f70f242010-12-16 13:52:18 +01004166 ret = -EINVAL;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01004167 goto out;
4168 }
4169 }
4170
Jes Sorensen792da932010-12-16 13:52:17 +01004171 backing_file = get_option_parameter(param, BLOCK_OPT_BACKING_FILE);
4172 if (backing_file && backing_file->value.s) {
4173 if (!strcmp(filename, backing_file->value.s)) {
4174 error_report("Error: Trying to create an image with the "
4175 "same filename as the backing file");
Jes Sorensen4f70f242010-12-16 13:52:18 +01004176 ret = -EINVAL;
Jes Sorensen792da932010-12-16 13:52:17 +01004177 goto out;
4178 }
4179 }
4180
Jes Sorensenf88e1a42010-12-16 13:52:15 +01004181 backing_fmt = get_option_parameter(param, BLOCK_OPT_BACKING_FMT);
4182 if (backing_fmt && backing_fmt->value.s) {
Stefan Hajnoczi96df67d2011-01-24 09:32:20 +00004183 backing_drv = bdrv_find_format(backing_fmt->value.s);
4184 if (!backing_drv) {
Jes Sorensenf88e1a42010-12-16 13:52:15 +01004185 error_report("Unknown backing file format '%s'",
4186 backing_fmt->value.s);
Jes Sorensen4f70f242010-12-16 13:52:18 +01004187 ret = -EINVAL;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01004188 goto out;
4189 }
4190 }
4191
4192 // The size for the image must always be specified, with one exception:
4193 // If we are using a backing file, we can obtain the size from there
Kevin Wolfd2208942011-06-01 14:03:31 +02004194 size = get_option_parameter(param, BLOCK_OPT_SIZE);
4195 if (size && size->value.n == -1) {
Jes Sorensenf88e1a42010-12-16 13:52:15 +01004196 if (backing_file && backing_file->value.s) {
4197 uint64_t size;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01004198 char buf[32];
Paolo Bonzini63090da2012-04-12 14:01:03 +02004199 int back_flags;
4200
4201 /* backing files always opened read-only */
4202 back_flags =
4203 flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
Jes Sorensenf88e1a42010-12-16 13:52:15 +01004204
Jes Sorensenf88e1a42010-12-16 13:52:15 +01004205 bs = bdrv_new("");
4206
Paolo Bonzini63090da2012-04-12 14:01:03 +02004207 ret = bdrv_open(bs, backing_file->value.s, back_flags, backing_drv);
Jes Sorensenf88e1a42010-12-16 13:52:15 +01004208 if (ret < 0) {
Stefan Hajnoczi96df67d2011-01-24 09:32:20 +00004209 error_report("Could not open '%s'", backing_file->value.s);
Jes Sorensenf88e1a42010-12-16 13:52:15 +01004210 goto out;
4211 }
4212 bdrv_get_geometry(bs, &size);
4213 size *= 512;
4214
4215 snprintf(buf, sizeof(buf), "%" PRId64, size);
4216 set_option_parameter(param, BLOCK_OPT_SIZE, buf);
4217 } else {
4218 error_report("Image creation needs a size parameter");
Jes Sorensen4f70f242010-12-16 13:52:18 +01004219 ret = -EINVAL;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01004220 goto out;
4221 }
4222 }
4223
4224 printf("Formatting '%s', fmt=%s ", filename, fmt);
4225 print_option_parameters(param);
4226 puts("");
4227
4228 ret = bdrv_create(drv, filename, param);
4229
4230 if (ret < 0) {
4231 if (ret == -ENOTSUP) {
4232 error_report("Formatting or formatting option not supported for "
4233 "file format '%s'", fmt);
4234 } else if (ret == -EFBIG) {
4235 error_report("The image size is too large for file format '%s'",
4236 fmt);
4237 } else {
4238 error_report("%s: error while creating %s: %s", filename, fmt,
4239 strerror(-ret));
4240 }
4241 }
4242
4243out:
4244 free_option_parameters(create_options);
4245 free_option_parameters(param);
4246
4247 if (bs) {
4248 bdrv_delete(bs);
4249 }
Jes Sorensen4f70f242010-12-16 13:52:18 +01004250
4251 return ret;
Jes Sorensenf88e1a42010-12-16 13:52:15 +01004252}
Stefan Hajnoczieeec61f2012-01-18 14:40:43 +00004253
4254void *block_job_create(const BlockJobType *job_type, BlockDriverState *bs,
Stefan Hajnoczic83c66c2012-04-25 16:51:03 +01004255 int64_t speed, BlockDriverCompletionFunc *cb,
4256 void *opaque, Error **errp)
Stefan Hajnoczieeec61f2012-01-18 14:40:43 +00004257{
4258 BlockJob *job;
4259
4260 if (bs->job || bdrv_in_use(bs)) {
Stefan Hajnoczifd7f8c62012-04-25 16:51:00 +01004261 error_set(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs));
Stefan Hajnoczieeec61f2012-01-18 14:40:43 +00004262 return NULL;
4263 }
4264 bdrv_set_in_use(bs, 1);
4265
4266 job = g_malloc0(job_type->instance_size);
4267 job->job_type = job_type;
4268 job->bs = bs;
4269 job->cb = cb;
4270 job->opaque = opaque;
Paolo Bonzini4513eaf2012-05-08 16:51:45 +02004271 job->busy = true;
Stefan Hajnoczieeec61f2012-01-18 14:40:43 +00004272 bs->job = job;
Stefan Hajnoczic83c66c2012-04-25 16:51:03 +01004273
4274 /* Only set speed when necessary to avoid NotSupported error */
4275 if (speed != 0) {
4276 Error *local_err = NULL;
4277
4278 block_job_set_speed(job, speed, &local_err);
4279 if (error_is_set(&local_err)) {
4280 bs->job = NULL;
4281 g_free(job);
4282 bdrv_set_in_use(bs, 0);
4283 error_propagate(errp, local_err);
4284 return NULL;
4285 }
4286 }
Stefan Hajnoczieeec61f2012-01-18 14:40:43 +00004287 return job;
4288}
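/*
 * Illustrative sketch, not part of this file: how a concrete job type might
 * plug into block_job_create().  Only .instance_size and .set_speed can be
 * inferred from the code above; MyBlockJob, my_set_speed() and the calling
 * context are made-up examples.
 *
 *   typedef struct MyBlockJob {
 *       BlockJob common;       // must be first: block_job_create() allocates
 *                              // instance_size bytes and fills in this
 *                              // embedded BlockJob
 *       int64_t chunk_size;    // private state, example only
 *   } MyBlockJob;
 *
 *   static void my_set_speed(BlockJob *job, int64_t speed, Error **errp)
 *   {
 *       // validate and record the new limit for the job's main loop
 *   }
 *
 *   static const BlockJobType my_job_type = {
 *       .instance_size = sizeof(MyBlockJob),
 *       .set_speed     = my_set_speed,
 *   };
 *
 *   // e.g. in a QMP command handler:
 *   MyBlockJob *s = block_job_create(&my_job_type, bs, speed, cb, opaque, errp);
 *   if (!s) {
 *       return;   // errp is set: device already in use, or set_speed failed
 *   }
 */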
4289
4290void block_job_complete(BlockJob *job, int ret)
4291{
4292 BlockDriverState *bs = job->bs;
4293
4294 assert(bs->job == job);
4295 job->cb(job->opaque, ret);
4296 bs->job = NULL;
4297 g_free(job);
4298 bdrv_set_in_use(bs, 0);
4299}
4300
Stefan Hajnoczi882ec7c2012-04-25 16:51:02 +01004301void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
Stefan Hajnoczieeec61f2012-01-18 14:40:43 +00004302{
Stefan Hajnoczi9e6636c2012-04-25 16:51:01 +01004303 Error *local_err = NULL;
Paolo Bonzini9f25ecc2012-03-30 13:17:12 +02004304
Stefan Hajnoczieeec61f2012-01-18 14:40:43 +00004305 if (!job->job_type->set_speed) {
Stefan Hajnoczi9e6636c2012-04-25 16:51:01 +01004306 error_set(errp, QERR_NOT_SUPPORTED);
4307 return;
Stefan Hajnoczieeec61f2012-01-18 14:40:43 +00004308 }
Stefan Hajnoczi882ec7c2012-04-25 16:51:02 +01004309 job->job_type->set_speed(job, speed, &local_err);
Stefan Hajnoczi9e6636c2012-04-25 16:51:01 +01004310 if (error_is_set(&local_err)) {
4311 error_propagate(errp, local_err);
4312 return;
Paolo Bonzini9f25ecc2012-03-30 13:17:12 +02004313 }
Stefan Hajnoczi9e6636c2012-04-25 16:51:01 +01004314
Stefan Hajnoczi882ec7c2012-04-25 16:51:02 +01004315 job->speed = speed;
Stefan Hajnoczieeec61f2012-01-18 14:40:43 +00004316}
4317
4318void block_job_cancel(BlockJob *job)
4319{
4320 job->cancelled = true;
Paolo Bonzinifa4478d2012-05-08 16:51:46 +02004321 if (job->co && !job->busy) {
4322 qemu_coroutine_enter(job->co, NULL);
4323 }
Stefan Hajnoczieeec61f2012-01-18 14:40:43 +00004324}
4325
4326bool block_job_is_cancelled(BlockJob *job)
4327{
4328 return job->cancelled;
4329}
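/*
 * Illustrative sketch, not part of this file: cancellation is cooperative,
 * so a job's coroutine is expected to poll block_job_is_cancelled() and to
 * finish through block_job_complete().  my_job_do_some_work(), rt_clock and
 * SLICE_TIME_NS are made-up stand-ins.
 *
 *   static void coroutine_fn my_job_run(void *opaque)
 *   {
 *       MyBlockJob *s = opaque;
 *       int ret = 0;
 *
 *       while (!block_job_is_cancelled(&s->common)) {
 *           ret = my_job_do_some_work(s);
 *           if (ret < 0) {
 *               break;
 *           }
 *           // Yield between chunks so cancellation stays responsive;
 *           // block_job_sleep_ns() (below) skips the sleep entirely if the
 *           // job has already been cancelled.
 *           block_job_sleep_ns(&s->common, rt_clock, SLICE_TIME_NS);
 *       }
 *
 *       block_job_complete(&s->common, ret);   // clears bs->job and in_use
 *   }
 */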
Paolo Bonzini3e914652012-03-30 13:17:11 +02004330
Paolo Bonzinifa4478d2012-05-08 16:51:46 +02004331struct BlockCancelData {
4332 BlockJob *job;
4333 BlockDriverCompletionFunc *cb;
4334 void *opaque;
4335 bool cancelled;
4336 int ret;
4337};
4338
4339static void block_job_cancel_cb(void *opaque, int ret)
Paolo Bonzini3e914652012-03-30 13:17:11 +02004340{
Paolo Bonzinifa4478d2012-05-08 16:51:46 +02004341 struct BlockCancelData *data = opaque;
4342
4343 data->cancelled = block_job_is_cancelled(data->job);
4344 data->ret = ret;
4345 data->cb(data->opaque, ret);
4346}
4347
4348int block_job_cancel_sync(BlockJob *job)
4349{
4350 struct BlockCancelData data;
Paolo Bonzini3e914652012-03-30 13:17:11 +02004351 BlockDriverState *bs = job->bs;
4352
4353 assert(bs->job == job);
Paolo Bonzinifa4478d2012-05-08 16:51:46 +02004354
4355 /* Set up our own callback to store the result and chain to
4356 * the original callback.
4357 */
4358 data.job = job;
4359 data.cb = job->cb;
4360 data.opaque = job->opaque;
4361 data.ret = -EINPROGRESS;
4362 job->cb = block_job_cancel_cb;
4363 job->opaque = &data;
Paolo Bonzini3e914652012-03-30 13:17:11 +02004364 block_job_cancel(job);
Paolo Bonzinifa4478d2012-05-08 16:51:46 +02004365 while (data.ret == -EINPROGRESS) {
Paolo Bonzini3e914652012-03-30 13:17:11 +02004366 qemu_aio_wait();
4367 }
Paolo Bonzinifa4478d2012-05-08 16:51:46 +02004368 return (data.cancelled && data.ret == 0) ? -ECANCELED : data.ret;
Paolo Bonzini3e914652012-03-30 13:17:11 +02004369}
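/*
 * Illustrative sketch, not part of this file: a caller that has to wait for
 * the job to go away (for instance before detaching the device) can use
 * block_job_cancel_sync() like this; the surrounding context is assumed.
 *
 *   if (bs->job) {
 *       int ret = block_job_cancel_sync(bs->job);
 *       // ret is -ECANCELED if the job stopped cleanly after the cancel
 *       // request, otherwise it is the job's own error code.
 *   }
 */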
Paolo Bonzini4513eaf2012-05-08 16:51:45 +02004370
4371void block_job_sleep_ns(BlockJob *job, QEMUClock *clock, int64_t ns)
4372{
4373 /* Check cancellation *before* clearing busy: an already-cancelled job must
 * not sleep, and busy may only be false while parked in co_sleep_ns(). */
4374 if (!block_job_is_cancelled(job)) {
4375 job->busy = false;
4376 co_sleep_ns(clock, ns);
4377 job->busy = true;
4378 }
4379}
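/*
 * Summary of the code above, for readers of this file: the busy flag is the
 * handshake between block_job_sleep_ns() and block_job_cancel().
 * block_job_cancel() only re-enters the job coroutine while busy is false,
 * i.e. while the coroutine is parked in co_sleep_ns(); at any other time the
 * coroutine is left to notice job->cancelled at its next
 * block_job_is_cancelled() check.
 */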