author     Eric Blake <eblake@redhat.com>    2017-04-29 14:14:16 -0500
committer  Max Reitz <mreitz@redhat.com>     2017-05-11 14:28:06 +0200
commit     63188c245013dbe383e8b031e665f813e2452ea5 (patch)
tree       caf0ab0519943bf1785c55f364f975e7a8ecfd92 /block/blkdebug.c
parent     d157ed5f7235f3d2d5596a514ad7507b18e24b88 (diff)
blkdebug: Add pass-through write_zero and discard support
In order to test the effects of artificial geometry constraints on operations like write zero or discard, we first need blkdebug to manage these actions. It also allows us to inject errors on those operations, just like we can for read/write/flush.

We can also test the contract promised by the block layer; namely, if a device has specified limits on alignment or maximum size, then those limits must be obeyed (for now, the blkdebug driver merely inherits limits from whatever it is wrapping, but the next patch will further enhance it to allow specific limit overrides).

This patch intentionally refuses to service requests smaller than the requested alignments; this is because an upcoming patch adds a qemu-iotest to prove that the block layer is correctly handling fragmentation, but the test only works if there is a way to tell the difference at artificial alignment boundaries when blkdebug is using a larger-than-default alignment. If we let the blkdebug layer always defer to the underlying layer, which potentially has a smaller granularity, the iotest will be thwarted.

Tested by setting up an NBD server with export 'foo', then invoking:

$ ./qemu-io
qemu-io> open -o driver=blkdebug blkdebug::nbd://localhost:10809/foo
qemu-io> d 0 15M
qemu-io> w -z 0 15M

Pre-patch, the server never sees the discard (it was silently eaten by the block layer); post-patch it is passed across the wire. Likewise, pre-patch the write is always passed with NBD_WRITE (with 15M of zeroes on the wire), while post-patch it can utilize NBD_WRITE_ZEROES (for less traffic).

Signed-off-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20170429191419.30051-7-eblake@redhat.com
Signed-off-by: Max Reitz <mreitz@redhat.com>
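For context, the block-layer contract mentioned above is expressed through the BlockLimits fields that this patch reads (request_alignment, pwrite_zeroes_alignment, max_pwrite_zeroes, pdiscard_alignment, max_pdiscard). The sketch below is not part of this commit: it shows how a driver's .bdrv_refresh_limits callback might advertise such limits. The function name and the numeric values are invented for illustration; blkdebug itself only gains explicit limit overrides in the next patch of the series.

#include "qemu/osdep.h"
#include "block/block_int.h"

/* Illustrative only: advertise artificial geometry so that the generic
 * block layer must align, clamp and fragment zero/discard requests
 * before they reach the driver.  The values here are made up. */
static void demo_refresh_limits(BlockDriverState *bs, Error **errp)
{
    bs->bl.request_alignment = 512;              /* smallest I/O unit */
    bs->bl.pwrite_zeroes_alignment = 64 * 1024;  /* sub-64k zero requests get
                                                  * -ENOTSUP and fall back to
                                                  * plain writes */
    bs->bl.max_pwrite_zeroes = 15 * 1024 * 1024; /* larger requests are split */
    bs->bl.pdiscard_alignment = 64 * 1024;
    bs->bl.max_pdiscard = 15 * 1024 * 1024;
}

With limits like these, a zero or discard request that is not aligned to 64 KiB is handled piecewise by the block layer: the aligned middle reaches the driver, while the unaligned head and tail are handled separately (for write zeroes, by falling back to plain writes). That head/tail distinction is exactly what the refusal logic in this patch makes observable to the upcoming iotest.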
Diffstat (limited to 'block/blkdebug.c')
-rw-r--r--  block/blkdebug.c  74
1 file changed, 74 insertions, 0 deletions
diff --git a/block/blkdebug.c b/block/blkdebug.c
index 554573fd14..b8cc87617e 100644
--- a/block/blkdebug.c
+++ b/block/blkdebug.c
@@ -1,6 +1,7 @@
 /*
  * Block protocol for I/O error injection
  *
+ * Copyright (C) 2016-2017 Red Hat, Inc.
  * Copyright (c) 2010 Kevin Wolf <kwolf@redhat.com>
  *
  * Permission is hereby granted, free of charge, to any person obtaining a copy
@@ -382,6 +383,11 @@ static int blkdebug_open(BlockDriverState *bs, QDict *options, int flags,
         goto out;
     }
+    bs->supported_write_flags = BDRV_REQ_FUA &
+        bs->file->bs->supported_write_flags;
+    bs->supported_zero_flags = (BDRV_REQ_FUA | BDRV_REQ_MAY_UNMAP) &
+        bs->file->bs->supported_zero_flags;
+
     /* Set request alignment */
     align = qemu_opt_get_size(opts, "align", 0);
     if (align < INT_MAX && is_power_of_2(align)) {
@@ -494,6 +500,72 @@ static int blkdebug_co_flush(BlockDriverState *bs)
     return bdrv_co_flush(bs->file->bs);
 }
+static int coroutine_fn blkdebug_co_pwrite_zeroes(BlockDriverState *bs,
+                                                  int64_t offset, int count,
+                                                  BdrvRequestFlags flags)
+{
+    uint32_t align = MAX(bs->bl.request_alignment,
+                         bs->bl.pwrite_zeroes_alignment);
+    int err;
+
+    /* Only pass through requests that are larger than requested
+     * preferred alignment (so that we test the fallback to writes on
+     * unaligned portions), and check that the block layer never hands
+     * us anything unaligned that crosses an alignment boundary. */
+    if (count < align) {
+        assert(QEMU_IS_ALIGNED(offset, align) ||
+               QEMU_IS_ALIGNED(offset + count, align) ||
+               DIV_ROUND_UP(offset, align) ==
+               DIV_ROUND_UP(offset + count, align));
+        return -ENOTSUP;
+    }
+    assert(QEMU_IS_ALIGNED(offset, align));
+    assert(QEMU_IS_ALIGNED(count, align));
+    if (bs->bl.max_pwrite_zeroes) {
+        assert(count <= bs->bl.max_pwrite_zeroes);
+    }
+
+    err = rule_check(bs, offset, count);
+    if (err) {
+        return err;
+    }
+
+    return bdrv_co_pwrite_zeroes(bs->file, offset, count, flags);
+}
+
+static int coroutine_fn blkdebug_co_pdiscard(BlockDriverState *bs,
+                                             int64_t offset, int count)
+{
+    uint32_t align = bs->bl.pdiscard_alignment;
+    int err;
+
+    /* Only pass through requests that are larger than requested
+     * minimum alignment, and ensure that unaligned requests do not
+     * cross optimum discard boundaries. */
+    if (count < bs->bl.request_alignment) {
+        assert(QEMU_IS_ALIGNED(offset, align) ||
+               QEMU_IS_ALIGNED(offset + count, align) ||
+               DIV_ROUND_UP(offset, align) ==
+               DIV_ROUND_UP(offset + count, align));
+        return -ENOTSUP;
+    }
+    assert(QEMU_IS_ALIGNED(offset, bs->bl.request_alignment));
+    assert(QEMU_IS_ALIGNED(count, bs->bl.request_alignment));
+    if (align && count >= align) {
+        assert(QEMU_IS_ALIGNED(offset, align));
+        assert(QEMU_IS_ALIGNED(count, align));
+    }
+    if (bs->bl.max_pdiscard) {
+        assert(count <= bs->bl.max_pdiscard);
+    }
+
+    err = rule_check(bs, offset, count);
+    if (err) {
+        return err;
+    }
+
+    return bdrv_co_pdiscard(bs->file->bs, offset, count);
+}
 static void blkdebug_close(BlockDriverState *bs)
 {
@@ -748,6 +820,8 @@ static BlockDriver bdrv_blkdebug = {
     .bdrv_co_preadv         = blkdebug_co_preadv,
     .bdrv_co_pwritev        = blkdebug_co_pwritev,
     .bdrv_co_flush_to_disk  = blkdebug_co_flush,
+    .bdrv_co_pwrite_zeroes  = blkdebug_co_pwrite_zeroes,
+    .bdrv_co_pdiscard       = blkdebug_co_pdiscard,
     .bdrv_debug_event       = blkdebug_debug_event,
     .bdrv_debug_breakpoint  = blkdebug_debug_breakpoint,