-rw-r--r--  block/gluster.c  77
-rwxr-xr-x  configure         8
2 files changed, 72 insertions, 13 deletions
diff --git a/block/gluster.c b/block/gluster.c
index 51e154c247..a8aaacf645 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -247,7 +247,7 @@ static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
     if (!ret || ret == acb->size) {
         acb->ret = 0; /* Success */
     } else if (ret < 0) {
-        acb->ret = ret; /* Read/Write failed */
+        acb->ret = -errno; /* Read/Write failed */
     } else {
         acb->ret = -EIO; /* Partial read/write - fail it */
     }
@@ -314,6 +314,23 @@ static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
         goto out;
     }
 
+#ifdef CONFIG_GLUSTERFS_XLATOR_OPT
+    /* Without this, if fsync fails for a recoverable reason (for instance,
+     * ENOSPC), gluster will dump its cache, preventing retries. This means
+     * almost certain data loss. Not all gluster versions support the
+     * 'resync-failed-syncs-after-fsync' key value, but there is no way to
+     * discover during runtime if it is supported (this api returns success for
+     * unknown key/value pairs) */
+    ret = glfs_set_xlator_option(s->glfs, "*-write-behind",
+                                 "resync-failed-syncs-after-fsync",
+                                 "on");
+    if (ret < 0) {
+        error_setg_errno(errp, errno, "Unable to set xlator key/value pair");
+        ret = -errno;
+        goto out;
+    }
+#endif
+
     qemu_gluster_parse_flags(bdrv_flags, &open_flags);
 
     s->fd = glfs_open(s->glfs, gconf->image, open_flags);
@@ -366,6 +383,16 @@ static int qemu_gluster_reopen_prepare(BDRVReopenState *state,
         goto exit;
     }
 
+#ifdef CONFIG_GLUSTERFS_XLATOR_OPT
+    ret = glfs_set_xlator_option(reop_s->glfs, "*-write-behind",
+                                 "resync-failed-syncs-after-fsync", "on");
+    if (ret < 0) {
+        error_setg_errno(errp, errno, "Unable to set xlator key/value pair");
+        ret = -errno;
+        goto exit;
+    }
+#endif
+
     reop_s->fd = glfs_open(reop_s->glfs, gconf->image, open_flags);
     if (reop_s->fd == NULL) {
         /* reops->glfs will be cleaned up in _abort */
@@ -589,6 +616,17 @@ static coroutine_fn int qemu_gluster_co_writev(BlockDriverState *bs,
     return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 1);
 }
 
+static void qemu_gluster_close(BlockDriverState *bs)
+{
+    BDRVGlusterState *s = bs->opaque;
+
+    if (s->fd) {
+        glfs_close(s->fd);
+        s->fd = NULL;
+    }
+    glfs_fini(s->glfs);
+}
+
 static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
 {
     int ret;
@@ -602,11 +640,35 @@ static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
 
     ret = glfs_fsync_async(s->fd, gluster_finish_aiocb, &acb);
     if (ret < 0) {
-        return -errno;
+        ret = -errno;
+        goto error;
     }
 
     qemu_coroutine_yield();
+    if (acb.ret < 0) {
+        ret = acb.ret;
+        goto error;
+    }
+
     return acb.ret;
+
+error:
+    /* Some versions of Gluster (3.5.6 -> 3.5.8?) will not retain its cache
+     * after a fsync failure, so we have no way of allowing the guest to safely
+     * continue. Gluster versions prior to 3.5.6 don't retain the cache
+     * either, but will invalidate the fd on error, so this is again our only
+     * option.
+     *
+     * The 'resync-failed-syncs-after-fsync' xlator option for the
+     * write-behind cache will cause later gluster versions to retain its
+     * cache after error, so long as the fd remains open. However, we
+     * currently have no way of knowing if this option is supported.
+     *
+     * TODO: Once gluster provides a way for us to determine if the option
+     * is supported, bypass the closure and setting drv to NULL. */
+    qemu_gluster_close(bs);
+    bs->drv = NULL;
+    return ret;
 }
 
 #ifdef CONFIG_GLUSTERFS_DISCARD
@@ -661,17 +723,6 @@ static int64_t qemu_gluster_allocated_file_size(BlockDriverState *bs)
     }
 }
 
-static void qemu_gluster_close(BlockDriverState *bs)
-{
-    BDRVGlusterState *s = bs->opaque;
-
-    if (s->fd) {
-        glfs_close(s->fd);
-        s->fd = NULL;
-    }
-    glfs_fini(s->glfs);
-}
-
 static int qemu_gluster_has_zero_init(BlockDriverState *bs)
 {
     /* GlusterFS volume could be backed by a block device */
diff --git a/configure b/configure
index f1c307bfc6..ab54f3c8ee 100755
--- a/configure
+++ b/configure
@@ -298,6 +298,7 @@ coroutine=""
 coroutine_pool=""
 seccomp=""
 glusterfs=""
+glusterfs_xlator_opt="no"
 glusterfs_discard="no"
 glusterfs_zerofill="no"
 archipelago="no"
@@ -3400,6 +3401,9 @@ if test "$glusterfs" != "no" ; then
glusterfs="yes"
glusterfs_cflags=`$pkg_config --cflags glusterfs-api`
glusterfs_libs=`$pkg_config --libs glusterfs-api`
+ if $pkg_config --atleast-version=4 glusterfs-api; then
+ glusterfs_xlator_opt="yes"
+ fi
if $pkg_config --atleast-version=5 glusterfs-api; then
glusterfs_discard="yes"
fi
@@ -5342,6 +5346,10 @@ if test "$glusterfs" = "yes" ; then
echo "GLUSTERFS_LIBS=$glusterfs_libs" >> $config_host_mak
fi
+if test "$glusterfs_xlator_opt" = "yes" ; then
+ echo "CONFIG_GLUSTERFS_XLATOR_OPT=y" >> $config_host_mak
+fi
+
if test "$glusterfs_discard" = "yes" ; then
echo "CONFIG_GLUSTERFS_DISCARD=y" >> $config_host_mak
fi