Diffstat (limited to 'block/qcow2.h')
-rw-r--r--  block/qcow2.h  680
1 file changed, 527 insertions, 153 deletions
diff --git a/block/qcow2.h b/block/qcow2.h
index 438a1dee9e..a9e3481c6e 100644
--- a/block/qcow2.h
+++ b/block/qcow2.h
@@ -28,6 +28,7 @@
#include "crypto/block.h"
#include "qemu/coroutine.h"
#include "qemu/units.h"
+#include "block/block_int.h"
//#define DEBUG_ALLOC
//#define DEBUG_ALLOC2
@@ -50,20 +51,26 @@
/* 8 MB refcount table is enough for 2 PB images at 64k cluster size
* (128 GB for 512 byte clusters, 2 EB for 2 MB clusters) */
-#define QCOW_MAX_REFTABLE_SIZE S_8MiB
+#define QCOW_MAX_REFTABLE_SIZE (8 * MiB)
/* 32 MB L1 table is enough for 2 PB images at 64k cluster size
* (128 GB for 512 byte clusters, 2 EB for 2 MB clusters) */
-#define QCOW_MAX_L1_SIZE S_32MiB
+#define QCOW_MAX_L1_SIZE (32 * MiB)
/* Allow for an average of 1k per snapshot table entry, should be plenty of
* space for snapshot names and IDs */
#define QCOW_MAX_SNAPSHOTS_SIZE (1024 * QCOW_MAX_SNAPSHOTS)
+/* Maximum amount of extra data per snapshot table entry to accept */
+#define QCOW_MAX_SNAPSHOT_EXTRA_DATA 1024
+
/* Bitmap header extension constraints */
#define QCOW2_MAX_BITMAPS 65535
#define QCOW2_MAX_BITMAP_DIRECTORY_SIZE (1024 * QCOW2_MAX_BITMAPS)
+/* Maximum number of parallel sub-requests per guest request */
+#define QCOW2_MAX_WORKERS 8
+
/* indicate that the refcount of the referenced cluster is exactly one. */
#define QCOW_OFLAG_COPIED (1ULL << 63)
/* indicate that the cluster is compressed (they never have the copied flag) */
@@ -71,9 +78,39 @@
/* The cluster reads as all zeros */
#define QCOW_OFLAG_ZERO (1ULL << 0)
+#define QCOW_EXTL2_SUBCLUSTERS_PER_CLUSTER 32
+
+/* The subcluster X [0..31] is allocated */
+#define QCOW_OFLAG_SUB_ALLOC(X) (1ULL << (X))
+/* The subcluster X [0..31] reads as zeroes */
+#define QCOW_OFLAG_SUB_ZERO(X) (QCOW_OFLAG_SUB_ALLOC(X) << 32)
+/* Subclusters [X, Y) (0 <= X <= Y <= 32) are allocated */
+#define QCOW_OFLAG_SUB_ALLOC_RANGE(X, Y) \
+ (QCOW_OFLAG_SUB_ALLOC(Y) - QCOW_OFLAG_SUB_ALLOC(X))
+/* Subclusters [X, Y) (0 <= X <= Y <= 32) read as zeroes */
+#define QCOW_OFLAG_SUB_ZERO_RANGE(X, Y) \
+ (QCOW_OFLAG_SUB_ALLOC_RANGE(X, Y) << 32)
+/* L2 entry bitmap with all allocation bits set */
+#define QCOW_L2_BITMAP_ALL_ALLOC (QCOW_OFLAG_SUB_ALLOC_RANGE(0, 32))
+/* L2 entry bitmap with all "read as zeroes" bits set */
+#define QCOW_L2_BITMAP_ALL_ZEROES (QCOW_OFLAG_SUB_ZERO_RANGE(0, 32))
+
+/* Size of normal and extended L2 entries */
+#define L2E_SIZE_NORMAL (sizeof(uint64_t))
+#define L2E_SIZE_EXTENDED (sizeof(uint64_t) * 2)
+
+/* Size of L1 table entries */
+#define L1E_SIZE (sizeof(uint64_t))
+
+/* Size of reftable entries */
+#define REFTABLE_ENTRY_SIZE (sizeof(uint64_t))
+
#define MIN_CLUSTER_BITS 9
#define MAX_CLUSTER_BITS 21
+/* Defined in the qcow2 spec (compressed cluster descriptor) */
+#define QCOW2_COMPRESSED_SECTOR_SIZE 512U
+
/* Must be at least 2 to cover COW */
#define MIN_L2_CACHE_SIZE 2 /* cache entries */
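
As an illustration (not part of the patch): QCOW_OFLAG_SUB_ALLOC_RANGE(X, Y)
works because subtracting (1 << X) from (1 << Y) leaves exactly bits X..Y-1
set. A minimal standalone sketch, with the macro bodies copied from the hunk
above and a main() harness added purely for demonstration:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define QCOW_OFLAG_SUB_ALLOC(X)          (1ULL << (X))
/* Subclusters [X, Y) (0 <= X <= Y <= 32) are allocated */
#define QCOW_OFLAG_SUB_ALLOC_RANGE(X, Y) \
    (QCOW_OFLAG_SUB_ALLOC(Y) - QCOW_OFLAG_SUB_ALLOC(X))
/* Subclusters [X, Y) (0 <= X <= Y <= 32) read as zeroes */
#define QCOW_OFLAG_SUB_ZERO_RANGE(X, Y) \
    (QCOW_OFLAG_SUB_ALLOC_RANGE(X, Y) << 32)

int main(void)
{
    /* Subclusters [4, 8) allocated: bits 4..7 of the low 32 bits */
    printf("alloc [4,8): 0x%016" PRIx64 "\n",
           (uint64_t)QCOW_OFLAG_SUB_ALLOC_RANGE(4, 8));   /* ...00000000f0 */
    /* The same range marked "reads as zeroes": bits 36..39 */
    printf("zero  [4,8): 0x%016" PRIx64 "\n",
           (uint64_t)QCOW_OFLAG_SUB_ZERO_RANGE(4, 8));    /* ...f000000000 */
    /* All 32 subclusters allocated (QCOW_L2_BITMAP_ALL_ALLOC) */
    printf("all alloc:   0x%016" PRIx64 "\n",
           (uint64_t)QCOW_OFLAG_SUB_ALLOC_RANGE(0, 32));  /* ...00ffffffff */
    return 0;
}
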
@@ -81,20 +118,22 @@
#define MIN_REFCOUNT_CACHE_SIZE 4 /* clusters */
#ifdef CONFIG_LINUX
-#define DEFAULT_L2_CACHE_MAX_SIZE S_32MiB
+#define DEFAULT_L2_CACHE_MAX_SIZE (32 * MiB)
#define DEFAULT_CACHE_CLEAN_INTERVAL 600 /* seconds */
#else
-#define DEFAULT_L2_CACHE_MAX_SIZE S_8MiB
+#define DEFAULT_L2_CACHE_MAX_SIZE (8 * MiB)
/* Cache clean interval is currently available only on Linux, so must be 0 */
#define DEFAULT_CACHE_CLEAN_INTERVAL 0
#endif
-#define DEFAULT_CLUSTER_SIZE S_64KiB
+#define DEFAULT_CLUSTER_SIZE 65536
+#define QCOW2_OPT_DATA_FILE "data-file"
#define QCOW2_OPT_LAZY_REFCOUNTS "lazy-refcounts"
#define QCOW2_OPT_DISCARD_REQUEST "pass-discard-request"
#define QCOW2_OPT_DISCARD_SNAPSHOT "pass-discard-snapshot"
#define QCOW2_OPT_DISCARD_OTHER "pass-discard-other"
+#define QCOW2_OPT_DISCARD_NO_UNREF "discard-no-unref"
#define QCOW2_OPT_OVERLAP "overlap-check"
#define QCOW2_OPT_OVERLAP_TEMPLATE "overlap-check.template"
#define QCOW2_OPT_OVERLAP_MAIN_HEADER "overlap-check.main-header"
@@ -134,8 +173,16 @@ typedef struct QCowHeader {
uint32_t refcount_order;
uint32_t header_length;
+
+ /* Additional fields */
+ uint8_t compression_type;
+
+ /* header must be a multiple of 8 */
+ uint8_t padding[7];
} QEMU_PACKED QCowHeader;
+QEMU_BUILD_BUG_ON(!QEMU_IS_ALIGNED(sizeof(QCowHeader), 8));
+
typedef struct QEMU_PACKED QCowSnapshotHeader {
/* header is 8 byte aligned */
uint64_t l1_table_offset;
@@ -159,6 +206,7 @@ typedef struct QEMU_PACKED QCowSnapshotHeader {
typedef struct QEMU_PACKED QCowSnapshotExtraData {
uint64_t vm_state_size_large;
uint64_t disk_size;
+ uint64_t icount;
} QCowSnapshotExtraData;
@@ -172,6 +220,12 @@ typedef struct QCowSnapshot {
uint32_t date_sec;
uint32_t date_nsec;
uint64_t vm_clock_nsec;
+ /* icount value for the moment when snapshot was taken */
+ uint64_t icount;
+ /* Size of all extra data, including QCowSnapshotExtraData if available */
+ uint32_t extra_data_size;
+ /* Data beyond QCowSnapshotExtraData, if any */
+ void *unknown_extra_data;
} QCowSnapshot;
struct Qcow2Cache;
@@ -197,13 +251,22 @@ enum {
/* Incompatible feature bits */
enum {
- QCOW2_INCOMPAT_DIRTY_BITNR = 0,
- QCOW2_INCOMPAT_CORRUPT_BITNR = 1,
- QCOW2_INCOMPAT_DIRTY = 1 << QCOW2_INCOMPAT_DIRTY_BITNR,
- QCOW2_INCOMPAT_CORRUPT = 1 << QCOW2_INCOMPAT_CORRUPT_BITNR,
-
- QCOW2_INCOMPAT_MASK = QCOW2_INCOMPAT_DIRTY
- | QCOW2_INCOMPAT_CORRUPT,
+ QCOW2_INCOMPAT_DIRTY_BITNR = 0,
+ QCOW2_INCOMPAT_CORRUPT_BITNR = 1,
+ QCOW2_INCOMPAT_DATA_FILE_BITNR = 2,
+ QCOW2_INCOMPAT_COMPRESSION_BITNR = 3,
+ QCOW2_INCOMPAT_EXTL2_BITNR = 4,
+ QCOW2_INCOMPAT_DIRTY = 1 << QCOW2_INCOMPAT_DIRTY_BITNR,
+ QCOW2_INCOMPAT_CORRUPT = 1 << QCOW2_INCOMPAT_CORRUPT_BITNR,
+ QCOW2_INCOMPAT_DATA_FILE = 1 << QCOW2_INCOMPAT_DATA_FILE_BITNR,
+ QCOW2_INCOMPAT_COMPRESSION = 1 << QCOW2_INCOMPAT_COMPRESSION_BITNR,
+ QCOW2_INCOMPAT_EXTL2 = 1 << QCOW2_INCOMPAT_EXTL2_BITNR,
+
+ QCOW2_INCOMPAT_MASK = QCOW2_INCOMPAT_DIRTY
+ | QCOW2_INCOMPAT_CORRUPT
+ | QCOW2_INCOMPAT_DATA_FILE
+ | QCOW2_INCOMPAT_COMPRESSION
+ | QCOW2_INCOMPAT_EXTL2,
};
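
As an illustration (not part of the patch): any incompatible-feature bit set
outside QCOW2_INCOMPAT_MASK belongs to a feature this code does not know
about, so the image must be refused; in QEMU the corresponding check happens
when the image is opened. A minimal standalone sketch of that check, with the
enum values copied from the hunk above; check_incompatible() and main() are
illustrative only and not QEMU functions:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

enum {
    QCOW2_INCOMPAT_DIRTY       = 1 << 0,
    QCOW2_INCOMPAT_CORRUPT     = 1 << 1,
    QCOW2_INCOMPAT_DATA_FILE   = 1 << 2,
    QCOW2_INCOMPAT_COMPRESSION = 1 << 3,
    QCOW2_INCOMPAT_EXTL2       = 1 << 4,

    QCOW2_INCOMPAT_MASK        = QCOW2_INCOMPAT_DIRTY
                               | QCOW2_INCOMPAT_CORRUPT
                               | QCOW2_INCOMPAT_DATA_FILE
                               | QCOW2_INCOMPAT_COMPRESSION
                               | QCOW2_INCOMPAT_EXTL2,
};

/* Return 0 if all incompatible features are understood, -1 otherwise */
static int check_incompatible(uint64_t incompatible_features)
{
    uint64_t unknown = incompatible_features & ~(uint64_t)QCOW2_INCOMPAT_MASK;

    if (unknown) {
        fprintf(stderr, "unsupported incompatible features: 0x%" PRIx64 "\n",
                unknown);
        return -1;
    }
    return 0;
}

int main(void)
{
    printf("extl2 only:  %d\n", check_incompatible(QCOW2_INCOMPAT_EXTL2));
    printf("unknown bit: %d\n", check_incompatible(1ULL << 10));
    return 0;
}
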
/* Compatible feature bits */
@@ -216,10 +279,13 @@ enum {
/* Autoclear feature bits */
enum {
- QCOW2_AUTOCLEAR_BITMAPS_BITNR = 0,
- QCOW2_AUTOCLEAR_BITMAPS = 1 << QCOW2_AUTOCLEAR_BITMAPS_BITNR,
+ QCOW2_AUTOCLEAR_BITMAPS_BITNR = 0,
+ QCOW2_AUTOCLEAR_DATA_FILE_RAW_BITNR = 1,
+ QCOW2_AUTOCLEAR_BITMAPS = 1 << QCOW2_AUTOCLEAR_BITMAPS_BITNR,
+ QCOW2_AUTOCLEAR_DATA_FILE_RAW = 1 << QCOW2_AUTOCLEAR_DATA_FILE_RAW_BITNR,
- QCOW2_AUTOCLEAR_MASK = QCOW2_AUTOCLEAR_BITMAPS,
+ QCOW2_AUTOCLEAR_MASK = QCOW2_AUTOCLEAR_BITMAPS
+ | QCOW2_AUTOCLEAR_DATA_FILE_RAW,
};
enum qcow2_discard_type {
@@ -256,11 +322,15 @@ typedef struct Qcow2BitmapHeaderExt {
uint64_t bitmap_directory_offset;
} QEMU_PACKED Qcow2BitmapHeaderExt;
+#define QCOW2_MAX_THREADS 4
+
typedef struct BDRVQcow2State {
int cluster_bits;
int cluster_size;
- int cluster_sectors;
int l2_slice_size;
+ int subcluster_bits;
+ int subcluster_size;
+ int subclusters_per_cluster;
int l2_bits;
int l2_size;
int l1_size;
@@ -273,14 +343,11 @@ typedef struct BDRVQcow2State {
uint64_t l1_table_offset;
uint64_t *l1_table;
- Qcow2Cache* l2_table_cache;
- Qcow2Cache* refcount_block_cache;
+ Qcow2Cache *l2_table_cache;
+ Qcow2Cache *refcount_block_cache;
QEMUTimer *cache_clean_timer;
unsigned cache_clean_interval;
- uint8_t *cluster_cache;
- uint8_t *cluster_data;
- uint64_t cluster_cache_offset;
QLIST_HEAD(, QCowL2Meta) cluster_allocs;
uint64_t *refcount_table;
@@ -319,6 +386,8 @@ typedef struct BDRVQcow2State {
bool discard_passthrough[QCOW2_DISCARD_MAX];
+ bool discard_no_unref;
+
int overlap_check; /* bitmask of Qcow2MetadataOverlap values */
bool signaled_corruption;
@@ -327,7 +396,7 @@ typedef struct BDRVQcow2State {
uint64_t autoclear_features;
size_t unknown_header_fields_size;
- void* unknown_header_fields;
+ void *unknown_header_fields;
QLIST_HEAD(, Qcow2UnknownHeaderExtension) unknown_header_ext;
QTAILQ_HEAD (, Qcow2DiscardRegion) discards;
bool cache_discards;
@@ -337,9 +406,22 @@ typedef struct BDRVQcow2State {
* override) */
char *image_backing_file;
char *image_backing_format;
+ char *image_data_file;
+
+ CoQueue thread_task_queue;
+ int nb_threads;
+
+ BdrvChild *data_file;
- CoQueue compress_wait_queue;
- int nb_compress_threads;
+ bool metadata_preallocation_checked;
+ bool metadata_preallocation;
+ /*
+ * Compression type used for the image. Default: 0 - ZLIB
+ * The image compression type is set on image creation.
+ * For now, the only way to change the compression type
+ * is to convert the image with the desired compression type set.
+ */
+ Qcow2CompressionType compression_type;
} BDRVQcow2State;
typedef struct Qcow2COWRegion {
@@ -355,17 +437,18 @@ typedef struct Qcow2COWRegion {
/**
* Describes an in-flight (part of a) write request that writes to clusters
- * that are not referenced in their L2 table yet.
+ * that need to have their L2 table entries updated (because they are
+ * newly allocated or need changes in their L2 bitmaps)
*/
typedef struct QCowL2Meta
{
- /** Guest offset of the first newly allocated cluster */
+ /** Guest offset of the first updated cluster */
uint64_t offset;
- /** Host offset of the first newly allocated cluster */
+ /** Host offset of the first updated cluster */
uint64_t alloc_offset;
- /** Number of newly allocated clusters */
+ /** Number of updated clusters */
int nb_clusters;
/** Do not free the old clusters */
@@ -378,23 +461,40 @@ typedef struct QCowL2Meta
CoQueue dependent_requests;
/**
- * The COW Region between the start of the first allocated cluster and the
- * area the guest actually writes to.
+ * The COW Region immediately before the area the guest actually
+ * writes to. This (part of the) write request starts at
+ * cow_start.offset + cow_start.nb_bytes.
*/
Qcow2COWRegion cow_start;
/**
- * The COW Region between the area the guest actually writes to and the
- * end of the last allocated cluster.
+ * The COW Region immediately after the area the guest actually
+ * writes to. This (part of the) write request ends at cow_end.offset
+ * (which must always be set even when cow_end.nb_bytes is 0).
*/
Qcow2COWRegion cow_end;
+ /*
+ * Indicates that COW regions are already handled and do not require
+ * any more processing.
+ */
+ bool skip_cow;
+
+ /**
+ * Indicates that this is not a normal write request but a preallocation.
+ * If the image has extended L2 entries this means that no new individual
+ * subclusters will be marked as allocated in the L2 bitmap (but any
+ * existing contents of that bitmap will be kept).
+ */
+ bool prealloc;
+
/**
* The I/O vector with the data from the actual guest write request.
* If non-NULL, this is meant to be merged together with the data
* from @cow_start and @cow_end into one single write operation.
*/
QEMUIOVector *data_qiov;
+ size_t data_qiov_offset;
/** Pointer to next L2Meta of the same write request */
struct QCowL2Meta *next;
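
As a worked example (not part of the patch) of the cow_start/cow_end
description above: take a freshly allocated 64 KiB cluster and a guest write
of 4 KiB starting 6 KiB into it. The 6 KiB before the write and the 54 KiB
after it must be filled by copy-on-write, and the write itself spans
[cow_start.offset + cow_start.nb_bytes, cow_end.offset). The sketch below is
a simplified model: CowRegionExample is an illustrative stand-in for
Qcow2COWRegion, and it assumes COW up to the cluster boundaries, while the
real code also works at subcluster granularity:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

typedef struct {
    uint64_t offset;    /* start of the COW region (relative to the cluster) */
    unsigned nb_bytes;  /* length of the COW region */
} CowRegionExample;     /* illustrative stand-in for Qcow2COWRegion */

int main(void)
{
    const uint64_t cluster_size = 65536;        /* 64 KiB cluster */
    const uint64_t write_off    = 6 * 1024;     /* guest write at 6 KiB... */
    const uint64_t write_len    = 4 * 1024;     /* ...of 4 KiB */

    /* Region before the guest data that must be copied from the old location */
    CowRegionExample cow_start = { 0, (unsigned)write_off };
    /* Region after the guest data, up to the end of the cluster */
    CowRegionExample cow_end = {
        write_off + write_len,
        (unsigned)(cluster_size - (write_off + write_len)),
    };

    printf("guest write: [%" PRIu64 ", %" PRIu64 ")\n",
           cow_start.offset + cow_start.nb_bytes, cow_end.offset);
    printf("cow_start:   offset=%" PRIu64 " nb_bytes=%u\n",
           cow_start.offset, cow_start.nb_bytes);
    printf("cow_end:     offset=%" PRIu64 " nb_bytes=%u\n",
           cow_end.offset, cow_end.nb_bytes);
    return 0;
}
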
@@ -402,6 +502,33 @@ typedef struct QCowL2Meta
QLIST_ENTRY(QCowL2Meta) next_in_flight;
} QCowL2Meta;
+/*
+ * In images with standard L2 entries all clusters are treated as if
+ * they had one subcluster so QCow2ClusterType and QCow2SubclusterType
+ * can be mapped to each other and have the exact same meaning
+ * (QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC cannot happen in these images).
+ *
+ * In images with extended L2 entries QCow2ClusterType refers to the
+ * complete cluster and QCow2SubclusterType to each of the individual
+ * subclusters, so there are several possible combinations:
+ *
+ * |--------------+---------------------------|
+ * | Cluster type | Possible subcluster types |
+ * |--------------+---------------------------|
+ * | UNALLOCATED | UNALLOCATED_PLAIN |
+ * | | ZERO_PLAIN |
+ * |--------------+---------------------------|
+ * | NORMAL | UNALLOCATED_ALLOC |
+ * | | ZERO_ALLOC |
+ * | | NORMAL |
+ * |--------------+---------------------------|
+ * | COMPRESSED | COMPRESSED |
+ * |--------------+---------------------------|
+ *
+ * QCOW2_SUBCLUSTER_INVALID means that the L2 entry is incorrect and
+ * the image should be marked corrupt.
+ */
+
typedef enum QCow2ClusterType {
QCOW2_CLUSTER_UNALLOCATED,
QCOW2_CLUSTER_ZERO_PLAIN,
@@ -410,6 +537,16 @@ typedef enum QCow2ClusterType {
QCOW2_CLUSTER_COMPRESSED,
} QCow2ClusterType;
+typedef enum QCow2SubclusterType {
+ QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN,
+ QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC,
+ QCOW2_SUBCLUSTER_ZERO_PLAIN,
+ QCOW2_SUBCLUSTER_ZERO_ALLOC,
+ QCOW2_SUBCLUSTER_NORMAL,
+ QCOW2_SUBCLUSTER_COMPRESSED,
+ QCOW2_SUBCLUSTER_INVALID,
+} QCow2SubclusterType;
+
typedef enum QCow2MetadataOverlap {
QCOW2_OL_MAIN_HEADER_BITNR = 0,
QCOW2_OL_ACTIVE_L1_BITNR = 1,
@@ -452,10 +589,69 @@ typedef enum QCow2MetadataOverlap {
(QCOW2_OL_CACHED | QCOW2_OL_INACTIVE_L2)
#define L1E_OFFSET_MASK 0x00fffffffffffe00ULL
+#define L1E_RESERVED_MASK 0x7f000000000001ffULL
#define L2E_OFFSET_MASK 0x00fffffffffffe00ULL
-#define L2E_COMPRESSED_OFFSET_SIZE_MASK 0x3fffffffffffffffULL
+#define L2E_STD_RESERVED_MASK 0x3f000000000001feULL
#define REFT_OFFSET_MASK 0xfffffffffffffe00ULL
+#define REFT_RESERVED_MASK 0x1ffULL
+
+#define INV_OFFSET (-1ULL)
+
+static inline bool has_subclusters(BDRVQcow2State *s)
+{
+ return s->incompatible_features & QCOW2_INCOMPAT_EXTL2;
+}
+
+static inline size_t l2_entry_size(BDRVQcow2State *s)
+{
+ return has_subclusters(s) ? L2E_SIZE_EXTENDED : L2E_SIZE_NORMAL;
+}
+
+static inline uint64_t get_l2_entry(BDRVQcow2State *s, uint64_t *l2_slice,
+ int idx)
+{
+ idx *= l2_entry_size(s) / sizeof(uint64_t);
+ return be64_to_cpu(l2_slice[idx]);
+}
+
+static inline uint64_t get_l2_bitmap(BDRVQcow2State *s, uint64_t *l2_slice,
+ int idx)
+{
+ if (has_subclusters(s)) {
+ idx *= l2_entry_size(s) / sizeof(uint64_t);
+ return be64_to_cpu(l2_slice[idx + 1]);
+ } else {
+ return 0; /* For convenience only; this value has no meaning. */
+ }
+}
+
+static inline void set_l2_entry(BDRVQcow2State *s, uint64_t *l2_slice,
+ int idx, uint64_t entry)
+{
+ idx *= l2_entry_size(s) / sizeof(uint64_t);
+ l2_slice[idx] = cpu_to_be64(entry);
+}
+
+static inline void set_l2_bitmap(BDRVQcow2State *s, uint64_t *l2_slice,
+ int idx, uint64_t bitmap)
+{
+ assert(has_subclusters(s));
+ idx *= l2_entry_size(s) / sizeof(uint64_t);
+ l2_slice[idx + 1] = cpu_to_be64(bitmap);
+}
+
+static inline bool GRAPH_RDLOCK has_data_file(BlockDriverState *bs)
+{
+ BDRVQcow2State *s = bs->opaque;
+ return (s->data_file != bs->file);
+}
+
+static inline bool data_file_is_raw(BlockDriverState *bs)
+{
+ BDRVQcow2State *s = bs->opaque;
+ return !!(s->autoclear_features & QCOW2_AUTOCLEAR_DATA_FILE_RAW);
+}
static inline int64_t start_of_cluster(BDRVQcow2State *s, int64_t offset)
{
@@ -467,11 +663,21 @@ static inline int64_t offset_into_cluster(BDRVQcow2State *s, int64_t offset)
return offset & (s->cluster_size - 1);
}
+static inline int64_t offset_into_subcluster(BDRVQcow2State *s, int64_t offset)
+{
+ return offset & (s->subcluster_size - 1);
+}
+
static inline uint64_t size_to_clusters(BDRVQcow2State *s, uint64_t size)
{
return (size + (s->cluster_size - 1)) >> s->cluster_bits;
}
+static inline uint64_t size_to_subclusters(BDRVQcow2State *s, uint64_t size)
+{
+ return (size + (s->subcluster_size - 1)) >> s->subcluster_bits;
+}
+
static inline int64_t size_to_l1(BDRVQcow2State *s, int64_t size)
{
int shift = s->cluster_bits + s->l2_bits;
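
As an illustration (not part of the patch): the get/set_l2_entry() and
get/set_l2_bitmap() helpers added above encode the extended-L2 slice layout,
in which every logical L2 index covers two big-endian 64-bit words (the entry
followed by its subcluster bitmap), so the word index is simply doubled. With
64 KiB clusters this also means each of the 32 subclusters covers 2 KiB. A
standalone sketch of the index arithmetic, using host-endian words to stay
self-contained (the real helpers convert with cpu_to_be64()/be64_to_cpu());
entry_words() and the toy slice are assumptions made for the example:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <inttypes.h>

/* 64-bit words per logical L2 entry: 1 for standard L2, 2 for extended L2 */
static int entry_words(bool extended_l2)
{
    return extended_l2 ? 2 : 1;
}

int main(void)
{
    uint64_t slice[8] = { 0 };   /* toy slice: room for 4 extended entries */
    bool extl2 = true;

    /* Logical entry #2: entry word in slot 4, bitmap word in slot 5 */
    int idx = 2 * entry_words(extl2);

    slice[idx]     = (1ULL << 63) | 0x50000;   /* COPIED flag + host offset */
    slice[idx + 1] = (1ULL << 32) | 0x2;       /* sc 0 reads as zero, sc 1 allocated */

    printf("entry  slot %d: 0x%016" PRIx64 "\n", idx, slice[idx]);
    printf("bitmap slot %d: 0x%016" PRIx64 "\n", idx + 1, slice[idx + 1]);
    return 0;
}
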
@@ -493,27 +699,109 @@ static inline int offset_to_l2_slice_index(BDRVQcow2State *s, int64_t offset)
return (offset >> s->cluster_bits) & (s->l2_slice_size - 1);
}
+static inline int offset_to_sc_index(BDRVQcow2State *s, int64_t offset)
+{
+ return (offset >> s->subcluster_bits) & (s->subclusters_per_cluster - 1);
+}
+
static inline int64_t qcow2_vm_state_offset(BDRVQcow2State *s)
{
return (int64_t)s->l1_vm_state_index << (s->cluster_bits + s->l2_bits);
}
-static inline QCow2ClusterType qcow2_get_cluster_type(uint64_t l2_entry)
+static inline QCow2ClusterType GRAPH_RDLOCK
+qcow2_get_cluster_type(BlockDriverState *bs, uint64_t l2_entry)
{
+ BDRVQcow2State *s = bs->opaque;
+
if (l2_entry & QCOW_OFLAG_COMPRESSED) {
return QCOW2_CLUSTER_COMPRESSED;
- } else if (l2_entry & QCOW_OFLAG_ZERO) {
+ } else if ((l2_entry & QCOW_OFLAG_ZERO) && !has_subclusters(s)) {
if (l2_entry & L2E_OFFSET_MASK) {
return QCOW2_CLUSTER_ZERO_ALLOC;
}
return QCOW2_CLUSTER_ZERO_PLAIN;
} else if (!(l2_entry & L2E_OFFSET_MASK)) {
- return QCOW2_CLUSTER_UNALLOCATED;
+ /* Offset 0 generally means unallocated, but it is ambiguous with
+ * external data files because 0 is a valid offset there. However, all
+ * clusters in external data files always have refcount 1, so we can
+ * rely on QCOW_OFLAG_COPIED to disambiguate. */
+ if (has_data_file(bs) && (l2_entry & QCOW_OFLAG_COPIED)) {
+ return QCOW2_CLUSTER_NORMAL;
+ } else {
+ return QCOW2_CLUSTER_UNALLOCATED;
+ }
} else {
return QCOW2_CLUSTER_NORMAL;
}
}
+/*
+ * In an image without subclusters @l2_bitmap is ignored and
+ * @sc_index must be 0.
+ * Return QCOW2_SUBCLUSTER_INVALID if an invalid l2 entry is detected
+ * (this checks the whole entry and bitmap, not only the bits related
+ * to subcluster @sc_index).
+ */
+static inline GRAPH_RDLOCK
+QCow2SubclusterType qcow2_get_subcluster_type(BlockDriverState *bs,
+ uint64_t l2_entry,
+ uint64_t l2_bitmap,
+ unsigned sc_index)
+{
+ BDRVQcow2State *s = bs->opaque;
+ QCow2ClusterType type = qcow2_get_cluster_type(bs, l2_entry);
+ assert(sc_index < s->subclusters_per_cluster);
+
+ if (has_subclusters(s)) {
+ switch (type) {
+ case QCOW2_CLUSTER_COMPRESSED:
+ return QCOW2_SUBCLUSTER_COMPRESSED;
+ case QCOW2_CLUSTER_NORMAL:
+ if ((l2_bitmap >> 32) & l2_bitmap) {
+ return QCOW2_SUBCLUSTER_INVALID;
+ } else if (l2_bitmap & QCOW_OFLAG_SUB_ZERO(sc_index)) {
+ return QCOW2_SUBCLUSTER_ZERO_ALLOC;
+ } else if (l2_bitmap & QCOW_OFLAG_SUB_ALLOC(sc_index)) {
+ return QCOW2_SUBCLUSTER_NORMAL;
+ } else {
+ return QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC;
+ }
+ case QCOW2_CLUSTER_UNALLOCATED:
+ if (l2_bitmap & QCOW_L2_BITMAP_ALL_ALLOC) {
+ return QCOW2_SUBCLUSTER_INVALID;
+ } else if (l2_bitmap & QCOW_OFLAG_SUB_ZERO(sc_index)) {
+ return QCOW2_SUBCLUSTER_ZERO_PLAIN;
+ } else {
+ return QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN;
+ }
+ default:
+ g_assert_not_reached();
+ }
+ } else {
+ switch (type) {
+ case QCOW2_CLUSTER_COMPRESSED:
+ return QCOW2_SUBCLUSTER_COMPRESSED;
+ case QCOW2_CLUSTER_ZERO_PLAIN:
+ return QCOW2_SUBCLUSTER_ZERO_PLAIN;
+ case QCOW2_CLUSTER_ZERO_ALLOC:
+ return QCOW2_SUBCLUSTER_ZERO_ALLOC;
+ case QCOW2_CLUSTER_NORMAL:
+ return QCOW2_SUBCLUSTER_NORMAL;
+ case QCOW2_CLUSTER_UNALLOCATED:
+ return QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN;
+ default:
+ g_assert_not_reached();
+ }
+ }
+}
+
+static inline bool qcow2_cluster_is_allocated(QCow2ClusterType type)
+{
+ return (type == QCOW2_CLUSTER_COMPRESSED || type == QCOW2_CLUSTER_NORMAL ||
+ type == QCOW2_CLUSTER_ZERO_ALLOC);
+}
+
/* Check whether refcounts are eager or lazy */
static inline bool qcow2_need_accurate_refcounts(BDRVQcow2State *s)
{
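
As an illustration (not part of the patch): qcow2_get_cluster_type() above is
a priority check on a raw L2 entry, compressed flag first, then the zero flag
(standard L2 only), then whether the offset field is non-zero. Below is a
simplified standalone decoder for a standard (non-extended-L2) image without
an external data file; QCOW_OFLAG_COMPRESSED is bit 62 in the full header but
not visible in this hunk, and the sample entries in main() are made up for
demonstration:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define QCOW_OFLAG_COPIED     (1ULL << 63)
#define QCOW_OFLAG_COMPRESSED (1ULL << 62)
#define QCOW_OFLAG_ZERO       (1ULL << 0)
#define L2E_OFFSET_MASK       0x00fffffffffffe00ULL

/* Simplified mirror of qcow2_get_cluster_type() for standard L2 entries */
static const char *cluster_type(uint64_t l2_entry)
{
    if (l2_entry & QCOW_OFLAG_COMPRESSED) {
        return "COMPRESSED";
    } else if (l2_entry & QCOW_OFLAG_ZERO) {
        return (l2_entry & L2E_OFFSET_MASK) ? "ZERO_ALLOC" : "ZERO_PLAIN";
    } else if (!(l2_entry & L2E_OFFSET_MASK)) {
        return "UNALLOCATED";
    } else {
        return "NORMAL";
    }
}

int main(void)
{
    uint64_t samples[] = {
        0,                                /* never written */
        QCOW_OFLAG_ZERO,                  /* discarded, reads as zeroes */
        QCOW_OFLAG_COPIED | 0x50000,      /* allocated, refcount exactly 1 */
        QCOW_OFLAG_COMPRESSED | 0x12345,  /* compressed cluster descriptor */
    };

    for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
        printf("0x%016" PRIx64 " -> %s\n", samples[i], cluster_type(samples[i]));
    }
    return 0;
}
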
@@ -546,14 +834,14 @@ int64_t qcow2_refcount_metadata_size(int64_t clusters, size_t cluster_size,
int refcount_order, bool generous_increase,
uint64_t *refblock_count);
-int qcow2_mark_dirty(BlockDriverState *bs);
-int qcow2_mark_corrupt(BlockDriverState *bs);
-int qcow2_mark_consistent(BlockDriverState *bs);
-int qcow2_update_header(BlockDriverState *bs);
+int GRAPH_RDLOCK qcow2_mark_dirty(BlockDriverState *bs);
+int GRAPH_RDLOCK qcow2_mark_corrupt(BlockDriverState *bs);
+int GRAPH_RDLOCK qcow2_update_header(BlockDriverState *bs);
-void qcow2_signal_corruption(BlockDriverState *bs, bool fatal, int64_t offset,
- int64_t size, const char *message_format, ...)
- GCC_FMT_ATTR(5, 6);
+void GRAPH_RDLOCK
+qcow2_signal_corruption(BlockDriverState *bs, bool fatal, int64_t offset,
+ int64_t size, const char *message_format, ...)
+ G_GNUC_PRINTF(5, 6);
int qcow2_validate_table(BlockDriverState *bs, uint64_t offset,
uint64_t entries, size_t entry_len,
@@ -561,140 +849,226 @@ int qcow2_validate_table(BlockDriverState *bs, uint64_t offset,
Error **errp);
/* qcow2-refcount.c functions */
-int qcow2_refcount_init(BlockDriverState *bs);
+int coroutine_fn GRAPH_RDLOCK qcow2_refcount_init(BlockDriverState *bs);
void qcow2_refcount_close(BlockDriverState *bs);
-int qcow2_get_refcount(BlockDriverState *bs, int64_t cluster_index,
- uint64_t *refcount);
-
-int qcow2_update_cluster_refcount(BlockDriverState *bs, int64_t cluster_index,
- uint64_t addend, bool decrease,
- enum qcow2_discard_type type);
-
-int64_t qcow2_refcount_area(BlockDriverState *bs, uint64_t offset,
- uint64_t additional_clusters, bool exact_size,
- int new_refblock_index,
- uint64_t new_refblock_offset);
-
-int64_t qcow2_alloc_clusters(BlockDriverState *bs, uint64_t size);
-int64_t qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset,
- int64_t nb_clusters);
-int64_t qcow2_alloc_bytes(BlockDriverState *bs, int size);
-void qcow2_free_clusters(BlockDriverState *bs,
- int64_t offset, int64_t size,
- enum qcow2_discard_type type);
-void qcow2_free_any_clusters(BlockDriverState *bs, uint64_t l2_entry,
- int nb_clusters, enum qcow2_discard_type type);
-
-int qcow2_update_snapshot_refcount(BlockDriverState *bs,
- int64_t l1_table_offset, int l1_size, int addend);
-
-int coroutine_fn qcow2_flush_caches(BlockDriverState *bs);
-int coroutine_fn qcow2_write_caches(BlockDriverState *bs);
-int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
- BdrvCheckMode fix);
-
-void qcow2_process_discards(BlockDriverState *bs, int ret);
-
-int qcow2_check_metadata_overlap(BlockDriverState *bs, int ign, int64_t offset,
- int64_t size);
-int qcow2_pre_write_overlap_check(BlockDriverState *bs, int ign, int64_t offset,
- int64_t size);
-int qcow2_inc_refcounts_imrt(BlockDriverState *bs, BdrvCheckResult *res,
- void **refcount_table,
- int64_t *refcount_table_size,
- int64_t offset, int64_t size);
-
-int qcow2_change_refcount_order(BlockDriverState *bs, int refcount_order,
- BlockDriverAmendStatusCB *status_cb,
- void *cb_opaque, Error **errp);
-int qcow2_shrink_reftable(BlockDriverState *bs);
-int64_t qcow2_get_last_cluster(BlockDriverState *bs, int64_t size);
+int GRAPH_RDLOCK qcow2_get_refcount(BlockDriverState *bs, int64_t cluster_index,
+ uint64_t *refcount);
+
+int GRAPH_RDLOCK
+qcow2_update_cluster_refcount(BlockDriverState *bs, int64_t cluster_index,
+ uint64_t addend, bool decrease,
+ enum qcow2_discard_type type);
+
+int64_t GRAPH_RDLOCK
+qcow2_refcount_area(BlockDriverState *bs, uint64_t offset,
+ uint64_t additional_clusters, bool exact_size,
+ int new_refblock_index,
+ uint64_t new_refblock_offset);
+
+int64_t GRAPH_RDLOCK
+qcow2_alloc_clusters(BlockDriverState *bs, uint64_t size);
+
+int64_t GRAPH_RDLOCK coroutine_fn
+qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset,
+ int64_t nb_clusters);
+
+int64_t coroutine_fn GRAPH_RDLOCK qcow2_alloc_bytes(BlockDriverState *bs, int size);
+void GRAPH_RDLOCK qcow2_free_clusters(BlockDriverState *bs,
+ int64_t offset, int64_t size,
+ enum qcow2_discard_type type);
+void GRAPH_RDLOCK
+qcow2_free_any_cluster(BlockDriverState *bs, uint64_t l2_entry,
+ enum qcow2_discard_type type);
+
+int GRAPH_RDLOCK
+qcow2_update_snapshot_refcount(BlockDriverState *bs, int64_t l1_table_offset,
+ int l1_size, int addend);
+
+int GRAPH_RDLOCK qcow2_flush_caches(BlockDriverState *bs);
+int GRAPH_RDLOCK qcow2_write_caches(BlockDriverState *bs);
+int coroutine_fn qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
+ BdrvCheckMode fix);
+
+void GRAPH_RDLOCK qcow2_process_discards(BlockDriverState *bs, int ret);
+
+int GRAPH_RDLOCK
+qcow2_check_metadata_overlap(BlockDriverState *bs, int ign, int64_t offset,
+ int64_t size);
+int GRAPH_RDLOCK
+qcow2_pre_write_overlap_check(BlockDriverState *bs, int ign, int64_t offset,
+ int64_t size, bool data_file);
+
+int coroutine_fn qcow2_inc_refcounts_imrt(BlockDriverState *bs, BdrvCheckResult *res,
+ void **refcount_table,
+ int64_t *refcount_table_size,
+ int64_t offset, int64_t size);
+
+int GRAPH_RDLOCK
+qcow2_change_refcount_order(BlockDriverState *bs, int refcount_order,
+ BlockDriverAmendStatusCB *status_cb,
+ void *cb_opaque, Error **errp);
+int coroutine_fn GRAPH_RDLOCK qcow2_shrink_reftable(BlockDriverState *bs);
+
+int64_t coroutine_fn GRAPH_RDLOCK
+qcow2_get_last_cluster(BlockDriverState *bs, int64_t size);
+
+int coroutine_fn GRAPH_RDLOCK
+qcow2_detect_metadata_preallocation(BlockDriverState *bs);
/* qcow2-cluster.c functions */
-int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
- bool exact_size);
-int qcow2_shrink_l1_table(BlockDriverState *bs, uint64_t max_size);
-int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index);
+int GRAPH_RDLOCK
+qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size, bool exact_size);
+
+int coroutine_fn GRAPH_RDLOCK
+qcow2_shrink_l1_table(BlockDriverState *bs, uint64_t max_size);
+
+int GRAPH_RDLOCK qcow2_write_l1_entry(BlockDriverState *bs, int l1_index);
int qcow2_encrypt_sectors(BDRVQcow2State *s, int64_t sector_num,
uint8_t *buf, int nb_sectors, bool enc, Error **errp);
-int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
- unsigned int *bytes, uint64_t *cluster_offset);
-int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
- unsigned int *bytes, uint64_t *host_offset,
- QCowL2Meta **m);
-uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
- uint64_t offset,
- int compressed_size);
-
-int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m);
-void qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m);
-int qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset,
- uint64_t bytes, enum qcow2_discard_type type,
- bool full_discard);
-int qcow2_cluster_zeroize(BlockDriverState *bs, uint64_t offset,
- uint64_t bytes, int flags);
-
-int qcow2_expand_zero_clusters(BlockDriverState *bs,
- BlockDriverAmendStatusCB *status_cb,
- void *cb_opaque);
+int GRAPH_RDLOCK
+qcow2_get_host_offset(BlockDriverState *bs, uint64_t offset,
+ unsigned int *bytes, uint64_t *host_offset,
+ QCow2SubclusterType *subcluster_type);
+
+int coroutine_fn GRAPH_RDLOCK
+qcow2_alloc_host_offset(BlockDriverState *bs, uint64_t offset,
+ unsigned int *bytes, uint64_t *host_offset,
+ QCowL2Meta **m);
+
+int coroutine_fn GRAPH_RDLOCK
+qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs, uint64_t offset,
+ int compressed_size, uint64_t *host_offset);
+void GRAPH_RDLOCK
+qcow2_parse_compressed_l2_entry(BlockDriverState *bs, uint64_t l2_entry,
+ uint64_t *coffset, int *csize);
+
+int coroutine_fn GRAPH_RDLOCK
+qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m);
+
+void coroutine_fn GRAPH_RDLOCK
+qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m);
+
+int GRAPH_RDLOCK
+qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
+ enum qcow2_discard_type type, bool full_discard);
+
+int coroutine_fn GRAPH_RDLOCK
+qcow2_subcluster_zeroize(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
+ int flags);
+
+int GRAPH_RDLOCK
+qcow2_expand_zero_clusters(BlockDriverState *bs,
+ BlockDriverAmendStatusCB *status_cb,
+ void *cb_opaque);
/* qcow2-snapshot.c functions */
-int qcow2_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info);
-int qcow2_snapshot_goto(BlockDriverState *bs, const char *snapshot_id);
-int qcow2_snapshot_delete(BlockDriverState *bs,
- const char *snapshot_id,
- const char *name,
- Error **errp);
-int qcow2_snapshot_list(BlockDriverState *bs, QEMUSnapshotInfo **psn_tab);
-int qcow2_snapshot_load_tmp(BlockDriverState *bs,
- const char *snapshot_id,
- const char *name,
- Error **errp);
+int GRAPH_RDLOCK
+qcow2_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info);
+
+int GRAPH_RDLOCK
+qcow2_snapshot_goto(BlockDriverState *bs, const char *snapshot_id);
+
+int GRAPH_RDLOCK
+qcow2_snapshot_delete(BlockDriverState *bs, const char *snapshot_id,
+ const char *name, Error **errp);
+
+int GRAPH_RDLOCK
+qcow2_snapshot_list(BlockDriverState *bs, QEMUSnapshotInfo **psn_tab);
+
+int GRAPH_RDLOCK
+qcow2_snapshot_load_tmp(BlockDriverState *bs, const char *snapshot_id,
+ const char *name, Error **errp);
void qcow2_free_snapshots(BlockDriverState *bs);
-int qcow2_read_snapshots(BlockDriverState *bs);
+int coroutine_fn GRAPH_RDLOCK
+qcow2_read_snapshots(BlockDriverState *bs, Error **errp);
+int GRAPH_RDLOCK qcow2_write_snapshots(BlockDriverState *bs);
+
+int coroutine_fn GRAPH_RDLOCK
+qcow2_check_read_snapshot_table(BlockDriverState *bs, BdrvCheckResult *result,
+ BdrvCheckMode fix);
+
+int coroutine_fn GRAPH_RDLOCK
+qcow2_check_fix_snapshot_table(BlockDriverState *bs, BdrvCheckResult *result,
+ BdrvCheckMode fix);
/* qcow2-cache.c functions */
-Qcow2Cache *qcow2_cache_create(BlockDriverState *bs, int num_tables,
- unsigned table_size);
+Qcow2Cache * GRAPH_RDLOCK
+qcow2_cache_create(BlockDriverState *bs, int num_tables, unsigned table_size);
+
int qcow2_cache_destroy(Qcow2Cache *c);
void qcow2_cache_entry_mark_dirty(Qcow2Cache *c, void *table);
-int qcow2_cache_flush(BlockDriverState *bs, Qcow2Cache *c);
-int qcow2_cache_write(BlockDriverState *bs, Qcow2Cache *c);
-int qcow2_cache_set_dependency(BlockDriverState *bs, Qcow2Cache *c,
- Qcow2Cache *dependency);
+int GRAPH_RDLOCK qcow2_cache_flush(BlockDriverState *bs, Qcow2Cache *c);
+int GRAPH_RDLOCK qcow2_cache_write(BlockDriverState *bs, Qcow2Cache *c);
+int GRAPH_RDLOCK qcow2_cache_set_dependency(BlockDriverState *bs, Qcow2Cache *c,
+ Qcow2Cache *dependency);
void qcow2_cache_depends_on_flush(Qcow2Cache *c);
void qcow2_cache_clean_unused(Qcow2Cache *c);
-int qcow2_cache_empty(BlockDriverState *bs, Qcow2Cache *c);
+int GRAPH_RDLOCK qcow2_cache_empty(BlockDriverState *bs, Qcow2Cache *c);
+
+int GRAPH_RDLOCK
+qcow2_cache_get(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
+ void **table);
+
+int GRAPH_RDLOCK
+qcow2_cache_get_empty(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
+ void **table);
-int qcow2_cache_get(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
- void **table);
-int qcow2_cache_get_empty(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
- void **table);
void qcow2_cache_put(Qcow2Cache *c, void **table);
void *qcow2_cache_is_table_offset(Qcow2Cache *c, uint64_t offset);
void qcow2_cache_discard(Qcow2Cache *c, void *table);
/* qcow2-bitmap.c functions */
-int qcow2_check_bitmaps_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
- void **refcount_table,
- int64_t *refcount_table_size);
-bool qcow2_load_dirty_bitmaps(BlockDriverState *bs, Error **errp);
-int qcow2_reopen_bitmaps_rw_hint(BlockDriverState *bs, bool *header_updated,
- Error **errp);
-int qcow2_reopen_bitmaps_rw(BlockDriverState *bs, Error **errp);
-void qcow2_store_persistent_dirty_bitmaps(BlockDriverState *bs, Error **errp);
-int qcow2_reopen_bitmaps_ro(BlockDriverState *bs, Error **errp);
-bool qcow2_can_store_new_dirty_bitmap(BlockDriverState *bs,
- const char *name,
- uint32_t granularity,
- Error **errp);
-void qcow2_remove_persistent_dirty_bitmap(BlockDriverState *bs,
- const char *name,
- Error **errp);
+int coroutine_fn GRAPH_RDLOCK
+qcow2_check_bitmaps_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
+ void **refcount_table,
+ int64_t *refcount_table_size);
+
+bool coroutine_fn GRAPH_RDLOCK
+qcow2_load_dirty_bitmaps(BlockDriverState *bs, bool *header_updated,
+ Error **errp);
+
+bool GRAPH_RDLOCK
+qcow2_get_bitmap_info_list(BlockDriverState *bs,
+ Qcow2BitmapInfoList **info_list, Error **errp);
+
+int GRAPH_RDLOCK qcow2_reopen_bitmaps_rw(BlockDriverState *bs, Error **errp);
+int GRAPH_RDLOCK qcow2_reopen_bitmaps_ro(BlockDriverState *bs, Error **errp);
+
+int coroutine_fn GRAPH_RDLOCK
+qcow2_truncate_bitmaps_check(BlockDriverState *bs, Error **errp);
+
+bool GRAPH_RDLOCK
+qcow2_store_persistent_dirty_bitmaps(BlockDriverState *bs, bool release_stored,
+ Error **errp);
+
+bool coroutine_fn GRAPH_RDLOCK
+qcow2_co_can_store_new_dirty_bitmap(BlockDriverState *bs, const char *name,
+ uint32_t granularity, Error **errp);
+
+int coroutine_fn GRAPH_RDLOCK
+qcow2_co_remove_persistent_dirty_bitmap(BlockDriverState *bs, const char *name,
+ Error **errp);
+
+bool qcow2_supports_persistent_dirty_bitmap(BlockDriverState *bs);
+uint64_t qcow2_get_persistent_dirty_bitmap_size(BlockDriverState *bs,
+ uint32_t cluster_size);
+
+ssize_t coroutine_fn
+qcow2_co_compress(BlockDriverState *bs, void *dest, size_t dest_size,
+ const void *src, size_t src_size);
+ssize_t coroutine_fn
+qcow2_co_decompress(BlockDriverState *bs, void *dest, size_t dest_size,
+ const void *src, size_t src_size);
+int coroutine_fn
+qcow2_co_encrypt(BlockDriverState *bs, uint64_t host_offset,
+ uint64_t guest_offset, void *buf, size_t len);
+int coroutine_fn
+qcow2_co_decrypt(BlockDriverState *bs, uint64_t host_offset,
+ uint64_t guest_offset, void *buf, size_t len);
#endif