Diffstat (limited to 'block/qcow2-cluster.c')
-rw-r--r--  block/qcow2-cluster.c  40
1 file changed, 28 insertions(+), 12 deletions(-)
diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c
index 7f4f73eaca..202adb4807 100644
--- a/block/qcow2-cluster.c
+++ b/block/qcow2-cluster.c
@@ -759,31 +759,41 @@ out:
* Check if there already is an AIO write request in flight which allocates
* the same cluster. In this case we need to wait until the previous
* request has completed and updated the L2 table accordingly.
+ *
+ * Returns:
+ * 0 if there was no dependency. *cur_bytes indicates the number of
+ * bytes from guest_offset that can be read before the next
+ * dependency must be processed (or the request is complete)
+ *
+ * -EAGAIN if we had to wait for another request, previously gathered
+ * information on cluster allocation may be invalid now. The caller
+ * must start over anyway, so consider *cur_bytes undefined.
*/
static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
- unsigned int *nb_clusters)
+ uint64_t *cur_bytes)
{
BDRVQcowState *s = bs->opaque;
QCowL2Meta *old_alloc;
+ uint64_t bytes = *cur_bytes;
QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {
- uint64_t start = guest_offset >> s->cluster_bits;
- uint64_t end = start + *nb_clusters;
- uint64_t old_start = old_alloc->offset >> s->cluster_bits;
- uint64_t old_end = old_start + old_alloc->nb_clusters;
+ uint64_t start = guest_offset;
+ uint64_t end = start + bytes;
+ uint64_t old_start = l2meta_cow_start(old_alloc);
+ uint64_t old_end = l2meta_cow_end(old_alloc);
if (end <= old_start || start >= old_end) {
/* No intersection */
} else {
if (start < old_start) {
/* Stop at the start of a running allocation */
- *nb_clusters = old_start - start;
+ bytes = old_start - start;
} else {
- *nb_clusters = 0;
+ bytes = 0;
}
- if (*nb_clusters == 0) {
+ if (bytes == 0) {
/* Wait for the dependency to complete. We need to recheck
* the free/allocated clusters when we continue. */
qemu_co_mutex_unlock(&s->lock);
@@ -794,9 +804,9 @@ static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
}
}
- if (!*nb_clusters) {
- abort();
- }
+ /* Make sure that existing clusters and new allocations are only used up to
+ * the next dependency if we shortened the request above */
+ *cur_bytes = bytes;
return 0;
}
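
The rewritten check above is a plain byte-range intersection: the request
[guest_offset, guest_offset + bytes) is clipped against each in-flight
allocation's COW range, either shortening the request or forcing a wait.
A minimal standalone sketch of that clipping step (hypothetical helper
name and values, not code from the QEMU tree):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Clip [start, start + *bytes) against one in-flight allocation
     * [old_start, old_end). Returns 1 if the request overlaps from its
     * start and must wait for the dependency, 0 otherwise. */
    static int clip_request(uint64_t start, uint64_t *bytes,
                            uint64_t old_start, uint64_t old_end)
    {
        uint64_t end = start + *bytes;

        if (end <= old_start || start >= old_end) {
            return 0;                   /* No intersection */
        }
        if (start < old_start) {
            *bytes = old_start - start; /* Stop at the running allocation */
            return 0;
        }
        *bytes = 0;                     /* Overlaps from the start: wait */
        return 1;
    }

    int main(void)
    {
        uint64_t bytes = 0x10000;
        int wait = clip_request(0, &bytes, 0x8000, 0xc000);
        printf("wait=%d bytes=0x%" PRIx64 "\n", wait, bytes);
        /* Prints: wait=0 bytes=0x8000 */
        return 0;
    }
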
@@ -875,6 +885,7 @@ int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
uint64_t *l2_table;
unsigned int nb_clusters, keep_clusters;
uint64_t cluster_offset;
+ uint64_t cur_bytes;
trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset,
n_start, n_end);
@@ -887,6 +898,7 @@ again:
l2_index = offset_to_l2_index(s, offset);
nb_clusters = MIN(size_to_clusters(s, n_end << BDRV_SECTOR_BITS),
s->l2_size - l2_index);
+ n_end = MIN(n_end, nb_clusters * s->cluster_sectors);
/*
* Now start gathering as many contiguous clusters as possible:
@@ -911,7 +923,8 @@ again:
* 3. If the request still hasn't completed, allocate new clusters,
* considering any cluster_offset of steps 1c or 2.
*/
- ret = handle_dependencies(bs, offset, &nb_clusters);
+ cur_bytes = (n_end - n_start) * BDRV_SECTOR_SIZE;
+ ret = handle_dependencies(bs, offset, &cur_bytes);
if (ret == -EAGAIN) {
goto again;
} else if (ret < 0) {
@@ -922,6 +935,9 @@ again:
* correctly during the next loop iteration. */
}
+ nb_clusters = size_to_clusters(s, offset + cur_bytes)
+ - (offset >> s->cluster_bits);
+
/* Find L2 entry for the first involved cluster */
ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
if (ret < 0) {
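
The nb_clusters recomputation in the hunk above maps the (possibly
shortened) byte count back onto whole clusters: it counts the clusters
touched by [offset, offset + cur_bytes), starting from the cluster that
contains offset. A small worked illustration with a hypothetical 64 KiB
cluster size (size_to_clusters here is a local stand-in for the qcow2
helper of the same name):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CLUSTER_BITS 16 /* hypothetical 64 KiB clusters */

    /* Stand-in for qcow2's size_to_clusters(): number of clusters
     * needed to cover `size` bytes, rounding up. */
    static uint64_t size_to_clusters(uint64_t size)
    {
        return (size + (1ULL << CLUSTER_BITS) - 1) >> CLUSTER_BITS;
    }

    int main(void)
    {
        uint64_t offset    = 0x18000; /* starts halfway into cluster 1 */
        uint64_t cur_bytes = 0x12000; /* bytes after handle_dependencies() */

        /* Same formula as in the patch */
        uint64_t nb_clusters = size_to_clusters(offset + cur_bytes)
                             - (offset >> CLUSTER_BITS);

        printf("nb_clusters = %" PRIu64 "\n", nb_clusters);
        /* Prints: nb_clusters = 2 (the range spans clusters 1 and 2) */
        return 0;
    }
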