author     Anthony Liguori <aliguori@us.ibm.com>  2011-12-05 09:39:25 -0600
committer  Anthony Liguori <aliguori@us.ibm.com>  2011-12-05 09:39:25 -0600
commit     eb5d5beaebd102599a915f6c4813d445ddc9dc84 (patch)
tree       12ce2331571a30c67bde0b8f4ddb55996dd0ba65 /hw
parent     f6480ca3f3423be5bee8b673ee6f5cc387659def (diff)
parent     922453bca6a927bb527068ae8679d587cfa45dbc (diff)
Merge remote-tracking branch 'kwolf/for-anthony' into staging
Diffstat (limited to 'hw')
-rw-r--r--  hw/ide/macio.c       5
-rw-r--r--  hw/ide/pci.c         2
-rw-r--r--  hw/virtio-blk.c      2
-rw-r--r--  hw/xen_disk.c       86
-rw-r--r--  hw/xen_platform.c    2
5 files changed, 8 insertions, 89 deletions
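
All five hunks below make the same substitution: call sites that previously waited for outstanding requests with qemu_aio_flush() now call bdrv_drain_all(), and xen_disk additionally drops its synchronous I/O fallback so only the AIO path remains. As a rough sketch of the call-site pattern (not code from this merge; MyDeviceState and its aiocb field are illustrative placeholders for the per-device state seen in the hunks):

    /* Sketch: drain the block layer before touching device state.
     * MyDeviceState and its aiocb field are illustrative placeholders. */
    static void mydev_stop_dma(MyDeviceState *s)
    {
        if (s->aiocb) {
            bdrv_drain_all();       /* wait for every in-flight block request */
        }
        assert(s->aiocb == NULL);   /* the completion callback has cleared it */
    }
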
diff --git a/hw/ide/macio.c b/hw/ide/macio.c
index 70b33422d2..c09d2e0a35 100644
--- a/hw/ide/macio.c
+++ b/hw/ide/macio.c
@@ -200,8 +200,9 @@ static void pmac_ide_flush(DBDMA_io *io)
 {
     MACIOIDEState *m = io->opaque;
 
-    if (m->aiocb)
-        qemu_aio_flush();
+    if (m->aiocb) {
+        bdrv_drain_all();
+    }
 }
 
 /* PowerMac IDE memory IO */
diff --git a/hw/ide/pci.c b/hw/ide/pci.c
index 49b823df79..5078c0b565 100644
--- a/hw/ide/pci.c
+++ b/hw/ide/pci.c
@@ -309,7 +309,7 @@ void bmdma_cmd_writeb(BMDMAState *bm, uint32_t val)
              * aio operation with preadv/pwritev.
              */
             if (bm->bus->dma->aiocb) {
-                qemu_aio_flush();
+                bdrv_drain_all();
                 assert(bm->bus->dma->aiocb == NULL);
                 assert((bm->status & BM_STATUS_DMAING) == 0);
             }
diff --git a/hw/virtio-blk.c b/hw/virtio-blk.c
index d6d1f87cda..4b0d113ba8 100644
--- a/hw/virtio-blk.c
+++ b/hw/virtio-blk.c
@@ -474,7 +474,7 @@ static void virtio_blk_reset(VirtIODevice *vdev)
      * This should cancel pending requests, but can't do nicely until there
      * are per-device request lists.
      */
-    qemu_aio_flush();
+    bdrv_drain_all();
 }
 
 /* coalesce internal state, copy to pci i/o region 0
diff --git a/hw/xen_disk.c b/hw/xen_disk.c
index 286bbac54a..192e81746f 100644
--- a/hw/xen_disk.c
+++ b/hw/xen_disk.c
@@ -49,7 +49,6 @@ static int syncwrite = 0;
static int batch_maps = 0;
static int max_requests = 32;
-static int use_aio = 1;
/* ------------------------------------------------------------- */
@@ -314,76 +313,6 @@ static int ioreq_map(struct ioreq *ioreq)
     return 0;
 }
 
-static int ioreq_runio_qemu_sync(struct ioreq *ioreq)
-{
-    struct XenBlkDev *blkdev = ioreq->blkdev;
-    int i, rc;
-    off_t pos;
-
-    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
-        goto err_no_map;
-    }
-    if (ioreq->presync) {
-        bdrv_flush(blkdev->bs);
-    }
-
-    switch (ioreq->req.operation) {
-    case BLKIF_OP_READ:
-        pos = ioreq->start;
-        for (i = 0; i < ioreq->v.niov; i++) {
-            rc = bdrv_read(blkdev->bs, pos / BLOCK_SIZE,
-                           ioreq->v.iov[i].iov_base,
-                           ioreq->v.iov[i].iov_len / BLOCK_SIZE);
-            if (rc != 0) {
-                xen_be_printf(&blkdev->xendev, 0, "rd I/O error (%p, len %zd)\n",
-                              ioreq->v.iov[i].iov_base,
-                              ioreq->v.iov[i].iov_len);
-                goto err;
-            }
-            pos += ioreq->v.iov[i].iov_len;
-        }
-        break;
-    case BLKIF_OP_WRITE:
-    case BLKIF_OP_WRITE_BARRIER:
-        if (!ioreq->req.nr_segments) {
-            break;
-        }
-        pos = ioreq->start;
-        for (i = 0; i < ioreq->v.niov; i++) {
-            rc = bdrv_write(blkdev->bs, pos / BLOCK_SIZE,
-                            ioreq->v.iov[i].iov_base,
-                            ioreq->v.iov[i].iov_len / BLOCK_SIZE);
-            if (rc != 0) {
-                xen_be_printf(&blkdev->xendev, 0, "wr I/O error (%p, len %zd)\n",
-                              ioreq->v.iov[i].iov_base,
-                              ioreq->v.iov[i].iov_len);
-                goto err;
-            }
-            pos += ioreq->v.iov[i].iov_len;
-        }
-        break;
-    default:
-        /* unknown operation (shouldn't happen -- parse catches this) */
-        goto err;
-    }
-
-    if (ioreq->postsync) {
-        bdrv_flush(blkdev->bs);
-    }
-    ioreq->status = BLKIF_RSP_OKAY;
-
-    ioreq_unmap(ioreq);
-    ioreq_finish(ioreq);
-    return 0;
-
-err:
-    ioreq_unmap(ioreq);
-err_no_map:
-    ioreq_finish(ioreq);
-    ioreq->status = BLKIF_RSP_ERROR;
-    return -1;
-}
-
 static void qemu_aio_complete(void *opaque, int ret)
 {
     struct ioreq *ioreq = opaque;
@@ -554,9 +483,7 @@ static void blk_handle_requests(struct XenBlkDev *blkdev)
     rp = blkdev->rings.common.sring->req_prod;
     xen_rmb(); /* Ensure we see queued requests up to 'rp'. */
 
-    if (use_aio) {
-        blk_send_response_all(blkdev);
-    }
+    blk_send_response_all(blkdev);
     while (rc != rp) {
         /* pull request from ring */
         if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
@@ -579,16 +506,7 @@ static void blk_handle_requests(struct XenBlkDev *blkdev)
             continue;
         }
 
-        if (use_aio) {
-            /* run i/o in aio mode */
-            ioreq_runio_qemu_aio(ioreq);
-        } else {
-            /* run i/o in sync mode */
-            ioreq_runio_qemu_sync(ioreq);
-        }
-    }
-    if (!use_aio) {
-        blk_send_response_all(blkdev);
+        ioreq_runio_qemu_aio(ioreq);
     }
 
     if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
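
With the synchronous path gone, every xen_disk request is submitted through the block layer's asynchronous interface and completed in qemu_aio_complete(). A minimal sketch of the write submission, assuming the bdrv_aio_writev() signature of this period (the aio_inflight counter is an assumed in-flight count and is not shown in the hunks above):

    /* Sketch: asynchronous submission; qemu_aio_complete() runs on completion.
     * aio_inflight is an assumed in-flight request counter. */
    ioreq->aio_inflight++;
    bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
                    &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                    qemu_aio_complete, ioreq);
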
diff --git a/hw/xen_platform.c b/hw/xen_platform.c
index 5e792f56f6..e62eaef7d1 100644
--- a/hw/xen_platform.c
+++ b/hw/xen_platform.c
@@ -120,7 +120,7 @@ static void platform_fixed_ioport_writew(void *opaque, uint32_t addr, uint32_t v
        devices, and bit 2 the non-primary-master IDE devices. */
     if (val & UNPLUG_ALL_IDE_DISKS) {
         DPRINTF("unplug disks\n");
-        qemu_aio_flush();
+        bdrv_drain_all();
         bdrv_flush_all();
         pci_unplug_disks(s->pci_dev.bus);
     }