-rw-r--r--  aio-posix.c | 18
-rw-r--r--  aio-win32.c | 24
-rw-r--r--  arch_init.c | 12
-rw-r--r--  async.c | 20
-rw-r--r--  audio/audio.c | 6
-rw-r--r--  audio/noaudio.c | 4
-rw-r--r--  audio/spiceaudio.c | 4
-rw-r--r--  audio/wavaudio.c | 2
-rw-r--r--  backends/baum.c | 16
-rw-r--r--  block.c | 46
-rw-r--r--  block/backup.c | 4
-rw-r--r--  block/commit.c | 2
-rw-r--r--  block/iscsi.c | 14
-rw-r--r--  block/mirror.c | 10
-rw-r--r--  block/qcow2.c | 5
-rw-r--r--  block/qed.c | 10
-rw-r--r--  block/stream.c | 2
-rw-r--r--  block/vmdk.c | 52
-rw-r--r--  block/win32-aio.c | 10
-rw-r--r--  blockdev.c | 8
-rw-r--r--  blockjob.c | 4
-rwxr-xr-x  configure | 37
-rw-r--r--  cpus.c | 138
-rw-r--r--  dma-helpers.c | 1
-rw-r--r--  hmp.c | 8
-rw-r--r--  hw/acpi/core.c | 8
-rw-r--r--  hw/acpi/piix4.c | 2
-rw-r--r--  hw/alpha/typhoon.c | 2
-rw-r--r--  hw/arm/omap1.c | 52
-rw-r--r--  hw/arm/pxa2xx.c | 61
-rw-r--r--  hw/arm/spitz.c | 6
-rw-r--r--  hw/arm/stellaris.c | 10
-rw-r--r--  hw/arm/strongarm.c | 34
-rw-r--r--  hw/audio/adlib.c | 2
-rw-r--r--  hw/audio/intel-hda.c | 4
-rw-r--r--  hw/audio/sb16.c | 6
-rw-r--r--  hw/block/fdc.c | 6
-rw-r--r--  hw/block/nvme.c | 20
-rw-r--r--  hw/block/pflash_cfi01.c | 2
-rw-r--r--  hw/block/pflash_cfi02.c | 10
-rw-r--r--  hw/bt/hci-csr.c | 4
-rw-r--r--  hw/bt/hci.c | 38
-rw-r--r--  hw/bt/l2cap.c | 8
-rw-r--r--  hw/char/cadence_uart.c | 12
-rw-r--r--  hw/char/serial.c | 22
-rw-r--r--  hw/char/virtio-serial-bus.c | 10
-rw-r--r--  hw/core/ptimer.c | 18
-rw-r--r--  hw/display/qxl-logger.c | 2
-rw-r--r--  hw/display/qxl.c | 2
-rw-r--r--  hw/display/vga.c | 6
-rw-r--r--  hw/dma/pl330.c | 6
-rw-r--r--  hw/dma/rc4030.c | 4
-rw-r--r--  hw/dma/soc_dma.c | 8
-rw-r--r--  hw/dma/xilinx_axidma.c | 1
-rw-r--r--  hw/i386/kvm/apic.c | 2
-rw-r--r--  hw/i386/kvm/i8254.c | 6
-rw-r--r--  hw/i386/xen_domainbuild.c | 6
-rw-r--r--  hw/ide/core.c | 6
-rw-r--r--  hw/input/hid.c | 10
-rw-r--r--  hw/input/lm832x.c | 8
-rw-r--r--  hw/input/tsc2005.c | 16
-rw-r--r--  hw/input/tsc210x.c | 32
-rw-r--r--  hw/intc/apic.c | 16
-rw-r--r--  hw/intc/apic_common.c | 2
-rw-r--r--  hw/intc/armv7m_nvic.c | 16
-rw-r--r--  hw/intc/i8259.c | 4
-rw-r--r--  hw/mips/cputimer.c | 16
-rw-r--r--  hw/misc/arm_sysctl.c | 2
-rw-r--r--  hw/misc/macio/cuda.c | 34
-rw-r--r--  hw/misc/macio/macio.c | 4
-rw-r--r--  hw/misc/vfio.c | 14
-rw-r--r--  hw/net/dp8393x.c | 20
-rw-r--r--  hw/net/e1000.c | 12
-rw-r--r--  hw/net/lan9118.c | 4
-rw-r--r--  hw/net/pcnet-pci.c | 4
-rw-r--r--  hw/net/pcnet.c | 10
-rw-r--r--  hw/net/rtl8139.c | 28
-rw-r--r--  hw/net/virtio-net.c | 20
-rw-r--r--  hw/openrisc/cputimer.c | 10
-rw-r--r--  hw/ppc/ppc.c | 64
-rw-r--r--  hw/ppc/ppc405_uc.c | 8
-rw-r--r--  hw/ppc/ppc_booke.c | 10
-rw-r--r--  hw/ppc/spapr.c | 8
-rw-r--r--  hw/sd/sdhci.c | 28
-rw-r--r--  hw/sparc64/sun4u.c | 24
-rw-r--r--  hw/timer/arm_mptimer.c | 12
-rw-r--r--  hw/timer/arm_timer.c | 1
-rw-r--r--  hw/timer/cadence_ttc.c | 6
-rw-r--r--  hw/timer/etraxfs_timer.c | 2
-rw-r--r--  hw/timer/exynos4210_mct.c | 3
-rw-r--r--  hw/timer/exynos4210_pwm.c | 1
-rw-r--r--  hw/timer/grlib_gptimer.c | 2
-rw-r--r--  hw/timer/hpet.c | 20
-rw-r--r--  hw/timer/i8254.c | 26
-rw-r--r--  hw/timer/i8254_common.c | 4
-rw-r--r--  hw/timer/imx_epit.c | 1
-rw-r--r--  hw/timer/imx_gpt.c | 1
-rw-r--r--  hw/timer/lm32_timer.c | 1
-rw-r--r--  hw/timer/m48t59.c | 18
-rw-r--r--  hw/timer/mc146818rtc.c | 50
-rw-r--r--  hw/timer/omap_gptimer.c | 24
-rw-r--r--  hw/timer/omap_synctimer.c | 2
-rw-r--r--  hw/timer/pl031.c | 19
-rw-r--r--  hw/timer/puv3_ost.c | 1
-rw-r--r--  hw/timer/pxa2xx_timer.c | 34
-rw-r--r--  hw/timer/sh_timer.c | 1
-rw-r--r--  hw/timer/slavio_timer.c | 1
-rw-r--r--  hw/timer/tusb6010.c | 12
-rw-r--r--  hw/timer/twl92230.c | 14
-rw-r--r--  hw/timer/xilinx_timer.c | 1
-rw-r--r--  hw/tpm/tpm_tis.c | 1
-rw-r--r--  hw/usb/hcd-ehci.c | 10
-rw-r--r--  hw/usb/hcd-musb.c | 6
-rw-r--r--  hw/usb/hcd-ohci.c | 12
-rw-r--r--  hw/usb/hcd-uhci.c | 15
-rw-r--r--  hw/usb/hcd-xhci.c | 26
-rw-r--r--  hw/usb/host-libusb.c | 6
-rw-r--r--  hw/usb/host-linux.c | 6
-rw-r--r--  hw/usb/redirect.c | 16
-rw-r--r--  hw/virtio/virtio-balloon.c | 8
-rw-r--r--  hw/virtio/virtio-rng.c | 14
-rw-r--r--  hw/watchdog/wdt_i6300esb.c | 6
-rw-r--r--  hw/watchdog/wdt_ib700.c | 10
-rw-r--r--  hw/xtensa/pic_cpu.c | 10
-rw-r--r--  include/block/aio.h | 52
-rw-r--r--  include/block/block_int.h | 4
-rw-r--r--  include/block/blockjob.h | 2
-rw-r--r--  include/block/coroutine.h | 3
-rw-r--r--  include/hw/acpi/acpi.h | 2
-rw-r--r--  include/qemu/ratelimit.h | 2
-rw-r--r--  include/qemu/timer.h | 676
-rw-r--r--  include/qemu/typedefs.h | 3
-rw-r--r--  include/sysemu/sysemu.h | 2
-rw-r--r--  main-loop.c | 57
-rw-r--r--  migration-exec.c | 1
-rw-r--r--  migration-fd.c | 1
-rw-r--r--  migration-tcp.c | 1
-rw-r--r--  migration-unix.c | 1
-rw-r--r--  migration.c | 17
-rw-r--r--  monitor.c | 8
-rw-r--r--  nbd.c | 1
-rw-r--r--  net/dump.c | 2
-rw-r--r--  net/net.c | 1
-rw-r--r--  net/socket.c | 1
-rw-r--r--  qemu-char.c | 2
-rw-r--r--  qemu-coroutine-io.c | 1
-rw-r--r--  qemu-coroutine-sleep.c | 10
-rw-r--r--  qemu-io-cmds.c | 1
-rw-r--r--  qemu-nbd.c | 1
-rw-r--r--  qemu-timer.c | 834
-rw-r--r--  qtest.c | 10
-rw-r--r--  savevm.c | 24
-rwxr-xr-x  scripts/switch-timer-api | 178
-rw-r--r--  slirp/if.c | 2
-rw-r--r--  slirp/misc.c | 1
-rw-r--r--  slirp/slirp.c | 4
-rw-r--r--  stubs/clock-warp.c | 2
-rw-r--r--  target-alpha/sys_helper.c | 12
-rw-r--r--  target-arm/cpu.c | 4
-rw-r--r--  target-arm/helper.c | 10
-rw-r--r--  target-ppc/kvm.c | 8
-rw-r--r--  target-ppc/kvm_ppc.c | 6
-rw-r--r--  target-s390x/cpu.c | 4
-rw-r--r--  target-s390x/misc_helper.c | 6
-rw-r--r--  target-xtensa/op_helper.c | 2
-rw-r--r--  tests/libqtest.h | 24
-rw-r--r--  tests/test-aio.c | 132
-rw-r--r--  tests/test-thread-pool.c | 3
-rw-r--r--  thread-pool.c | 1
-rw-r--r--  ui/console.c | 30
-rw-r--r--  ui/input.c | 6
-rw-r--r--  ui/spice-core.c | 10
-rw-r--r--  ui/vnc-auth-sasl.h | 1
-rw-r--r--  ui/vnc-auth-vencrypt.c | 2
-rw-r--r--  ui/vnc-ws.c | 1
-rw-r--r--  vl.c | 14
-rw-r--r--  xen-all.c | 12
177 files changed, 2362 insertions, 1503 deletions
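
Note: most of this series is a mechanical rename from the legacy clock/timer calls to the new per-clock-type API. The mapping below is inferred from the hunks that follow (the legacy names appear only on removed lines); it is an illustrative summary, not an exhaustive list.

    /* Legacy API (removed)                   New API (added)
     *
     * qemu_get_clock_ns(vm_clock)        ->  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)
     * qemu_get_clock_ms(rt_clock)        ->  qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
     * qemu_new_timer_ns(vm_clock, cb, o) ->  timer_new_ns(QEMU_CLOCK_VIRTUAL, cb, o)
     * qemu_new_timer_ms(rt_clock, cb, o) ->  timer_new_ms(QEMU_CLOCK_REALTIME, cb, o)
     * qemu_mod_timer(t, expire)          ->  timer_mod(t, expire)
     * qemu_del_timer(t)                  ->  timer_del(t)
     * qemu_free_timer(t)                 ->  timer_free(t)
     * qemu_timer_pending(t)              ->  timer_pending(t)
     * qemu_get_timer(f, t)               ->  timer_get(f, t)
     * qemu_run_timers(vm_clock)          ->  qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL)
     * qemu_clock_deadline(vm_clock)      ->  qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL)
     */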
diff --git a/aio-posix.c b/aio-posix.c
index 2440eb9c27..bd06f33c78 100644
--- a/aio-posix.c
+++ b/aio-posix.c
@@ -165,6 +165,10 @@ static bool aio_dispatch(AioContext *ctx)
g_free(tmp);
}
}
+
+ /* Run our timers */
+ progress |= timerlistgroup_run_timers(&ctx->tlg);
+
return progress;
}
@@ -219,9 +223,9 @@ bool aio_poll(AioContext *ctx, bool blocking)
}
/* wait until next event */
- ret = g_poll((GPollFD *)ctx->pollfds->data,
- ctx->pollfds->len,
- blocking ? -1 : 0);
+ ret = qemu_poll_ns((GPollFD *)ctx->pollfds->data,
+ ctx->pollfds->len,
+ blocking ? timerlistgroup_deadline_ns(&ctx->tlg) : 0);
/* if we have any readable fds, dispatch event */
if (ret > 0) {
@@ -232,9 +236,11 @@ bool aio_poll(AioContext *ctx, bool blocking)
node->pfd.revents = pfd->revents;
}
}
- if (aio_dispatch(ctx)) {
- progress = true;
- }
+ }
+
+ /* Run dispatch even if there were no readable fds to run timers */
+ if (aio_dispatch(ctx)) {
+ progress = true;
}
return progress;
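
Note on the aio-posix.c change above: the blocking wait is now bounded by the nearest timer deadline in the AioContext's timer list group, and aio_dispatch() runs expired timers even when no fd became readable. The helper below is a hypothetical restatement of the timeout contract assumed by the new aio_poll(); only the two called functions come from the hunks above.

    /* Hypothetical helper, not part of the patch. */
    static int64_t aio_poll_timeout_ns(AioContext *ctx, bool blocking)
    {
        if (!blocking) {
            return 0;   /* just poll, never wait */
        }
        /* <0: no timer armed, wait indefinitely; 0: a timer already expired,
         * dispatch immediately; >0: nanoseconds until the earliest timer */
        return timerlistgroup_deadline_ns(&ctx->tlg);
    }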
diff --git a/aio-win32.c b/aio-win32.c
index 78b2801c51..f9cfbb75ac 100644
--- a/aio-win32.c
+++ b/aio-win32.c
@@ -95,6 +95,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
bool progress;
int count;
+ int timeout;
progress = false;
@@ -108,6 +109,9 @@ bool aio_poll(AioContext *ctx, bool blocking)
progress = true;
}
+ /* Run timers */
+ progress |= timerlistgroup_run_timers(&ctx->tlg);
+
/*
* Then dispatch any pending callbacks from the GSource.
*
@@ -125,7 +129,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
node->io_notify(node->e);
/* aio_notify() does not count as progress */
- if (node->opaque != &ctx->notifier) {
+ if (node->e != &ctx->notifier) {
progress = true;
}
}
@@ -164,8 +168,11 @@ bool aio_poll(AioContext *ctx, bool blocking)
/* wait until next event */
while (count > 0) {
- int timeout = blocking ? INFINITE : 0;
- int ret = WaitForMultipleObjects(count, events, FALSE, timeout);
+ int ret;
+
+ timeout = blocking ?
+ qemu_timeout_ns_to_ms(timerlistgroup_deadline_ns(&ctx->tlg)) : 0;
+ ret = WaitForMultipleObjects(count, events, FALSE, timeout);
/* if we have any signaled events, dispatch event */
if ((DWORD) (ret - WAIT_OBJECT_0) >= count) {
@@ -188,7 +195,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
node->io_notify(node->e);
/* aio_notify() does not count as progress */
- if (node->opaque != &ctx->notifier) {
+ if (node->e != &ctx->notifier) {
progress = true;
}
}
@@ -208,5 +215,14 @@ bool aio_poll(AioContext *ctx, bool blocking)
events[ret - WAIT_OBJECT_0] = events[--count];
}
+ if (blocking) {
+ /* Run the timers a second time. We do this because otherwise aio_wait
+ * will not note progress - and will stop a drain early - if we have
+ * a timer that was not ready to run entering g_poll but is ready
+ * after g_poll. This will only do anything if a timer has expired.
+ */
+ progress |= timerlistgroup_run_timers(&ctx->tlg);
+ }
+
return progress;
}
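
Note on the aio-win32.c change above: WaitForMultipleObjects() takes a millisecond timeout, so the nanosecond deadline is converted with qemu_timeout_ns_to_ms(). A minimal sketch of that conversion, assuming it rounds up and maps the "no timer" value -1 to an infinite wait (which WaitForMultipleObjects expresses as INFINITE, i.e. (DWORD)-1):

    /* Sketch only; the real helper lives in qemu-timer.c in this series. */
    static int qemu_timeout_ns_to_ms_sketch(int64_t ns)
    {
        int64_t ms;
        if (ns < 0) {
            return -1;                  /* no deadline: wait forever */
        }
        if (ns == 0) {
            return 0;                   /* already expired: do not block */
        }
        ms = (ns + 999999) / 1000000;   /* round up so timers never fire early */
        return ms > INT32_MAX ? INT32_MAX : (int)ms;
    }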
diff --git a/arch_init.c b/arch_init.c
index 94d45e1d2a..fdd5243770 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -392,7 +392,7 @@ static void migration_bitmap_sync(void)
}
if (!start_time) {
- start_time = qemu_get_clock_ms(rt_clock);
+ start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
}
trace_migration_bitmap_sync_start();
@@ -410,7 +410,7 @@ static void migration_bitmap_sync(void)
trace_migration_bitmap_sync_end(migration_dirty_pages
- num_dirty_pages_init);
num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
- end_time = qemu_get_clock_ms(rt_clock);
+ end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
/* more than 1 second = 1000 milliseconds */
if (end_time > start_time + 1000) {
@@ -672,7 +672,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
ram_control_before_iterate(f, RAM_CONTROL_ROUND);
- t0 = qemu_get_clock_ns(rt_clock);
+ t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
i = 0;
while ((ret = qemu_file_rate_limit(f)) == 0) {
int bytes_sent;
@@ -691,7 +691,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
iterations
*/
if ((i & 63) == 0) {
- uint64_t t1 = (qemu_get_clock_ns(rt_clock) - t0) / 1000000;
+ uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
if (t1 > MAX_WAIT) {
DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
t1, i);
@@ -1217,11 +1217,11 @@ static void check_guest_throttling(void)
}
if (!t0) {
- t0 = qemu_get_clock_ns(rt_clock);
+ t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
return;
}
- t1 = qemu_get_clock_ns(rt_clock);
+ t1 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
/* If it has been more than 40 ms since the last time the guest
* was throttled then do it again.
diff --git a/async.c b/async.c
index 9791d8e6a1..5fb3fa61df 100644
--- a/async.c
+++ b/async.c
@@ -150,7 +150,10 @@ aio_ctx_prepare(GSource *source, gint *timeout)
{
AioContext *ctx = (AioContext *) source;
QEMUBH *bh;
+ int deadline;
+ /* We assume there is no timeout already supplied */
+ *timeout = -1;
for (bh = ctx->first_bh; bh; bh = bh->next) {
if (!bh->deleted && bh->scheduled) {
if (bh->idle) {
@@ -166,6 +169,14 @@ aio_ctx_prepare(GSource *source, gint *timeout)
}
}
+ deadline = qemu_timeout_ns_to_ms(timerlistgroup_deadline_ns(&ctx->tlg));
+ if (deadline == 0) {
+ *timeout = 0;
+ return true;
+ } else {
+ *timeout = qemu_soonest_timeout(*timeout, deadline);
+ }
+
return false;
}
@@ -180,7 +191,7 @@ aio_ctx_check(GSource *source)
return true;
}
}
- return aio_pending(ctx);
+ return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
}
static gboolean
@@ -205,6 +216,7 @@ aio_ctx_finalize(GSource *source)
event_notifier_cleanup(&ctx->notifier);
qemu_mutex_destroy(&ctx->bh_lock);
g_array_free(ctx->pollfds, TRUE);
+ timerlistgroup_deinit(&ctx->tlg);
}
static GSourceFuncs aio_source_funcs = {
@@ -233,6 +245,11 @@ void aio_notify(AioContext *ctx)
event_notifier_set(&ctx->notifier);
}
+static void aio_timerlist_notify(void *opaque)
+{
+ aio_notify(opaque);
+}
+
AioContext *aio_context_new(void)
{
AioContext *ctx;
@@ -244,6 +261,7 @@ AioContext *aio_context_new(void)
aio_set_event_notifier(ctx, &ctx->notifier,
(EventNotifierHandler *)
event_notifier_test_and_clear);
+ timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);
return ctx;
}
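
Note on the async.c change above: aio_ctx_prepare() folds the timer deadline (converted to milliseconds) into the GSource timeout via qemu_soonest_timeout(), and aio_ctx_check() reports the source as ready when a timer has already expired. A minimal sketch of the soonest-timeout helper, assuming -1 still means "no timeout":

    /* Sketch: pick the nearer of two timeouts where -1 means "none".
     * Casting to unsigned makes -1 compare as the largest possible value. */
    static inline int64_t qemu_soonest_timeout_sketch(int64_t t1, int64_t t2)
    {
        return ((uint64_t)t1 < (uint64_t)t2) ? t1 : t2;
    }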
diff --git a/audio/audio.c b/audio/audio.c
index 02bb8861f8..af4cdf60e7 100644
--- a/audio/audio.c
+++ b/audio/audio.c
@@ -1124,10 +1124,10 @@ static int audio_is_timer_needed (void)
static void audio_reset_timer (AudioState *s)
{
if (audio_is_timer_needed ()) {
- qemu_mod_timer (s->ts, qemu_get_clock_ns (vm_clock) + 1);
+ timer_mod (s->ts, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 1);
}
else {
- qemu_del_timer (s->ts);
+ timer_del (s->ts);
}
}
@@ -1834,7 +1834,7 @@ static void audio_init (void)
QLIST_INIT (&s->cap_head);
atexit (audio_atexit);
- s->ts = qemu_new_timer_ns (vm_clock, audio_timer, s);
+ s->ts = timer_new_ns(QEMU_CLOCK_VIRTUAL, audio_timer, s);
if (!s->ts) {
hw_error("Could not create audio timer\n");
}
diff --git a/audio/noaudio.c b/audio/noaudio.c
index 9f23aa2cb3..cb386620ae 100644
--- a/audio/noaudio.c
+++ b/audio/noaudio.c
@@ -46,7 +46,7 @@ static int no_run_out (HWVoiceOut *hw, int live)
int64_t ticks;
int64_t bytes;
- now = qemu_get_clock_ns (vm_clock);
+ now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
ticks = now - no->old_ticks;
bytes = muldiv64 (ticks, hw->info.bytes_per_second, get_ticks_per_sec ());
bytes = audio_MIN (bytes, INT_MAX);
@@ -102,7 +102,7 @@ static int no_run_in (HWVoiceIn *hw)
int samples = 0;
if (dead) {
- int64_t now = qemu_get_clock_ns (vm_clock);
+ int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
int64_t ticks = now - no->old_ticks;
int64_t bytes =
muldiv64 (ticks, hw->info.bytes_per_second, get_ticks_per_sec ());
diff --git a/audio/spiceaudio.c b/audio/spiceaudio.c
index bc24557de4..5af436c31d 100644
--- a/audio/spiceaudio.c
+++ b/audio/spiceaudio.c
@@ -81,7 +81,7 @@ static void spice_audio_fini (void *opaque)
static void rate_start (SpiceRateCtl *rate)
{
memset (rate, 0, sizeof (*rate));
- rate->start_ticks = qemu_get_clock_ns (vm_clock);
+ rate->start_ticks = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
}
static int rate_get_samples (struct audio_pcm_info *info, SpiceRateCtl *rate)
@@ -91,7 +91,7 @@ static int rate_get_samples (struct audio_pcm_info *info, SpiceRateCtl *rate)
int64_t bytes;
int64_t samples;
- now = qemu_get_clock_ns (vm_clock);
+ now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
ticks = now - rate->start_ticks;
bytes = muldiv64 (ticks, info->bytes_per_second, get_ticks_per_sec ());
samples = (bytes - rate->bytes_sent) >> info->shift;
diff --git a/audio/wavaudio.c b/audio/wavaudio.c
index 950fa8f19c..6846a1a9f7 100644
--- a/audio/wavaudio.c
+++ b/audio/wavaudio.c
@@ -52,7 +52,7 @@ static int wav_run_out (HWVoiceOut *hw, int live)
int rpos, decr, samples;
uint8_t *dst;
struct st_sample *src;
- int64_t now = qemu_get_clock_ns (vm_clock);
+ int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
int64_t ticks = now - wav->old_ticks;
int64_t bytes =
muldiv64 (ticks, hw->info.bytes_per_second, get_ticks_per_sec ());
diff --git a/backends/baum.c b/backends/baum.c
index 62aa784436..1132899026 100644
--- a/backends/baum.c
+++ b/backends/baum.c
@@ -314,9 +314,9 @@ static int baum_eat_packet(BaumDriverState *baum, const uint8_t *buf, int len)
return 0; \
if (*cur++ != ESC) { \
DPRINTF("Broken packet %#2x, tossing\n", req); \
- if (qemu_timer_pending(baum->cellCount_timer)) { \
- qemu_del_timer(baum->cellCount_timer); \
- baum_cellCount_timer_cb(baum); \
+ if (timer_pending(baum->cellCount_timer)) { \
+ timer_del(baum->cellCount_timer); \
+ baum_cellCount_timer_cb(baum); \
} \
return (cur - 2 - buf); \
} \
@@ -334,7 +334,7 @@ static int baum_eat_packet(BaumDriverState *baum, const uint8_t *buf, int len)
int i;
/* Allow 100ms to complete the DisplayData packet */
- qemu_mod_timer(baum->cellCount_timer, qemu_get_clock_ns(vm_clock) +
+ timer_mod(baum->cellCount_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
get_ticks_per_sec() / 10);
for (i = 0; i < baum->x * baum->y ; i++) {
EAT(c);
@@ -348,7 +348,7 @@ static int baum_eat_packet(BaumDriverState *baum, const uint8_t *buf, int len)
c = '?';
text[i] = c;
}
- qemu_del_timer(baum->cellCount_timer);
+ timer_del(baum->cellCount_timer);
memset(zero, 0, sizeof(zero));
@@ -553,7 +553,7 @@ static void baum_close(struct CharDriverState *chr)
{
BaumDriverState *baum = chr->opaque;
- qemu_free_timer(baum->cellCount_timer);
+ timer_free(baum->cellCount_timer);
if (baum->brlapi) {
brlapi__closeConnection(baum->brlapi);
g_free(baum->brlapi);
@@ -588,7 +588,7 @@ CharDriverState *chr_baum_init(void)
goto fail_handle;
}
- baum->cellCount_timer = qemu_new_timer_ns(vm_clock, baum_cellCount_timer_cb, baum);
+ baum->cellCount_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, baum_cellCount_timer_cb, baum);
if (brlapi__getDisplaySize(handle, &baum->x, &baum->y) == -1) {
brlapi_perror("baum_init: brlapi_getDisplaySize");
@@ -614,7 +614,7 @@ CharDriverState *chr_baum_init(void)
return chr;
fail:
- qemu_free_timer(baum->cellCount_timer);
+ timer_free(baum->cellCount_timer);
brlapi__closeConnection(handle);
fail_handle:
g_free(handle);
diff --git a/block.c b/block.c
index 45a545bf10..a387c1ad68 100644
--- a/block.c
+++ b/block.c
@@ -130,8 +130,8 @@ void bdrv_io_limits_disable(BlockDriverState *bs)
do {} while (qemu_co_enter_next(&bs->throttled_reqs));
if (bs->block_timer) {
- qemu_del_timer(bs->block_timer);
- qemu_free_timer(bs->block_timer);
+ timer_del(bs->block_timer);
+ timer_free(bs->block_timer);
bs->block_timer = NULL;
}
@@ -148,7 +148,7 @@ static void bdrv_block_timer(void *opaque)
void bdrv_io_limits_enable(BlockDriverState *bs)
{
- bs->block_timer = qemu_new_timer_ns(vm_clock, bdrv_block_timer, bs);
+ bs->block_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, bdrv_block_timer, bs);
bs->io_limits_enabled = true;
}
@@ -180,8 +180,8 @@ static void bdrv_io_limits_intercept(BlockDriverState *bs,
*/
while (bdrv_exceed_io_limits(bs, nb_sectors, is_write, &wait_time)) {
- qemu_mod_timer(bs->block_timer,
- wait_time + qemu_get_clock_ns(vm_clock));
+ timer_mod(bs->block_timer,
+ wait_time + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
qemu_co_queue_wait_insert_head(&bs->throttled_reqs);
}
@@ -706,6 +706,7 @@ static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file,
bs->open_flags = flags;
bs->buffer_alignment = 512;
+ bs->zero_beyond_eof = true;
open_flags = bdrv_open_flags(bs, flags);
bs->read_only = !(open_flags & BDRV_O_RDWR);
@@ -1402,6 +1403,7 @@ void bdrv_close(BlockDriverState *bs)
bs->valid_key = 0;
bs->sg = 0;
bs->growable = 0;
+ bs->zero_beyond_eof = false;
QDECREF(bs->options);
bs->options = NULL;
@@ -2569,7 +2571,35 @@ static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
}
}
- ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
+ if (!(bs->zero_beyond_eof && bs->growable)) {
+ ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
+ } else {
+ /* Read zeros after EOF of growable BDSes */
+ int64_t len, total_sectors, max_nb_sectors;
+
+ len = bdrv_getlength(bs);
+ if (len < 0) {
+ ret = len;
+ goto out;
+ }
+
+ total_sectors = len >> BDRV_SECTOR_BITS;
+ max_nb_sectors = MAX(0, total_sectors - sector_num);
+ if (max_nb_sectors > 0) {
+ ret = drv->bdrv_co_readv(bs, sector_num,
+ MIN(nb_sectors, max_nb_sectors), qiov);
+ } else {
+ ret = 0;
+ }
+
+ /* Reading beyond end of file is supposed to produce zeroes */
+ if (ret == 0 && total_sectors < sector_num + nb_sectors) {
+ uint64_t offset = MAX(0, total_sectors - sector_num);
+ uint64_t bytes = (sector_num + nb_sectors - offset) *
+ BDRV_SECTOR_SIZE;
+ qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
+ }
+ }
out:
tracked_request_end(&req);
@@ -3717,7 +3747,7 @@ static bool bdrv_exceed_io_limits(BlockDriverState *bs, int nb_sectors,
double elapsed_time;
int bps_ret, iops_ret;
- now = qemu_get_clock_ns(vm_clock);
+ now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
if (now > bs->slice_end) {
bs->slice_start = now;
bs->slice_end = now + BLOCK_IO_SLICE_TIME;
@@ -3737,7 +3767,7 @@ static bool bdrv_exceed_io_limits(BlockDriverState *bs, int nb_sectors,
*wait = max_wait;
}
- now = qemu_get_clock_ns(vm_clock);
+ now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
if (bs->slice_end < now + max_wait) {
bs->slice_end = now + max_wait;
}
diff --git a/block/backup.c b/block/backup.c
index 6ae8a05a3e..e12b3b1461 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -272,9 +272,9 @@ static void coroutine_fn backup_run(void *opaque)
uint64_t delay_ns = ratelimit_calculate_delay(
&job->limit, job->sectors_read);
job->sectors_read = 0;
- block_job_sleep_ns(&job->common, rt_clock, delay_ns);
+ block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, delay_ns);
} else {
- block_job_sleep_ns(&job->common, rt_clock, 0);
+ block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, 0);
}
if (block_job_is_cancelled(&job->common)) {
diff --git a/block/commit.c b/block/commit.c
index 2227fc2e6c..51a1ab3678 100644
--- a/block/commit.c
+++ b/block/commit.c
@@ -103,7 +103,7 @@ wait:
/* Note that even when no rate limit is applied we need to yield
* with no pending I/O here so that bdrv_drain_all() returns.
*/
- block_job_sleep_ns(&s->common, rt_clock, delay_ns);
+ block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
if (block_job_is_cancelled(&s->common)) {
break;
}
diff --git a/block/iscsi.c b/block/iscsi.c
index 47a3adc9b5..2bbee1f6e5 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -960,7 +960,7 @@ static void iscsi_nop_timed_event(void *opaque)
return;
}
- qemu_mod_timer(iscsilun->nop_timer, qemu_get_clock_ms(rt_clock) + NOP_INTERVAL);
+ timer_mod(iscsilun->nop_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + NOP_INTERVAL);
iscsi_set_events(iscsilun);
}
#endif
@@ -1173,8 +1173,8 @@ static int iscsi_open(BlockDriverState *bs, QDict *options, int flags)
#if defined(LIBISCSI_FEATURE_NOP_COUNTER)
/* Set up a timer for sending out iSCSI NOPs */
- iscsilun->nop_timer = qemu_new_timer_ms(rt_clock, iscsi_nop_timed_event, iscsilun);
- qemu_mod_timer(iscsilun->nop_timer, qemu_get_clock_ms(rt_clock) + NOP_INTERVAL);
+ iscsilun->nop_timer = timer_new_ms(QEMU_CLOCK_REALTIME, iscsi_nop_timed_event, iscsilun);
+ timer_mod(iscsilun->nop_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + NOP_INTERVAL);
#endif
out:
@@ -1204,8 +1204,8 @@ static void iscsi_close(BlockDriverState *bs)
struct iscsi_context *iscsi = iscsilun->iscsi;
if (iscsilun->nop_timer) {
- qemu_del_timer(iscsilun->nop_timer);
- qemu_free_timer(iscsilun->nop_timer);
+ timer_del(iscsilun->nop_timer);
+ timer_free(iscsilun->nop_timer);
}
qemu_aio_set_fd_handler(iscsi_get_fd(iscsi), NULL, NULL, NULL);
iscsi_destroy_context(iscsi);
@@ -1267,8 +1267,8 @@ static int iscsi_create(const char *filename, QEMUOptionParameter *options)
goto out;
}
if (iscsilun->nop_timer) {
- qemu_del_timer(iscsilun->nop_timer);
- qemu_free_timer(iscsilun->nop_timer);
+ timer_del(iscsilun->nop_timer);
+ timer_free(iscsilun->nop_timer);
}
if (iscsilun->type != TYPE_DISK) {
ret = -ENODEV;
diff --git a/block/mirror.c b/block/mirror.c
index bed4a7eadd..86de4582b4 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -356,7 +356,7 @@ static void coroutine_fn mirror_run(void *opaque)
}
bdrv_dirty_iter_init(bs, &s->hbi);
- last_pause_ns = qemu_get_clock_ns(rt_clock);
+ last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
for (;;) {
uint64_t delay_ns;
int64_t cnt;
@@ -374,7 +374,7 @@ static void coroutine_fn mirror_run(void *opaque)
* We do so every SLICE_TIME nanoseconds, or when there is an error,
* or when the source is clean, whichever comes first.
*/
- if (qemu_get_clock_ns(rt_clock) - last_pause_ns < SLICE_TIME &&
+ if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - last_pause_ns < SLICE_TIME &&
s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
if (s->in_flight == MAX_IN_FLIGHT || s->buf_free_count == 0 ||
(cnt == 0 && s->in_flight > 0)) {
@@ -439,13 +439,13 @@ static void coroutine_fn mirror_run(void *opaque)
delay_ns = 0;
}
- block_job_sleep_ns(&s->common, rt_clock, delay_ns);
+ block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
if (block_job_is_cancelled(&s->common)) {
break;
}
} else if (!should_complete) {
delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
- block_job_sleep_ns(&s->common, rt_clock, delay_ns);
+ block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
} else if (cnt == 0) {
/* The two disks are in sync. Exit and report successful
* completion.
@@ -454,7 +454,7 @@ static void coroutine_fn mirror_run(void *opaque)
s->common.cancelled = false;
break;
}
- last_pause_ns = qemu_get_clock_ns(rt_clock);
+ last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
}
immediate_exit:
diff --git a/block/qcow2.c b/block/qcow2.c
index 3376901bd7..78097e5173 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -1402,7 +1402,7 @@ static int qcow2_create(const char *filename, QEMUOptionParameter *options)
int flags = 0;
size_t cluster_size = DEFAULT_CLUSTER_SIZE;
int prealloc = 0;
- int version = 2;
+ int version = 3;
/* Read out options */
while (options && options->name) {
@@ -1722,12 +1722,15 @@ static int qcow2_load_vmstate(BlockDriverState *bs, uint8_t *buf,
{
BDRVQcowState *s = bs->opaque;
int growable = bs->growable;
+ bool zero_beyond_eof = bs->zero_beyond_eof;
int ret;
BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_LOAD);
bs->growable = 1;
+ bs->zero_beyond_eof = false;
ret = bdrv_pread(bs, qcow2_vm_state_offset(s) + pos, buf, size);
bs->growable = growable;
+ bs->zero_beyond_eof = zero_beyond_eof;
return ret;
}
diff --git a/block/qed.c b/block/qed.c
index f767b0528c..cc904c4834 100644
--- a/block/qed.c
+++ b/block/qed.c
@@ -353,10 +353,10 @@ static void qed_start_need_check_timer(BDRVQEDState *s)
{
trace_qed_start_need_check_timer(s);
- /* Use vm_clock so we don't alter the image file while suspended for
+ /* Use QEMU_CLOCK_VIRTUAL so we don't alter the image file while suspended for
* migration.
*/
- qemu_mod_timer(s->need_check_timer, qemu_get_clock_ns(vm_clock) +
+ timer_mod(s->need_check_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
get_ticks_per_sec() * QED_NEED_CHECK_TIMEOUT);
}
@@ -364,7 +364,7 @@ static void qed_start_need_check_timer(BDRVQEDState *s)
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
trace_qed_cancel_need_check_timer(s);
- qemu_del_timer(s->need_check_timer);
+ timer_del(s->need_check_timer);
}
static void bdrv_qed_rebind(BlockDriverState *bs)
@@ -494,7 +494,7 @@ static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags)
}
}
- s->need_check_timer = qemu_new_timer_ns(vm_clock,
+ s->need_check_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
qed_need_check_timer_cb, s);
out:
@@ -518,7 +518,7 @@ static void bdrv_qed_close(BlockDriverState *bs)
BDRVQEDState *s = bs->opaque;
qed_cancel_need_check_timer(s);
- qemu_free_timer(s->need_check_timer);
+ timer_free(s->need_check_timer);
/* Ensure writes reach stable storage */
bdrv_flush(bs->file);
diff --git a/block/stream.c b/block/stream.c
index db49b4d85f..99821252b1 100644
--- a/block/stream.c
+++ b/block/stream.c
@@ -114,7 +114,7 @@ wait:
/* Note that even when no rate limit is applied we need to yield
* with no pending I/O here so that bdrv_drain_all() returns.
*/
- block_job_sleep_ns(&s->common, rt_clock, delay_ns);
+ block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
if (block_job_is_cancelled(&s->common)) {
break;
}
diff --git a/block/vmdk.c b/block/vmdk.c
index 346bb5cad9..63b489d29e 100644
--- a/block/vmdk.c
+++ b/block/vmdk.c
@@ -401,6 +401,14 @@ static int vmdk_add_extent(BlockDriverState *bs,
error_report("invalid granularity, image may be corrupt");
return -EINVAL;
}
+ if (l1_size > 512 * 1024 * 1024) {
+ /* Although with big capacity and small l1_entry_sectors, we can get a
+ * big l1_size, we don't want unbounded value to allocate the table.
+ * Limit it to 512M, which is 16PB for default cluster and L2 table
+ * size */
+ error_report("L1 size too big");
+ return -EFBIG;
+ }
s->extents = g_realloc(s->extents,
(s->num_extents + 1) * sizeof(VmdkExtent));
@@ -473,9 +481,9 @@ static int vmdk_init_tables(BlockDriverState *bs, VmdkExtent *extent)
return ret;
}
-static int vmdk_open_vmdk3(BlockDriverState *bs,
- BlockDriverState *file,
- int flags)
+static int vmdk_open_vmfs_sparse(BlockDriverState *bs,
+ BlockDriverState *file,
+ int flags)
{
int ret;
uint32_t magic;
@@ -486,14 +494,14 @@ static int vmdk_open_vmdk3(BlockDriverState *bs,
if (ret < 0) {
return ret;
}
-
- ret = vmdk_add_extent(bs,
- bs->file, false,
- le32_to_cpu(header.disk_sectors),
- le32_to_cpu(header.l1dir_offset) << 9,
- 0, 1 << 6, 1 << 9,
- le32_to_cpu(header.granularity),
- &extent);
+ ret = vmdk_add_extent(bs, file, false,
+ le32_to_cpu(header.disk_sectors),
+ le32_to_cpu(header.l1dir_offset) << 9,
+ 0,
+ le32_to_cpu(header.l1dir_size),
+ 4096,
+ le32_to_cpu(header.granularity),
+ &extent);
if (ret < 0) {
return ret;
}
@@ -598,14 +606,6 @@ static int vmdk_open_vmdk4(BlockDriverState *bs,
}
l1_size = (le64_to_cpu(header.capacity) + l1_entry_sectors - 1)
/ l1_entry_sectors;
- if (l1_size > 512 * 1024 * 1024) {
- /* although with big capacity and small l1_entry_sectors, we can get a
- * big l1_size, we don't want unbounded value to allocate the table.
- * Limit it to 512M, which is 16PB for default cluster and L2 table
- * size */
- error_report("L1 size too big");
- return -EFBIG;
- }
if (le32_to_cpu(header.flags) & VMDK4_FLAG_RGD) {
l1_backup_offset = le64_to_cpu(header.rgd_offset) << 9;
}
@@ -674,7 +674,7 @@ static int vmdk_open_sparse(BlockDriverState *bs,
magic = be32_to_cpu(magic);
switch (magic) {
case VMDK3_MAGIC:
- return vmdk_open_vmdk3(bs, file, flags);
+ return vmdk_open_vmfs_sparse(bs, file, flags);
break;
case VMDK4_MAGIC:
return vmdk_open_vmdk4(bs, file, flags);
@@ -718,7 +718,8 @@ static int vmdk_parse_extents(const char *desc, BlockDriverState *bs,
}
if (sectors <= 0 ||
- (strcmp(type, "FLAT") && strcmp(type, "SPARSE")) ||
+ (strcmp(type, "FLAT") && strcmp(type, "SPARSE") &&
+ strcmp(type, "VMFS") && strcmp(type, "VMFSSPARSE")) ||
(strcmp(access, "RW"))) {
goto next_line;
}
@@ -731,7 +732,7 @@ static int vmdk_parse_extents(const char *desc, BlockDriverState *bs,
}
/* save to extents array */
- if (!strcmp(type, "FLAT")) {
+ if (!strcmp(type, "FLAT") || !strcmp(type, "VMFS")) {
/* FLAT extent */
VmdkExtent *extent;
@@ -741,8 +742,8 @@ static int vmdk_parse_extents(const char *desc, BlockDriverState *bs,
return ret;
}
extent->flat_start_offset = flat_offset << 9;
- } else if (!strcmp(type, "SPARSE")) {
- /* SPARSE extent */
+ } else if (!strcmp(type, "SPARSE") || !strcmp(type, "VMFSSPARSE")) {
+ /* SPARSE extent and VMFSSPARSE extent are both "COWD" sparse file*/
ret = vmdk_open_sparse(bs, extent_file, bs->open_flags);
if (ret) {
bdrv_delete(extent_file);
@@ -789,6 +790,8 @@ static int vmdk_open_desc_file(BlockDriverState *bs, int flags,
goto exit;
}
if (strcmp(ct, "monolithicFlat") &&
+ strcmp(ct, "vmfs") &&
+ strcmp(ct, "vmfsSparse") &&
strcmp(ct, "twoGbMaxExtentSparse") &&
strcmp(ct, "twoGbMaxExtentFlat")) {
fprintf(stderr,
@@ -1381,7 +1384,6 @@ static int coroutine_fn vmdk_co_write_zeroes(BlockDriverState *bs,
return ret;
}
-
static int vmdk_create_extent(const char *filename, int64_t filesize,
bool flat, bool compress, bool zeroed_grain)
{
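
Note on the vmdk.c change above: the 512M-entry bound on the L1 table is moved from vmdk_open_vmdk4() into vmdk_add_extent() so that it also covers the new VMFS sparse path. The "16PB" in the comment follows from the usual VMDK defaults (an assumption here: 512 L2 entries per table and 64 KiB clusters): each L1 entry then covers 512 x 64 KiB = 32 MiB, and 512M entries x 32 MiB = 2^29 x 2^25 bytes = 2^54 bytes = 16 PiB.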
diff --git a/block/win32-aio.c b/block/win32-aio.c
index fcb7c754da..5d1d199b61 100644
--- a/block/win32-aio.c
+++ b/block/win32-aio.c
@@ -105,13 +105,6 @@ static void win32_aio_completion_cb(EventNotifier *e)
}
}
-static int win32_aio_flush_cb(EventNotifier *e)
-{
- QEMUWin32AIOState *s = container_of(e, QEMUWin32AIOState, e);
-
- return (s->count > 0) ? 1 : 0;
-}
-
static void win32_aio_cancel(BlockDriverAIOCB *blockacb)
{
QEMUWin32AIOCB *waiocb = (QEMUWin32AIOCB *)blockacb;
@@ -201,8 +194,7 @@ QEMUWin32AIOState *win32_aio_init(void)
goto out_close_efd;
}
- qemu_aio_set_event_notifier(&s->e, win32_aio_completion_cb,
- win32_aio_flush_cb);
+ qemu_aio_set_event_notifier(&s->e, win32_aio_completion_cb);
return s;
diff --git a/blockdev.c b/blockdev.c
index bc7016a8d4..121520ecbc 100644
--- a/blockdev.c
+++ b/blockdev.c
@@ -487,7 +487,11 @@ static DriveInfo *blockdev_init(QemuOpts *all_opts,
drv = bdrv_find_whitelisted_format(buf, ro);
if (!drv) {
- error_report("'%s' invalid format", buf);
+ if (!ro && bdrv_find_whitelisted_format(buf, !ro)) {
+ error_report("'%s' can be only used as read-only device.", buf);
+ } else {
+ error_report("'%s' invalid format", buf);
+ }
return NULL;
}
}
@@ -1295,7 +1299,7 @@ void qmp_block_set_io_throttle(const char *device, int64_t bps, int64_t bps_rd,
bdrv_io_limits_disable(bs);
} else {
if (bs->block_timer) {
- qemu_mod_timer(bs->block_timer, qemu_get_clock_ns(vm_clock));
+ timer_mod(bs->block_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
}
}
}
diff --git a/blockjob.c b/blockjob.c
index ca80df1d0e..7edc945119 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -187,7 +187,7 @@ int block_job_cancel_sync(BlockJob *job)
return (data.cancelled && data.ret == 0) ? -ECANCELED : data.ret;
}
-void block_job_sleep_ns(BlockJob *job, QEMUClock *clock, int64_t ns)
+void block_job_sleep_ns(BlockJob *job, QEMUClockType type, int64_t ns)
{
assert(job->busy);
@@ -200,7 +200,7 @@ void block_job_sleep_ns(BlockJob *job, QEMUClock *clock, int64_t ns)
if (block_job_is_paused(job)) {
qemu_coroutine_yield();
} else {
- co_sleep_ns(clock, ns);
+ co_sleep_ns(type, ns);
}
job->busy = true;
}
diff --git a/configure b/configure
index 18fa60824b..0a55c20252 100755
--- a/configure
+++ b/configure
@@ -2818,6 +2818,37 @@ if compile_prog "" "" ; then
dup3=yes
fi
+# check for ppoll support
+ppoll=no
+cat > $TMPC << EOF
+#include <poll.h>
+
+int main(void)
+{
+ struct pollfd pfd = { .fd = 0, .events = 0, .revents = 0 };
+ ppoll(&pfd, 1, 0, 0);
+ return 0;
+}
+EOF
+if compile_prog "" "" ; then
+ ppoll=yes
+fi
+
+# check for prctl(PR_SET_TIMERSLACK , ... ) support
+prctl_pr_set_timerslack=no
+cat > $TMPC << EOF
+#include <sys/prctl.h>
+
+int main(void)
+{
+ prctl(PR_SET_TIMERSLACK, 1, 0, 0, 0);
+ return 0;
+}
+EOF
+if compile_prog "" "" ; then
+ prctl_pr_set_timerslack=yes
+fi
+
# check for epoll support
epoll=no
cat > $TMPC << EOF
@@ -3814,6 +3845,12 @@ fi
if test "$dup3" = "yes" ; then
echo "CONFIG_DUP3=y" >> $config_host_mak
fi
+if test "$ppoll" = "yes" ; then
+ echo "CONFIG_PPOLL=y" >> $config_host_mak
+fi
+if test "$prctl_pr_set_timerslack" = "yes" ; then
+ echo "CONFIG_PRCTL_PR_SET_TIMERSLACK=y" >> $config_host_mak
+fi
if test "$epoll" = "yes" ; then
echo "CONFIG_EPOLL=y" >> $config_host_mak
fi
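
Note on the configure change above: CONFIG_PPOLL and CONFIG_PRCTL_PR_SET_TIMERSLACK gate the nanosecond-resolution polling used elsewhere in this series. The sketch below shows how such a poll wrapper can use ppoll() when available and fall back to g_poll() otherwise; the name qemu_poll_ns appears in the aio-posix.c hunk, but its exact body here is an assumption, not the patch's implementation.

    #include <poll.h>
    #include <glib.h>

    int qemu_poll_ns(GPollFD *fds, unsigned nfds, int64_t timeout_ns)
    {
    #ifdef CONFIG_PPOLL
        if (timeout_ns < 0) {
            /* no deadline: block until an fd becomes ready */
            return ppoll((struct pollfd *)fds, nfds, NULL, NULL);
        } else {
            struct timespec ts = {
                .tv_sec  = timeout_ns / 1000000000LL,
                .tv_nsec = timeout_ns % 1000000000LL,
            };
            return ppoll((struct pollfd *)fds, nfds, &ts, NULL);
        }
    #else
        /* millisecond fallback; see the rounding sketch after aio-win32.c */
        return g_poll(fds, nfds, qemu_timeout_ns_to_ms(timeout_ns));
    #endif
    }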
diff --git a/cpus.c b/cpus.c
index 70cc6171e2..b9e5685e16 100644
--- a/cpus.c
+++ b/cpus.c
@@ -207,7 +207,7 @@ static void icount_adjust(void)
return;
}
cur_time = cpu_get_clock();
- cur_icount = qemu_get_clock_ns(vm_clock);
+ cur_icount = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
delta = cur_icount - cur_time;
/* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
if (delta > 0
@@ -228,15 +228,16 @@ static void icount_adjust(void)
static void icount_adjust_rt(void *opaque)
{
- qemu_mod_timer(icount_rt_timer,
- qemu_get_clock_ms(rt_clock) + 1000);
+ timer_mod(icount_rt_timer,
+ qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
icount_adjust();
}
static void icount_adjust_vm(void *opaque)
{
- qemu_mod_timer(icount_vm_timer,
- qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
+ timer_mod(icount_vm_timer,
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
+ get_ticks_per_sec() / 10);
icount_adjust();
}
@@ -252,22 +253,22 @@ static void icount_warp_rt(void *opaque)
}
if (runstate_is_running()) {
- int64_t clock = qemu_get_clock_ns(rt_clock);
+ int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
int64_t warp_delta = clock - vm_clock_warp_start;
if (use_icount == 1) {
qemu_icount_bias += warp_delta;
} else {
/*
- * In adaptive mode, do not let the vm_clock run too
+ * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
* far ahead of real time.
*/
int64_t cur_time = cpu_get_clock();
- int64_t cur_icount = qemu_get_clock_ns(vm_clock);
+ int64_t cur_icount = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
int64_t delta = cur_time - cur_icount;
qemu_icount_bias += MIN(warp_delta, delta);
}
- if (qemu_clock_expired(vm_clock)) {
- qemu_notify_event();
+ if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
+ qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}
}
vm_clock_warp_start = -1;
@@ -275,19 +276,19 @@ static void icount_warp_rt(void *opaque)
void qtest_clock_warp(int64_t dest)
{
- int64_t clock = qemu_get_clock_ns(vm_clock);
+ int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
assert(qtest_enabled());
while (clock < dest) {
- int64_t deadline = qemu_clock_deadline(vm_clock);
+ int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
int64_t warp = MIN(dest - clock, deadline);
qemu_icount_bias += warp;
- qemu_run_timers(vm_clock);
- clock = qemu_get_clock_ns(vm_clock);
+ qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
+ clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
}
- qemu_notify_event();
+ qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}
-void qemu_clock_warp(QEMUClock *clock)
+void qemu_clock_warp(QEMUClockType type)
{
int64_t deadline;
@@ -296,20 +297,20 @@ void qemu_clock_warp(QEMUClock *clock)
* applicable to other clocks. But a clock argument removes the
* need for if statements all over the place.
*/
- if (clock != vm_clock || !use_icount) {
+ if (type != QEMU_CLOCK_VIRTUAL || !use_icount) {
return;
}
/*
- * If the CPUs have been sleeping, advance the vm_clock timer now. This
- * ensures that the deadline for the timer is computed correctly below.
+ * If the CPUs have been sleeping, advance QEMU_CLOCK_VIRTUAL timer now.
+ * This ensures that the deadline for the timer is computed correctly below.
* This also makes sure that the insn counter is synchronized before the
* CPU starts running, in case the CPU is woken by an event other than
- * the earliest vm_clock timer.
+ * the earliest QEMU_CLOCK_VIRTUAL timer.
*/
icount_warp_rt(NULL);
- if (!all_cpu_threads_idle() || !qemu_clock_has_timers(vm_clock)) {
- qemu_del_timer(icount_warp_timer);
+ if (!all_cpu_threads_idle() || !qemu_clock_has_timers(QEMU_CLOCK_VIRTUAL)) {
+ timer_del(icount_warp_timer);
return;
}
@@ -318,28 +319,40 @@ void qemu_clock_warp(QEMUClock *clock)
return;
}
- vm_clock_warp_start = qemu_get_clock_ns(rt_clock);
- deadline = qemu_clock_deadline(vm_clock);
+ vm_clock_warp_start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
+ /* We want to use the earliest deadline from ALL vm_clocks */
+ deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
+
+ /* Maintain prior (possibly buggy) behaviour where if no deadline
+ * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
+ * INT32_MAX nanoseconds ahead, we still use INT32_MAX
+ * nanoseconds.
+ */
+ if ((deadline < 0) || (deadline > INT32_MAX)) {
+ deadline = INT32_MAX;
+ }
+
if (deadline > 0) {
/*
- * Ensure the vm_clock proceeds even when the virtual CPU goes to
+ * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
* sleep. Otherwise, the CPU might be waiting for a future timer
* interrupt to wake it up, but the interrupt never comes because
* the vCPU isn't running any insns and thus doesn't advance the
- * vm_clock.
+ * QEMU_CLOCK_VIRTUAL.
*
* An extreme solution for this problem would be to never let VCPUs
- * sleep in icount mode if there is a pending vm_clock timer; rather
- * time could just advance to the next vm_clock event. Instead, we
- * do stop VCPUs and only advance vm_clock after some "real" time,
- * (related to the time left until the next event) has passed. This
- * rt_clock timer will do this. This avoids that the warps are too
- * visible externally---for example, you will not be sending network
- * packets continuously instead of every 100ms.
+ * sleep in icount mode if there is a pending QEMU_CLOCK_VIRTUAL
+ * timer; rather time could just advance to the next QEMU_CLOCK_VIRTUAL
+ * event. Instead, we do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL
+ * after some "real" time (related to the time left until the next
+ * event) has passed. The QEMU_CLOCK_REALTIME timer will do this.
+ * This avoids that the warps are visible externally; for example,
+ * you will not be sending network packets continuously instead of
+ * every 100ms.
*/
- qemu_mod_timer(icount_warp_timer, vm_clock_warp_start + deadline);
- } else {
- qemu_notify_event();
+ timer_mod(icount_warp_timer, vm_clock_warp_start + deadline);
+ } else if (deadline == 0) {
+ qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}
}
@@ -363,7 +376,8 @@ void configure_icount(const char *option)
return;
}
- icount_warp_timer = qemu_new_timer_ns(rt_clock, icount_warp_rt, NULL);
+ icount_warp_timer = timer_new_ns(QEMU_CLOCK_REALTIME,
+ icount_warp_rt, NULL);
if (strcmp(option, "auto") != 0) {
icount_time_shift = strtol(option, NULL, 0);
use_icount = 1;
@@ -381,12 +395,15 @@ void configure_icount(const char *option)
the virtual time trigger catches emulated time passing too fast.
Realtime triggers occur even when idle, so use them less frequently
than VM triggers. */
- icount_rt_timer = qemu_new_timer_ms(rt_clock, icount_adjust_rt, NULL);
- qemu_mod_timer(icount_rt_timer,
- qemu_get_clock_ms(rt_clock) + 1000);
- icount_vm_timer = qemu_new_timer_ns(vm_clock, icount_adjust_vm, NULL);
- qemu_mod_timer(icount_vm_timer,
- qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
+ icount_rt_timer = timer_new_ms(QEMU_CLOCK_REALTIME,
+ icount_adjust_rt, NULL);
+ timer_mod(icount_rt_timer,
+ qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
+ icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+ icount_adjust_vm, NULL);
+ timer_mod(icount_vm_timer,
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
+ get_ticks_per_sec() / 10);
}
/***********************************************************/
@@ -735,7 +752,7 @@ static void qemu_tcg_wait_io_event(void)
while (all_cpu_threads_idle()) {
/* Start accounting real time to the virtual clock if the CPUs
are idle. */
- qemu_clock_warp(vm_clock);
+ qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
}
@@ -866,8 +883,13 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
while (1) {
tcg_exec_all();
- if (use_icount && qemu_clock_deadline(vm_clock) <= 0) {
- qemu_notify_event();
+
+ if (use_icount) {
+ int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
+
+ if (deadline == 0) {
+ qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
+ }
}
qemu_tcg_wait_io_event();
}
@@ -985,7 +1007,7 @@ void pause_all_vcpus(void)
{
CPUState *cpu = first_cpu;
- qemu_clock_enable(vm_clock, false);
+ qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
while (cpu) {
cpu->stop = true;
qemu_cpu_kick(cpu);
@@ -1026,7 +1048,7 @@ void resume_all_vcpus(void)
{
CPUState *cpu = first_cpu;
- qemu_clock_enable(vm_clock, true);
+ qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
while (cpu) {
cpu_resume(cpu);
cpu = cpu->next_cpu;
@@ -1145,11 +1167,23 @@ static int tcg_cpu_exec(CPUArchState *env)
#endif
if (use_icount) {
int64_t count;
+ int64_t deadline;
int decr;
qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
env->icount_decr.u16.low = 0;
env->icount_extra = 0;
- count = qemu_icount_round(qemu_clock_deadline(vm_clock));
+ deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
+
+ /* Maintain prior (possibly buggy) behaviour where if no deadline
+ * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
+ * INT32_MAX nanoseconds ahead, we still use INT32_MAX
+ * nanoseconds.
+ */
+ if ((deadline < 0) || (deadline > INT32_MAX)) {
+ deadline = INT32_MAX;
+ }
+
+ count = qemu_icount_round(deadline);
qemu_icount += count;
decr = (count > 0xffff) ? 0xffff : count;
count -= decr;
@@ -1175,8 +1209,8 @@ static void tcg_exec_all(void)
{
int r;
- /* Account partial waits to the vm_clock. */
- qemu_clock_warp(vm_clock);
+ /* Account partial waits to QEMU_CLOCK_VIRTUAL. */
+ qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
if (next_cpu == NULL) {
next_cpu = first_cpu;
@@ -1185,7 +1219,7 @@ static void tcg_exec_all(void)
CPUState *cpu = next_cpu;
CPUArchState *env = cpu->env_ptr;
- qemu_clock_enable(vm_clock,
+ qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
(cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
if (cpu_can_run(cpu)) {
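
Note on the cpus.c changes above: both the icount warp path and tcg_cpu_exec() clamp the all-timerlist deadline the same way. A hypothetical helper capturing that shared logic (not part of the patch): the clamp to INT32_MAX preserves the earlier behaviour when no QEMU_CLOCK_VIRTUAL timer exists, in which case qemu_clock_deadline_ns_all() returns a negative value.

    static int64_t icount_deadline_clamped(void)
    {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
        if (deadline < 0 || deadline > INT32_MAX) {
            deadline = INT32_MAX;   /* no timer pending, or too far ahead */
        }
        return deadline;
    }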
diff --git a/dma-helpers.c b/dma-helpers.c
index 499550fc23..c9620a5bbd 100644
--- a/dma-helpers.c
+++ b/dma-helpers.c
@@ -11,6 +11,7 @@
#include "trace.h"
#include "qemu/range.h"
#include "qemu/thread.h"
+#include "qemu/main-loop.h"
/* #define DEBUG_IOMMU */
diff --git a/hmp.c b/hmp.c
index c45514b6b1..fcca6aea8f 100644
--- a/hmp.c
+++ b/hmp.c
@@ -1195,13 +1195,13 @@ static void hmp_migrate_status_cb(void *opaque)
monitor_flush(status->mon);
}
- qemu_mod_timer(status->timer, qemu_get_clock_ms(rt_clock) + 1000);
+ timer_mod(status->timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
} else {
if (status->is_block_migration) {
monitor_printf(status->mon, "\n");
}
monitor_resume(status->mon);
- qemu_del_timer(status->timer);
+ timer_del(status->timer);
g_free(status);
}
@@ -1235,9 +1235,9 @@ void hmp_migrate(Monitor *mon, const QDict *qdict)
status = g_malloc0(sizeof(*status));
status->mon = mon;
status->is_block_migration = blk || inc;
- status->timer = qemu_new_timer_ms(rt_clock, hmp_migrate_status_cb,
+ status->timer = timer_new_ms(QEMU_CLOCK_REALTIME, hmp_migrate_status_cb,
status);
- qemu_mod_timer(status->timer, qemu_get_clock_ms(rt_clock));
+ timer_mod(status->timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
}
}
diff --git a/hw/acpi/core.c b/hw/acpi/core.c
index b07fedac59..7467b88e27 100644
--- a/hw/acpi/core.c
+++ b/hw/acpi/core.c
@@ -433,9 +433,9 @@ void acpi_pm_tmr_update(ACPIREGS *ar, bool enable)
if (enable) {
expire_time = muldiv64(ar->tmr.overflow_time, get_ticks_per_sec(),
PM_TIMER_FREQUENCY);
- qemu_mod_timer(ar->tmr.timer, expire_time);
+ timer_mod(ar->tmr.timer, expire_time);
} else {
- qemu_del_timer(ar->tmr.timer);
+ timer_del(ar->tmr.timer);
}
}
@@ -481,7 +481,7 @@ void acpi_pm_tmr_init(ACPIREGS *ar, acpi_update_sci_fn update_sci,
MemoryRegion *parent)
{
ar->tmr.update_sci = update_sci;
- ar->tmr.timer = qemu_new_timer_ns(vm_clock, acpi_pm_tmr_timer, ar);
+ ar->tmr.timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, acpi_pm_tmr_timer, ar);
memory_region_init_io(&ar->tmr.io, memory_region_owner(parent),
&acpi_pm_tmr_ops, ar, "acpi-tmr", 4);
memory_region_add_subregion(parent, 8, &ar->tmr.io);
@@ -490,7 +490,7 @@ void acpi_pm_tmr_init(ACPIREGS *ar, acpi_update_sci_fn update_sci,
void acpi_pm_tmr_reset(ACPIREGS *ar)
{
ar->tmr.overflow_time = 0;
- qemu_del_timer(ar->tmr.timer);
+ timer_del(ar->tmr.timer);
}
/* ACPI PM1aCNT */
diff --git a/hw/acpi/piix4.c b/hw/acpi/piix4.c
index c88569061c..613d98736a 100644
--- a/hw/acpi/piix4.c
+++ b/hw/acpi/piix4.c
@@ -263,7 +263,7 @@ static int acpi_load_old(QEMUFile *f, void *opaque, int version_id)
return ret;
}
- qemu_get_timer(f, s->ar.tmr.timer);
+ timer_get(f, s->ar.tmr.timer);
qemu_get_sbe64s(f, &s->ar.tmr.overflow_time);
qemu_get_be16s(f, (uint16_t *)s->ar.gpe.sts);
diff --git a/hw/alpha/typhoon.c b/hw/alpha/typhoon.c
index 245004530c..aac9a32e0c 100644
--- a/hw/alpha/typhoon.c
+++ b/hw/alpha/typhoon.c
@@ -833,7 +833,7 @@ PCIBus *typhoon_init(ram_addr_t ram_size, ISABus **isa_bus,
AlphaCPU *cpu = cpus[i];
s->cchip.cpu[i] = cpu;
if (cpu != NULL) {
- cpu->alarm_timer = qemu_new_timer_ns(vm_clock,
+ cpu->alarm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
typhoon_alarm_timer,
(void *)((uintptr_t)s + i));
}
diff --git a/hw/arm/omap1.c b/hw/arm/omap1.c
index b6a0b27b02..47511d2cae 100644
--- a/hw/arm/omap1.c
+++ b/hw/arm/omap1.c
@@ -99,7 +99,7 @@ struct omap_mpu_timer_s {
static inline uint32_t omap_timer_read(struct omap_mpu_timer_s *timer)
{
- uint64_t distance = qemu_get_clock_ns(vm_clock) - timer->time;
+ uint64_t distance = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - timer->time;
if (timer->st && timer->enable && timer->rate)
return timer->val - muldiv64(distance >> (timer->ptv + 1),
@@ -111,7 +111,7 @@ static inline uint32_t omap_timer_read(struct omap_mpu_timer_s *timer)
static inline void omap_timer_sync(struct omap_mpu_timer_s *timer)
{
timer->val = omap_timer_read(timer);
- timer->time = qemu_get_clock_ns(vm_clock);
+ timer->time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
}
static inline void omap_timer_update(struct omap_mpu_timer_s *timer)
@@ -130,11 +130,11 @@ static inline void omap_timer_update(struct omap_mpu_timer_s *timer)
* in a busy loop when it wants to sleep just a couple of CPU
* ticks. */
if (expires > (get_ticks_per_sec() >> 10) || timer->ar)
- qemu_mod_timer(timer->timer, timer->time + expires);
+ timer_mod(timer->timer, timer->time + expires);
else
qemu_bh_schedule(timer->tick);
} else
- qemu_del_timer(timer->timer);
+ timer_del(timer->timer);
}
static void omap_timer_fire(void *opaque)
@@ -240,7 +240,7 @@ static const MemoryRegionOps omap_mpu_timer_ops = {
static void omap_mpu_timer_reset(struct omap_mpu_timer_s *s)
{
- qemu_del_timer(s->timer);
+ timer_del(s->timer);
s->enable = 0;
s->reset_val = 31337;
s->val = 0;
@@ -259,7 +259,7 @@ static struct omap_mpu_timer_s *omap_mpu_timer_init(MemoryRegion *system_memory,
s->irq = irq;
s->clk = clk;
- s->timer = qemu_new_timer_ns(vm_clock, omap_timer_tick, s);
+ s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, omap_timer_tick, s);
s->tick = qemu_bh_new(omap_timer_fire, s);
omap_mpu_timer_reset(s);
omap_timer_clk_setup(s);
@@ -363,7 +363,7 @@ static const MemoryRegionOps omap_wd_timer_ops = {
static void omap_wd_timer_reset(struct omap_watchdog_timer_s *s)
{
- qemu_del_timer(s->timer.timer);
+ timer_del(s->timer.timer);
if (!s->mode)
omap_clk_get(s->timer.clk);
s->mode = 1;
@@ -388,7 +388,7 @@ static struct omap_watchdog_timer_s *omap_wd_timer_init(MemoryRegion *memory,
s->timer.irq = irq;
s->timer.clk = clk;
- s->timer.timer = qemu_new_timer_ns(vm_clock, omap_timer_tick, &s->timer);
+ s->timer.timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, omap_timer_tick, &s->timer);
omap_wd_timer_reset(s);
omap_timer_clk_setup(&s->timer);
@@ -475,7 +475,7 @@ static const MemoryRegionOps omap_os_timer_ops = {
static void omap_os_timer_reset(struct omap_32khz_timer_s *s)
{
- qemu_del_timer(s->timer.timer);
+ timer_del(s->timer.timer);
s->timer.enable = 0;
s->timer.it_ena = 0;
s->timer.reset_val = 0x00ffffff;
@@ -494,7 +494,7 @@ static struct omap_32khz_timer_s *omap_os_timer_init(MemoryRegion *memory,
s->timer.irq = irq;
s->timer.clk = clk;
- s->timer.timer = qemu_new_timer_ns(vm_clock, omap_timer_tick, &s->timer);
+ s->timer.timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, omap_timer_tick, &s->timer);
omap_os_timer_reset(s);
omap_timer_clk_setup(&s->timer);
@@ -600,7 +600,7 @@ static void omap_ulpd_pm_write(void *opaque, hwaddr addr,
case 0x10: /* GAUGING_CTRL */
/* Bits 0 and 1 seem to be confused in the OMAP 310 TRM */
if ((s->ulpd_pm_regs[addr >> 2] ^ value) & 1) {
- now = qemu_get_clock_ns(vm_clock);
+ now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
if (value & 1)
s->ulpd_gauge_start = now;
@@ -2881,7 +2881,7 @@ static void omap_rtc_tick(void *opaque)
if (s->auto_comp && !s->current_tm.tm_sec && !s->current_tm.tm_min)
s->tick += s->comp_reg * 1000 / 32768;
- qemu_mod_timer(s->clk, s->tick);
+ timer_mod(s->clk, s->tick);
}
static void omap_rtc_reset(struct omap_rtc_s *s)
@@ -2894,7 +2894,7 @@ static void omap_rtc_reset(struct omap_rtc_s *s)
s->pm_am = 0;
s->auto_comp = 0;
s->round = 0;
- s->tick = qemu_get_clock_ms(rtc_clock);
+ s->tick = qemu_clock_get_ms(rtc_clock);
memset(&s->alarm_tm, 0, sizeof(s->alarm_tm));
s->alarm_tm.tm_mday = 0x01;
s->status = 1 << 7;
@@ -2915,7 +2915,7 @@ static struct omap_rtc_s *omap_rtc_init(MemoryRegion *system_memory,
s->irq = timerirq;
s->alarm = alarmirq;
- s->clk = qemu_new_timer_ms(rtc_clock, omap_rtc_tick, s);
+ s->clk = timer_new_ms(rtc_clock, omap_rtc_tick, s);
omap_rtc_reset(s);
@@ -3009,7 +3009,7 @@ static void omap_mcbsp_source_tick(void *opaque)
s->rx_req = s->rx_rate << bps[(s->rcr[0] >> 5) & 7];
omap_mcbsp_rx_newdata(s);
- qemu_mod_timer(s->source_timer, qemu_get_clock_ns(vm_clock) +
+ timer_mod(s->source_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
get_ticks_per_sec());
}
@@ -3025,7 +3025,7 @@ static void omap_mcbsp_rx_start(struct omap_mcbsp_s *s)
static void omap_mcbsp_rx_stop(struct omap_mcbsp_s *s)
{
- qemu_del_timer(s->source_timer);
+ timer_del(s->source_timer);
}
static void omap_mcbsp_rx_done(struct omap_mcbsp_s *s)
@@ -3055,7 +3055,7 @@ static void omap_mcbsp_sink_tick(void *opaque)
s->tx_req = s->tx_rate << bps[(s->xcr[0] >> 5) & 7];
omap_mcbsp_tx_newdata(s);
- qemu_mod_timer(s->sink_timer, qemu_get_clock_ns(vm_clock) +
+ timer_mod(s->sink_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
get_ticks_per_sec());
}
@@ -3082,7 +3082,7 @@ static void omap_mcbsp_tx_stop(struct omap_mcbsp_s *s)
{
s->tx_req = 0;
omap_mcbsp_tx_done(s);
- qemu_del_timer(s->sink_timer);
+ timer_del(s->sink_timer);
}
static void omap_mcbsp_req_update(struct omap_mcbsp_s *s)
@@ -3432,8 +3432,8 @@ static void omap_mcbsp_reset(struct omap_mcbsp_s *s)
s->rx_req = 0;
s->tx_rate = 0;
s->rx_rate = 0;
- qemu_del_timer(s->source_timer);
- qemu_del_timer(s->sink_timer);
+ timer_del(s->source_timer);
+ timer_del(s->sink_timer);
}
static struct omap_mcbsp_s *omap_mcbsp_init(MemoryRegion *system_memory,
@@ -3448,8 +3448,8 @@ static struct omap_mcbsp_s *omap_mcbsp_init(MemoryRegion *system_memory,
s->rxirq = rxirq;
s->txdrq = dma[0];
s->rxdrq = dma[1];
- s->sink_timer = qemu_new_timer_ns(vm_clock, omap_mcbsp_sink_tick, s);
- s->source_timer = qemu_new_timer_ns(vm_clock, omap_mcbsp_source_tick, s);
+ s->sink_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, omap_mcbsp_sink_tick, s);
+ s->source_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, omap_mcbsp_source_tick, s);
omap_mcbsp_reset(s);
memory_region_init_io(&s->iomem, NULL, &omap_mcbsp_ops, s, "omap-mcbsp", 0x800);
@@ -3503,9 +3503,9 @@ static void omap_lpg_tick(void *opaque)
struct omap_lpg_s *s = opaque;
if (s->cycle)
- qemu_mod_timer(s->tm, qemu_get_clock_ms(vm_clock) + s->period - s->on);
+ timer_mod(s->tm, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + s->period - s->on);
else
- qemu_mod_timer(s->tm, qemu_get_clock_ms(vm_clock) + s->on);
+ timer_mod(s->tm, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + s->on);
s->cycle = !s->cycle;
printf("%s: LED is %s\n", __FUNCTION__, s->cycle ? "on" : "off");
@@ -3527,7 +3527,7 @@ static void omap_lpg_update(struct omap_lpg_s *s)
per[(s->control >> 3) & 7], 256) : 0; /* ONCTRL */
}
- qemu_del_timer(s->tm);
+ timer_del(s->tm);
if (on == period && s->on < s->period)
printf("%s: LED is on\n", __FUNCTION__);
else if (on == 0 && s->on)
@@ -3623,7 +3623,7 @@ static struct omap_lpg_s *omap_lpg_init(MemoryRegion *system_memory,
struct omap_lpg_s *s = (struct omap_lpg_s *)
g_malloc0(sizeof(struct omap_lpg_s));
- s->tm = qemu_new_timer_ms(vm_clock, omap_lpg_tick, s);
+ s->tm = timer_new_ms(QEMU_CLOCK_VIRTUAL, omap_lpg_tick, s);
omap_lpg_reset(s);
diff --git a/hw/arm/pxa2xx.c b/hw/arm/pxa2xx.c
index 17ddd3fab8..02b7016a04 100644
--- a/hw/arm/pxa2xx.c
+++ b/hw/arm/pxa2xx.c
@@ -335,7 +335,7 @@ static int pxa2xx_cpccnt_read(CPUARMState *env, const ARMCPRegInfo *ri,
{
PXA2xxState *s = (PXA2xxState *)ri->opaque;
if (s->pmnc & 1) {
- *value = qemu_get_clock_ns(vm_clock);
+ *value = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
} else {
*value = 0;
}
@@ -842,7 +842,7 @@ static inline void pxa2xx_rtc_int_update(PXA2xxRTCState *s)
static void pxa2xx_rtc_hzupdate(PXA2xxRTCState *s)
{
- int64_t rt = qemu_get_clock_ms(rtc_clock);
+ int64_t rt = qemu_clock_get_ms(rtc_clock);
s->last_rcnr += ((rt - s->last_hz) << 15) /
(1000 * ((s->rttr & 0xffff) + 1));
s->last_rdcr += ((rt - s->last_hz) << 15) /
@@ -852,7 +852,7 @@ static void pxa2xx_rtc_hzupdate(PXA2xxRTCState *s)
static void pxa2xx_rtc_swupdate(PXA2xxRTCState *s)
{
- int64_t rt = qemu_get_clock_ms(rtc_clock);
+ int64_t rt = qemu_clock_get_ms(rtc_clock);
if (s->rtsr & (1 << 12))
s->last_swcr += (rt - s->last_sw) / 10;
s->last_sw = rt;
@@ -860,7 +860,7 @@ static void pxa2xx_rtc_swupdate(PXA2xxRTCState *s)
static void pxa2xx_rtc_piupdate(PXA2xxRTCState *s)
{
- int64_t rt = qemu_get_clock_ms(rtc_clock);
+ int64_t rt = qemu_clock_get_ms(rtc_clock);
if (s->rtsr & (1 << 15))
s->last_swcr += rt - s->last_pi;
s->last_pi = rt;
@@ -870,43 +870,43 @@ static inline void pxa2xx_rtc_alarm_update(PXA2xxRTCState *s,
uint32_t rtsr)
{
if ((rtsr & (1 << 2)) && !(rtsr & (1 << 0)))
- qemu_mod_timer(s->rtc_hz, s->last_hz +
+ timer_mod(s->rtc_hz, s->last_hz +
(((s->rtar - s->last_rcnr) * 1000 *
((s->rttr & 0xffff) + 1)) >> 15));
else
- qemu_del_timer(s->rtc_hz);
+ timer_del(s->rtc_hz);
if ((rtsr & (1 << 5)) && !(rtsr & (1 << 4)))
- qemu_mod_timer(s->rtc_rdal1, s->last_hz +
+ timer_mod(s->rtc_rdal1, s->last_hz +
(((s->rdar1 - s->last_rdcr) * 1000 *
((s->rttr & 0xffff) + 1)) >> 15)); /* TODO: fixup */
else
- qemu_del_timer(s->rtc_rdal1);
+ timer_del(s->rtc_rdal1);
if ((rtsr & (1 << 7)) && !(rtsr & (1 << 6)))
- qemu_mod_timer(s->rtc_rdal2, s->last_hz +
+ timer_mod(s->rtc_rdal2, s->last_hz +
(((s->rdar2 - s->last_rdcr) * 1000 *
((s->rttr & 0xffff) + 1)) >> 15)); /* TODO: fixup */
else
- qemu_del_timer(s->rtc_rdal2);
+ timer_del(s->rtc_rdal2);
if ((rtsr & 0x1200) == 0x1200 && !(rtsr & (1 << 8)))
- qemu_mod_timer(s->rtc_swal1, s->last_sw +
+ timer_mod(s->rtc_swal1, s->last_sw +
(s->swar1 - s->last_swcr) * 10); /* TODO: fixup */
else
- qemu_del_timer(s->rtc_swal1);
+ timer_del(s->rtc_swal1);
if ((rtsr & 0x1800) == 0x1800 && !(rtsr & (1 << 10)))
- qemu_mod_timer(s->rtc_swal2, s->last_sw +
+ timer_mod(s->rtc_swal2, s->last_sw +
(s->swar2 - s->last_swcr) * 10); /* TODO: fixup */
else
- qemu_del_timer(s->rtc_swal2);
+ timer_del(s->rtc_swal2);
if ((rtsr & 0xc000) == 0xc000 && !(rtsr & (1 << 13)))
- qemu_mod_timer(s->rtc_pi, s->last_pi +
+ timer_mod(s->rtc_pi, s->last_pi +
(s->piar & 0xffff) - s->last_rtcpicr);
else
- qemu_del_timer(s->rtc_pi);
+ timer_del(s->rtc_pi);
}
static inline void pxa2xx_rtc_hz_tick(void *opaque)
@@ -986,16 +986,19 @@ static uint64_t pxa2xx_rtc_read(void *opaque, hwaddr addr,
case PIAR:
return s->piar;
case RCNR:
- return s->last_rcnr + ((qemu_get_clock_ms(rtc_clock) - s->last_hz) << 15) /
- (1000 * ((s->rttr & 0xffff) + 1));
+ return s->last_rcnr +
+ ((qemu_clock_get_ms(rtc_clock) - s->last_hz) << 15) /
+ (1000 * ((s->rttr & 0xffff) + 1));
case RDCR:
- return s->last_rdcr + ((qemu_get_clock_ms(rtc_clock) - s->last_hz) << 15) /
- (1000 * ((s->rttr & 0xffff) + 1));
+ return s->last_rdcr +
+ ((qemu_clock_get_ms(rtc_clock) - s->last_hz) << 15) /
+ (1000 * ((s->rttr & 0xffff) + 1));
case RYCR:
return s->last_rycr;
case SWCR:
if (s->rtsr & (1 << 12))
- return s->last_swcr + (qemu_get_clock_ms(rtc_clock) - s->last_sw) / 10;
+ return s->last_swcr +
+ (qemu_clock_get_ms(rtc_clock) - s->last_sw) / 10;
else
return s->last_swcr;
default:
@@ -1135,14 +1138,14 @@ static int pxa2xx_rtc_init(SysBusDevice *dev)
s->last_swcr = (tm.tm_hour << 19) |
(tm.tm_min << 13) | (tm.tm_sec << 7);
s->last_rtcpicr = 0;
- s->last_hz = s->last_sw = s->last_pi = qemu_get_clock_ms(rtc_clock);
-
- s->rtc_hz = qemu_new_timer_ms(rtc_clock, pxa2xx_rtc_hz_tick, s);
- s->rtc_rdal1 = qemu_new_timer_ms(rtc_clock, pxa2xx_rtc_rdal1_tick, s);
- s->rtc_rdal2 = qemu_new_timer_ms(rtc_clock, pxa2xx_rtc_rdal2_tick, s);
- s->rtc_swal1 = qemu_new_timer_ms(rtc_clock, pxa2xx_rtc_swal1_tick, s);
- s->rtc_swal2 = qemu_new_timer_ms(rtc_clock, pxa2xx_rtc_swal2_tick, s);
- s->rtc_pi = qemu_new_timer_ms(rtc_clock, pxa2xx_rtc_pi_tick, s);
+ s->last_hz = s->last_sw = s->last_pi = qemu_clock_get_ms(rtc_clock);
+
+ s->rtc_hz = timer_new_ms(rtc_clock, pxa2xx_rtc_hz_tick, s);
+ s->rtc_rdal1 = timer_new_ms(rtc_clock, pxa2xx_rtc_rdal1_tick, s);
+ s->rtc_rdal2 = timer_new_ms(rtc_clock, pxa2xx_rtc_rdal2_tick, s);
+ s->rtc_swal1 = timer_new_ms(rtc_clock, pxa2xx_rtc_swal1_tick, s);
+ s->rtc_swal2 = timer_new_ms(rtc_clock, pxa2xx_rtc_swal2_tick, s);
+ s->rtc_pi = timer_new_ms(rtc_clock, pxa2xx_rtc_pi_tick, s);
sysbus_init_irq(dev, &s->rtc_irq);
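For reference, a minimal sketch of the conversion pattern applied in the pxa2xx RTC hunks above: a millisecond timer on rtc_clock under the renamed API. It is not part of the patch; it uses only calls that appear in this series (timer_new_ms, timer_mod, qemu_clock_get_ms), while the ExampleRTC names and the "qemu/timer.h" include path are assumptions for illustration. rtc_clock and QEMUTimer are taken to be declared by QEMU's own headers, as in the hunks above.

#include <stdint.h>
#include "qemu/timer.h"   /* assumed location of QEMUTimer and the timer_* API */

typedef struct ExampleRTC {
    QEMUTimer *hz_timer;
    int64_t last_hz;                 /* ms timestamp of the last 1 Hz tick */
} ExampleRTC;

static void example_hz_tick(void *opaque)
{
    ExampleRTC *s = opaque;
    s->last_hz = qemu_clock_get_ms(rtc_clock);    /* was qemu_get_clock_ms(rtc_clock) */
    timer_mod(s->hz_timer, s->last_hz + 1000);    /* was qemu_mod_timer(...): re-arm in 1 s */
}

static void example_rtc_start(ExampleRTC *s)
{
    s->last_hz = qemu_clock_get_ms(rtc_clock);
    s->hz_timer = timer_new_ms(rtc_clock, example_hz_tick, s);  /* was qemu_new_timer_ms */
    timer_mod(s->hz_timer, s->last_hz + 1000);
}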
diff --git a/hw/arm/spitz.c b/hw/arm/spitz.c
index 34f958268d..9b9ce95c5a 100644
--- a/hw/arm/spitz.c
+++ b/hw/arm/spitz.c
@@ -393,7 +393,7 @@ static void spitz_keyboard_tick(void *opaque)
s->fifopos = 0;
}
- qemu_mod_timer(s->kbdtimer, qemu_get_clock_ns(vm_clock) +
+ timer_mod(s->kbdtimer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
get_ticks_per_sec() / 32);
}
@@ -485,7 +485,7 @@ static void spitz_keyboard_register(PXA2xxState *cpu)
qdev_connect_gpio_out(cpu->gpio, spitz_gpio_key_strobe[i],
qdev_get_gpio_in(dev, i));
- qemu_mod_timer(s->kbdtimer, qemu_get_clock_ns(vm_clock));
+ timer_mod(s->kbdtimer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
qemu_add_kbd_event_handler(spitz_keyboard_handler, s);
}
@@ -505,7 +505,7 @@ static int spitz_keyboard_init(SysBusDevice *sbd)
spitz_keyboard_pre_map(s);
- s->kbdtimer = qemu_new_timer_ns(vm_clock, spitz_keyboard_tick, s);
+ s->kbdtimer = timer_new_ns(QEMU_CLOCK_VIRTUAL, spitz_keyboard_tick, s);
qdev_init_gpio_in(dev, spitz_keyboard_strobe, SPITZ_KEY_STROBE_NUM);
qdev_init_gpio_out(dev, s->sense, SPITZ_KEY_SENSE_NUM);
diff --git a/hw/arm/stellaris.c b/hw/arm/stellaris.c
index 79f6b4e310..3237b30260 100644
--- a/hw/arm/stellaris.c
+++ b/hw/arm/stellaris.c
@@ -78,14 +78,14 @@ static void gptm_update_irq(gptm_state *s)
static void gptm_stop(gptm_state *s, int n)
{
- qemu_del_timer(s->timer[n]);
+ timer_del(s->timer[n]);
}
static void gptm_reload(gptm_state *s, int n, int reset)
{
int64_t tick;
if (reset)
- tick = qemu_get_clock_ns(vm_clock);
+ tick = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
else
tick = s->tick[n];
@@ -103,7 +103,7 @@ static void gptm_reload(gptm_state *s, int n, int reset)
hw_error("TODO: 16-bit timer mode 0x%x\n", s->mode[n]);
}
s->tick[n] = tick;
- qemu_mod_timer(s->timer[n], tick);
+ timer_mod(s->timer[n], tick);
}
static void gptm_tick(void *opaque)
@@ -318,8 +318,8 @@ static int stellaris_gptm_init(SysBusDevice *sbd)
sysbus_init_mmio(sbd, &s->iomem);
s->opaque[0] = s->opaque[1] = s;
- s->timer[0] = qemu_new_timer_ns(vm_clock, gptm_tick, &s->opaque[0]);
- s->timer[1] = qemu_new_timer_ns(vm_clock, gptm_tick, &s->opaque[1]);
+ s->timer[0] = timer_new_ns(QEMU_CLOCK_VIRTUAL, gptm_tick, &s->opaque[0]);
+ s->timer[1] = timer_new_ns(QEMU_CLOCK_VIRTUAL, gptm_tick, &s->opaque[1]);
vmstate_register(dev, -1, &vmstate_stellaris_gptm, s);
return 0;
}
diff --git a/hw/arm/strongarm.c b/hw/arm/strongarm.c
index 7b8ef8cbeb..170d0ce267 100644
--- a/hw/arm/strongarm.c
+++ b/hw/arm/strongarm.c
@@ -269,7 +269,7 @@ static inline void strongarm_rtc_int_update(StrongARMRTCState *s)
static void strongarm_rtc_hzupdate(StrongARMRTCState *s)
{
- int64_t rt = qemu_get_clock_ms(rtc_clock);
+ int64_t rt = qemu_clock_get_ms(rtc_clock);
s->last_rcnr += ((rt - s->last_hz) << 15) /
(1000 * ((s->rttr & 0xffff) + 1));
s->last_hz = rt;
@@ -278,17 +278,17 @@ static void strongarm_rtc_hzupdate(StrongARMRTCState *s)
static inline void strongarm_rtc_timer_update(StrongARMRTCState *s)
{
if ((s->rtsr & RTSR_HZE) && !(s->rtsr & RTSR_HZ)) {
- qemu_mod_timer(s->rtc_hz, s->last_hz + 1000);
+ timer_mod(s->rtc_hz, s->last_hz + 1000);
} else {
- qemu_del_timer(s->rtc_hz);
+ timer_del(s->rtc_hz);
}
if ((s->rtsr & RTSR_ALE) && !(s->rtsr & RTSR_AL)) {
- qemu_mod_timer(s->rtc_alarm, s->last_hz +
+ timer_mod(s->rtc_alarm, s->last_hz +
(((s->rtar - s->last_rcnr) * 1000 *
((s->rttr & 0xffff) + 1)) >> 15));
} else {
- qemu_del_timer(s->rtc_alarm);
+ timer_del(s->rtc_alarm);
}
}
@@ -322,7 +322,7 @@ static uint64_t strongarm_rtc_read(void *opaque, hwaddr addr,
return s->rtar;
case RCNR:
return s->last_rcnr +
- ((qemu_get_clock_ms(rtc_clock) - s->last_hz) << 15) /
+ ((qemu_clock_get_ms(rtc_clock) - s->last_hz) << 15) /
(1000 * ((s->rttr & 0xffff) + 1));
default:
printf("%s: Bad register 0x" TARGET_FMT_plx "\n", __func__, addr);
@@ -388,10 +388,10 @@ static int strongarm_rtc_init(SysBusDevice *dev)
qemu_get_timedate(&tm, 0);
s->last_rcnr = (uint32_t) mktimegm(&tm);
- s->last_hz = qemu_get_clock_ms(rtc_clock);
+ s->last_hz = qemu_clock_get_ms(rtc_clock);
- s->rtc_alarm = qemu_new_timer_ms(rtc_clock, strongarm_rtc_alarm_tick, s);
- s->rtc_hz = qemu_new_timer_ms(rtc_clock, strongarm_rtc_hz_tick, s);
+ s->rtc_alarm = timer_new_ms(rtc_clock, strongarm_rtc_alarm_tick, s);
+ s->rtc_hz = timer_new_ms(rtc_clock, strongarm_rtc_hz_tick, s);
sysbus_init_irq(dev, &s->rtc_irq);
sysbus_init_irq(dev, &s->rtc_hz_irq);
@@ -1085,8 +1085,8 @@ static void strongarm_uart_receive(void *opaque, const uint8_t *buf, int size)
}
/* call the timeout receive callback in 3 char transmit time */
- qemu_mod_timer(s->rx_timeout_timer,
- qemu_get_clock_ns(vm_clock) + s->char_transmit_time * 3);
+ timer_mod(s->rx_timeout_timer,
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + s->char_transmit_time * 3);
strongarm_uart_update_status(s);
strongarm_uart_update_int_status(s);
@@ -1107,7 +1107,7 @@ static void strongarm_uart_event(void *opaque, int event)
static void strongarm_uart_tx(void *opaque)
{
StrongARMUARTState *s = opaque;
- uint64_t new_xmit_ts = qemu_get_clock_ns(vm_clock);
+ uint64_t new_xmit_ts = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
if (s->utcr3 & UTCR3_LBM) /* loopback */ {
strongarm_uart_receive(s, &s->tx_fifo[s->tx_start], 1);
@@ -1118,7 +1118,7 @@ static void strongarm_uart_tx(void *opaque)
s->tx_start = (s->tx_start + 1) % 8;
s->tx_len--;
if (s->tx_len) {
- qemu_mod_timer(s->tx_timer, new_xmit_ts + s->char_transmit_time);
+ timer_mod(s->tx_timer, new_xmit_ts + s->char_transmit_time);
}
strongarm_uart_update_status(s);
strongarm_uart_update_int_status(s);
@@ -1237,8 +1237,8 @@ static int strongarm_uart_init(SysBusDevice *dev)
sysbus_init_mmio(dev, &s->iomem);
sysbus_init_irq(dev, &s->irq);
- s->rx_timeout_timer = qemu_new_timer_ns(vm_clock, strongarm_uart_rx_to, s);
- s->tx_timer = qemu_new_timer_ns(vm_clock, strongarm_uart_tx, s);
+ s->rx_timeout_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, strongarm_uart_rx_to, s);
+ s->tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, strongarm_uart_tx, s);
if (s->chr) {
qemu_chr_add_handlers(s->chr,
@@ -1282,8 +1282,8 @@ static int strongarm_uart_post_load(void *opaque, int version_id)
/* restart rx timeout timer */
if (s->rx_len) {
- qemu_mod_timer(s->rx_timeout_timer,
- qemu_get_clock_ns(vm_clock) + s->char_transmit_time * 3);
+ timer_mod(s->rx_timeout_timer,
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + s->char_transmit_time * 3);
}
return 0;
diff --git a/hw/audio/adlib.c b/hw/audio/adlib.c
index 0421d473ff..0c792475d1 100644
--- a/hw/audio/adlib.c
+++ b/hw/audio/adlib.c
@@ -173,7 +173,7 @@ static void timer_handler (int c, double interval_Sec)
s->ticking[n] = 1;
#ifdef DEBUG
interval = get_ticks_per_sec () * interval_Sec;
- exp = qemu_get_clock_ns (vm_clock) + interval;
+ exp = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + interval;
s->exp[n] = exp;
#endif
diff --git a/hw/audio/intel-hda.c b/hw/audio/intel-hda.c
index 32e44adb53..78f9d282e0 100644
--- a/hw/audio/intel-hda.c
+++ b/hw/audio/intel-hda.c
@@ -526,7 +526,7 @@ static void intel_hda_get_wall_clk(IntelHDAState *d, const IntelHDAReg *reg)
{
int64_t ns;
- ns = qemu_get_clock_ns(vm_clock) - d->wall_base_ns;
+ ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - d->wall_base_ns;
d->wall_clk = (uint32_t)(ns * 24 / 1000); /* 24 MHz */
}
@@ -1111,7 +1111,7 @@ static void intel_hda_reset(DeviceState *dev)
HDACodecDevice *cdev;
intel_hda_regs_reset(d);
- d->wall_base_ns = qemu_get_clock_ns(vm_clock);
+ d->wall_base_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
/* reset codecs */
QTAILQ_FOREACH(kid, &d->codecs.qbus.children, sibling) {
diff --git a/hw/audio/sb16.c b/hw/audio/sb16.c
index 3e586888eb..db79131cf1 100644
--- a/hw/audio/sb16.c
+++ b/hw/audio/sb16.c
@@ -768,9 +768,9 @@ static void complete (SB16State *s)
}
else {
if (s->aux_ts) {
- qemu_mod_timer (
+ timer_mod (
s->aux_ts,
- qemu_get_clock_ns (vm_clock) + ticks
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + ticks
);
}
}
@@ -1378,7 +1378,7 @@ static void sb16_realizefn (DeviceState *dev, Error **errp)
s->csp_regs[9] = 0xf8;
reset_mixer (s);
- s->aux_ts = qemu_new_timer_ns (vm_clock, aux_timer, s);
+ s->aux_ts = timer_new_ns(QEMU_CLOCK_VIRTUAL, aux_timer, s);
if (!s->aux_ts) {
dolog ("warning: Could not create auxiliary timer\n");
}
diff --git a/hw/block/fdc.c b/hw/block/fdc.c
index e35ed2eabb..c5a6c21215 100644
--- a/hw/block/fdc.c
+++ b/hw/block/fdc.c
@@ -1647,8 +1647,8 @@ static void fdctrl_handle_readid(FDCtrl *fdctrl, int direction)
FDrive *cur_drv = get_cur_drv(fdctrl);
cur_drv->head = (fdctrl->fifo[1] >> 2) & 1;
- qemu_mod_timer(fdctrl->result_timer,
- qemu_get_clock_ns(vm_clock) + (get_ticks_per_sec() / 50));
+ timer_mod(fdctrl->result_timer,
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (get_ticks_per_sec() / 50));
}
static void fdctrl_handle_format_track(FDCtrl *fdctrl, int direction)
@@ -2108,7 +2108,7 @@ static void fdctrl_realize_common(FDCtrl *fdctrl, Error **errp)
FLOPPY_DPRINTF("init controller\n");
fdctrl->fifo = qemu_memalign(512, FD_SECTOR_LEN);
fdctrl->fifo_size = 512;
- fdctrl->result_timer = qemu_new_timer_ns(vm_clock,
+ fdctrl->result_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
fdctrl_result_timer, fdctrl);
fdctrl->version = 0x90; /* Intel 82078 controller */
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 0263e5c636..5dee229734 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -187,7 +187,7 @@ static void nvme_enqueue_req_completion(NvmeCQueue *cq, NvmeRequest *req)
assert(cq->cqid == req->sq->cqid);
QTAILQ_REMOVE(&req->sq->out_req_list, req, entry);
QTAILQ_INSERT_TAIL(&cq->req_list, req, entry);
- qemu_mod_timer(cq->timer, qemu_get_clock_ns(vm_clock) + 500);
+ timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
}
static void nvme_rw_cb(void *opaque, int ret)
@@ -264,8 +264,8 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
static void nvme_free_sq(NvmeSQueue *sq, NvmeCtrl *n)
{
n->sq[sq->sqid] = NULL;
- qemu_del_timer(sq->timer);
- qemu_free_timer(sq->timer);
+ timer_del(sq->timer);
+ timer_free(sq->timer);
g_free(sq->io_req);
if (sq->sqid) {
g_free(sq);
@@ -327,7 +327,7 @@ static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr,
sq->io_req[i].sq = sq;
QTAILQ_INSERT_TAIL(&(sq->req_list), &sq->io_req[i], entry);
}
- sq->timer = qemu_new_timer_ns(vm_clock, nvme_process_sq, sq);
+ sq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_process_sq, sq);
assert(n->cq[cqid]);
cq = n->cq[cqid];
@@ -369,8 +369,8 @@ static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeCmd *cmd)
static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n)
{
n->cq[cq->cqid] = NULL;
- qemu_del_timer(cq->timer);
- qemu_free_timer(cq->timer);
+ timer_del(cq->timer);
+ timer_free(cq->timer);
msix_vector_unuse(&n->parent_obj, cq->vector);
if (cq->cqid) {
g_free(cq);
@@ -410,7 +410,7 @@ static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
QTAILQ_INIT(&cq->sq_list);
msix_vector_use(&n->parent_obj, cq->vector);
n->cq[cqid] = cq;
- cq->timer = qemu_new_timer_ns(vm_clock, nvme_post_cqes, cq);
+ cq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_post_cqes, cq);
}
static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeCmd *cmd)
@@ -691,9 +691,9 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
if (start_sqs) {
NvmeSQueue *sq;
QTAILQ_FOREACH(sq, &cq->sq_list, entry) {
- qemu_mod_timer(sq->timer, qemu_get_clock_ns(vm_clock) + 500);
+ timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
}
- qemu_mod_timer(cq->timer, qemu_get_clock_ns(vm_clock) + 500);
+ timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
}
if (cq->tail != cq->head) {
@@ -714,7 +714,7 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
}
sq->tail = new_tail;
- qemu_mod_timer(sq->timer, qemu_get_clock_ns(vm_clock) + 500);
+ timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
}
}
diff --git a/hw/block/pflash_cfi01.c b/hw/block/pflash_cfi01.c
index 825011d8cb..018a9677ba 100644
--- a/hw/block/pflash_cfi01.c
+++ b/hw/block/pflash_cfi01.c
@@ -613,7 +613,7 @@ static void pflash_cfi01_realize(DeviceState *dev, Error **errp)
pfl->ro = 0;
}
- pfl->timer = qemu_new_timer_ns(vm_clock, pflash_timer, pfl);
+ pfl->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, pflash_timer, pfl);
pfl->wcycle = 0;
pfl->cmd = 0;
pfl->status = 0;
diff --git a/hw/block/pflash_cfi02.c b/hw/block/pflash_cfi02.c
index 9fc02e3d64..99445b09b9 100644
--- a/hw/block/pflash_cfi02.c
+++ b/hw/block/pflash_cfi02.c
@@ -430,8 +430,8 @@ static void pflash_write (pflash_t *pfl, hwaddr offset,
}
pfl->status = 0x00;
/* Let's wait 5 seconds before chip erase is done */
- qemu_mod_timer(pfl->timer,
- qemu_get_clock_ns(vm_clock) + (get_ticks_per_sec() * 5));
+ timer_mod(pfl->timer,
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (get_ticks_per_sec() * 5));
break;
case 0x30:
/* Sector erase */
@@ -445,8 +445,8 @@ static void pflash_write (pflash_t *pfl, hwaddr offset,
}
pfl->status = 0x00;
/* Let's wait 1/2 second before sector erase is done */
- qemu_mod_timer(pfl->timer,
- qemu_get_clock_ns(vm_clock) + (get_ticks_per_sec() / 2));
+ timer_mod(pfl->timer,
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (get_ticks_per_sec() / 2));
break;
default:
DPRINTF("%s: invalid command %02x (wc 5)\n", __func__, cmd);
@@ -633,7 +633,7 @@ static void pflash_cfi02_realize(DeviceState *dev, Error **errp)
pfl->ro = 0;
}
- pfl->timer = qemu_new_timer_ns(vm_clock, pflash_timer, pfl);
+ pfl->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, pflash_timer, pfl);
pfl->wcycle = 0;
pfl->cmd = 0;
pfl->status = 0;
diff --git a/hw/bt/hci-csr.c b/hw/bt/hci-csr.c
index 16a25cb349..7b9b91608a 100644
--- a/hw/bt/hci-csr.c
+++ b/hw/bt/hci-csr.c
@@ -87,7 +87,7 @@ static inline void csrhci_fifo_wake(struct csrhci_s *s)
}
if (s->out_len)
- qemu_mod_timer(s->out_tm, qemu_get_clock_ns(vm_clock) + s->baud_delay);
+ timer_mod(s->out_tm, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + s->baud_delay);
}
#define csrhci_out_packetz(s, len) memset(csrhci_out_packet(s, len), 0, len)
@@ -446,7 +446,7 @@ CharDriverState *uart_hci_init(qemu_irq wakeup)
s->hci->evt_recv = csrhci_out_hci_packet_event;
s->hci->acl_recv = csrhci_out_hci_packet_acl;
- s->out_tm = qemu_new_timer_ns(vm_clock, csrhci_out_tick, s);
+ s->out_tm = timer_new_ns(QEMU_CLOCK_VIRTUAL, csrhci_out_tick, s);
s->pins = qemu_allocate_irqs(csrhci_pins, s, __csrhci_pins);
csrhci_reset(s);
diff --git a/hw/bt/hci.c b/hw/bt/hci.c
index b53cd5dea2..d1c0604a9b 100644
--- a/hw/bt/hci.c
+++ b/hw/bt/hci.c
@@ -576,7 +576,7 @@ static void bt_hci_inquiry_result(struct bt_hci_s *hci,
static void bt_hci_mod_timer_1280ms(QEMUTimer *timer, int period)
{
- qemu_mod_timer(timer, qemu_get_clock_ns(vm_clock) +
+ timer_mod(timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
muldiv64(period << 7, get_ticks_per_sec(), 100));
}
@@ -657,7 +657,7 @@ static void bt_hci_lmp_link_establish(struct bt_hci_s *hci,
if (master) {
link->acl_mode = acl_active;
hci->lm.handle[hci->lm.last_handle].acl_mode_timer =
- qemu_new_timer_ns(vm_clock, bt_hci_mode_tick, link);
+ timer_new_ns(QEMU_CLOCK_VIRTUAL, bt_hci_mode_tick, link);
}
}
@@ -667,8 +667,8 @@ static void bt_hci_lmp_link_teardown(struct bt_hci_s *hci, uint16_t handle)
hci->lm.handle[handle].link = NULL;
if (bt_hci_role_master(hci, handle)) {
- qemu_del_timer(hci->lm.handle[handle].acl_mode_timer);
- qemu_free_timer(hci->lm.handle[handle].acl_mode_timer);
+ timer_del(hci->lm.handle[handle].acl_mode_timer);
+ timer_free(hci->lm.handle[handle].acl_mode_timer);
}
}
@@ -1080,7 +1080,7 @@ static int bt_hci_mode_change(struct bt_hci_s *hci, uint16_t handle,
bt_hci_event_status(hci, HCI_SUCCESS);
- qemu_mod_timer(link->acl_mode_timer, qemu_get_clock_ns(vm_clock) +
+ timer_mod(link->acl_mode_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
muldiv64(interval * 625, get_ticks_per_sec(), 1000000));
bt_hci_lmp_mode_change_master(hci, link->link, mode, interval);
@@ -1103,7 +1103,7 @@ static int bt_hci_mode_cancel(struct bt_hci_s *hci, uint16_t handle, int mode)
bt_hci_event_status(hci, HCI_SUCCESS);
- qemu_del_timer(link->acl_mode_timer);
+ timer_del(link->acl_mode_timer);
bt_hci_lmp_mode_change_master(hci, link->link, acl_active, 0);
return 0;
@@ -1146,10 +1146,10 @@ static void bt_hci_reset(struct bt_hci_s *hci)
hci->psb_handle = 0x000;
hci->asb_handle = 0x000;
- /* XXX: qemu_del_timer(sl->acl_mode_timer); for all links */
- qemu_del_timer(hci->lm.inquiry_done);
- qemu_del_timer(hci->lm.inquiry_next);
- qemu_del_timer(hci->conn_accept_timer);
+ /* XXX: timer_del(sl->acl_mode_timer); for all links */
+ timer_del(hci->lm.inquiry_done);
+ timer_del(hci->lm.inquiry_next);
+ timer_del(hci->conn_accept_timer);
}
static void bt_hci_read_local_version_rp(struct bt_hci_s *hci)
@@ -1514,7 +1514,7 @@ static void bt_submit_hci(struct HCIInfo *info,
}
hci->lm.inquire = 0;
- qemu_del_timer(hci->lm.inquiry_done);
+ timer_del(hci->lm.inquiry_done);
bt_hci_event_complete_status(hci, HCI_SUCCESS);
break;
@@ -1552,8 +1552,8 @@ static void bt_submit_hci(struct HCIInfo *info,
break;
}
hci->lm.inquire = 0;
- qemu_del_timer(hci->lm.inquiry_done);
- qemu_del_timer(hci->lm.inquiry_next);
+ timer_del(hci->lm.inquiry_done);
+ timer_del(hci->lm.inquiry_next);
bt_hci_event_complete_status(hci, HCI_SUCCESS);
break;
@@ -2141,10 +2141,10 @@ struct HCIInfo *bt_new_hci(struct bt_scatternet_s *net)
{
struct bt_hci_s *s = g_malloc0(sizeof(struct bt_hci_s));
- s->lm.inquiry_done = qemu_new_timer_ns(vm_clock, bt_hci_inquiry_done, s);
- s->lm.inquiry_next = qemu_new_timer_ns(vm_clock, bt_hci_inquiry_next, s);
+ s->lm.inquiry_done = timer_new_ns(QEMU_CLOCK_VIRTUAL, bt_hci_inquiry_done, s);
+ s->lm.inquiry_next = timer_new_ns(QEMU_CLOCK_VIRTUAL, bt_hci_inquiry_next, s);
s->conn_accept_timer =
- qemu_new_timer_ns(vm_clock, bt_hci_conn_accept_timeout, s);
+ timer_new_ns(QEMU_CLOCK_VIRTUAL, bt_hci_conn_accept_timeout, s);
s->evt_packet = bt_hci_evt_packet;
s->evt_submit = bt_hci_evt_submit;
@@ -2209,9 +2209,9 @@ static void bt_hci_done(struct HCIInfo *info)
* s->device.lmp_connection_complete to free the remaining bits once
* hci->lm.awaiting_bdaddr[] is empty. */
- qemu_free_timer(hci->lm.inquiry_done);
- qemu_free_timer(hci->lm.inquiry_next);
- qemu_free_timer(hci->conn_accept_timer);
+ timer_free(hci->lm.inquiry_done);
+ timer_free(hci->lm.inquiry_next);
+ timer_free(hci->conn_accept_timer);
g_free(hci);
}
diff --git a/hw/bt/l2cap.c b/hw/bt/l2cap.c
index 521587a112..2301d6f87f 100644
--- a/hw/bt/l2cap.c
+++ b/hw/bt/l2cap.c
@@ -166,9 +166,9 @@ static void l2cap_retransmission_timer_update(struct l2cap_chan_s *ch)
{
#if 0
if (ch->mode != L2CAP_MODE_BASIC && ch->rexmit)
- qemu_mod_timer(ch->retransmission_timer);
+ timer_mod(ch->retransmission_timer);
else
- qemu_del_timer(ch->retransmission_timer);
+ timer_del(ch->retransmission_timer);
#endif
}
@@ -176,9 +176,9 @@ static void l2cap_monitor_timer_update(struct l2cap_chan_s *ch)
{
#if 0
if (ch->mode != L2CAP_MODE_BASIC && !ch->rexmit)
- qemu_mod_timer(ch->monitor_timer);
+ timer_mod(ch->monitor_timer);
else
- qemu_del_timer(ch->monitor_timer);
+ timer_del(ch->monitor_timer);
#endif
}
diff --git a/hw/char/cadence_uart.c b/hw/char/cadence_uart.c
index 3c2e96097b..f8ccbdd13a 100644
--- a/hw/char/cadence_uart.c
+++ b/hw/char/cadence_uart.c
@@ -141,9 +141,9 @@ static void fifo_trigger_update(void *opaque)
static void uart_tx_redo(UartState *s)
{
- uint64_t new_tx_time = qemu_get_clock_ns(vm_clock);
+ uint64_t new_tx_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
- qemu_mod_timer(s->tx_time_handle, new_tx_time + s->char_tx_time);
+ timer_mod(s->tx_time_handle, new_tx_time + s->char_tx_time);
s->r[R_SR] |= UART_SR_INTR_TEMPTY;
@@ -265,7 +265,7 @@ static void uart_ctrl_update(UartState *s)
static void uart_write_rx_fifo(void *opaque, const uint8_t *buf, int size)
{
UartState *s = (UartState *)opaque;
- uint64_t new_rx_time = qemu_get_clock_ns(vm_clock);
+ uint64_t new_rx_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
int i;
if ((s->r[R_CR] & UART_CR_RX_DIS) || !(s->r[R_CR] & UART_CR_RX_EN)) {
@@ -291,7 +291,7 @@ static void uart_write_rx_fifo(void *opaque, const uint8_t *buf, int size)
s->r[R_SR] |= UART_SR_INTR_RTRIG;
}
}
- qemu_mod_timer(s->fifo_trigger_handle, new_rx_time +
+ timer_mod(s->fifo_trigger_handle, new_rx_time +
(s->char_tx_time * 4));
}
uart_update_status(s);
@@ -452,10 +452,10 @@ static int cadence_uart_init(SysBusDevice *dev)
sysbus_init_mmio(dev, &s->iomem);
sysbus_init_irq(dev, &s->irq);
- s->fifo_trigger_handle = qemu_new_timer_ns(vm_clock,
+ s->fifo_trigger_handle = timer_new_ns(QEMU_CLOCK_VIRTUAL,
(QEMUTimerCB *)fifo_trigger_update, s);
- s->tx_time_handle = qemu_new_timer_ns(vm_clock,
+ s->tx_time_handle = timer_new_ns(QEMU_CLOCK_VIRTUAL,
(QEMUTimerCB *)uart_tx_write, s);
s->char_tx_time = (get_ticks_per_sec() / 9600) * 10;
diff --git a/hw/char/serial.c b/hw/char/serial.c
index a31eb5756a..27dab7d9d6 100644
--- a/hw/char/serial.c
+++ b/hw/char/serial.c
@@ -189,7 +189,7 @@ static void serial_update_msl(SerialState *s)
uint8_t omsr;
int flags;
- qemu_del_timer(s->modem_status_poll);
+ timer_del(s->modem_status_poll);
if (qemu_chr_fe_ioctl(s->chr,CHR_IOCTL_SERIAL_GET_TIOCM, &flags) == -ENOTSUP) {
s->poll_msl = -1;
@@ -216,7 +216,7 @@ static void serial_update_msl(SerialState *s)
We'll be lazy and poll only every 10ms, and only poll it at all if MSI interrupts are turned on */
if (s->poll_msl)
- qemu_mod_timer(s->modem_status_poll, qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 100);
+ timer_mod(s->modem_status_poll, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + get_ticks_per_sec() / 100);
}
static gboolean serial_xmit(GIOChannel *chan, GIOCondition cond, void *opaque)
@@ -253,7 +253,7 @@ static gboolean serial_xmit(GIOChannel *chan, GIOCondition cond, void *opaque)
s->tsr_retry = 0;
}
- s->last_xmit_ts = qemu_get_clock_ns(vm_clock);
+ s->last_xmit_ts = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
if (s->lsr & UART_LSR_THRE) {
s->lsr |= UART_LSR_TEMT;
@@ -307,7 +307,7 @@ static void serial_ioport_write(void *opaque, hwaddr addr, uint64_t val,
s->poll_msl = 1;
serial_update_msl(s);
} else {
- qemu_del_timer(s->modem_status_poll);
+ timer_del(s->modem_status_poll);
s->poll_msl = 0;
}
}
@@ -330,7 +330,7 @@ static void serial_ioport_write(void *opaque, hwaddr addr, uint64_t val,
/* FIFO clear */
if (val & UART_FCR_RFR) {
- qemu_del_timer(s->fifo_timeout_timer);
+ timer_del(s->fifo_timeout_timer);
s->timeout_ipending=0;
fifo8_reset(&s->recv_fifo);
}
@@ -398,7 +398,7 @@ static void serial_ioport_write(void *opaque, hwaddr addr, uint64_t val,
qemu_chr_fe_ioctl(s->chr,CHR_IOCTL_SERIAL_SET_TIOCM, &flags);
/* Update the modem status after a one-character-send wait-time, since there may be a response
from the device/computer at the other end of the serial line */
- qemu_mod_timer(s->modem_status_poll, qemu_get_clock_ns(vm_clock) + s->char_transmit_time);
+ timer_mod(s->modem_status_poll, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + s->char_transmit_time);
}
}
break;
@@ -430,7 +430,7 @@ static uint64_t serial_ioport_read(void *opaque, hwaddr addr, unsigned size)
if (s->recv_fifo.num == 0) {
s->lsr &= ~(UART_LSR_DR | UART_LSR_BI);
} else {
- qemu_mod_timer(s->fifo_timeout_timer, qemu_get_clock_ns (vm_clock) + s->char_transmit_time * 4);
+ timer_mod(s->fifo_timeout_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + s->char_transmit_time * 4);
}
s->timeout_ipending = 0;
} else {
@@ -557,7 +557,7 @@ static void serial_receive1(void *opaque, const uint8_t *buf, int size)
}
s->lsr |= UART_LSR_DR;
/* call the timeout receive callback in 4 char transmit time */
- qemu_mod_timer(s->fifo_timeout_timer, qemu_get_clock_ns (vm_clock) + s->char_transmit_time * 4);
+ timer_mod(s->fifo_timeout_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + s->char_transmit_time * 4);
} else {
if (s->lsr & UART_LSR_DR)
s->lsr |= UART_LSR_OE;
@@ -636,7 +636,7 @@ static void serial_reset(void *opaque)
fifo8_reset(&s->recv_fifo);
fifo8_reset(&s->xmit_fifo);
- s->last_xmit_ts = qemu_get_clock_ns(vm_clock);
+ s->last_xmit_ts = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
s->thr_ipending = 0;
s->last_break_enable = 0;
@@ -650,9 +650,9 @@ void serial_realize_core(SerialState *s, Error **errp)
return;
}
- s->modem_status_poll = qemu_new_timer_ns(vm_clock, (QEMUTimerCB *) serial_update_msl, s);
+ s->modem_status_poll = timer_new_ns(QEMU_CLOCK_VIRTUAL, (QEMUTimerCB *) serial_update_msl, s);
- s->fifo_timeout_timer = qemu_new_timer_ns(vm_clock, (QEMUTimerCB *) fifo_timeout_int, s);
+ s->fifo_timeout_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, (QEMUTimerCB *) fifo_timeout_int, s);
qemu_register_reset(serial_reset, s);
qemu_chr_add_handlers(s->chr, serial_can_receive1, serial_receive1,
diff --git a/hw/char/virtio-serial-bus.c b/hw/char/virtio-serial-bus.c
index da417c7010..f23f555dde 100644
--- a/hw/char/virtio-serial-bus.c
+++ b/hw/char/virtio-serial-bus.c
@@ -603,7 +603,7 @@ static void virtio_serial_post_load_timer_cb(void *opaque)
}
}
g_free(s->post_load->connected);
- qemu_free_timer(s->post_load->timer);
+ timer_free(s->post_load->timer);
g_free(s->post_load);
s->post_load = NULL;
}
@@ -618,7 +618,7 @@ static int fetch_active_ports_list(QEMUFile *f, int version_id,
s->post_load->connected =
g_malloc0(sizeof(*s->post_load->connected) * nr_active_ports);
- s->post_load->timer = qemu_new_timer_ns(vm_clock,
+ s->post_load->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
virtio_serial_post_load_timer_cb,
s);
@@ -660,7 +660,7 @@ static int fetch_active_ports_list(QEMUFile *f, int version_id,
}
}
}
- qemu_mod_timer(s->post_load->timer, 1);
+ timer_mod(s->post_load->timer, 1);
return 0;
}
@@ -999,8 +999,8 @@ static int virtio_serial_device_exit(DeviceState *dev)
g_free(vser->ports_map);
if (vser->post_load) {
g_free(vser->post_load->connected);
- qemu_del_timer(vser->post_load->timer);
- qemu_free_timer(vser->post_load->timer);
+ timer_del(vser->post_load->timer);
+ timer_free(vser->post_load->timer);
g_free(vser->post_load);
}
virtio_cleanup(vdev);
diff --git a/hw/core/ptimer.c b/hw/core/ptimer.c
index 4bc96c9fa2..3036bde1f3 100644
--- a/hw/core/ptimer.c
+++ b/hw/core/ptimer.c
@@ -48,7 +48,7 @@ static void ptimer_reload(ptimer_state *s)
if (s->period_frac) {
s->next_event += ((int64_t)s->period_frac * s->delta) >> 32;
}
- qemu_mod_timer(s->timer, s->next_event);
+ timer_mod(s->timer, s->next_event);
}
static void ptimer_tick(void *opaque)
@@ -69,7 +69,7 @@ uint64_t ptimer_get_count(ptimer_state *s)
uint64_t counter;
if (s->enabled) {
- now = qemu_get_clock_ns(vm_clock);
+ now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
/* Figure out the current counter value. */
if (now - s->next_event > 0
|| s->period == 0) {
@@ -123,7 +123,7 @@ void ptimer_set_count(ptimer_state *s, uint64_t count)
{
s->delta = count;
if (s->enabled) {
- s->next_event = qemu_get_clock_ns(vm_clock);
+ s->next_event = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
ptimer_reload(s);
}
}
@@ -138,7 +138,7 @@ void ptimer_run(ptimer_state *s, int oneshot)
return;
}
s->enabled = oneshot ? 2 : 1;
- s->next_event = qemu_get_clock_ns(vm_clock);
+ s->next_event = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
ptimer_reload(s);
}
@@ -150,7 +150,7 @@ void ptimer_stop(ptimer_state *s)
return;
s->delta = ptimer_get_count(s);
- qemu_del_timer(s->timer);
+ timer_del(s->timer);
s->enabled = 0;
}
@@ -160,7 +160,7 @@ void ptimer_set_period(ptimer_state *s, int64_t period)
s->period = period;
s->period_frac = 0;
if (s->enabled) {
- s->next_event = qemu_get_clock_ns(vm_clock);
+ s->next_event = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
ptimer_reload(s);
}
}
@@ -171,7 +171,7 @@ void ptimer_set_freq(ptimer_state *s, uint32_t freq)
s->period = 1000000000ll / freq;
s->period_frac = (1000000000ll << 32) / freq;
if (s->enabled) {
- s->next_event = qemu_get_clock_ns(vm_clock);
+ s->next_event = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
ptimer_reload(s);
}
}
@@ -197,7 +197,7 @@ void ptimer_set_limit(ptimer_state *s, uint64_t limit, int reload)
if (reload)
s->delta = limit;
if (s->enabled && reload) {
- s->next_event = qemu_get_clock_ns(vm_clock);
+ s->next_event = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
ptimer_reload(s);
}
}
@@ -226,6 +226,6 @@ ptimer_state *ptimer_init(QEMUBH *bh)
s = (ptimer_state *)g_malloc0(sizeof(ptimer_state));
s->bh = bh;
- s->timer = qemu_new_timer_ns(vm_clock, ptimer_tick, s);
+ s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, ptimer_tick, s);
return s;
}
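The ptimer, nvme and bt/hci hunks above replace the whole timer lifecycle one-for-one (qemu_new_timer_ns/qemu_mod_timer/qemu_del_timer/qemu_free_timer become timer_new_ns/timer_mod/timer_del/timer_free). Below is a minimal, hypothetical sketch of that lifecycle on QEMU_CLOCK_VIRTUAL, using only the renamed calls visible in this patch; ExampleDev and the include path are invented for illustration.

#include <stdint.h>
#include "qemu/timer.h"   /* assumed location of QEMUTimer and the timer_* API */

typedef struct ExampleDev {
    QEMUTimer *tick;
} ExampleDev;

static void example_tick_cb(void *opaque)
{
    ExampleDev *d = opaque;
    /* re-arm one millisecond (in ns) past the current virtual-clock time */
    timer_mod(d->tick, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 1000000);
}

static void example_dev_start(ExampleDev *d)
{
    d->tick = timer_new_ns(QEMU_CLOCK_VIRTUAL, example_tick_cb, d);  /* was qemu_new_timer_ns */
    timer_mod(d->tick, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 1000000);
}

static void example_dev_stop(ExampleDev *d)
{
    timer_del(d->tick);    /* was qemu_del_timer: disarm first */
    timer_free(d->tick);   /* was qemu_free_timer: then release, as in the nvme/bt hunks */
}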
diff --git a/hw/display/qxl-logger.c b/hw/display/qxl-logger.c
index 3cd85d9b97..c900c2ca4f 100644
--- a/hw/display/qxl-logger.c
+++ b/hw/display/qxl-logger.c
@@ -242,7 +242,7 @@ int qxl_log_command(PCIQXLDevice *qxl, const char *ring, QXLCommandExt *ext)
if (!qxl->cmdlog) {
return 0;
}
- fprintf(stderr, "%" PRId64 " qxl-%d/%s:", qemu_get_clock_ns(vm_clock),
+ fprintf(stderr, "%" PRId64 " qxl-%d/%s:", qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
qxl->id, ring);
fprintf(stderr, " cmd @ 0x%" PRIx64 " %s%s", ext->cmd.data,
qxl_name(qxl_type, ext->cmd.type),
diff --git a/hw/display/qxl.c b/hw/display/qxl.c
index c5370575ea..7649f2b1f4 100644
--- a/hw/display/qxl.c
+++ b/hw/display/qxl.c
@@ -1596,7 +1596,7 @@ async_common:
trace_qxl_io_log(d->id, d->ram->log_buf);
if (d->guestdebug) {
fprintf(stderr, "qxl/guest-%d: %" PRId64 ": %s", d->id,
- qemu_get_clock_ns(vm_clock), d->ram->log_buf);
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), d->ram->log_buf);
}
break;
case QXL_IO_RESET:
diff --git a/hw/display/vga.c b/hw/display/vga.c
index 06f44a808c..7b91d9c54e 100644
--- a/hw/display/vga.c
+++ b/hw/display/vga.c
@@ -318,7 +318,7 @@ static uint8_t vga_precise_retrace(VGACommonState *s)
int cur_line, cur_line_char, cur_char;
int64_t cur_tick;
- cur_tick = qemu_get_clock_ns(vm_clock);
+ cur_tick = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
cur_char = (cur_tick / r->ticks_per_char) % r->total_chars;
cur_line = cur_char / r->htotal;
@@ -1304,7 +1304,7 @@ static void vga_draw_text(VGACommonState *s, int full_update)
uint32_t *ch_attr_ptr;
vga_draw_glyph8_func *vga_draw_glyph8;
vga_draw_glyph9_func *vga_draw_glyph9;
- int64_t now = qemu_get_clock_ms(vm_clock);
+ int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
/* compute font data address (in plane 2) */
v = s->sr[VGA_SEQ_CHARACTER_MAP];
@@ -1907,7 +1907,7 @@ static void vga_update_display(void *opaque)
}
if (graphic_mode != s->graphic_mode) {
s->graphic_mode = graphic_mode;
- s->cursor_blink_time = qemu_get_clock_ms(vm_clock);
+ s->cursor_blink_time = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
full_update = 1;
}
switch(graphic_mode) {
diff --git a/hw/dma/pl330.c b/hw/dma/pl330.c
index ddcc4135d7..401399d330 100644
--- a/hw/dma/pl330.c
+++ b/hw/dma/pl330.c
@@ -1256,7 +1256,7 @@ static void pl330_dma_stop_irq(void *opaque, int irq, int level)
if (s->periph_busy[irq] != level) {
s->periph_busy[irq] = level;
- qemu_mod_timer(s->timer, qemu_get_clock_ns(vm_clock));
+ timer_mod(s->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
}
}
@@ -1519,7 +1519,7 @@ static void pl330_reset(DeviceState *d)
s->periph_busy[i] = 0;
}
- qemu_del_timer(s->timer);
+ timer_del(s->timer);
}
static void pl330_realize(DeviceState *dev, Error **errp)
@@ -1532,7 +1532,7 @@ static void pl330_realize(DeviceState *dev, Error **errp)
"dma", PL330_IOMEM_SIZE);
sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);
- s->timer = qemu_new_timer_ns(vm_clock, pl330_exec_cycle_timer, s);
+ s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, pl330_exec_cycle_timer, s);
s->cfg[0] = (s->mgr_ns_at_rst ? 0x4 : 0) |
(s->num_periph_req > 0 ? 1 : 0) |
diff --git a/hw/dma/rc4030.c b/hw/dma/rc4030.c
index 4ec433f957..af2663256e 100644
--- a/hw/dma/rc4030.c
+++ b/hw/dma/rc4030.c
@@ -107,7 +107,7 @@ static void set_next_tick(rc4030State *s)
tm_hz = 1000 / (s->itr + 1);
- qemu_mod_timer(s->periodic_timer, qemu_get_clock_ns(vm_clock) +
+ timer_mod(s->periodic_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
get_ticks_per_sec() / tm_hz);
}
@@ -806,7 +806,7 @@ void *rc4030_init(qemu_irq timer, qemu_irq jazz_bus,
*irqs = qemu_allocate_irqs(rc4030_irq_jazz_request, s, 16);
*dmas = rc4030_allocate_dmas(s, 4);
- s->periodic_timer = qemu_new_timer_ns(vm_clock, rc4030_periodic_timer, s);
+ s->periodic_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, rc4030_periodic_timer, s);
s->timer_irq = timer;
s->jazz_bus_irq = jazz_bus;
diff --git a/hw/dma/soc_dma.c b/hw/dma/soc_dma.c
index 5e3491d373..c06aabb406 100644
--- a/hw/dma/soc_dma.c
+++ b/hw/dma/soc_dma.c
@@ -84,10 +84,10 @@ struct dma_s {
static void soc_dma_ch_schedule(struct soc_dma_ch_s *ch, int delay_bytes)
{
- int64_t now = qemu_get_clock_ns(vm_clock);
+ int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
struct dma_s *dma = (struct dma_s *) ch->dma;
- qemu_mod_timer(ch->timer, now + delay_bytes / dma->channel_freq);
+ timer_mod(ch->timer, now + delay_bytes / dma->channel_freq);
}
static void soc_dma_ch_run(void *opaque)
@@ -217,7 +217,7 @@ void soc_dma_set_request(struct soc_dma_ch_s *ch, int level)
ch->enable = level;
if (!ch->enable)
- qemu_del_timer(ch->timer);
+ timer_del(ch->timer);
else if (!ch->running)
soc_dma_ch_run(ch);
else
@@ -246,7 +246,7 @@ struct soc_dma_s *soc_dma_init(int n)
for (i = 0; i < n; i ++) {
s->ch[i].dma = &s->soc;
s->ch[i].num = i;
- s->ch[i].timer = qemu_new_timer_ns(vm_clock, soc_dma_ch_run, &s->ch[i]);
+ s->ch[i].timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, soc_dma_ch_run, &s->ch[i]);
}
soc_dma_reset(&s->soc);
diff --git a/hw/dma/xilinx_axidma.c b/hw/dma/xilinx_axidma.c
index a48e3baa99..59e8e35a4c 100644
--- a/hw/dma/xilinx_axidma.c
+++ b/hw/dma/xilinx_axidma.c
@@ -27,6 +27,7 @@
#include "hw/ptimer.h"
#include "qemu/log.h"
#include "qapi/qmp/qerror.h"
+#include "qemu/main-loop.h"
#include "hw/stream.h"
diff --git a/hw/i386/kvm/apic.c b/hw/i386/kvm/apic.c
index 179b806d96..5609063120 100644
--- a/hw/i386/kvm/apic.c
+++ b/hw/i386/kvm/apic.c
@@ -79,7 +79,7 @@ void kvm_get_apic_state(DeviceState *d, struct kvm_lapic_state *kapic)
v = (s->divide_conf & 3) | ((s->divide_conf >> 1) & 4);
s->count_shift = (v + 1) & 7;
- s->initial_count_load_time = qemu_get_clock_ns(vm_clock);
+ s->initial_count_load_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
apic_next_timer(s, s->initial_count_load_time);
}
diff --git a/hw/i386/kvm/i8254.c b/hw/i386/kvm/i8254.c
index c1f40948f9..20b6457fbd 100644
--- a/hw/i386/kvm/i8254.c
+++ b/hw/i386/kvm/i8254.c
@@ -65,12 +65,12 @@ static void kvm_pit_update_clock_offset(KVMPITState *s)
/*
* Measure the delta between CLOCK_MONOTONIC, the base used for
- * kvm_pit_channel_state::count_load_time, and vm_clock. Take the
+ * kvm_pit_channel_state::count_load_time, and QEMU_CLOCK_VIRTUAL. Take the
* minimum of several samples to filter out scheduling noise.
*/
clock_offset = INT64_MAX;
for (i = 0; i < CALIBRATION_ROUNDS; i++) {
- offset = qemu_get_clock_ns(vm_clock);
+ offset = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
clock_gettime(CLOCK_MONOTONIC, &ts);
offset -= ts.tv_nsec;
offset -= (int64_t)ts.tv_sec * 1000000000;
@@ -194,7 +194,7 @@ static void kvm_pit_set_gate(PITCommonState *s, PITChannelState *sc, int val)
case 5:
if (sc->gate < val) {
/* restart counting on rising edge */
- sc->count_load_time = qemu_get_clock_ns(vm_clock);
+ sc->count_load_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
}
break;
}
diff --git a/hw/i386/xen_domainbuild.c b/hw/i386/xen_domainbuild.c
index 4e2cf95ae5..c0ab7537df 100644
--- a/hw/i386/xen_domainbuild.c
+++ b/hw/i386/xen_domainbuild.c
@@ -148,7 +148,7 @@ static void xen_domain_poll(void *opaque)
goto quit;
}
- qemu_mod_timer(xen_poll, qemu_get_clock_ms(rt_clock) + 1000);
+ timer_mod(xen_poll, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
return;
quit:
@@ -290,8 +290,8 @@ int xen_domain_build_pv(const char *kernel, const char *ramdisk,
goto err;
}
- xen_poll = qemu_new_timer_ms(rt_clock, xen_domain_poll, NULL);
- qemu_mod_timer(xen_poll, qemu_get_clock_ms(rt_clock) + 1000);
+ xen_poll = timer_new_ms(QEMU_CLOCK_REALTIME, xen_domain_poll, NULL);
+ timer_mod(xen_poll, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
return 0;
err:
diff --git a/hw/ide/core.c b/hw/ide/core.c
index a73af7252a..399b1bae68 100644
--- a/hw/ide/core.c
+++ b/hw/ide/core.c
@@ -768,8 +768,8 @@ static void ide_sector_write_cb(void *opaque, int ret)
that at the expense of slower write performances. Use this
option _only_ to install Windows 2000. You must disable it
for normal use. */
- qemu_mod_timer(s->sector_write_timer,
- qemu_get_clock_ns(vm_clock) + (get_ticks_per_sec() / 1000));
+ timer_mod(s->sector_write_timer,
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (get_ticks_per_sec() / 1000));
} else {
ide_set_irq(s->bus);
}
@@ -2163,7 +2163,7 @@ static void ide_init1(IDEBus *bus, int unit)
s->smart_selftest_data = qemu_blockalign(s->bs, 512);
memset(s->smart_selftest_data, 0, 512);
- s->sector_write_timer = qemu_new_timer_ns(vm_clock,
+ s->sector_write_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
ide_sector_write_timer_cb, s);
}
diff --git a/hw/input/hid.c b/hw/input/hid.c
index 14b3125956..bb0fa6a619 100644
--- a/hw/input/hid.c
+++ b/hw/input/hid.c
@@ -85,8 +85,8 @@ static void hid_idle_timer(void *opaque)
static void hid_del_idle_timer(HIDState *hs)
{
if (hs->idle_timer) {
- qemu_del_timer(hs->idle_timer);
- qemu_free_timer(hs->idle_timer);
+ timer_del(hs->idle_timer);
+ timer_free(hs->idle_timer);
hs->idle_timer = NULL;
}
}
@@ -94,12 +94,12 @@ static void hid_del_idle_timer(HIDState *hs)
void hid_set_next_idle(HIDState *hs)
{
if (hs->idle) {
- uint64_t expire_time = qemu_get_clock_ns(vm_clock) +
+ uint64_t expire_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
get_ticks_per_sec() * hs->idle * 4 / 1000;
if (!hs->idle_timer) {
- hs->idle_timer = qemu_new_timer_ns(vm_clock, hid_idle_timer, hs);
+ hs->idle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, hid_idle_timer, hs);
}
- qemu_mod_timer_ns(hs->idle_timer, expire_time);
+ timer_mod_ns(hs->idle_timer, expire_time);
} else {
hid_del_idle_timer(hs);
}
diff --git a/hw/input/lm832x.c b/hw/input/lm832x.c
index bacbeb2343..f583cf0279 100644
--- a/hw/input/lm832x.c
+++ b/hw/input/lm832x.c
@@ -365,7 +365,7 @@ static void lm_kbd_write(LM823KbdState *s, int reg, int byte, uint8_t value)
break;
}
- qemu_del_timer(s->pwm.tm[(value & 3) - 1]);
+ timer_del(s->pwm.tm[(value & 3) - 1]);
break;
case LM832x_GENERAL_ERROR:
@@ -463,9 +463,9 @@ static int lm8323_init(I2CSlave *i2c)
LM823KbdState *s = FROM_I2C_SLAVE(LM823KbdState, i2c);
s->model = 0x8323;
- s->pwm.tm[0] = qemu_new_timer_ns(vm_clock, lm_kbd_pwm0_tick, s);
- s->pwm.tm[1] = qemu_new_timer_ns(vm_clock, lm_kbd_pwm1_tick, s);
- s->pwm.tm[2] = qemu_new_timer_ns(vm_clock, lm_kbd_pwm2_tick, s);
+ s->pwm.tm[0] = timer_new_ns(QEMU_CLOCK_VIRTUAL, lm_kbd_pwm0_tick, s);
+ s->pwm.tm[1] = timer_new_ns(QEMU_CLOCK_VIRTUAL, lm_kbd_pwm1_tick, s);
+ s->pwm.tm[2] = timer_new_ns(QEMU_CLOCK_VIRTUAL, lm_kbd_pwm2_tick, s);
qdev_init_gpio_out(&i2c->qdev, &s->nirq, 1);
lm_kbd_reset(s);
diff --git a/hw/input/tsc2005.c b/hw/input/tsc2005.c
index a771cd5e52..21d4f4dbbd 100644
--- a/hw/input/tsc2005.c
+++ b/hw/input/tsc2005.c
@@ -201,7 +201,7 @@ static void tsc2005_write(TSC2005State *s, int reg, uint16_t data)
fprintf(stderr, "%s: touchscreen sense %sabled\n",
__FUNCTION__, s->enabled ? "en" : "dis");
if (s->busy && !s->enabled)
- qemu_del_timer(s->timer);
+ timer_del(s->timer);
s->busy &= s->enabled;
}
s->nextprecision = (data >> 13) & 1;
@@ -290,8 +290,8 @@ static void tsc2005_pin_update(TSC2005State *s)
s->precision = s->nextprecision;
s->function = s->nextfunction;
s->pdst = !s->pnd0; /* Synchronised on internal clock */
- expires = qemu_get_clock_ns(vm_clock) + (get_ticks_per_sec() >> 7);
- qemu_mod_timer(s->timer, expires);
+ expires = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (get_ticks_per_sec() >> 7);
+ timer_mod(s->timer, expires);
}
static void tsc2005_reset(TSC2005State *s)
@@ -337,7 +337,7 @@ static uint8_t tsc2005_txrx_word(void *opaque, uint8_t value)
fprintf(stderr, "%s: touchscreen sense %sabled\n",
__FUNCTION__, s->enabled ? "en" : "dis");
if (s->busy && !s->enabled)
- qemu_del_timer(s->timer);
+ timer_del(s->timer);
s->busy &= s->enabled;
}
tsc2005_pin_update(s);
@@ -449,7 +449,7 @@ static void tsc2005_save(QEMUFile *f, void *opaque)
qemu_put_be16s(f, &s->dav);
qemu_put_be16s(f, &s->data);
- qemu_put_timer(f, s->timer);
+ timer_put(f, s->timer);
qemu_put_byte(f, s->enabled);
qemu_put_byte(f, s->host_mode);
qemu_put_byte(f, s->function);
@@ -490,7 +490,7 @@ static int tsc2005_load(QEMUFile *f, void *opaque, int version_id)
qemu_get_be16s(f, &s->dav);
qemu_get_be16s(f, &s->data);
- qemu_get_timer(f, s->timer);
+ timer_get(f, s->timer);
s->enabled = qemu_get_byte(f);
s->host_mode = qemu_get_byte(f);
s->function = qemu_get_byte(f);
@@ -513,7 +513,7 @@ static int tsc2005_load(QEMUFile *f, void *opaque, int version_id)
for (i = 0; i < 8; i ++)
s->tr[i] = qemu_get_be32(f);
- s->busy = qemu_timer_pending(s->timer);
+ s->busy = timer_pending(s->timer);
tsc2005_pin_update(s);
return 0;
@@ -529,7 +529,7 @@ void *tsc2005_init(qemu_irq pintdav)
s->y = 240;
s->pressure = 0;
s->precision = s->nextprecision = 0;
- s->timer = qemu_new_timer_ns(vm_clock, tsc2005_timer_tick, s);
+ s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, tsc2005_timer_tick, s);
s->pint = pintdav;
s->model = 0x2005;
diff --git a/hw/input/tsc210x.c b/hw/input/tsc210x.c
index 9b854e77dd..485c9e5753 100644
--- a/hw/input/tsc210x.c
+++ b/hw/input/tsc210x.c
@@ -503,9 +503,9 @@ static uint16_t tsc2102_audio_register_read(TSC210xState *s, int reg)
l_ch = 1;
r_ch = 1;
if (s->softstep && !(s->dac_power & (1 << 10))) {
- l_ch = (qemu_get_clock_ns(vm_clock) >
+ l_ch = (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) >
s->volume_change + TSC_SOFTSTEP_DELAY);
- r_ch = (qemu_get_clock_ns(vm_clock) >
+ r_ch = (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) >
s->volume_change + TSC_SOFTSTEP_DELAY);
}
@@ -514,7 +514,7 @@ static uint16_t tsc2102_audio_register_read(TSC210xState *s, int reg)
case 0x05: /* Stereo DAC Power Control */
return 0x2aa0 | s->dac_power |
(((s->dac_power & (1 << 10)) &&
- (qemu_get_clock_ns(vm_clock) >
+ (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) >
s->powerdown + TSC_POWEROFF_DELAY)) << 6);
case 0x06: /* Audio Control 3 */
@@ -594,7 +594,7 @@ static void tsc2102_control_register_write(
s->host_mode = value >> 15;
s->enabled = !(value & 0x4000);
if (s->busy && !s->enabled)
- qemu_del_timer(s->timer);
+ timer_del(s->timer);
s->busy &= s->enabled;
s->nextfunction = (value >> 10) & 0xf;
s->nextprecision = (value >> 8) & 3;
@@ -629,7 +629,7 @@ static void tsc2102_control_register_write(
case 0x04: /* Reset */
if (value == 0xbb00) {
if (s->busy)
- qemu_del_timer(s->timer);
+ timer_del(s->timer);
tsc210x_reset(s);
#ifdef TSC_VERBOSE
} else {
@@ -695,7 +695,7 @@ static void tsc2102_audio_register_write(
case 0x02: /* DAC Volume Control */
s->volume = value;
- s->volume_change = qemu_get_clock_ns(vm_clock);
+ s->volume_change = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
return;
case 0x03:
@@ -717,7 +717,7 @@ static void tsc2102_audio_register_write(
case 0x05: /* Stereo DAC Power Control */
if ((value & ~s->dac_power) & (1 << 10))
- s->powerdown = qemu_get_clock_ns(vm_clock);
+ s->powerdown = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
s->dac_power = value & 0x9543;
#ifdef TSC_VERBOSE
@@ -864,8 +864,8 @@ static void tsc210x_pin_update(TSC210xState *s)
s->busy = 1;
s->precision = s->nextprecision;
s->function = s->nextfunction;
- expires = qemu_get_clock_ns(vm_clock) + (get_ticks_per_sec() >> 10);
- qemu_mod_timer(s->timer, expires);
+ expires = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (get_ticks_per_sec() >> 10);
+ timer_mod(s->timer, expires);
}
static uint16_t tsc210x_read(TSC210xState *s)
@@ -1005,7 +1005,7 @@ static void tsc210x_i2s_set_rate(TSC210xState *s, int in, int out)
static void tsc210x_save(QEMUFile *f, void *opaque)
{
TSC210xState *s = (TSC210xState *) opaque;
- int64_t now = qemu_get_clock_ns(vm_clock);
+ int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
int i;
qemu_put_be16(f, s->x);
@@ -1020,7 +1020,7 @@ static void tsc210x_save(QEMUFile *f, void *opaque)
qemu_put_byte(f, s->irq);
qemu_put_be16s(f, &s->dav);
- qemu_put_timer(f, s->timer);
+ timer_put(f, s->timer);
qemu_put_byte(f, s->enabled);
qemu_put_byte(f, s->host_mode);
qemu_put_byte(f, s->function);
@@ -1051,7 +1051,7 @@ static void tsc210x_save(QEMUFile *f, void *opaque)
static int tsc210x_load(QEMUFile *f, void *opaque, int version_id)
{
TSC210xState *s = (TSC210xState *) opaque;
- int64_t now = qemu_get_clock_ns(vm_clock);
+ int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
int i;
s->x = qemu_get_be16(f);
@@ -1066,7 +1066,7 @@ static int tsc210x_load(QEMUFile *f, void *opaque, int version_id)
s->irq = qemu_get_byte(f);
qemu_get_be16s(f, &s->dav);
- qemu_get_timer(f, s->timer);
+ timer_get(f, s->timer);
s->enabled = qemu_get_byte(f);
s->host_mode = qemu_get_byte(f);
s->function = qemu_get_byte(f);
@@ -1093,7 +1093,7 @@ static int tsc210x_load(QEMUFile *f, void *opaque, int version_id)
for (i = 0; i < 0x14; i ++)
qemu_get_be16s(f, &s->filter_data[i]);
- s->busy = qemu_timer_pending(s->timer);
+ s->busy = timer_pending(s->timer);
qemu_set_irq(s->pint, !s->irq);
qemu_set_irq(s->davint, !s->dav);
@@ -1111,7 +1111,7 @@ uWireSlave *tsc2102_init(qemu_irq pint)
s->y = 160;
s->pressure = 0;
s->precision = s->nextprecision = 0;
- s->timer = qemu_new_timer_ns(vm_clock, tsc210x_timer_tick, s);
+ s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, tsc210x_timer_tick, s);
s->pint = pint;
s->model = 0x2102;
s->name = "tsc2102";
@@ -1160,7 +1160,7 @@ uWireSlave *tsc2301_init(qemu_irq penirq, qemu_irq kbirq, qemu_irq dav)
s->y = 240;
s->pressure = 0;
s->precision = s->nextprecision = 0;
- s->timer = qemu_new_timer_ns(vm_clock, tsc210x_timer_tick, s);
+ s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, tsc210x_timer_tick, s);
s->pint = penirq;
s->kbint = kbirq;
s->davint = dav;
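The tsc2005/tsc210x save/load hunks above also rename the migration helpers: qemu_put_timer/qemu_get_timer become timer_put/timer_get with the same (QEMUFile *, QEMUTimer *) arguments. A minimal, hypothetical sketch follows; the function names are invented, and the exact QEMU headers providing QEMUFile and these declarations are assumed rather than shown by this patch.

#include "qemu/timer.h"   /* assumed location of QEMUTimer, timer_put() and timer_get() */

/* QEMUFile is provided by QEMU's migration headers, as in the handlers above. */
static void example_timer_save(QEMUFile *f, QEMUTimer *t)
{
    timer_put(f, t);    /* was qemu_put_timer(f, t) */
}

static void example_timer_load(QEMUFile *f, QEMUTimer *t)
{
    timer_get(f, t);    /* was qemu_get_timer(f, t) */
}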
diff --git a/hw/intc/apic.c b/hw/intc/apic.c
index 5e3b96e4db..a913186ed0 100644
--- a/hw/intc/apic.c
+++ b/hw/intc/apic.c
@@ -606,7 +606,7 @@ static uint32_t apic_get_current_count(APICCommonState *s)
{
int64_t d;
uint32_t val;
- d = (qemu_get_clock_ns(vm_clock) - s->initial_count_load_time) >>
+ d = (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - s->initial_count_load_time) >>
s->count_shift;
if (s->lvt[APIC_LVT_TIMER] & APIC_LVT_TIMER_PERIODIC) {
/* periodic */
@@ -623,9 +623,9 @@ static uint32_t apic_get_current_count(APICCommonState *s)
static void apic_timer_update(APICCommonState *s, int64_t current_time)
{
if (apic_next_timer(s, current_time)) {
- qemu_mod_timer(s->timer, s->next_time);
+ timer_mod(s->timer, s->next_time);
} else {
- qemu_del_timer(s->timer);
+ timer_del(s->timer);
}
}
@@ -822,7 +822,7 @@ static void apic_mem_writel(void *opaque, hwaddr addr, uint32_t val)
int n = index - 0x32;
s->lvt[n] = val;
if (n == APIC_LVT_TIMER) {
- apic_timer_update(s, qemu_get_clock_ns(vm_clock));
+ apic_timer_update(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
} else if (n == APIC_LVT_LINT0 && apic_check_pic(s)) {
apic_update_irq(s);
}
@@ -830,7 +830,7 @@ static void apic_mem_writel(void *opaque, hwaddr addr, uint32_t val)
break;
case 0x38:
s->initial_count = val;
- s->initial_count_load_time = qemu_get_clock_ns(vm_clock);
+ s->initial_count_load_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
apic_timer_update(s, s->initial_count_load_time);
break;
case 0x39:
@@ -857,9 +857,9 @@ static void apic_pre_save(APICCommonState *s)
static void apic_post_load(APICCommonState *s)
{
if (s->timer_expiry != -1) {
- qemu_mod_timer(s->timer, s->timer_expiry);
+ timer_mod(s->timer, s->timer_expiry);
} else {
- qemu_del_timer(s->timer);
+ timer_del(s->timer);
}
}
@@ -876,7 +876,7 @@ static void apic_init(APICCommonState *s)
memory_region_init_io(&s->io_memory, OBJECT(s), &apic_io_ops, s, "apic-msi",
APIC_SPACE_SIZE);
- s->timer = qemu_new_timer_ns(vm_clock, apic_timer, s);
+ s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, apic_timer, s);
local_apics[s->idx] = s;
msi_supported = true;
diff --git a/hw/intc/apic_common.c b/hw/intc/apic_common.c
index b03e904a7a..a0beb10863 100644
--- a/hw/intc/apic_common.c
+++ b/hw/intc/apic_common.c
@@ -198,7 +198,7 @@ void apic_init_reset(DeviceState *d)
s->wait_for_sipi = 1;
if (s->timer) {
- qemu_del_timer(s->timer);
+ timer_del(s->timer);
}
s->timer_expiry = -1;
}
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index 178344b5a3..6066fa6838 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -78,9 +78,9 @@ static inline int64_t systick_scale(nvic_state *s)
static void systick_reload(nvic_state *s, int reset)
{
if (reset)
- s->systick.tick = qemu_get_clock_ns(vm_clock);
+ s->systick.tick = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
s->systick.tick += (s->systick.reload + 1) * systick_scale(s);
- qemu_mod_timer(s->systick.timer, s->systick.tick);
+ timer_mod(s->systick.timer, s->systick.tick);
}
static void systick_timer_tick(void * opaque)
@@ -103,7 +103,7 @@ static void systick_reset(nvic_state *s)
s->systick.control = 0;
s->systick.reload = 0;
s->systick.tick = 0;
- qemu_del_timer(s->systick.timer);
+ timer_del(s->systick.timer);
}
/* The external routines use the hardware vector numbering, ie. the first
@@ -158,7 +158,7 @@ static uint32_t nvic_readl(nvic_state *s, uint32_t offset)
int64_t t;
if ((s->systick.control & SYSTICK_ENABLE) == 0)
return 0;
- t = qemu_get_clock_ns(vm_clock);
+ t = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
if (t >= s->systick.tick)
return 0;
val = ((s->systick.tick - (t + 1)) / systick_scale(s)) + 1;
@@ -290,16 +290,16 @@ static void nvic_writel(nvic_state *s, uint32_t offset, uint32_t value)
s->systick.control &= 0xfffffff8;
s->systick.control |= value & 7;
if ((oldval ^ value) & SYSTICK_ENABLE) {
- int64_t now = qemu_get_clock_ns(vm_clock);
+ int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
if (value & SYSTICK_ENABLE) {
if (s->systick.tick) {
s->systick.tick += now;
- qemu_mod_timer(s->systick.timer, s->systick.tick);
+ timer_mod(s->systick.timer, s->systick.tick);
} else {
systick_reload(s, 1);
}
} else {
- qemu_del_timer(s->systick.timer);
+ timer_del(s->systick.timer);
s->systick.tick -= now;
if (s->systick.tick < 0)
s->systick.tick = 0;
@@ -511,7 +511,7 @@ static void armv7m_nvic_realize(DeviceState *dev, Error **errp)
* by the v7M architecture.
*/
memory_region_add_subregion(get_system_memory(), 0xe000e000, &s->container);
- s->systick.timer = qemu_new_timer_ns(vm_clock, systick_timer_tick, s);
+ s->systick.timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, systick_timer_tick, s);
}
static void armv7m_nvic_instance_init(Object *obj)
diff --git a/hw/intc/i8259.c b/hw/intc/i8259.c
index 1415bda93f..c6f248b145 100644
--- a/hw/intc/i8259.c
+++ b/hw/intc/i8259.c
@@ -150,7 +150,7 @@ static void pic_set_irq(void *opaque, int irq, int level)
#endif
#ifdef DEBUG_IRQ_LATENCY
if (level) {
- irq_time[irq_index] = qemu_get_clock_ns(vm_clock);
+ irq_time[irq_index] = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
}
#endif
@@ -228,7 +228,7 @@ int pic_read_irq(DeviceState *d)
#ifdef DEBUG_IRQ_LATENCY
printf("IRQ%d latency=%0.3fus\n",
irq,
- (double)(qemu_get_clock_ns(vm_clock) -
+ (double)(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
irq_time[irq]) * 1000000.0 / get_ticks_per_sec());
#endif
DPRINTF("pic_interrupt: irq=%d\n", irq);
diff --git a/hw/mips/cputimer.c b/hw/mips/cputimer.c
index e0266bf15a..c8b4b000cd 100644
--- a/hw/mips/cputimer.c
+++ b/hw/mips/cputimer.c
@@ -47,11 +47,11 @@ static void cpu_mips_timer_update(CPUMIPSState *env)
uint64_t now, next;
uint32_t wait;
- now = qemu_get_clock_ns(vm_clock);
+ now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
wait = env->CP0_Compare - env->CP0_Count -
(uint32_t)muldiv64(now, TIMER_FREQ, get_ticks_per_sec());
next = now + muldiv64(wait, get_ticks_per_sec(), TIMER_FREQ);
- qemu_mod_timer(env->timer, next);
+ timer_mod(env->timer, next);
}
/* Expire the timer. */
@@ -71,9 +71,9 @@ uint32_t cpu_mips_get_count (CPUMIPSState *env)
} else {
uint64_t now;
- now = qemu_get_clock_ns(vm_clock);
- if (qemu_timer_pending(env->timer)
- && qemu_timer_expired(env->timer, now)) {
+ now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ if (timer_pending(env->timer)
+ && timer_expired(env->timer, now)) {
/* The timer has already expired. */
cpu_mips_timer_expire(env);
}
@@ -90,7 +90,7 @@ void cpu_mips_store_count (CPUMIPSState *env, uint32_t count)
else {
/* Store new count register */
env->CP0_Count =
- count - (uint32_t)muldiv64(qemu_get_clock_ns(vm_clock),
+ count - (uint32_t)muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
TIMER_FREQ, get_ticks_per_sec());
/* Update timer timer */
cpu_mips_timer_update(env);
@@ -115,7 +115,7 @@ void cpu_mips_start_count(CPUMIPSState *env)
void cpu_mips_stop_count(CPUMIPSState *env)
{
/* Store the current value */
- env->CP0_Count += (uint32_t)muldiv64(qemu_get_clock_ns(vm_clock),
+ env->CP0_Count += (uint32_t)muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
TIMER_FREQ, get_ticks_per_sec());
}
@@ -141,7 +141,7 @@ static void mips_timer_cb (void *opaque)
void cpu_mips_clock_init (CPUMIPSState *env)
{
- env->timer = qemu_new_timer_ns(vm_clock, &mips_timer_cb, env);
+ env->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &mips_timer_cb, env);
env->CP0_Compare = 0;
cpu_mips_store_count(env, 1);
}
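The MIPS cputimer hunk above (like the tsc load hunks earlier) renames the query helpers as well: qemu_timer_pending/qemu_timer_expired become timer_pending/timer_expired. A small, hypothetical sketch of that check, using only calls shown in this patch; the function name and includes are assumptions for illustration.

#include <stdbool.h>
#include <stdint.h>
#include "qemu/timer.h"   /* assumed location of the timer_* API */

/* Returns true when an armed QEMU_CLOCK_VIRTUAL timer has already passed its
 * deadline, mirroring the pending/expired check in the cputimer hunk above. */
static bool example_timer_has_fired(QEMUTimer *t)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

    return timer_pending(t) &&     /* was qemu_timer_pending(t) */
           timer_expired(t, now);  /* was qemu_timer_expired(t, now) */
}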
diff --git a/hw/misc/arm_sysctl.c b/hw/misc/arm_sysctl.c
index 4a911d4f8c..0fc26d29a5 100644
--- a/hw/misc/arm_sysctl.c
+++ b/hw/misc/arm_sysctl.c
@@ -170,7 +170,7 @@ static uint64_t arm_sysctl_read(void *opaque, hwaddr offset,
case 0x58: /* BOOTCS */
return 0;
case 0x5c: /* 24MHz */
- return muldiv64(qemu_get_clock_ns(vm_clock), 24000000, get_ticks_per_sec());
+ return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), 24000000, get_ticks_per_sec());
case 0x60: /* MISC */
return 0;
case 0x84: /* PROCID0 */
diff --git a/hw/misc/macio/cuda.c b/hw/misc/macio/cuda.c
index c0fd7da118..c811b9519b 100644
--- a/hw/misc/macio/cuda.c
+++ b/hw/misc/macio/cuda.c
@@ -128,7 +128,7 @@ static unsigned int get_counter(CUDATimer *s)
int64_t d;
unsigned int counter;
- d = muldiv64(qemu_get_clock_ns(vm_clock) - s->load_time,
+ d = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - s->load_time,
CUDA_TIMER_FREQ, get_ticks_per_sec());
if (s->index == 0) {
/* the timer goes down from latch to -1 (period of latch + 2) */
@@ -147,7 +147,7 @@ static unsigned int get_counter(CUDATimer *s)
static void set_counter(CUDAState *s, CUDATimer *ti, unsigned int val)
{
CUDA_DPRINTF("T%d.counter=%d\n", 1 + (ti->timer == NULL), val);
- ti->load_time = qemu_get_clock_ns(vm_clock);
+ ti->load_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
ti->counter_value = val;
cuda_timer_update(s, ti, ti->load_time);
}
@@ -191,10 +191,10 @@ static void cuda_timer_update(CUDAState *s, CUDATimer *ti,
if (!ti->timer)
return;
if ((s->acr & T1MODE) != T1MODE_CONT) {
- qemu_del_timer(ti->timer);
+ timer_del(ti->timer);
} else {
ti->next_irq_time = get_next_irq_time(ti, current_time);
- qemu_mod_timer(ti->timer, ti->next_irq_time);
+ timer_mod(ti->timer, ti->next_irq_time);
}
}
@@ -304,7 +304,7 @@ static void cuda_writeb(void *opaque, hwaddr addr, uint32_t val)
break;
case 4:
s->timers[0].latch = (s->timers[0].latch & 0xff00) | val;
- cuda_timer_update(s, &s->timers[0], qemu_get_clock_ns(vm_clock));
+ cuda_timer_update(s, &s->timers[0], qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
break;
case 5:
s->timers[0].latch = (s->timers[0].latch & 0xff) | (val << 8);
@@ -313,12 +313,12 @@ static void cuda_writeb(void *opaque, hwaddr addr, uint32_t val)
break;
case 6:
s->timers[0].latch = (s->timers[0].latch & 0xff00) | val;
- cuda_timer_update(s, &s->timers[0], qemu_get_clock_ns(vm_clock));
+ cuda_timer_update(s, &s->timers[0], qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
break;
case 7:
s->timers[0].latch = (s->timers[0].latch & 0xff) | (val << 8);
s->ifr &= ~T1_INT;
- cuda_timer_update(s, &s->timers[0], qemu_get_clock_ns(vm_clock));
+ cuda_timer_update(s, &s->timers[0], qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
break;
case 8:
s->timers[1].latch = val;
@@ -332,7 +332,7 @@ static void cuda_writeb(void *opaque, hwaddr addr, uint32_t val)
break;
case 11:
s->acr = val;
- cuda_timer_update(s, &s->timers[0], qemu_get_clock_ns(vm_clock));
+ cuda_timer_update(s, &s->timers[0], qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
cuda_update(s);
break;
case 12:
@@ -463,8 +463,8 @@ static void cuda_adb_poll(void *opaque)
obuf[1] = 0x40; /* polled data */
cuda_send_packet_to_host(s, obuf, olen + 2);
}
- qemu_mod_timer(s->adb_poll_timer,
- qemu_get_clock_ns(vm_clock) +
+ timer_mod(s->adb_poll_timer,
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
(get_ticks_per_sec() / CUDA_ADB_POLL_FREQ));
}
@@ -481,11 +481,11 @@ static void cuda_receive_packet(CUDAState *s,
if (autopoll != s->autopoll) {
s->autopoll = autopoll;
if (autopoll) {
- qemu_mod_timer(s->adb_poll_timer,
- qemu_get_clock_ns(vm_clock) +
+ timer_mod(s->adb_poll_timer,
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
(get_ticks_per_sec() / CUDA_ADB_POLL_FREQ));
} else {
- qemu_del_timer(s->adb_poll_timer);
+ timer_del(s->adb_poll_timer);
}
}
obuf[0] = CUDA_PACKET;
@@ -494,14 +494,14 @@ static void cuda_receive_packet(CUDAState *s,
break;
case CUDA_SET_TIME:
ti = (((uint32_t)data[1]) << 24) + (((uint32_t)data[2]) << 16) + (((uint32_t)data[3]) << 8) + data[4];
- s->tick_offset = ti - (qemu_get_clock_ns(vm_clock) / get_ticks_per_sec());
+ s->tick_offset = ti - (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / get_ticks_per_sec());
obuf[0] = CUDA_PACKET;
obuf[1] = 0;
obuf[2] = 0;
cuda_send_packet_to_host(s, obuf, 3);
break;
case CUDA_GET_TIME:
- ti = s->tick_offset + (qemu_get_clock_ns(vm_clock) / get_ticks_per_sec());
+ ti = s->tick_offset + (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / get_ticks_per_sec());
obuf[0] = CUDA_PACKET;
obuf[1] = 0;
obuf[2] = 0;
@@ -689,12 +689,12 @@ static void cuda_realizefn(DeviceState *dev, Error **errp)
CUDAState *s = CUDA(dev);
struct tm tm;
- s->timers[0].timer = qemu_new_timer_ns(vm_clock, cuda_timer1, s);
+ s->timers[0].timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cuda_timer1, s);
qemu_get_timedate(&tm, 0);
s->tick_offset = (uint32_t)mktimegm(&tm) + RTC_OFFSET;
- s->adb_poll_timer = qemu_new_timer_ns(vm_clock, cuda_adb_poll, s);
+ s->adb_poll_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cuda_adb_poll, s);
}
static void cuda_initfn(Object *obj)
diff --git a/hw/misc/macio/macio.c b/hw/misc/macio/macio.c
index c0d0bf7287..9cc33d8f96 100644
--- a/hw/misc/macio/macio.c
+++ b/hw/misc/macio/macio.c
@@ -245,10 +245,10 @@ static uint64_t timer_read(void *opaque, hwaddr addr, unsigned size)
switch (addr) {
case 0x38:
- value = qemu_get_clock_ns(vm_clock);
+ value = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
break;
case 0x3c:
- value = qemu_get_clock_ns(vm_clock) >> 32;
+ value = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) >> 32;
break;
}
diff --git a/hw/misc/vfio.c b/hw/misc/vfio.c
index 017e69352a..d215e29e46 100644
--- a/hw/misc/vfio.c
+++ b/hw/misc/vfio.c
@@ -276,8 +276,8 @@ static void vfio_intx_mmap_enable(void *opaque)
VFIODevice *vdev = opaque;
if (vdev->intx.pending) {
- qemu_mod_timer(vdev->intx.mmap_timer,
- qemu_get_clock_ms(vm_clock) + vdev->intx.mmap_timeout);
+ timer_mod(vdev->intx.mmap_timer,
+ qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
return;
}
@@ -300,8 +300,8 @@ static void vfio_intx_interrupt(void *opaque)
qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 1);
vfio_mmap_set_enabled(vdev, false);
if (vdev->intx.mmap_timeout) {
- qemu_mod_timer(vdev->intx.mmap_timer,
- qemu_get_clock_ms(vm_clock) + vdev->intx.mmap_timeout);
+ timer_mod(vdev->intx.mmap_timer,
+ qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
}
}
@@ -543,7 +543,7 @@ static void vfio_disable_intx(VFIODevice *vdev)
{
int fd;
- qemu_del_timer(vdev->intx.mmap_timer);
+ timer_del(vdev->intx.mmap_timer);
vfio_disable_intx_kvm(vdev);
vfio_disable_irqindex(vdev, VFIO_PCI_INTX_IRQ_INDEX);
vdev->intx.pending = false;
@@ -3176,7 +3176,7 @@ static int vfio_initfn(PCIDevice *pdev)
}
if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
- vdev->intx.mmap_timer = qemu_new_timer_ms(vm_clock,
+ vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
vfio_intx_mmap_enable, vdev);
pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_update_irq);
ret = vfio_enable_intx(vdev);
@@ -3210,7 +3210,7 @@ static void vfio_exitfn(PCIDevice *pdev)
pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
vfio_disable_interrupts(vdev);
if (vdev->intx.mmap_timer) {
- qemu_free_timer(vdev->intx.mmap_timer);
+ timer_free(vdev->intx.mmap_timer);
}
vfio_teardown_msi(vdev);
vfio_unmap_bars(vdev);
diff --git a/hw/net/dp8393x.c b/hw/net/dp8393x.c
index 049aa704c1..789d385743 100644
--- a/hw/net/dp8393x.c
+++ b/hw/net/dp8393x.c
@@ -274,7 +274,7 @@ static void do_read_rra(dp8393xState *s)
static void do_software_reset(dp8393xState *s)
{
- qemu_del_timer(s->watchdog);
+ timer_del(s->watchdog);
s->regs[SONIC_CR] &= ~(SONIC_CR_LCAM | SONIC_CR_RRRA | SONIC_CR_TXP | SONIC_CR_HTX);
s->regs[SONIC_CR] |= SONIC_CR_RST | SONIC_CR_RXDIS;
@@ -286,14 +286,14 @@ static void set_next_tick(dp8393xState *s)
int64_t delay;
if (s->regs[SONIC_CR] & SONIC_CR_STP) {
- qemu_del_timer(s->watchdog);
+ timer_del(s->watchdog);
return;
}
ticks = s->regs[SONIC_WT1] << 16 | s->regs[SONIC_WT0];
- s->wt_last_update = qemu_get_clock_ns(vm_clock);
+ s->wt_last_update = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
delay = get_ticks_per_sec() * ticks / 5000000;
- qemu_mod_timer(s->watchdog, s->wt_last_update + delay);
+ timer_mod(s->watchdog, s->wt_last_update + delay);
}
static void update_wt_regs(dp8393xState *s)
@@ -302,11 +302,11 @@ static void update_wt_regs(dp8393xState *s)
uint32_t val;
if (s->regs[SONIC_CR] & SONIC_CR_STP) {
- qemu_del_timer(s->watchdog);
+ timer_del(s->watchdog);
return;
}
- elapsed = s->wt_last_update - qemu_get_clock_ns(vm_clock);
+ elapsed = s->wt_last_update - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
val = s->regs[SONIC_WT1] << 16 | s->regs[SONIC_WT0];
val -= elapsed / 5000000;
s->regs[SONIC_WT1] = (val >> 16) & 0xffff;
@@ -838,7 +838,7 @@ static ssize_t nic_receive(NetClientState *nc, const uint8_t * buf, size_t size)
static void nic_reset(void *opaque)
{
dp8393xState *s = opaque;
- qemu_del_timer(s->watchdog);
+ timer_del(s->watchdog);
s->regs[SONIC_CR] = SONIC_CR_RST | SONIC_CR_STP | SONIC_CR_RXDIS;
s->regs[SONIC_DCR] &= ~(SONIC_DCR_EXBUS | SONIC_DCR_LBR);
@@ -866,8 +866,8 @@ static void nic_cleanup(NetClientState *nc)
memory_region_del_subregion(s->address_space, &s->mmio);
memory_region_destroy(&s->mmio);
- qemu_del_timer(s->watchdog);
- qemu_free_timer(s->watchdog);
+ timer_del(s->watchdog);
+ timer_free(s->watchdog);
g_free(s);
}
@@ -896,7 +896,7 @@ void dp83932_init(NICInfo *nd, hwaddr base, int it_shift,
s->memory_rw = memory_rw;
s->it_shift = it_shift;
s->irq = irq;
- s->watchdog = qemu_new_timer_ns(vm_clock, dp8393x_watchdog, s);
+ s->watchdog = timer_new_ns(QEMU_CLOCK_VIRTUAL, dp8393x_watchdog, s);
s->regs[SONIC_SR] = 0x0004; /* only revision recognized by Linux */
s->conf.macaddr = nd->macaddr;
diff --git a/hw/net/e1000.c b/hw/net/e1000.c
index fdb1f890b4..f5ebed46ab 100644
--- a/hw/net/e1000.c
+++ b/hw/net/e1000.c
@@ -190,7 +190,7 @@ set_phy_ctrl(E1000State *s, int index, uint16_t val)
e1000_link_down(s);
s->phy_reg[PHY_STATUS] &= ~MII_SR_AUTONEG_COMPLETE;
DBGOUT(PHY, "Start link auto negotiation\n");
- qemu_mod_timer(s->autoneg_timer, qemu_get_clock_ms(vm_clock) + 500);
+ timer_mod(s->autoneg_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
}
}
@@ -306,7 +306,7 @@ static void e1000_reset(void *opaque)
uint8_t *macaddr = d->conf.macaddr.a;
int i;
- qemu_del_timer(d->autoneg_timer);
+ timer_del(d->autoneg_timer);
memset(d->phy_reg, 0, sizeof d->phy_reg);
memmove(d->phy_reg, phy_reg_init, sizeof phy_reg_init);
memset(d->mac_reg, 0, sizeof d->mac_reg);
@@ -1184,7 +1184,7 @@ static int e1000_post_load(void *opaque, int version_id)
s->phy_reg[PHY_CTRL] & MII_CR_RESTART_AUTO_NEG &&
!(s->phy_reg[PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
nc->link_down = false;
- qemu_mod_timer(s->autoneg_timer, qemu_get_clock_ms(vm_clock) + 500);
+ timer_mod(s->autoneg_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
}
return 0;
@@ -1314,8 +1314,8 @@ pci_e1000_uninit(PCIDevice *dev)
{
E1000State *d = E1000(dev);
- qemu_del_timer(d->autoneg_timer);
- qemu_free_timer(d->autoneg_timer);
+ timer_del(d->autoneg_timer);
+ timer_free(d->autoneg_timer);
memory_region_destroy(&d->mmio);
memory_region_destroy(&d->io);
qemu_del_nic(d->nic);
@@ -1370,7 +1370,7 @@ static int pci_e1000_init(PCIDevice *pci_dev)
add_boot_device_path(d->conf.bootindex, dev, "/ethernet-phy@0");
- d->autoneg_timer = qemu_new_timer_ms(vm_clock, e1000_autoneg_timer, d);
+ d->autoneg_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, e1000_autoneg_timer, d);
return 0;
}
diff --git a/hw/net/lan9118.c b/hw/net/lan9118.c
index 2c838f67dc..2315f996d4 100644
--- a/hw/net/lan9118.c
+++ b/hw/net/lan9118.c
@@ -439,7 +439,7 @@ static void lan9118_reset(DeviceState *d)
s->afc_cfg = 0;
s->e2p_cmd = 0;
s->e2p_data = 0;
- s->free_timer_start = qemu_get_clock_ns(vm_clock) / 40;
+ s->free_timer_start = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / 40;
ptimer_stop(s->timer);
ptimer_set_count(s->timer, 0xffff);
@@ -1236,7 +1236,7 @@ static uint64_t lan9118_readl(void *opaque, hwaddr offset,
case CSR_WORD_SWAP:
return s->word_swap;
case CSR_FREE_RUN:
- return (qemu_get_clock_ns(vm_clock) / 40) - s->free_timer_start;
+ return (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / 40) - s->free_timer_start;
case CSR_RX_DROP:
/* TODO: Implement dropped frames counter. */
return 0;
diff --git a/hw/net/pcnet-pci.c b/hw/net/pcnet-pci.c
index 2c2301c360..a8931652b3 100644
--- a/hw/net/pcnet-pci.c
+++ b/hw/net/pcnet-pci.c
@@ -284,8 +284,8 @@ static void pci_pcnet_uninit(PCIDevice *dev)
memory_region_destroy(&d->state.mmio);
memory_region_destroy(&d->io_bar);
- qemu_del_timer(d->state.poll_timer);
- qemu_free_timer(d->state.poll_timer);
+ timer_del(d->state.poll_timer);
+ timer_free(d->state.poll_timer);
qemu_del_nic(d->state.nic);
}
diff --git a/hw/net/pcnet.c b/hw/net/pcnet.c
index 63aa73a241..7cb47b3f1f 100644
--- a/hw/net/pcnet.c
+++ b/hw/net/pcnet.c
@@ -1331,7 +1331,7 @@ static void pcnet_poll_timer(void *opaque)
{
PCNetState *s = opaque;
- qemu_del_timer(s->poll_timer);
+ timer_del(s->poll_timer);
if (CSR_TDMD(s)) {
pcnet_transmit(s);
@@ -1340,7 +1340,7 @@ static void pcnet_poll_timer(void *opaque)
pcnet_update_irq(s);
if (!CSR_STOP(s) && !CSR_SPND(s) && !CSR_DPOLL(s)) {
- uint64_t now = qemu_get_clock_ns(vm_clock) * 33;
+ uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) * 33;
if (!s->timer || !now)
s->timer = now;
else {
@@ -1351,8 +1351,8 @@ static void pcnet_poll_timer(void *opaque)
} else
CSR_POLL(s) = t;
}
- qemu_mod_timer(s->poll_timer,
- pcnet_get_next_poll_time(s,qemu_get_clock_ns(vm_clock)));
+ timer_mod(s->poll_timer,
+ pcnet_get_next_poll_time(s,qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)));
}
}
@@ -1731,7 +1731,7 @@ int pcnet_common_init(DeviceState *dev, PCNetState *s, NetClientInfo *info)
int i;
uint16_t checksum;
- s->poll_timer = qemu_new_timer_ns(vm_clock, pcnet_poll_timer, s);
+ s->poll_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, pcnet_poll_timer, s);
qemu_macaddr_default_if_unset(&s->conf.macaddr);
s->nic = qemu_new_nic(info, &s->conf, object_get_typename(OBJECT(dev)), dev->id, s);
diff --git a/hw/net/rtl8139.c b/hw/net/rtl8139.c
index ee3b6903a1..c31199f8c8 100644
--- a/hw/net/rtl8139.c
+++ b/hw/net/rtl8139.c
@@ -2648,7 +2648,7 @@ static void rtl8139_IntrMask_write(RTL8139State *s, uint32_t val)
s->IntrMask = val;
- rtl8139_set_next_tctr_time(s, qemu_get_clock_ns(vm_clock));
+ rtl8139_set_next_tctr_time(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
rtl8139_update_irq(s);
}
@@ -2689,7 +2689,7 @@ static void rtl8139_IntrStatus_write(RTL8139State *s, uint32_t val)
* and probably emulated is slower is better to assume this resetting was
* done before testing on previous rtl8139_update_irq lead to IRQ losing
*/
- rtl8139_set_next_tctr_time(s, qemu_get_clock_ns(vm_clock));
+ rtl8139_set_next_tctr_time(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
rtl8139_update_irq(s);
#endif
@@ -2697,7 +2697,7 @@ static void rtl8139_IntrStatus_write(RTL8139State *s, uint32_t val)
static uint32_t rtl8139_IntrStatus_read(RTL8139State *s)
{
- rtl8139_set_next_tctr_time(s, qemu_get_clock_ns(vm_clock));
+ rtl8139_set_next_tctr_time(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
uint32_t ret = s->IntrStatus;
@@ -2913,7 +2913,7 @@ static void rtl8139_set_next_tctr_time(RTL8139State *s, int64_t current_time)
s->TimerExpire = next_time;
if ((s->IntrMask & PCSTimeout) != 0 && (s->IntrStatus & PCSTimeout) == 0) {
- qemu_mod_timer(s->timer, next_time);
+ timer_mod(s->timer, next_time);
}
}
@@ -2960,7 +2960,7 @@ static void rtl8139_io_writel(void *opaque, uint8_t addr, uint32_t val)
case Timer:
DPRINTF("TCTR Timer reset on write\n");
- s->TCTR_base = qemu_get_clock_ns(vm_clock);
+ s->TCTR_base = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
rtl8139_set_next_tctr_time(s, s->TCTR_base);
break;
@@ -2968,7 +2968,7 @@ static void rtl8139_io_writel(void *opaque, uint8_t addr, uint32_t val)
DPRINTF("FlashReg TimerInt write val=0x%08x\n", val);
if (s->TimerInt != val) {
s->TimerInt = val;
- rtl8139_set_next_tctr_time(s, qemu_get_clock_ns(vm_clock));
+ rtl8139_set_next_tctr_time(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
}
break;
@@ -3183,7 +3183,7 @@ static uint32_t rtl8139_io_readl(void *opaque, uint8_t addr)
break;
case Timer:
- ret = muldiv64(qemu_get_clock_ns(vm_clock) - s->TCTR_base,
+ ret = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - s->TCTR_base,
PCI_FREQUENCY, get_ticks_per_sec());
DPRINTF("TCTR Timer read val=0x%08x\n", ret);
break;
@@ -3245,7 +3245,7 @@ static uint32_t rtl8139_mmio_readl(void *opaque, hwaddr addr)
static int rtl8139_post_load(void *opaque, int version_id)
{
RTL8139State* s = opaque;
- rtl8139_set_next_tctr_time(s, qemu_get_clock_ns(vm_clock));
+ rtl8139_set_next_tctr_time(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
if (version_id < 4) {
s->cplus_enabled = s->CpCmd != 0;
}
@@ -3275,7 +3275,7 @@ static const VMStateDescription vmstate_rtl8139_hotplug_ready ={
static void rtl8139_pre_save(void *opaque)
{
RTL8139State* s = opaque;
- int64_t current_time = qemu_get_clock_ns(vm_clock);
+ int64_t current_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
/* set IntrStatus correctly */
rtl8139_set_next_tctr_time(s, current_time);
@@ -3446,7 +3446,7 @@ static void rtl8139_timer(void *opaque)
s->IntrStatus |= PCSTimeout;
rtl8139_update_irq(s);
- rtl8139_set_next_tctr_time(s, qemu_get_clock_ns(vm_clock));
+ rtl8139_set_next_tctr_time(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
}
static void rtl8139_cleanup(NetClientState *nc)
@@ -3466,8 +3466,8 @@ static void pci_rtl8139_uninit(PCIDevice *dev)
g_free(s->cplus_txbuffer);
s->cplus_txbuffer = NULL;
}
- qemu_del_timer(s->timer);
- qemu_free_timer(s->timer);
+ timer_del(s->timer);
+ timer_free(s->timer);
qemu_del_nic(s->nic);
}
@@ -3535,8 +3535,8 @@ static int pci_rtl8139_init(PCIDevice *dev)
s->cplus_txbuffer_offset = 0;
s->TimerExpire = 0;
- s->timer = qemu_new_timer_ns(vm_clock, rtl8139_timer, s);
- rtl8139_set_next_tctr_time(s, qemu_get_clock_ns(vm_clock));
+ s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, rtl8139_timer, s);
+ rtl8139_set_next_tctr_time(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
add_boot_device_path(s->conf.bootindex, d, "/ethernet-phy@0");
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index aa1880cb87..dd41008fb0 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -162,14 +162,14 @@ static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
if (virtio_net_started(n, queue_status) && !n->vhost_started) {
if (q->tx_timer) {
- qemu_mod_timer(q->tx_timer,
- qemu_get_clock_ns(vm_clock) + n->tx_timeout);
+ timer_mod(q->tx_timer,
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
} else {
qemu_bh_schedule(q->tx_bh);
}
} else {
if (q->tx_timer) {
- qemu_del_timer(q->tx_timer);
+ timer_del(q->tx_timer);
} else {
qemu_bh_cancel(q->tx_bh);
}
@@ -1131,12 +1131,12 @@ static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
if (q->tx_waiting) {
virtio_queue_set_notification(vq, 1);
- qemu_del_timer(q->tx_timer);
+ timer_del(q->tx_timer);
q->tx_waiting = 0;
virtio_net_flush_tx(q);
} else {
- qemu_mod_timer(q->tx_timer,
- qemu_get_clock_ns(vm_clock) + n->tx_timeout);
+ timer_mod(q->tx_timer,
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
q->tx_waiting = 1;
virtio_queue_set_notification(vq, 0);
}
@@ -1233,7 +1233,7 @@ static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
if (n->vqs[i].tx_timer) {
n->vqs[i].tx_vq =
virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
- n->vqs[i].tx_timer = qemu_new_timer_ns(vm_clock,
+ n->vqs[i].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
virtio_net_tx_timer,
&n->vqs[i]);
} else {
@@ -1513,7 +1513,7 @@ static int virtio_net_device_init(VirtIODevice *vdev)
if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
n->vqs[0].tx_vq = virtio_add_queue(vdev, 256,
virtio_net_handle_tx_timer);
- n->vqs[0].tx_timer = qemu_new_timer_ns(vm_clock, virtio_net_tx_timer,
+ n->vqs[0].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, virtio_net_tx_timer,
&n->vqs[0]);
} else {
n->vqs[0].tx_vq = virtio_add_queue(vdev, 256,
@@ -1598,8 +1598,8 @@ static int virtio_net_device_exit(DeviceState *qdev)
qemu_purge_queued_packets(nc);
if (q->tx_timer) {
- qemu_del_timer(q->tx_timer);
- qemu_free_timer(q->tx_timer);
+ timer_del(q->tx_timer);
+ timer_free(q->tx_timer);
} else {
qemu_bh_delete(q->tx_bh);
}
diff --git a/hw/openrisc/cputimer.c b/hw/openrisc/cputimer.c
index 4144b34be7..988ca20898 100644
--- a/hw/openrisc/cputimer.c
+++ b/hw/openrisc/cputimer.c
@@ -33,9 +33,9 @@ void cpu_openrisc_count_update(OpenRISCCPU *cpu)
uint64_t now, next;
uint32_t wait;
- now = qemu_get_clock_ns(vm_clock);
+ now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
if (!is_counting) {
- qemu_del_timer(cpu->env.timer);
+ timer_del(cpu->env.timer);
last_clk = now;
return;
}
@@ -52,7 +52,7 @@ void cpu_openrisc_count_update(OpenRISCCPU *cpu)
}
next = now + muldiv64(wait, get_ticks_per_sec(), TIMER_FREQ);
- qemu_mod_timer(cpu->env.timer, next);
+ timer_mod(cpu->env.timer, next);
}
void cpu_openrisc_count_start(OpenRISCCPU *cpu)
@@ -72,7 +72,7 @@ static void openrisc_timer_cb(void *opaque)
OpenRISCCPU *cpu = opaque;
if ((cpu->env.ttmr & TTMR_IE) &&
- qemu_timer_expired(cpu->env.timer, qemu_get_clock_ns(vm_clock))) {
+ timer_expired(cpu->env.timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL))) {
CPUState *cs = CPU(cpu);
cpu->env.ttmr |= TTMR_IP;
@@ -97,7 +97,7 @@ static void openrisc_timer_cb(void *opaque)
void cpu_openrisc_clock_init(OpenRISCCPU *cpu)
{
- cpu->env.timer = qemu_new_timer_ns(vm_clock, &openrisc_timer_cb, cpu);
+ cpu->env.timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &openrisc_timer_cb, cpu);
cpu->env.ttmr = 0x00000000;
cpu->env.ttcr = 0x00000000;
}
diff --git a/hw/ppc/ppc.c b/hw/ppc/ppc.c
index e1c095c7e2..59b41cbc6f 100644
--- a/hw/ppc/ppc.c
+++ b/hw/ppc/ppc.c
@@ -471,7 +471,7 @@ uint64_t cpu_ppc_load_tbl (CPUPPCState *env)
return env->spr[SPR_TBL];
}
- tb = cpu_ppc_get_tb(tb_env, qemu_get_clock_ns(vm_clock), tb_env->tb_offset);
+ tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);
return tb;
@@ -482,7 +482,7 @@ static inline uint32_t _cpu_ppc_load_tbu(CPUPPCState *env)
ppc_tb_t *tb_env = env->tb_env;
uint64_t tb;
- tb = cpu_ppc_get_tb(tb_env, qemu_get_clock_ns(vm_clock), tb_env->tb_offset);
+ tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);
return tb >> 32;
@@ -510,9 +510,9 @@ void cpu_ppc_store_tbl (CPUPPCState *env, uint32_t value)
ppc_tb_t *tb_env = env->tb_env;
uint64_t tb;
- tb = cpu_ppc_get_tb(tb_env, qemu_get_clock_ns(vm_clock), tb_env->tb_offset);
+ tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
tb &= 0xFFFFFFFF00000000ULL;
- cpu_ppc_store_tb(tb_env, qemu_get_clock_ns(vm_clock),
+ cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
&tb_env->tb_offset, tb | (uint64_t)value);
}
@@ -521,9 +521,9 @@ static inline void _cpu_ppc_store_tbu(CPUPPCState *env, uint32_t value)
ppc_tb_t *tb_env = env->tb_env;
uint64_t tb;
- tb = cpu_ppc_get_tb(tb_env, qemu_get_clock_ns(vm_clock), tb_env->tb_offset);
+ tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
tb &= 0x00000000FFFFFFFFULL;
- cpu_ppc_store_tb(tb_env, qemu_get_clock_ns(vm_clock),
+ cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
&tb_env->tb_offset, ((uint64_t)value << 32) | tb);
}
@@ -537,7 +537,7 @@ uint64_t cpu_ppc_load_atbl (CPUPPCState *env)
ppc_tb_t *tb_env = env->tb_env;
uint64_t tb;
- tb = cpu_ppc_get_tb(tb_env, qemu_get_clock_ns(vm_clock), tb_env->atb_offset);
+ tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);
return tb;
@@ -548,7 +548,7 @@ uint32_t cpu_ppc_load_atbu (CPUPPCState *env)
ppc_tb_t *tb_env = env->tb_env;
uint64_t tb;
- tb = cpu_ppc_get_tb(tb_env, qemu_get_clock_ns(vm_clock), tb_env->atb_offset);
+ tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);
return tb >> 32;
@@ -559,9 +559,9 @@ void cpu_ppc_store_atbl (CPUPPCState *env, uint32_t value)
ppc_tb_t *tb_env = env->tb_env;
uint64_t tb;
- tb = cpu_ppc_get_tb(tb_env, qemu_get_clock_ns(vm_clock), tb_env->atb_offset);
+ tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
tb &= 0xFFFFFFFF00000000ULL;
- cpu_ppc_store_tb(tb_env, qemu_get_clock_ns(vm_clock),
+ cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
&tb_env->atb_offset, tb | (uint64_t)value);
}
@@ -570,9 +570,9 @@ void cpu_ppc_store_atbu (CPUPPCState *env, uint32_t value)
ppc_tb_t *tb_env = env->tb_env;
uint64_t tb;
- tb = cpu_ppc_get_tb(tb_env, qemu_get_clock_ns(vm_clock), tb_env->atb_offset);
+ tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
tb &= 0x00000000FFFFFFFFULL;
- cpu_ppc_store_tb(tb_env, qemu_get_clock_ns(vm_clock),
+ cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
&tb_env->atb_offset, ((uint64_t)value << 32) | tb);
}
@@ -583,7 +583,7 @@ static void cpu_ppc_tb_stop (CPUPPCState *env)
/* If the time base is already frozen, do nothing */
if (tb_env->tb_freq != 0) {
- vmclk = qemu_get_clock_ns(vm_clock);
+ vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
/* Get the time base */
tb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->tb_offset);
/* Get the alternate time base */
@@ -605,7 +605,7 @@ static void cpu_ppc_tb_start (CPUPPCState *env)
/* If the time base is not frozen, do nothing */
if (tb_env->tb_freq == 0) {
- vmclk = qemu_get_clock_ns(vm_clock);
+ vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
/* Get the time base from tb_offset */
tb = tb_env->tb_offset;
/* Get the alternate time base from atb_offset */
@@ -625,7 +625,7 @@ static inline uint32_t _cpu_ppc_load_decr(CPUPPCState *env, uint64_t next)
uint32_t decr;
int64_t diff;
- diff = next - qemu_get_clock_ns(vm_clock);
+ diff = next - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
if (diff >= 0) {
decr = muldiv64(diff, tb_env->decr_freq, get_ticks_per_sec());
} else if (tb_env->flags & PPC_TIMER_BOOKE) {
@@ -661,7 +661,7 @@ uint64_t cpu_ppc_load_purr (CPUPPCState *env)
ppc_tb_t *tb_env = env->tb_env;
uint64_t diff;
- diff = qemu_get_clock_ns(vm_clock) - tb_env->purr_start;
+ diff = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - tb_env->purr_start;
return tb_env->purr_load + muldiv64(diff, tb_env->tb_freq, get_ticks_per_sec());
}
@@ -701,7 +701,7 @@ static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp,
return;
}
- now = qemu_get_clock_ns(vm_clock);
+ now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
next = now + muldiv64(value, get_ticks_per_sec(), tb_env->decr_freq);
if (is_excp) {
next += *nextp - now;
@@ -711,7 +711,7 @@ static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp,
}
*nextp = next;
/* Adjust timer */
- qemu_mod_timer(timer, next);
+ timer_mod(timer, next);
/* If we set a negative value and the decrementer was positive, raise an
* exception.
@@ -776,7 +776,7 @@ static void cpu_ppc_store_purr(PowerPCCPU *cpu, uint64_t value)
ppc_tb_t *tb_env = cpu->env.tb_env;
tb_env->purr_load = value;
- tb_env->purr_start = qemu_get_clock_ns(vm_clock);
+ tb_env->purr_start = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
}
static void cpu_ppc_set_tb_clk (void *opaque, uint32_t freq)
@@ -806,11 +806,11 @@ clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq)
env->tb_env = tb_env;
tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
/* Create new timer */
- tb_env->decr_timer = qemu_new_timer_ns(vm_clock, &cpu_ppc_decr_cb, cpu);
+ tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_decr_cb, cpu);
if (0) {
/* XXX: find a suitable condition to enable the hypervisor decrementer
*/
- tb_env->hdecr_timer = qemu_new_timer_ns(vm_clock, &cpu_ppc_hdecr_cb,
+ tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_hdecr_cb,
cpu);
} else {
tb_env->hdecr_timer = NULL;
@@ -877,7 +877,7 @@ static void cpu_4xx_fit_cb (void *opaque)
cpu = ppc_env_get_cpu(env);
tb_env = env->tb_env;
ppc40x_timer = tb_env->opaque;
- now = qemu_get_clock_ns(vm_clock);
+ now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
switch ((env->spr[SPR_40x_TCR] >> 24) & 0x3) {
case 0:
next = 1 << 9;
@@ -898,7 +898,7 @@ static void cpu_4xx_fit_cb (void *opaque)
next = now + muldiv64(next, get_ticks_per_sec(), tb_env->tb_freq);
if (next == now)
next++;
- qemu_mod_timer(ppc40x_timer->fit_timer, next);
+ timer_mod(ppc40x_timer->fit_timer, next);
env->spr[SPR_40x_TSR] |= 1 << 26;
if ((env->spr[SPR_40x_TCR] >> 23) & 0x1) {
ppc_set_irq(cpu, PPC_INTERRUPT_FIT, 1);
@@ -920,18 +920,18 @@ static void start_stop_pit (CPUPPCState *env, ppc_tb_t *tb_env, int is_excp)
(is_excp && !((env->spr[SPR_40x_TCR] >> 22) & 0x1))) {
/* Stop PIT */
LOG_TB("%s: stop PIT\n", __func__);
- qemu_del_timer(tb_env->decr_timer);
+ timer_del(tb_env->decr_timer);
} else {
LOG_TB("%s: start PIT %016" PRIx64 "\n",
__func__, ppc40x_timer->pit_reload);
- now = qemu_get_clock_ns(vm_clock);
+ now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
next = now + muldiv64(ppc40x_timer->pit_reload,
get_ticks_per_sec(), tb_env->decr_freq);
if (is_excp)
next += tb_env->decr_next - now;
if (next == now)
next++;
- qemu_mod_timer(tb_env->decr_timer, next);
+ timer_mod(tb_env->decr_timer, next);
tb_env->decr_next = next;
}
}
@@ -973,7 +973,7 @@ static void cpu_4xx_wdt_cb (void *opaque)
cpu = ppc_env_get_cpu(env);
tb_env = env->tb_env;
ppc40x_timer = tb_env->opaque;
- now = qemu_get_clock_ns(vm_clock);
+ now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
switch ((env->spr[SPR_40x_TCR] >> 30) & 0x3) {
case 0:
next = 1 << 17;
@@ -999,12 +999,12 @@ static void cpu_4xx_wdt_cb (void *opaque)
switch ((env->spr[SPR_40x_TSR] >> 30) & 0x3) {
case 0x0:
case 0x1:
- qemu_mod_timer(ppc40x_timer->wdt_timer, next);
+ timer_mod(ppc40x_timer->wdt_timer, next);
ppc40x_timer->wdt_next = next;
env->spr[SPR_40x_TSR] |= 1 << 31;
break;
case 0x2:
- qemu_mod_timer(ppc40x_timer->wdt_timer, next);
+ timer_mod(ppc40x_timer->wdt_timer, next);
ppc40x_timer->wdt_next = next;
env->spr[SPR_40x_TSR] |= 1 << 30;
if ((env->spr[SPR_40x_TCR] >> 27) & 0x1) {
@@ -1076,11 +1076,11 @@ clk_setup_cb ppc_40x_timers_init (CPUPPCState *env, uint32_t freq,
LOG_TB("%s freq %" PRIu32 "\n", __func__, freq);
if (ppc40x_timer != NULL) {
/* We use decr timer for PIT */
- tb_env->decr_timer = qemu_new_timer_ns(vm_clock, &cpu_4xx_pit_cb, env);
+ tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_pit_cb, env);
ppc40x_timer->fit_timer =
- qemu_new_timer_ns(vm_clock, &cpu_4xx_fit_cb, env);
+ timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_fit_cb, env);
ppc40x_timer->wdt_timer =
- qemu_new_timer_ns(vm_clock, &cpu_4xx_wdt_cb, env);
+ timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_wdt_cb, env);
ppc40x_timer->decr_excp = decr_excp;
}
diff --git a/hw/ppc/ppc405_uc.c b/hw/ppc/ppc405_uc.c
index 290f71ab69..0ef5254cd7 100644
--- a/hw/ppc/ppc405_uc.c
+++ b/hw/ppc/ppc405_uc.c
@@ -1348,7 +1348,7 @@ static uint32_t ppc4xx_gpt_readl (void *opaque, hwaddr addr)
switch (addr) {
case 0x00:
/* Time base counter */
- ret = muldiv64(qemu_get_clock_ns(vm_clock) + gpt->tb_offset,
+ ret = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + gpt->tb_offset,
gpt->tb_freq, get_ticks_per_sec());
break;
case 0x10:
@@ -1405,7 +1405,7 @@ static void ppc4xx_gpt_writel (void *opaque,
case 0x00:
/* Time base counter */
gpt->tb_offset = muldiv64(value, get_ticks_per_sec(), gpt->tb_freq)
- - qemu_get_clock_ns(vm_clock);
+ - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
ppc4xx_gpt_compute_timer(gpt);
break;
case 0x10:
@@ -1476,7 +1476,7 @@ static void ppc4xx_gpt_reset (void *opaque)
int i;
gpt = opaque;
- qemu_del_timer(gpt->timer);
+ timer_del(gpt->timer);
gpt->oe = 0x00000000;
gpt->ol = 0x00000000;
gpt->im = 0x00000000;
@@ -1497,7 +1497,7 @@ static void ppc4xx_gpt_init(hwaddr base, qemu_irq irqs[5])
for (i = 0; i < 5; i++) {
gpt->irqs[i] = irqs[i];
}
- gpt->timer = qemu_new_timer_ns(vm_clock, &ppc4xx_gpt_cb, gpt);
+ gpt->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &ppc4xx_gpt_cb, gpt);
#ifdef DEBUG_GPT
printf("%s: offset " TARGET_FMT_plx "\n", __func__, base);
#endif
diff --git a/hw/ppc/ppc_booke.c b/hw/ppc/ppc_booke.c
index 000c27f2e8..8bbfc728de 100644
--- a/hw/ppc/ppc_booke.c
+++ b/hw/ppc/ppc_booke.c
@@ -136,7 +136,7 @@ static void booke_update_fixed_timer(CPUPPCState *env,
uint64_t period;
uint64_t now;
- now = qemu_get_clock_ns(vm_clock);
+ now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
tb = cpu_ppc_get_tb(tb_env, now, tb_env->tb_offset);
period = 1ULL << target_bit;
delta_tick = period - (tb & (period - 1));
@@ -167,7 +167,7 @@ static void booke_update_fixed_timer(CPUPPCState *env,
(*next)++;
}
- qemu_mod_timer(timer, *next);
+ timer_mod(timer, *next);
}
static void booke_decr_cb(void *opaque)
@@ -303,12 +303,12 @@ void ppc_booke_timers_init(PowerPCCPU *cpu, uint32_t freq, uint32_t flags)
tb_env->tb_freq = freq;
tb_env->decr_freq = freq;
tb_env->opaque = booke_timer;
- tb_env->decr_timer = qemu_new_timer_ns(vm_clock, &booke_decr_cb, cpu);
+ tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &booke_decr_cb, cpu);
booke_timer->fit_timer =
- qemu_new_timer_ns(vm_clock, &booke_fit_cb, cpu);
+ timer_new_ns(QEMU_CLOCK_VIRTUAL, &booke_fit_cb, cpu);
booke_timer->wdt_timer =
- qemu_new_timer_ns(vm_clock, &booke_wdt_cb, cpu);
+ timer_new_ns(QEMU_CLOCK_VIRTUAL, &booke_wdt_cb, cpu);
ret = kvmppc_booke_watchdog_enable(cpu);
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 16bfab90b0..4b566aa410 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -789,7 +789,7 @@ static void htab_save_first_pass(QEMUFile *f, sPAPREnvironment *spapr,
{
int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
int index = spapr->htab_save_index;
- int64_t starttime = qemu_get_clock_ns(rt_clock);
+ int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
assert(spapr->htab_first_pass);
@@ -820,7 +820,7 @@ static void htab_save_first_pass(QEMUFile *f, sPAPREnvironment *spapr,
qemu_put_buffer(f, HPTE(spapr->htab, chunkstart),
HASH_PTE_SIZE_64 * n_valid);
- if ((qemu_get_clock_ns(rt_clock) - starttime) > max_ns) {
+ if ((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
break;
}
}
@@ -841,7 +841,7 @@ static int htab_save_later_pass(QEMUFile *f, sPAPREnvironment *spapr,
int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
int examined = 0, sent = 0;
int index = spapr->htab_save_index;
- int64_t starttime = qemu_get_clock_ns(rt_clock);
+ int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
assert(!spapr->htab_first_pass);
@@ -886,7 +886,7 @@ static int htab_save_later_pass(QEMUFile *f, sPAPREnvironment *spapr,
HASH_PTE_SIZE_64 * n_valid);
sent += index - chunkstart;
- if (!final && (qemu_get_clock_ns(rt_clock) - starttime) > max_ns) {
+ if (!final && (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
break;
}
}
diff --git a/hw/sd/sdhci.c b/hw/sd/sdhci.c
index d2dbddc11e..1483e196cd 100644
--- a/hw/sd/sdhci.c
+++ b/hw/sd/sdhci.c
@@ -134,8 +134,8 @@ static void sdhci_raise_insertion_irq(void *opaque)
SDHCIState *s = (SDHCIState *)opaque;
if (s->norintsts & SDHC_NIS_REMOVE) {
- qemu_mod_timer(s->insert_timer,
- qemu_get_clock_ns(vm_clock) + SDHC_INSERTION_DELAY);
+ timer_mod(s->insert_timer,
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + SDHC_INSERTION_DELAY);
} else {
s->prnsts = 0x1ff0000;
if (s->norintstsen & SDHC_NISEN_INSERT) {
@@ -152,8 +152,8 @@ static void sdhci_insert_eject_cb(void *opaque, int irq, int level)
if ((s->norintsts & SDHC_NIS_REMOVE) && level) {
/* Give target some time to notice card ejection */
- qemu_mod_timer(s->insert_timer,
- qemu_get_clock_ns(vm_clock) + SDHC_INSERTION_DELAY);
+ timer_mod(s->insert_timer,
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + SDHC_INSERTION_DELAY);
} else {
if (level) {
s->prnsts = 0x1ff0000;
@@ -186,8 +186,8 @@ static void sdhci_card_readonly_cb(void *opaque, int irq, int level)
static void sdhci_reset(SDHCIState *s)
{
- qemu_del_timer(s->insert_timer);
- qemu_del_timer(s->transfer_timer);
+ timer_del(s->insert_timer);
+ timer_del(s->transfer_timer);
/* Set all registers to 0. Capabilities registers are not cleared
* and assumed to always preserve their value, given to them during
* initialization */
@@ -764,8 +764,8 @@ static void sdhci_do_adma(SDHCIState *s)
}
/* we have unfinished business - reschedule to continue ADMA */
- qemu_mod_timer(s->transfer_timer,
- qemu_get_clock_ns(vm_clock) + SDHC_TRANSFER_DELAY);
+ timer_mod(s->transfer_timer,
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + SDHC_TRANSFER_DELAY);
}
/* Perform data transfer according to controller configuration */
@@ -1170,18 +1170,18 @@ static void sdhci_initfn(Object *obj)
s->ro_cb = qemu_allocate_irqs(sdhci_card_readonly_cb, s, 1)[0];
sd_set_cb(s->card, s->ro_cb, s->eject_cb);
- s->insert_timer = qemu_new_timer_ns(vm_clock, sdhci_raise_insertion_irq, s);
- s->transfer_timer = qemu_new_timer_ns(vm_clock, sdhci_do_data_transfer, s);
+ s->insert_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, sdhci_raise_insertion_irq, s);
+ s->transfer_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, sdhci_do_data_transfer, s);
}
static void sdhci_uninitfn(Object *obj)
{
SDHCIState *s = SDHCI(obj);
- qemu_del_timer(s->insert_timer);
- qemu_free_timer(s->insert_timer);
- qemu_del_timer(s->transfer_timer);
- qemu_free_timer(s->transfer_timer);
+ timer_del(s->insert_timer);
+ timer_free(s->insert_timer);
+ timer_del(s->transfer_timer);
+ timer_free(s->transfer_timer);
qemu_free_irqs(&s->eject_cb);
qemu_free_irqs(&s->ro_cb);
diff --git a/hw/sparc64/sun4u.c b/hw/sparc64/sun4u.c
index a7214a3fc7..50a9f24a2e 100644
--- a/hw/sparc64/sun4u.c
+++ b/hw/sparc64/sun4u.c
@@ -363,7 +363,7 @@ void cpu_put_timer(QEMUFile *f, CPUTimer *s)
qemu_put_be64s(f, &s->disabled_mask);
qemu_put_sbe64s(f, &s->clock_offset);
- qemu_put_timer(f, s->qtimer);
+ timer_put(f, s->qtimer);
}
void cpu_get_timer(QEMUFile *f, CPUTimer *s)
@@ -373,7 +373,7 @@ void cpu_get_timer(QEMUFile *f, CPUTimer *s)
qemu_get_be64s(f, &s->disabled_mask);
qemu_get_sbe64s(f, &s->clock_offset);
- qemu_get_timer(f, s->qtimer);
+ timer_get(f, s->qtimer);
}
static CPUTimer *cpu_timer_create(const char *name, SPARCCPU *cpu,
@@ -387,9 +387,9 @@ static CPUTimer *cpu_timer_create(const char *name, SPARCCPU *cpu,
timer->disabled_mask = disabled_mask;
timer->disabled = 1;
- timer->clock_offset = qemu_get_clock_ns(vm_clock);
+ timer->clock_offset = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
- timer->qtimer = qemu_new_timer_ns(vm_clock, cb, cpu);
+ timer->qtimer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cb, cpu);
return timer;
}
@@ -397,9 +397,9 @@ static CPUTimer *cpu_timer_create(const char *name, SPARCCPU *cpu,
static void cpu_timer_reset(CPUTimer *timer)
{
timer->disabled = 1;
- timer->clock_offset = qemu_get_clock_ns(vm_clock);
+ timer->clock_offset = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
- qemu_del_timer(timer->qtimer);
+ timer_del(timer->qtimer);
}
static void main_cpu_reset(void *opaque)
@@ -495,7 +495,7 @@ void cpu_tick_set_count(CPUTimer *timer, uint64_t count)
uint64_t real_count = count & ~timer->disabled_mask;
uint64_t disabled_bit = count & timer->disabled_mask;
- int64_t vm_clock_offset = qemu_get_clock_ns(vm_clock) -
+ int64_t vm_clock_offset = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
cpu_to_timer_ticks(real_count, timer->frequency);
TIMER_DPRINTF("%s set_count count=0x%016lx (%s) p=%p\n",
@@ -509,7 +509,7 @@ void cpu_tick_set_count(CPUTimer *timer, uint64_t count)
uint64_t cpu_tick_get_count(CPUTimer *timer)
{
uint64_t real_count = timer_to_cpu_ticks(
- qemu_get_clock_ns(vm_clock) - timer->clock_offset,
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - timer->clock_offset,
timer->frequency);
TIMER_DPRINTF("%s get_count count=0x%016lx (%s) p=%p\n",
@@ -524,7 +524,7 @@ uint64_t cpu_tick_get_count(CPUTimer *timer)
void cpu_tick_set_limit(CPUTimer *timer, uint64_t limit)
{
- int64_t now = qemu_get_clock_ns(vm_clock);
+ int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
uint64_t real_limit = limit & ~timer->disabled_mask;
timer->disabled = (limit & timer->disabled_mask) ? 1 : 0;
@@ -548,11 +548,11 @@ void cpu_tick_set_limit(CPUTimer *timer, uint64_t limit)
if (!real_limit) {
TIMER_DPRINTF("%s set_limit limit=ZERO - not starting timer\n",
timer->name);
- qemu_del_timer(timer->qtimer);
+ timer_del(timer->qtimer);
} else if (timer->disabled) {
- qemu_del_timer(timer->qtimer);
+ timer_del(timer->qtimer);
} else {
- qemu_mod_timer(timer->qtimer, expires);
+ timer_mod(timer->qtimer, expires);
}
}
diff --git a/hw/timer/arm_mptimer.c b/hw/timer/arm_mptimer.c
index 92773155d2..8020c9f4b5 100644
--- a/hw/timer/arm_mptimer.c
+++ b/hw/timer/arm_mptimer.c
@@ -81,10 +81,10 @@ static void timerblock_reload(TimerBlock *tb, int restart)
return;
}
if (restart) {
- tb->tick = qemu_get_clock_ns(vm_clock);
+ tb->tick = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
}
tb->tick += (int64_t)tb->count * timerblock_scale(tb);
- qemu_mod_timer(tb->timer, tb->tick);
+ timer_mod(tb->timer, tb->tick);
}
static void timerblock_tick(void *opaque)
@@ -113,7 +113,7 @@ static uint64_t timerblock_read(void *opaque, hwaddr addr,
return 0;
}
/* Slow and ugly, but hopefully won't happen too often. */
- val = tb->tick - qemu_get_clock_ns(vm_clock);
+ val = tb->tick - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
val /= timerblock_scale(tb);
if (val < 0) {
val = 0;
@@ -140,7 +140,7 @@ static void timerblock_write(void *opaque, hwaddr addr,
case 4: /* Counter. */
if ((tb->control & 1) && tb->count) {
/* Cancel the previous timer. */
- qemu_del_timer(tb->timer);
+ timer_del(tb->timer);
}
tb->count = value;
if (tb->control & 1) {
@@ -211,7 +211,7 @@ static void timerblock_reset(TimerBlock *tb)
tb->status = 0;
tb->tick = 0;
if (tb->timer) {
- qemu_del_timer(tb->timer);
+ timer_del(tb->timer);
}
}
@@ -248,7 +248,7 @@ static int arm_mptimer_init(SysBusDevice *dev)
sysbus_init_mmio(dev, &s->iomem);
for (i = 0; i < s->num_cpu; i++) {
TimerBlock *tb = &s->timerblock[i];
- tb->timer = qemu_new_timer_ns(vm_clock, timerblock_tick, tb);
+ tb->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, timerblock_tick, tb);
sysbus_init_irq(dev, &tb->irq);
memory_region_init_io(&tb->iomem, OBJECT(s), &timerblock_ops, tb,
"arm_mptimer_timerblock", 0x20);
diff --git a/hw/timer/arm_timer.c b/hw/timer/arm_timer.c
index acfea59779..a47afde23a 100644
--- a/hw/timer/arm_timer.c
+++ b/hw/timer/arm_timer.c
@@ -12,6 +12,7 @@
#include "qemu-common.h"
#include "hw/qdev.h"
#include "hw/ptimer.h"
+#include "qemu/main-loop.h"
/* Common timer implementation. */
diff --git a/hw/timer/cadence_ttc.c b/hw/timer/cadence_ttc.c
index 888f9ce000..a279bced78 100644
--- a/hw/timer/cadence_ttc.c
+++ b/hw/timer/cadence_ttc.c
@@ -172,7 +172,7 @@ static void cadence_timer_run(CadenceTimerState *s)
event_interval = next_value - (int64_t)s->reg_value;
event_interval = (event_interval < 0) ? -event_interval : event_interval;
- qemu_mod_timer(s->timer, s->cpu_time +
+ timer_mod(s->timer, s->cpu_time +
cadence_timer_get_ns(s, event_interval));
}
@@ -184,7 +184,7 @@ static void cadence_timer_sync(CadenceTimerState *s)
(int64_t)s->reg_interval + 1 : 0x10000ULL) << 16;
uint64_t old_time = s->cpu_time;
- s->cpu_time = qemu_get_clock_ns(vm_clock);
+ s->cpu_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
DB_PRINT("cpu time: %lld ns\n", (long long)old_time);
if (!s->cpu_time_valid || old_time == s->cpu_time) {
@@ -401,7 +401,7 @@ static void cadence_timer_init(uint32_t freq, CadenceTimerState *s)
cadence_timer_reset(s);
- s->timer = qemu_new_timer_ns(vm_clock, cadence_timer_tick, s);
+ s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cadence_timer_tick, s);
}
static int cadence_ttc_init(SysBusDevice *dev)
diff --git a/hw/timer/etraxfs_timer.c b/hw/timer/etraxfs_timer.c
index a38d9e4eb6..aee4990eb1 100644
--- a/hw/timer/etraxfs_timer.c
+++ b/hw/timer/etraxfs_timer.c
@@ -93,7 +93,7 @@ timer_read(void *opaque, hwaddr addr, unsigned int size)
r = ptimer_get_count(t->ptimer_t1);
break;
case R_TIME:
- r = qemu_get_clock_ns(vm_clock) / 10;
+ r = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / 10;
break;
case RW_INTR_MASK:
r = t->rw_intr_mask;
diff --git a/hw/timer/exynos4210_mct.c b/hw/timer/exynos4210_mct.c
index a8009a4316..86f4fcd3e8 100644
--- a/hw/timer/exynos4210_mct.c
+++ b/hw/timer/exynos4210_mct.c
@@ -54,6 +54,7 @@
#include "hw/sysbus.h"
#include "qemu/timer.h"
+#include "qemu/main-loop.h"
#include "qemu-common.h"
#include "hw/ptimer.h"
@@ -905,7 +906,7 @@ static void exynos4210_ltick_event(void *opaque)
/* raise interrupt if enabled */
if (s->reg.int_enb & L_INT_INTENB_ICNTEIE) {
#ifdef DEBUG_MCT
- time2[s->id] = qemu_get_clock_ns(vm_clock);
+ time2[s->id] = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
DPRINTF("local timer[%d] IRQ: %llx\n", s->id,
time2[s->id] - time1[s->id]);
time1[s->id] = time2[s->id];
diff --git a/hw/timer/exynos4210_pwm.c b/hw/timer/exynos4210_pwm.c
index a52f0f6c6b..1aa8f4d07a 100644
--- a/hw/timer/exynos4210_pwm.c
+++ b/hw/timer/exynos4210_pwm.c
@@ -23,6 +23,7 @@
#include "hw/sysbus.h"
#include "qemu/timer.h"
#include "qemu-common.h"
+#include "qemu/main-loop.h"
#include "hw/ptimer.h"
#include "hw/arm/exynos4210.h"
diff --git a/hw/timer/grlib_gptimer.c b/hw/timer/grlib_gptimer.c
index 7c1055a99c..74c16d6c90 100644
--- a/hw/timer/grlib_gptimer.c
+++ b/hw/timer/grlib_gptimer.c
@@ -25,6 +25,8 @@
#include "hw/sysbus.h"
#include "qemu/timer.h"
#include "hw/ptimer.h"
+#include "qemu/timer.h"
+#include "qemu/main-loop.h"
#include "trace.h"
diff --git a/hw/timer/hpet.c b/hw/timer/hpet.c
index 648b38362d..fcd22aea59 100644
--- a/hw/timer/hpet.c
+++ b/hw/timer/hpet.c
@@ -152,7 +152,7 @@ static int deactivating_bit(uint64_t old, uint64_t new, uint64_t mask)
static uint64_t hpet_get_ticks(HPETState *s)
{
- return ns_to_ticks(qemu_get_clock_ns(vm_clock) + s->hpet_offset);
+ return ns_to_ticks(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + s->hpet_offset);
}
/*
@@ -233,7 +233,7 @@ static int hpet_post_load(void *opaque, int version_id)
HPETState *s = opaque;
/* Recalculate the offset between the main counter and guest time */
- s->hpet_offset = ticks_to_ns(s->hpet_counter) - qemu_get_clock_ns(vm_clock);
+ s->hpet_offset = ticks_to_ns(s->hpet_counter) - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
/* Push number of timers into capability returned via HPET_ID */
s->capability &= ~HPET_ID_NUM_TIM_MASK;
@@ -332,12 +332,12 @@ static void hpet_timer(void *opaque)
}
}
diff = hpet_calculate_diff(t, cur_tick);
- qemu_mod_timer(t->qemu_timer,
- qemu_get_clock_ns(vm_clock) + (int64_t)ticks_to_ns(diff));
+ timer_mod(t->qemu_timer,
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (int64_t)ticks_to_ns(diff));
} else if (t->config & HPET_TN_32BIT && !timer_is_periodic(t)) {
if (t->wrap_flag) {
diff = hpet_calculate_diff(t, cur_tick);
- qemu_mod_timer(t->qemu_timer, qemu_get_clock_ns(vm_clock) +
+ timer_mod(t->qemu_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
(int64_t)ticks_to_ns(diff));
t->wrap_flag = 0;
}
@@ -365,13 +365,13 @@ static void hpet_set_timer(HPETTimer *t)
t->wrap_flag = 1;
}
}
- qemu_mod_timer(t->qemu_timer,
- qemu_get_clock_ns(vm_clock) + (int64_t)ticks_to_ns(diff));
+ timer_mod(t->qemu_timer,
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (int64_t)ticks_to_ns(diff));
}
static void hpet_del_timer(HPETTimer *t)
{
- qemu_del_timer(t->qemu_timer);
+ timer_del(t->qemu_timer);
update_irq(t, 0);
}
@@ -567,7 +567,7 @@ static void hpet_ram_write(void *opaque, hwaddr addr,
if (activating_bit(old_val, new_val, HPET_CFG_ENABLE)) {
/* Enable main counter and interrupt generation. */
s->hpet_offset =
- ticks_to_ns(s->hpet_counter) - qemu_get_clock_ns(vm_clock);
+ ticks_to_ns(s->hpet_counter) - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
for (i = 0; i < s->num_timers; i++) {
if ((&s->timer[i])->cmp != ~0ULL) {
hpet_set_timer(&s->timer[i]);
@@ -726,7 +726,7 @@ static void hpet_realize(DeviceState *dev, Error **errp)
}
for (i = 0; i < HPET_MAX_TIMERS; i++) {
timer = &s->timer[i];
- timer->qemu_timer = qemu_new_timer_ns(vm_clock, hpet_timer, timer);
+ timer->qemu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, hpet_timer, timer);
timer->tn = i;
timer->state = s;
}
diff --git a/hw/timer/i8254.c b/hw/timer/i8254.c
index cd5214064f..cdbf481951 100644
--- a/hw/timer/i8254.c
+++ b/hw/timer/i8254.c
@@ -51,7 +51,7 @@ static int pit_get_count(PITChannelState *s)
uint64_t d;
int counter;
- d = muldiv64(qemu_get_clock_ns(vm_clock) - s->count_load_time, PIT_FREQ,
+ d = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - s->count_load_time, PIT_FREQ,
get_ticks_per_sec());
switch(s->mode) {
case 0:
@@ -85,7 +85,7 @@ static void pit_set_channel_gate(PITCommonState *s, PITChannelState *sc,
case 5:
if (sc->gate < val) {
/* restart counting on rising edge */
- sc->count_load_time = qemu_get_clock_ns(vm_clock);
+ sc->count_load_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
pit_irq_timer_update(sc, sc->count_load_time);
}
break;
@@ -93,7 +93,7 @@ static void pit_set_channel_gate(PITCommonState *s, PITChannelState *sc,
case 3:
if (sc->gate < val) {
/* restart counting on rising edge */
- sc->count_load_time = qemu_get_clock_ns(vm_clock);
+ sc->count_load_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
pit_irq_timer_update(sc, sc->count_load_time);
}
/* XXX: disable/enable counting */
@@ -106,7 +106,7 @@ static inline void pit_load_count(PITChannelState *s, int val)
{
if (val == 0)
val = 0x10000;
- s->count_load_time = qemu_get_clock_ns(vm_clock);
+ s->count_load_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
s->count = val;
pit_irq_timer_update(s, s->count_load_time);
}
@@ -143,7 +143,7 @@ static void pit_ioport_write(void *opaque, hwaddr addr,
/* XXX: add BCD and null count */
s->status =
(pit_get_out(s,
- qemu_get_clock_ns(vm_clock)) << 7) |
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)) << 7) |
(s->rw_mode << 4) |
(s->mode << 1) |
s->bcd;
@@ -260,9 +260,9 @@ static void pit_irq_timer_update(PITChannelState *s, int64_t current_time)
#endif
s->next_transition_time = expire_time;
if (expire_time != -1)
- qemu_mod_timer(s->irq_timer, expire_time);
+ timer_mod(s->irq_timer, expire_time);
else
- qemu_del_timer(s->irq_timer);
+ timer_del(s->irq_timer);
}
static void pit_irq_timer(void *opaque)
@@ -281,7 +281,7 @@ static void pit_reset(DeviceState *dev)
s = &pit->channels[0];
if (!s->irq_disabled) {
- qemu_mod_timer(s->irq_timer, s->next_transition_time);
+ timer_mod(s->irq_timer, s->next_transition_time);
}
}
@@ -294,10 +294,10 @@ static void pit_irq_control(void *opaque, int n, int enable)
if (enable) {
s->irq_disabled = 0;
- pit_irq_timer_update(s, qemu_get_clock_ns(vm_clock));
+ pit_irq_timer_update(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
} else {
s->irq_disabled = 1;
- qemu_del_timer(s->irq_timer);
+ timer_del(s->irq_timer);
}
}
@@ -316,9 +316,9 @@ static void pit_post_load(PITCommonState *s)
PITChannelState *sc = &s->channels[0];
if (sc->next_transition_time != -1) {
- qemu_mod_timer(sc->irq_timer, sc->next_transition_time);
+ timer_mod(sc->irq_timer, sc->next_transition_time);
} else {
- qemu_del_timer(sc->irq_timer);
+ timer_del(sc->irq_timer);
}
}
@@ -330,7 +330,7 @@ static void pit_realizefn(DeviceState *dev, Error **err)
s = &pit->channels[0];
/* the timer 0 is connected to an IRQ */
- s->irq_timer = qemu_new_timer_ns(vm_clock, pit_irq_timer, s);
+ s->irq_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, pit_irq_timer, s);
qdev_init_gpio_out(dev, &s->irq, 1);
memory_region_init_io(&pit->ioports, OBJECT(pit), &pit_ioport_ops,
diff --git a/hw/timer/i8254_common.c b/hw/timer/i8254_common.c
index 4e5bf0b63c..e8fb971488 100644
--- a/hw/timer/i8254_common.c
+++ b/hw/timer/i8254_common.c
@@ -136,7 +136,7 @@ void pit_get_channel_info_common(PITCommonState *s, PITChannelState *sc,
info->gate = sc->gate;
info->mode = sc->mode;
info->initial_count = sc->count;
- info->out = pit_get_out(sc, qemu_get_clock_ns(vm_clock));
+ info->out = pit_get_out(sc, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
}
void pit_get_channel_info(ISADevice *dev, int channel, PITChannelInfo *info)
@@ -157,7 +157,7 @@ void pit_reset_common(PITCommonState *pit)
s = &pit->channels[i];
s->mode = 3;
s->gate = (i != 2);
- s->count_load_time = qemu_get_clock_ns(vm_clock);
+ s->count_load_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
s->count = 0x10000;
if (i == 0 && !s->irq_disabled) {
s->next_transition_time =
diff --git a/hw/timer/imx_epit.c b/hw/timer/imx_epit.c
index dc73d6525d..0dbe15c99b 100644
--- a/hw/timer/imx_epit.c
+++ b/hw/timer/imx_epit.c
@@ -18,6 +18,7 @@
#include "hw/ptimer.h"
#include "hw/sysbus.h"
#include "hw/arm/imx.h"
+#include "qemu/main-loop.h"
#define TYPE_IMX_EPIT "imx.epit"
diff --git a/hw/timer/imx_gpt.c b/hw/timer/imx_gpt.c
index 87db0e195c..f2d1975e70 100644
--- a/hw/timer/imx_gpt.c
+++ b/hw/timer/imx_gpt.c
@@ -18,6 +18,7 @@
#include "hw/ptimer.h"
#include "hw/sysbus.h"
#include "hw/arm/imx.h"
+#include "qemu/main-loop.h"
#define TYPE_IMX_GPT "imx.gpt"
diff --git a/hw/timer/lm32_timer.c b/hw/timer/lm32_timer.c
index 986e6a19d2..8ed138cc0e 100644
--- a/hw/timer/lm32_timer.c
+++ b/hw/timer/lm32_timer.c
@@ -27,6 +27,7 @@
#include "qemu/timer.h"
#include "hw/ptimer.h"
#include "qemu/error-report.h"
+#include "qemu/main-loop.h"
#define DEFAULT_FREQUENCY (50*1000000)
diff --git a/hw/timer/m48t59.c b/hw/timer/m48t59.c
index 0cc9e5b5ee..d3d78ec5a8 100644
--- a/hw/timer/m48t59.c
+++ b/hw/timer/m48t59.c
@@ -137,7 +137,7 @@ static void alarm_cb (void *opaque)
/* Repeat once a second */
next_time = 1;
}
- qemu_mod_timer(NVRAM->alrm_timer, qemu_get_clock_ns(rtc_clock) +
+ timer_mod(NVRAM->alrm_timer, qemu_clock_get_ns(rtc_clock) +
next_time * 1000);
qemu_set_irq(NVRAM->IRQ, 0);
}
@@ -146,10 +146,10 @@ static void set_alarm(M48t59State *NVRAM)
{
int diff;
if (NVRAM->alrm_timer != NULL) {
- qemu_del_timer(NVRAM->alrm_timer);
+ timer_del(NVRAM->alrm_timer);
diff = qemu_timedate_diff(&NVRAM->alarm) - NVRAM->time_offset;
if (diff > 0)
- qemu_mod_timer(NVRAM->alrm_timer, diff * 1000);
+ timer_mod(NVRAM->alrm_timer, diff * 1000);
}
}
@@ -188,10 +188,10 @@ static void set_up_watchdog(M48t59State *NVRAM, uint8_t value)
NVRAM->buffer[0x1FF0] &= ~0x80;
if (NVRAM->wd_timer != NULL) {
- qemu_del_timer(NVRAM->wd_timer);
+ timer_del(NVRAM->wd_timer);
if (value != 0) {
interval = (1 << (2 * (value & 0x03))) * ((value >> 2) & 0x1F);
- qemu_mod_timer(NVRAM->wd_timer, ((uint64_t)time(NULL) * 1000) +
+ timer_mod(NVRAM->wd_timer, ((uint64_t)time(NULL) * 1000) +
((interval * 1000) >> 4));
}
}
@@ -609,10 +609,10 @@ static void m48t59_reset_common(M48t59State *NVRAM)
NVRAM->addr = 0;
NVRAM->lock = 0;
if (NVRAM->alrm_timer != NULL)
- qemu_del_timer(NVRAM->alrm_timer);
+ timer_del(NVRAM->alrm_timer);
if (NVRAM->wd_timer != NULL)
- qemu_del_timer(NVRAM->wd_timer);
+ timer_del(NVRAM->wd_timer);
}
static void m48t59_reset_isa(DeviceState *d)
@@ -700,8 +700,8 @@ static void m48t59_realize_common(M48t59State *s, Error **errp)
{
s->buffer = g_malloc0(s->size);
if (s->model == 59) {
- s->alrm_timer = qemu_new_timer_ns(rtc_clock, &alarm_cb, s);
- s->wd_timer = qemu_new_timer_ns(vm_clock, &watchdog_cb, s);
+ s->alrm_timer = timer_new_ns(rtc_clock, &alarm_cb, s);
+ s->wd_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &watchdog_cb, s);
}
qemu_get_timedate(&s->alarm, 0);
diff --git a/hw/timer/mc146818rtc.c b/hw/timer/mc146818rtc.c
index 3c3baaccfa..7230a6e4fa 100644
--- a/hw/timer/mc146818rtc.c
+++ b/hw/timer/mc146818rtc.c
@@ -102,7 +102,7 @@ static inline bool rtc_running(RTCState *s)
static uint64_t get_guest_rtc_ns(RTCState *s)
{
uint64_t guest_rtc;
- uint64_t guest_clock = qemu_get_clock_ns(rtc_clock);
+ uint64_t guest_clock = qemu_clock_get_ns(rtc_clock);
guest_rtc = s->base_rtc * NSEC_PER_SEC
+ guest_clock - s->last_update + s->offset;
@@ -113,13 +113,13 @@ static uint64_t get_guest_rtc_ns(RTCState *s)
static void rtc_coalesced_timer_update(RTCState *s)
{
if (s->irq_coalesced == 0) {
- qemu_del_timer(s->coalesced_timer);
+ timer_del(s->coalesced_timer);
} else {
/* divide each RTC interval to 2 - 8 smaller intervals */
int c = MIN(s->irq_coalesced, 7) + 1;
- int64_t next_clock = qemu_get_clock_ns(rtc_clock) +
+ int64_t next_clock = qemu_clock_get_ns(rtc_clock) +
muldiv64(s->period / c, get_ticks_per_sec(), RTC_CLOCK_RATE);
- qemu_mod_timer(s->coalesced_timer, next_clock);
+ timer_mod(s->coalesced_timer, next_clock);
}
}
@@ -169,12 +169,12 @@ static void periodic_timer_update(RTCState *s, int64_t current_time)
next_irq_clock = (cur_clock & ~(period - 1)) + period;
s->next_periodic_time =
muldiv64(next_irq_clock, get_ticks_per_sec(), RTC_CLOCK_RATE) + 1;
- qemu_mod_timer(s->periodic_timer, s->next_periodic_time);
+ timer_mod(s->periodic_timer, s->next_periodic_time);
} else {
#ifdef TARGET_I386
s->irq_coalesced = 0;
#endif
- qemu_del_timer(s->periodic_timer);
+ timer_del(s->periodic_timer);
}
}
@@ -222,23 +222,23 @@ static void check_update_timer(RTCState *s)
* from occurring, because the time of day is not updated.
*/
if ((s->cmos_data[RTC_REG_A] & 0x60) == 0x60) {
- qemu_del_timer(s->update_timer);
+ timer_del(s->update_timer);
return;
}
if ((s->cmos_data[RTC_REG_C] & REG_C_UF) &&
(s->cmos_data[RTC_REG_B] & REG_B_SET)) {
- qemu_del_timer(s->update_timer);
+ timer_del(s->update_timer);
return;
}
if ((s->cmos_data[RTC_REG_C] & REG_C_UF) &&
(s->cmos_data[RTC_REG_C] & REG_C_AF)) {
- qemu_del_timer(s->update_timer);
+ timer_del(s->update_timer);
return;
}
guest_nsec = get_guest_rtc_ns(s) % NSEC_PER_SEC;
/* if UF is clear, reprogram to next second */
- next_update_time = qemu_get_clock_ns(rtc_clock)
+ next_update_time = qemu_clock_get_ns(rtc_clock)
+ NSEC_PER_SEC - guest_nsec;
/* Compute time of next alarm. One second is already accounted
@@ -252,8 +252,8 @@ static void check_update_timer(RTCState *s)
* the alarm time. */
next_update_time = s->next_alarm_time;
}
- if (next_update_time != qemu_timer_expire_time_ns(s->update_timer)) {
- qemu_mod_timer(s->update_timer, next_update_time);
+ if (next_update_time != timer_expire_time_ns(s->update_timer)) {
+ timer_mod(s->update_timer, next_update_time);
}
}
@@ -371,7 +371,7 @@ static void rtc_update_timer(void *opaque)
rtc_update_time(s);
s->cmos_data[RTC_REG_A] &= ~REG_A_UIP;
- if (qemu_get_clock_ns(rtc_clock) >= s->next_alarm_time) {
+ if (qemu_clock_get_ns(rtc_clock) >= s->next_alarm_time) {
irqs |= REG_C_AF;
if (s->cmos_data[RTC_REG_B] & REG_B_AIE) {
qemu_system_wakeup_request(QEMU_WAKEUP_REASON_RTC);
@@ -445,7 +445,7 @@ static void cmos_ioport_write(void *opaque, hwaddr addr,
/* UIP bit is read only */
s->cmos_data[RTC_REG_A] = (data & ~REG_A_UIP) |
(s->cmos_data[RTC_REG_A] & REG_A_UIP);
- periodic_timer_update(s, qemu_get_clock_ns(rtc_clock));
+ periodic_timer_update(s, qemu_clock_get_ns(rtc_clock));
check_update_timer(s);
break;
case RTC_REG_B:
@@ -475,7 +475,7 @@ static void cmos_ioport_write(void *opaque, hwaddr addr,
qemu_irq_lower(s->irq);
}
s->cmos_data[RTC_REG_B] = data;
- periodic_timer_update(s, qemu_get_clock_ns(rtc_clock));
+ periodic_timer_update(s, qemu_clock_get_ns(rtc_clock));
check_update_timer(s);
break;
case RTC_REG_C:
@@ -535,7 +535,7 @@ static void rtc_set_time(RTCState *s)
rtc_get_time(s, &tm);
s->base_rtc = mktimegm(&tm);
- s->last_update = qemu_get_clock_ns(rtc_clock);
+ s->last_update = qemu_clock_get_ns(rtc_clock);
rtc_change_mon_event(&tm);
}
@@ -587,10 +587,11 @@ static int update_in_progress(RTCState *s)
if (!rtc_running(s)) {
return 0;
}
- if (qemu_timer_pending(s->update_timer)) {
- int64_t next_update_time = qemu_timer_expire_time_ns(s->update_timer);
+ if (timer_pending(s->update_timer)) {
+ int64_t next_update_time = timer_expire_time_ns(s->update_timer);
/* Latch UIP until the timer expires. */
- if (qemu_get_clock_ns(rtc_clock) >= (next_update_time - UIP_HOLD_LENGTH)) {
+ if (qemu_clock_get_ns(rtc_clock) >=
+ (next_update_time - UIP_HOLD_LENGTH)) {
s->cmos_data[RTC_REG_A] |= REG_A_UIP;
return 1;
}
@@ -695,7 +696,7 @@ static void rtc_set_date_from_host(ISADevice *dev)
qemu_get_timedate(&tm, 0);
s->base_rtc = mktimegm(&tm);
- s->last_update = qemu_get_clock_ns(rtc_clock);
+ s->last_update = qemu_clock_get_ns(rtc_clock);
s->offset = 0;
/* set the CMOS date */
@@ -843,7 +844,7 @@ static void rtc_realizefn(DeviceState *dev, Error **errp)
switch (s->lost_tick_policy) {
case LOST_TICK_SLEW:
s->coalesced_timer =
- qemu_new_timer_ns(rtc_clock, rtc_coalesced_timer, s);
+ timer_new_ns(rtc_clock, rtc_coalesced_timer, s);
break;
case LOST_TICK_DISCARD:
break;
@@ -853,12 +854,13 @@ static void rtc_realizefn(DeviceState *dev, Error **errp)
}
#endif
- s->periodic_timer = qemu_new_timer_ns(rtc_clock, rtc_periodic_timer, s);
- s->update_timer = qemu_new_timer_ns(rtc_clock, rtc_update_timer, s);
+ s->periodic_timer = timer_new_ns(rtc_clock, rtc_periodic_timer, s);
+ s->update_timer = timer_new_ns(rtc_clock, rtc_update_timer, s);
check_update_timer(s);
s->clock_reset_notifier.notify = rtc_notify_clock_reset;
- qemu_register_clock_reset_notifier(rtc_clock, &s->clock_reset_notifier);
+ qemu_clock_register_reset_notifier(QEMU_CLOCK_REALTIME,
+ &s->clock_reset_notifier);
s->suspend_notifier.notify = rtc_notify_suspend;
qemu_register_suspend_notifier(&s->suspend_notifier);
diff --git a/hw/timer/omap_gptimer.c b/hw/timer/omap_gptimer.c
index ac389d87ee..016207f626 100644
--- a/hw/timer/omap_gptimer.c
+++ b/hw/timer/omap_gptimer.c
@@ -103,7 +103,7 @@ static inline uint32_t omap_gp_timer_read(struct omap_gp_timer_s *timer)
uint64_t distance;
if (timer->st && timer->rate) {
- distance = qemu_get_clock_ns(vm_clock) - timer->time;
+ distance = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - timer->time;
distance = muldiv64(distance, timer->rate, timer->ticks_per_sec);
if (distance >= 0xffffffff - timer->val)
@@ -118,7 +118,7 @@ static inline void omap_gp_timer_sync(struct omap_gp_timer_s *timer)
{
if (timer->st) {
timer->val = omap_gp_timer_read(timer);
- timer->time = qemu_get_clock_ns(vm_clock);
+ timer->time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
}
}
@@ -129,17 +129,17 @@ static inline void omap_gp_timer_update(struct omap_gp_timer_s *timer)
if (timer->st && timer->rate) {
expires = muldiv64(0x100000000ll - timer->val,
timer->ticks_per_sec, timer->rate);
- qemu_mod_timer(timer->timer, timer->time + expires);
+ timer_mod(timer->timer, timer->time + expires);
if (timer->ce && timer->match_val >= timer->val) {
matches = muldiv64(timer->match_val - timer->val,
timer->ticks_per_sec, timer->rate);
- qemu_mod_timer(timer->match, timer->time + matches);
+ timer_mod(timer->match, timer->time + matches);
} else
- qemu_del_timer(timer->match);
+ timer_del(timer->match);
} else {
- qemu_del_timer(timer->timer);
- qemu_del_timer(timer->match);
+ timer_del(timer->timer);
+ timer_del(timer->match);
omap_gp_timer_out(timer, timer->scpwm);
}
}
@@ -164,7 +164,7 @@ static void omap_gp_timer_tick(void *opaque)
timer->val = 0;
} else {
timer->val = timer->load_val;
- timer->time = qemu_get_clock_ns(vm_clock);
+ timer->time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
}
if (timer->trigger == gpt_trigger_overflow ||
@@ -406,7 +406,7 @@ static void omap_gp_timer_write(void *opaque, hwaddr addr,
break;
case 0x28: /* TCRR */
- s->time = qemu_get_clock_ns(vm_clock);
+ s->time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
s->val = value;
omap_gp_timer_update(s);
break;
@@ -416,7 +416,7 @@ static void omap_gp_timer_write(void *opaque, hwaddr addr,
break;
case 0x30: /* TTGR */
- s->time = qemu_get_clock_ns(vm_clock);
+ s->time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
s->val = s->load_val;
omap_gp_timer_update(s);
break;
@@ -474,8 +474,8 @@ struct omap_gp_timer_s *omap_gp_timer_init(struct omap_target_agent_s *ta,
s->ta = ta;
s->irq = irq;
s->clk = fclk;
- s->timer = qemu_new_timer_ns(vm_clock, omap_gp_timer_tick, s);
- s->match = qemu_new_timer_ns(vm_clock, omap_gp_timer_match, s);
+ s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, omap_gp_timer_tick, s);
+ s->match = timer_new_ns(QEMU_CLOCK_VIRTUAL, omap_gp_timer_match, s);
s->in = qemu_allocate_irqs(omap_gp_timer_input, s, 1)[0];
omap_gp_timer_reset(s);
omap_gp_timer_clk_setup(s);
diff --git a/hw/timer/omap_synctimer.c b/hw/timer/omap_synctimer.c
index a12aca20df..8e50488d17 100644
--- a/hw/timer/omap_synctimer.c
+++ b/hw/timer/omap_synctimer.c
@@ -28,7 +28,7 @@ struct omap_synctimer_s {
/* 32-kHz Sync Timer of the OMAP2 */
static uint32_t omap_synctimer_read(struct omap_synctimer_s *s) {
- return muldiv64(qemu_get_clock_ns(vm_clock), 0x8000, get_ticks_per_sec());
+ return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), 0x8000, get_ticks_per_sec());
}
void omap_synctimer_reset(struct omap_synctimer_s *s)
diff --git a/hw/timer/pl031.c b/hw/timer/pl031.c
index d5e2f3e265..65928a4819 100644
--- a/hw/timer/pl031.c
+++ b/hw/timer/pl031.c
@@ -78,7 +78,7 @@ static void pl031_interrupt(void * opaque)
static uint32_t pl031_get_count(PL031State *s)
{
- int64_t now = qemu_get_clock_ns(rtc_clock);
+ int64_t now = qemu_clock_get_ns(rtc_clock);
return s->tick_offset + now / get_ticks_per_sec();
}
@@ -91,11 +91,11 @@ static void pl031_set_alarm(PL031State *s)
ticks = s->mr - pl031_get_count(s);
DPRINTF("Alarm set in %ud ticks\n", ticks);
if (ticks == 0) {
- qemu_del_timer(s->timer);
+ timer_del(s->timer);
pl031_interrupt(s);
} else {
- int64_t now = qemu_get_clock_ns(rtc_clock);
- qemu_mod_timer(s->timer, now + (int64_t)ticks * get_ticks_per_sec());
+ int64_t now = qemu_clock_get_ns(rtc_clock);
+ timer_mod(s->timer, now + (int64_t)ticks * get_ticks_per_sec());
}
}
@@ -201,9 +201,10 @@ static int pl031_init(SysBusDevice *dev)
sysbus_init_irq(dev, &s->irq);
qemu_get_timedate(&tm, 0);
- s->tick_offset = mktimegm(&tm) - qemu_get_clock_ns(rtc_clock) / get_ticks_per_sec();
+ s->tick_offset = mktimegm(&tm) -
+ qemu_clock_get_ns(rtc_clock) / get_ticks_per_sec();
- s->timer = qemu_new_timer_ns(rtc_clock, pl031_interrupt, s);
+ s->timer = timer_new_ns(rtc_clock, pl031_interrupt, s);
return 0;
}
@@ -212,8 +213,8 @@ static void pl031_pre_save(void *opaque)
PL031State *s = opaque;
/* tick_offset is base_time - rtc_clock base time. Instead, we want to
- * store the base time relative to the vm_clock for backwards-compatibility. */
- int64_t delta = qemu_get_clock_ns(rtc_clock) - qemu_get_clock_ns(vm_clock);
+ * store the base time relative to the QEMU_CLOCK_VIRTUAL for backwards-compatibility. */
+ int64_t delta = qemu_clock_get_ns(rtc_clock) - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
s->tick_offset_vmstate = s->tick_offset + delta / get_ticks_per_sec();
}
@@ -221,7 +222,7 @@ static int pl031_post_load(void *opaque, int version_id)
{
PL031State *s = opaque;
- int64_t delta = qemu_get_clock_ns(rtc_clock) - qemu_get_clock_ns(vm_clock);
+ int64_t delta = qemu_clock_get_ns(rtc_clock) - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
s->tick_offset = s->tick_offset_vmstate - delta / get_ticks_per_sec();
pl031_set_alarm(s);
return 0;
diff --git a/hw/timer/puv3_ost.c b/hw/timer/puv3_ost.c
index 4bd2b76cb8..fa9eefd925 100644
--- a/hw/timer/puv3_ost.c
+++ b/hw/timer/puv3_ost.c
@@ -10,6 +10,7 @@
*/
#include "hw/sysbus.h"
#include "hw/ptimer.h"
+#include "qemu/main-loop.h"
#undef DEBUG_PUV3
#include "hw/unicore32/puv3.h"
diff --git a/hw/timer/pxa2xx_timer.c b/hw/timer/pxa2xx_timer.c
index cdabccdd15..0f546c4121 100644
--- a/hw/timer/pxa2xx_timer.c
+++ b/hw/timer/pxa2xx_timer.c
@@ -123,7 +123,7 @@ static void pxa2xx_timer_update(void *opaque, uint64_t now_qemu)
for (i = 0; i < 4; i ++) {
new_qemu = now_qemu + muldiv64((uint32_t) (s->timer[i].value - now_vm),
get_ticks_per_sec(), s->freq);
- qemu_mod_timer(s->timer[i].qtimer, new_qemu);
+ timer_mod(s->timer[i].qtimer, new_qemu);
}
}
@@ -141,7 +141,7 @@ static void pxa2xx_timer_update4(void *opaque, uint64_t now_qemu, int n)
counter = counters[n];
if (!s->tm4[counter].freq) {
- qemu_del_timer(s->tm4[n].tm.qtimer);
+ timer_del(s->tm4[n].tm.qtimer);
return;
}
@@ -151,7 +151,7 @@ static void pxa2xx_timer_update4(void *opaque, uint64_t now_qemu, int n)
new_qemu = now_qemu + muldiv64((uint32_t) (s->tm4[n].tm.value - now_vm),
get_ticks_per_sec(), s->tm4[counter].freq);
- qemu_mod_timer(s->tm4[n].tm.qtimer, new_qemu);
+ timer_mod(s->tm4[n].tm.qtimer, new_qemu);
}
static uint64_t pxa2xx_timer_read(void *opaque, hwaddr offset,
@@ -188,7 +188,7 @@ static uint64_t pxa2xx_timer_read(void *opaque, hwaddr offset,
goto badreg;
return s->tm4[tm].tm.value;
case OSCR:
- return s->clock + muldiv64(qemu_get_clock_ns(vm_clock) -
+ return s->clock + muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
s->lastload, s->freq, get_ticks_per_sec());
case OSCR11: tm ++;
/* fall through */
@@ -211,7 +211,7 @@ static uint64_t pxa2xx_timer_read(void *opaque, hwaddr offset,
if ((tm == 9 - 4 || tm == 11 - 4) && (s->tm4[tm].control & (1 << 9))) {
if (s->tm4[tm - 1].freq)
s->snapshot = s->tm4[tm - 1].clock + muldiv64(
- qemu_get_clock_ns(vm_clock) -
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
s->tm4[tm - 1].lastload,
s->tm4[tm - 1].freq, get_ticks_per_sec());
else
@@ -220,7 +220,7 @@ static uint64_t pxa2xx_timer_read(void *opaque, hwaddr offset,
if (!s->tm4[tm].freq)
return s->tm4[tm].clock;
- return s->tm4[tm].clock + muldiv64(qemu_get_clock_ns(vm_clock) -
+ return s->tm4[tm].clock + muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
s->tm4[tm].lastload, s->tm4[tm].freq, get_ticks_per_sec());
case OIER:
return s->irq_enabled;
@@ -271,7 +271,7 @@ static void pxa2xx_timer_write(void *opaque, hwaddr offset,
/* fall through */
case OSMR0:
s->timer[tm].value = value;
- pxa2xx_timer_update(s, qemu_get_clock_ns(vm_clock));
+ pxa2xx_timer_update(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
break;
case OSMR11: tm ++;
/* fall through */
@@ -291,11 +291,11 @@ static void pxa2xx_timer_write(void *opaque, hwaddr offset,
if (!pxa2xx_timer_has_tm4(s))
goto badreg;
s->tm4[tm].tm.value = value;
- pxa2xx_timer_update4(s, qemu_get_clock_ns(vm_clock), tm);
+ pxa2xx_timer_update4(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tm);
break;
case OSCR:
s->oldclock = s->clock;
- s->lastload = qemu_get_clock_ns(vm_clock);
+ s->lastload = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
s->clock = value;
pxa2xx_timer_update(s, s->lastload);
break;
@@ -317,7 +317,7 @@ static void pxa2xx_timer_write(void *opaque, hwaddr offset,
if (!pxa2xx_timer_has_tm4(s))
goto badreg;
s->tm4[tm].oldclock = s->tm4[tm].clock;
- s->tm4[tm].lastload = qemu_get_clock_ns(vm_clock);
+ s->tm4[tm].lastload = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
s->tm4[tm].clock = value;
pxa2xx_timer_update4(s, s->tm4[tm].lastload, tm);
break;
@@ -351,7 +351,7 @@ static void pxa2xx_timer_write(void *opaque, hwaddr offset,
s->tm4[tm].freq = pxa2xx_timer4_freq[value & 7];
else {
s->tm4[tm].freq = 0;
- pxa2xx_timer_update4(s, qemu_get_clock_ns(vm_clock), tm);
+ pxa2xx_timer_update4(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tm);
}
break;
case OMCR11: tm ++;
@@ -370,7 +370,7 @@ static void pxa2xx_timer_write(void *opaque, hwaddr offset,
pxa2xx_timer4_freq[(value & (1 << 8)) ? 0 : (value & 7)];
else {
s->tm4[tm].freq = 0;
- pxa2xx_timer_update4(s, qemu_get_clock_ns(vm_clock), tm);
+ pxa2xx_timer_update4(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tm);
}
break;
default:
@@ -411,7 +411,7 @@ static void pxa2xx_timer_tick4(void *opaque)
if (t->control & (1 << 3))
t->clock = 0;
if (t->control & (1 << 6))
- pxa2xx_timer_update4(i, qemu_get_clock_ns(vm_clock), t->tm.num - 4);
+ pxa2xx_timer_update4(i, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), t->tm.num - 4);
if (i->events & 0xff0)
qemu_irq_raise(i->irq4);
}
@@ -422,7 +422,7 @@ static int pxa25x_timer_post_load(void *opaque, int version_id)
int64_t now;
int i;
- now = qemu_get_clock_ns(vm_clock);
+ now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
pxa2xx_timer_update(s, now);
if (pxa2xx_timer_has_tm4(s))
@@ -440,7 +440,7 @@ static int pxa2xx_timer_init(SysBusDevice *dev)
s->irq_enabled = 0;
s->oldclock = 0;
s->clock = 0;
- s->lastload = qemu_get_clock_ns(vm_clock);
+ s->lastload = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
s->reset3 = 0;
for (i = 0; i < 4; i ++) {
@@ -448,7 +448,7 @@ static int pxa2xx_timer_init(SysBusDevice *dev)
sysbus_init_irq(dev, &s->timer[i].irq);
s->timer[i].info = s;
s->timer[i].num = i;
- s->timer[i].qtimer = qemu_new_timer_ns(vm_clock,
+ s->timer[i].qtimer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
pxa2xx_timer_tick, &s->timer[i]);
}
if (s->flags & (1 << PXA2XX_TIMER_HAVE_TM4)) {
@@ -460,7 +460,7 @@ static int pxa2xx_timer_init(SysBusDevice *dev)
s->tm4[i].tm.num = i + 4;
s->tm4[i].freq = 0;
s->tm4[i].control = 0x0;
- s->tm4[i].tm.qtimer = qemu_new_timer_ns(vm_clock,
+ s->tm4[i].tm.qtimer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
pxa2xx_timer_tick4, &s->tm4[i]);
}
}
diff --git a/hw/timer/sh_timer.c b/hw/timer/sh_timer.c
index 251a10dbfa..07f0670b5d 100644
--- a/hw/timer/sh_timer.c
+++ b/hw/timer/sh_timer.c
@@ -11,6 +11,7 @@
#include "hw/hw.h"
#include "hw/sh4/sh.h"
#include "qemu/timer.h"
+#include "qemu/main-loop.h"
#include "exec/address-spaces.h"
#include "hw/ptimer.h"
diff --git a/hw/timer/slavio_timer.c b/hw/timer/slavio_timer.c
index 33e8f6c15c..f75b914951 100644
--- a/hw/timer/slavio_timer.c
+++ b/hw/timer/slavio_timer.c
@@ -27,6 +27,7 @@
#include "hw/ptimer.h"
#include "hw/sysbus.h"
#include "trace.h"
+#include "qemu/main-loop.h"
/*
* Registers of hardware timer in sun4m.
diff --git a/hw/timer/tusb6010.c b/hw/timer/tusb6010.c
index c48ecf8ee7..bd2a89e020 100644
--- a/hw/timer/tusb6010.c
+++ b/hw/timer/tusb6010.c
@@ -516,11 +516,11 @@ static void tusb_async_writew(void *opaque, hwaddr addr,
case TUSB_DEV_OTG_TIMER:
s->otg_timer_val = value;
if (value & TUSB_DEV_OTG_TIMER_ENABLE)
- qemu_mod_timer(s->otg_timer, qemu_get_clock_ns(vm_clock) +
+ timer_mod(s->otg_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
muldiv64(TUSB_DEV_OTG_TIMER_VAL(value),
get_ticks_per_sec(), TUSB_DEVCLOCK));
else
- qemu_del_timer(s->otg_timer);
+ timer_del(s->otg_timer);
break;
case TUSB_PRCM_CONF:
@@ -728,8 +728,8 @@ static void tusb6010_power(TUSBState *s, int on)
/* Pull the interrupt down after TUSB6010 comes up. */
s->intr_ok = 0;
tusb_intr_update(s);
- qemu_mod_timer(s->pwr_timer,
- qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 2);
+ timer_mod(s->pwr_timer,
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + get_ticks_per_sec() / 2);
}
}
@@ -783,8 +783,8 @@ static int tusb6010_init(SysBusDevice *sbd)
DeviceState *dev = DEVICE(sbd);
TUSBState *s = TUSB(dev);
- s->otg_timer = qemu_new_timer_ns(vm_clock, tusb_otg_tick, s);
- s->pwr_timer = qemu_new_timer_ns(vm_clock, tusb_power_tick, s);
+ s->otg_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, tusb_otg_tick, s);
+ s->pwr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, tusb_power_tick, s);
memory_region_init_io(&s->iomem[1], OBJECT(s), &tusb_async_ops, s,
"tusb-async", UINT32_MAX);
sysbus_init_mmio(sbd, &s->iomem[0]);
diff --git a/hw/timer/twl92230.c b/hw/timer/twl92230.c
index b730d853f7..f3ea36503c 100644
--- a/hw/timer/twl92230.c
+++ b/hw/timer/twl92230.c
@@ -72,14 +72,14 @@ static inline void menelaus_update(MenelausState *s)
static inline void menelaus_rtc_start(MenelausState *s)
{
- s->rtc.next += qemu_get_clock_ms(rtc_clock);
- qemu_mod_timer(s->rtc.hz_tm, s->rtc.next);
+ s->rtc.next += qemu_clock_get_ms(rtc_clock);
+ timer_mod(s->rtc.hz_tm, s->rtc.next);
}
static inline void menelaus_rtc_stop(MenelausState *s)
{
- qemu_del_timer(s->rtc.hz_tm);
- s->rtc.next -= qemu_get_clock_ms(rtc_clock);
+ timer_del(s->rtc.hz_tm);
+ s->rtc.next -= qemu_clock_get_ms(rtc_clock);
if (s->rtc.next < 1)
s->rtc.next = 1;
}
@@ -102,7 +102,7 @@ static void menelaus_rtc_hz(void *opaque)
s->rtc.next_comp --;
s->rtc.alm_sec --;
s->rtc.next += 1000;
- qemu_mod_timer(s->rtc.hz_tm, s->rtc.next);
+ timer_mod(s->rtc.hz_tm, s->rtc.next);
if ((s->rtc.ctrl >> 3) & 3) { /* EVERY */
menelaus_rtc_update(s);
if (((s->rtc.ctrl >> 3) & 3) == 1 && !s->rtc.tm.tm_sec)
@@ -782,7 +782,7 @@ static void menelaus_pre_save(void *opaque)
{
MenelausState *s = opaque;
/* Should be <= 1000 */
- s->rtc_next_vmstate = s->rtc.next - qemu_get_clock_ms(rtc_clock);
+ s->rtc_next_vmstate = s->rtc.next - qemu_clock_get_ms(rtc_clock);
}
static int menelaus_post_load(void *opaque, int version_id)
@@ -843,7 +843,7 @@ static int twl92230_init(I2CSlave *i2c)
{
MenelausState *s = FROM_I2C_SLAVE(MenelausState, i2c);
- s->rtc.hz_tm = qemu_new_timer_ms(rtc_clock, menelaus_rtc_hz, s);
+ s->rtc.hz_tm = timer_new_ms(rtc_clock, menelaus_rtc_hz, s);
/* Three output pins plus one interrupt pin. */
qdev_init_gpio_out(&i2c->qdev, s->out, 4);
diff --git a/hw/timer/xilinx_timer.c b/hw/timer/xilinx_timer.c
index 5f2c9020ea..6113b975bf 100644
--- a/hw/timer/xilinx_timer.c
+++ b/hw/timer/xilinx_timer.c
@@ -25,6 +25,7 @@
#include "hw/sysbus.h"
#include "hw/ptimer.h"
#include "qemu/log.h"
+#include "qemu/main-loop.h"
#define D(x)
diff --git a/hw/tpm/tpm_tis.c b/hw/tpm/tpm_tis.c
index abe384ba9a..6f0a4d2814 100644
--- a/hw/tpm/tpm_tis.c
+++ b/hw/tpm/tpm_tis.c
@@ -28,6 +28,7 @@
#include "hw/pci/pci_ids.h"
#include "tpm_tis.h"
#include "qemu-common.h"
+#include "qemu/main-loop.h"
/*#define DEBUG_TIS */
diff --git a/hw/usb/hcd-ehci.c b/hw/usb/hcd-ehci.c
index 010a0d0d32..e5523d54e0 100644
--- a/hw/usb/hcd-ehci.c
+++ b/hw/usb/hcd-ehci.c
@@ -150,7 +150,7 @@ typedef enum {
#define NLPTR_TYPE_FSTN 3 // frame span traversal node
#define SET_LAST_RUN_CLOCK(s) \
- (s)->last_run_ns = qemu_get_clock_ns(vm_clock);
+ (s)->last_run_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
/* nifty macros from Arnon's EHCI version */
#define get_field(data, field) \
@@ -958,7 +958,7 @@ static void ehci_reset(void *opaque)
}
ehci_queues_rip_all(s, 0);
ehci_queues_rip_all(s, 1);
- qemu_del_timer(s->frame_timer);
+ timer_del(s->frame_timer);
qemu_bh_cancel(s->async_bh);
}
@@ -2296,7 +2296,7 @@ static void ehci_frame_timer(void *opaque)
int uframes, skipped_uframes;
int i;
- t_now = qemu_get_clock_ns(vm_clock);
+ t_now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
ns_elapsed = t_now - ehci->last_run_ns;
uframes = ns_elapsed / UFRAME_TIMER_NS;
@@ -2374,7 +2374,7 @@ static void ehci_frame_timer(void *opaque)
expire_time = t_now + (get_ticks_per_sec()
* (ehci->async_stepdown+1) / FRAME_TIMER_FREQ);
}
- qemu_mod_timer(ehci->frame_timer, expire_time);
+ timer_mod(ehci->frame_timer, expire_time);
}
}
@@ -2527,7 +2527,7 @@ void usb_ehci_realize(EHCIState *s, DeviceState *dev, Error **errp)
s->ports[i].dev = 0;
}
- s->frame_timer = qemu_new_timer_ns(vm_clock, ehci_frame_timer, s);
+ s->frame_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, ehci_frame_timer, s);
s->async_bh = qemu_bh_new(ehci_frame_timer, s);
qemu_register_reset(ehci_reset, s);
diff --git a/hw/usb/hcd-musb.c b/hw/usb/hcd-musb.c
index 7968e17c34..f91aa5580b 100644
--- a/hw/usb/hcd-musb.c
+++ b/hw/usb/hcd-musb.c
@@ -558,9 +558,9 @@ static void musb_schedule_cb(USBPort *port, USBPacket *packey)
return musb_cb_tick(ep);
if (!ep->intv_timer[dir])
- ep->intv_timer[dir] = qemu_new_timer_ns(vm_clock, musb_cb_tick, ep);
+ ep->intv_timer[dir] = timer_new_ns(QEMU_CLOCK_VIRTUAL, musb_cb_tick, ep);
- qemu_mod_timer(ep->intv_timer[dir], qemu_get_clock_ns(vm_clock) +
+ timer_mod(ep->intv_timer[dir], qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
muldiv64(timeout, get_ticks_per_sec(), 8000));
}
@@ -962,7 +962,7 @@ static void musb_write_fifo(MUSBEndPoint *ep, uint8_t value)
static void musb_ep_frame_cancel(MUSBEndPoint *ep, int dir)
{
if (ep->intv_timer[dir])
- qemu_del_timer(ep->intv_timer[dir]);
+ timer_del(ep->intv_timer[dir]);
}
/* Bus control */
diff --git a/hw/usb/hcd-ohci.c b/hw/usb/hcd-ohci.c
index d7836d6803..39a25a72e1 100644
--- a/hw/usb/hcd-ohci.c
+++ b/hw/usb/hcd-ohci.c
@@ -1251,8 +1251,8 @@ static int ohci_service_ed_list(OHCIState *ohci, uint32_t head, int completion)
/* Generate a SOF event, and set a timer for EOF */
static void ohci_sof(OHCIState *ohci)
{
- ohci->sof_time = qemu_get_clock_ns(vm_clock);
- qemu_mod_timer(ohci->eof_timer, ohci->sof_time + usb_frame_time);
+ ohci->sof_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ timer_mod(ohci->eof_timer, ohci->sof_time + usb_frame_time);
ohci_set_interrupt(ohci, OHCI_INTR_SF);
}
@@ -1349,12 +1349,12 @@ static void ohci_frame_boundary(void *opaque)
*/
static int ohci_bus_start(OHCIState *ohci)
{
- ohci->eof_timer = qemu_new_timer_ns(vm_clock,
+ ohci->eof_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
ohci_frame_boundary,
ohci);
if (ohci->eof_timer == NULL) {
- fprintf(stderr, "usb-ohci: %s: qemu_new_timer_ns failed\n", ohci->name);
+ fprintf(stderr, "usb-ohci: %s: timer_new_ns failed\n", ohci->name);
ohci_die(ohci);
return 0;
}
@@ -1370,7 +1370,7 @@ static int ohci_bus_start(OHCIState *ohci)
static void ohci_bus_stop(OHCIState *ohci)
{
if (ohci->eof_timer)
- qemu_del_timer(ohci->eof_timer);
+ timer_del(ohci->eof_timer);
ohci->eof_timer = NULL;
}
@@ -1474,7 +1474,7 @@ static uint32_t ohci_get_frame_remaining(OHCIState *ohci)
/* Being in USB operational state guarnatees sof_time was
* set already.
*/
- tks = qemu_get_clock_ns(vm_clock) - ohci->sof_time;
+ tks = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - ohci->sof_time;
/* avoid muldiv if possible */
if (tks >= usb_frame_time)
diff --git a/hw/usb/hcd-uhci.c b/hw/usb/hcd-uhci.c
index ac8283313e..578b949c92 100644
--- a/hw/usb/hcd-uhci.c
+++ b/hw/usb/hcd-uhci.c
@@ -32,6 +32,7 @@
#include "qemu/iov.h"
#include "sysemu/dma.h"
#include "trace.h"
+#include "qemu/main-loop.h"
//#define DEBUG
//#define DEBUG_DUMP_DATA
@@ -432,7 +433,7 @@ static int uhci_post_load(void *opaque, int version_id)
UHCIState *s = opaque;
if (version_id < 2) {
- s->expire_time = qemu_get_clock_ns(vm_clock) +
+ s->expire_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
(get_ticks_per_sec() / FRAME_TIMER_FREQ);
}
return 0;
@@ -475,9 +476,9 @@ static void uhci_port_write(void *opaque, hwaddr addr,
if ((val & UHCI_CMD_RS) && !(s->cmd & UHCI_CMD_RS)) {
/* start frame processing */
trace_usb_uhci_schedule_start();
- s->expire_time = qemu_get_clock_ns(vm_clock) +
+ s->expire_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
(get_ticks_per_sec() / FRAME_TIMER_FREQ);
- qemu_mod_timer(s->frame_timer, s->expire_time);
+ timer_mod(s->frame_timer, s->expire_time);
s->status &= ~UHCI_STS_HCHALTED;
} else if (!(val & UHCI_CMD_RS)) {
s->status |= UHCI_STS_HCHALTED;
@@ -1160,7 +1161,7 @@ static void uhci_frame_timer(void *opaque)
if (!(s->cmd & UHCI_CMD_RS)) {
/* Full stop */
trace_usb_uhci_schedule_stop();
- qemu_del_timer(s->frame_timer);
+ timer_del(s->frame_timer);
uhci_async_cancel_all(s);
/* set hchalted bit in status - UHCI11D 2.1.2 */
s->status |= UHCI_STS_HCHALTED;
@@ -1169,7 +1170,7 @@ static void uhci_frame_timer(void *opaque)
/* We still store expire_time in our state, for migration */
t_last_run = s->expire_time - frame_t;
- t_now = qemu_get_clock_ns(vm_clock);
+ t_now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
/* Process up to MAX_FRAMES_PER_TICK frames */
frames = (t_now - t_last_run) / frame_t;
@@ -1203,7 +1204,7 @@ static void uhci_frame_timer(void *opaque)
}
s->pending_int_mask = 0;
- qemu_mod_timer(s->frame_timer, t_now + frame_t);
+ timer_mod(s->frame_timer, t_now + frame_t);
}
static const MemoryRegionOps uhci_ioport_ops = {
@@ -1260,7 +1261,7 @@ static int usb_uhci_common_initfn(PCIDevice *dev)
}
}
s->bh = qemu_bh_new(uhci_bh, s);
- s->frame_timer = qemu_new_timer_ns(vm_clock, uhci_frame_timer, s);
+ s->frame_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, uhci_frame_timer, s);
s->num_ports_vmstate = NB_PORTS;
QTAILQ_INIT(&s->queues);
diff --git a/hw/usb/hcd-xhci.c b/hw/usb/hcd-xhci.c
index 58c88b8a6b..be6b86e2ba 100644
--- a/hw/usb/hcd-xhci.c
+++ b/hw/usb/hcd-xhci.c
@@ -608,7 +608,7 @@ static const char *event_name(XHCIEvent *event)
static uint64_t xhci_mfindex_get(XHCIState *xhci)
{
- int64_t now = qemu_get_clock_ns(vm_clock);
+ int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
return (now - xhci->mfindex_start) / 125000;
}
@@ -619,12 +619,12 @@ static void xhci_mfwrap_update(XHCIState *xhci)
int64_t now;
if ((xhci->usbcmd & bits) == bits) {
- now = qemu_get_clock_ns(vm_clock);
+ now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
mfindex = ((now - xhci->mfindex_start) / 125000) & 0x3fff;
left = 0x4000 - mfindex;
- qemu_mod_timer(xhci->mfwrap_timer, now + left * 125000);
+ timer_mod(xhci->mfwrap_timer, now + left * 125000);
} else {
- qemu_del_timer(xhci->mfwrap_timer);
+ timer_del(xhci->mfwrap_timer);
}
}
@@ -1086,7 +1086,7 @@ static void xhci_run(XHCIState *xhci)
{
trace_usb_xhci_run();
xhci->usbsts &= ~USBSTS_HCH;
- xhci->mfindex_start = qemu_get_clock_ns(vm_clock);
+ xhci->mfindex_start = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
}
static void xhci_stop(XHCIState *xhci)
@@ -1229,7 +1229,7 @@ static XHCIEPContext *xhci_alloc_epctx(XHCIState *xhci,
for (i = 0; i < ARRAY_SIZE(epctx->transfers); i++) {
usb_packet_init(&epctx->transfers[i].packet);
}
- epctx->kick_timer = qemu_new_timer_ns(vm_clock, xhci_ep_kick_timer, epctx);
+ epctx->kick_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, xhci_ep_kick_timer, epctx);
return epctx;
}
@@ -1304,7 +1304,7 @@ static int xhci_ep_nuke_one_xfer(XHCITransfer *t)
XHCIEPContext *epctx = t->xhci->slots[t->slotid-1].eps[t->epid-1];
if (epctx) {
epctx->retry = NULL;
- qemu_del_timer(epctx->kick_timer);
+ timer_del(epctx->kick_timer);
}
t->running_retry = 0;
}
@@ -1380,7 +1380,7 @@ static TRBCCode xhci_disable_ep(XHCIState *xhci, unsigned int slotid,
xhci_set_ep_state(xhci, epctx, NULL, EP_DISABLED);
- qemu_free_timer(epctx->kick_timer);
+ timer_free(epctx->kick_timer);
g_free(epctx);
slot->eps[epid-1] = NULL;
@@ -1844,12 +1844,12 @@ static void xhci_check_iso_kick(XHCIState *xhci, XHCITransfer *xfer,
XHCIEPContext *epctx, uint64_t mfindex)
{
if (xfer->mfindex_kick > mfindex) {
- qemu_mod_timer(epctx->kick_timer, qemu_get_clock_ns(vm_clock) +
+ timer_mod(epctx->kick_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
(xfer->mfindex_kick - mfindex) * 125000);
xfer->running_retry = 1;
} else {
epctx->mfindex_last = xfer->mfindex_kick;
- qemu_del_timer(epctx->kick_timer);
+ timer_del(epctx->kick_timer);
xfer->running_retry = 0;
}
}
@@ -2745,7 +2745,7 @@ static void xhci_reset(DeviceState *dev)
xhci->intr[i].ev_buffer_get = 0;
}
- xhci->mfindex_start = qemu_get_clock_ns(vm_clock);
+ xhci->mfindex_start = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
xhci_mfwrap_update(xhci);
}
@@ -3366,7 +3366,7 @@ static int usb_xhci_initfn(struct PCIDevice *dev)
xhci->numslots = 1;
}
- xhci->mfwrap_timer = qemu_new_timer_ns(vm_clock, xhci_mfwrap_timer, xhci);
+ xhci->mfwrap_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, xhci_mfwrap_timer, xhci);
xhci->irq = dev->irq[0];
@@ -3451,7 +3451,7 @@ static int usb_xhci_post_load(void *opaque, int version_id)
epctx->state = state;
if (state == EP_RUNNING) {
/* kick endpoint after vmload is finished */
- qemu_mod_timer(epctx->kick_timer, qemu_get_clock_ns(vm_clock));
+ timer_mod(epctx->kick_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
}
}
}
diff --git a/hw/usb/host-libusb.c b/hw/usb/host-libusb.c
index f660770076..128955dd92 100644
--- a/hw/usb/host-libusb.c
+++ b/hw/usb/host-libusb.c
@@ -1462,7 +1462,7 @@ static void usb_host_auto_check(void *unused)
if (unconnected == 0) {
/* nothing to watch */
if (usb_auto_timer) {
- qemu_del_timer(usb_auto_timer);
+ timer_del(usb_auto_timer);
trace_usb_host_auto_scan_disabled();
}
return;
@@ -1474,13 +1474,13 @@ static void usb_host_auto_check(void *unused)
usb_vmstate = qemu_add_vm_change_state_handler(usb_host_vm_state, NULL);
}
if (!usb_auto_timer) {
- usb_auto_timer = qemu_new_timer_ms(rt_clock, usb_host_auto_check, NULL);
+ usb_auto_timer = timer_new_ms(QEMU_CLOCK_REALTIME, usb_host_auto_check, NULL);
if (!usb_auto_timer) {
return;
}
trace_usb_host_auto_scan_enabled();
}
- qemu_mod_timer(usb_auto_timer, qemu_get_clock_ms(rt_clock) + 2000);
+ timer_mod(usb_auto_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 2000);
}
void usb_host_info(Monitor *mon, const QDict *qdict)
diff --git a/hw/usb/host-linux.c b/hw/usb/host-linux.c
index 7901f4c01a..65cd3b444c 100644
--- a/hw/usb/host-linux.c
+++ b/hw/usb/host-linux.c
@@ -1754,7 +1754,7 @@ static void usb_host_auto_check(void *unused)
if (unconnected == 0) {
/* nothing to watch */
if (usb_auto_timer) {
- qemu_del_timer(usb_auto_timer);
+ timer_del(usb_auto_timer);
trace_usb_host_auto_scan_disabled();
}
return;
@@ -1765,13 +1765,13 @@ static void usb_host_auto_check(void *unused)
usb_vmstate = qemu_add_vm_change_state_handler(usb_host_vm_state, NULL);
}
if (!usb_auto_timer) {
- usb_auto_timer = qemu_new_timer_ms(rt_clock, usb_host_auto_check, NULL);
+ usb_auto_timer = timer_new_ms(QEMU_CLOCK_REALTIME, usb_host_auto_check, NULL);
if (!usb_auto_timer) {
return;
}
trace_usb_host_auto_scan_enabled();
}
- qemu_mod_timer(usb_auto_timer, qemu_get_clock_ms(rt_clock) + 2000);
+ timer_mod(usb_auto_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 2000);
}
#ifndef CONFIG_USB_LIBUSB
diff --git a/hw/usb/redirect.c b/hw/usb/redirect.c
index e3b9f324b3..287a505b48 100644
--- a/hw/usb/redirect.c
+++ b/hw/usb/redirect.c
@@ -1297,7 +1297,7 @@ static int usbredir_initfn(USBDevice *udev)
}
dev->chardev_close_bh = qemu_bh_new(usbredir_chardev_close_bh, dev);
- dev->attach_timer = qemu_new_timer_ms(vm_clock, usbredir_do_attach, dev);
+ dev->attach_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, usbredir_do_attach, dev);
packet_id_queue_init(&dev->cancelled, dev, "cancelled");
packet_id_queue_init(&dev->already_in_flight, dev, "already-in-flight");
@@ -1338,8 +1338,8 @@ static void usbredir_handle_destroy(USBDevice *udev)
/* Note must be done after qemu_chr_close, as that causes a close event */
qemu_bh_delete(dev->chardev_close_bh);
- qemu_del_timer(dev->attach_timer);
- qemu_free_timer(dev->attach_timer);
+ timer_del(dev->attach_timer);
+ timer_free(dev->attach_timer);
usbredir_cleanup_device_queues(dev);
@@ -1493,7 +1493,7 @@ static void usbredir_device_connect(void *priv,
USBRedirDevice *dev = priv;
const char *speed;
- if (qemu_timer_pending(dev->attach_timer) || dev->dev.attached) {
+ if (timer_pending(dev->attach_timer) || dev->dev.attached) {
ERROR("Received device connect while already connected\n");
return;
}
@@ -1548,7 +1548,7 @@ static void usbredir_device_connect(void *priv,
}
usbredir_check_bulk_receiving(dev);
- qemu_mod_timer(dev->attach_timer, dev->next_attach_time);
+ timer_mod(dev->attach_timer, dev->next_attach_time);
}
static void usbredir_device_disconnect(void *priv)
@@ -1556,7 +1556,7 @@ static void usbredir_device_disconnect(void *priv)
USBRedirDevice *dev = priv;
/* Stop any pending attaches */
- qemu_del_timer(dev->attach_timer);
+ timer_del(dev->attach_timer);
if (dev->dev.attached) {
DPRINTF("detaching device\n");
@@ -1565,7 +1565,7 @@ static void usbredir_device_disconnect(void *priv)
* Delay next usb device attach to give the guest a chance to see
* see the detach / attach in case of quick close / open succession
*/
- dev->next_attach_time = qemu_get_clock_ms(vm_clock) + 200;
+ dev->next_attach_time = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 200;
}
/* Reset state so that the next dev connected starts with a clean slate */
@@ -1588,7 +1588,7 @@ static void usbredir_interface_info(void *priv,
* If we receive interface info after the device has already been
* connected (ie on a set_config), re-check interface dependent things.
*/
- if (qemu_timer_pending(dev->attach_timer) || dev->dev.attached) {
+ if (timer_pending(dev->attach_timer) || dev->dev.attached) {
usbredir_check_bulk_receiving(dev);
if (usbredir_check_filter(dev)) {
ERROR("Device no longer matches filter after interface info "
diff --git a/hw/virtio/virtio-balloon.c b/hw/virtio/virtio-balloon.c
index aac7f83ccf..9504877120 100644
--- a/hw/virtio/virtio-balloon.c
+++ b/hw/virtio/virtio-balloon.c
@@ -78,8 +78,8 @@ static bool balloon_stats_enabled(const VirtIOBalloon *s)
static void balloon_stats_destroy_timer(VirtIOBalloon *s)
{
if (balloon_stats_enabled(s)) {
- qemu_del_timer(s->stats_timer);
- qemu_free_timer(s->stats_timer);
+ timer_del(s->stats_timer);
+ timer_free(s->stats_timer);
s->stats_timer = NULL;
s->stats_poll_interval = 0;
}
@@ -87,7 +87,7 @@ static void balloon_stats_destroy_timer(VirtIOBalloon *s)
static void balloon_stats_change_timer(VirtIOBalloon *s, int secs)
{
- qemu_mod_timer(s->stats_timer, qemu_get_clock_ms(vm_clock) + secs * 1000);
+ timer_mod(s->stats_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + secs * 1000);
}
static void balloon_stats_poll_cb(void *opaque)
@@ -173,7 +173,7 @@ static void balloon_stats_set_poll_interval(Object *obj, struct Visitor *v,
/* create a new timer */
g_assert(s->stats_timer == NULL);
- s->stats_timer = qemu_new_timer_ms(vm_clock, balloon_stats_poll_cb, s);
+ s->stats_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, balloon_stats_poll_cb, s);
s->stats_poll_interval = value;
balloon_stats_change_timer(s, 0);
}
diff --git a/hw/virtio/virtio-rng.c b/hw/virtio/virtio-rng.c
index bac8421a20..314e393520 100644
--- a/hw/virtio/virtio-rng.c
+++ b/hw/virtio/virtio-rng.c
@@ -129,8 +129,8 @@ static void check_rate_limit(void *opaque)
vrng->quota_remaining = vrng->conf.max_bytes;
virtio_rng_process(vrng);
- qemu_mod_timer(vrng->rate_limit_timer,
- qemu_get_clock_ms(vm_clock) + vrng->conf.period_ms);
+ timer_mod(vrng->rate_limit_timer,
+ qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vrng->conf.period_ms);
}
static int virtio_rng_device_init(VirtIODevice *vdev)
@@ -172,11 +172,11 @@ static int virtio_rng_device_init(VirtIODevice *vdev)
assert(vrng->conf.max_bytes <= INT64_MAX);
vrng->quota_remaining = vrng->conf.max_bytes;
- vrng->rate_limit_timer = qemu_new_timer_ms(vm_clock,
+ vrng->rate_limit_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
check_rate_limit, vrng);
- qemu_mod_timer(vrng->rate_limit_timer,
- qemu_get_clock_ms(vm_clock) + vrng->conf.period_ms);
+ timer_mod(vrng->rate_limit_timer,
+ qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vrng->conf.period_ms);
register_savevm(qdev, "virtio-rng", -1, 1, virtio_rng_save,
virtio_rng_load, vrng);
@@ -189,8 +189,8 @@ static int virtio_rng_device_exit(DeviceState *qdev)
VirtIORNG *vrng = VIRTIO_RNG(qdev);
VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
- qemu_del_timer(vrng->rate_limit_timer);
- qemu_free_timer(vrng->rate_limit_timer);
+ timer_del(vrng->rate_limit_timer);
+ timer_free(vrng->rate_limit_timer);
unregister_savevm(qdev, "virtio-rng", vrng);
virtio_cleanup(vdev);
return 0;
diff --git a/hw/watchdog/wdt_i6300esb.c b/hw/watchdog/wdt_i6300esb.c
index 2e064bac81..36d38878ee 100644
--- a/hw/watchdog/wdt_i6300esb.c
+++ b/hw/watchdog/wdt_i6300esb.c
@@ -130,7 +130,7 @@ static void i6300esb_restart_timer(I6300State *d, int stage)
i6300esb_debug("stage %d, timeout %" PRIi64 "\n", d->stage, timeout);
- qemu_mod_timer(d->timer, qemu_get_clock_ns(vm_clock) + timeout);
+ timer_mod(d->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + timeout);
}
/* This is called when the guest disables the watchdog. */
@@ -138,7 +138,7 @@ static void i6300esb_disable_timer(I6300State *d)
{
i6300esb_debug("timer disabled\n");
- qemu_del_timer(d->timer);
+ timer_del(d->timer);
}
static void i6300esb_reset(DeviceState *dev)
@@ -414,7 +414,7 @@ static int i6300esb_init(PCIDevice *dev)
i6300esb_debug("I6300State = %p\n", d);
- d->timer = qemu_new_timer_ns(vm_clock, i6300esb_timer_expired, d);
+ d->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, i6300esb_timer_expired, d);
d->previous_reboot_flag = 0;
memory_region_init_io(&d->io_mem, OBJECT(d), &i6300esb_ops, d,
diff --git a/hw/watchdog/wdt_ib700.c b/hw/watchdog/wdt_ib700.c
index e97b4c3049..bc994a4c32 100644
--- a/hw/watchdog/wdt_ib700.c
+++ b/hw/watchdog/wdt_ib700.c
@@ -62,7 +62,7 @@ static void ib700_write_enable_reg(void *vp, uint32_t addr, uint32_t data)
ib700_debug("addr = %x, data = %x\n", addr, data);
timeout = (int64_t) time_map[data & 0xF] * get_ticks_per_sec();
- qemu_mod_timer(s->timer, qemu_get_clock_ns (vm_clock) + timeout);
+ timer_mod(s->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + timeout);
}
/* A write (of any value) to this register disables the timer. */
@@ -72,7 +72,7 @@ static void ib700_write_disable_reg(void *vp, uint32_t addr, uint32_t data)
ib700_debug("addr = %x, data = %x\n", addr, data);
- qemu_del_timer(s->timer);
+ timer_del(s->timer);
}
/* This is called when the watchdog expires. */
@@ -83,7 +83,7 @@ static void ib700_timer_expired(void *vp)
ib700_debug("watchdog expired\n");
watchdog_perform_action();
- qemu_del_timer(s->timer);
+ timer_del(s->timer);
}
static const VMStateDescription vmstate_ib700 = {
@@ -110,7 +110,7 @@ static void wdt_ib700_realize(DeviceState *dev, Error **errp)
ib700_debug("watchdog init\n");
- s->timer = qemu_new_timer_ns(vm_clock, ib700_timer_expired, s);
+ s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, ib700_timer_expired, s);
portio_list_init(port_list, OBJECT(s), wdt_portio_list, s, "ib700");
portio_list_add(port_list, isa_address_space_io(&s->parent_obj), 0);
@@ -122,7 +122,7 @@ static void wdt_ib700_reset(DeviceState *dev)
ib700_debug("watchdog reset\n");
- qemu_del_timer(s->timer);
+ timer_del(s->timer);
}
static WatchdogTimerModel model = {
diff --git a/hw/xtensa/pic_cpu.c b/hw/xtensa/pic_cpu.c
index 7f015ff5ab..e2005bd981 100644
--- a/hw/xtensa/pic_cpu.c
+++ b/hw/xtensa/pic_cpu.c
@@ -52,11 +52,11 @@ void check_interrupts(CPUXtensaState *env)
uint32_t int_set_enabled = env->sregs[INTSET] & env->sregs[INTENABLE];
int level;
- /* If the CPU is halted advance CCOUNT according to the vm_clock time
+ /* If the CPU is halted advance CCOUNT according to the QEMU_CLOCK_VIRTUAL time
* elapsed since the moment when it was advanced last time.
*/
if (cs->halted) {
- int64_t now = qemu_get_clock_ns(vm_clock);
+ int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
xtensa_advance_ccount(env,
muldiv64(now - env->halt_clock,
@@ -119,7 +119,7 @@ void xtensa_rearm_ccompare_timer(CPUXtensaState *env)
}
}
env->wake_ccount = wake_ccount;
- qemu_mod_timer(env->ccompare_timer, env->halt_clock +
+ timer_mod(env->ccompare_timer, env->halt_clock +
muldiv64(wake_ccount - env->sregs[CCOUNT],
1000000, env->config->clock_freq_khz));
}
@@ -131,7 +131,7 @@ static void xtensa_ccompare_cb(void *opaque)
CPUState *cs = CPU(cpu);
if (cs->halted) {
- env->halt_clock = qemu_get_clock_ns(vm_clock);
+ env->halt_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
xtensa_advance_ccount(env, env->wake_ccount - env->sregs[CCOUNT]);
if (!cpu_has_work(cs)) {
env->sregs[CCOUNT] = env->wake_ccount + 1;
@@ -149,7 +149,7 @@ void xtensa_irq_init(CPUXtensaState *env)
if (xtensa_option_enabled(env->config, XTENSA_OPTION_TIMER_INTERRUPT) &&
env->config->nccompare > 0) {
env->ccompare_timer =
- qemu_new_timer_ns(vm_clock, &xtensa_ccompare_cb, cpu);
+ timer_new_ns(QEMU_CLOCK_VIRTUAL, &xtensa_ccompare_cb, cpu);
}
}
diff --git a/include/block/aio.h b/include/block/aio.h
index 5743bf1ba0..2efdf416cf 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -14,10 +14,12 @@
#ifndef QEMU_AIO_H
#define QEMU_AIO_H
+#include "qemu/typedefs.h"
#include "qemu-common.h"
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/thread.h"
+#include "qemu/timer.h"
typedef struct BlockDriverAIOCB BlockDriverAIOCB;
typedef void BlockDriverCompletionFunc(void *opaque, int ret);
@@ -42,7 +44,7 @@ typedef struct AioHandler AioHandler;
typedef void QEMUBHFunc(void *opaque);
typedef void IOHandler(void *opaque);
-typedef struct AioContext {
+struct AioContext {
GSource source;
/* The list of registered AIO handlers */
@@ -72,7 +74,10 @@ typedef struct AioContext {
/* Thread pool for performing work and receiving completion callbacks */
struct ThreadPool *thread_pool;
-} AioContext;
+
+ /* TimerLists for calling timers - one per clock type */
+ QEMUTimerListGroup tlg;
+};
/**
* aio_context_new: Allocate a new AioContext.
@@ -241,4 +246,47 @@ void qemu_aio_set_fd_handler(int fd,
void *opaque);
#endif
+/**
+ * aio_timer_new:
+ * @ctx: the aio context
+ * @type: the clock type
+ * @scale: the scale
+ * @cb: the callback to call on timer expiry
+ * @opaque: the opaque pointer to pass to the callback
+ *
+ * Allocate a new timer attached to the context @ctx.
+ * The function is responsible for memory allocation.
+ *
+ * The preferred interface is aio_timer_init. Use that
+ * unless you really need dynamic memory allocation.
+ *
+ * Returns: a pointer to the new timer
+ */
+static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type,
+ int scale,
+ QEMUTimerCB *cb, void *opaque)
+{
+ return timer_new_tl(ctx->tlg.tl[type], scale, cb, opaque);
+}
+
+/**
+ * aio_timer_init:
+ * @ctx: the aio context
+ * @ts: the timer
+ * @type: the clock type
+ * @scale: the scale
+ * @cb: the callback to call on timer expiry
+ * @opaque: the opaque pointer to pass to the callback
+ *
+ * Initialise a new timer attached to the context @ctx.
+ * The caller is responsible for memory allocation.
+ */
+static inline void aio_timer_init(AioContext *ctx,
+ QEMUTimer *ts, QEMUClockType type,
+ int scale,
+ QEMUTimerCB *cb, void *opaque)
+{
+ timer_init(ts, ctx->tlg.tl[type], scale, cb, opaque);
+}
+
#endif
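
As a quick illustration of the aio timer helpers documented above: a component that owns an AioContext could arm a one-shot timer roughly as in the sketch below. The callback name, the surrounding function and the 100 ms delay are invented for the example; only aio_timer_new, timer_mod, timer_del, timer_free and qemu_clock_get_ms come from the interfaces introduced by this series.

static void example_cb(void *opaque)
{
    /* hypothetical callback: react to the timer firing */
}

static void example_arm(AioContext *ctx)
{
    /* allocate a timer bound to ctx's QEMU_CLOCK_REALTIME timer list */
    QEMUTimer *t = aio_timer_new(ctx, QEMU_CLOCK_REALTIME, SCALE_MS,
                                 example_cb, NULL);

    /* fire roughly 100 ms from now, expressed in the timer's ms scale */
    timer_mod(t, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 100);

    /* ... once the timer is no longer needed ... */
    timer_del(t);
    timer_free(t);
}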
diff --git a/include/block/block_int.h b/include/block/block_int.h
index e45f2a0d56..8012e253c9 100644
--- a/include/block/block_int.h
+++ b/include/block/block_int.h
@@ -34,6 +34,7 @@
#include "monitor/monitor.h"
#include "qemu/hbitmap.h"
#include "block/snapshot.h"
+#include "qemu/main-loop.h"
#define BLOCK_FLAG_ENCRYPT 1
#define BLOCK_FLAG_COMPAT6 4
@@ -281,6 +282,9 @@ struct BlockDriverState {
/* Whether the disk can expand beyond total_sectors */
int growable;
+ /* Whether produces zeros when read beyond eof */
+ bool zero_beyond_eof;
+
/* the memory alignment required for the buffers handled by this driver */
int buffer_alignment;
diff --git a/include/block/blockjob.h b/include/block/blockjob.h
index c290d07bba..d530409ff5 100644
--- a/include/block/blockjob.h
+++ b/include/block/blockjob.h
@@ -141,7 +141,7 @@ void *block_job_create(const BlockJobType *job_type, BlockDriverState *bs,
* Put the job to sleep (assuming that it wasn't canceled) for @ns
* nanoseconds. Canceling the job will interrupt the wait immediately.
*/
-void block_job_sleep_ns(BlockJob *job, QEMUClock *clock, int64_t ns);
+void block_job_sleep_ns(BlockJob *job, QEMUClockType type, int64_t ns);
/**
* block_job_completed:
diff --git a/include/block/coroutine.h b/include/block/coroutine.h
index 1f2db3e8a4..4232569c53 100644
--- a/include/block/coroutine.h
+++ b/include/block/coroutine.h
@@ -16,6 +16,7 @@
#define QEMU_COROUTINE_H
#include <stdbool.h>
+#include "qemu/typedefs.h"
#include "qemu/queue.h"
#include "qemu/timer.h"
@@ -212,7 +213,7 @@ void qemu_co_rwlock_unlock(CoRwlock *lock);
* Note this function uses timers and hence only works when a main loop is in
* use. See main-loop.h and do not use from qemu-tool programs.
*/
-void coroutine_fn co_sleep_ns(QEMUClock *clock, int64_t ns);
+void coroutine_fn co_sleep_ns(QEMUClockType type, int64_t ns);
/**
* Yield until a file descriptor becomes readable
diff --git a/include/hw/acpi/acpi.h b/include/hw/acpi/acpi.h
index 635be7be10..51733d3390 100644
--- a/include/hw/acpi/acpi.h
+++ b/include/hw/acpi/acpi.h
@@ -136,7 +136,7 @@ void acpi_pm_tmr_reset(ACPIREGS *ar);
#include "qemu/timer.h"
static inline int64_t acpi_pm_tmr_get_clock(void)
{
- return muldiv64(qemu_get_clock_ns(vm_clock), PM_TIMER_FREQUENCY,
+ return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), PM_TIMER_FREQUENCY,
get_ticks_per_sec());
}
diff --git a/include/qemu/ratelimit.h b/include/qemu/ratelimit.h
index d1610f135b..d413a4a696 100644
--- a/include/qemu/ratelimit.h
+++ b/include/qemu/ratelimit.h
@@ -23,7 +23,7 @@ typedef struct {
static inline int64_t ratelimit_calculate_delay(RateLimit *limit, uint64_t n)
{
- int64_t now = qemu_get_clock_ns(rt_clock);
+ int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
if (limit->next_slice_time < now) {
limit->next_slice_time = now + limit->slice_ns;
diff --git a/include/qemu/timer.h b/include/qemu/timer.h
index 9dd206ce7f..e4934dd61b 100644
--- a/include/qemu/timer.h
+++ b/include/qemu/timer.h
@@ -1,8 +1,8 @@
#ifndef QEMU_TIMER_H
#define QEMU_TIMER_H
+#include "qemu/typedefs.h"
#include "qemu-common.h"
-#include "qemu/main-loop.h"
#include "qemu/notify.h"
/* timers */
@@ -11,80 +11,643 @@
#define SCALE_US 1000
#define SCALE_NS 1
-typedef struct QEMUClock QEMUClock;
+/**
+ * QEMUClockType:
+ *
+ * The following clock types are available:
+ *
+ * @QEMU_CLOCK_REALTIME: Real time clock
+ *
+ * The real time clock should be used only for stuff which does not
+ * change the virtual machine state, as it is run even if the virtual
+ * machine is stopped. The real time clock has a frequency of 1000
+ * Hz.
+ *
+ * @QEMU_CLOCK_VIRTUAL: virtual clock
+ *
+ * The virtual clock is only run during the emulation. It is stopped
+ * when the virtual machine is stopped. Virtual timers use a high
+ * precision clock, usually cpu cycles (use ticks_per_sec).
+ *
+ * @QEMU_CLOCK_HOST: host clock
+ *
+ * The host clock should be used for device models that emulate accurate
+ * real time sources. It will continue to run when the virtual machine
+ * is suspended, and it will reflect system time changes the host may
+ * undergo (e.g. due to NTP). The host clock has the same precision as
+ * the virtual clock.
+ */
+
+typedef enum {
+ QEMU_CLOCK_REALTIME = 0,
+ QEMU_CLOCK_VIRTUAL = 1,
+ QEMU_CLOCK_HOST = 2,
+ QEMU_CLOCK_MAX
+} QEMUClockType;
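+
The clock types above replace the old rt_clock/vm_clock/host_clock globals removed further down in this header. As a cheat sheet (not part of the patch itself), the mechanical conversions performed throughout the hunks above follow this mapping:

/*
 *   qemu_get_clock_ns(vm_clock)              ->  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)
 *   qemu_get_clock_ms(rt_clock)              ->  qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
 *   qemu_new_timer_ns(vm_clock, cb, opaque)  ->  timer_new_ns(QEMU_CLOCK_VIRTUAL, cb, opaque)
 *   qemu_mod_timer(ts, expires)              ->  timer_mod(ts, expires)
 *   qemu_del_timer(ts)                       ->  timer_del(ts)
 *   qemu_free_timer(ts)                      ->  timer_free(ts)
 *   qemu_timer_pending(ts)                   ->  timer_pending(ts)
 */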
+
+typedef struct QEMUTimerList QEMUTimerList;
+
+struct QEMUTimerListGroup {
+ QEMUTimerList *tl[QEMU_CLOCK_MAX];
+};
+
typedef void QEMUTimerCB(void *opaque);
+typedef void QEMUTimerListNotifyCB(void *opaque);
+
+struct QEMUTimer {
+ int64_t expire_time; /* in nanoseconds */
+ QEMUTimerList *timer_list;
+ QEMUTimerCB *cb;
+ void *opaque;
+ QEMUTimer *next;
+ int scale;
+};
+
+extern QEMUTimerListGroup main_loop_tlg;
+
+/*
+ * QEMUClockType
+ */
+
+/**
+ * qemu_clock_get_ns:
+ * @type: the clock type
+ *
+ * Get the nanosecond value of a clock with
+ * type @type
+ *
+ * Returns: the clock value in nanoseconds
+ */
+int64_t qemu_clock_get_ns(QEMUClockType type);
+
+/**
+ * qemu_clock_get_ms:
+ * @type: the clock type
+ *
+ * Get the millisecond value of a clock with
+ * type @type
+ *
+ * Returns: the clock value in milliseconds
+ */
+static inline int64_t qemu_clock_get_ms(QEMUClockType type)
+{
+ return qemu_clock_get_ns(type) / SCALE_MS;
+}
+
+/**
+ * qemu_clock_get_us:
+ * @type: the clock type
+ *
+ * Get the microsecond value of a clock with
+ * type @type
+ *
+ * Returns: the clock value in microseconds
+ */
+static inline int64_t qemu_clock_get_us(QEMUClockType type)
+{
+ return qemu_clock_get_ns(type) / SCALE_US;
+}
+
+/**
+ * qemu_clock_has_timers:
+ * @type: the clock type
+ *
+ * Determines whether a clock's default timer list
+ * has timers attached
+ *
+ * Returns: true if the clock's default timer list
+ * has timers attached
+ */
+bool qemu_clock_has_timers(QEMUClockType type);
+
+/**
+ * qemu_clock_expired:
+ * @type: the clock type
+ *
+ * Determines whether a clock's default timer list
+ * has an expired timer.
+ *
+ * Returns: true if the clock's default timer list has
+ * an expired timer
+ */
+bool qemu_clock_expired(QEMUClockType type);
+
+/**
+ * qemu_clock_use_for_deadline:
+ * @type: the clock type
+ *
+ * Determine whether a clock should be used for deadline
+ * calculations. Some clocks, for instance QEMU_CLOCK_VIRTUAL with
+ * use_icount set, do not count in nanoseconds. Such clocks
+ * are not used for deadline calculations, and are presumed
+ * to interrupt any poll using qemu_notify/aio_notify
+ * etc.
+ *
+ * Returns: true if the clock runs in nanoseconds and
+ * should be used for a deadline.
+ */
+bool qemu_clock_use_for_deadline(QEMUClockType type);
+
+/**
+ * qemu_clock_deadline_ns_all:
+ * @type: the clock type
+ *
+ * Calculate the deadline across all timer lists associated
+ * with a clock (as opposed to just the default one)
+ * in nanoseconds, or -1 if no timer is set to expire.
+ *
+ * Returns: time until expiry in nanoseconds or -1
+ */
+int64_t qemu_clock_deadline_ns_all(QEMUClockType type);
+
+/**
+ * qemu_clock_get_main_loop_timerlist:
+ * @type: the clock type
+ *
+ * Return the default timer list associated with a clock.
+ *
+ * Returns: the default timer list
+ */
+QEMUTimerList *qemu_clock_get_main_loop_timerlist(QEMUClockType type);
-/* The real time clock should be used only for stuff which does not
- change the virtual machine state, as it is run even if the virtual
- machine is stopped. The real time clock has a frequency of 1000
- Hz. */
-extern QEMUClock *rt_clock;
-
-/* The virtual clock is only run during the emulation. It is stopped
- when the virtual machine is stopped. Virtual timers use a high
- precision clock, usually cpu cycles (use ticks_per_sec). */
-extern QEMUClock *vm_clock;
-
-/* The host clock should be use for device models that emulate accurate
- real time sources. It will continue to run when the virtual machine
- is suspended, and it will reflect system time changes the host may
- undergo (e.g. due to NTP). The host clock has the same precision as
- the virtual clock. */
-extern QEMUClock *host_clock;
-
-int64_t qemu_get_clock_ns(QEMUClock *clock);
-int64_t qemu_clock_has_timers(QEMUClock *clock);
-int64_t qemu_clock_expired(QEMUClock *clock);
-int64_t qemu_clock_deadline(QEMUClock *clock);
-void qemu_clock_enable(QEMUClock *clock, bool enabled);
-void qemu_clock_warp(QEMUClock *clock);
-
-void qemu_register_clock_reset_notifier(QEMUClock *clock, Notifier *notifier);
-void qemu_unregister_clock_reset_notifier(QEMUClock *clock,
+/**
+ * qemu_clock_notify:
+ * @type: the clock type
+ *
+ * Call the notifier callback connected with the default timer
+ * list linked to the clock, or qemu_notify() if none.
+ */
+void qemu_clock_notify(QEMUClockType type);
+
+/**
+ * qemu_clock_enable:
+ * @type: the clock type
+ * @enabled: true to enable, false to disable
+ *
+ * Enable or disable a clock
+ */
+void qemu_clock_enable(QEMUClockType type, bool enabled);
+
+/**
+ * qemu_clock_warp:
+ * @type: the clock type
+ *
+ * Warp a clock to a new value
+ */
+void qemu_clock_warp(QEMUClockType type);
+
+/**
+ * qemu_clock_register_reset_notifier:
+ * @type: the clock type
+ * @notifier: the notifier function
+ *
+ * Register a notifier function to call when the clock
+ * concerned is reset.
+ */
+void qemu_clock_register_reset_notifier(QEMUClockType type,
+ Notifier *notifier);
+
+/**
+ * qemu_clock_unregister_reset_notifier:
+ * @type: the clock type
+ * @notifier: the notifier function
+ *
+ * Unregister a notifier function to call when the clock
+ * concerned is reset.
+ */
+void qemu_clock_unregister_reset_notifier(QEMUClockType type,
Notifier *notifier);
-QEMUTimer *qemu_new_timer(QEMUClock *clock, int scale,
- QEMUTimerCB *cb, void *opaque);
-void qemu_free_timer(QEMUTimer *ts);
-void qemu_del_timer(QEMUTimer *ts);
-void qemu_mod_timer_ns(QEMUTimer *ts, int64_t expire_time);
-void qemu_mod_timer(QEMUTimer *ts, int64_t expire_time);
-bool qemu_timer_pending(QEMUTimer *ts);
-bool qemu_timer_expired(QEMUTimer *timer_head, int64_t current_time);
-uint64_t qemu_timer_expire_time_ns(QEMUTimer *ts);
-
-void qemu_run_timers(QEMUClock *clock);
-void qemu_run_all_timers(void);
-void configure_alarms(char const *opt);
-void init_clocks(void);
-int init_timer_alarm(void);
+/**
+ * qemu_clock_run_timers:
+ * @type: clock on which to operate
+ *
+ * Run all the timers associated with the default timer list
+ * of a clock.
+ *
+ * Returns: true if any timer ran.
+ */
+bool qemu_clock_run_timers(QEMUClockType type);
-int64_t cpu_get_ticks(void);
-void cpu_enable_ticks(void);
-void cpu_disable_ticks(void);
+/**
+ * qemu_clock_run_all_timers:
+ *
+ * Run all the timers associated with the default timer list
+ * of every clock.
+ *
+ * Returns: true if any timer ran.
+ */
+bool qemu_clock_run_all_timers(void);
+
+/*
+ * QEMUTimerList
+ */
+
+/**
+ * timerlist_new:
+ * @type: the clock type to associate with the timerlist
+ * @cb: the callback to call on notification
+ * @opaque: the opaque pointer to pass to the callback
+ *
+ * Create a new timerlist associated with the clock of
+ * type @type.
+ *
+ * Returns: a pointer to the QEMUTimerList created
+ */
+QEMUTimerList *timerlist_new(QEMUClockType type,
+ QEMUTimerListNotifyCB *cb, void *opaque);
+
+/**
+ * timerlist_free:
+ * @timer_list: the timer list to free
+ *
+ * Frees a timer_list. It must have no active timers.
+ */
+void timerlist_free(QEMUTimerList *timer_list);
+
+/**
+ * timerlist_has_timers:
+ * @timer_list: the timer list to operate on
+ *
+ * Determine whether a timer list has active timers
+ *
+ * Returns: true if the timer list has timers.
+ */
+bool timerlist_has_timers(QEMUTimerList *timer_list);
+
+/**
+ * timerlist_expired:
+ * @timer_list: the timer list to operate on
+ *
+ * Determine whether a timer list has any timers which
+ * are expired.
+ *
+ * Returns: true if the timer list has timers which
+ * have expired.
+ */
+bool timerlist_expired(QEMUTimerList *timer_list);
+
+/**
+ * timerlist_deadline_ns:
+ * @timer_list: the timer list to operate on
+ *
+ * Determine the deadline for a timer_list, i.e.
+ * the number of nanoseconds until the first timer
+ * expires. Return -1 if there are no timers.
+ *
+ * Returns: the number of nanoseconds until the earliest
+ * timer expires, or -1 if none
+ */
+int64_t timerlist_deadline_ns(QEMUTimerList *timer_list);
+
+/**
+ * timerlist_get_clock:
+ * @timer_list: the timer list to operate on
+ *
+ * Determine the clock type associated with a timer list.
+ *
+ * Returns: the clock type associated with the
+ * timer list.
+ */
+QEMUClockType timerlist_get_clock(QEMUTimerList *timer_list);
+
+/**
+ * timerlist_run_timers:
+ * @timer_list: the timer list to use
+ *
+ * Call all expired timers associated with the timer list.
+ *
+ * Returns: true if any timer expired
+ */
+bool timerlist_run_timers(QEMUTimerList *timer_list);
+
+/**
+ * timerlist_notify:
+ * @timer_list: the timer list to use
+ *
+ * Call the notifier callback associated with the timer list.
+ */
+void timerlist_notify(QEMUTimerList *timer_list);
+
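A hedged sketch of using a private timer list, the pattern the AioContext changes above build on. The notify and expiry callbacks are placeholders; everything else uses only the functions declared in this section plus timer_new_tl/timer_mod declared later in this header.

static void my_notify(void *opaque)
{
    /* placeholder: wake up whatever loop services this timer list */
}

static void my_expiry(void *opaque)
{
    /* placeholder expiry handler */
}

static void private_list_example(void)
{
    QEMUTimerList *tl = timerlist_new(QEMU_CLOCK_VIRTUAL, my_notify, NULL);
    QEMUTimer *t = timer_new_tl(tl, SCALE_NS, my_expiry, NULL);

    timer_mod(t, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 1000);

    /* in the owning loop, after waiting up to timerlist_deadline_ns(tl): */
    if (timerlist_expired(tl)) {
        timerlist_run_timers(tl);
    }

    timer_del(t);
    timer_free(t);
    timerlist_free(tl);
}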
+/*
+ * QEMUTimerListGroup
+ */
+
+/**
+ * timerlistgroup_init:
+ * @tlg: the timer list group
+ * @cb: the callback to call when a notify is required
+ * @opaque: the opaque pointer to be passed to the callback.
+ *
+ * Initialise a timer list group. This must already be
+ * allocated in memory and zeroed. The notifier callback is
+ * called whenever a clock in the timer list group is
+ * reenabled or whenever a timer associated with any timer
+ * list is modified. If @cb is specified as null, qemu_notify()
+ * is used instead.
+ */
+void timerlistgroup_init(QEMUTimerListGroup *tlg,
+ QEMUTimerListNotifyCB *cb, void *opaque);
+
+/**
+ * timerlistgroup_deinit:
+ * @tlg: the timer list group
+ *
+ * Deinitialise a timer list group. This must already be
+ * initialised. Note the memory is not freed.
+ */
+void timerlistgroup_deinit(QEMUTimerListGroup *tlg);
+
+/**
+ * timerlistgroup_run_timers:
+ * @tlg: the timer list group
+ *
+ * Run the timers associated with a timer list group.
+ * This will run timers on multiple clocks.
+ *
+ * Returns: true if any timer callback ran
+ */
+bool timerlistgroup_run_timers(QEMUTimerListGroup *tlg);
+
+/**
+ * timerlistgroup_deadline_ns:
+ * @tlg: the timer list group
+ *
+ * Determine the deadline of the soonest timer to
+ * expire associated with any timer list linked to
+ * the timer list group. Only clocks suitable for
+ * deadline calculation are included.
+ *
+ * Returns: the deadline in nanoseconds or -1 if no
+ * timers are to expire.
+ */
+int64_t timerlistgroup_deadline_ns(QEMUTimerListGroup *tlg);
+
+/*
+ * QEMUTimer
+ */
+
+/**
+ * timer_init:
+ * @ts: the timer to be initialised
+ * @timer_list: the timer list to attach the timer to
+ * @scale: the scale value for the timer
+ * @cb: the callback to be called when the timer expires
+ * @opaque: the opaque pointer to be passed to the callback
+ *
+ * Initialise a new timer and associate it with @timer_list.
+ * The caller is responsible for allocating the memory.
+ *
+ * There is no explicit deinit call; simply make sure the
+ * timer is not on an active list, using timer_del.
+ */
+void timer_init(QEMUTimer *ts,
+ QEMUTimerList *timer_list, int scale,
+ QEMUTimerCB *cb, void *opaque);
+
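As the doc comment above suggests, timer_init with caller-allocated storage is the preferred form. A minimal sketch, assuming a hypothetical MyDeviceState and tick callback; main_loop_tlg and the other calls are the ones declared in this header.

typedef struct MyDeviceState {
    QEMUTimer tick;                 /* embedded, so no timer_free() needed */
} MyDeviceState;

static void my_device_tick(void *opaque)
{
    /* hypothetical periodic work */
}

static void my_device_start(MyDeviceState *s)
{
    timer_init(&s->tick, main_loop_tlg.tl[QEMU_CLOCK_VIRTUAL], SCALE_NS,
               my_device_tick, s);
    timer_mod(&s->tick, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 1000000);
}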
+/**
+ * timer_new_tl:
+ * @timer_list: the timer list to attach the timer to
+ * @scale: the scale value for the timer
+ * @cb: the callback to be called when the timer expires
+ * @opaque: the opaque pointer to be passed to the callback
+ *
+ * Create a new timer and associate it with @timer_list.
+ * The memory is allocated by the function.
+ *
+ * This is not the preferred interface unless you know you
+ * are going to call timer_free. Use timer_init instead.
+ *
+ * Returns: a pointer to the timer
+ */
+static inline QEMUTimer *timer_new_tl(QEMUTimerList *timer_list,
+ int scale,
+ QEMUTimerCB *cb,
+ void *opaque)
+{
+ QEMUTimer *ts = g_malloc0(sizeof(QEMUTimer));
+ timer_init(ts, timer_list, scale, cb, opaque);
+ return ts;
+}
-static inline QEMUTimer *qemu_new_timer_ns(QEMUClock *clock, QEMUTimerCB *cb,
- void *opaque)
+/**
+ * timer_new:
+ * @type: the clock type to use
+ * @scale: the scale value for the timer
+ * @cb: the callback to be called when the timer expires
+ * @opaque: the opaque pointer to be passed to the callback
+ *
+ * Create a new timer and associate it with the default
+ * timer list for the clock type @type.
+ *
+ * Returns: a pointer to the timer
+ */
+static inline QEMUTimer *timer_new(QEMUClockType type, int scale,
+ QEMUTimerCB *cb, void *opaque)
{
- return qemu_new_timer(clock, SCALE_NS, cb, opaque);
+ return timer_new_tl(main_loop_tlg.tl[type], scale, cb, opaque);
}
-static inline QEMUTimer *qemu_new_timer_ms(QEMUClock *clock, QEMUTimerCB *cb,
- void *opaque)
+/**
+ * timer_new_ns:
+ * @type: the clock type to associate with the timer
+ * @cb: the callback to call when the timer expires
+ * @opaque: the opaque pointer to pass to the callback
+ *
+ * Create a new timer with nanosecond scale on the default timer list
+ * associated with the clock.
+ *
+ * Returns: a pointer to the newly created timer
+ */
+static inline QEMUTimer *timer_new_ns(QEMUClockType type, QEMUTimerCB *cb,
+ void *opaque)
{
- return qemu_new_timer(clock, SCALE_MS, cb, opaque);
+ return timer_new(type, SCALE_NS, cb, opaque);
}
-static inline int64_t qemu_get_clock_ms(QEMUClock *clock)
+/**
+ * timer_new_us:
+ * @type: the clock type to associate with the timer
+ * @cb: the callback to call when the timer expires
+ * @opaque: the opaque pointer to pass to the callback
+ *
+ * Create a new timer with microsecond scale on the default timer list
+ * associated with the clock.
+ *
+ * Returns: a pointer to the newly created timer
+ */
+static inline QEMUTimer *timer_new_us(QEMUClockType type, QEMUTimerCB *cb,
+ void *opaque)
{
- return qemu_get_clock_ns(clock) / SCALE_MS;
+ return timer_new(type, SCALE_US, cb, opaque);
}
+/**
+ * timer_new_ms:
+ * @type: the clock type to associate with the timer
+ * @cb: the callback to call when the timer expires
+ * @opaque: the opaque pointer to pass to the callback
+ *
+ * Create a new timer with millisecond scale on the default timer list
+ * associated with the clock.
+ *
+ * Returns: a pointer to the newly created timer
+ */
+static inline QEMUTimer *timer_new_ms(QEMUClockType type, QEMUTimerCB *cb,
+ void *opaque)
+{
+ return timer_new(type, SCALE_MS, cb, opaque);
+}
+
+/**
+ * timer_free:
+ * @ts: the timer
+ *
+ * Free a timer (it must not be on the active list)
+ */
+void timer_free(QEMUTimer *ts);
+
+/**
+ * timer_del:
+ * @ts: the timer
+ *
+ * Delete a timer from the active list.
+ */
+void timer_del(QEMUTimer *ts);
+
+/**
+ * timer_mod_ns:
+ * @ts: the timer
+ * @expire_time: the expiry time in nanoseconds
+ *
+ * Modify a timer to expire at @expire_time
+ */
+void timer_mod_ns(QEMUTimer *ts, int64_t expire_time);
+
+/**
+ * timer_mod:
+ * @ts: the timer
+ * @expire_time: the expiry time in the units associated with the timer
+ *
+ * Modify a timer to expire at @expire_time, taking into
+ * account the scale associated with the timer.
+ */
+void timer_mod(QEMUTimer *ts, int64_t expire_time);
+
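The usual lifecycle under the new API is: allocate with one of the timer_new*() helpers, arm with timer_mod(), and eventually timer_del() followed by timer_free(). A sketch; my_cb, my_start, my_stop and the 10 ms period are illustrative only:

    static void my_cb(void *opaque)
    {
        /* the timer has fired; re-arm from here if periodic behaviour is wanted */
    }

    static QEMUTimer *my_start(void *opaque)
    {
        QEMUTimer *t = timer_new_ns(QEMU_CLOCK_VIRTUAL, my_cb, opaque);

        /* expire 10 ms of guest time from now (SCALE_MS nanoseconds per ms) */
        timer_mod(t, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 10 * SCALE_MS);
        return t;
    }

    static void my_stop(QEMUTimer *t)
    {
        timer_del(t);   /* take it off the active list first */
        timer_free(t);  /* then release its memory */
    }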
+/**
+ * timer_pending:
+ * @ts: the timer
+ *
+ * Determines whether a timer is pending (i.e. is on the
+ * active list of timers), whether or not it has yet expired.
+ *
+ * Returns: true if the timer is pending
+ */
+bool timer_pending(QEMUTimer *ts);
+
+/**
+ * timer_expired:
+ * @timer_head: the timer to check
+ * @current_time: the time to check against, in the timer's scale units
+ *
+ * Determines whether a timer has expired by @current_time.
+ *
+ * Returns: true if the timer has expired
+ */
+bool timer_expired(QEMUTimer *timer_head, int64_t current_time);
+
+/**
+ * timer_expire_time_ns:
+ * @ts: the timer
+ *
+ * Determine the expiry time of a timer
+ *
+ * Returns: the expiry time in nanoseconds, or -1 if the timer
+ * is not pending
+ */
+uint64_t timer_expire_time_ns(QEMUTimer *ts);
+
+/**
+ * timer_get:
+ * @f: the file
+ * @ts: the timer
+ *
+ * Read a timer @ts from a file @f
+ */
+void timer_get(QEMUFile *f, QEMUTimer *ts);
+
+/**
+ * timer_put:
+ * @f: the file
+ * @ts: the timer
+ *
+ * Write a timer @ts to a file @f
+ */
+void timer_put(QEMUFile *f, QEMUTimer *ts);
+
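timer_get() and timer_put() are the primitives behind the vmstate timer support converted later in this patch (see the savevm.c hunk); hand-written save/load hooks can also call them directly. A sketch in which MyDevState, its timer field and the hook names are hypothetical:

    typedef struct MyDevState {
        QEMUTimer *timer;
        /* ... device state ... */
    } MyDevState;

    static void my_dev_save(QEMUFile *f, void *opaque)
    {
        MyDevState *s = opaque;
        timer_put(f, s->timer);   /* records the expiry time, or -1 if not pending */
    }

    static int my_dev_load(QEMUFile *f, void *opaque, int version_id)
    {
        MyDevState *s = opaque;
        timer_get(f, s->timer);   /* re-arms the timer, or deletes it if -1 was saved */
        return 0;
    }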
+/*
+ * General utility functions
+ */
+
+/**
+ * qemu_timeout_ns_to_ms:
+ * @ns: nanosecond timeout value
+ *
+ * Convert a nanosecond timeout value (or -1) to
+ * a millisecond value (or -1), always rounding up.
+ *
+ * Returns: millisecond timeout value
+ */
+int qemu_timeout_ns_to_ms(int64_t ns);
+
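Because the conversion always rounds up, a caller never sleeps for less time than it asked for. A few sanity checks (a sketch, not part of the patch) illustrating the intended behaviour:

    #include <assert.h>

    static void check_ns_to_ms_rounding(void)
    {
        assert(qemu_timeout_ns_to_ms(-1) == -1);          /* infinite stays infinite */
        assert(qemu_timeout_ns_to_ms(0) == 0);
        assert(qemu_timeout_ns_to_ms(1) == 1);            /* 1 ns rounds up to 1 ms */
        assert(qemu_timeout_ns_to_ms(SCALE_MS) == 1);     /* exactly 1 ms */
        assert(qemu_timeout_ns_to_ms(SCALE_MS + 1) == 2); /* just over 1 ms rounds to 2 */
    }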
+/**
+ * qemu_poll_ns:
+ * @fds: Array of file descriptors
+ * @nfds: number of file descriptors
+ * @timeout: timeout in nanoseconds
+ *
+ * Perform a poll like g_poll but with a timeout in nanoseconds.
+ * See g_poll documentation for further details.
+ *
+ * Returns: number of fds ready
+ */
+int qemu_poll_ns(GPollFD *fds, guint nfds, int64_t timeout);
+
+/**
+ * qemu_soonest_timeout:
+ * @timeout1: first timeout in nanoseconds (or -1 for infinite)
+ * @timeout2: second timeout in nanoseconds (or -1 for infinite)
+ *
+ * Calculates the soonest of two timeout values. -1 means infinite, which
+ * is later than any other value.
+ *
+ * Returns: soonest timeout value in nanoseconds (or -1 for infinite)
+ */
+static inline int64_t qemu_soonest_timeout(int64_t timeout1, int64_t timeout2)
+{
+ /* we can abuse the fact that -1 (which means infinite) is a maximal
+ * value when cast to unsigned. As this is disgusting, it's kept in
+ * one inline function.
+ */
+ return ((uint64_t) timeout1 < (uint64_t) timeout2) ? timeout1 : timeout2;
+}
+
+/**
+ * init_clocks:
+ *
+ * Initialise the clock & timer infrastructure
+ */
+void init_clocks(void);
+
+int64_t cpu_get_ticks(void);
+void cpu_enable_ticks(void);
+void cpu_disable_ticks(void);
+
static inline int64_t get_ticks_per_sec(void)
{
return 1000000000LL;
}
+/*
+ * Low level clock functions
+ */
+
/* real time host monotonic timer */
static inline int64_t get_clock_realtime(void)
{
@@ -128,9 +691,6 @@ static inline int64_t get_clock(void)
}
#endif
-void qemu_get_timer(QEMUFile *f, QEMUTimer *ts);
-void qemu_put_timer(QEMUFile *f, QEMUTimer *ts);
-
/* icount */
int64_t cpu_get_icount(void);
int64_t cpu_get_clock(void);
diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h
index ac9f8d41a3..3205540059 100644
--- a/include/qemu/typedefs.h
+++ b/include/qemu/typedefs.h
@@ -4,9 +4,12 @@
/* A load of opaque types so that device init declarations don't have to
pull in all the real definitions. */
typedef struct QEMUTimer QEMUTimer;
+typedef struct QEMUTimerListGroup QEMUTimerListGroup;
typedef struct QEMUFile QEMUFile;
typedef struct QEMUBH QEMUBH;
+typedef struct AioContext AioContext;
+
struct Monitor;
typedef struct Monitor Monitor;
typedef struct MigrationParams MigrationParams;
diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h
index d7a77b6488..b1aa059102 100644
--- a/include/sysemu/sysemu.h
+++ b/include/sysemu/sysemu.h
@@ -124,7 +124,7 @@ extern int boot_menu;
extern uint8_t *boot_splash_filedata;
extern size_t boot_splash_filedata_size;
extern uint8_t qemu_extra_params_fw[2];
-extern QEMUClock *rtc_clock;
+extern QEMUClockType rtc_clock;
#define MAX_NODES 64
#define MAX_CPUMASK_BITS 255
diff --git a/main-loop.c b/main-loop.c
index 2d9774ef75..1c38ea2b93 100644
--- a/main-loop.c
+++ b/main-loop.c
@@ -131,10 +131,6 @@ int qemu_init_main_loop(void)
GSource *src;
init_clocks();
- if (init_timer_alarm() < 0) {
- fprintf(stderr, "could not initialize alarm timer\n");
- exit(1);
- }
ret = qemu_signal_init();
if (ret) {
@@ -155,10 +151,11 @@ static int max_priority;
static int glib_pollfds_idx;
static int glib_n_poll_fds;
-static void glib_pollfds_fill(uint32_t *cur_timeout)
+static void glib_pollfds_fill(int64_t *cur_timeout)
{
GMainContext *context = g_main_context_default();
int timeout = 0;
+ int64_t timeout_ns;
int n;
g_main_context_prepare(context, &max_priority);
@@ -174,9 +171,13 @@ static void glib_pollfds_fill(uint32_t *cur_timeout)
glib_n_poll_fds);
} while (n != glib_n_poll_fds);
- if (timeout >= 0 && timeout < *cur_timeout) {
- *cur_timeout = timeout;
+ if (timeout < 0) {
+ timeout_ns = -1;
+ } else {
+ timeout_ns = (int64_t)timeout * (int64_t)SCALE_MS;
}
+
+ *cur_timeout = qemu_soonest_timeout(timeout_ns, *cur_timeout);
}
static void glib_pollfds_poll(void)
@@ -191,7 +192,7 @@ static void glib_pollfds_poll(void)
#define MAX_MAIN_LOOP_SPIN (1000)
-static int os_host_main_loop_wait(uint32_t timeout)
+static int os_host_main_loop_wait(int64_t timeout)
{
int ret;
static int spin_counter;
@@ -204,7 +205,7 @@ static int os_host_main_loop_wait(uint32_t timeout)
* print a message to the screen. If we run into this condition, create
* a fake timeout in order to give the VCPU threads a chance to run.
*/
- if (spin_counter > MAX_MAIN_LOOP_SPIN) {
+ if (!timeout && (spin_counter > MAX_MAIN_LOOP_SPIN)) {
static bool notified;
if (!notified) {
@@ -214,19 +215,19 @@ static int os_host_main_loop_wait(uint32_t timeout)
notified = true;
}
- timeout = 1;
+ timeout = SCALE_MS;
}
- if (timeout > 0) {
+ if (timeout) {
spin_counter = 0;
qemu_mutex_unlock_iothread();
} else {
spin_counter++;
}
- ret = g_poll((GPollFD *)gpollfds->data, gpollfds->len, timeout);
+ ret = qemu_poll_ns((GPollFD *)gpollfds->data, gpollfds->len, timeout);
- if (timeout > 0) {
+ if (timeout) {
qemu_mutex_lock_iothread();
}
@@ -373,7 +374,7 @@ static void pollfds_poll(GArray *pollfds, int nfds, fd_set *rfds,
}
}
-static int os_host_main_loop_wait(uint32_t timeout)
+static int os_host_main_loop_wait(int64_t timeout)
{
GMainContext *context = g_main_context_default();
GPollFD poll_fds[1024 * 2]; /* this is probably overkill */
@@ -382,6 +383,7 @@ static int os_host_main_loop_wait(uint32_t timeout)
PollingEntry *pe;
WaitObjects *w = &wait_objects;
gint poll_timeout;
+ int64_t poll_timeout_ns;
static struct timeval tv0;
fd_set rfds, wfds, xfds;
int nfds;
@@ -419,12 +421,17 @@ static int os_host_main_loop_wait(uint32_t timeout)
poll_fds[n_poll_fds + i].events = G_IO_IN;
}
- if (poll_timeout < 0 || timeout < poll_timeout) {
- poll_timeout = timeout;
+ if (poll_timeout < 0) {
+ poll_timeout_ns = -1;
+ } else {
+ poll_timeout_ns = (int64_t)poll_timeout * (int64_t)SCALE_MS;
}
+ poll_timeout_ns = qemu_soonest_timeout(poll_timeout_ns, timeout);
+
qemu_mutex_unlock_iothread();
- g_poll_ret = g_poll(poll_fds, n_poll_fds + w->num, poll_timeout);
+ g_poll_ret = qemu_poll_ns(poll_fds, n_poll_fds + w->num, poll_timeout_ns);
+
qemu_mutex_lock_iothread();
if (g_poll_ret > 0) {
for (i = 0; i < w->num; i++) {
@@ -449,6 +456,7 @@ int main_loop_wait(int nonblocking)
{
int ret;
uint32_t timeout = UINT32_MAX;
+ int64_t timeout_ns;
if (nonblocking) {
timeout = 0;
@@ -462,13 +470,24 @@ int main_loop_wait(int nonblocking)
slirp_pollfds_fill(gpollfds);
#endif
qemu_iohandler_fill(gpollfds);
- ret = os_host_main_loop_wait(timeout);
+
+ if (timeout == UINT32_MAX) {
+ timeout_ns = -1;
+ } else {
+ timeout_ns = (uint64_t)timeout * (int64_t)(SCALE_MS);
+ }
+
+ timeout_ns = qemu_soonest_timeout(timeout_ns,
+ timerlistgroup_deadline_ns(
+ &main_loop_tlg));
+
+ ret = os_host_main_loop_wait(timeout_ns);
qemu_iohandler_poll(gpollfds, ret);
#ifdef CONFIG_SLIRP
slirp_pollfds_poll(gpollfds, (ret < 0));
#endif
- qemu_run_all_timers();
+ qemu_clock_run_all_timers();
return ret;
}
diff --git a/migration-exec.c b/migration-exec.c
index deab4e378e..479024752f 100644
--- a/migration-exec.c
+++ b/migration-exec.c
@@ -17,6 +17,7 @@
#include "qemu-common.h"
#include "qemu/sockets.h"
+#include "qemu/main-loop.h"
#include "migration/migration.h"
#include "migration/qemu-file.h"
#include "block/block.h"
diff --git a/migration-fd.c b/migration-fd.c
index 3d4613cbaf..d2e523af74 100644
--- a/migration-fd.c
+++ b/migration-fd.c
@@ -14,6 +14,7 @@
*/
#include "qemu-common.h"
+#include "qemu/main-loop.h"
#include "qemu/sockets.h"
#include "migration/migration.h"
#include "monitor/monitor.h"
diff --git a/migration-tcp.c b/migration-tcp.c
index b20ee58f55..782572de82 100644
--- a/migration-tcp.c
+++ b/migration-tcp.c
@@ -18,6 +18,7 @@
#include "migration/migration.h"
#include "migration/qemu-file.h"
#include "block/block.h"
+#include "qemu/main-loop.h"
//#define DEBUG_MIGRATION_TCP
diff --git a/migration-unix.c b/migration-unix.c
index 94b7022fc8..651fc5b707 100644
--- a/migration-unix.c
+++ b/migration-unix.c
@@ -15,6 +15,7 @@
#include "qemu-common.h"
#include "qemu/sockets.h"
+#include "qemu/main-loop.h"
#include "migration/migration.h"
#include "migration/qemu-file.h"
#include "block/block.h"
diff --git a/migration.c b/migration.c
index 1402fa7680..200d404547 100644
--- a/migration.c
+++ b/migration.c
@@ -14,6 +14,7 @@
*/
#include "qemu-common.h"
+#include "qemu/main-loop.h"
#include "migration/migration.h"
#include "monitor/monitor.h"
#include "migration/qemu-file.h"
@@ -197,7 +198,7 @@ MigrationInfo *qmp_query_migrate(Error **errp)
info->has_status = true;
info->status = g_strdup("active");
info->has_total_time = true;
- info->total_time = qemu_get_clock_ms(rt_clock)
+ info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
- s->total_time;
info->has_expected_downtime = true;
info->expected_downtime = s->expected_downtime;
@@ -375,7 +376,7 @@ static MigrationState *migrate_init(const MigrationParams *params)
s->state = MIG_STATE_SETUP;
trace_migrate_set_state(MIG_STATE_SETUP);
- s->total_time = qemu_get_clock_ms(rt_clock);
+ s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
return s;
}
@@ -544,8 +545,8 @@ int64_t migrate_xbzrle_cache_size(void)
static void *migration_thread(void *opaque)
{
MigrationState *s = opaque;
- int64_t initial_time = qemu_get_clock_ms(rt_clock);
- int64_t setup_start = qemu_get_clock_ms(host_clock);
+ int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
+ int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
int64_t initial_bytes = 0;
int64_t max_size = 0;
int64_t start_time = initial_time;
@@ -554,7 +555,7 @@ static void *migration_thread(void *opaque)
DPRINTF("beginning savevm\n");
qemu_savevm_state_begin(s->file, &s->params);
- s->setup_time = qemu_get_clock_ms(host_clock) - setup_start;
+ s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
migrate_set_state(s, MIG_STATE_SETUP, MIG_STATE_ACTIVE);
DPRINTF("setup complete\n");
@@ -574,7 +575,7 @@ static void *migration_thread(void *opaque)
DPRINTF("done iterating\n");
qemu_mutex_lock_iothread();
- start_time = qemu_get_clock_ms(rt_clock);
+ start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
old_vm_running = runstate_is_running();
@@ -601,7 +602,7 @@ static void *migration_thread(void *opaque)
migrate_set_state(s, MIG_STATE_ACTIVE, MIG_STATE_ERROR);
break;
}
- current_time = qemu_get_clock_ms(rt_clock);
+ current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
if (current_time >= initial_time + BUFFER_DELAY) {
uint64_t transferred_bytes = qemu_ftell(s->file) - initial_bytes;
uint64_t time_spent = current_time - initial_time;
@@ -632,7 +633,7 @@ static void *migration_thread(void *opaque)
qemu_mutex_lock_iothread();
if (s->state == MIG_STATE_COMPLETED) {
- int64_t end_time = qemu_get_clock_ms(rt_clock);
+ int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
s->total_time = end_time - s->total_time;
s->downtime = end_time - start_time;
runstate_set(RUN_STATE_POSTMIGRATE);
diff --git a/monitor.c b/monitor.c
index da9c9a222b..ee9744cfb6 100644
--- a/monitor.c
+++ b/monitor.c
@@ -537,7 +537,7 @@ monitor_protocol_event_queue(MonitorEvent event,
QObject *data)
{
MonitorEventState *evstate;
- int64_t now = qemu_get_clock_ns(rt_clock);
+ int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
assert(event < QEVENT_MAX);
qemu_mutex_lock(&monitor_event_state_lock);
@@ -564,7 +564,7 @@ monitor_protocol_event_queue(MonitorEvent event,
qobject_decref(evstate->data);
} else {
int64_t then = evstate->last + evstate->rate;
- qemu_mod_timer_ns(evstate->timer, then);
+ timer_mod_ns(evstate->timer, then);
}
evstate->data = data;
qobject_incref(evstate->data);
@@ -584,7 +584,7 @@ monitor_protocol_event_queue(MonitorEvent event,
static void monitor_protocol_event_handler(void *opaque)
{
MonitorEventState *evstate = opaque;
- int64_t now = qemu_get_clock_ns(rt_clock);
+ int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
qemu_mutex_lock(&monitor_event_state_lock);
@@ -622,7 +622,7 @@ monitor_protocol_event_throttle(MonitorEvent event,
trace_monitor_protocol_event_throttle(event, rate);
evstate->event = event;
evstate->rate = rate * SCALE_MS;
- evstate->timer = qemu_new_timer(rt_clock,
+ evstate->timer = timer_new(QEMU_CLOCK_REALTIME,
SCALE_MS,
monitor_protocol_event_handler,
evstate);
diff --git a/nbd.c b/nbd.c
index 2606403a41..0fd05836ca 100644
--- a/nbd.c
+++ b/nbd.c
@@ -38,6 +38,7 @@
#include "qemu/sockets.h"
#include "qemu/queue.h"
+#include "qemu/main-loop.h"
//#define DEBUG_NBD
diff --git a/net/dump.c b/net/dump.c
index 4119721720..9d3a09e334 100644
--- a/net/dump.c
+++ b/net/dump.c
@@ -69,7 +69,7 @@ static ssize_t dump_receive(NetClientState *nc, const uint8_t *buf, size_t size)
return size;
}
- ts = muldiv64(qemu_get_clock_ns(vm_clock), 1000000, get_ticks_per_sec());
+ ts = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), 1000000, get_ticks_per_sec());
caplen = size > s->pcap_caplen ? s->pcap_caplen : size;
hdr.ts.tv_sec = ts / 1000000 + s->start_ts;
diff --git a/net/net.c b/net/net.c
index c0d61bf78b..114859267e 100644
--- a/net/net.c
+++ b/net/net.c
@@ -36,6 +36,7 @@
#include "qmp-commands.h"
#include "hw/qdev.h"
#include "qemu/iov.h"
+#include "qemu/main-loop.h"
#include "qapi-visit.h"
#include "qapi/opts-visitor.h"
#include "qapi/dealloc-visitor.h"
diff --git a/net/socket.c b/net/socket.c
index 87af1d3d39..e61309d8d5 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -31,6 +31,7 @@
#include "qemu/option.h"
#include "qemu/sockets.h"
#include "qemu/iov.h"
+#include "qemu/main-loop.h"
typedef struct NetSocketState {
NetClientState nc;
diff --git a/qemu-char.c b/qemu-char.c
index 5446b8834c..62594965bd 100644
--- a/qemu-char.c
+++ b/qemu-char.c
@@ -281,7 +281,7 @@ static int mux_chr_write(CharDriverState *chr, const uint8_t *buf, int len)
int64_t ti;
int secs;
- ti = qemu_get_clock_ms(rt_clock);
+ ti = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
if (d->timestamps_start == -1)
d->timestamps_start = ti;
ti -= d->timestamps_start;
diff --git a/qemu-coroutine-io.c b/qemu-coroutine-io.c
index c4df35a640..054ca70627 100644
--- a/qemu-coroutine-io.c
+++ b/qemu-coroutine-io.c
@@ -26,6 +26,7 @@
#include "qemu/sockets.h"
#include "block/coroutine.h"
#include "qemu/iov.h"
+#include "qemu/main-loop.h"
ssize_t coroutine_fn
qemu_co_sendv_recvv(int sockfd, struct iovec *iov, unsigned iov_cnt,
diff --git a/qemu-coroutine-sleep.c b/qemu-coroutine-sleep.c
index 169ce5ccc9..f6db978c1d 100644
--- a/qemu-coroutine-sleep.c
+++ b/qemu-coroutine-sleep.c
@@ -26,14 +26,14 @@ static void co_sleep_cb(void *opaque)
qemu_coroutine_enter(sleep_cb->co, NULL);
}
-void coroutine_fn co_sleep_ns(QEMUClock *clock, int64_t ns)
+void coroutine_fn co_sleep_ns(QEMUClockType type, int64_t ns)
{
CoSleepCB sleep_cb = {
.co = qemu_coroutine_self(),
};
- sleep_cb.ts = qemu_new_timer(clock, SCALE_NS, co_sleep_cb, &sleep_cb);
- qemu_mod_timer(sleep_cb.ts, qemu_get_clock_ns(clock) + ns);
+ sleep_cb.ts = timer_new(type, SCALE_NS, co_sleep_cb, &sleep_cb);
+ timer_mod(sleep_cb.ts, qemu_clock_get_ns(type) + ns);
qemu_coroutine_yield();
- qemu_del_timer(sleep_cb.ts);
- qemu_free_timer(sleep_cb.ts);
+ timer_del(sleep_cb.ts);
+ timer_free(sleep_cb.ts);
}
diff --git a/qemu-io-cmds.c b/qemu-io-cmds.c
index ffbcf31cfc..f91b6c4f02 100644
--- a/qemu-io-cmds.c
+++ b/qemu-io-cmds.c
@@ -10,6 +10,7 @@
#include "qemu-io.h"
#include "block/block_int.h"
+#include "qemu/main-loop.h"
#define CMD_NOFILE_OK 0x01
diff --git a/qemu-nbd.c b/qemu-nbd.c
index 9c31d45706..f044546c28 100644
--- a/qemu-nbd.c
+++ b/qemu-nbd.c
@@ -19,6 +19,7 @@
#include "qemu-common.h"
#include "block/block.h"
#include "block/nbd.h"
+#include "qemu/main-loop.h"
#include <stdarg.h>
#include <stdio.h>
diff --git a/qemu-timer.c b/qemu-timer.c
index b2d95e2fec..95ff47fef3 100644
--- a/qemu-timer.c
+++ b/qemu-timer.c
@@ -33,276 +33,277 @@
#include <pthread.h>
#endif
-#ifdef _WIN32
-#include <mmsystem.h>
+#ifdef CONFIG_PPOLL
+#include <poll.h>
+#endif
+
+#ifdef CONFIG_PRCTL_PR_SET_TIMERSLACK
+#include <sys/prctl.h>
#endif
/***********************************************************/
/* timers */
-#define QEMU_CLOCK_REALTIME 0
-#define QEMU_CLOCK_VIRTUAL 1
-#define QEMU_CLOCK_HOST 2
-
-struct QEMUClock {
- QEMUTimer *active_timers;
+typedef struct QEMUClock {
+ QLIST_HEAD(, QEMUTimerList) timerlists;
NotifierList reset_notifiers;
int64_t last;
- int type;
+ QEMUClockType type;
bool enabled;
-};
+} QEMUClock;
-struct QEMUTimer {
- int64_t expire_time; /* in nanoseconds */
- QEMUClock *clock;
- QEMUTimerCB *cb;
- void *opaque;
- QEMUTimer *next;
- int scale;
-};
+QEMUTimerListGroup main_loop_tlg;
+QEMUClock qemu_clocks[QEMU_CLOCK_MAX];
-struct qemu_alarm_timer {
- char const *name;
- int (*start)(struct qemu_alarm_timer *t);
- void (*stop)(struct qemu_alarm_timer *t);
- void (*rearm)(struct qemu_alarm_timer *t, int64_t nearest_delta_ns);
-#if defined(__linux__)
- timer_t timer;
- int fd;
-#elif defined(_WIN32)
- HANDLE timer;
-#endif
- bool expired;
- bool pending;
+/* A QEMUTimerList is a list of timers attached to a clock. More
+ * than one QEMUTimerList can be attached to each clock, for instance
+ * used by different AioContexts / threads. Each clock also has
+ * a list of the QEMUTimerLists associated with it, so that
+ * re-enabling the clock can call all of their notifiers.
+ */
+
+struct QEMUTimerList {
+ QEMUClock *clock;
+ QEMUTimer *active_timers;
+ QLIST_ENTRY(QEMUTimerList) list;
+ QEMUTimerListNotifyCB *notify_cb;
+ void *notify_opaque;
};
-static struct qemu_alarm_timer *alarm_timer;
+/**
+ * qemu_clock_ptr:
+ * @type: type of clock
+ *
+ * Translate a clock type into a pointer to QEMUClock object.
+ *
+ * Returns: a pointer to the QEMUClock object
+ */
+static inline QEMUClock *qemu_clock_ptr(QEMUClockType type)
+{
+ return &qemu_clocks[type];
+}
-static bool qemu_timer_expired_ns(QEMUTimer *timer_head, int64_t current_time)
+static bool timer_expired_ns(QEMUTimer *timer_head, int64_t current_time)
{
return timer_head && (timer_head->expire_time <= current_time);
}
-static int64_t qemu_next_alarm_deadline(void)
+QEMUTimerList *timerlist_new(QEMUClockType type,
+ QEMUTimerListNotifyCB *cb,
+ void *opaque)
{
- int64_t delta = INT64_MAX;
- int64_t rtdelta;
-
- if (!use_icount && vm_clock->enabled && vm_clock->active_timers) {
- delta = vm_clock->active_timers->expire_time -
- qemu_get_clock_ns(vm_clock);
- }
- if (host_clock->enabled && host_clock->active_timers) {
- int64_t hdelta = host_clock->active_timers->expire_time -
- qemu_get_clock_ns(host_clock);
- if (hdelta < delta) {
- delta = hdelta;
- }
- }
- if (rt_clock->enabled && rt_clock->active_timers) {
- rtdelta = (rt_clock->active_timers->expire_time -
- qemu_get_clock_ns(rt_clock));
- if (rtdelta < delta) {
- delta = rtdelta;
- }
- }
+ QEMUTimerList *timer_list;
+ QEMUClock *clock = qemu_clock_ptr(type);
- return delta;
+ timer_list = g_malloc0(sizeof(QEMUTimerList));
+ timer_list->clock = clock;
+ timer_list->notify_cb = cb;
+ timer_list->notify_opaque = opaque;
+ QLIST_INSERT_HEAD(&clock->timerlists, timer_list, list);
+ return timer_list;
}
-static void qemu_rearm_alarm_timer(struct qemu_alarm_timer *t)
+void timerlist_free(QEMUTimerList *timer_list)
{
- int64_t nearest_delta_ns = qemu_next_alarm_deadline();
- if (nearest_delta_ns < INT64_MAX) {
- t->rearm(t, nearest_delta_ns);
+ assert(!timerlist_has_timers(timer_list));
+ if (timer_list->clock) {
+ QLIST_REMOVE(timer_list, list);
}
+ g_free(timer_list);
}
-/* TODO: MIN_TIMER_REARM_NS should be optimized */
-#define MIN_TIMER_REARM_NS 250000
-
-#ifdef _WIN32
-
-static int mm_start_timer(struct qemu_alarm_timer *t);
-static void mm_stop_timer(struct qemu_alarm_timer *t);
-static void mm_rearm_timer(struct qemu_alarm_timer *t, int64_t delta);
-
-static int win32_start_timer(struct qemu_alarm_timer *t);
-static void win32_stop_timer(struct qemu_alarm_timer *t);
-static void win32_rearm_timer(struct qemu_alarm_timer *t, int64_t delta);
-
-#else
-
-static int unix_start_timer(struct qemu_alarm_timer *t);
-static void unix_stop_timer(struct qemu_alarm_timer *t);
-static void unix_rearm_timer(struct qemu_alarm_timer *t, int64_t delta);
-
-#ifdef __linux__
-
-static int dynticks_start_timer(struct qemu_alarm_timer *t);
-static void dynticks_stop_timer(struct qemu_alarm_timer *t);
-static void dynticks_rearm_timer(struct qemu_alarm_timer *t, int64_t delta);
-
-#endif /* __linux__ */
-
-#endif /* _WIN32 */
+static void qemu_clock_init(QEMUClockType type)
+{
+ QEMUClock *clock = qemu_clock_ptr(type);
-static struct qemu_alarm_timer alarm_timers[] = {
-#ifndef _WIN32
-#ifdef __linux__
- {"dynticks", dynticks_start_timer,
- dynticks_stop_timer, dynticks_rearm_timer},
-#endif
- {"unix", unix_start_timer, unix_stop_timer, unix_rearm_timer},
-#else
- {"mmtimer", mm_start_timer, mm_stop_timer, mm_rearm_timer},
- {"dynticks", win32_start_timer, win32_stop_timer, win32_rearm_timer},
-#endif
- {NULL, }
-};
+ clock->type = type;
+ clock->enabled = true;
+ clock->last = INT64_MIN;
+ QLIST_INIT(&clock->timerlists);
+ notifier_list_init(&clock->reset_notifiers);
+ main_loop_tlg.tl[type] = timerlist_new(type, NULL, NULL);
+}
-static void show_available_alarms(void)
+bool qemu_clock_use_for_deadline(QEMUClockType type)
{
- int i;
-
- printf("Available alarm timers, in order of precedence:\n");
- for (i = 0; alarm_timers[i].name; i++)
- printf("%s\n", alarm_timers[i].name);
+ return !(use_icount && (type == QEMU_CLOCK_VIRTUAL));
}
-void configure_alarms(char const *opt)
+void qemu_clock_notify(QEMUClockType type)
{
- int i;
- int cur = 0;
- int count = ARRAY_SIZE(alarm_timers) - 1;
- char *arg;
- char *name;
- struct qemu_alarm_timer tmp;
+ QEMUTimerList *timer_list;
+ QEMUClock *clock = qemu_clock_ptr(type);
+ QLIST_FOREACH(timer_list, &clock->timerlists, list) {
+ timerlist_notify(timer_list);
+ }
+}
- if (is_help_option(opt)) {
- show_available_alarms();
- exit(0);
+void qemu_clock_enable(QEMUClockType type, bool enabled)
+{
+ QEMUClock *clock = qemu_clock_ptr(type);
+ bool old = clock->enabled;
+ clock->enabled = enabled;
+ if (enabled && !old) {
+ qemu_clock_notify(type);
}
+}
- arg = g_strdup(opt);
+bool timerlist_has_timers(QEMUTimerList *timer_list)
+{
+ return !!timer_list->active_timers;
+}
- /* Reorder the array */
- name = strtok(arg, ",");
- while (name) {
- for (i = 0; i < count && alarm_timers[i].name; i++) {
- if (!strcmp(alarm_timers[i].name, name))
- break;
- }
+bool qemu_clock_has_timers(QEMUClockType type)
+{
+ return timerlist_has_timers(
+ main_loop_tlg.tl[type]);
+}
- if (i == count) {
- fprintf(stderr, "Unknown clock %s\n", name);
- goto next;
- }
+bool timerlist_expired(QEMUTimerList *timer_list)
+{
+ return (timer_list->active_timers &&
+ timer_list->active_timers->expire_time <
+ qemu_clock_get_ns(timer_list->clock->type));
+}
- if (i < cur)
- /* Ignore */
- goto next;
+bool qemu_clock_expired(QEMUClockType type)
+{
+ return timerlist_expired(
+ main_loop_tlg.tl[type]);
+}
- /* Swap */
- tmp = alarm_timers[i];
- alarm_timers[i] = alarm_timers[cur];
- alarm_timers[cur] = tmp;
+/*
+ * Unlike the legacy qemu_clock_deadline(), return -1 for no deadline
+ * and do not cap to 2^32, as we know the result is always positive.
+ */
+
+int64_t timerlist_deadline_ns(QEMUTimerList *timer_list)
+{
+ int64_t delta;
- cur++;
-next:
- name = strtok(NULL, ",");
+ if (!timer_list->clock->enabled || !timer_list->active_timers) {
+ return -1;
}
- g_free(arg);
+ delta = timer_list->active_timers->expire_time -
+ qemu_clock_get_ns(timer_list->clock->type);
- if (cur) {
- /* Disable remaining timers */
- for (i = cur; i < count; i++)
- alarm_timers[i].name = NULL;
- } else {
- show_available_alarms();
- exit(1);
+ if (delta <= 0) {
+ return 0;
}
-}
-QEMUClock *rt_clock;
-QEMUClock *vm_clock;
-QEMUClock *host_clock;
+ return delta;
+}
-static QEMUClock *qemu_new_clock(int type)
+/* Calculate the soonest deadline across all timerlists attached
+ * to the clock. This is used for the icount timeout so we
+ * ignore whether or not the clock should be used in deadline
+ * calculations.
+ */
+int64_t qemu_clock_deadline_ns_all(QEMUClockType type)
{
- QEMUClock *clock;
-
- clock = g_malloc0(sizeof(QEMUClock));
- clock->type = type;
- clock->enabled = true;
- clock->last = INT64_MIN;
- notifier_list_init(&clock->reset_notifiers);
- return clock;
+ int64_t deadline = -1;
+ QEMUTimerList *timer_list;
+ QEMUClock *clock = qemu_clock_ptr(type);
+ QLIST_FOREACH(timer_list, &clock->timerlists, list) {
+ deadline = qemu_soonest_timeout(deadline,
+ timerlist_deadline_ns(timer_list));
+ }
+ return deadline;
}
-void qemu_clock_enable(QEMUClock *clock, bool enabled)
+QEMUClockType timerlist_get_clock(QEMUTimerList *timer_list)
{
- bool old = clock->enabled;
- clock->enabled = enabled;
- if (enabled && !old) {
- qemu_rearm_alarm_timer(alarm_timer);
- }
+ return timer_list->clock->type;
}
-int64_t qemu_clock_has_timers(QEMUClock *clock)
+QEMUTimerList *qemu_clock_get_main_loop_timerlist(QEMUClockType type)
{
- return !!clock->active_timers;
+ return main_loop_tlg.tl[type];
}
-int64_t qemu_clock_expired(QEMUClock *clock)
+void timerlist_notify(QEMUTimerList *timer_list)
{
- return (clock->active_timers &&
- clock->active_timers->expire_time < qemu_get_clock_ns(clock));
+ if (timer_list->notify_cb) {
+ timer_list->notify_cb(timer_list->notify_opaque);
+ } else {
+ qemu_notify_event();
+ }
}
-int64_t qemu_clock_deadline(QEMUClock *clock)
+/* Transition function to convert a nanosecond timeout to ms.
+ * This is used where a system does not support ppoll().
+ */
+int qemu_timeout_ns_to_ms(int64_t ns)
{
- /* To avoid problems with overflow limit this to 2^32. */
- int64_t delta = INT32_MAX;
+ int64_t ms;
+ if (ns < 0) {
+ return -1;
+ }
- if (clock->active_timers) {
- delta = clock->active_timers->expire_time - qemu_get_clock_ns(clock);
+ if (!ns) {
+ return 0;
}
- if (delta < 0) {
- delta = 0;
+
+ /* Always round up, because it's better to wait too long than to wait too
+ * little and effectively busy-wait
+ */
+ ms = (ns + SCALE_MS - 1) / SCALE_MS;
+
+ /* To avoid overflow problems, limit this to 2^31, i.e. approx 25 days */
+ if (ms > (int64_t) INT32_MAX) {
+ ms = INT32_MAX;
}
- return delta;
+
+ return (int) ms;
}
-QEMUTimer *qemu_new_timer(QEMUClock *clock, int scale,
- QEMUTimerCB *cb, void *opaque)
+
+/* QEMU implementation of g_poll() which uses a nanosecond timeout but is
+ * otherwise identical to g_poll().
+ */
+int qemu_poll_ns(GPollFD *fds, guint nfds, int64_t timeout)
{
- QEMUTimer *ts;
+#ifdef CONFIG_PPOLL
+ if (timeout < 0) {
+ return ppoll((struct pollfd *)fds, nfds, NULL, NULL);
+ } else {
+ struct timespec ts;
+ ts.tv_sec = timeout / 1000000000LL;
+ ts.tv_nsec = timeout % 1000000000LL;
+ return ppoll((struct pollfd *)fds, nfds, &ts, NULL);
+ }
+#else
+ return g_poll(fds, nfds, qemu_timeout_ns_to_ms(timeout));
+#endif
+}
- ts = g_malloc0(sizeof(QEMUTimer));
- ts->clock = clock;
+
+void timer_init(QEMUTimer *ts,
+ QEMUTimerList *timer_list, int scale,
+ QEMUTimerCB *cb, void *opaque)
+{
+ ts->timer_list = timer_list;
ts->cb = cb;
ts->opaque = opaque;
ts->scale = scale;
- return ts;
}
-void qemu_free_timer(QEMUTimer *ts)
+void timer_free(QEMUTimer *ts)
{
g_free(ts);
}
/* stop a timer, but do not dealloc it */
-void qemu_del_timer(QEMUTimer *ts)
+void timer_del(QEMUTimer *ts)
{
QEMUTimer **pt, *t;
/* NOTE: this code must be signal safe because
- qemu_timer_expired() can be called from a signal. */
- pt = &ts->clock->active_timers;
+ timer_expired() can be called from a signal. */
+ pt = &ts->timer_list->active_timers;
for(;;) {
t = *pt;
if (!t)
@@ -317,19 +318,19 @@ void qemu_del_timer(QEMUTimer *ts)
/* modify the current timer so that it will be fired when current_time
>= expire_time. The corresponding callback will be called. */
-void qemu_mod_timer_ns(QEMUTimer *ts, int64_t expire_time)
+void timer_mod_ns(QEMUTimer *ts, int64_t expire_time)
{
QEMUTimer **pt, *t;
- qemu_del_timer(ts);
+ timer_del(ts);
/* add the timer in the sorted list */
/* NOTE: this code must be signal safe because
- qemu_timer_expired() can be called from a signal. */
- pt = &ts->clock->active_timers;
+ timer_expired() can be called from a signal. */
+ pt = &ts->timer_list->active_timers;
for(;;) {
t = *pt;
- if (!qemu_timer_expired_ns(t, expire_time)) {
+ if (!timer_expired_ns(t, expire_time)) {
break;
}
pt = &t->next;
@@ -339,27 +340,22 @@ void qemu_mod_timer_ns(QEMUTimer *ts, int64_t expire_time)
*pt = ts;
/* Rearm if necessary */
- if (pt == &ts->clock->active_timers) {
- if (!alarm_timer->pending) {
- qemu_rearm_alarm_timer(alarm_timer);
- }
+ if (pt == &ts->timer_list->active_timers) {
/* Interrupt execution to force deadline recalculation. */
- qemu_clock_warp(ts->clock);
- if (use_icount) {
- qemu_notify_event();
- }
+ qemu_clock_warp(ts->timer_list->clock->type);
+ timerlist_notify(ts->timer_list);
}
}
-void qemu_mod_timer(QEMUTimer *ts, int64_t expire_time)
+void timer_mod(QEMUTimer *ts, int64_t expire_time)
{
- qemu_mod_timer_ns(ts, expire_time * ts->scale);
+ timer_mod_ns(ts, expire_time * ts->scale);
}
-bool qemu_timer_pending(QEMUTimer *ts)
+bool timer_pending(QEMUTimer *ts)
{
QEMUTimer *t;
- for (t = ts->clock->active_timers; t != NULL; t = t->next) {
+ for (t = ts->timer_list->active_timers; t != NULL; t = t->next) {
if (t == ts) {
return true;
}
@@ -367,39 +363,90 @@ bool qemu_timer_pending(QEMUTimer *ts)
return false;
}
-bool qemu_timer_expired(QEMUTimer *timer_head, int64_t current_time)
+bool timer_expired(QEMUTimer *timer_head, int64_t current_time)
{
- return qemu_timer_expired_ns(timer_head, current_time * timer_head->scale);
+ return timer_expired_ns(timer_head, current_time * timer_head->scale);
}
-void qemu_run_timers(QEMUClock *clock)
+bool timerlist_run_timers(QEMUTimerList *timer_list)
{
QEMUTimer *ts;
int64_t current_time;
+ bool progress = false;
- if (!clock->enabled)
- return;
+ if (!timer_list->clock->enabled) {
+ return progress;
+ }
- current_time = qemu_get_clock_ns(clock);
+ current_time = qemu_clock_get_ns(timer_list->clock->type);
for(;;) {
- ts = clock->active_timers;
- if (!qemu_timer_expired_ns(ts, current_time)) {
+ ts = timer_list->active_timers;
+ if (!timer_expired_ns(ts, current_time)) {
break;
}
/* remove timer from the list before calling the callback */
- clock->active_timers = ts->next;
+ timer_list->active_timers = ts->next;
ts->next = NULL;
/* run the callback (the timer list can be modified) */
ts->cb(ts->opaque);
+ progress = true;
+ }
+ return progress;
+}
+
+bool qemu_clock_run_timers(QEMUClockType type)
+{
+ return timerlist_run_timers(main_loop_tlg.tl[type]);
+}
+
+void timerlistgroup_init(QEMUTimerListGroup *tlg,
+ QEMUTimerListNotifyCB *cb, void *opaque)
+{
+ QEMUClockType type;
+ for (type = 0; type < QEMU_CLOCK_MAX; type++) {
+ tlg->tl[type] = timerlist_new(type, cb, opaque);
}
}
-int64_t qemu_get_clock_ns(QEMUClock *clock)
+void timerlistgroup_deinit(QEMUTimerListGroup *tlg)
+{
+ QEMUClockType type;
+ for (type = 0; type < QEMU_CLOCK_MAX; type++) {
+ timerlist_free(tlg->tl[type]);
+ }
+}
+
+bool timerlistgroup_run_timers(QEMUTimerListGroup *tlg)
+{
+ QEMUClockType type;
+ bool progress = false;
+ for (type = 0; type < QEMU_CLOCK_MAX; type++) {
+ progress |= timerlist_run_timers(tlg->tl[type]);
+ }
+ return progress;
+}
+
+int64_t timerlistgroup_deadline_ns(QEMUTimerListGroup *tlg)
+{
+ int64_t deadline = -1;
+ QEMUClockType type;
+ for (type = 0; type < QEMU_CLOCK_MAX; type++) {
+ if (qemu_clock_use_for_deadline(tlg->tl[type]->clock->type)) {
+ deadline = qemu_soonest_timeout(deadline,
+ timerlist_deadline_ns(
+ tlg->tl[type]));
+ }
+ }
+ return deadline;
+}
+
+int64_t qemu_clock_get_ns(QEMUClockType type)
{
int64_t now, last;
+ QEMUClock *clock = qemu_clock_ptr(type);
- switch(clock->type) {
+ switch (type) {
case QEMU_CLOCK_REALTIME:
return get_clock();
default:
@@ -420,361 +467,44 @@ int64_t qemu_get_clock_ns(QEMUClock *clock)
}
}
-void qemu_register_clock_reset_notifier(QEMUClock *clock, Notifier *notifier)
+void qemu_clock_register_reset_notifier(QEMUClockType type,
+ Notifier *notifier)
{
+ QEMUClock *clock = qemu_clock_ptr(type);
notifier_list_add(&clock->reset_notifiers, notifier);
}
-void qemu_unregister_clock_reset_notifier(QEMUClock *clock, Notifier *notifier)
+void qemu_clock_unregister_reset_notifier(QEMUClockType type,
+ Notifier *notifier)
{
notifier_remove(notifier);
}
void init_clocks(void)
{
- if (!rt_clock) {
- rt_clock = qemu_new_clock(QEMU_CLOCK_REALTIME);
- vm_clock = qemu_new_clock(QEMU_CLOCK_VIRTUAL);
- host_clock = qemu_new_clock(QEMU_CLOCK_HOST);
- }
-}
-
-uint64_t qemu_timer_expire_time_ns(QEMUTimer *ts)
-{
- return qemu_timer_pending(ts) ? ts->expire_time : -1;
-}
-
-void qemu_run_all_timers(void)
-{
- alarm_timer->pending = false;
-
- /* vm time timers */
- qemu_run_timers(vm_clock);
- qemu_run_timers(rt_clock);
- qemu_run_timers(host_clock);
-
- /* rearm timer, if not periodic */
- if (alarm_timer->expired) {
- alarm_timer->expired = false;
- qemu_rearm_alarm_timer(alarm_timer);
+ QEMUClockType type;
+ for (type = 0; type < QEMU_CLOCK_MAX; type++) {
+ qemu_clock_init(type);
}
-}
-#ifdef _WIN32
-static void CALLBACK host_alarm_handler(PVOID lpParam, BOOLEAN unused)
-#else
-static void host_alarm_handler(int host_signum)
+#ifdef CONFIG_PRCTL_PR_SET_TIMERSLACK
+ prctl(PR_SET_TIMERSLACK, 1, 0, 0, 0);
#endif
-{
- struct qemu_alarm_timer *t = alarm_timer;
- if (!t)
- return;
-
- t->expired = true;
- t->pending = true;
- qemu_notify_event();
-}
-
-#if defined(__linux__)
-
-#include "qemu/compatfd.h"
-
-static int dynticks_start_timer(struct qemu_alarm_timer *t)
-{
- struct sigevent ev;
- timer_t host_timer;
- struct sigaction act;
-
- sigfillset(&act.sa_mask);
- act.sa_flags = 0;
- act.sa_handler = host_alarm_handler;
-
- sigaction(SIGALRM, &act, NULL);
-
- /*
- * Initialize ev struct to 0 to avoid valgrind complaining
- * about uninitialized data in timer_create call
- */
- memset(&ev, 0, sizeof(ev));
- ev.sigev_value.sival_int = 0;
- ev.sigev_notify = SIGEV_SIGNAL;
-#ifdef CONFIG_SIGEV_THREAD_ID
- if (qemu_signalfd_available()) {
- ev.sigev_notify = SIGEV_THREAD_ID;
- ev._sigev_un._tid = qemu_get_thread_id();
- }
-#endif /* CONFIG_SIGEV_THREAD_ID */
- ev.sigev_signo = SIGALRM;
-
- if (timer_create(CLOCK_REALTIME, &ev, &host_timer)) {
- perror("timer_create");
- return -1;
- }
-
- t->timer = host_timer;
-
- return 0;
-}
-
-static void dynticks_stop_timer(struct qemu_alarm_timer *t)
-{
- timer_t host_timer = t->timer;
-
- timer_delete(host_timer);
-}
-
-static void dynticks_rearm_timer(struct qemu_alarm_timer *t,
- int64_t nearest_delta_ns)
-{
- timer_t host_timer = t->timer;
- struct itimerspec timeout;
- int64_t current_ns;
-
- if (nearest_delta_ns < MIN_TIMER_REARM_NS)
- nearest_delta_ns = MIN_TIMER_REARM_NS;
-
- /* check whether a timer is already running */
- if (timer_gettime(host_timer, &timeout)) {
- perror("gettime");
- fprintf(stderr, "Internal timer error: aborting\n");
- exit(1);
- }
- current_ns = timeout.it_value.tv_sec * 1000000000LL + timeout.it_value.tv_nsec;
- if (current_ns && current_ns <= nearest_delta_ns)
- return;
-
- timeout.it_interval.tv_sec = 0;
- timeout.it_interval.tv_nsec = 0; /* 0 for one-shot timer */
- timeout.it_value.tv_sec = nearest_delta_ns / 1000000000;
- timeout.it_value.tv_nsec = nearest_delta_ns % 1000000000;
- if (timer_settime(host_timer, 0 /* RELATIVE */, &timeout, NULL)) {
- perror("settime");
- fprintf(stderr, "Internal timer error: aborting\n");
- exit(1);
- }
-}
-
-#endif /* defined(__linux__) */
-
-#if !defined(_WIN32)
-
-static int unix_start_timer(struct qemu_alarm_timer *t)
-{
- struct sigaction act;
-
- /* timer signal */
- sigfillset(&act.sa_mask);
- act.sa_flags = 0;
- act.sa_handler = host_alarm_handler;
-
- sigaction(SIGALRM, &act, NULL);
- return 0;
-}
-
-static void unix_rearm_timer(struct qemu_alarm_timer *t,
- int64_t nearest_delta_ns)
-{
- struct itimerval itv;
- int err;
-
- if (nearest_delta_ns < MIN_TIMER_REARM_NS)
- nearest_delta_ns = MIN_TIMER_REARM_NS;
-
- itv.it_interval.tv_sec = 0;
- itv.it_interval.tv_usec = 0; /* 0 for one-shot timer */
- itv.it_value.tv_sec = nearest_delta_ns / 1000000000;
- itv.it_value.tv_usec = (nearest_delta_ns % 1000000000) / 1000;
- err = setitimer(ITIMER_REAL, &itv, NULL);
- if (err) {
- perror("setitimer");
- fprintf(stderr, "Internal timer error: aborting\n");
- exit(1);
- }
-}
-
-static void unix_stop_timer(struct qemu_alarm_timer *t)
-{
- struct itimerval itv;
-
- memset(&itv, 0, sizeof(itv));
- setitimer(ITIMER_REAL, &itv, NULL);
-}
-
-#endif /* !defined(_WIN32) */
-
-
-#ifdef _WIN32
-
-static MMRESULT mm_timer;
-static TIMECAPS mm_tc;
-
-static void CALLBACK mm_alarm_handler(UINT uTimerID, UINT uMsg,
- DWORD_PTR dwUser, DWORD_PTR dw1,
- DWORD_PTR dw2)
-{
- struct qemu_alarm_timer *t = alarm_timer;
- if (!t) {
- return;
- }
- t->expired = true;
- t->pending = true;
- qemu_notify_event();
}
-static int mm_start_timer(struct qemu_alarm_timer *t)
+uint64_t timer_expire_time_ns(QEMUTimer *ts)
{
- timeGetDevCaps(&mm_tc, sizeof(mm_tc));
- return 0;
+ return timer_pending(ts) ? ts->expire_time : -1;
}
-static void mm_stop_timer(struct qemu_alarm_timer *t)
+bool qemu_clock_run_all_timers(void)
{
- if (mm_timer) {
- timeKillEvent(mm_timer);
- }
-}
+ bool progress = false;
+ QEMUClockType type;
-static void mm_rearm_timer(struct qemu_alarm_timer *t, int64_t delta)
-{
- int64_t nearest_delta_ms = delta / 1000000;
- if (nearest_delta_ms < mm_tc.wPeriodMin) {
- nearest_delta_ms = mm_tc.wPeriodMin;
- } else if (nearest_delta_ms > mm_tc.wPeriodMax) {
- nearest_delta_ms = mm_tc.wPeriodMax;
+ for (type = 0; type < QEMU_CLOCK_MAX; type++) {
+ progress |= qemu_clock_run_timers(type);
}
- if (mm_timer) {
- timeKillEvent(mm_timer);
- }
- mm_timer = timeSetEvent((UINT)nearest_delta_ms,
- mm_tc.wPeriodMin,
- mm_alarm_handler,
- (DWORD_PTR)t,
- TIME_ONESHOT | TIME_CALLBACK_FUNCTION);
-
- if (!mm_timer) {
- fprintf(stderr, "Failed to re-arm win32 alarm timer\n");
- timeEndPeriod(mm_tc.wPeriodMin);
- exit(1);
- }
+ return progress;
}
-
-static int win32_start_timer(struct qemu_alarm_timer *t)
-{
- HANDLE hTimer;
- BOOLEAN success;
-
- /* If you call ChangeTimerQueueTimer on a one-shot timer (its period
- is zero) that has already expired, the timer is not updated. Since
- creating a new timer is relatively expensive, set a bogus one-hour
- interval in the dynticks case. */
- success = CreateTimerQueueTimer(&hTimer,
- NULL,
- host_alarm_handler,
- t,
- 1,
- 3600000,
- WT_EXECUTEINTIMERTHREAD);
-
- if (!success) {
- fprintf(stderr, "Failed to initialize win32 alarm timer: %ld\n",
- GetLastError());
- return -1;
- }
-
- t->timer = hTimer;
- return 0;
-}
-
-static void win32_stop_timer(struct qemu_alarm_timer *t)
-{
- HANDLE hTimer = t->timer;
-
- if (hTimer) {
- DeleteTimerQueueTimer(NULL, hTimer, NULL);
- }
-}
-
-static void win32_rearm_timer(struct qemu_alarm_timer *t,
- int64_t nearest_delta_ns)
-{
- HANDLE hTimer = t->timer;
- int64_t nearest_delta_ms;
- BOOLEAN success;
-
- nearest_delta_ms = nearest_delta_ns / 1000000;
- if (nearest_delta_ms < 1) {
- nearest_delta_ms = 1;
- }
- /* ULONG_MAX can be 32 bit */
- if (nearest_delta_ms > ULONG_MAX) {
- nearest_delta_ms = ULONG_MAX;
- }
- success = ChangeTimerQueueTimer(NULL,
- hTimer,
- (unsigned long) nearest_delta_ms,
- 3600000);
-
- if (!success) {
- fprintf(stderr, "Failed to rearm win32 alarm timer: %ld\n",
- GetLastError());
- exit(-1);
- }
-
-}
-
-#endif /* _WIN32 */
-
-static void quit_timers(void)
-{
- struct qemu_alarm_timer *t = alarm_timer;
- alarm_timer = NULL;
- t->stop(t);
-}
-
-#ifdef CONFIG_POSIX
-static void reinit_timers(void)
-{
- struct qemu_alarm_timer *t = alarm_timer;
- t->stop(t);
- if (t->start(t)) {
- fprintf(stderr, "Internal timer error: aborting\n");
- exit(1);
- }
- qemu_rearm_alarm_timer(t);
-}
-#endif /* CONFIG_POSIX */
-
-int init_timer_alarm(void)
-{
- struct qemu_alarm_timer *t = NULL;
- int i, err = -1;
-
- if (alarm_timer) {
- return 0;
- }
-
- for (i = 0; alarm_timers[i].name; i++) {
- t = &alarm_timers[i];
-
- err = t->start(t);
- if (!err)
- break;
- }
-
- if (err) {
- err = -ENOENT;
- goto fail;
- }
-
- atexit(quit_timers);
-#ifdef CONFIG_POSIX
- pthread_atfork(NULL, NULL, reinit_timers);
-#endif
- alarm_timer = t;
- return 0;
-
-fail:
- return err;
-}
-
diff --git a/qtest.c b/qtest.c
index 74f1842c1e..ef671fb05d 100644
--- a/qtest.c
+++ b/qtest.c
@@ -47,7 +47,7 @@ static bool qtest_opened;
*
* Clock management:
*
- * The qtest client is completely in charge of the vm_clock. qtest commands
+ * The qtest client is completely in charge of QEMU_CLOCK_VIRTUAL. qtest commands
* let you adjust the value of the clock (monotonically). All the commands
* return the current value of the clock in nanoseconds.
*
@@ -412,11 +412,11 @@ static void qtest_process_command(CharDriverState *chr, gchar **words)
if (words[1]) {
ns = strtoll(words[1], NULL, 0);
} else {
- ns = qemu_clock_deadline(vm_clock);
+ ns = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
}
- qtest_clock_warp(qemu_get_clock_ns(vm_clock) + ns);
+ qtest_clock_warp(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + ns);
qtest_send_prefix(chr);
- qtest_send(chr, "OK %"PRIi64"\n", (int64_t)qemu_get_clock_ns(vm_clock));
+ qtest_send(chr, "OK %"PRIi64"\n", (int64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
} else if (strcmp(words[0], "clock_set") == 0) {
int64_t ns;
@@ -424,7 +424,7 @@ static void qtest_process_command(CharDriverState *chr, gchar **words)
ns = strtoll(words[1], NULL, 0);
qtest_clock_warp(ns);
qtest_send_prefix(chr);
- qtest_send(chr, "OK %"PRIi64"\n", (int64_t)qemu_get_clock_ns(vm_clock));
+ qtest_send(chr, "OK %"PRIi64"\n", (int64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
} else {
qtest_send_prefix(chr);
qtest_send(chr, "FAIL Unknown command `%s'\n", words[0]);
diff --git a/savevm.c b/savevm.c
index 03fc4d93bf..c536aa4986 100644
--- a/savevm.c
+++ b/savevm.c
@@ -97,18 +97,18 @@ static void qemu_announce_self_once(void *opaque)
if (--count) {
/* delay 50ms, 150ms, 250ms, ... */
- qemu_mod_timer(timer, qemu_get_clock_ms(rt_clock) +
+ timer_mod(timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) +
50 + (SELF_ANNOUNCE_ROUNDS - count - 1) * 100);
} else {
- qemu_del_timer(timer);
- qemu_free_timer(timer);
+ timer_del(timer);
+ timer_free(timer);
}
}
void qemu_announce_self(void)
{
static QEMUTimer *timer;
- timer = qemu_new_timer_ms(rt_clock, qemu_announce_self_once, &timer);
+ timer = timer_new_ms(QEMU_CLOCK_REALTIME, qemu_announce_self_once, &timer);
qemu_announce_self_once(&timer);
}
@@ -979,23 +979,23 @@ uint64_t qemu_get_be64(QEMUFile *f)
/* timer */
-void qemu_put_timer(QEMUFile *f, QEMUTimer *ts)
+void timer_put(QEMUFile *f, QEMUTimer *ts)
{
uint64_t expire_time;
- expire_time = qemu_timer_expire_time_ns(ts);
+ expire_time = timer_expire_time_ns(ts);
qemu_put_be64(f, expire_time);
}
-void qemu_get_timer(QEMUFile *f, QEMUTimer *ts)
+void timer_get(QEMUFile *f, QEMUTimer *ts)
{
uint64_t expire_time;
expire_time = qemu_get_be64(f);
if (expire_time != -1) {
- qemu_mod_timer_ns(ts, expire_time);
+ timer_mod_ns(ts, expire_time);
} else {
- qemu_del_timer(ts);
+ timer_del(ts);
}
}
@@ -1339,14 +1339,14 @@ const VMStateInfo vmstate_info_float64 = {
static int get_timer(QEMUFile *f, void *pv, size_t size)
{
QEMUTimer *v = pv;
- qemu_get_timer(f, v);
+ timer_get(f, v);
return 0;
}
static void put_timer(QEMUFile *f, void *pv, size_t size)
{
QEMUTimer *v = pv;
- qemu_put_timer(f, v);
+ timer_put(f, v);
}
const VMStateInfo vmstate_info_timer = {
@@ -2387,7 +2387,7 @@ void do_savevm(Monitor *mon, const QDict *qdict)
qemu_gettimeofday(&tv);
sn->date_sec = tv.tv_sec;
sn->date_nsec = tv.tv_usec * 1000;
- sn->vm_clock_nsec = qemu_get_clock_ns(vm_clock);
+ sn->vm_clock_nsec = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
if (name) {
ret = bdrv_snapshot_find(bs, old_sn, name);
diff --git a/scripts/switch-timer-api b/scripts/switch-timer-api
new file mode 100755
index 0000000000..a369a083d1
--- /dev/null
+++ b/scripts/switch-timer-api
@@ -0,0 +1,178 @@
+#!/usr/bin/perl
+
+use strict;
+use warnings;
+use Getopt::Long;
+use FindBin;
+
+my @legacy = qw(qemu_clock_ptr qemu_get_clock_ns qemu_get_clock_ms qemu_register_clock_reset_notifier qemu_unregister_clock_reset_notifier qemu_new_timer qemu_free_timer qemu_del_timer qemu_mod_timer_ns qemu_mod_timer qemu_run_timers qemu_new_timer_ns qemu_new_timer_us qemu_new_timer_ms);
+my $legacyre = '\b('.join('|', @legacy).')\b';
+my $option_git;
+my $option_dryrun;
+my $option_quiet;
+my $option_rtc;
+my $suffix=".tmp.$$";
+my @files;
+my $getfiles = 'git grep -l -E \'\b((host|rt|vm|rtc)_clock\b|qemu_\w*timer)\' | egrep \'\.[ch]$\' | egrep -v \'qemu-timer\.c$|include/qemu/timer\.h$\'';
+
+sub Syntax
+{
+ print STDERR <<STOP;
+Usage: $FindBin::Script [options] FILE ...
+
+Translate each FILE to the new Qemu timer API. If no files
+are passed, a reasonable set of files is guessed (via git grep).
+
+Options:
+ -q, --quiet Do not show warnings etc
+ -d, --dry-run Do a dry run
+ -g, --git Generate a git commit for each change
+ -r, --rtc Only fix up rtc usage
+ -h, --help Print this message
+
+STOP
+return;
+}
+
+sub ParseOptions
+{
+ if (!GetOptions (
+ "dry-run|d" => \$option_dryrun,
+ "git|g" => \$option_git,
+ "quiet|q" => \$option_quiet,
+ "rtc|r" => \$option_rtc,
+ "help|h" => sub { Syntax(); exit(0); }
+ ))
+ {
+ Syntax();
+ die "Bad options";
+ }
+
+ if ($#ARGV >=0)
+ {
+ @files = @ARGV;
+ }
+ else
+ {
+ @files = split(/\s+/, `$getfiles`);
+ }
+
+ foreach my $file (@files)
+ {
+ die "Cannot find $file" unless (-f $file && -r $file);
+ }
+}
+
+sub DoWarn
+{
+ my $text = shift @_;
+ my $line = shift @_;
+ return if ($option_quiet);
+ chomp ($line);
+ print STDERR "$text\n";
+ print STDERR "$line\n\n";
+}
+
+sub Process
+{
+ my $ifn = shift @_;
+ my $ofn = $ifn.$suffix;
+
+ my $intext;
+ my $outtext;
+ my $linenum = 0;
+
+ open(my $input, "<", $ifn) || die "Cannot open $ifn for read: $!";
+
+ while (<$input>)
+ {
+ my $line = $_;
+ $intext .= $line;
+ $linenum++;
+
+ # fix the specific uses
+ unless ($option_rtc)
+ {
+ $line =~ s/\bqemu_new_timer(_[num]s)\s*\((vm_|rt_|host_)clock\b/timer_new$1(XXX_$2clock/g;
+ $line =~ s/\bqemu_new_timer\s*\((vm_|rt_|host_)clock\b/timer_new(XXX_$1clock/g;
+ $line =~ s/\bqemu_get_clock(_[num]s)\s*\((vm_|rt_|host_)clock\b/qemu_clock_get$1(XXX_$2clock/g;
+ }
+
+ # rtc is different
+ $line =~ s/\bqemu_new_timer(_[num]s)\s*\(rtc_clock\b/timer_new$1(rtc_clock/g;
+ $line =~ s/\bqemu_new_timer\s*\(rtc_clock\b/timer_new(rtc_clock/g;
+ $line =~ s/\bqemu_get_clock(_[num]s)\s*\(rtc_clock\b/qemu_clock_get$1(rtc_clock/g;
+ $line =~ s/\bqemu_register_clock_reset_notifier\s*\(rtc_clock\b/qemu_register_clock_reset_notifier(qemu_clock_ptr(rtc_clock)/g;
+
+ unless ($option_rtc)
+ {
+ # fix up comments
+ $line =~ s/\b(vm_|rt_|host_)clock\b/XXX_$1clock/g if ($line =~ m,^[/ ]+\*,);
+
+ # spurious fprintf error reporting
+ $line =~ s/: qemu_new_timer_ns failed/: timer_new_ns failed/g;
+
+ # these have just changed name
+ $line =~ s/\bqemu_mod_timer\b/timer_mod/g;
+ $line =~ s/\bqemu_mod_timer_(ns|us|ms)\b/timer_mod_$1/g;
+ $line =~ s/\bqemu_free_timer\b/timer_free/g;
+ $line =~ s/\bqemu_del_timer\b/timer_del/g;
+ }
+
+ # fix up rtc_clock
+ $line =~ s/QEMUClock \*rtc_clock;/QEMUClockType rtc_clock;/g;
+ $line =~ s/\brtc_clock = (vm_|rt_|host_)clock\b/rtc_clock = XXX_$1clock/g;
+
+ unless ($option_rtc)
+ {
+ # replace any more general uses
+ $line =~ s/\b(vm_|rt_|host_)clock\b/qemu_clock_ptr(XXX_$1clock)/g;
+ }
+
+ # fix up the place holders
+ $line =~ s/\bXXX_vm_clock\b/QEMU_CLOCK_VIRTUAL/g;
+ $line =~ s/\bXXX_rt_clock\b/QEMU_CLOCK_REALTIME/g;
+ $line =~ s/\bXXX_host_clock\b/QEMU_CLOCK_HOST/g;
+
+ unless ($option_rtc)
+ {
+ DoWarn("$ifn:$linenum WARNING: timer $1 not fixed up", $line) if ($line =~ /\b((vm_|rt_|host_)clock)\b/);
+ DoWarn("$ifn:$linenum WARNING: function $1 not fixed up", $line) if ($line =~ /\b(qemu_new_timer\w+)\b/);
+ DoWarn("$ifn:$linenum WARNING: legacy function $1 remains", $line) if ($line =~ /$legacyre/o);
+ }
+
+ $outtext .= $line;
+ }
+
+ close $input;
+
+ if ($intext ne $outtext)
+ {
+ print STDERR "Patching $ifn\n" unless ($option_quiet);
+ unless ($option_dryrun)
+ {
+ open(my $output, ">", $ofn) || die "Cannot open $ofn for write: $!";
+ print $output $outtext;
+ close $output;
+ rename ($ofn, $ifn) || die "Cannot rename temp file to $ifn: $!";
+ return 1;
+ }
+ }
+ return 0;
+}
+
+sub DoCommit
+{
+ my $file = shift @_;
+ open (my $git, "| git commit -F - $file") || die "Cannot run git commit on $file: $!";
+ print $git "timers api: use new timer api in $file\n\nConvert $file to use new timer API.\nThis is an automated commit made by scripts/switch-timer-api\n";
+ close ($git);
+}
+
+ParseOptions;
+
+foreach my $file (@files)
+{
+ my $changed = Process ($file);
+ DoCommit($file) if ($changed && $option_git);
+}
diff --git a/slirp/if.c b/slirp/if.c
index dcd5fafe5d..87ca8a53a9 100644
--- a/slirp/if.c
+++ b/slirp/if.c
@@ -154,7 +154,7 @@ diddit:
*/
void if_start(Slirp *slirp)
{
- uint64_t now = qemu_get_clock_ns(rt_clock);
+ uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
bool from_batchq, next_from_batchq;
struct mbuf *ifm, *ifm_next, *ifqt;
diff --git a/slirp/misc.c b/slirp/misc.c
index 0bcc481939..c0d489950a 100644
--- a/slirp/misc.c
+++ b/slirp/misc.c
@@ -9,6 +9,7 @@
#include <libslirp.h>
#include "monitor/monitor.h"
+#include "qemu/main-loop.h"
#ifdef DEBUG
int slirp_debug = DBG_CALL|DBG_MISC|DBG_ERROR;
diff --git a/slirp/slirp.c b/slirp/slirp.c
index 80b28ea89e..5c3dabba93 100644
--- a/slirp/slirp.c
+++ b/slirp/slirp.c
@@ -448,7 +448,7 @@ void slirp_pollfds_poll(GArray *pollfds, int select_error)
return;
}
- curtime = qemu_get_clock_ms(rt_clock);
+ curtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
QTAILQ_FOREACH(slirp, &slirp_instances, entry) {
/*
@@ -787,7 +787,7 @@ int if_encap(Slirp *slirp, struct mbuf *ifm)
ifm->arp_requested = true;
/* Expire request and drop outgoing packet after 1 second */
- ifm->expiration_date = qemu_get_clock_ns(rt_clock) + 1000000000ULL;
+ ifm->expiration_date = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + 1000000000ULL;
}
return 0;
} else {
diff --git a/stubs/clock-warp.c b/stubs/clock-warp.c
index b64c462e73..5565118d11 100644
--- a/stubs/clock-warp.c
+++ b/stubs/clock-warp.c
@@ -1,7 +1,7 @@
#include "qemu-common.h"
#include "qemu/timer.h"
-void qemu_clock_warp(QEMUClock *clock)
+void qemu_clock_warp(QEMUClockType type)
{
}
diff --git a/target-alpha/sys_helper.c b/target-alpha/sys_helper.c
index 97cf9ebfc9..035810c27c 100644
--- a/target-alpha/sys_helper.c
+++ b/target-alpha/sys_helper.c
@@ -30,9 +30,9 @@ uint64_t helper_load_pcc(CPUAlphaState *env)
In order to make OS-level time accounting work with the RPCC,
present it with a well-timed clock fixed at 250MHz. */
return (((uint64_t)env->pcc_ofs << 32)
- | (uint32_t)(qemu_get_clock_ns(vm_clock) >> 2));
+ | (uint32_t)(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) >> 2));
#else
- /* In user-mode, vm_clock doesn't exist. Just pass through the host cpu
+ /* In user-mode, QEMU_CLOCK_VIRTUAL doesn't exist. Just pass through the host cpu
clock ticks. Also, don't bother taking PCC_OFS into account. */
return (uint32_t)cpu_get_real_ticks();
#endif
@@ -88,12 +88,12 @@ void helper_halt(uint64_t restart)
uint64_t helper_get_vmtime(void)
{
- return qemu_get_clock_ns(vm_clock);
+ return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
}
uint64_t helper_get_walltime(void)
{
- return qemu_get_clock_ns(rtc_clock);
+ return qemu_clock_get_ns(rtc_clock);
}
void helper_set_alarm(CPUAlphaState *env, uint64_t expire)
@@ -102,9 +102,9 @@ void helper_set_alarm(CPUAlphaState *env, uint64_t expire)
if (expire) {
env->alarm_expire = expire;
- qemu_mod_timer(cpu->alarm_timer, expire);
+ timer_mod(cpu->alarm_timer, expire);
} else {
- qemu_del_timer(cpu->alarm_timer);
+ timer_del(cpu->alarm_timer);
}
}
diff --git a/target-arm/cpu.c b/target-arm/cpu.c
index f01ce03682..b2556c66b4 100644
--- a/target-arm/cpu.c
+++ b/target-arm/cpu.c
@@ -204,9 +204,9 @@ static void arm_cpu_initfn(Object *obj)
qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 2);
}
- cpu->gt_timer[GTIMER_PHYS] = qemu_new_timer(vm_clock, GTIMER_SCALE,
+ cpu->gt_timer[GTIMER_PHYS] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE,
arm_gt_ptimer_cb, cpu);
- cpu->gt_timer[GTIMER_VIRT] = qemu_new_timer(vm_clock, GTIMER_SCALE,
+ cpu->gt_timer[GTIMER_VIRT] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE,
arm_gt_vtimer_cb, cpu);
qdev_init_gpio_out(DEVICE(cpu), cpu->gt_timer_outputs,
ARRAY_SIZE(cpu->gt_timer_outputs));
diff --git a/target-arm/helper.c b/target-arm/helper.c
index f4e1b06d23..e51ef20aea 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -699,7 +699,7 @@ static const ARMCPRegInfo v6k_cp_reginfo[] = {
static uint64_t gt_get_countervalue(CPUARMState *env)
{
- return qemu_get_clock_ns(vm_clock) / GTIMER_SCALE;
+ return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
}
static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
@@ -733,12 +733,12 @@ static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
if (nexttick > INT64_MAX / GTIMER_SCALE) {
nexttick = INT64_MAX / GTIMER_SCALE;
}
- qemu_mod_timer(cpu->gt_timer[timeridx], nexttick);
+ timer_mod(cpu->gt_timer[timeridx], nexttick);
} else {
/* Timer disabled: ISTATUS and timer output always clear */
gt->ctl &= ~4;
qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
- qemu_del_timer(cpu->gt_timer[timeridx]);
+ timer_del(cpu->gt_timer[timeridx]);
}
}
@@ -758,7 +758,7 @@ static void gt_cnt_reset(CPUARMState *env, const ARMCPRegInfo *ri)
ARMCPU *cpu = arm_env_get_cpu(env);
int timeridx = ri->opc1 & 1;
- qemu_del_timer(cpu->gt_timer[timeridx]);
+ timer_del(cpu->gt_timer[timeridx]);
}
static int gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -941,7 +941,7 @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
#else
/* In user-mode none of the generic timer registers are accessible,
- * and their implementation depends on vm_clock and qdev gpio outputs,
+ * and their implementation depends on QEMU_CLOCK_VIRTUAL and qdev gpio outputs,
* so instead just don't register any of them.
*/
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
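The ARM generic timer hunks also show the scaled form of the new API: timer_new(type, scale, cb, opaque) creates a timer whose deadlines are expressed in units of scale nanoseconds, which is why gt_recalc_timer() passes nexttick straight to timer_mod() and clamps it to INT64_MAX / GTIMER_SCALE first. A reduced sketch under that assumption (SCALE and the callback are placeholders, not taken from the patch):

    #include <stdint.h>
    #include "qemu/timer.h"

    #define SCALE 16                  /* placeholder; target-arm uses GTIMER_SCALE */

    static void tick_cb(void *opaque)
    {
    }

    static void scaled_timer_sketch(void)
    {
        /* deadlines for this timer are counted in SCALE-nanosecond ticks */
        QEMUTimer *t = timer_new(QEMU_CLOCK_VIRTUAL, SCALE, tick_cb, NULL);

        /* current count in the same units, as gt_get_countervalue() does */
        uint64_t count = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / SCALE;

        timer_mod(t, count + 100);    /* fire 100 scaled ticks from now */
        timer_del(t);
        timer_free(t);
    }
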
diff --git a/target-ppc/kvm.c b/target-ppc/kvm.c
index 30a870ecb1..8a196c6cc1 100644
--- a/target-ppc/kvm.c
+++ b/target-ppc/kvm.c
@@ -419,7 +419,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
return ret;
}
- idle_timer = qemu_new_timer_ns(vm_clock, kvm_kick_cpu, cpu);
+ idle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, kvm_kick_cpu, cpu);
/* Some targets support access to KVM's guest TLB. */
switch (cenv->mmu_model) {
@@ -1136,7 +1136,7 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
}
/* Always wake up soon in case the interrupt was level based */
- qemu_mod_timer(idle_timer, qemu_get_clock_ns(vm_clock) +
+ timer_mod(idle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
(get_ticks_per_sec() / 50));
}
@@ -1807,7 +1807,7 @@ int kvmppc_get_htab_fd(bool write)
int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns)
{
- int64_t starttime = qemu_get_clock_ns(rt_clock);
+ int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
uint8_t buf[bufsize];
ssize_t rc;
@@ -1823,7 +1823,7 @@ int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns)
}
} while ((rc != 0)
&& ((max_ns < 0)
- || ((qemu_get_clock_ns(rt_clock) - starttime) < max_ns)));
+ || ((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) < max_ns)));
return (rc == 0) ? 1 : 0;
}
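kvmppc_save_htab() keeps its time-budgeted loop; only the clock accessor changes: record a QEMU_CLOCK_REALTIME start value and keep iterating while the elapsed nanoseconds stay under max_ns. The same shape as a standalone sketch (the budget value and loop body are invented for illustration):

    #include "qemu/timer.h"

    static void work_with_time_budget(int64_t budget_ns)
    {
        int64_t start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

        do {
            /* one bounded unit of work per iteration */
        } while (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start < budget_ns);
    }
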
diff --git a/target-ppc/kvm_ppc.c b/target-ppc/kvm_ppc.c
index 1b192a8038..9b8365503b 100644
--- a/target-ppc/kvm_ppc.c
+++ b/target-ppc/kvm_ppc.c
@@ -24,7 +24,7 @@ static unsigned int kvmppc_timer_rate;
static void kvmppc_timer_hack(void *opaque)
{
qemu_notify_event();
- qemu_mod_timer(kvmppc_timer, qemu_get_clock_ns(vm_clock) + kvmppc_timer_rate);
+ timer_mod(kvmppc_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + kvmppc_timer_rate);
}
void kvmppc_init(void)
@@ -34,7 +34,7 @@ void kvmppc_init(void)
* run. So, until QEMU gains IO threads, we create this timer to ensure
* that the device model gets a chance to run. */
kvmppc_timer_rate = get_ticks_per_sec() / 10;
- kvmppc_timer = qemu_new_timer_ns(vm_clock, &kvmppc_timer_hack, NULL);
- qemu_mod_timer(kvmppc_timer, qemu_get_clock_ns(vm_clock) + kvmppc_timer_rate);
+ kvmppc_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &kvmppc_timer_hack, NULL);
+ timer_mod(kvmppc_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + kvmppc_timer_rate);
}
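The kvm_ppc.c hack is the periodic-timer idiom under the new names: the callback re-arms the timer relative to the current clock value, so it keeps firing every kvmppc_timer_rate nanoseconds (get_ticks_per_sec() / 10, i.e. 100ms). A self-contained sketch of that idiom (names are illustrative):

    #include "qemu/timer.h"

    static QEMUTimer *poll_timer;
    static int64_t poll_period_ns;

    static void poll_cb(void *opaque)
    {
        /* periodic work goes here, then re-arm relative to "now" */
        timer_mod(poll_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + poll_period_ns);
    }

    static void poll_start(void)
    {
        poll_period_ns = get_ticks_per_sec() / 10;             /* 100ms */
        poll_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, poll_cb, NULL);
        timer_mod(poll_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + poll_period_ns);
    }
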
diff --git a/target-s390x/cpu.c b/target-s390x/cpu.c
index 6be6c084a7..5cc99387b2 100644
--- a/target-s390x/cpu.c
+++ b/target-s390x/cpu.c
@@ -129,8 +129,8 @@ static void s390_cpu_initfn(Object *obj)
env->tod_offset = TOD_UNIX_EPOCH +
(time2tod(mktimegm(&tm)) * 1000000000ULL);
env->tod_basetime = 0;
- env->tod_timer = qemu_new_timer_ns(vm_clock, s390x_tod_timer, cpu);
- env->cpu_timer = qemu_new_timer_ns(vm_clock, s390x_cpu_timer, cpu);
+ env->tod_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_tod_timer, cpu);
+ env->cpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_cpu_timer, cpu);
/* set CPUState::halted state to 1 to avoid decrementing the running
* cpu counter in s390_cpu_reset to a negative number at
* initial ipl */
diff --git a/target-s390x/misc_helper.c b/target-s390x/misc_helper.c
index 09301d0a6f..454960aa01 100644
--- a/target-s390x/misc_helper.c
+++ b/target-s390x/misc_helper.c
@@ -225,7 +225,7 @@ static inline uint64_t clock_value(CPUS390XState *env)
uint64_t time;
time = env->tod_offset +
- time2tod(qemu_get_clock_ns(vm_clock) - env->tod_basetime);
+ time2tod(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - env->tod_basetime);
return time;
}
@@ -248,7 +248,7 @@ void HELPER(sckc)(CPUS390XState *env, uint64_t time)
/* nanoseconds */
time = (time * 125) >> 9;
- qemu_mod_timer(env->tod_timer, qemu_get_clock_ns(vm_clock) + time);
+ timer_mod(env->tod_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + time);
}
/* Store Clock Comparator */
@@ -268,7 +268,7 @@ void HELPER(spt)(CPUS390XState *env, uint64_t time)
/* nanoseconds */
time = (time * 125) >> 9;
- qemu_mod_timer(env->cpu_timer, qemu_get_clock_ns(vm_clock) + time);
+ timer_mod(env->cpu_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + time);
}
/* Store CPU Timer */
diff --git a/target-xtensa/op_helper.c b/target-xtensa/op_helper.c
index 6ca912c5bb..01123af707 100644
--- a/target-xtensa/op_helper.c
+++ b/target-xtensa/op_helper.c
@@ -390,7 +390,7 @@ void HELPER(waiti)(CPUXtensaState *env, uint32_t pc, uint32_t intlevel)
}
cpu = CPU(xtensa_env_get_cpu(env));
- env->halt_clock = qemu_get_clock_ns(vm_clock);
+ env->halt_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
cpu->halted = 1;
if (xtensa_option_enabled(env->config, XTENSA_OPTION_TIMER_INTERRUPT)) {
xtensa_rearm_ccompare_timer(env);
diff --git a/tests/libqtest.h b/tests/libqtest.h
index 0f6aade092..a6e99bd023 100644
--- a/tests/libqtest.h
+++ b/tests/libqtest.h
@@ -258,9 +258,9 @@ void qtest_memwrite(QTestState *s, uint64_t addr, const void *data, size_t size)
* qtest_clock_step_next:
* @s: #QTestState instance to operate on.
*
- * Advance the vm_clock to the next deadline.
+ * Advance the QEMU_CLOCK_VIRTUAL to the next deadline.
*
- * Returns: The current value of the vm_clock in nanoseconds.
+ * Returns: The current value of the QEMU_CLOCK_VIRTUAL in nanoseconds.
*/
int64_t qtest_clock_step_next(QTestState *s);
@@ -269,9 +269,9 @@ int64_t qtest_clock_step_next(QTestState *s);
* @s: QTestState instance to operate on.
* @step: Number of nanoseconds to advance the clock by.
*
- * Advance the vm_clock by @step nanoseconds.
+ * Advance the QEMU_CLOCK_VIRTUAL by @step nanoseconds.
*
- * Returns: The current value of the vm_clock in nanoseconds.
+ * Returns: The current value of the QEMU_CLOCK_VIRTUAL in nanoseconds.
*/
int64_t qtest_clock_step(QTestState *s, int64_t step);
@@ -280,9 +280,9 @@ int64_t qtest_clock_step(QTestState *s, int64_t step);
* @s: QTestState instance to operate on.
* @val: Nanoseconds value to advance the clock to.
*
- * Advance the vm_clock to @val nanoseconds since the VM was launched.
+ * Advance the QEMU_CLOCK_VIRTUAL to @val nanoseconds since the VM was launched.
*
- * Returns: The current value of the vm_clock in nanoseconds.
+ * Returns: The current value of the QEMU_CLOCK_VIRTUAL in nanoseconds.
*/
int64_t qtest_clock_set(QTestState *s, int64_t val);
@@ -584,9 +584,9 @@ static inline void memwrite(uint64_t addr, const void *data, size_t size)
/**
* clock_step_next:
*
- * Advance the vm_clock to the next deadline.
+ * Advance the QEMU_CLOCK_VIRTUAL to the next deadline.
*
- * Returns: The current value of the vm_clock in nanoseconds.
+ * Returns: The current value of the QEMU_CLOCK_VIRTUAL in nanoseconds.
*/
static inline int64_t clock_step_next(void)
{
@@ -597,9 +597,9 @@ static inline int64_t clock_step_next(void)
* clock_step:
* @step: Number of nanoseconds to advance the clock by.
*
- * Advance the vm_clock by @step nanoseconds.
+ * Advance the QEMU_CLOCK_VIRTUAL by @step nanoseconds.
*
- * Returns: The current value of the vm_clock in nanoseconds.
+ * Returns: The current value of the QEMU_CLOCK_VIRTUAL in nanoseconds.
*/
static inline int64_t clock_step(int64_t step)
{
@@ -610,9 +610,9 @@ static inline int64_t clock_step(int64_t step)
* clock_set:
* @val: Nanoseconds value to advance the clock to.
*
- * Advance the vm_clock to @val nanoseconds since the VM was launched.
+ * Advance the QEMU_CLOCK_VIRTUAL to @val nanoseconds since the VM was launched.
*
- * Returns: The current value of the vm_clock in nanoseconds.
+ * Returns: The current value of the QEMU_CLOCK_VIRTUAL in nanoseconds.
*/
static inline int64_t clock_set(int64_t val)
{
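Only the wording of the libqtest documentation changes; the helpers still take and return plain nanosecond values, and the inline clock_step()/clock_step_next()/clock_set() wrappers forward to the qtest_ variants on the test's default connection. A hedged usage sketch (the test body is invented for illustration):

    #include "libqtest.h"

    static void clock_helpers_sketch(void)
    {
        int64_t now;

        now = clock_step(1000000);        /* advance QEMU_CLOCK_VIRTUAL by 1ms  */
        now = clock_step_next();          /* ...or jump to the next deadline    */
        now = clock_set(5000000000LL);    /* ...or to 5s after the VM launched  */
        (void)now;                        /* each returns the clock value in ns */
    }
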
diff --git a/tests/test-aio.c b/tests/test-aio.c
index 1ab5637d95..07a1f61f87 100644
--- a/tests/test-aio.c
+++ b/tests/test-aio.c
@@ -12,6 +12,7 @@
#include <glib.h>
#include "block/aio.h"
+#include "qemu/timer.h"
AioContext *ctx;
@@ -46,6 +47,15 @@ typedef struct {
int max;
} BHTestData;
+typedef struct {
+ QEMUTimer timer;
+ QEMUClockType clock_type;
+ int n;
+ int max;
+ int64_t ns;
+ AioContext *ctx;
+} TimerTestData;
+
static void bh_test_cb(void *opaque)
{
BHTestData *data = opaque;
@@ -54,6 +64,19 @@ static void bh_test_cb(void *opaque)
}
}
+static void timer_test_cb(void *opaque)
+{
+ TimerTestData *data = opaque;
+ if (++data->n < data->max) {
+ timer_mod(&data->timer,
+ qemu_clock_get_ns(data->clock_type) + data->ns);
+ }
+}
+
+static void dummy_io_handler_read(void *opaque)
+{
+}
+
static void bh_delete_cb(void *opaque)
{
BHTestData *data = opaque;
@@ -342,6 +365,64 @@ static void test_wait_event_notifier_noflush(void)
event_notifier_cleanup(&data.e);
}
+static void test_timer_schedule(void)
+{
+ TimerTestData data = { .n = 0, .ctx = ctx, .ns = SCALE_MS * 750LL,
+ .max = 2,
+ .clock_type = QEMU_CLOCK_VIRTUAL };
+ int pipefd[2];
+
+ /* aio_poll will not block to wait for timers to complete unless it has
+ * an fd to wait on. Fixing this breaks other tests. So create a dummy one.
+ */
+ g_assert(!pipe2(pipefd, O_NONBLOCK));
+ aio_set_fd_handler(ctx, pipefd[0],
+ dummy_io_handler_read, NULL, NULL);
+ aio_poll(ctx, false);
+
+ aio_timer_init(ctx, &data.timer, data.clock_type,
+ SCALE_NS, timer_test_cb, &data);
+ timer_mod(&data.timer,
+ qemu_clock_get_ns(data.clock_type) +
+ data.ns);
+
+ g_assert_cmpint(data.n, ==, 0);
+
+ /* timer_mod may well cause an event notifier to have gone off,
+ * so clear that
+ */
+ do {} while (aio_poll(ctx, false));
+
+ g_assert(!aio_poll(ctx, false));
+ g_assert_cmpint(data.n, ==, 0);
+
+ sleep(1);
+ g_assert_cmpint(data.n, ==, 0);
+
+ g_assert(aio_poll(ctx, false));
+ g_assert_cmpint(data.n, ==, 1);
+
+ /* timer_mod called by our callback */
+ do {} while (aio_poll(ctx, false));
+
+ g_assert(!aio_poll(ctx, false));
+ g_assert_cmpint(data.n, ==, 1);
+
+ g_assert(aio_poll(ctx, true));
+ g_assert_cmpint(data.n, ==, 2);
+
+ /* As max is now 2, an event notifier should not have gone off */
+
+ g_assert(!aio_poll(ctx, false));
+ g_assert_cmpint(data.n, ==, 2);
+
+ aio_set_fd_handler(ctx, pipefd[0], NULL, NULL, NULL);
+ close(pipefd[0]);
+ close(pipefd[1]);
+
+ timer_del(&data.timer);
+}
+
/* Now the same tests, using the context as a GSource. They are
* very similar to the ones above, with g_main_context_iteration
* replacing aio_poll. However:
@@ -624,12 +705,61 @@ static void test_source_wait_event_notifier_noflush(void)
event_notifier_cleanup(&data.e);
}
+static void test_source_timer_schedule(void)
+{
+ TimerTestData data = { .n = 0, .ctx = ctx, .ns = SCALE_MS * 750LL,
+ .max = 2,
+ .clock_type = QEMU_CLOCK_VIRTUAL };
+ int pipefd[2];
+ int64_t expiry;
+
+ /* aio_poll will not block to wait for timers to complete unless it has
+ * an fd to wait on. Fixing this breaks other tests. So create a dummy one.
+ */
+ g_assert(!pipe2(pipefd, O_NONBLOCK));
+ aio_set_fd_handler(ctx, pipefd[0],
+ dummy_io_handler_read, NULL, NULL);
+ do {} while (g_main_context_iteration(NULL, false));
+
+ aio_timer_init(ctx, &data.timer, data.clock_type,
+ SCALE_NS, timer_test_cb, &data);
+ expiry = qemu_clock_get_ns(data.clock_type) +
+ data.ns;
+ timer_mod(&data.timer, expiry);
+
+ g_assert_cmpint(data.n, ==, 0);
+
+ sleep(1);
+ g_assert_cmpint(data.n, ==, 0);
+
+ g_assert(g_main_context_iteration(NULL, false));
+ g_assert_cmpint(data.n, ==, 1);
+
+ /* The comment above was not kidding when it said this wakes up itself */
+ do {
+ g_assert(g_main_context_iteration(NULL, true));
+ } while (qemu_clock_get_ns(data.clock_type) <= expiry);
+ sleep(1);
+ g_main_context_iteration(NULL, false);
+
+ g_assert_cmpint(data.n, ==, 2);
+
+ aio_set_fd_handler(ctx, pipefd[0], NULL, NULL, NULL);
+ close(pipefd[0]);
+ close(pipefd[1]);
+
+ timer_del(&data.timer);
+}
+
+
/* End of tests. */
int main(int argc, char **argv)
{
GSource *src;
+ init_clocks();
+
ctx = aio_context_new();
src = aio_get_g_source(ctx);
g_source_attach(src, NULL);
@@ -650,6 +780,7 @@ int main(int argc, char **argv)
g_test_add_func("/aio/event/wait", test_wait_event_notifier);
g_test_add_func("/aio/event/wait/no-flush-cb", test_wait_event_notifier_noflush);
g_test_add_func("/aio/event/flush", test_flush_event_notifier);
+ g_test_add_func("/aio/timer/schedule", test_timer_schedule);
g_test_add_func("/aio-gsource/notify", test_source_notify);
g_test_add_func("/aio-gsource/flush", test_source_flush);
@@ -664,5 +795,6 @@ int main(int argc, char **argv)
g_test_add_func("/aio-gsource/event/wait", test_source_wait_event_notifier);
g_test_add_func("/aio-gsource/event/wait/no-flush-cb", test_source_wait_event_notifier_noflush);
g_test_add_func("/aio-gsource/event/flush", test_source_flush_event_notifier);
+ g_test_add_func("/aio-gsource/timer/schedule", test_source_timer_schedule);
return g_test_run();
}
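test_timer_schedule() is the first consumer of per-AioContext timers: aio_timer_init() attaches a caller-owned QEMUTimer to the context's timer list with a clock type and scale, and from then on the ordinary timer_mod()/timer_del() calls apply, with aio_poll() dispatching the callback once the deadline passes. The essential sequence, stripped of the test scaffolding (a sketch, not the test itself):

    #include <stdbool.h>
    #include "block/aio.h"
    #include "qemu/timer.h"

    static void fired(void *opaque)
    {
        *(bool *)opaque = true;
    }

    static void arm_one_shot(AioContext *ctx, QEMUTimer *t, bool *done)
    {
        /* attach the timer to this AioContext's timer list */
        aio_timer_init(ctx, t, QEMU_CLOCK_VIRTUAL, SCALE_NS, fired, done);

        /* absolute deadline in the timer's scale (nanoseconds here) */
        timer_mod(t, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + SCALE_MS * 750LL);

        /* a later aio_poll(ctx, true) runs fired() once the deadline is reached */
    }
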
diff --git a/tests/test-thread-pool.c b/tests/test-thread-pool.c
index 8188d1a69d..c1f8e13a9f 100644
--- a/tests/test-thread-pool.c
+++ b/tests/test-thread-pool.c
@@ -3,6 +3,7 @@
#include "block/aio.h"
#include "block/thread-pool.h"
#include "block/block.h"
+#include "qemu/timer.h"
static AioContext *ctx;
static ThreadPool *pool;
@@ -205,6 +206,8 @@ int main(int argc, char **argv)
{
int ret;
+ init_clocks();
+
ctx = aio_context_new();
pool = aio_get_thread_pool(ctx);
diff --git a/thread-pool.c b/thread-pool.c
index 5025567817..3735fd34bc 100644
--- a/thread-pool.c
+++ b/thread-pool.c
@@ -23,6 +23,7 @@
#include "block/block_int.h"
#include "qemu/event_notifier.h"
#include "block/thread-pool.h"
+#include "qemu/main-loop.h"
static void do_spawn_thread(ThreadPool *pool);
diff --git a/ui/console.c b/ui/console.c
index e3e82979d8..aad4fc9a57 100644
--- a/ui/console.c
+++ b/ui/console.c
@@ -208,8 +208,8 @@ static void gui_update(void *opaque)
}
trace_console_refresh(interval);
}
- ds->last_update = qemu_get_clock_ms(rt_clock);
- qemu_mod_timer(ds->gui_timer, ds->last_update + interval);
+ ds->last_update = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
+ timer_mod(ds->gui_timer, ds->last_update + interval);
}
static void gui_setup_refresh(DisplayState *ds)
@@ -232,12 +232,12 @@ static void gui_setup_refresh(DisplayState *ds)
}
if (need_timer && ds->gui_timer == NULL) {
- ds->gui_timer = qemu_new_timer_ms(rt_clock, gui_update, ds);
- qemu_mod_timer(ds->gui_timer, qemu_get_clock_ms(rt_clock));
+ ds->gui_timer = timer_new_ms(QEMU_CLOCK_REALTIME, gui_update, ds);
+ timer_mod(ds->gui_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
}
if (!need_timer && ds->gui_timer != NULL) {
- qemu_del_timer(ds->gui_timer);
- qemu_free_timer(ds->gui_timer);
+ timer_del(ds->gui_timer);
+ timer_free(ds->gui_timer);
ds->gui_timer = NULL;
}
@@ -1040,7 +1040,7 @@ void console_select(unsigned int index)
DisplayState *ds = s->ds;
if (active_console && active_console->cursor_timer) {
- qemu_del_timer(active_console->cursor_timer);
+ timer_del(active_console->cursor_timer);
}
active_console = s;
if (ds->have_gfx) {
@@ -1059,8 +1059,8 @@ void console_select(unsigned int index)
dpy_text_resize(s, s->width, s->height);
}
if (s->cursor_timer) {
- qemu_mod_timer(s->cursor_timer,
- qemu_get_clock_ms(rt_clock) + CONSOLE_CURSOR_PERIOD / 2);
+ timer_mod(s->cursor_timer,
+ qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + CONSOLE_CURSOR_PERIOD / 2);
}
}
}
@@ -1105,7 +1105,7 @@ static void kbd_send_chars(void *opaque)
/* characters are pending: we send them a bit later (XXX:
horrible, should change char device API) */
if (s->out_fifo.count > 0) {
- qemu_mod_timer(s->kbd_timer, qemu_get_clock_ms(rt_clock) + 1);
+ timer_mod(s->kbd_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1);
}
}
@@ -1366,7 +1366,7 @@ void update_displaychangelistener(DisplayChangeListener *dcl,
dcl->update_interval = interval;
if (!ds->refreshing && ds->update_interval > interval) {
- qemu_mod_timer(ds->gui_timer, ds->last_update + interval);
+ timer_mod(ds->gui_timer, ds->last_update + interval);
}
}
@@ -1691,8 +1691,8 @@ static void text_console_update_cursor(void *opaque)
s->cursor_visible_phase = !s->cursor_visible_phase;
graphic_hw_invalidate(s);
- qemu_mod_timer(s->cursor_timer,
- qemu_get_clock_ms(rt_clock) + CONSOLE_CURSOR_PERIOD / 2);
+ timer_mod(s->cursor_timer,
+ qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + CONSOLE_CURSOR_PERIOD / 2);
}
static const GraphicHwOps text_console_ops = {
@@ -1712,7 +1712,7 @@ static void text_console_do_init(CharDriverState *chr, DisplayState *ds)
s->out_fifo.buf = s->out_fifo_buf;
s->out_fifo.buf_size = sizeof(s->out_fifo_buf);
- s->kbd_timer = qemu_new_timer_ms(rt_clock, kbd_send_chars, s);
+ s->kbd_timer = timer_new_ms(QEMU_CLOCK_REALTIME, kbd_send_chars, s);
s->ds = ds;
s->y_displayed = 0;
@@ -1729,7 +1729,7 @@ static void text_console_do_init(CharDriverState *chr, DisplayState *ds)
}
s->cursor_timer =
- qemu_new_timer_ms(rt_clock, text_console_update_cursor, s);
+ timer_new_ms(QEMU_CLOCK_REALTIME, text_console_update_cursor, s);
s->hw_ops = &text_console_ops;
s->hw = s;
diff --git a/ui/input.c b/ui/input.c
index 92c44ca810..10d8c056f1 100644
--- a/ui/input.c
+++ b/ui/input.c
@@ -277,11 +277,11 @@ void qmp_send_key(KeyValueList *keys, bool has_hold_time, int64_t hold_time,
KeyValueList *p;
if (!key_timer) {
- key_timer = qemu_new_timer_ns(vm_clock, release_keys, NULL);
+ key_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, release_keys, NULL);
}
if (keycodes != NULL) {
- qemu_del_timer(key_timer);
+ timer_del(key_timer);
release_keys(NULL);
}
@@ -308,7 +308,7 @@ void qmp_send_key(KeyValueList *keys, bool has_hold_time, int64_t hold_time,
}
/* delayed key up events */
- qemu_mod_timer(key_timer, qemu_get_clock_ns(vm_clock) +
+ timer_mod(key_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
muldiv64(get_ticks_per_sec(), hold_time, 1000));
}
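The delayed key-up above converts hold_time, given in milliseconds, into QEMU_CLOCK_VIRTUAL nanoseconds: get_ticks_per_sec() is 10^9, so muldiv64(10^9, hold_time, 1000) equals hold_time * 10^6 ns. The same conversion in isolation (a sketch; the include chosen for muldiv64 is an assumption about where it lived at the time):

    #include "qemu-common.h"     /* muldiv64 */
    #include "qemu/timer.h"      /* get_ticks_per_sec */

    static int64_t hold_time_ms_to_ns(int64_t hold_time_ms)
    {
        /* e.g. 100ms -> 100,000,000 ns */
        return muldiv64(get_ticks_per_sec(), hold_time_ms, 1000);
    }
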
diff --git a/ui/spice-core.c b/ui/spice-core.c
index bd7a248f91..3a2cd7e0c6 100644
--- a/ui/spice-core.c
+++ b/ui/spice-core.c
@@ -63,25 +63,25 @@ static SpiceTimer *timer_add(SpiceTimerFunc func, void *opaque)
SpiceTimer *timer;
timer = g_malloc0(sizeof(*timer));
- timer->timer = qemu_new_timer_ms(rt_clock, func, opaque);
+ timer->timer = timer_new_ms(QEMU_CLOCK_REALTIME, func, opaque);
QTAILQ_INSERT_TAIL(&timers, timer, next);
return timer;
}
static void timer_start(SpiceTimer *timer, uint32_t ms)
{
- qemu_mod_timer(timer->timer, qemu_get_clock_ms(rt_clock) + ms);
+ timer_mod(timer->timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + ms);
}
static void timer_cancel(SpiceTimer *timer)
{
- qemu_del_timer(timer->timer);
+ timer_del(timer->timer);
}
static void timer_remove(SpiceTimer *timer)
{
- qemu_del_timer(timer->timer);
- qemu_free_timer(timer->timer);
+ timer_del(timer->timer);
+ timer_free(timer->timer);
QTAILQ_REMOVE(&timers, timer, next);
g_free(timer);
}
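The SpiceTimer wrapper exercises the full life cycle of the renamed API: timer_new_ms() to create, timer_mod() with an absolute millisecond deadline to start, timer_del() to cancel, and timer_del() followed by timer_free() to dispose. Condensed into one sketch (callback and delay are stand-ins):

    #include "qemu/timer.h"

    static void on_timeout(void *opaque)
    {
    }

    static void timer_lifecycle_sketch(void)
    {
        QEMUTimer *t = timer_new_ms(QEMU_CLOCK_REALTIME, on_timeout, NULL);

        timer_mod(t, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 30);  /* start */
        timer_del(t);                 /* cancel (safe if it already expired) */
        timer_free(t);                /* release, only after timer_del()     */
    }
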
diff --git a/ui/vnc-auth-sasl.h b/ui/vnc-auth-sasl.h
index 8091d689cb..3f59da67eb 100644
--- a/ui/vnc-auth-sasl.h
+++ b/ui/vnc-auth-sasl.h
@@ -33,6 +33,7 @@ typedef struct VncStateSASL VncStateSASL;
typedef struct VncDisplaySASL VncDisplaySASL;
#include "qemu/acl.h"
+#include "qemu/main-loop.h"
struct VncStateSASL {
sasl_conn_t *conn;
diff --git a/ui/vnc-auth-vencrypt.c b/ui/vnc-auth-vencrypt.c
index c59b188602..bc7032e695 100644
--- a/ui/vnc-auth-vencrypt.c
+++ b/ui/vnc-auth-vencrypt.c
@@ -25,7 +25,7 @@
*/
#include "vnc.h"
-
+#include "qemu/main-loop.h"
static void start_auth_vencrypt_subauth(VncState *vs)
{
diff --git a/ui/vnc-ws.c b/ui/vnc-ws.c
index df89315733..e304bafeb0 100644
--- a/ui/vnc-ws.c
+++ b/ui/vnc-ws.c
@@ -19,6 +19,7 @@
*/
#include "vnc.h"
+#include "qemu/main-loop.h"
#ifdef CONFIG_VNC_TLS
#include "qemu/sockets.h"
diff --git a/vl.c b/vl.c
index 1c283c9fce..dfbc071eef 100644
--- a/vl.c
+++ b/vl.c
@@ -196,7 +196,7 @@ NICInfo nd_table[MAX_NICS];
int autostart;
static int rtc_utc = 1;
static int rtc_date_offset = -1; /* -1 means no change */
-QEMUClock *rtc_clock;
+QEMUClockType rtc_clock;
int vga_interface_type = VGA_NONE;
static int full_screen = 0;
static int no_frame = 0;
@@ -805,11 +805,11 @@ static void configure_rtc(QemuOpts *opts)
value = qemu_opt_get(opts, "clock");
if (value) {
if (!strcmp(value, "host")) {
- rtc_clock = host_clock;
+ rtc_clock = QEMU_CLOCK_HOST;
} else if (!strcmp(value, "rt")) {
- rtc_clock = rt_clock;
+ rtc_clock = QEMU_CLOCK_REALTIME;
} else if (!strcmp(value, "vm")) {
- rtc_clock = vm_clock;
+ rtc_clock = QEMU_CLOCK_VIRTUAL;
} else {
fprintf(stderr, "qemu: invalid option value '%s'\n", value);
exit(1);
@@ -2965,7 +2965,7 @@ int main(int argc, char **argv, char **envp)
runstate_init();
init_clocks();
- rtc_clock = host_clock;
+ rtc_clock = QEMU_CLOCK_HOST;
qemu_cache_utils_init(envp);
@@ -3714,7 +3714,9 @@ int main(int argc, char **argv, char **envp)
old_param = 1;
break;
case QEMU_OPTION_clock:
- configure_alarms(optarg);
+ /* Clock options no longer exist. Keep this option for
+ * backward compatibility.
+ */
break;
case QEMU_OPTION_startdate:
configure_rtc_date_offset(optarg, 1);
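With rtc_clock now a QEMUClockType, configure_rtc() maps the -rtc clock= suboption directly onto the enumeration, and main() keeps the host clock as the default. The accepted spellings, as handled above:

    -rtc clock=host    ->  rtc_clock = QEMU_CLOCK_HOST      (default)
    -rtc clock=rt      ->  rtc_clock = QEMU_CLOCK_REALTIME
    -rtc clock=vm      ->  rtc_clock = QEMU_CLOCK_VIRTUAL
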
diff --git a/xen-all.c b/xen-all.c
index 21246e0ffd..eb13111361 100644
--- a/xen-all.c
+++ b/xen-all.c
@@ -606,8 +606,8 @@ static ioreq_t *cpu_get_ioreq(XenIOState *state)
port = xc_evtchn_pending(state->xce_handle);
if (port == state->bufioreq_local_port) {
- qemu_mod_timer(state->buffered_io_timer,
- BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
+ timer_mod(state->buffered_io_timer,
+ BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
return NULL;
}
@@ -828,10 +828,10 @@ static void handle_buffered_io(void *opaque)
XenIOState *state = opaque;
if (handle_buffered_iopage(state)) {
- qemu_mod_timer(state->buffered_io_timer,
- BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
+ timer_mod(state->buffered_io_timer,
+ BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
} else {
- qemu_del_timer(state->buffered_io_timer);
+ timer_del(state->buffered_io_timer);
xc_evtchn_unmask(state->xce_handle, state->bufioreq_local_port);
}
}
@@ -962,7 +962,7 @@ static void xen_main_loop_prepare(XenIOState *state)
evtchn_fd = xc_evtchn_fd(state->xce_handle);
}
- state->buffered_io_timer = qemu_new_timer_ms(rt_clock, handle_buffered_io,
+ state->buffered_io_timer = timer_new_ms(QEMU_CLOCK_REALTIME, handle_buffered_io,
state);
if (evtchn_fd != -1) {