/*
 * QEMU monitor
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "monitor-internal.h"
#include "qapi/error.h"
#include "qapi/opts-visitor.h"
#include "qapi/qapi-emit-events.h"
#include "qapi/qapi-visit-control.h"
#include "qapi/qmp/qdict.h"
#include "qemu/error-report.h"
#include "qemu/option.h"
#include "sysemu/qtest.h"
#include "trace.h"

/*
 * To prevent flooding clients, events can be throttled. The
 * throttling is calculated globally, rather than per-Monitor
 * instance.
 */
typedef struct MonitorQAPIEventState {
    QAPIEvent event;    /* Throttling state for this event type and... */
    QDict *data;        /* ... data, see qapi_event_throttle_equal() */
    QEMUTimer *timer;   /* Timer for handling delayed events */
    QDict *qdict;       /* Delayed event (if any) */
} MonitorQAPIEventState;

typedef struct {
    int64_t rate;       /* Minimum time (in ns) between two events */
} MonitorQAPIEventConf;

/* Shared monitor I/O thread */
IOThread *mon_iothread;

/* Coroutine to dispatch the requests received from I/O thread */
Coroutine *qmp_dispatcher_co;

/* Set to true when the dispatcher coroutine should terminate */
bool qmp_dispatcher_co_shutdown;

/*
 * qmp_dispatcher_co_busy is used for synchronisation between the
 * monitor thread and the main thread to ensure that the dispatcher
 * coroutine never gets scheduled a second time when it's already
 * scheduled (scheduling the same coroutine twice is forbidden).
 *
 * It is true if the coroutine is active and processing requests.
 * Additional requests may then be pushed onto mon->qmp_requests,
 * and @qmp_dispatcher_co_shutdown may be set without further ado.
 * @qmp_dispatcher_co_busy must not be woken up in this case.
 *
 * If false, you also have to set @qmp_dispatcher_co_busy to true and
 * wake up @qmp_dispatcher_co after pushing the new requests.
 *
 * The coroutine will automatically change this variable back to false
 * before it yields. Nobody else may set the variable to false.
 *
 * Access must be atomic for thread safety.
 */
bool qmp_dispatcher_co_busy;
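
/*
 * For reference, the producer-side wake-up pattern described above is
 * the one monitor_cleanup() uses further down in this file (a sketch,
 * not an additional API):
 *
 *     if (!qatomic_xchg(&qmp_dispatcher_co_busy, true)) {
 *         aio_co_wake(qmp_dispatcher_co);
 *     }
 */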

/*
 * Protects mon_list, monitor_qapi_event_state, coroutine_mon,
 * monitor_destroyed.
 */
QemuMutex monitor_lock;
static GHashTable *monitor_qapi_event_state;
static GHashTable *coroutine_mon; /* Maps Coroutine* to Monitor* */

MonitorList mon_list;
int mon_refcount;
static bool monitor_destroyed;

Monitor *monitor_cur(void)
{
    Monitor *mon;

    qemu_mutex_lock(&monitor_lock);
    mon = g_hash_table_lookup(coroutine_mon, qemu_coroutine_self());
    qemu_mutex_unlock(&monitor_lock);

    return mon;
}

/**
 * Sets a new current monitor and returns the old one.
 *
 * If a non-NULL monitor is set for a coroutine, another call
 * resetting it to NULL is required before the coroutine terminates,
 * otherwise a stale entry would remain in the hash table.
 */
Monitor *monitor_set_cur(Coroutine *co, Monitor *mon)
{
    Monitor *old_monitor = monitor_cur();

    qemu_mutex_lock(&monitor_lock);
    if (mon) {
        g_hash_table_replace(coroutine_mon, co, mon);
    } else {
        g_hash_table_remove(coroutine_mon, co);
    }
    qemu_mutex_unlock(&monitor_lock);

    return old_monitor;
}
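
/*
 * A typical (sketched, not prescriptive) calling pattern for the pair
 * above, e.g. around running a command handler in coroutine @co:
 *
 *     Monitor *old_mon = monitor_set_cur(co, mon);
 *     ... run the handler, which may call monitor_cur() ...
 *     monitor_set_cur(co, old_mon);
 *
 * Restoring the previous value (ultimately NULL) keeps coroutine_mon
 * free of stale entries, as required by the comment above.
 */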

/**
 * Is the current monitor, if any, a QMP monitor?
 */
bool monitor_cur_is_qmp(void)
{
    Monitor *cur_mon = monitor_cur();

    return cur_mon && monitor_is_qmp(cur_mon);
}

/**
 * Is @mon using readline?
 * Note: not all HMP monitors use readline, e.g., gdbserver has a
 * non-interactive HMP monitor, so readline is not used there.
 */
static inline bool monitor_uses_readline(const MonitorHMP *mon)
{
    return mon->use_readline;
}

static inline bool monitor_is_hmp_non_interactive(const Monitor *mon)
{
    if (monitor_is_qmp(mon)) {
        return false;
    }

    return !monitor_uses_readline(container_of(mon, MonitorHMP, common));
}

static void monitor_flush_locked(Monitor *mon);

static gboolean monitor_unblocked(void *do_not_use, GIOCondition cond,
                                  void *opaque)
{
    Monitor *mon = opaque;

    QEMU_LOCK_GUARD(&mon->mon_lock);
    mon->out_watch = 0;
    monitor_flush_locked(mon);
    return FALSE;
}

/* Caller must hold mon->mon_lock */
static void monitor_flush_locked(Monitor *mon)
{
    int rc;
    size_t len;
    const char *buf;

    if (mon->skip_flush) {
        return;
    }

    buf = mon->outbuf->str;
    len = mon->outbuf->len;

    if (len && !mon->mux_out) {
        rc = qemu_chr_fe_write(&mon->chr, (const uint8_t *) buf, len);
        if ((rc < 0 && errno != EAGAIN) || (rc == len)) {
            /* all flushed or error */
            g_string_truncate(mon->outbuf, 0);
            return;
        }
        if (rc > 0) {
            /* partial write */
            g_string_erase(mon->outbuf, 0, rc);
        }
        if (mon->out_watch == 0) {
            mon->out_watch =
                qemu_chr_fe_add_watch(&mon->chr, G_IO_OUT | G_IO_HUP,
                                      monitor_unblocked, mon);
        }
    }
}

void monitor_flush(Monitor *mon)
{
    QEMU_LOCK_GUARD(&mon->mon_lock);
    monitor_flush_locked(mon);
}

/* flush at every end of line */
int monitor_puts(Monitor *mon, const char *str)
{
    int i;
    char c;

    qemu_mutex_lock(&mon->mon_lock);
    for (i = 0; str[i]; i++) {
        c = str[i];
        if (c == '\n') {
            g_string_append_c(mon->outbuf, '\r');
        }
        g_string_append_c(mon->outbuf, c);
        if (c == '\n') {
            monitor_flush_locked(mon);
        }
    }
    qemu_mutex_unlock(&mon->mon_lock);

    return i;
}

int monitor_vprintf(Monitor *mon, const char *fmt, va_list ap)
{
    char *buf;
    int n;

    if (!mon) {
        return -1;
    }

    if (monitor_is_qmp(mon)) {
        return -1;
    }

    buf = g_strdup_vprintf(fmt, ap);
    n = monitor_puts(mon, buf);
    g_free(buf);
    return n;
}

int monitor_printf(Monitor *mon, const char *fmt, ...)
{
    int ret;

    va_list ap;
    va_start(ap, fmt);
    ret = monitor_vprintf(mon, fmt, ap);
    va_end(ap);
    return ret;
}

void monitor_printc(Monitor *mon, int c)
{
    monitor_printf(mon, "'");
    switch(c) {
    case '\'':
        monitor_printf(mon, "\\'");
        break;
    case '\\':
        monitor_printf(mon, "\\\\");
        break;
    case '\n':
        monitor_printf(mon, "\\n");
        break;
    case '\r':
        monitor_printf(mon, "\\r");
        break;
    default:
        if (c >= 32 && c <= 126) {
            monitor_printf(mon, "%c", c);
        } else {
            monitor_printf(mon, "\\x%02x", c);
        }
        break;
    }
    monitor_printf(mon, "'");
}

/*
 * Print to current monitor if we have one, else to stderr.
 */
int error_vprintf(const char *fmt, va_list ap)
{
    Monitor *cur_mon = monitor_cur();

    if (cur_mon && !monitor_cur_is_qmp()) {
        return monitor_vprintf(cur_mon, fmt, ap);
    }
    return vfprintf(stderr, fmt, ap);
}

int error_vprintf_unless_qmp(const char *fmt, va_list ap)
{
    Monitor *cur_mon = monitor_cur();

    if (!cur_mon) {
        return vfprintf(stderr, fmt, ap);
    }
    if (!monitor_cur_is_qmp()) {
        return monitor_vprintf(cur_mon, fmt, ap);
    }
    return -1;
}

int error_printf_unless_qmp(const char *fmt, ...)
{
    va_list ap;
    int ret;

    va_start(ap, fmt);
    ret = error_vprintf_unless_qmp(fmt, ap);
    va_end(ap);
    return ret;
}

static MonitorQAPIEventConf monitor_qapi_event_conf[QAPI_EVENT__MAX] = {
    /* Limit guest-triggerable events to 1 per second */
    [QAPI_EVENT_RTC_CHANGE]        = { 1000 * SCALE_MS },
    [QAPI_EVENT_WATCHDOG]          = { 1000 * SCALE_MS },
    [QAPI_EVENT_BALLOON_CHANGE]    = { 1000 * SCALE_MS },
    [QAPI_EVENT_QUORUM_REPORT_BAD] = { 1000 * SCALE_MS },
    [QAPI_EVENT_QUORUM_FAILURE]    = { 1000 * SCALE_MS },
    [QAPI_EVENT_VSERPORT_CHANGE]   = { 1000 * SCALE_MS },
    [QAPI_EVENT_MEMORY_DEVICE_SIZE_CHANGE] = { 1000 * SCALE_MS },
};

/*
 * Return the clock to use for recording an event's time.
 * It's QEMU_CLOCK_REALTIME, except for qtests it's
 * QEMU_CLOCK_VIRTUAL, to support testing rate limits.
 * Beware: result is invalid before configure_accelerator().
 */
static inline QEMUClockType monitor_get_event_clock(void)
{
    return qtest_enabled() ? QEMU_CLOCK_VIRTUAL : QEMU_CLOCK_REALTIME;
}

/*
 * Broadcast an event to all monitors.
 * @qdict is the event object. Its member "event" must match @event.
 * Caller must hold monitor_lock.
 */
static void monitor_qapi_event_emit(QAPIEvent event, QDict *qdict)
{
    Monitor *mon;
    MonitorQMP *qmp_mon;

    trace_monitor_protocol_event_emit(event, qdict);
    QTAILQ_FOREACH(mon, &mon_list, entry) {
        if (!monitor_is_qmp(mon)) {
            continue;
        }

        qmp_mon = container_of(mon, MonitorQMP, common);
        if (qmp_mon->commands != &qmp_cap_negotiation_commands) {
            qmp_send_response(qmp_mon, qdict);
        }
    }
}

static void monitor_qapi_event_handler(void *opaque);

/*
 * Queue a new event for emission to Monitor instances,
 * applying any rate limiting if required.
 */
static void
monitor_qapi_event_queue_no_reenter(QAPIEvent event, QDict *qdict)
{
    MonitorQAPIEventConf *evconf;
    MonitorQAPIEventState *evstate;

    assert(event < QAPI_EVENT__MAX);
    evconf = &monitor_qapi_event_conf[event];
    trace_monitor_protocol_event_queue(event, qdict, evconf->rate);

    QEMU_LOCK_GUARD(&monitor_lock);

    if (!evconf->rate) {
        /* Unthrottled event */
        monitor_qapi_event_emit(event, qdict);
    } else {
        QDict *data = qobject_to(QDict, qdict_get(qdict, "data"));
        MonitorQAPIEventState key = { .event = event, .data = data };

        evstate = g_hash_table_lookup(monitor_qapi_event_state, &key);
        assert(!evstate || timer_pending(evstate->timer));

        if (evstate) {
            /*
             * Timer is pending for (at least) evconf->rate ns after
             * last send. Store event for sending when timer fires,
             * replacing a prior stored event if any.
             */
            qobject_unref(evstate->qdict);
            evstate->qdict = qobject_ref(qdict);
        } else {
            /*
             * Last send was (at least) evconf->rate ns ago.
             * Send immediately, and arm the timer to call
             * monitor_qapi_event_handler() in evconf->rate ns. Any
             * events arriving before then will be delayed until then.
             */
            int64_t now = qemu_clock_get_ns(monitor_get_event_clock());

            monitor_qapi_event_emit(event, qdict);

            evstate = g_new(MonitorQAPIEventState, 1);
            evstate->event = event;
            evstate->data = qobject_ref(data);
            evstate->qdict = NULL;
            evstate->timer = timer_new_ns(monitor_get_event_clock(),
                                          monitor_qapi_event_handler,
                                          evstate);
            g_hash_table_add(monitor_qapi_event_state, evstate);
            timer_mod_ns(evstate->timer, now + evconf->rate);
        }
    }
}

void qapi_event_emit(QAPIEvent event, QDict *qdict)
{
    /*
     * monitor_qapi_event_queue_no_reenter() is not reentrant: it
     * would deadlock on monitor_lock. Work around by queueing
     * events in thread-local storage.
     * TODO: remove this, make it re-enter safe.
     */
    typedef struct MonitorQapiEvent {
        QAPIEvent event;
        QDict *qdict;
        QSIMPLEQ_ENTRY(MonitorQapiEvent) entry;
    } MonitorQapiEvent;
    static __thread QSIMPLEQ_HEAD(, MonitorQapiEvent) event_queue;
    static __thread bool reentered;
    MonitorQapiEvent *ev;

    if (!reentered) {
        QSIMPLEQ_INIT(&event_queue);
    }

    ev = g_new(MonitorQapiEvent, 1);
    ev->qdict = qobject_ref(qdict);
    ev->event = event;
    QSIMPLEQ_INSERT_TAIL(&event_queue, ev, entry);
    if (reentered) {
        return;
    }

    reentered = true;

    while ((ev = QSIMPLEQ_FIRST(&event_queue)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&event_queue, entry);
        monitor_qapi_event_queue_no_reenter(ev->event, ev->qdict);
        qobject_unref(ev->qdict);
        g_free(ev);
    }

    reentered = false;
}

/*
 * This function runs evconf->rate ns after sending a throttled
 * event.
 * If another event has since been stored, send it.
 */
static void monitor_qapi_event_handler(void *opaque)
{
    MonitorQAPIEventState *evstate = opaque;
    MonitorQAPIEventConf *evconf = &monitor_qapi_event_conf[evstate->event];

    trace_monitor_protocol_event_handler(evstate->event, evstate->qdict);
    QEMU_LOCK_GUARD(&monitor_lock);

    if (evstate->qdict) {
        int64_t now = qemu_clock_get_ns(monitor_get_event_clock());

        monitor_qapi_event_emit(evstate->event, evstate->qdict);
        qobject_unref(evstate->qdict);
        evstate->qdict = NULL;
        timer_mod_ns(evstate->timer, now + evconf->rate);
    } else {
        g_hash_table_remove(monitor_qapi_event_state, evstate);
        qobject_unref(evstate->data);
        timer_free(evstate->timer);
        g_free(evstate);
    }
}

static unsigned int qapi_event_throttle_hash(const void *key)
{
    const MonitorQAPIEventState *evstate = key;
    unsigned int hash = evstate->event * 255;

    if (evstate->event == QAPI_EVENT_VSERPORT_CHANGE) {
        hash += g_str_hash(qdict_get_str(evstate->data, "id"));
    }

    if (evstate->event == QAPI_EVENT_QUORUM_REPORT_BAD) {
        hash += g_str_hash(qdict_get_str(evstate->data, "node-name"));
    }

    if (evstate->event == QAPI_EVENT_MEMORY_DEVICE_SIZE_CHANGE) {
        hash += g_str_hash(qdict_get_str(evstate->data, "qom-path"));
    }

    return hash;
}

static gboolean qapi_event_throttle_equal(const void *a, const void *b)
{
    const MonitorQAPIEventState *eva = a;
    const MonitorQAPIEventState *evb = b;

    if (eva->event != evb->event) {
        return FALSE;
    }

    if (eva->event == QAPI_EVENT_VSERPORT_CHANGE) {
        return !strcmp(qdict_get_str(eva->data, "id"),
                       qdict_get_str(evb->data, "id"));
    }

    if (eva->event == QAPI_EVENT_QUORUM_REPORT_BAD) {
        return !strcmp(qdict_get_str(eva->data, "node-name"),
                       qdict_get_str(evb->data, "node-name"));
    }

    if (eva->event == QAPI_EVENT_MEMORY_DEVICE_SIZE_CHANGE) {
        return !strcmp(qdict_get_str(eva->data, "qom-path"),
                       qdict_get_str(evb->data, "qom-path"));
    }

    return TRUE;
}

int monitor_suspend(Monitor *mon)
{
    if (monitor_is_hmp_non_interactive(mon)) {
        return -ENOTTY;
    }

    qatomic_inc(&mon->suspend_cnt);

    if (mon->use_io_thread) {
        /*
         * Kick I/O thread to make sure this takes effect. It'll be
         * evaluated again in prepare() of the watch object.
         */
        aio_notify(iothread_get_aio_context(mon_iothread));
    }

    trace_monitor_suspend(mon, 1);
    return 0;
}

static void monitor_accept_input(void *opaque)
{
    Monitor *mon = opaque;

    qemu_chr_fe_accept_input(&mon->chr);
}

void monitor_resume(Monitor *mon)
{
    if (monitor_is_hmp_non_interactive(mon)) {
        return;
    }

    if (qatomic_dec_fetch(&mon->suspend_cnt) == 0) {
        AioContext *ctx;

        if (mon->use_io_thread) {
            ctx = iothread_get_aio_context(mon_iothread);
        } else {
            ctx = qemu_get_aio_context();
        }

        if (!monitor_is_qmp(mon)) {
            MonitorHMP *hmp_mon = container_of(mon, MonitorHMP, common);
            assert(hmp_mon->rs);
            readline_show_prompt(hmp_mon->rs);
        }

        aio_bh_schedule_oneshot(ctx, monitor_accept_input, mon);
    }

    trace_monitor_suspend(mon, -1);
}

int monitor_can_read(void *opaque)
{
    Monitor *mon = opaque;

    return !qatomic_mb_read(&mon->suspend_cnt);
}

void monitor_list_append(Monitor *mon)
{
    qemu_mutex_lock(&monitor_lock);
    /*
     * This prevents inserting new monitors during monitor_cleanup().
     * A cleaner solution would involve the main thread telling other
     * threads to terminate, waiting for their termination.
     */
    if (!monitor_destroyed) {
        QTAILQ_INSERT_HEAD(&mon_list, mon, entry);
        mon = NULL;
    }
    qemu_mutex_unlock(&monitor_lock);

    if (mon) {
        monitor_data_destroy(mon);
        g_free(mon);
    }
}

static void monitor_iothread_init(void)
{
    mon_iothread = iothread_create("mon_iothread", &error_abort);
}

void monitor_data_init(Monitor *mon, bool is_qmp, bool skip_flush,
                       bool use_io_thread)
{
    if (use_io_thread && !mon_iothread) {
        monitor_iothread_init();
    }
    qemu_mutex_init(&mon->mon_lock);
    mon->is_qmp = is_qmp;
    mon->outbuf = g_string_new(NULL);
    mon->skip_flush = skip_flush;
    mon->use_io_thread = use_io_thread;
}

void monitor_data_destroy(Monitor *mon)
{
    g_free(mon->mon_cpu_path);
    qemu_chr_fe_deinit(&mon->chr, false);
    if (monitor_is_qmp(mon)) {
        monitor_data_destroy_qmp(container_of(mon, MonitorQMP, common));
    } else {
        readline_free(container_of(mon, MonitorHMP, common)->rs);
    }
    g_string_free(mon->outbuf, true);
    qemu_mutex_destroy(&mon->mon_lock);
}

void monitor_cleanup(void)
{
    /*
     * The dispatcher needs to stop before destroying the monitor and
     * the I/O thread.
     *
     * We need to poll both qemu_aio_context and iohandler_ctx to make
     * sure that the dispatcher coroutine keeps making progress and
     * eventually terminates. qemu_aio_context is automatically
     * polled by calling AIO_WAIT_WHILE_UNLOCKED on it, but we must poll
     * iohandler_ctx manually.
     *
     * Letting the iothread continue while shutting down the dispatcher
     * means that new requests may still be coming in. This is okay,
     * we'll just leave them in the queue without sending a response
     * and monitor_data_destroy() will free them.
     */
    qmp_dispatcher_co_shutdown = true;
    if (!qatomic_xchg(&qmp_dispatcher_co_busy, true)) {
        aio_co_wake(qmp_dispatcher_co);
    }

    AIO_WAIT_WHILE_UNLOCKED(NULL,
                            (aio_poll(iohandler_get_aio_context(), false),
                             qatomic_mb_read(&qmp_dispatcher_co_busy)));

    /*
     * We need to explicitly stop the I/O thread (but not destroy it),
     * clean up the monitor resources, then destroy the I/O thread since
     * we need to unregister from chardev below in
     * monitor_data_destroy(), and chardev is not thread-safe yet
     */
    if (mon_iothread) {
        iothread_stop(mon_iothread);
    }

    /* Flush output buffers and destroy monitors */
    qemu_mutex_lock(&monitor_lock);
    monitor_destroyed = true;
    while (!QTAILQ_EMPTY(&mon_list)) {
        Monitor *mon = QTAILQ_FIRST(&mon_list);
        QTAILQ_REMOVE(&mon_list, mon, entry);
        /* Permit QAPI event emission from character frontend release */
        qemu_mutex_unlock(&monitor_lock);
        monitor_flush(mon);
        monitor_data_destroy(mon);
        qemu_mutex_lock(&monitor_lock);
        g_free(mon);
    }
    qemu_mutex_unlock(&monitor_lock);

    if (mon_iothread) {
        iothread_destroy(mon_iothread);
        mon_iothread = NULL;
    }
}

static void monitor_qapi_event_init(void)
{
    monitor_qapi_event_state = g_hash_table_new(qapi_event_throttle_hash,
                                                qapi_event_throttle_equal);
}

void monitor_init_globals(void)
{
    monitor_qapi_event_init();
    qemu_mutex_init(&monitor_lock);
    coroutine_mon = g_hash_table_new(NULL, NULL);

    /*
     * The dispatcher BH must run in the main loop thread, since we
     * have commands assuming that context. It would be nice to get
     * rid of those assumptions.
     */
    qmp_dispatcher_co = qemu_coroutine_create(monitor_qmp_dispatcher_co, NULL);
    qatomic_mb_set(&qmp_dispatcher_co_busy, true);
    aio_co_schedule(iohandler_get_aio_context(), qmp_dispatcher_co);
}

int monitor_init(MonitorOptions *opts, bool allow_hmp, Error **errp)
{
    ERRP_GUARD();
    Chardev *chr;

    chr = qemu_chr_find(opts->chardev);
    if (chr == NULL) {
        error_setg(errp, "chardev \"%s\" not found", opts->chardev);
        return -1;
    }

    if (!opts->has_mode) {
        opts->mode = allow_hmp ? MONITOR_MODE_READLINE : MONITOR_MODE_CONTROL;
    }

    switch (opts->mode) {
    case MONITOR_MODE_CONTROL:
        monitor_init_qmp(chr, opts->pretty, errp);
        break;
    case MONITOR_MODE_READLINE:
        if (!allow_hmp) {
            error_setg(errp, "Only QMP is supported");
            return -1;
        }
        if (opts->pretty) {
            error_setg(errp, "'pretty' is not compatible with HMP monitors");
            return -1;
        }
        monitor_init_hmp(chr, true, errp);
        break;
    default:
        g_assert_not_reached();
    }

    return *errp ? -1 : 0;
}

int monitor_init_opts(QemuOpts *opts, Error **errp)
{
    Visitor *v;
    MonitorOptions *options;
    int ret;

    v = opts_visitor_new(opts);
    visit_type_MonitorOptions(v, NULL, &options, errp);
    visit_free(v);
    if (!options) {
        return -1;
    }

    ret = monitor_init(options, true, errp);
    qapi_free_MonitorOptions(options);
    return ret;
}

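/*
 * The option list below backs the -mon command-line option. A hedged
 * usage sketch (the chardev id "mon0" and socket path are illustrative
 * only, not anything this file mandates):
 *
 *     -chardev socket,id=mon0,path=/tmp/qmp.sock,server=on,wait=off \
 *     -mon chardev=mon0,mode=control,pretty=on
 */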
QemuOptsList qemu_mon_opts = {
    .name = "mon",
    .implied_opt_name = "chardev",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_mon_opts.head),
    .desc = {
        {
            .name = "mode",
            .type = QEMU_OPT_STRING,
        },{
            .name = "chardev",
            .type = QEMU_OPT_STRING,
        },{
            .name = "pretty",
            .type = QEMU_OPT_BOOL,
        },
        { /* end of list */ }
    },
};