/*
 * QEMU monitor
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "monitor-internal.h"
#include "qapi/error.h"
#include "qapi/opts-visitor.h"
#include "qapi/qapi-emit-events.h"
#include "qapi/qapi-visit-control.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "qemu/error-report.h"
#include "qemu/option.h"
#include "sysemu/qtest.h"
#include "sysemu/sysemu.h"
#include "trace.h"

/*
 * To prevent flooding clients, events can be throttled. The
 * throttling is calculated globally, rather than per-Monitor
 * instance.
 */
typedef struct MonitorQAPIEventState {
    QAPIEvent event;    /* Throttling state for this event type and... */
    QDict *data;        /* ... data, see qapi_event_throttle_equal() */
    QEMUTimer *timer;   /* Timer for handling delayed events */
    QDict *qdict;       /* Delayed event (if any) */
} MonitorQAPIEventState;

typedef struct {
    int64_t rate;       /* Minimum time (in ns) between two events */
} MonitorQAPIEventConf;
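
/*
 * For illustration: with rate = 1000 * SCALE_MS, as used in
 * monitor_qapi_event_conf[] below, at most one event per throttle key
 * is emitted per second.  Events arriving while the timer is pending
 * replace the stored event, so only the most recent one is delivered
 * when the timer fires.
 */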

/* Shared monitor I/O thread */
IOThread *mon_iothread;

/* Bottom half to dispatch the requests received from I/O thread */
QEMUBH *qmp_dispatcher_bh;

/* Protects mon_list, monitor_qapi_event_state, monitor_destroyed. */
QemuMutex monitor_lock;
static GHashTable *monitor_qapi_event_state;

MonitorList mon_list;
int mon_refcount;
static bool monitor_destroyed;

static __thread Monitor *cur_monitor;

Monitor *monitor_cur(void)
{
    return cur_monitor;
}

/**
 * Sets a new current monitor and returns the old one.
 */
Monitor *monitor_set_cur(Monitor *mon)
{
    Monitor *old_monitor = cur_monitor;

    cur_monitor = mon;
    return old_monitor;
}

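/*
 * Typical (illustrative) usage: callers save and restore the previous
 * monitor around a scoped use, e.g. while running a command handler:
 *
 *     Monitor *old_mon = monitor_set_cur(mon);
 *     ...run handler...
 *     monitor_set_cur(old_mon);
 */
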
/**
 * Is the current monitor, if any, a QMP monitor?
 */
bool monitor_cur_is_qmp(void)
{
    Monitor *cur_mon = monitor_cur();

    return cur_mon && monitor_is_qmp(cur_mon);
}

/**
 * Is @mon using readline?
 * Note: not all HMP monitors use readline, e.g., gdbserver has a
 * non-interactive HMP monitor, so readline is not used there.
 */
static inline bool monitor_uses_readline(const MonitorHMP *mon)
{
    return mon->use_readline;
}

static inline bool monitor_is_hmp_non_interactive(const Monitor *mon)
{
    if (monitor_is_qmp(mon)) {
        return false;
    }

    return !monitor_uses_readline(container_of(mon, MonitorHMP, common));
}

static void monitor_flush_locked(Monitor *mon);

static gboolean monitor_unblocked(GIOChannel *chan, GIOCondition cond,
                                  void *opaque)
{
    Monitor *mon = opaque;

    qemu_mutex_lock(&mon->mon_lock);
    mon->out_watch = 0;
    monitor_flush_locked(mon);
    qemu_mutex_unlock(&mon->mon_lock);
    return FALSE;
}

/* Caller must hold mon->mon_lock */
static void monitor_flush_locked(Monitor *mon)
{
    int rc;
    size_t len;
    const char *buf;

    if (mon->skip_flush) {
        return;
    }

    buf = qstring_get_str(mon->outbuf);
    len = qstring_get_length(mon->outbuf);

    if (len && !mon->mux_out) {
        rc = qemu_chr_fe_write(&mon->chr, (const uint8_t *) buf, len);
        if ((rc < 0 && errno != EAGAIN) || (rc == len)) {
            /* all flushed or error */
            qobject_unref(mon->outbuf);
            mon->outbuf = qstring_new();
            return;
        }
        if (rc > 0) {
            /* partial write */
            QString *tmp = qstring_from_str(buf + rc);
            qobject_unref(mon->outbuf);
            mon->outbuf = tmp;
        }
        if (mon->out_watch == 0) {
            mon->out_watch =
                qemu_chr_fe_add_watch(&mon->chr, G_IO_OUT | G_IO_HUP,
                                      monitor_unblocked, mon);
        }
    }
}

void monitor_flush(Monitor *mon)
{
    qemu_mutex_lock(&mon->mon_lock);
    monitor_flush_locked(mon);
    qemu_mutex_unlock(&mon->mon_lock);
}

/* flush at every end of line */
int monitor_puts(Monitor *mon, const char *str)
{
    int i;
    char c;

    qemu_mutex_lock(&mon->mon_lock);
    for (i = 0; str[i]; i++) {
        c = str[i];
        if (c == '\n') {
            qstring_append_chr(mon->outbuf, '\r');
        }
        qstring_append_chr(mon->outbuf, c);
        if (c == '\n') {
            monitor_flush_locked(mon);
        }
    }
    qemu_mutex_unlock(&mon->mon_lock);

    return i;
}

int monitor_vprintf(Monitor *mon, const char *fmt, va_list ap)
{
    char *buf;
    int n;

    if (!mon) {
        return -1;
    }

    if (monitor_is_qmp(mon)) {
        return -1;
    }

    buf = g_strdup_vprintf(fmt, ap);
    n = monitor_puts(mon, buf);
    g_free(buf);
    return n;
}

int monitor_printf(Monitor *mon, const char *fmt, ...)
{
    int ret;

    va_list ap;
    va_start(ap, fmt);
    ret = monitor_vprintf(mon, fmt, ap);
    va_end(ap);
    return ret;
}

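/*
 * Illustrative use: HMP command handlers produce their human-readable
 * output through monitor_printf(), e.g.
 *
 *     monitor_printf(mon, "balloon: actual=%" PRId64 "\n", actual);
 *
 * For a QMP monitor, monitor_vprintf() refuses to print and returns -1,
 * since QMP output must consist of well-formed JSON responses only.
 */
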
/*
 * Print to current monitor if we have one, else to stderr.
 */
int error_vprintf(const char *fmt, va_list ap)
{
    Monitor *cur_mon = monitor_cur();

    if (cur_mon && !monitor_cur_is_qmp()) {
        return monitor_vprintf(cur_mon, fmt, ap);
    }
    return vfprintf(stderr, fmt, ap);
}

int error_vprintf_unless_qmp(const char *fmt, va_list ap)
{
    Monitor *cur_mon = monitor_cur();

    if (!cur_mon) {
        return vfprintf(stderr, fmt, ap);
    }
    if (!monitor_cur_is_qmp()) {
        return monitor_vprintf(cur_mon, fmt, ap);
    }
    return -1;
}


static MonitorQAPIEventConf monitor_qapi_event_conf[QAPI_EVENT__MAX] = {
    /* Limit guest-triggerable events to 1 per second */
    [QAPI_EVENT_RTC_CHANGE]        = { 1000 * SCALE_MS },
    [QAPI_EVENT_WATCHDOG]          = { 1000 * SCALE_MS },
    [QAPI_EVENT_BALLOON_CHANGE]    = { 1000 * SCALE_MS },
    [QAPI_EVENT_QUORUM_REPORT_BAD] = { 1000 * SCALE_MS },
    [QAPI_EVENT_QUORUM_FAILURE]    = { 1000 * SCALE_MS },
    [QAPI_EVENT_VSERPORT_CHANGE]   = { 1000 * SCALE_MS },
    [QAPI_EVENT_MEMORY_DEVICE_SIZE_CHANGE] = { 1000 * SCALE_MS },
};

/*
 * Return the clock to use for recording an event's time.
 * It's QEMU_CLOCK_REALTIME, except for qtests it's
 * QEMU_CLOCK_VIRTUAL, to support testing rate limits.
 * Beware: result is invalid before configure_accelerator().
 */
static inline QEMUClockType monitor_get_event_clock(void)
{
    return qtest_enabled() ? QEMU_CLOCK_VIRTUAL : QEMU_CLOCK_REALTIME;
}

/*
 * Broadcast an event to all monitors.
 * @qdict is the event object. Its member "event" must match @event.
 * Caller must hold monitor_lock.
 */
static void monitor_qapi_event_emit(QAPIEvent event, QDict *qdict)
{
    Monitor *mon;
    MonitorQMP *qmp_mon;

    trace_monitor_protocol_event_emit(event, qdict);
    QTAILQ_FOREACH(mon, &mon_list, entry) {
        if (!monitor_is_qmp(mon)) {
            continue;
        }

        qmp_mon = container_of(mon, MonitorQMP, common);
        if (qmp_mon->commands != &qmp_cap_negotiation_commands) {
            qmp_send_response(qmp_mon, qdict);
        }
    }
}

static void monitor_qapi_event_handler(void *opaque);

/*
 * Queue a new event for emission to Monitor instances,
 * applying any rate limiting if required.
 */
static void
monitor_qapi_event_queue_no_reenter(QAPIEvent event, QDict *qdict)
{
    MonitorQAPIEventConf *evconf;
    MonitorQAPIEventState *evstate;

    assert(event < QAPI_EVENT__MAX);
    evconf = &monitor_qapi_event_conf[event];
    trace_monitor_protocol_event_queue(event, qdict, evconf->rate);

    qemu_mutex_lock(&monitor_lock);

    if (!evconf->rate) {
        /* Unthrottled event */
        monitor_qapi_event_emit(event, qdict);
    } else {
        QDict *data = qobject_to(QDict, qdict_get(qdict, "data"));
        MonitorQAPIEventState key = { .event = event, .data = data };

        evstate = g_hash_table_lookup(monitor_qapi_event_state, &key);
        assert(!evstate || timer_pending(evstate->timer));

        if (evstate) {
            /*
             * Timer is pending for (at least) evconf->rate ns after
             * last send. Store event for sending when timer fires,
             * replacing a prior stored event if any.
             */
            qobject_unref(evstate->qdict);
            evstate->qdict = qobject_ref(qdict);
        } else {
            /*
             * Last send was (at least) evconf->rate ns ago.
             * Send immediately, and arm the timer to call
             * monitor_qapi_event_handler() in evconf->rate ns. Any
             * events arriving before then will be delayed until then.
             */
            int64_t now = qemu_clock_get_ns(monitor_get_event_clock());

            monitor_qapi_event_emit(event, qdict);

            evstate = g_new(MonitorQAPIEventState, 1);
            evstate->event = event;
            evstate->data = qobject_ref(data);
            evstate->qdict = NULL;
            evstate->timer = timer_new_ns(monitor_get_event_clock(),
                                          monitor_qapi_event_handler,
                                          evstate);
            g_hash_table_add(monitor_qapi_event_state, evstate);
            timer_mod_ns(evstate->timer, now + evconf->rate);
        }
    }

    qemu_mutex_unlock(&monitor_lock);
}

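/*
 * Illustrative timeline for a throttled event type with rate R:
 *
 *   t0          event #1 arrives: emitted at once, timer armed for t0 + R
 *   t0 + 0.2R   event #2 arrives: stored in evstate->qdict
 *   t0 + 0.7R   event #3 arrives: replaces stored event #2
 *   t0 + R      timer fires: event #3 emitted, timer re-armed for t0 + 2R
 *   t0 + 2R     timer fires with nothing stored: throttling state dropped
 */
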
void qapi_event_emit(QAPIEvent event, QDict *qdict)
{
    /*
     * monitor_qapi_event_queue_no_reenter() is not reentrant: it
     * would deadlock on monitor_lock. Work around by queueing
     * events in thread-local storage.
     * TODO: remove this, make it re-enter safe.
     */
    typedef struct MonitorQapiEvent {
        QAPIEvent event;
        QDict *qdict;
        QSIMPLEQ_ENTRY(MonitorQapiEvent) entry;
    } MonitorQapiEvent;
    static __thread QSIMPLEQ_HEAD(, MonitorQapiEvent) event_queue;
    static __thread bool reentered;
    MonitorQapiEvent *ev;

    if (!reentered) {
        QSIMPLEQ_INIT(&event_queue);
    }

    ev = g_new(MonitorQapiEvent, 1);
    ev->qdict = qobject_ref(qdict);
    ev->event = event;
    QSIMPLEQ_INSERT_TAIL(&event_queue, ev, entry);
    if (reentered) {
        return;
    }

    reentered = true;

    while ((ev = QSIMPLEQ_FIRST(&event_queue)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&event_queue, entry);
        monitor_qapi_event_queue_no_reenter(ev->event, ev->qdict);
        qobject_unref(ev->qdict);
        g_free(ev);
    }

    reentered = false;
}

/*
 * This function runs evconf->rate ns after sending a throttled
 * event.
 * If another event has since been stored, send it.
 */
static void monitor_qapi_event_handler(void *opaque)
{
    MonitorQAPIEventState *evstate = opaque;
    MonitorQAPIEventConf *evconf = &monitor_qapi_event_conf[evstate->event];

    trace_monitor_protocol_event_handler(evstate->event, evstate->qdict);
    qemu_mutex_lock(&monitor_lock);

    if (evstate->qdict) {
        int64_t now = qemu_clock_get_ns(monitor_get_event_clock());

        monitor_qapi_event_emit(evstate->event, evstate->qdict);
        qobject_unref(evstate->qdict);
        evstate->qdict = NULL;
        timer_mod_ns(evstate->timer, now + evconf->rate);
    } else {
        g_hash_table_remove(monitor_qapi_event_state, evstate);
        qobject_unref(evstate->data);
        timer_free(evstate->timer);
        g_free(evstate);
    }

    qemu_mutex_unlock(&monitor_lock);
}

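/*
 * Throttling state is keyed on the event type alone, except for events
 * whose independent sources must not throttle each other:
 * VSERPORT_CHANGE is additionally keyed on "id", and QUORUM_REPORT_BAD
 * on "node-name" (see the hash and equality functions below).
 */
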
static unsigned int qapi_event_throttle_hash(const void *key)
{
    const MonitorQAPIEventState *evstate = key;
    unsigned int hash = evstate->event * 255;

    if (evstate->event == QAPI_EVENT_VSERPORT_CHANGE) {
        hash += g_str_hash(qdict_get_str(evstate->data, "id"));
    }

    if (evstate->event == QAPI_EVENT_QUORUM_REPORT_BAD) {
        hash += g_str_hash(qdict_get_str(evstate->data, "node-name"));
    }

    return hash;
}

static gboolean qapi_event_throttle_equal(const void *a, const void *b)
{
    const MonitorQAPIEventState *eva = a;
    const MonitorQAPIEventState *evb = b;

    if (eva->event != evb->event) {
        return FALSE;
    }

    if (eva->event == QAPI_EVENT_VSERPORT_CHANGE) {
        return !strcmp(qdict_get_str(eva->data, "id"),
                       qdict_get_str(evb->data, "id"));
    }

    if (eva->event == QAPI_EVENT_QUORUM_REPORT_BAD) {
        return !strcmp(qdict_get_str(eva->data, "node-name"),
                       qdict_get_str(evb->data, "node-name"));
    }

    return TRUE;
}

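/*
 * monitor_suspend() and monitor_resume() nest: suspend_cnt is a
 * counter, so input is accepted again only once every suspend has been
 * matched by a resume.
 */
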
int monitor_suspend(Monitor *mon)
{
    if (monitor_is_hmp_non_interactive(mon)) {
        return -ENOTTY;
    }

    qatomic_inc(&mon->suspend_cnt);

    if (mon->use_io_thread) {
        /*
         * Kick I/O thread to make sure this takes effect. It'll be
         * evaluated again in prepare() of the watch object.
         */
        aio_notify(iothread_get_aio_context(mon_iothread));
    }

    trace_monitor_suspend(mon, 1);
    return 0;
}

static void monitor_accept_input(void *opaque)
{
    Monitor *mon = opaque;

    qemu_chr_fe_accept_input(&mon->chr);
}

void monitor_resume(Monitor *mon)
{
    if (monitor_is_hmp_non_interactive(mon)) {
        return;
    }

    if (qatomic_dec_fetch(&mon->suspend_cnt) == 0) {
        AioContext *ctx;

        if (mon->use_io_thread) {
            ctx = iothread_get_aio_context(mon_iothread);
        } else {
            ctx = qemu_get_aio_context();
        }

        if (!monitor_is_qmp(mon)) {
            MonitorHMP *hmp_mon = container_of(mon, MonitorHMP, common);
            assert(hmp_mon->rs);
            readline_show_prompt(hmp_mon->rs);
        }

        aio_bh_schedule_oneshot(ctx, monitor_accept_input, mon);
    }

    trace_monitor_suspend(mon, -1);
}

int monitor_can_read(void *opaque)
{
    Monitor *mon = opaque;

    return !qatomic_mb_read(&mon->suspend_cnt);
}

void monitor_list_append(Monitor *mon)
{
    qemu_mutex_lock(&monitor_lock);
    /*
     * This prevents inserting new monitors during monitor_cleanup().
     * A cleaner solution would involve the main thread telling other
     * threads to terminate and waiting for them to do so.
     */
    if (!monitor_destroyed) {
        QTAILQ_INSERT_HEAD(&mon_list, mon, entry);
        mon = NULL;
    }
    qemu_mutex_unlock(&monitor_lock);

    if (mon) {
        monitor_data_destroy(mon);
        g_free(mon);
    }
}

static void monitor_iothread_init(void)
{
    mon_iothread = iothread_create("mon_iothread", &error_abort);
}

void monitor_data_init(Monitor *mon, bool is_qmp, bool skip_flush,
                       bool use_io_thread)
{
    if (use_io_thread && !mon_iothread) {
        monitor_iothread_init();
    }
    qemu_mutex_init(&mon->mon_lock);
    mon->is_qmp = is_qmp;
    mon->outbuf = qstring_new();
    mon->skip_flush = skip_flush;
    mon->use_io_thread = use_io_thread;
}

void monitor_data_destroy(Monitor *mon)
{
    g_free(mon->mon_cpu_path);
    qemu_chr_fe_deinit(&mon->chr, false);
    if (monitor_is_qmp(mon)) {
        monitor_data_destroy_qmp(container_of(mon, MonitorQMP, common));
    } else {
        readline_free(container_of(mon, MonitorHMP, common)->rs);
    }
    qobject_unref(mon->outbuf);
    qemu_mutex_destroy(&mon->mon_lock);
}

void monitor_cleanup(void)
{
    /*
     * We need to explicitly stop the I/O thread (but not destroy it),
     * clean up the monitor resources, and only then destroy the I/O
     * thread, because monitor_data_destroy() below needs to unregister
     * from the chardev, and chardev is not thread-safe yet.
     */
    if (mon_iothread) {
        iothread_stop(mon_iothread);
    }

    /* Flush output buffers and destroy monitors */
    qemu_mutex_lock(&monitor_lock);
    monitor_destroyed = true;
    while (!QTAILQ_EMPTY(&mon_list)) {
        Monitor *mon = QTAILQ_FIRST(&mon_list);
        QTAILQ_REMOVE(&mon_list, mon, entry);
        /* Permit QAPI event emission from character frontend release */
        qemu_mutex_unlock(&monitor_lock);
        monitor_flush(mon);
        monitor_data_destroy(mon);
        qemu_mutex_lock(&monitor_lock);
        g_free(mon);
    }
    qemu_mutex_unlock(&monitor_lock);

    /* QEMUBHs need to be deleted before destroying the I/O thread */
    qemu_bh_delete(qmp_dispatcher_bh);
    qmp_dispatcher_bh = NULL;
    if (mon_iothread) {
        iothread_destroy(mon_iothread);
        mon_iothread = NULL;
    }
}

static void monitor_qapi_event_init(void)
{
    monitor_qapi_event_state = g_hash_table_new(qapi_event_throttle_hash,
                                                qapi_event_throttle_equal);
}

void monitor_init_globals_core(void)
{
    monitor_qapi_event_init();
    qemu_mutex_init(&monitor_lock);

    /*
     * The dispatcher BH must run in the main loop thread, since we
     * have commands assuming that context. It would be nice to get
     * rid of those assumptions.
     */
    qmp_dispatcher_bh = aio_bh_new(iohandler_get_aio_context(),
                                   monitor_qmp_bh_dispatcher,
                                   NULL);
}

int monitor_init(MonitorOptions *opts, bool allow_hmp, Error **errp)
{
    Chardev *chr;
    Error *local_err = NULL;

    chr = qemu_chr_find(opts->chardev);
    if (chr == NULL) {
        error_setg(errp, "chardev \"%s\" not found", opts->chardev);
        return -1;
    }

    if (!opts->has_mode) {
        opts->mode = allow_hmp ? MONITOR_MODE_READLINE : MONITOR_MODE_CONTROL;
    }

    switch (opts->mode) {
    case MONITOR_MODE_CONTROL:
        monitor_init_qmp(chr, opts->pretty, &local_err);
        break;
    case MONITOR_MODE_READLINE:
        if (!allow_hmp) {
            error_setg(errp, "Only QMP is supported");
            return -1;
        }
        if (opts->pretty) {
            warn_report("'pretty' is deprecated for HMP monitors, it has no "
                        "effect and will be removed in future versions");
        }
        monitor_init_hmp(chr, true, &local_err);
        break;
    default:
        g_assert_not_reached();
    }

    if (local_err) {
        error_propagate(errp, local_err);
        return -1;
    }
    return 0;
}

int monitor_init_opts(QemuOpts *opts, Error **errp)
{
    Visitor *v;
    MonitorOptions *options;
    int ret;

    v = opts_visitor_new(opts);
    visit_type_MonitorOptions(v, NULL, &options, errp);
    visit_free(v);
    if (!options) {
        return -1;
    }

    ret = monitor_init(options, true, errp);
    qapi_free_MonitorOptions(options);
    return ret;
}

QemuOptsList qemu_mon_opts = {
    .name = "mon",
    .implied_opt_name = "chardev",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_mon_opts.head),
    .desc = {
        {
            .name = "mode",
            .type = QEMU_OPT_STRING,
        },{
            .name = "chardev",
            .type = QEMU_OPT_STRING,
        },{
            .name = "pretty",
            .type = QEMU_OPT_BOOL,
        },
        { /* end of list */ }
    },
};
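
/*
 * For illustration (the option names are defined in qemu_mon_opts
 * above; the chardev syntax is the usual QEMU one): a QMP monitor can
 * be configured with something like
 *
 *     -chardev socket,id=mon0,path=/tmp/qmp.sock,server=on,wait=off \
 *     -mon chardev=mon0,mode=control,pretty=on
 *
 * monitor_init_opts() converts such a "-mon" option group into a
 * MonitorOptions object and hands it to monitor_init().
 */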