blob: 5a291cc98201e4c436e2c289b53544d1712775a2 [file] [log] [blame]
Jes Sorensenc1b0b932010-10-26 10:39:19 +02001/*
2 * os-posix-lib.c
3 *
4 * Copyright (c) 2003-2008 Fabrice Bellard
5 * Copyright (c) 2010 Red Hat, Inc.
6 *
7 * QEMU library functions on POSIX which are shared between QEMU and
8 * the QEMU tools.
9 *
10 * Permission is hereby granted, free of charge, to any person obtaining a copy
11 * of this software and associated documentation files (the "Software"), to deal
12 * in the Software without restriction, including without limitation the rights
13 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14 * copies of the Software, and to permit persons to whom the Software is
15 * furnished to do so, subject to the following conditions:
16 *
17 * The above copyright notice and this permission notice shall be included in
18 * all copies or substantial portions of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26 * THE SOFTWARE.
27 */
28
Peter Maydellaafd7582016-01-29 17:49:55 +000029#include "qemu/osdep.h"
Stefan Hajnoczi13401ba2013-11-14 11:54:16 +010030#include <termios.h>
Stefan Hajnoczi13401ba2013-11-14 11:54:16 +010031
Laszlo Erseke2ea3512013-05-18 06:31:48 +020032#include <glib/gprintf.h>
33
Markus Armbrustera8d25322019-05-23 16:35:08 +020034#include "qemu-common.h"
Paolo Bonzini9c17d612012-12-17 18:20:04 +010035#include "sysemu/sysemu.h"
Jes Sorensenc1b0b932010-10-26 10:39:19 +020036#include "trace.h"
Markus Armbrusterda34e652016-03-14 09:01:28 +010037#include "qapi/error.h"
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010038#include "qemu/sockets.h"
Markus Armbrusterdb725812019-08-12 07:23:50 +020039#include "qemu/thread.h"
Fam Zheng10f5bff2014-02-10 14:48:51 +080040#include <libgen.h>
Paolo Bonzini38183312014-05-14 17:43:21 +080041#include <sys/signal.h>
Veronia Bahaaf348b6d2016-03-20 19:16:19 +020042#include "qemu/cutils.h"
Jes Sorensenc1b0b932010-10-26 10:39:19 +020043
Paolo Bonzinicbcfa042011-09-12 16:20:11 +020044#ifdef CONFIG_LINUX
45#include <sys/syscall.h>
46#endif
Paolo Bonzinicbcfa042011-09-12 16:20:11 +020047
Andreas Färber41975b22014-03-13 14:27:59 +010048#ifdef __FreeBSD__
49#include <sys/sysctl.h>
Ed Mastea7764f12016-11-21 20:32:45 -050050#include <sys/user.h>
Michal Privoznik7dc9ae42016-09-27 17:24:56 +020051#include <libutil.h>
Andreas Färber41975b22014-03-13 14:27:59 +010052#endif
53
Kamil Rytarowski094611b2017-10-28 21:48:33 +020054#ifdef __NetBSD__
55#include <sys/sysctl.h>
56#endif
57
Markus Armbrustera9c94272016-06-22 19:11:19 +020058#include "qemu/mmap-alloc.h"
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +030059
Peter Lieven7d992e42016-09-27 11:58:45 +020060#ifdef CONFIG_DEBUG_STACK_USAGE
61#include "qemu/error-report.h"
62#endif
63
/* Upper bound on worker threads used to pre-touch guest RAM pages. */
#define MAX_MEM_PREALLOC_THREAD_COUNT 16

/* Per-thread work item for parallel page touching (see touch_all_pages). */
struct MemsetThread {
    char *addr;          /* first page this thread touches */
    size_t numpages;     /* how many pages this thread touches */
    size_t hpagesize;    /* stride between pages, in bytes */
    QemuThread pgthread; /* the worker thread */
    sigjmp_buf env;      /* SIGBUS recovery point for this thread */
};
typedef struct MemsetThread MemsetThread;

/* Shared between touch_all_pages() and the SIGBUS handler. */
static MemsetThread *memset_thread;
static int memset_num_threads;
static bool memset_thread_failed;
78
/*
 * Return an identifier for the calling thread: the kernel thread id on
 * Linux, otherwise the process id (threads are not distinguished there).
 */
int qemu_get_thread_id(void)
{
#if defined(__linux__)
    return syscall(SYS_gettid);
#else
    return getpid();
#endif
}
Alexandre Raymondf97742d2011-06-06 23:34:10 -040087
/* Thin pass-through wrapper around daemon(3); returns its result. */
int qemu_daemon(int nochdir, int noclose)
{
    return daemon(nochdir, noclose);
}
92
/*
 * Create/lock @path and write the current pid into it.
 *
 * The open/lock/stat loop guards against a race with another process
 * that unlinks or recreates the pid file between our open() and the
 * fcntl() lock: we only proceed once the locked fd and the file
 * currently at @path are the same inode.  The write lock (F_WRLCK,
 * whole file) is held for the life of the fd, i.e. the process.
 *
 * Returns true on success; on failure sets @errp, removes/closes as
 * appropriate and returns false.
 */
bool qemu_write_pidfile(const char *path, Error **errp)
{
    int fd;
    char pidstr[32];

    while (1) {
        struct stat a, b;
        struct flock lock = {
            .l_type = F_WRLCK,
            .l_whence = SEEK_SET,
            .l_len = 0,
        };

        fd = qemu_open(path, O_CREAT | O_WRONLY, S_IRUSR | S_IWUSR);
        if (fd == -1) {
            error_setg_errno(errp, errno, "Cannot open pid file");
            return false;
        }

        if (fstat(fd, &b) < 0) {
            error_setg_errno(errp, errno, "Cannot stat file");
            goto fail_close;
        }

        if (fcntl(fd, F_SETLK, &lock)) {
            error_setg_errno(errp, errno, "Cannot lock pid file");
            goto fail_close;
        }

        /*
         * Now make sure the path we locked is the same one that now
         * exists on the filesystem.
         */
        if (stat(path, &a) < 0) {
            /*
             * PID file disappeared, someone else must be racing with
             * us, so try again.
             */
            close(fd);
            continue;
        }

        if (a.st_ino == b.st_ino) {
            break;
        }

        /*
         * PID file was recreated, someone else must be racing with
         * us, so try again.
         */
        close(fd);
    }

    if (ftruncate(fd, 0) < 0) {
        error_setg_errno(errp, errno, "Failed to truncate pid file");
        goto fail_unlink;
    }

    snprintf(pidstr, sizeof(pidstr), FMT_pid "\n", getpid());
    if (write(fd, pidstr, strlen(pidstr)) != strlen(pidstr)) {
        error_setg(errp, "Failed to write pid file");
        goto fail_unlink;
    }

    return true;

fail_unlink:
    unlink(path);
fail_close:
    close(fd);
    return false;
}
165
/*
 * Abort with a message on stderr if @ptr is NULL (allocation failure),
 * otherwise pass @ptr straight through so calls can be wrapped inline.
 */
void *qemu_oom_check(void *ptr)
{
    if (ptr) {
        return ptr;
    }
    fprintf(stderr, "Failed to allocate memory: %s\n", strerror(errno));
    abort();
}
Jes Sorensenc1b0b932010-10-26 10:39:19 +0200174
/*
 * Allocate @size bytes aligned to @alignment (raised to at least
 * sizeof(void *)).  Returns NULL on failure; the posix_memalign path
 * also stores the failure code into errno.  Free with qemu_vfree().
 *
 * NOTE(review): the CONFIG_BSD valloc() fallback only guarantees page
 * alignment and ignores larger @alignment values — confirm no caller
 * on that configuration needs more.
 */
void *qemu_try_memalign(size_t alignment, size_t size)
{
    void *ptr;

    if (alignment < sizeof(void*)) {
        alignment = sizeof(void*);
    }

#if defined(CONFIG_POSIX_MEMALIGN)
    int ret;
    ret = posix_memalign(&ptr, alignment, size);
    if (ret != 0) {
        errno = ret;
        ptr = NULL;
    }
#elif defined(CONFIG_BSD)
    ptr = valloc(size);
#else
    ptr = memalign(alignment, size);
#endif
    trace_qemu_memalign(alignment, size, ptr);
    return ptr;
}
198
/* Like qemu_try_memalign(), but aborts instead of returning NULL. */
void *qemu_memalign(size_t alignment, size_t size)
{
    return qemu_oom_check(qemu_try_memalign(alignment, size));
}
203
/* alloc shared memory pages */
/*
 * mmap anonymous memory for guest RAM, aligned to QEMU_VMALLOC_ALIGN.
 * If @alignment is non-NULL the alignment actually used is stored there.
 * Returns NULL on failure.  Free with qemu_anon_ram_free().
 */
void *qemu_anon_ram_alloc(size_t size, uint64_t *alignment, bool shared)
{
    size_t align = QEMU_VMALLOC_ALIGN;
    void *ptr = qemu_ram_mmap(-1, size, align, shared, false);

    if (ptr == MAP_FAILED) {
        return NULL;
    }

    if (alignment) {
        *alignment = align;
    }

    trace_qemu_anon_ram_alloc(size, ptr);
    return ptr;
}
221
/* Free memory obtained with qemu_memalign()/qemu_try_memalign(). */
void qemu_vfree(void *ptr)
{
    trace_qemu_vfree(ptr);
    free(ptr);
}
Jes Sorensen9549e762010-10-26 10:39:20 +0200227
/* Unmap a region previously returned by qemu_anon_ram_alloc(). */
void qemu_anon_ram_free(void *ptr, size_t size)
{
    trace_qemu_anon_ram_free(ptr, size);
    qemu_ram_munmap(-1, ptr, size);
}
233
/* Clear O_NONBLOCK on @fd, putting it into blocking mode.
 * Both fcntl() calls are expected to succeed; failure aborts. */
void qemu_set_block(int fd)
{
    int flags = fcntl(fd, F_GETFL);
    assert(flags != -1);
    flags = fcntl(fd, F_SETFL, flags & ~O_NONBLOCK);
    assert(flags != -1);
}
242
/*
 * Set O_NONBLOCK on @fd.  Failure of either fcntl() aborts, except on
 * OpenBSD where an ENODEV from F_SETFL is tolerated (see below).
 */
void qemu_set_nonblock(int fd)
{
    int f;
    f = fcntl(fd, F_GETFL);
    assert(f != -1);
    f = fcntl(fd, F_SETFL, f | O_NONBLOCK);
#ifdef __OpenBSD__
    if (f == -1) {
        /*
         * Previous to OpenBSD 6.3, fcntl(F_SETFL) is not permitted on
         * memory devices and sets errno to ENODEV.
         * It's OK if we fail to set O_NONBLOCK on devices like /dev/null,
         * because they will never block anyway.
         */
        assert(errno == ENODEV);
    }
#else
    assert(f != -1);
#endif
}
263
/*
 * Enable SO_REUSEADDR on socket @fd so a listening address can be
 * rebound quickly after restart.  Asserts success; returns the
 * setsockopt() result (0 on success).
 */
int socket_set_fast_reuse(int fd)
{
    int one = 1;
    int ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR,
                         (const char *)&one, sizeof(one));

    assert(ret == 0);
    return ret;
}
275
/* Mark @fd close-on-exec.  Both fcntl() calls must succeed. */
void qemu_set_cloexec(int fd)
{
    int fdflags = fcntl(fd, F_GETFD);
    assert(fdflags != -1);
    fdflags = fcntl(fd, F_SETFD, fdflags | FD_CLOEXEC);
    assert(fdflags != -1);
}
Jes Sorensen70e72ce2010-10-26 10:39:21 +0200284
/*
 * Creates a pipe with FD_CLOEXEC set on both file descriptors
 *
 * Uses pipe2(O_CLOEXEC) atomically when available; if the kernel lacks
 * pipe2 (ENOSYS) or it is not compiled in, falls back to pipe() plus
 * fcntl() — that path has a small window where the fds are inheritable.
 * Returns 0 on success, -1 on error (errno set by pipe/pipe2).
 */
int qemu_pipe(int pipefd[2])
{
    int ret;

#ifdef CONFIG_PIPE2
    ret = pipe2(pipefd, O_CLOEXEC);
    if (ret != -1 || errno != ENOSYS) {
        return ret;
    }
#endif
    ret = pipe(pipefd);
    if (ret == 0) {
        qemu_set_cloexec(pipefd[0]);
        qemu_set_cloexec(pipefd[1]);
    }

    return ret;
}
Hidetoshi Seto38671422010-11-24 11:38:10 +0900306
/*
 * Build "<CONFIG_QEMU_LOCALSTATEDIR>/<relative_pathname>".
 * Caller must g_free() the returned string.
 */
char *
qemu_get_local_state_pathname(const char *relative_pathname)
{
    return g_strdup_printf("%s/%s", CONFIG_QEMU_LOCALSTATEDIR,
                           relative_pathname);
}
Stefan Hajnoczi13401ba2013-11-14 11:54:16 +0100313
/*
 * Turn terminal echo (plus ECHONL/ICANON/IEXTEN) on or off for @fd.
 * NOTE(review): tcgetattr/tcsetattr return values are ignored, so a
 * non-tty fd silently does nothing — confirm callers rely on that.
 */
void qemu_set_tty_echo(int fd, bool echo)
{
    struct termios tty;

    tcgetattr(fd, &tty);

    if (echo) {
        tty.c_lflag |= ECHO | ECHONL | ICANON | IEXTEN;
    } else {
        tty.c_lflag &= ~(ECHO | ECHONL | ICANON | IEXTEN);
    }

    tcsetattr(fd, TCSANOW, &tty);
}
Fam Zheng10f5bff2014-02-10 14:48:51 +0800328
/* Directory containing the running executable; set once at startup. */
static char exec_dir[PATH_MAX];

/*
 * Determine the directory of the running executable and cache it in
 * exec_dir.  Tries /proc/self/exe on Linux and KERN_PROC_PATHNAME via
 * sysctl() on FreeBSD/NetBSD, then falls back to realpath(@argv0).
 * Silently leaves exec_dir empty if nothing works.  Must be called at
 * most once (asserts exec_dir is still empty).
 */
void qemu_init_exec_dir(const char *argv0)
{
    char *dir;
    char *p = NULL;
    char buf[PATH_MAX];

    assert(!exec_dir[0]);

#if defined(__linux__)
    {
        int len;
        /* readlink() does not NUL-terminate; reserve a byte and do it. */
        len = readlink("/proc/self/exe", buf, sizeof(buf) - 1);
        if (len > 0) {
            buf[len] = 0;
            p = buf;
        }
    }
#elif defined(__FreeBSD__) \
      || (defined(__NetBSD__) && defined(KERN_PROC_PATHNAME))
    {
#if defined(__FreeBSD__)
        static int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1};
#else
        static int mib[4] = {CTL_KERN, KERN_PROC_ARGS, -1, KERN_PROC_PATHNAME};
#endif
        size_t len = sizeof(buf) - 1;

        *buf = '\0';
        if (!sysctl(mib, ARRAY_SIZE(mib), buf, &len, NULL, 0) &&
            *buf) {
            buf[sizeof(buf) - 1] = '\0';
            p = buf;
        }
    }
#endif
    /* If we don't have any way of figuring out the actual executable
       location then try argv[0].  */
    if (!p) {
        if (!argv0) {
            return;
        }
        p = realpath(argv0, buf);
        if (!p) {
            return;
        }
    }
    dir = g_path_get_dirname(p);

    pstrcpy(exec_dir, sizeof(exec_dir), dir);

    g_free(dir);
}
383
/* Return a copy of the cached executable directory (may be "" if
 * qemu_init_exec_dir() failed/was not called).  Caller g_free()s it. */
char *qemu_get_exec_dir(void)
{
    return g_strdup(exec_dir);
}
Paolo Bonzini38183312014-05-14 17:43:21 +0800388
/*
 * SIGBUS handler installed during memory preallocation: if the fault
 * happened on one of the page-touching worker threads, siglongjmp back
 * to that thread's recovery point so the failure can be reported
 * instead of killing the process.
 */
static void sigbus_handler(int signal)
{
    int i;
    if (memset_thread) {
        for (i = 0; i < memset_num_threads; i++) {
            if (qemu_thread_is_self(&memset_thread[i].pgthread)) {
                siglongjmp(memset_thread[i].env, 1);
            }
        }
    }
}
400
/*
 * Worker thread body: touch every page described by the MemsetThread
 * passed in @arg, forcing the kernel to back it with real memory.
 * SIGBUS is unblocked so the sigbus_handler can longjmp back here if
 * the backing store runs out; that path sets memset_thread_failed.
 */
static void *do_touch_pages(void *arg)
{
    MemsetThread *memset_args = (MemsetThread *)arg;
    sigset_t set, oldset;

    /* unblock SIGBUS */
    sigemptyset(&set);
    sigaddset(&set, SIGBUS);
    pthread_sigmask(SIG_UNBLOCK, &set, &oldset);

    if (sigsetjmp(memset_args->env, 1)) {
        memset_thread_failed = true;
    } else {
        char *addr = memset_args->addr;
        size_t numpages = memset_args->numpages;
        size_t hpagesize = memset_args->hpagesize;
        size_t i;
        for (i = 0; i < numpages; i++) {
            /*
             * Read & write back the same value, so we don't
             * corrupt existing user/app data that might be
             * stored.
             *
             * 'volatile' to stop compiler optimizing this away
             * to a no-op
             *
             * TODO: get a better solution from kernel so we
             * don't need to write at all so we don't cause
             * wear on the storage backing the region...
             */
            *(volatile char *)addr = *addr;
            addr += hpagesize;
        }
    }
    /* Restore the caller's signal mask before exiting the thread. */
    pthread_sigmask(SIG_SETMASK, &oldset, NULL);
    return NULL;
}
438
/*
 * Choose how many page-touching threads to use: the minimum of online
 * host CPUs, MAX_MEM_PREALLOC_THREAD_COUNT, and @smp_cpus.
 */
static inline int get_memset_num_threads(int smp_cpus)
{
    long host_procs = sysconf(_SC_NPROCESSORS_ONLN);
    int ret = 1;

    if (host_procs > 0) {
        ret = MIN(MIN(host_procs, MAX_MEM_PREALLOC_THREAD_COUNT), smp_cpus);
    }
    /* In case sysconf() fails, we fall back to single threaded */
    return ret;
}
450
/*
 * Touch @numpages pages of size @hpagesize starting at @area, spread
 * across up to get_memset_num_threads(@smp_cpus) worker threads.
 * The last thread receives whatever remainder is left after handing
 * numpages_per_thread to each earlier thread (numpages is decremented
 * as work is assigned).  Blocks until all workers finish.
 * Returns true if any worker hit SIGBUS (i.e. preallocation failed).
 */
static bool touch_all_pages(char *area, size_t hpagesize, size_t numpages,
                            int smp_cpus)
{
    size_t numpages_per_thread;
    size_t size_per_thread;
    char *addr = area;
    int i = 0;

    memset_thread_failed = false;
    memset_num_threads = get_memset_num_threads(smp_cpus);
    memset_thread = g_new0(MemsetThread, memset_num_threads);
    numpages_per_thread = (numpages / memset_num_threads);
    size_per_thread = (hpagesize * numpages_per_thread);
    for (i = 0; i < memset_num_threads; i++) {
        memset_thread[i].addr = addr;
        /* Last thread takes the remainder, including rounding leftovers. */
        memset_thread[i].numpages = (i == (memset_num_threads - 1)) ?
                                    numpages : numpages_per_thread;
        memset_thread[i].hpagesize = hpagesize;
        qemu_thread_create(&memset_thread[i].pgthread, "touch_pages",
                           do_touch_pages, &memset_thread[i],
                           QEMU_THREAD_JOINABLE);
        addr += size_per_thread;
        numpages -= numpages_per_thread;
    }
    for (i = 0; i < memset_num_threads; i++) {
        qemu_thread_join(&memset_thread[i].pgthread);
    }
    g_free(memset_thread);
    memset_thread = NULL;

    return memset_thread_failed;
}
483
/*
 * Preallocate (fault in) @memory bytes at @area, whose pages come from
 * @fd (its page size determines the touch stride).  Temporarily installs
 * a SIGBUS handler so an out-of-memory fault is reported via @errp
 * rather than crashing; the previous handler is restored on exit, and
 * failure to restore it is fatal.
 */
void os_mem_prealloc(int fd, char *area, size_t memory, int smp_cpus,
                     Error **errp)
{
    int ret;
    struct sigaction act, oldact;
    size_t hpagesize = qemu_fd_getpagesize(fd);
    size_t numpages = DIV_ROUND_UP(memory, hpagesize);

    memset(&act, 0, sizeof(act));
    act.sa_handler = &sigbus_handler;
    act.sa_flags = 0;

    ret = sigaction(SIGBUS, &act, &oldact);
    if (ret) {
        error_setg_errno(errp, errno,
            "os_mem_prealloc: failed to install signal handler");
        return;
    }

    /* touch pages simultaneously */
    if (touch_all_pages(area, hpagesize, numpages, smp_cpus)) {
        error_setg(errp, "os_mem_prealloc: Insufficient free host memory "
            "pages available to allocate guest RAM");
    }

    ret = sigaction(SIGBUS, &oldact, NULL);
    if (ret) {
        /* Terminate QEMU since it can't recover from error */
        perror("os_mem_prealloc: failed to reinstall signal handler");
        exit(1);
    }
}
Daniel P. Berranged57e4e42015-05-12 17:09:19 +0100516
/*
 * Return the command name of process @pid, or NULL if it cannot be
 * determined.  Uses kinfo_getproc() on FreeBSD and /proc/<pid>/cmdline
 * elsewhere.  Caller must g_free() the result.
 */
char *qemu_get_pid_name(pid_t pid)
{
    char *name = NULL;

#if defined(__FreeBSD__)
    /* BSDs don't have /proc, but they provide a nice substitute */
    struct kinfo_proc *proc = kinfo_getproc(pid);

    if (proc) {
        name = g_strdup(proc->ki_comm);
        /* kinfo_getproc() result is malloc'd, not glib-allocated. */
        free(proc);
    }
#else
    /* Assume a system with reasonable procfs */
    char *pid_path;
    size_t len;

    pid_path = g_strdup_printf("/proc/%d/cmdline", pid);
    g_file_get_contents(pid_path, &name, &len, NULL);
    g_free(pid_path);
#endif

    return name;
}
541
542
/*
 * fork() wrapper that gives the child a clean signal state: all signals
 * are blocked across the fork, the child resets every handler to
 * SIG_DFL and clears its signal mask, and the parent restores its
 * original mask.  Returns the child's pid to the parent, 0 to the
 * child, or -1 with @errp set (and errno preserved) on failure.
 */
pid_t qemu_fork(Error **errp)
{
    sigset_t oldmask, newmask;
    struct sigaction sig_action;
    int saved_errno;
    pid_t pid;

    /*
     * Need to block signals now, so that child process can safely
     * kill off caller's signal handlers without a race.
     */
    sigfillset(&newmask);
    if (pthread_sigmask(SIG_SETMASK, &newmask, &oldmask) != 0) {
        error_setg_errno(errp, errno,
                         "cannot block signals");
        return -1;
    }

    pid = fork();
    saved_errno = errno;

    if (pid < 0) {
        /* attempt to restore signal mask, but ignore failure, to
         * avoid obscuring the fork failure */
        (void)pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
        error_setg_errno(errp, saved_errno,
                         "cannot fork child process");
        errno = saved_errno;
        return -1;
    } else if (pid) {
        /* parent process */

        /* Restore our original signal mask now that the child is
         * safely running. Only documented failures are EFAULT (not
         * possible, since we are using just-grabbed mask) or EINVAL
         * (not possible, since we are using correct arguments). */
        (void)pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
    } else {
        /* child process */
        size_t i;

        /* Clear out all signal handlers from parent so nothing
         * unexpected can happen in our child once we unblock
         * signals */
        sig_action.sa_handler = SIG_DFL;
        sig_action.sa_flags = 0;
        sigemptyset(&sig_action.sa_mask);

        for (i = 1; i < NSIG; i++) {
            /* Only possible errors are EFAULT or EINVAL The former
             * won't happen, the latter we expect, so no need to check
             * return value */
            (void)sigaction(i, &sig_action, NULL);
        }

        /* Unmask all signals in child, since we've no idea what the
         * caller's done with their signal mask and don't want to
         * propagate that to children */
        sigemptyset(&newmask);
        if (pthread_sigmask(SIG_SETMASK, &newmask, NULL) != 0) {
            Error *local_err = NULL;
            error_setg_errno(&local_err, errno,
                             "cannot unblock signals");
            error_report_err(local_err);
            _exit(1);
        }
    }
    return pid;
}
Peter Lieven8737d9e2016-09-27 11:58:40 +0200612
613void *qemu_alloc_stack(size_t *sz)
614{
615 void *ptr, *guardpage;
Brad Smithfc3d1ba2018-10-19 08:52:39 -0400616 int flags;
Peter Lieven7d992e42016-09-27 11:58:45 +0200617#ifdef CONFIG_DEBUG_STACK_USAGE
618 void *ptr2;
619#endif
Wei Yang038adc22019-10-13 10:11:45 +0800620 size_t pagesz = qemu_real_host_page_size;
Peter Lieven8737d9e2016-09-27 11:58:40 +0200621#ifdef _SC_THREAD_STACK_MIN
622 /* avoid stacks smaller than _SC_THREAD_STACK_MIN */
623 long min_stack_sz = sysconf(_SC_THREAD_STACK_MIN);
624 *sz = MAX(MAX(min_stack_sz, 0), *sz);
625#endif
626 /* adjust stack size to a multiple of the page size */
627 *sz = ROUND_UP(*sz, pagesz);
628 /* allocate one extra page for the guard page */
629 *sz += pagesz;
630
Brad Smithfc3d1ba2018-10-19 08:52:39 -0400631 flags = MAP_PRIVATE | MAP_ANONYMOUS;
632#if defined(MAP_STACK) && defined(__OpenBSD__)
633 /* Only enable MAP_STACK on OpenBSD. Other OS's such as
634 * Linux/FreeBSD/NetBSD have a flag with the same name
635 * but have differing functionality. OpenBSD will SEGV
636 * if it spots execution with a stack pointer pointing
637 * at memory that was not allocated with MAP_STACK.
638 */
639 flags |= MAP_STACK;
640#endif
641
642 ptr = mmap(NULL, *sz, PROT_READ | PROT_WRITE, flags, -1, 0);
Peter Lieven8737d9e2016-09-27 11:58:40 +0200643 if (ptr == MAP_FAILED) {
Eduardo Habkoste916a6e2017-08-29 18:20:53 -0300644 perror("failed to allocate memory for stack");
Peter Lieven8737d9e2016-09-27 11:58:40 +0200645 abort();
646 }
647
648#if defined(HOST_IA64)
649 /* separate register stack */
650 guardpage = ptr + (((*sz - pagesz) / 2) & ~pagesz);
651#elif defined(HOST_HPPA)
652 /* stack grows up */
653 guardpage = ptr + *sz - pagesz;
654#else
655 /* stack grows down */
656 guardpage = ptr;
657#endif
658 if (mprotect(guardpage, pagesz, PROT_NONE) != 0) {
Eduardo Habkoste916a6e2017-08-29 18:20:53 -0300659 perror("failed to set up stack guard page");
Peter Lieven8737d9e2016-09-27 11:58:40 +0200660 abort();
661 }
662
Peter Lieven7d992e42016-09-27 11:58:45 +0200663#ifdef CONFIG_DEBUG_STACK_USAGE
664 for (ptr2 = ptr + pagesz; ptr2 < ptr + *sz; ptr2 += sizeof(uint32_t)) {
665 *(uint32_t *)ptr2 = 0xdeadbeaf;
666 }
667#endif
668
Peter Lieven8737d9e2016-09-27 11:58:40 +0200669 return ptr;
670}
671
#ifdef CONFIG_DEBUG_STACK_USAGE
/* Per-thread high-water mark of observed stack usage (debug builds). */
static __thread unsigned int max_stack_usage;
#endif

/*
 * Release a stack obtained from qemu_alloc_stack().  @sz must be the
 * total size qemu_alloc_stack() stored back (guard page included).
 * In CONFIG_DEBUG_STACK_USAGE builds, first scan for the deepest
 * overwritten 0xdeadbeaf sentinel and report new usage maxima.
 */
void qemu_free_stack(void *stack, size_t sz)
{
#ifdef CONFIG_DEBUG_STACK_USAGE
    unsigned int usage;
    void *ptr;

    for (ptr = stack + qemu_real_host_page_size; ptr < stack + sz;
         ptr += sizeof(uint32_t)) {
        if (*(uint32_t *)ptr != 0xdeadbeaf) {
            break;
        }
    }
    usage = sz - (uintptr_t) (ptr - stack);
    if (usage > max_stack_usage) {
        error_report("thread %d max stack usage increased from %u to %u",
                     qemu_get_thread_id(), max_stack_usage, usage);
        max_stack_usage = usage;
    }
#endif

    munmap(stack, sz);
}
Paolo Bonzinid98d4072017-02-08 13:22:12 +0100698
/*
 * Invoke @action's sa_sigaction callback with a siginfo_t rebuilt from
 * the signalfd-format @info, copying only the fields POSIX defines for
 * the signal class in question.
 */
void sigaction_invoke(struct sigaction *action,
                      struct qemu_signalfd_siginfo *info)
{
    siginfo_t si = {};
    si.si_signo = info->ssi_signo;
    si.si_errno = info->ssi_errno;
    si.si_code = info->ssi_code;

    /* Convert the minimal set of fields defined by POSIX.
     * Positive si_code values are reserved for kernel-generated
     * signals, where the valid siginfo fields are determined by
     * the signal number. But according to POSIX, it is unspecified
     * whether SI_USER and SI_QUEUE have values less than or equal to
     * zero.
     */
    if (info->ssi_code == SI_USER || info->ssi_code == SI_QUEUE ||
        info->ssi_code <= 0) {
        /* SIGTERM, etc. */
        si.si_pid = info->ssi_pid;
        si.si_uid = info->ssi_uid;
    } else if (info->ssi_signo == SIGILL || info->ssi_signo == SIGFPE ||
               info->ssi_signo == SIGSEGV || info->ssi_signo == SIGBUS) {
        si.si_addr = (void *)(uintptr_t)info->ssi_addr;
    } else if (info->ssi_signo == SIGCHLD) {
        si.si_pid = info->ssi_pid;
        si.si_status = info->ssi_status;
        si.si_uid = info->ssi_uid;
    }
    action->sa_sigaction(info->ssi_signo, &si, NULL);
}