/*
 * Support for RAM backed by mmapped host memory.
 *
 * Copyright (c) 2015 Red Hat, Inc.
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later. See the COPYING file in the top-level directory.
 */

#ifdef CONFIG_LINUX
#include <linux/mman.h>
#else  /* !CONFIG_LINUX */
#define MAP_SYNC              0x0
#define MAP_SHARED_VALIDATE   0x0
#endif /* CONFIG_LINUX */
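
/*
 * With both flags defined to 0x0 on non-Linux hosts, OR-ing them into the
 * mmap() flags in mmap_activate() below is a no-op: the MAP_SYNC code path
 * compiles everywhere and simply degrades to an ordinary mapping where the
 * host lacks support.
 */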

#include "qemu/osdep.h"
#include "qemu/mmap-alloc.h"
#include "qemu/host-utils.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"

#define HUGETLBFS_MAGIC       0x958458f6

#ifdef CONFIG_LINUX
#include <sys/vfs.h>
#endif

size_t qemu_fd_getpagesize(int fd)
{
#ifdef CONFIG_LINUX
    struct statfs fs;
    int ret;

    if (fd != -1) {
        do {
            ret = fstatfs(fd, &fs);
        } while (ret != 0 && errno == EINTR);

        if (ret == 0 && fs.f_type == HUGETLBFS_MAGIC) {
            return fs.f_bsize;
        }
    }
#ifdef __sparc__
    /* SPARC Linux needs greater alignment than the pagesize */
    return QEMU_VMALLOC_ALIGN;
#endif
#endif

    return qemu_real_host_page_size;
}
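
/*
 * Illustrative sketch (not part of the original file; the path and page size
 * are assumptions): an fd backed by a 2 MiB hugetlbfs mount reports the huge
 * page size, while a regular file or fd == -1 falls back to the host page
 * size.
 *
 *     int fd = open("/dev/hugepages/guest-ram", O_RDWR | O_CREAT, 0600);
 *     size_t psize = qemu_fd_getpagesize(fd);   // 2 MiB on such a mount
 *     size_t host  = qemu_fd_getpagesize(-1);   // qemu_real_host_page_size
 */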

size_t qemu_mempath_getpagesize(const char *mem_path)
{
#ifdef CONFIG_LINUX
    struct statfs fs;
    int ret;

    if (mem_path) {
        do {
            ret = statfs(mem_path, &fs);
        } while (ret != 0 && errno == EINTR);

        if (ret != 0) {
            fprintf(stderr, "Couldn't statfs() memory path: %s\n",
                    strerror(errno));
            exit(1);
        }

        if (fs.f_type == HUGETLBFS_MAGIC) {
            /* It's a huge page path, return the huge page size */
            return fs.f_bsize;
        }
    }
#ifdef __sparc__
    /* SPARC Linux needs greater alignment than the pagesize */
    return QEMU_VMALLOC_ALIGN;
#endif
#endif

    return qemu_real_host_page_size;
}

#define OVERCOMMIT_MEMORY_PATH "/proc/sys/vm/overcommit_memory"
static bool map_noreserve_effective(int fd, uint32_t qemu_map_flags)
{
#if defined(__linux__)
    const bool readonly = qemu_map_flags & QEMU_MAP_READONLY;
    const bool shared = qemu_map_flags & QEMU_MAP_SHARED;
    /* g_autofree: the buffer is released automatically on every return */
    g_autofree gchar *content = NULL;
    const char *endptr;
    unsigned int tmp;

    /*
     * hugetlb accounting is different from ordinary swap reservation:
     * a) Hugetlb pages from the pool are reserved for both private and
     *    shared mappings. For shared mappings, all mappers have to specify
     *    MAP_NORESERVE.
     * b) MAP_NORESERVE is not affected by /proc/sys/vm/overcommit_memory.
     */
    if (qemu_fd_getpagesize(fd) != qemu_real_host_page_size) {
        return true;
    }

    /*
     * Accountable mappings in the kernel that can be affected by MAP_NORESERVE
     * are private writable mappings (see mm/mmap.c:accountable_mapping() in
     * Linux). For all shared or readonly mappings, MAP_NORESERVE is always
     * implicitly active -- no reservation; this includes shmem. The only
     * exception is shared anonymous memory, which is accounted like private
     * anonymous memory.
     */
    if (readonly || (shared && fd >= 0)) {
        return true;
    }

    /*
     * MAP_NORESERVE is globally ignored for applicable !hugetlb mappings when
     * memory overcommit is set to "never". Sparse memory regions aren't really
     * possible in this system configuration.
     *
     * Bail out now instead of silently committing way more memory than
     * currently desired by the user.
     */
    if (g_file_get_contents(OVERCOMMIT_MEMORY_PATH, &content, NULL, NULL) &&
        !qemu_strtoui(content, &endptr, 0, &tmp) &&
        (!endptr || *endptr == '\n')) {
        if (tmp == 2) {
            error_report("Skipping reservation of swap space is not supported:"
                         " \"" OVERCOMMIT_MEMORY_PATH "\" is \"2\"");
            return false;
        }
        return true;
    }
    /* this interface has been around since Linux 2.6 */
    error_report("Skipping reservation of swap space is not supported:"
                 " Could not read: \"" OVERCOMMIT_MEMORY_PATH "\"");
    return false;
#endif
    /*
     * E.g., FreeBSD used to define MAP_NORESERVE, never implemented it,
     * and removed it a while ago.
     */
    error_report("Skipping reservation of swap space is not supported");
    return false;
}
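
/*
 * Worked examples of the decision above (illustrative, derived from the
 * checks in this function; the concrete flag combinations are assumptions):
 *
 *   hugetlbfs fd, any flags                   -> true  (pool reservation)
 *   QEMU_MAP_READONLY                         -> true  (never accounted)
 *   QEMU_MAP_SHARED with fd >= 0              -> true  (shmem-like, unaccounted)
 *   private writable, overcommit_memory != 2  -> true  (MAP_NORESERVE honored)
 *   private writable, overcommit_memory == 2  -> false (flag globally ignored)
 */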

/*
 * Reserve a new memory region of the requested size to be used for mapping
 * from the given fd (if any).
 */
static void *mmap_reserve(size_t size, int fd)
{
    int flags = MAP_PRIVATE;

#if defined(__powerpc64__) && defined(__linux__)
    /*
     * On ppc64 mappings in the same segment (aka slice) must share the same
     * page size. Since we will be re-allocating part of this segment
     * from the supplied fd, we should make sure to use the same page size;
     * to this end we mmap the supplied fd. In this case, set MAP_NORESERVE
     * to avoid allocating backing store memory.
     * We do this unless we are using the system page size, in which case
     * anonymous memory is OK.
     */
    if (fd == -1 || qemu_fd_getpagesize(fd) == qemu_real_host_page_size) {
        fd = -1;
        flags |= MAP_ANONYMOUS;
    } else {
        flags |= MAP_NORESERVE;
    }
#else
    fd = -1;
    flags |= MAP_ANONYMOUS;
#endif

    return mmap(0, size, PROT_NONE, flags, fd, 0);
}
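
/*
 * Minimal sketch of the reserve-then-activate pattern used in this file
 * (illustrative only; "total", "size" and "fd" are placeholders):
 *
 *     void *resv = mmap(0, total, PROT_NONE,
 *                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);  // no access yet
 *     void *ram  = mmap(resv, size, PROT_READ | PROT_WRITE,
 *                       MAP_FIXED | MAP_SHARED, fd, 0);       // make usable
 */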

/*
 * Activate memory in a reserved region from the given fd (if any), to make
 * it accessible.
 */
static void *mmap_activate(void *ptr, size_t size, int fd,
                           uint32_t qemu_map_flags, off_t map_offset)
{
    const bool noreserve = qemu_map_flags & QEMU_MAP_NORESERVE;
    const bool readonly = qemu_map_flags & QEMU_MAP_READONLY;
    const bool shared = qemu_map_flags & QEMU_MAP_SHARED;
    const bool sync = qemu_map_flags & QEMU_MAP_SYNC;
    const int prot = PROT_READ | (readonly ? 0 : PROT_WRITE);
    int map_sync_flags = 0;
    int flags = MAP_FIXED;
    void *activated_ptr;

    if (noreserve && !map_noreserve_effective(fd, qemu_map_flags)) {
        return MAP_FAILED;
    }

    flags |= fd == -1 ? MAP_ANONYMOUS : 0;
    flags |= shared ? MAP_SHARED : MAP_PRIVATE;
    flags |= noreserve ? MAP_NORESERVE : 0;
    if (shared && sync) {
        map_sync_flags = MAP_SYNC | MAP_SHARED_VALIDATE;
    }

    activated_ptr = mmap(ptr, size, prot, flags | map_sync_flags, fd,
                         map_offset);
    if (activated_ptr == MAP_FAILED && map_sync_flags) {
        if (errno == ENOTSUP) {
            char *proc_link = g_strdup_printf("/proc/self/fd/%d", fd);
            char *file_name = g_malloc0(PATH_MAX);
            int len = readlink(proc_link, file_name, PATH_MAX - 1);

            if (len < 0) {
                len = 0;
            }
            file_name[len] = '\0';
            fprintf(stderr, "Warning: requesting persistence across crashes "
                    "for backend file %s failed. Proceeding without "
                    "persistence, data might become corrupted in case of host "
                    "crash.\n", file_name);
            g_free(proc_link);
            g_free(file_name);
            warn_report("Using non-DAX backing file with 'pmem=on' option"
                        " is deprecated");
        }
        /*
         * If mmap failed with MAP_SHARED_VALIDATE | MAP_SYNC, we will try
         * again without these flags to handle backwards compatibility.
         */
        activated_ptr = mmap(ptr, size, prot, flags, fd, map_offset);
    }
    return activated_ptr;
}
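
/*
 * Illustrative behaviour (assumption about a typical host): with
 * QEMU_MAP_SHARED | QEMU_MAP_SYNC and an fd on a non-DAX filesystem such as
 * ext4 without "-o dax", the first mmap() fails with ENOTSUP, the warning
 * above is printed, and the retry produces a regular shared mapping without
 * crash-persistence guarantees.
 */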

static inline size_t mmap_guard_pagesize(int fd)
{
#if defined(__powerpc64__) && defined(__linux__)
    /* Mappings in the same segment must share the same page size */
    return qemu_fd_getpagesize(fd);
#else
    return qemu_real_host_page_size;
#endif
}

void *qemu_ram_mmap(int fd,
                    size_t size,
                    size_t align,
                    uint32_t qemu_map_flags,
                    off_t map_offset)
{
    const size_t guard_pagesize = mmap_guard_pagesize(fd);
    size_t offset, total;
    void *ptr, *guardptr;

    /*
     * Note: this always allocates at least one extra page of virtual address
     * space, even if size is already aligned.
     */
    total = size + align;

    guardptr = mmap_reserve(total, fd);
    if (guardptr == MAP_FAILED) {
        return MAP_FAILED;
    }

    assert(is_power_of_2(align));
    /* Always align to host page size */
    assert(align >= guard_pagesize);

    offset = QEMU_ALIGN_UP((uintptr_t)guardptr, align) - (uintptr_t)guardptr;

    ptr = mmap_activate(guardptr + offset, size, fd, qemu_map_flags,
                        map_offset);
    if (ptr == MAP_FAILED) {
        munmap(guardptr, total);
        return MAP_FAILED;
    }

    if (offset > 0) {
        munmap(guardptr, offset);
    }

    /*
     * Leave a single PROT_NONE page allocated after the RAM block, to serve
     * as a guard page against potential buffer overflows.
     */
    total -= offset;
    if (total > size + guard_pagesize) {
        munmap(ptr + size + guard_pagesize, total - size - guard_pagesize);
    }

    return ptr;
}
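
/*
 * Usage sketch (illustrative, not part of the original file; the fd, size
 * and flags are assumptions): map 1 GiB of fd-backed RAM, aligned to the
 * backing page size, without reserving swap space, then tear it down.
 *
 *     size_t sz = 0x40000000;
 *     void *p = qemu_ram_mmap(fd, sz,
 *                             MAX(qemu_fd_getpagesize(fd),
 *                                 qemu_real_host_page_size),
 *                             QEMU_MAP_SHARED | QEMU_MAP_NORESERVE, 0);
 *     if (p == MAP_FAILED) {
 *         ... handle the error ...
 *     }
 *     ...
 *     qemu_ram_munmap(fd, p, sz);
 */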

void qemu_ram_munmap(int fd, void *ptr, size_t size)
{
    if (ptr) {
        /* Unmap both the RAM block and the guard page */
        munmap(ptr, size + mmap_guard_pagesize(fd));
    }
}