/*
 * QEMU Host Memory Backend
 *
 * Copyright (C) 2013-2014 Red Hat Inc
 *
 * Authors:
 *   Igor Mammedov <imammedo@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "sysemu/hostmem.h"
#include "sysemu/sysemu.h"
#include "hw/boards.h"
#include "qapi/error.h"
#include "qapi/qapi-builtin-visit.h"
#include "qapi/visitor.h"
#include "qemu/config-file.h"
#include "qom/object_interfaces.h"
#include "qemu/mmap-alloc.h"

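/*
 * The HostMemPolicy QAPI enum is defined to mirror the kernel's MPOL_*
 * memory-policy constants; the build-time assertions below catch any drift
 * between the QAPI values and <numaif.h>.
 */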
#ifdef CONFIG_NUMA
#include <numaif.h>
QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_DEFAULT != MPOL_DEFAULT);
QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_PREFERRED != MPOL_PREFERRED);
QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_BIND != MPOL_BIND);
QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_INTERLEAVE != MPOL_INTERLEAVE);
#endif

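/*
 * Return the name used for the backend's RAMBlock: the full canonical QOM
 * path when x-use-canonical-path-for-ramblock-id is enabled, otherwise just
 * the last path component. The caller owns the returned string.
 */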
char *
host_memory_backend_get_name(HostMemoryBackend *backend)
{
    if (!backend->use_canonical_path) {
        return object_get_canonical_path_component(OBJECT(backend));
    }

    return object_get_canonical_path(OBJECT(backend));
}

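/*
 * Visitor-based accessors for the "size" property. The size can only be set
 * before the backing memory region has been created; once the region exists,
 * attempts to change it are rejected.
 */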
static void
host_memory_backend_get_size(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    uint64_t value = backend->size;

    visit_type_size(v, name, &value, errp);
}

static void
host_memory_backend_set_size(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    uint64_t value;

    if (host_memory_backend_mr_inited(backend)) {
        error_setg(errp, "cannot change property %s of %s", name,
                   object_get_typename(obj));
        return;
    }

    if (!visit_type_size(v, name, &value, errp)) {
        return;
    }
    if (!value) {
        error_setg(errp,
                   "property '%s' of %s doesn't take value '%" PRIu64 "'",
                   name, object_get_typename(obj), value);
        return;
    }
    backend->size = value;
}

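/*
 * The "host-nodes" property is stored internally as a bitmap; the accessors
 * below convert between that bitmap and the uint16List used by the visitor
 * interface.
 */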
static void
host_memory_backend_get_host_nodes(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    uint16List *host_nodes = NULL;
    uint16List **node = &host_nodes;
    unsigned long value;

    value = find_first_bit(backend->host_nodes, MAX_NODES);
    if (value == MAX_NODES) {
        goto ret;
    }

    *node = g_malloc0(sizeof(**node));
    (*node)->value = value;
    node = &(*node)->next;

    do {
        value = find_next_bit(backend->host_nodes, MAX_NODES, value + 1);
        if (value == MAX_NODES) {
            break;
        }

        *node = g_malloc0(sizeof(**node));
        (*node)->value = value;
        node = &(*node)->next;
    } while (true);

ret:
    visit_type_uint16List(v, name, &host_nodes, errp);
    /* the output visitor only reads the list, so free the temporary copy */
    qapi_free_uint16List(host_nodes);
}

static void
host_memory_backend_set_host_nodes(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
#ifdef CONFIG_NUMA
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    uint16List *l, *host_nodes = NULL;

    visit_type_uint16List(v, name, &host_nodes, errp);

    for (l = host_nodes; l; l = l->next) {
        if (l->value >= MAX_NODES) {
            error_setg(errp, "Invalid host-nodes value: %d", l->value);
            goto out;
        }
    }

    for (l = host_nodes; l; l = l->next) {
        bitmap_set(backend->host_nodes, l->value, 1);
    }

out:
    qapi_free_uint16List(host_nodes);
#else
    error_setg(errp, "NUMA node binding is not supported by this QEMU");
#endif
}

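/*
 * Accessors for the "policy" property. Thanks to the build-time checks
 * above, the stored HostMemPolicy value can be handed directly to mbind()
 * when CONFIG_NUMA is available.
 */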
static int
host_memory_backend_get_policy(Object *obj, Error **errp G_GNUC_UNUSED)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    return backend->policy;
}

static void
host_memory_backend_set_policy(Object *obj, int policy, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    backend->policy = policy;

#ifndef CONFIG_NUMA
    if (policy != HOST_MEM_POLICY_DEFAULT) {
        error_setg(errp, "NUMA policies are not supported by this QEMU");
    }
#endif
}

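/*
 * The "merge" and "dump" properties map to madvise() hints (KSM merging and
 * core-dump inclusion). If the memory region already exists, toggling the
 * property re-advises the existing mapping; otherwise the flag is simply
 * recorded for later.
 */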
static bool host_memory_backend_get_merge(Object *obj, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    return backend->merge;
}

static void host_memory_backend_set_merge(Object *obj, bool value, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    if (!host_memory_backend_mr_inited(backend)) {
        backend->merge = value;
        return;
    }

    if (value != backend->merge) {
        void *ptr = memory_region_get_ram_ptr(&backend->mr);
        uint64_t sz = memory_region_size(&backend->mr);

        qemu_madvise(ptr, sz,
                     value ? QEMU_MADV_MERGEABLE : QEMU_MADV_UNMERGEABLE);
        backend->merge = value;
    }
}

static bool host_memory_backend_get_dump(Object *obj, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    return backend->dump;
}

static void host_memory_backend_set_dump(Object *obj, bool value, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    if (!host_memory_backend_mr_inited(backend)) {
        backend->dump = value;
        return;
    }

    if (value != backend->dump) {
        void *ptr = memory_region_get_ram_ptr(&backend->mr);
        uint64_t sz = memory_region_size(&backend->mr);

        qemu_madvise(ptr, sz,
                     value ? QEMU_MADV_DODUMP : QEMU_MADV_DONTDUMP);
        backend->dump = value;
    }
}

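/*
 * The "prealloc" property. Enabling it after the memory region has been
 * created triggers an immediate os_mem_prealloc() over the whole region;
 * setting it to false once the region exists is a no-op.
 */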
static bool host_memory_backend_get_prealloc(Object *obj, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    return backend->prealloc;
}

static void host_memory_backend_set_prealloc(Object *obj, bool value,
                                             Error **errp)
{
    Error *local_err = NULL;
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    if (!host_memory_backend_mr_inited(backend)) {
        backend->prealloc = value;
        return;
    }

    if (value && !backend->prealloc) {
        int fd = memory_region_get_fd(&backend->mr);
        void *ptr = memory_region_get_ram_ptr(&backend->mr);
        uint64_t sz = memory_region_size(&backend->mr);

        os_mem_prealloc(fd, ptr, sz, backend->prealloc_threads, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        backend->prealloc = true;
    }
}

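/*
 * The "prealloc-threads" property controls how many CPU threads
 * os_mem_prealloc() uses to touch pages; it must be at least 1.
 */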
static void host_memory_backend_get_prealloc_threads(Object *obj, Visitor *v,
    const char *name, void *opaque, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    visit_type_uint32(v, name, &backend->prealloc_threads, errp);
}

static void host_memory_backend_set_prealloc_threads(Object *obj, Visitor *v,
    const char *name, void *opaque, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    uint32_t value;

    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }
    if (value == 0) {
        error_setg(errp, "property '%s' of %s doesn't take value '%d'", name,
                   object_get_typename(obj), value);
        return;
    }
    backend->prealloc_threads = value;
}

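/*
 * Instance init: seed the merge/dump flags from the machine-wide defaults
 * and start with a single preallocation thread.
 */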
static void host_memory_backend_init(Object *obj)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    MachineState *machine = MACHINE(qdev_get_machine());

    /* TODO: convert access to globals to compat properties */
    backend->merge = machine_mem_merge(machine);
    backend->dump = machine_dump_guest_core(machine);
    backend->prealloc_threads = 1;
}

static void host_memory_backend_post_init(Object *obj)
{
    object_apply_compat_props(obj);
}

bool host_memory_backend_mr_inited(HostMemoryBackend *backend)
{
    /*
     * NOTE: We forbid zero-length memory backend, so here zero means
     * "we haven't inited the backend memory region yet".
     */
    return memory_region_size(&backend->mr) != 0;
}

MemoryRegion *host_memory_backend_get_memory(HostMemoryBackend *backend)
{
    return host_memory_backend_mr_inited(backend) ? &backend->mr : NULL;
}

void host_memory_backend_set_mapped(HostMemoryBackend *backend, bool mapped)
{
    backend->is_mapped = mapped;
}

bool host_memory_backend_is_mapped(HostMemoryBackend *backend)
{
    return backend->is_mapped;
}

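/*
 * Return the host page size backing this memdev: on Linux this honours a
 * hugetlbfs "mem-path" if one is set, elsewhere it falls back to the real
 * host page size.
 */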
#ifdef __linux__
size_t host_memory_backend_pagesize(HostMemoryBackend *memdev)
{
    Object *obj = OBJECT(memdev);
    char *path = object_property_get_str(obj, "mem-path", NULL);
    size_t pagesize = qemu_mempath_getpagesize(path);

    g_free(path);
    return pagesize;
}
#else
size_t host_memory_backend_pagesize(HostMemoryBackend *memdev)
{
    return qemu_real_host_page_size;
}
#endif

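/*
 * UserCreatable completion hook: allocate the backing memory via the
 * subclass' alloc() method, apply the merge/dump madvise hints, bind the
 * region to the requested host NUMA nodes, and finally preallocate it if
 * requested. Preallocation deliberately happens after mbind() so the pages
 * are faulted in under the chosen policy.
 */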
static void
host_memory_backend_memory_complete(UserCreatable *uc, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(uc);
    HostMemoryBackendClass *bc = MEMORY_BACKEND_GET_CLASS(uc);
    Error *local_err = NULL;
    void *ptr;
    uint64_t sz;

    if (bc->alloc) {
        bc->alloc(backend, &local_err);
        if (local_err) {
            goto out;
        }

        ptr = memory_region_get_ram_ptr(&backend->mr);
        sz = memory_region_size(&backend->mr);

        if (backend->merge) {
            qemu_madvise(ptr, sz, QEMU_MADV_MERGEABLE);
        }
        if (!backend->dump) {
            qemu_madvise(ptr, sz, QEMU_MADV_DONTDUMP);
        }
#ifdef CONFIG_NUMA
        unsigned long lastbit = find_last_bit(backend->host_nodes, MAX_NODES);
        /* lastbit == MAX_NODES means maxnode = 0 */
        unsigned long maxnode = (lastbit + 1) % (MAX_NODES + 1);
        /* ensure policy won't be ignored in case memory is preallocated
         * before mbind(). note: MPOL_MF_STRICT is ignored on hugepages so
         * this doesn't catch hugepage case. */
        unsigned flags = MPOL_MF_STRICT | MPOL_MF_MOVE;

        /* check for invalid host-nodes and policies and give more verbose
         * error messages than mbind(). */
        if (maxnode && backend->policy == MPOL_DEFAULT) {
            error_setg(errp, "host-nodes must be empty for policy default,"
                       " or you should explicitly specify a policy other"
                       " than default");
            return;
        } else if (maxnode == 0 && backend->policy != MPOL_DEFAULT) {
            error_setg(errp, "host-nodes must be set for policy %s",
                       HostMemPolicy_str(backend->policy));
            return;
        }

        /* We can have up to MAX_NODES nodes, but we need to pass maxnode+1
         * as argument to mbind() due to an old Linux bug (feature?) which
         * cuts off the last specified node. This means backend->host_nodes
         * must have MAX_NODES+1 bits available.
         */
        assert(sizeof(backend->host_nodes) >=
               BITS_TO_LONGS(MAX_NODES + 1) * sizeof(unsigned long));
        assert(maxnode <= MAX_NODES);

        if (maxnode &&
            mbind(ptr, sz, backend->policy, backend->host_nodes, maxnode + 1,
                  flags)) {
            if (backend->policy != MPOL_DEFAULT || errno != ENOSYS) {
                error_setg_errno(errp, errno,
                                 "cannot bind memory to host NUMA nodes");
                return;
            }
        }
#endif
        /* Preallocate memory after the NUMA policy has been instantiated.
         * This is necessary to guarantee memory is allocated with
         * specified NUMA policy in place.
         */
        if (backend->prealloc) {
            os_mem_prealloc(memory_region_get_fd(&backend->mr), ptr, sz,
                            backend->prealloc_threads, &local_err);
            if (local_err) {
                goto out;
            }
        }
    }
out:
    error_propagate(errp, local_err);
}

static bool
host_memory_backend_can_be_deleted(UserCreatable *uc)
{
    if (host_memory_backend_is_mapped(MEMORY_BACKEND(uc))) {
        return false;
    } else {
        return true;
    }
}

static bool host_memory_backend_get_share(Object *o, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(o);

    return backend->share;
}

static void host_memory_backend_set_share(Object *o, bool value, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(o);

    if (host_memory_backend_mr_inited(backend)) {
        error_setg(errp, "cannot change property value");
        return;
    }
    backend->share = value;
}

static bool
host_memory_backend_get_use_canonical_path(Object *obj, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    return backend->use_canonical_path;
}

static void
host_memory_backend_set_use_canonical_path(Object *obj, bool value,
                                           Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    backend->use_canonical_path = value;
}

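/*
 * Class init: wire up the UserCreatable hooks and register the properties
 * common to all host memory backends. A concrete subclass (for example
 * memory-backend-ram, defined elsewhere) is typically instantiated from the
 * command line roughly like:
 *
 *   -object memory-backend-ram,id=mem0,size=1G,policy=bind,host-nodes=0,prealloc=on
 *
 * (illustrative only; the exact option set depends on the subclass)
 */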
static void
host_memory_backend_class_init(ObjectClass *oc, void *data)
{
    UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);

    ucc->complete = host_memory_backend_memory_complete;
    ucc->can_be_deleted = host_memory_backend_can_be_deleted;

    object_class_property_add_bool(oc, "merge",
        host_memory_backend_get_merge,
        host_memory_backend_set_merge);
    object_class_property_set_description(oc, "merge",
        "Mark memory as mergeable");
    object_class_property_add_bool(oc, "dump",
        host_memory_backend_get_dump,
        host_memory_backend_set_dump);
    object_class_property_set_description(oc, "dump",
        "Set to 'off' to exclude from core dump");
    object_class_property_add_bool(oc, "prealloc",
        host_memory_backend_get_prealloc,
        host_memory_backend_set_prealloc);
    object_class_property_set_description(oc, "prealloc",
        "Preallocate memory");
    object_class_property_add(oc, "prealloc-threads", "int",
        host_memory_backend_get_prealloc_threads,
        host_memory_backend_set_prealloc_threads,
        NULL, NULL);
    object_class_property_set_description(oc, "prealloc-threads",
        "Number of CPU threads to use for prealloc");
    object_class_property_add(oc, "size", "int",
        host_memory_backend_get_size,
        host_memory_backend_set_size,
        NULL, NULL);
    object_class_property_set_description(oc, "size",
        "Size of the memory region (ex: 500M)");
    object_class_property_add(oc, "host-nodes", "int",
        host_memory_backend_get_host_nodes,
        host_memory_backend_set_host_nodes,
        NULL, NULL);
    object_class_property_set_description(oc, "host-nodes",
        "Binds memory to the list of NUMA host nodes");
    object_class_property_add_enum(oc, "policy", "HostMemPolicy",
        &HostMemPolicy_lookup,
        host_memory_backend_get_policy,
        host_memory_backend_set_policy);
    object_class_property_set_description(oc, "policy",
        "Set the NUMA policy");
    object_class_property_add_bool(oc, "share",
        host_memory_backend_get_share, host_memory_backend_set_share);
    object_class_property_set_description(oc, "share",
        "Mark the memory as private to QEMU or shared");
    object_class_property_add_bool(oc, "x-use-canonical-path-for-ramblock-id",
        host_memory_backend_get_use_canonical_path,
        host_memory_backend_set_use_canonical_path);
}

static const TypeInfo host_memory_backend_info = {
    .name = TYPE_MEMORY_BACKEND,
    .parent = TYPE_OBJECT,
    .abstract = true,
    .class_size = sizeof(HostMemoryBackendClass),
    .class_init = host_memory_backend_class_init,
    .instance_size = sizeof(HostMemoryBackend),
    .instance_init = host_memory_backend_init,
    .instance_post_init = host_memory_backend_post_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_USER_CREATABLE },
        { }
    }
};

static void register_types(void)
{
    type_register_static(&host_memory_backend_info);
}

type_init(register_types);