author     Peter Maydell <peter.maydell@linaro.org>    2017-06-22 10:25:03 +0100
committer  Peter Maydell <peter.maydell@linaro.org>    2017-06-22 10:25:03 +0100
commit     db7a99cdc1d0f4d8cbf7c41ce9e570dce04f0a11 (patch)
tree       47fd7ac1842e033dc163ed63788bc818af0d5a7e /util
parent     8dfaf23ae1f2273a9730a9b309cc8471269bb524 (diff)
parent     8da54b2507c1cabf60c2de904cf0383b23239231 (diff)
Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20170619' into staging
Queued TCG patches

# gpg: Signature made Mon 19 Jun 2017 19:12:06 BST
# gpg:                using RSA key 0xAD1270CC4DD0279B
# gpg: Good signature from "Richard Henderson <rth7680@gmail.com>"
# gpg:                 aka "Richard Henderson <rth@redhat.com>"
# gpg:                 aka "Richard Henderson <rth@twiddle.net>"
# Primary key fingerprint: 9CB1 8DDA F8E8 49AD 2AFC 16A4 AD12 70CC 4DD0 279B

* remotes/rth/tags/pull-tcg-20170619:
  target/arm: Exit after clearing aarch64 interrupt mask
  target/s390x: Exit after changing PSW mask
  target/alpha: Use tcg_gen_lookup_and_goto_ptr
  tcg: Increase hit rate of lookup_tb_ptr
  tcg/arm: Use ldr (literal) for goto_tb
  tcg/arm: Try pc-relative addresses for movi
  tcg/arm: Remove limit on code buffer size
  tcg/arm: Use indirect branch for goto_tb
  tcg/aarch64: Use ADR in tcg_out_movi
  translate-all: consolidate tb init in tb_gen_code
  tcg: allocate TB structs before the corresponding translated code
  util: add cacheinfo

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'util')
-rw-r--r--  util/Makefile.objs      1
-rw-r--r--  util/cacheinfo.c      185
2 files changed, 186 insertions, 0 deletions
diff --git a/util/Makefile.objs b/util/Makefile.objs
index 8a333d3dd7..50a55ecc75 100644
--- a/util/Makefile.objs
+++ b/util/Makefile.objs
@@ -20,6 +20,7 @@ util-obj-y += host-utils.o
util-obj-y += bitmap.o bitops.o hbitmap.o
util-obj-y += fifo8.o
util-obj-y += acl.o
+util-obj-y += cacheinfo.o
util-obj-y += error.o qemu-error.o
util-obj-y += id.o
util-obj-y += iov.o qemu-config.o qemu-sockets.o uri.o notify.o
diff --git a/util/cacheinfo.c b/util/cacheinfo.c
new file mode 100644
index 0000000000..f987522df4
--- /dev/null
+++ b/util/cacheinfo.c
@@ -0,0 +1,185 @@
+/*
+ * cacheinfo.c - helpers to query the host about its caches
+ *
+ * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
+ * License: GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+
+int qemu_icache_linesize = 0;
+int qemu_dcache_linesize = 0;
+
+/*
+ * Operating system specific detection mechanisms.
+ */
+
+#if defined(_AIX)
+# include <sys/systemcfg.h>
+
+static void sys_cache_info(int *isize, int *dsize)
+{
+ *isize = _system_configuration.icache_line;
+ *dsize = _system_configuration.dcache_line;
+}
+
+#elif defined(_WIN32)
+
+static void sys_cache_info(int *isize, int *dsize)
+{
+ SYSTEM_LOGICAL_PROCESSOR_INFORMATION *buf;
+ DWORD size = 0;
+ BOOL success;
+ size_t i, n;
+
+ /* Check for the required buffer size first. Note that if the zero
+ size we use for the probe results in success, then there is no
+ data available; fail in that case. */
+ success = GetLogicalProcessorInformation(0, &size);
+ if (success || GetLastError() != ERROR_INSUFFICIENT_BUFFER) {
+ return;
+ }
+
+ n = size / sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION);
+ size = n * sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION);
+ buf = g_new0(SYSTEM_LOGICAL_PROCESSOR_INFORMATION, n);
+ if (!GetLogicalProcessorInformation(buf, &size)) {
+ goto fail;
+ }
+
+ for (i = 0; i < n; i++) {
+ if (buf[i].Relationship == RelationCache
+ && buf[i].Cache.Level == 1) {
+ switch (buf[i].Cache.Type) {
+ case CacheUnified:
+ *isize = *dsize = buf[i].Cache.LineSize;
+ break;
+ case CacheInstruction:
+ *isize = buf[i].Cache.LineSize;
+ break;
+ case CacheData:
+ *dsize = buf[i].Cache.LineSize;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ fail:
+ g_free(buf);
+}
+
+#elif defined(__APPLE__) \
+ || defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
+# include <sys/sysctl.h>
+# if defined(__APPLE__)
+# define SYSCTL_CACHELINE_NAME "hw.cachelinesize"
+# else
+# define SYSCTL_CACHELINE_NAME "machdep.cacheline_size"
+# endif
+
+static void sys_cache_info(int *isize, int *dsize)
+{
+ /* There's only a single sysctl for both I/D cache line sizes. */
+ long size;
+ size_t len = sizeof(size);
+ if (!sysctlbyname(SYSCTL_CACHELINE_NAME, &size, &len, NULL, 0)) {
+ *isize = *dsize = size;
+ }
+}
+
+#else
+/* POSIX */
+
+static void sys_cache_info(int *isize, int *dsize)
+{
+# ifdef _SC_LEVEL1_ICACHE_LINESIZE
+ *isize = sysconf(_SC_LEVEL1_ICACHE_LINESIZE);
+# endif
+# ifdef _SC_LEVEL1_DCACHE_LINESIZE
+ *dsize = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
+# endif
+}
+#endif /* sys_cache_info */
+
+/*
+ * Architecture (+ OS) specific detection mechanisms.
+ */
+
+#if defined(__aarch64__)
+
+static void arch_cache_info(int *isize, int *dsize)
+{
+ if (*isize == 0 || *dsize == 0) {
+ unsigned ctr;
+
+ /* The real cache geometry is in CCSIDR_EL1/CLIDR_EL1/CSSELR_EL1,
+ but (at least under Linux) these are marked protected by the
+ kernel. However, CTR_EL0 contains the minimum linesize in the
+ entire hierarchy, and is used by userspace cache flushing. */
+ asm volatile("mrs\t%0, ctr_el0" : "=r"(ctr));
+ if (*isize == 0) {
+ *isize = 4 << (ctr & 0xf);
+ }
+ if (*dsize == 0) {
+ *dsize = 4 << ((ctr >> 16) & 0xf);
+ }
+ }
+}
+
+#elif defined(_ARCH_PPC) && defined(__linux__)
+
+static void arch_cache_info(int *isize, int *dsize)
+{
+ if (*isize == 0) {
+ *isize = qemu_getauxval(AT_ICACHEBSIZE);
+ }
+ if (*dsize == 0) {
+ *dsize = qemu_getauxval(AT_DCACHEBSIZE);
+ }
+}
+
+#else
+static void arch_cache_info(int *isize, int *dsize) { }
+#endif /* arch_cache_info */
+
+/*
+ * ... and if all else fails ...
+ */
+
+static void fallback_cache_info(int *isize, int *dsize)
+{
+ /* If we can only find one of the two, assume they're the same. */
+ if (*isize) {
+ if (*dsize) {
+ /* Success! */
+ } else {
+ *dsize = *isize;
+ }
+ } else if (*dsize) {
+ *isize = *dsize;
+ } else {
+#if defined(_ARCH_PPC)
+ /* For PPC, we're going to use the icache size computed for
+ flush_icache_range. Which means that we must use the
+ architecture minimum. */
+ *isize = *dsize = 16;
+#else
+ /* Otherwise, 64 bytes is not uncommon. */
+ *isize = *dsize = 64;
+#endif
+ }
+}
+
+static void __attribute__((constructor)) init_cache_info(void)
+{
+ int isize = 0, dsize = 0;
+
+ sys_cache_info(&isize, &dsize);
+ arch_cache_info(&isize, &dsize);
+ fallback_cache_info(&isize, &dsize);
+
+ qemu_icache_linesize = isize;
+ qemu_dcache_linesize = dsize;
+}
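
The patch only defines qemu_icache_linesize and qemu_dcache_linesize and fills them in from a constructor before main() runs; nothing in this diff consumes them yet. As a minimal usage sketch (not part of the patch: the helper names below are hypothetical, and the extern declarations are assumed to live in a QEMU header that is not shown here), a caller such as a code-buffer allocator might round region sizes up to the larger of the two line sizes so that adjacent regions never share a cache line:

#include <stddef.h>

extern int qemu_icache_linesize;   /* filled in by init_cache_info() at startup */
extern int qemu_dcache_linesize;

/* Round n up to a multiple of align; assumes align is a power of two. */
static inline size_t round_up(size_t n, size_t align)
{
    return (n + align - 1) & ~(align - 1);
}

/* Hypothetical helper: size a translated-code region so consecutive
   regions do not share an instruction or data cache line. */
static size_t code_region_align(size_t code_size)
{
    size_t align = (size_t)(qemu_icache_linesize > qemu_dcache_linesize
                            ? qemu_icache_linesize
                            : qemu_dcache_linesize);
    return round_up(code_size, align);
}

Every line size produced by sys_cache_info(), arch_cache_info(), or the fallback is a power of two in practice, so the mask-based rounding above is safe; a more defensive caller could assert that property before relying on it.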