author    Arnaldo Carvalho de Melo <acme@redhat.com>    2015-04-30 12:33:22 -0300
committer Arnaldo Carvalho de Melo <acme@redhat.com>    2015-05-08 16:05:03 -0300
commit    361c564eeff4b78f1303b86e8e8f07fc547bd2c9 (patch)
tree      7a3d74423930cbc539766017a3ec9e88275e918b /tools/arch
parent    f8bffbf1222a64336a81974fc25fe846656ac53e (diff)
perf tools: Move x86 barrier.h stuff to tools/arch/x86/include/asm/barrier.h
We will need it for atomic.h, so move it from the ad-hoc tools/perf/ place to a tools/ subset of the kernel arch/ hierarchy. Other arches will follow, each in a cset.

Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: David Ahern <dsahern@gmail.com>
Cc: Don Zickus <dzickus@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-vy6bqmsvm6puibpay2cy4wid@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools/arch')
-rw-r--r--  tools/arch/x86/include/asm/barrier.h | 28
1 file changed, 28 insertions(+), 0 deletions(-)
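
As the message notes, other arches will follow, and generic tools code is then expected to pull in the right per-arch header. Below is a minimal sketch of such a dispatching wrapper; the tools/include/asm/barrier.h location and the relative include path are illustrative assumptions, not something added by this cset:

/* Hypothetical tools/include/asm/barrier.h (not part of this cset):
 * dispatch to the per-arch barrier.h based on compiler target macros. */
#if defined(__i386__) || defined(__x86_64__)
#include "../../arch/x86/include/asm/barrier.h"
#else
#error "Barriers for this architecture have not been ported yet"
#endif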
diff --git a/tools/arch/x86/include/asm/barrier.h b/tools/arch/x86/include/asm/barrier.h
new file mode 100644
index 000000000000..f366d8e550e4
--- /dev/null
+++ b/tools/arch/x86/include/asm/barrier.h
@@ -0,0 +1,28 @@
+#ifndef _TOOLS_LINUX_ASM_X86_BARRIER_H
+#define _TOOLS_LINUX_ASM_X86_BARRIER_H
+
+/*
+ * Copied from the Linux kernel sources; this also moves code out
+ * of tools/perf/perf-sys.h so that it lives in a place mirroring
+ * the kernel source layout.
+ *
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ */
+
+#if defined(__i386__)
+/*
+ * Some non-Intel clones support out of order store. wmb() ceases to be a
+ * nop for these.
+ */
+#define mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
+#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
+#define wmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
+#elif defined(__x86_64__)
+#define mb() asm volatile("mfence":::"memory")
+#define rmb() asm volatile("lfence":::"memory")
+#define wmb() asm volatile("sfence" ::: "memory")
+#endif
+
+#endif /* _TOOLS_LINUX_ASM_X86_BARRIER_H */
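
For context on how tools/perf consumes these macros: the mmap ring-buffer reader pairs rmb() with the kernel's store of data_head, and a full mb() with its own store of data_tail. The struct and helpers below are a simplified sketch that assumes the barrier macros above are in scope; the field names follow the perf mmap page ABI, but the struct and helper names themselves are illustrative, not code from this cset.

#include <stdint.h>

/* Simplified stand-in for struct perf_event_mmap_page (illustrative). */
struct ring_page {
	volatile uint64_t data_head;	/* advanced by the kernel (producer) */
	volatile uint64_t data_tail;	/* advanced by userspace (consumer) */
};

static uint64_t ring_read_head(struct ring_page *pc)
{
	uint64_t head = pc->data_head;

	rmb();	/* order the data_head load before loads of the records it covers */
	return head;
}

static void ring_write_tail(struct ring_page *pc, uint64_t tail)
{
	mb();	/* finish reading records before publishing data_tail */
	pc->data_tail = tail;
}

Note the asymmetry: the consumer's tail update needs a full mb() rather than wmb(), because it must order prior loads (of the records) before a store (of data_tail), and sfence only orders stores against stores.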