Diffstat (limited to 'tools/virtio')
-rw-r--r--  tools/virtio/linux/compiler.h        2
-rw-r--r--  tools/virtio/linux/uaccess.h         9
-rw-r--r--  tools/virtio/ringtest/main.h        12
-rwxr-xr-x  tools/virtio/ringtest/run-on-all.sh  5

4 files changed, 21 insertions, 7 deletions
diff --git a/tools/virtio/linux/compiler.h b/tools/virtio/linux/compiler.h
index 845960e1cbf2..c9ccfd42ec13 100644
--- a/tools/virtio/linux/compiler.h
+++ b/tools/virtio/linux/compiler.h
@@ -4,6 +4,6 @@
#define WRITE_ONCE(var, val) \
(*((volatile typeof(val) *)(&(var))) = (val))
-#define READ_ONCE(var) (*((volatile typeof(val) *)(&(var))))
+#define READ_ONCE(var) (*((volatile typeof(var) *)(&(var))))
#endif
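
The compiler.h hunk fixes a copy-and-paste bug: READ_ONCE() cast through typeof(val), a name that only exists as a parameter of the neighbouring WRITE_ONCE() macro, so READ_ONCE() only compiled if the caller happened to have a variable called val in scope. A minimal standalone sketch (not part of the patch; shared_counter and main() are illustrative) showing the corrected pair in use:

#include <stdio.h>

/* same definitions as the fixed tools/virtio/linux/compiler.h */
#define WRITE_ONCE(var, val) \
	(*((volatile typeof(val) *)(&(var))) = (val))
#define READ_ONCE(var) (*((volatile typeof(var) *)(&(var))))

static unsigned long long shared_counter;

int main(void)
{
	WRITE_ONCE(shared_counter, 42ULL);		/* exactly one volatile store */
	printf("%llu\n", READ_ONCE(shared_counter));	/* exactly one volatile load */
	return 0;
}

With the old definition the READ_ONCE() line above fails to build ("val undeclared"); with typeof(var) it compiles and performs a single volatile load.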
diff --git a/tools/virtio/linux/uaccess.h b/tools/virtio/linux/uaccess.h
index 0a578fe18653..fa05d01b2c90 100644
--- a/tools/virtio/linux/uaccess.h
+++ b/tools/virtio/linux/uaccess.h
@@ -1,8 +1,9 @@
#ifndef UACCESS_H
#define UACCESS_H
-extern void *__user_addr_min, *__user_addr_max;
-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+#include <linux/compiler.h>
+
+extern void *__user_addr_min, *__user_addr_max;
static inline void __chk_user_ptr(const volatile void *p, size_t size)
{
@@ -13,7 +14,7 @@ static inline void __chk_user_ptr(const volatile void *p, size_t size)
({ \
typeof(ptr) __pu_ptr = (ptr); \
__chk_user_ptr(__pu_ptr, sizeof(*__pu_ptr)); \
- ACCESS_ONCE(*(__pu_ptr)) = x; \
+ WRITE_ONCE(*(__pu_ptr), x); \
0; \
})
@@ -21,7 +22,7 @@ static inline void __chk_user_ptr(const volatile void *p, size_t size)
({ \
typeof(ptr) __pu_ptr = (ptr); \
__chk_user_ptr(__pu_ptr, sizeof(*__pu_ptr)); \
- x = ACCESS_ONCE(*(__pu_ptr)); \
+ x = READ_ONCE(*(__pu_ptr)); \
0; \
})
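
The uaccess.h change drops the shim's private ACCESS_ONCE() copy and routes put_user()/get_user() through the READ_ONCE()/WRITE_ONCE() helpers now pulled in from its own linux/compiler.h, so there is a single definition of "do exactly one access". The accessors remain GCC statement expressions that evaluate to 0, mirroring the kernel API's error-checking shape. A self-contained sketch of that pattern (the my_* names are hypothetical, not the shim's own):

#include <stdio.h>

#define MY_WRITE_ONCE(var, val) (*((volatile typeof(val) *)(&(var))) = (val))
#define MY_READ_ONCE(var)       (*((volatile typeof(var) *)(&(var))))

/* statement expressions: perform one volatile access, then yield 0 for "success" */
#define my_put_user(x, ptr) ({ MY_WRITE_ONCE(*(ptr), x); 0; })
#define my_get_user(x, ptr) ({ x = MY_READ_ONCE(*(ptr)); 0; })

int main(void)
{
	int slot = 0, out = 0;

	if (my_put_user(7, &slot))	/* callers keep the usual if (put_user(...)) shape */
		return 1;
	if (my_get_user(out, &slot))
		return 1;
	printf("%d\n", out);		/* prints 7 */
	return 0;
}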
diff --git a/tools/virtio/ringtest/main.h b/tools/virtio/ringtest/main.h
index 34e63cc4c572..14142faf040b 100644
--- a/tools/virtio/ringtest/main.h
+++ b/tools/virtio/ringtest/main.h
@@ -26,6 +26,16 @@ static inline void wait_cycles(unsigned long long cycles)
#define VMEXIT_CYCLES 500
#define VMENTRY_CYCLES 500
+#elif defined(__s390x__)
+static inline void wait_cycles(unsigned long long cycles)
+{
+ asm volatile("0: brctg %0,0b" : : "d" (cycles));
+}
+
+/* tweak me */
+#define VMEXIT_CYCLES 200
+#define VMENTRY_CYCLES 200
+
#else
static inline void wait_cycles(unsigned long long cycles)
{
@@ -81,6 +91,8 @@ extern unsigned ring_size;
/* Is there a portable way to do this? */
#if defined(__x86_64__) || defined(__i386__)
#define cpu_relax() asm ("rep; nop" ::: "memory")
+#elif defined(__s390x__)
+#define cpu_relax() barrier()
#else
#define cpu_relax() assert(0)
#endif
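
main.h gains an s390x flavour of the two calibration hooks the ringtest harness relies on: wait_cycles() busy-loops with brctg (branch relative on count), which decrements its operand and branches back while it is non-zero, and cpu_relax() falls back to a plain compiler barrier since there is no pause-style hint to issue here. A portable sketch (not from the patch; the fallback loop and main() are illustrative) of what the per-arch pair is meant to provide:

#include <stdio.h>

/* burn roughly `cycles` iterations; the empty asm keeps the loop from being optimized out */
static inline void wait_cycles(unsigned long long cycles)
{
	while (cycles--)
		asm volatile("" ::: "memory");
}

/* polite spin-wait hint; in this fallback it is just a compiler barrier */
#define cpu_relax() asm volatile("" ::: "memory")

int main(void)
{
	wait_cycles(200);	/* the cost the harness charges per simulated VMEXIT on s390x */
	cpu_relax();
	printf("calibration loop done\n");
	return 0;
}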
diff --git a/tools/virtio/ringtest/run-on-all.sh b/tools/virtio/ringtest/run-on-all.sh
index 2e69ca812b4c..29b0d3920bfc 100755
--- a/tools/virtio/ringtest/run-on-all.sh
+++ b/tools/virtio/ringtest/run-on-all.sh
@@ -1,12 +1,13 @@
#!/bin/sh
+CPUS_ONLINE=$(lscpu --online -p=cpu|grep -v -e '#')
#use last CPU for host. Why not the first?
#many devices tend to use cpu0 by default so
#it tends to be busier
-HOST_AFFINITY=$(lscpu -p=cpu | tail -1)
+HOST_AFFINITY=$(echo "${CPUS_ONLINE}"|tail -n 1)
#run command on all cpus
-for cpu in $(seq 0 $HOST_AFFINITY)
+for cpu in $CPUS_ONLINE
do
#Don't run guest and host on same CPU
#It actually works ok if using signalling
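
The run-on-all.sh hunk stops assuming that CPU IDs 0..HOST_AFFINITY are all usable: it asks lscpu for the online CPUs once, pins the host side to the last of them, and iterates over exactly that list when running the test on each CPU, so holes in the CPU numbering (offlined or absent CPUs) no longer break the loop. The same idea expressed in C, as a rough sketch (not part of the patch; it uses the process affinity mask as an approximation of the online set):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t set;
	int cpu, last = -1;

	/* which CPUs may this process actually run on? */
	if (sched_getaffinity(0, sizeof(set), &set))
		return 1;

	for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
		if (CPU_ISSET(cpu, &set)) {
			printf("usable cpu: %d\n", cpu);	/* analogous to one $CPUS_ONLINE entry */
			last = cpu;
		}

	printf("host affinity candidate (last usable cpu): %d\n", last);
	return 0;
}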