[5/8] zdtm/lib/arch: add atomic_cmpxchg

Submitted by Pavel Tikhomirov on Feb. 15, 2018, 10:06 a.m.

Details

Message ID 20180215100655.14225-6-ptikhomirov@virtuozzo.com
State New
Series "don't use wrong pagemap (from other task) on pid reuse"
Headers show

Commit Message

Pavel Tikhomirov Feb. 15, 2018, 10:06 a.m.
From: ptikhomirov <ptikhomirov@virtuozzo.com>

Just copy it, with small changes, for all architectures from:
include/common/arch/*/asm/{atomic.h,cmpxchg.h,atomic_ops.h}

The only change is that atomic_t is defined as uint32_t in zdtm.

https://jira.sw.ru/browse/PSBM-67502

Signed-off-by: Pavel Tikhomirov <ptikhomirov@virtuozzo.com>
---
 test/zdtm/lib/arch/aarch64/include/asm/atomic.h |  22 +++++
 test/zdtm/lib/arch/arm/include/asm/atomic.h     |  26 ++++++
 test/zdtm/lib/arch/arm/include/asm/processor.h  |  28 +++++++
 test/zdtm/lib/arch/ppc64/include/asm/atomic.h   |   4 +
 test/zdtm/lib/arch/ppc64/include/asm/cmpxchg.h  |  96 +++++++++++++++++++++
 test/zdtm/lib/arch/s390/include/asm/atomic.h    |   9 ++
 test/zdtm/lib/arch/x86/include/asm/atomic.h     |   7 ++
 test/zdtm/lib/arch/x86/include/asm/cmpxchg.h    | 107 ++++++++++++++++++++++++
 8 files changed, 299 insertions(+)
 create mode 100644 test/zdtm/lib/arch/arm/include/asm/processor.h
 create mode 100644 test/zdtm/lib/arch/ppc64/include/asm/cmpxchg.h
 create mode 100644 test/zdtm/lib/arch/x86/include/asm/cmpxchg.h

Patch hide | download patch | download mbox

diff --git a/test/zdtm/lib/arch/aarch64/include/asm/atomic.h b/test/zdtm/lib/arch/aarch64/include/asm/atomic.h
index ccf08e700..e16a6fef3 100644
--- a/test/zdtm/lib/arch/aarch64/include/asm/atomic.h
+++ b/test/zdtm/lib/arch/aarch64/include/asm/atomic.h
@@ -70,4 +70,26 @@  static inline int atomic_dec(atomic_t *v) { return atomic_sub_return(1, v) + 1;
 
 #define atomic_inc_return(v)	(atomic_add_return(1, v))
 
+static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) /* CAS: if *ptr == old store new; returns value observed */
+{
+	unsigned long tmp;
+	int oldval;
+
+	smp_mb();
+
+	asm volatile("// atomic_cmpxchg\n"
+"1:	ldxr	%w1, %2\n"
+"	cmp	%w1, %w3\n"
+"	b.ne	2f\n"
+"	stxr	%w0, %w4, %2\n"
+"	cbnz	%w0, 1b\n"
+"2:"
+	: "=&r" (tmp), "=&r" (oldval), "+Q" (*ptr) /* memory operand is *ptr, not the pointer variable */
+	: "Ir" (old), "r" (new)
+	: "cc");
+
+	smp_mb();
+	return oldval;
+}
+
 #endif /* __CR_ATOMIC_H__ */
diff --git a/test/zdtm/lib/arch/arm/include/asm/atomic.h b/test/zdtm/lib/arch/arm/include/asm/atomic.h
index bcf3fe31b..11d6001f1 100644
--- a/test/zdtm/lib/arch/arm/include/asm/atomic.h
+++ b/test/zdtm/lib/arch/arm/include/asm/atomic.h
@@ -1,6 +1,7 @@ 
 #ifndef __CR_ATOMIC_H__
 #define __CR_ATOMIC_H__
 
+#include "asm/processor.h"
 
 typedef uint32_t atomic_t;
 
@@ -9,6 +10,31 @@  typedef uint32_t atomic_t;
 
 #define smp_mb() __asm__ __volatile__ ("dmb" : : : "memory")
 
+static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) /* CAS: if *ptr == old store new; returns value observed */
+{
+	int oldval;
+	unsigned long res;
+
+	smp_mb();
+	prefetchw(ptr);
+
+	do {
+		__asm__ __volatile__("@ atomic_cmpxchg\n"
+		"ldrex	%1, [%3]\n"
+		"mov	%0, #0\n"
+		"teq	%1, %4\n"
+		"it	eq\n"
+		"strexeq %0, %5, [%3]\n"
+		    : "=&r" (res), "=&r" (oldval), "+Qo" (*ptr) /* memory operand is *ptr, not the pointer variable */
+		    : "r" (ptr), "Ir" (old), "r" (new)
+		    : "cc");
+	} while (res);
+
+	smp_mb();
+
+	return oldval;
+}
+
 #define atomic_set(mem,v) (*(mem) = (v))
 #define atomic_get(v) (*(volatile uint32_t *)v)
 
diff --git a/test/zdtm/lib/arch/arm/include/asm/processor.h b/test/zdtm/lib/arch/arm/include/asm/processor.h
new file mode 100644
index 000000000..a390cfd32
--- /dev/null
+++ b/test/zdtm/lib/arch/arm/include/asm/processor.h
@@ -0,0 +1,28 @@ 
+#ifndef __CR_PROCESSOR_H__
+#define __CR_PROCESSOR_H__
+
+/* Copied from linux kernel arch/arm/include/asm/unified.h */
+
+#define WASM(instr)     #instr /* stringify an asm mnemonic */
+
+/* Copied from linux kernel arch/arm/include/asm/processor.h */
+
+#define __ALT_SMP_ASM(smp, up)	/* emit "smp" insn, record "up" alternative */	\
+	"9998:	" smp "\n"						\
+	"	.pushsection \".alt.smp.init\", \"a\"\n"		\
+	"	.long	9998b\n"					\
+	"	" up "\n"						\
+	"	.popsection\n"
+
+static inline void prefetchw(const void *ptr) /* prefetch-for-write hint: pldw (smp) / pld (up) */
+{
+	__asm__ __volatile__(
+		".arch_extension	mp\n"
+		__ALT_SMP_ASM(
+			WASM(pldw)		"\t%a0",
+			WASM(pld)		"\t%a0"
+		)
+		:: "p" (ptr));
+}
+
+#endif /* __CR_PROCESSOR_H__ */
diff --git a/test/zdtm/lib/arch/ppc64/include/asm/atomic.h b/test/zdtm/lib/arch/ppc64/include/asm/atomic.h
index bd14cc059..162044d8c 100644
--- a/test/zdtm/lib/arch/ppc64/include/asm/atomic.h
+++ b/test/zdtm/lib/arch/ppc64/include/asm/atomic.h
@@ -8,6 +8,8 @@ 
  */
 typedef uint32_t atomic_t;
 
+#include "asm/cmpxchg.h"
+
 #define PPC_ATOMIC_ENTRY_BARRIER	"lwsync \n"
 #define PPC_ATOMIC_EXIT_BARRIER		"sync  	\n"
 
@@ -84,4 +86,6 @@  static __inline__ void atomic_dec(atomic_t *v)
 	: "cc", "xer");
 }
 
+#define atomic_cmpxchg(v, o, n) (cmpxchg((v), (o), (n))) /* returns the old value of *v */
+
 #endif /* __CR_ATOMIC_H__ */
diff --git a/test/zdtm/lib/arch/ppc64/include/asm/cmpxchg.h b/test/zdtm/lib/arch/ppc64/include/asm/cmpxchg.h
new file mode 100644
index 000000000..b93fbdef0
--- /dev/null
+++ b/test/zdtm/lib/arch/ppc64/include/asm/cmpxchg.h
@@ -0,0 +1,96 @@ 
+#ifndef __CR_CMPXCHG_H__
+#define __CR_CMPXCHG_H__
+
+/*
+ * Copied from kernel header file arch/powerpc/include/asm/cmpxchg.h
+ */
+
+#define PPC_ACQUIRE_BARRIER		"isync	\n"
+#define PPC_RELEASE_BARRIER		"lwsync	\n"
+
+/*
+ * Compare and exchange - if *p == old, set it to new,
+ * and return the old value of *p.
+ */
+
+static __always_inline unsigned long
+__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new) /* CAS on a 32-bit word; returns previous *p */
+{
+	unsigned int prev; /* value observed before the conditional store */
+
+	__asm__ __volatile__ (
+	PPC_RELEASE_BARRIER \
+"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
+	cmpw	0,%0,%3\n\
+	bne-	2f\n"
+"	stwcx.	%4,0,%2\n\
+	bne-	1b \n" \
+	PPC_ACQUIRE_BARRIER
+	"\n\
+2:"
+	: "=&r" (prev), "+m" (*p)
+	: "r" (p), "r" (old), "r" (new)
+	: "cc", "memory");
+
+	return prev;
+}
+
+static __always_inline unsigned long
+__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new) /* CAS on a 64-bit word; returns previous *p */
+{
+	unsigned long prev; /* value observed before the conditional store */
+
+	__asm__ __volatile__ (
+	PPC_RELEASE_BARRIER \
+"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
+	cmpd	0,%0,%3\n\
+	bne-	2f\n\
+	stdcx.	%4,0,%2\n\
+	bne-	1b \n" \
+	PPC_ACQUIRE_BARRIER
+	"\n\
+2:"
+	: "=&r" (prev), "+m" (*p)
+	: "r" (p), "r" (old), "r" (new)
+	: "cc", "memory");
+
+	return prev;
+}
+
+/* This function doesn't exist, so you'll get a linker error
+   if something tries to do an invalid cmpxchg().  */
+#ifdef CR_DEBUG /* debug build: trap at run time instead of failing at link time */
+static inline void __cmpxchg_called_with_bad_pointer(void)
+{
+	__asm__ __volatile__ (
+		"1:	twi 	31,0,0	# trap\n"
+		"	b 	1b"
+		: : : "memory");
+}
+#else
+extern void __cmpxchg_called_with_bad_pointer(void);
+#endif
+
+static __always_inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
+	  unsigned int size) /* dispatch by operand size; only 4 and 8 are valid */
+{
+	switch (size) {
+	case 4:
+		return __cmpxchg_u32(ptr, old, new);
+	case 8:
+		return __cmpxchg_u64(ptr, old, new);
+	}
+	__cmpxchg_called_with_bad_pointer(); /* link/trap failure for unsupported sizes */
+	return old;
+}
+
+#define cmpxchg(ptr, o, n)	/* type-generic CAS; returns old *ptr */	 \
+  ({									 \
+     __typeof__(*(ptr)) _o_ = (o);					 \
+     __typeof__(*(ptr)) _n_ = (n);					 \
+     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
+				    (unsigned long)_n_, sizeof(*(ptr))); \
+  })
+
+#endif /* __CR_CMPXCHG_H__ */
diff --git a/test/zdtm/lib/arch/s390/include/asm/atomic.h b/test/zdtm/lib/arch/s390/include/asm/atomic.h
index b7c4b2c53..b1cd0f0f2 100644
--- a/test/zdtm/lib/arch/s390/include/asm/atomic.h
+++ b/test/zdtm/lib/arch/s390/include/asm/atomic.h
@@ -65,4 +65,13 @@  static inline int atomic_inc(atomic_t *v)
 
 #define atomic_dec(_v)			atomic_sub(1, _v)
 
+static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) /* cs: if *ptr == old store new; old gets prior *ptr */
+{
+	asm volatile(
+		"	cs	%[old],%[new],%[ptr]"
+		: [old] "+d" (old), [ptr] "+Q" (*ptr)
+		: [new] "d" (new) : "cc", "memory");
+	return old;
+}
+
 #endif /* __ARCH_S390_ATOMIC__  */
diff --git a/test/zdtm/lib/arch/x86/include/asm/atomic.h b/test/zdtm/lib/arch/x86/include/asm/atomic.h
index 7621df09c..8f828e731 100644
--- a/test/zdtm/lib/arch/x86/include/asm/atomic.h
+++ b/test/zdtm/lib/arch/x86/include/asm/atomic.h
@@ -1,6 +1,8 @@ 
 #ifndef ATOMIC_H__
 #define ATOMIC_H__
 
+#include "asm/cmpxchg.h"
+
 #define atomic_set(mem, v)					\
 	({							\
 		asm volatile ("lock xchg %0, %1\n"		\
@@ -46,4 +48,9 @@ 
 		     : "ir" (i));				\
 })
 
+static inline int atomic_cmpxchg(uint32_t *v, uint32_t old, uint32_t new) /* thin wrapper over the cmpxchg() macro */
+{
+	return cmpxchg(v, old, new);
+}
+
 #endif /* ATOMIC_H__ */
diff --git a/test/zdtm/lib/arch/x86/include/asm/cmpxchg.h b/test/zdtm/lib/arch/x86/include/asm/cmpxchg.h
new file mode 100644
index 000000000..4b6951933
--- /dev/null
+++ b/test/zdtm/lib/arch/x86/include/asm/cmpxchg.h
@@ -0,0 +1,107 @@ 
+#ifndef __CR_CMPXCHG_H__
+#define __CR_CMPXCHG_H__
+
+#include <stdint.h>
+
+#define LOCK_PREFIX "\n\tlock; " /* bus-lock prefix for SMP-safe RMW insns */
+
+#define __X86_CASE_B	1 /* operand sizes in bytes, used as switch labels below */
+#define __X86_CASE_W	2
+#define __X86_CASE_L	4
+#define __X86_CASE_Q	8
+
+/*
+ * An exchange-type operation, which takes a value and a pointer, and
+ * returns the old value. Make sure you never reach non-case statement
+ * here, otherwise behaviour is undefined.
+ */
+#define __xchg_op(ptr, arg, op, lock)					\
+	({								\
+	        __typeof__ (*(ptr)) __ret = (arg);			\
+		switch (sizeof(*(ptr))) { /* no default: unsupported sizes silently return __ret unchanged */ \
+		case __X86_CASE_B:					\
+			asm volatile (lock #op "b %b0, %1\n"		\
+				      : "+q" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_W:					\
+			asm volatile (lock #op "w %w0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_L:					\
+			asm volatile (lock #op "l %0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_Q:					\
+			asm volatile (lock #op "q %q0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		}							\
+		__ret;							\
+	})
+
+#define __xadd(ptr, inc, lock)	__xchg_op((ptr), (inc), xadd, lock) /* atomic fetch-and-add; yields old value */
+#define xadd(ptr, inc)		__xadd((ptr), (inc), "lock ;") /* NOTE(review): uses "lock ;" rather than LOCK_PREFIX -- confirm intentional */
+
+/* Borrowed from linux kernel arch/x86/include/asm/cmpxchg.h */
+
+/*
+ * Atomic compare and exchange.  Compare OLD with MEM, if identical,
+ * store NEW in MEM.  Return the initial value in MEM.  Success is
+ * indicated by comparing RETURN with OLD.
+ */
+#define __raw_cmpxchg(ptr, old, new, size, lock)			\
+({									\
+	__typeof__(*(ptr)) __ret;					\
+	__typeof__(*(ptr)) __old = (old);				\
+	__typeof__(*(ptr)) __new = (new);				\
+	switch (size) { /* no default: __ret stays uninitialized for bad sizes */ \
+	case __X86_CASE_B:						\
+	{								\
+		volatile uint8_t *__ptr = (volatile uint8_t *)(ptr);	\
+		asm volatile(lock "cmpxchgb %2,%1"			\
+			     : "=a" (__ret), "+m" (*__ptr)		\
+			     : "q" (__new), "0" (__old)			\
+			     : "memory");				\
+		break;							\
+	}								\
+	case __X86_CASE_W:						\
+	{								\
+		volatile uint16_t *__ptr = (volatile uint16_t *)(ptr);	\
+		asm volatile(lock "cmpxchgw %2,%1"			\
+			     : "=a" (__ret), "+m" (*__ptr)		\
+			     : "r" (__new), "0" (__old)			\
+			     : "memory");				\
+		break;							\
+	}								\
+	case __X86_CASE_L:						\
+	{								\
+		volatile uint32_t *__ptr = (volatile uint32_t *)(ptr);	\
+		asm volatile(lock "cmpxchgl %2,%1"			\
+			     : "=a" (__ret), "+m" (*__ptr)		\
+			     : "r" (__new), "0" (__old)			\
+			     : "memory");				\
+		break;							\
+	}								\
+	case __X86_CASE_Q:						\
+	{								\
+		volatile uint64_t *__ptr = (volatile uint64_t *)(ptr);	\
+		asm volatile(lock "cmpxchgq %2,%1"			\
+			     : "=a" (__ret), "+m" (*__ptr)		\
+			     : "r" (__new), "0" (__old)			\
+			     : "memory");				\
+		break;							\
+	}								\
+	}								\
+	__ret;								\
+})
+
+#define __cmpxchg(ptr, old, new, size)	/* always lock-prefixed variant */	\
+	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
+#define cmpxchg(ptr, old, new)	/* type-generic CAS; returns old *ptr */	\
+	__cmpxchg(ptr, old, new, sizeof(*(ptr)))
+
+#endif /* __CR_CMPXCHG_H__ */