From 0d902872a1ad9a50f7ccbb5a35c774f6394a9e26 Mon Sep 17 00:00:00 2001
From: Balazs Gerofi
Date: Tue, 2 Apr 2019 23:05:53 +0900
Subject: [PATCH] x86: fix xchg() and cmpxchg() macros

Change-Id: I6faf0fff8a8595734fca6247634cdae6b86483b3
---
 arch/x86_64/kernel/include/arch/cpu.h   |   8 ++
 arch/x86_64/kernel/include/ihk/atomic.h | 146 ++++++++++++++++++------
 2 files changed, 119 insertions(+), 35 deletions(-)

diff --git a/arch/x86_64/kernel/include/arch/cpu.h b/arch/x86_64/kernel/include/arch/cpu.h
index e387aa54..1114c29b 100644
--- a/arch/x86_64/kernel/include/arch/cpu.h
+++ b/arch/x86_64/kernel/include/arch/cpu.h
@@ -34,4 +34,12 @@ static inline unsigned long read_tsc(void)
 	return (low | ((unsigned long)high << 32));
 }
 
+#define smp_load_acquire(p)				\
+({							\
+	typeof(*p) ___p1 = ACCESS_ONCE(*p);		\
+	compiletime_assert_atomic_type(*p);		\
+	barrier();					\
+	___p1;						\
+})
+
 #endif /* ARCH_CPU_H */
diff --git a/arch/x86_64/kernel/include/ihk/atomic.h b/arch/x86_64/kernel/include/ihk/atomic.h
index 6346cfd2..14f7a52e 100644
--- a/arch/x86_64/kernel/include/ihk/atomic.h
+++ b/arch/x86_64/kernel/include/ihk/atomic.h
@@ -13,6 +13,8 @@
 #ifndef HEADER_X86_COMMON_IHK_ATOMIC_H
 #define HEADER_X86_COMMON_IHK_ATOMIC_H
 
+#include <ihk/types.h>
+
 /***********************************************************************
  * ihk_atomic_t
  */
@@ -156,43 +158,55 @@ static inline unsigned long xchg8(unsigned long *ptr, unsigned long x)
 	return __x;
 }
 
-#define __xchg(x, ptr, size)				\
-({							\
-	__typeof(*(ptr)) __x = (x);			\
-	switch (size) {					\
-	case 1:						\
-		asm volatile("xchgb %b0,%1"		\
-			     : "=q" (__x)		\
-			     : "m" (*__xg(ptr)), "0" (__x) \
-			     : "memory");		\
-		break;					\
-	case 2:						\
-		asm volatile("xchgw %w0,%1"		\
-			     : "=r" (__x)		\
-			     : "m" (*__xg(ptr)), "0" (__x) \
-			     : "memory");		\
-		break;					\
-	case 4:						\
-		asm volatile("xchgl %k0,%1"		\
-			     : "=r" (__x)		\
-			     : "m" (*__xg(ptr)), "0" (__x) \
-			     : "memory");		\
-		break;					\
-	case 8:						\
-		asm volatile("xchgq %0,%1"		\
-			     : "=r" (__x)		\
-			     : "m" (*__xg(ptr)), "0" (__x) \
-			     : "memory");		\
-		break;					\
-	default:					\
-		panic("xchg for wrong size");		\
-	}						\
-	__x;						\
-})
+#define __X86_CASE_B	1
+#define __X86_CASE_W	2
+#define __X86_CASE_L	4
+#define __X86_CASE_Q	8
+
+extern void __xchg_wrong_size(void)
+	__compiletime_error("Bad argument size for xchg");
 
-#define xchg(ptr, v)					\
-	__xchg((v), (ptr), sizeof(*ptr))
+/*
+ * An exchange-type operation, which takes a value and a pointer, and
+ * returns the old value.
+ */
+#define __xchg_op(ptr, arg, op, lock)				\
+	({							\
+		__typeof__(*(ptr)) __ret = (arg);		\
+		switch (sizeof(*(ptr))) {			\
+		case __X86_CASE_B:				\
+			asm volatile (lock #op "b %b0, %1\n"	\
+				      : "+q" (__ret), "+m" (*(ptr)) \
+				      : : "memory", "cc");	\
+			break;					\
+		case __X86_CASE_W:				\
+			asm volatile (lock #op "w %w0, %1\n"	\
+				      : "+r" (__ret), "+m" (*(ptr)) \
+				      : : "memory", "cc");	\
+			break;					\
+		case __X86_CASE_L:				\
+			asm volatile (lock #op "l %0, %1\n"	\
+				      : "+r" (__ret), "+m" (*(ptr)) \
+				      : : "memory", "cc");	\
+			break;					\
+		case __X86_CASE_Q:				\
+			asm volatile (lock #op "q %q0, %1\n"	\
+				      : "+r" (__ret), "+m" (*(ptr)) \
+				      : : "memory", "cc");	\
+			break;					\
+		default:					\
+			__xchg_wrong_size();			\
+		}						\
+		__ret;						\
+	})
+
+/*
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
+ * Since this is generally used to protect other memory information, we
+ * use "asm volatile" and "memory" clobbers to prevent gcc from moving
+ * information around.
+ */
+#define xchg(ptr, v)	__xchg_op((ptr), (v), xchg, "")
 
 static inline unsigned long atomic_cmpxchg8(unsigned long *addr,
 					    unsigned long oldval,
@@ -241,4 +255,66 @@ static inline unsigned long ihk_atomic_add_long_return(long i, long *v)
 {
 	return i + __i;
 }
+extern void __cmpxchg_wrong_size(void)
+	__compiletime_error("Bad argument size for cmpxchg");
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+#define __raw_cmpxchg(ptr, old, new, size, lock)		\
+({								\
+	__typeof__(*(ptr)) __ret;				\
+	__typeof__(*(ptr)) __old = (old);			\
+	__typeof__(*(ptr)) __new = (new);			\
+	switch (size) {						\
+	case __X86_CASE_B:					\
+	{							\
+		volatile uint8_t *__ptr = (volatile uint8_t *)(ptr); \
+		asm volatile(lock "cmpxchgb %2,%1"		\
+			     : "=a" (__ret), "+m" (*__ptr)	\
+			     : "q" (__new), "0" (__old)		\
+			     : "memory");			\
+		break;						\
+	}							\
+	case __X86_CASE_W:					\
+	{							\
+		volatile uint16_t *__ptr = (volatile uint16_t *)(ptr); \
+		asm volatile(lock "cmpxchgw %2,%1"		\
+			     : "=a" (__ret), "+m" (*__ptr)	\
+			     : "r" (__new), "0" (__old)		\
+			     : "memory");			\
+		break;						\
+	}							\
+	case __X86_CASE_L:					\
+	{							\
+		volatile uint32_t *__ptr = (volatile uint32_t *)(ptr); \
+		asm volatile(lock "cmpxchgl %2,%1"		\
+			     : "=a" (__ret), "+m" (*__ptr)	\
+			     : "r" (__new), "0" (__old)		\
+			     : "memory");			\
+		break;						\
+	}							\
+	case __X86_CASE_Q:					\
+	{							\
+		volatile uint64_t *__ptr = (volatile uint64_t *)(ptr); \
+		asm volatile(lock "cmpxchgq %2,%1"		\
+			     : "=a" (__ret), "+m" (*__ptr)	\
+			     : "r" (__new), "0" (__old)		\
+			     : "memory");			\
+		break;						\
+	}							\
+	default:						\
+		__cmpxchg_wrong_size();				\
+	}							\
+	__ret;							\
+})
+
+#define __cmpxchg(ptr, old, new, size)				\
+	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")
+
+#define cmpxchg(ptr, old, new)					\
+	__cmpxchg(ptr, old, new, sizeof(*(ptr)))
+
 #endif
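
Usage sketch (not part of the patch): the snippet below shows how the fixed
macros are typically used, assuming they behave like their Linux kernel
counterparts. cmpxchg() returns the value actually observed in memory, so
callers retry until that value matches the one they expected, while xchg()
always implies the LOCK prefix, so a single exchange suffices for a
test-and-set lock. The helpers saturating_add(), ts_lock() and ts_unlock()
are hypothetical names, not functions in the tree.

/* Usage sketch only -- not part of the patch. Assumes the cmpxchg()/xchg()
 * macros above; saturating_add(), ts_lock(), ts_unlock() are hypothetical. */
#include <ihk/atomic.h>

static unsigned long counter;

/* Add delta to counter but saturate at max, via a cmpxchg() retry loop. */
static unsigned long saturating_add(unsigned long delta, unsigned long max)
{
	unsigned long old = counter, new, seen;

	for (;;) {
		new = (max - old < delta) ? max : old + delta;
		seen = cmpxchg(&counter, old, new); /* returns observed value */
		if (seen == old)
			return new;	/* memory really held old: update done */
		old = seen;		/* lost a race: retry with fresh value */
	}
}

/* Test-and-set spinlock: xchg() implies LOCK, so one exchange claims it. */
static void ts_lock(unsigned int *lock)
{
	while (xchg(lock, 1U))
		;	/* spin until the previous holder stores 0 */
}

static void ts_unlock(unsigned int *lock)
{
	/* On x86 a plain store has release semantics; readers can pair this
	 * with the smp_load_acquire() added to arch/cpu.h by this patch. */
	*(volatile unsigned int *)lock = 0;
}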