*/
int __attribute__((constructor)) __urcu_cas_init(void);
-static pthread_mutex_t compat_mutex = PTHREAD_MUTEX_INITIALIZER;
-
/*
* -1: unknown
* 1: available
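+ * 0: unavailable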
*/
int __urcu_cas_avail = -1;
+static pthread_mutex_t compat_mutex = PTHREAD_MUTEX_INITIALIZER;
+
/*
- * Imported from glibc 2.3.5. linuxthreads/sysdeps/i386/pt-machine.h.
+ * get_eflags/set_eflags/compare_and_swap_is_available imported from glibc
+ * 2.3.5. linuxthreads/sysdeps/i386/pt-machine.h.
*/
-int get_eflags (void)
+static int get_eflags (void)
{
int res;
__asm__ __volatile__ ("pushfl; popl %0" : "=r" (res) : );
return res;
}
-void set_eflags (int newflags)
+static void set_eflags (int newflags)
{
__asm__ __volatile__ ("pushl %0; popfl" : : "r" (newflags) : "cc");
}
-int compare_and_swap_is_available (void)
+static int compare_and_swap_is_available (void)
{
int oldflags = get_eflags ();
int changed;

/* Flip the AC bit in EFLAGS; only the i486 and above, which also
   introduced cmpxchg, allow it to change.  */
set_eflags (oldflags ^ 0x40000);
changed = (get_eflags () ^ oldflags) & 0x40000;
set_eflags (oldflags); /* Restore EFLAGS. */
return changed != 0;
}
-unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
- unsigned long _new, int len)
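+/*
+ * Block all signals while compat_mutex is held: a signal handler that
+ * itself used these compat operations could otherwise interrupt the
+ * lock holder and deadlock on the non-recursive mutex.
+ */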
+static void mutex_lock_signal_save(pthread_mutex_t *mutex, sigset_t *oldmask)
{
- sigset_t newmask, oldmask;
+ sigset_t newmask;
int ret;
/* Disable signals */
-ret = sigemptyset(&newmask);
+ret = sigfillset(&newmask);
assert(!ret);
- ret = pthread_sigmask(SIG_SETMASK, &newmask, &oldmask);
+ ret = pthread_sigmask(SIG_SETMASK, &newmask, oldmask);
assert(!ret);
-ret = pthread_mutex_lock(&compat_mutex);
+ret = pthread_mutex_lock(mutex);
assert(!ret);
+}
+
+static void mutex_lock_signal_restore(pthread_mutex_t *mutex, sigset_t *oldmask)
+{
+ int ret;
+
+ ret = pthread_mutex_unlock(mutex);
+ assert(!ret);
+ ret = pthread_sigmask(SIG_SETMASK, oldmask, NULL);
+ assert(!ret);
+}
+
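+/*
+ * Even a plain store must take the mutex: an unlocked store could land
+ * between the read and the write of a concurrent compat cmpxchg or
+ * add_return and be lost. Plain loads, however, are naturally atomic
+ * on x86 and skip the mutex (see uatomic_read in the header).
+ */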
+unsigned long _compat_uatomic_set(void *addr, unsigned long _new, int len)
+{
+ sigset_t mask;
+ unsigned long result;
+
+ mutex_lock_signal_save(&compat_mutex, &mask);
+ switch (len) {
+ case 1:
+ *(unsigned char *)addr = (unsigned char)_new;
+ result = *(unsigned char *)addr;
+ break;
+ case 2:
+ *(unsigned short *)addr = (unsigned short)_new;
+ result = *(unsigned short *)addr;
+ break;
+ case 4:
+ *(unsigned int *)addr = (unsigned int)_new;
+ result = *(unsigned int *)addr;
+ break;
+ default:
+ /*
+ * generate an illegal instruction. Cannot catch this with
+ * linker tricks when optimizations are disabled.
+ */
+ __asm__ __volatile__("ud2");
+ }
+ mutex_lock_signal_restore(&compat_mutex, &mask);
+ return result;
+}
+
+unsigned long _compat_uatomic_xchg(void *addr, unsigned long _new, int len)
+{
+ sigset_t mask;
+ unsigned long retval;
+
+ mutex_lock_signal_save(&compat_mutex, &mask);
+ switch (len) {
+ case 1:
+ retval = *(unsigned char *)addr;
+ *(unsigned char *)addr = (unsigned char)_new;
+ break;
+ case 2:
+ retval = *(unsigned short *)addr;
+ *(unsigned short *)addr = (unsigned short)_new;
+ break;
+ case 4:
+ retval = *(unsigned int *)addr;
+ *(unsigned int *)addr = (unsigned int)_new;
+ break;
+ default:
+ /*
+ * generate an illegal instruction. Cannot catch this with
+ * linker tricks when optimizations are disabled.
+ */
+ __asm__ __volatile__("ud2");
+ }
+ mutex_lock_signal_restore(&compat_mutex, &mask);
+ return retval;
+}
+
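+/*
+ * Like the hardware cmpxchg, returns the value of *addr prior to the
+ * operation; the caller detects success by comparing it with "old".
+ */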
+unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
+ unsigned long _new, int len)
+{
+ unsigned long retval;
+ sigset_t mask;
+
+ mutex_lock_signal_save(&compat_mutex, &mask);
switch (len) {
case 1:
{
unsigned char result = *(unsigned char *)addr;
- if (result == old)
+ if (result == (unsigned char)old)
*(unsigned char *)addr = (unsigned char)_new;
- return result;
+ retval = result;
+ break;
}
case 2:
{
unsigned short result = *(unsigned short *)addr;
- if (result == old)
+ if (result == (unsigned short)old)
*(unsigned short *)addr = (unsigned short)_new;
- return result;
+ retval = result;
+ break;
}
case 4:
{
unsigned int result = *(unsigned int *)addr;
- if (result == old)
+ if (result == (unsigned int)old)
*(unsigned int *)addr = (unsigned int)_new;
- return result;
+ retval = result;
+ break;
}
+ default:
+ /*
+ * generate an illegal instruction. Cannot catch this with
+ * linker tricks when optimizations are disabled.
+ */
+ __asm__ __volatile__("ud2");
}
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__("ud2");
- return 0;
+ mutex_lock_signal_restore(&compat_mutex, &mask);
+ return retval;
+}
- ret = pthread_mutex_unlock(&compat_mutex);
- assert(!ret);
- ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
- assert(!ret);
+unsigned long _compat_uatomic_add_return(void *addr, unsigned long v, int len)
+{
+ sigset_t mask;
+ unsigned long result;
+
+ mutex_lock_signal_save(&compat_mutex, &mask);
+ switch (len) {
+ case 1:
+ *(unsigned char *)addr += (unsigned char)v;
+ result = *(unsigned char *)addr;
+ break;
+ case 2:
+ *(unsigned short *)addr += (unsigned short)v;
+ result = *(unsigned short *)addr;
+ break;
+ case 4:
+ *(unsigned int *)addr += (unsigned int)v;
+ result = *(unsigned int *)addr;
+ break;
+ default:
+ /*
+ * generate an illegal instruction. Cannot catch this with
+ * linker tricks when optimizations are disabled.
+ */
+ __asm__ __volatile__("ud2");
+ }
+ mutex_lock_signal_restore(&compat_mutex, &mask);
+ return result;
}
int __urcu_cas_init(void)
*/
#include <urcu/compiler.h>
+#include <urcu/system.h>
#ifndef __SIZEOF_LONG__
#if defined(__x86_64__) || defined(__amd64__)
};
#define __hp(x) ((struct __uatomic_dummy *)(x))
-#define uatomic_set(addr, v) \
-do { \
- ACCESS_ONCE(*(addr)) = (v); \
-} while (0)
-
-#define uatomic_read(addr) ACCESS_ONCE(*(addr))
+#define _uatomic_set(addr, v) STORE_SHARED(*(addr), (v))
+#define _uatomic_read(addr) LOAD_SHARED(*(addr))
/* cmpxchg */
static inline __attribute__((always_inline))
-unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
+unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
unsigned long _new, int len)
{
switch (len) {
return 0;
}
-#define uatomic_cmpxchg(addr, old, _new) \
- ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
- (unsigned long)(_new), \
+#define _uatomic_cmpxchg(addr, old, _new) \
+ ((__typeof__(*(addr))) __uatomic_cmpxchg((addr), (unsigned long)(old),\
+ (unsigned long)(_new), \
sizeof(*(addr))))
/* xchg */
static inline __attribute__((always_inline))
-unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
+unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
{
/* Note: the "xchg" instruction does not need a "lock" prefix. */
switch (len) {
return 0;
}
-#define uatomic_xchg(addr, v) \
- ((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
+#define _uatomic_xchg(addr, v) \
+ ((__typeof__(*(addr))) __uatomic_exchange((addr), (unsigned long)(v), \
sizeof(*(addr))))
/* uatomic_add_return, uatomic_sub_return */
static inline __attribute__((always_inline))
-unsigned long _uatomic_add_return(void *addr, unsigned long val,
+unsigned long __uatomic_add_return(void *addr, unsigned long val,
int len)
{
switch (len) {
return 0;
}
-#define uatomic_add_return(addr, v) \
- ((__typeof__(*(addr))) _uatomic_add_return((addr), \
+#define _uatomic_add_return(addr, v) \
+ ((__typeof__(*(addr))) __uatomic_add_return((addr), \
(unsigned long)(v), \
sizeof(*(addr))))
-#define uatomic_sub_return(addr, v) uatomic_add_return((addr), -(v))
+#define _uatomic_sub_return(addr, v) _uatomic_add_return((addr), -(v))
/* uatomic_add, uatomic_sub */
static inline __attribute__((always_inline))
-void _uatomic_add(void *addr, unsigned long val, int len)
+void __uatomic_add(void *addr, unsigned long val, int len)
{
switch (len) {
case 1:
return;
}
-#define uatomic_add(addr, v) \
- (_uatomic_add((addr), (unsigned long)(v), sizeof(*(addr))))
+#define _uatomic_add(addr, v) \
+ (__uatomic_add((addr), (unsigned long)(v), sizeof(*(addr))))
-#define uatomic_sub(addr, v) uatomic_add((addr), -(v))
+#define _uatomic_sub(addr, v) _uatomic_add((addr), -(v))
/* uatomic_inc */
static inline __attribute__((always_inline))
-void _uatomic_inc(void *addr, int len)
+void __uatomic_inc(void *addr, int len)
{
switch (len) {
case 1:
return;
}
-#define uatomic_inc(addr) (_uatomic_inc((addr), sizeof(*(addr))))
+#define _uatomic_inc(addr) (__uatomic_inc((addr), sizeof(*(addr))))
/* uatomic_dec */
static inline __attribute__((always_inline))
-void _uatomic_dec(void *addr, int len)
+void __uatomic_dec(void *addr, int len)
{
switch (len) {
case 1:
return;
}
-#define uatomic_dec(addr) (_uatomic_dec((addr), sizeof(*(addr))))
+#define _uatomic_dec(addr) (__uatomic_dec((addr), sizeof(*(addr))))
-#if (BITS_PER_LONG == 64)
-#define URCU_CAS_AVAIL() 1
-#define compat_uatomic_cmpxchg(ptr, old, _new) uatomic_cmpxchg(ptr, old, _new)
-#else
+#if ((BITS_PER_LONG != 64) && defined(CONFIG_URCU_COMPAT_ARCH))
extern int __urcu_cas_avail;
extern int __urcu_cas_init(void);
-#define URCU_CAS_AVAIL() \
- ((likely(__urcu_cas_avail > 0)) ? \
- (1) : \
- ((unlikely(__urcu_cas_avail < 0) ? \
- (__urcu_cas_init()) : \
- (0))))
+
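+/*
+ * UATOMIC_COMPAT(insn) selects the implementation at run time: once
+ * __urcu_cas_avail is settled (1: cmpxchg available, 0: unavailable),
+ * it expands to the i486+ fast path (_uatomic_*) or to the mutex-based
+ * fallback (compat_uatomic_*). The first caller pays the one-time cost
+ * of __urcu_cas_init().
+ */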
+#define UATOMIC_COMPAT(insn) \
+ ((likely(__urcu_cas_avail > 0)) \
+ ? (_uatomic_##insn) \
+ : ((unlikely(__urcu_cas_avail < 0) \
+ ? ((__urcu_cas_init() > 0) \
+ ? (_uatomic_##insn) \
+ : (compat_uatomic_##insn)) \
+ : (compat_uatomic_##insn))))
+
+extern unsigned long _compat_uatomic_set(void *addr,
+ unsigned long _new, int len);
+#define compat_uatomic_set(addr, _new) \
+ ((__typeof__(*(addr))) _compat_uatomic_set((addr), \
+ (unsigned long)(_new), \
+ sizeof(*(addr))))
+
+extern unsigned long _compat_uatomic_xchg(void *addr,
+ unsigned long _new, int len);
+#define compat_uatomic_xchg(addr, _new) \
+ ((__typeof__(*(addr))) _compat_uatomic_xchg((addr), \
+ (unsigned long)(_new), \
+ sizeof(*(addr))))
extern unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
- unsigned long _new, int len);
+ unsigned long _new, int len);
+#define compat_uatomic_cmpxchg(addr, old, _new) \
+ ((__typeof__(*(addr))) _compat_uatomic_cmpxchg((addr), \
+ (unsigned long)(old), \
+ (unsigned long)(_new), \
+ sizeof(*(addr))))
-#define compat_uatomic_cmpxchg(addr, old, _new) \
- ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
- (unsigned long)(_new), \
+extern unsigned long _compat_uatomic_add_return(void *addr,
+ unsigned long v, int len);
+#define compat_uatomic_add_return(addr, v) \
+ ((__typeof__(*(addr))) _compat_uatomic_add_return((addr), \
+ (unsigned long)(v), \
sizeof(*(addr))))
+
+#define compat_uatomic_sub_return(addr, v) \
+ compat_uatomic_add_return((addr), -(v))
+#define compat_uatomic_add(addr, v) \
+ ((void)compat_uatomic_add_return((addr), (v)))
+#define compat_uatomic_sub(addr, v) \
+ ((void)compat_uatomic_sub_return((addr), (v)))
+#define compat_uatomic_inc(addr) \
+ (compat_uatomic_add((addr), 1))
+#define compat_uatomic_dec(addr) \
+ (compat_uatomic_sub((addr), 1))
+
+#else
+#define UATOMIC_COMPAT(insn) (_uatomic_##insn)
#endif
+/* Read is atomic even in compat mode */
+#define uatomic_read(addr) _uatomic_read(addr)
+
+#define uatomic_set(addr, v) \
+ UATOMIC_COMPAT(set(addr, v))
+#define uatomic_cmpxchg(addr, old, _new) \
+ UATOMIC_COMPAT(cmpxchg(addr, old, _new))
+#define uatomic_xchg(addr, v) \
+ UATOMIC_COMPAT(xchg(addr, v))
+#define uatomic_add_return(addr, v) \
+ UATOMIC_COMPAT(add_return(addr, v))
+#define uatomic_sub_return(addr, v) \
+ UATOMIC_COMPAT(sub_return(addr, v))
+#define uatomic_add(addr, v) UATOMIC_COMPAT(add(addr, v))
+#define uatomic_sub(addr, v) UATOMIC_COMPAT(sub(addr, v))
+#define uatomic_inc(addr) UATOMIC_COMPAT(inc(addr))
+#define uatomic_dec(addr) UATOMIC_COMPAT(dec(addr))
+
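+/*
+ * Usage sketch (hypothetical counter); each wrapper resolves to the
+ * same operation whether or not cmpxchg is available:
+ *
+ *   static unsigned int count;
+ *   unsigned int old;
+ *
+ *   uatomic_inc(&count);                  count is now 1
+ *   old = uatomic_xchg(&count, 5);        old == 1, count == 5
+ *   old = uatomic_cmpxchg(&count, 5, 7);  old == 5, count == 7
+ */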
#endif /* _URCU_ARCH_UATOMIC_X86_H */