Remove the old build system.
Rename arch_uatomic*.h to uatomic_arch.h, to avoid conflicts with
non-generated arch_*.h.
Signed-off-by: Pierre-Marc Fournier <pierre-marc.fournier@polymtl.ca>
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
urcu-yield.o
tests/api.h
urcu/arch.h
-urcu/arch_uatomic.h
+urcu/uatomic_arch.h
liburcu-defer.so
liburcu-mb.so
liburcu-qsbr.so
tests/test_urcu_lgc_mb
tests/test_urcu_mb
tests/test_urcu_mb_defer
+tests/test_urcu_assign
+tests/test_urcu_assign_dynamic_link
+tests/test_urcu_bp
+tests/test_urcu_bp_dynamic_link
+tests/*.log
+
+#automake
+/config.h
+.deps/
+.libs/
+Makefile.in
+Makefile
+*.m4
+*.la
+*.bz2
+*.o
+*.lo
+*.loT
+/config.log
+/configure
+/config/
+/libtool
+/stamp-h1
+/config.h.in
+/config.status
+/autom4te.cache/
+++ /dev/null
-include Makefile.inc
--- /dev/null
+ACLOCAL_AMFLAGS = -I m4
+INCLUDES = -I$(top_builddir)/urcu
+
+AM_LDFLAGS=-lpthread
+
+SUBDIRS = tests
+
+include_HEADERS = urcu.h $(top_srcdir)/urcu-*.h
+nobase_dist_include_HEADERS = urcu/compiler.h urcu/hlist.h urcu/list.h urcu/rculist.h urcu/system.h
+nobase_nodist_include_HEADERS = urcu/arch.h urcu/uatomic_arch.h
+
+EXTRA_DIST = $(top_srcdir)/urcu/arch_*.h $(top_srcdir)/urcu/uatomic_arch_*.h gpl-2.0.txt lgpl-2.1.txt lgpl-relicensing.txt README LICENSE
+
+lib_LTLIBRARIES = liburcu.la liburcu-mb.la liburcu-defer.la liburcu-qsbr.la liburcu-bp.la
+
+liburcu_la_SOURCES = urcu.c urcu-pointer.c
+
+liburcu_mb_la_SOURCES = urcu.c urcu-pointer.c
+liburcu_mb_la_CFLAGS = -DURCU_MB
+
+liburcu_bp_la_SOURCES = urcu-bp.c urcu-pointer.c
+
+liburcu_defer_la_SOURCES = urcu-defer.c
+
+liburcu_qsbr_la_SOURCES = urcu-qsbr.c urcu-pointer.c
+
+*.h *.c: urcu/arch.h urcu/uatomic_arch.h
+
+urcu/arch.h: $(top_srcdir)/urcu/arch_@ARCHTYPE@.h
+ mkdir -p $(top_builddir)/urcu
+ cp -f $(top_srcdir)/urcu/arch_@ARCHTYPE@.h $(top_builddir)/urcu/arch.h
+
+urcu/uatomic_arch.h: $(top_srcdir)/urcu/uatomic_arch_@ARCHTYPE@.h
+ mkdir -p $(top_builddir)/urcu
+ cp -f $(top_srcdir)/urcu/uatomic_arch_@ARCHTYPE@.h $(top_builddir)/urcu/uatomic_arch.h
+
+clean-local:
+ rm -f urcu/arch.h urcu/uatomic_arch.h
+++ /dev/null
-
-CFLAGS=-Wall -I.
-
-#optimized
-CFLAGS+=-O2
-
-#debug information
-CFLAGS+=-g
-
-#RCU debug (slower, with error-checks)
-#CFLAGS+=-DDEBUG_RCU
-
-LDFLAGS=-lpthread
-
-HOSTTYPE=$(shell uname -m)
-
-ifeq ("${HOSTTYPE}","x86_64")
-ARCHTYPE=x86
-endif
-ifeq ("${HOSTTYPE}","i586")
-ARCHTYPE=x86
-endif
-ifeq ("${HOSTTYPE}","i686")
-ARCHTYPE=x86
-endif
-ifeq ("${HOSTTYPE}","powerpc")
-ARCHTYPE=ppc
-endif
-ifeq ("${HOSTTYPE}","ppc64")
-ARCHTYPE=ppc
-endif
-ifeq ("${HOSTTYPE}","ppc")
-ARCHTYPE=ppc
-endif
-ifeq ("${HOSTTYPE}","s390")
-ARCHTYPE=s390
-endif
-ifeq ("${HOSTTYPE}","s390x")
-ARCHTYPE=s390
-endif
-
-#Changing the signal number used by the library. SIGUSR1 by default.
-#CFLAGS+=-DSIGURCU=SIGUSR2
-
-SRC_DEP=`echo $^ | sed 's/[^ ]*\.h//g'`
+++ /dev/null
-
-include Makefile.build.inc
-
-DIRS=tests
-
-all: checkarch liburcu.so urcu.o \
- liburcu-qsbr.so urcu-qsbr.o \
- liburcu-mb.so urcu-mb.o \
- liburcu-bp.so urcu-bp.o \
- liburcu-defer.so urcu-defer.o \
- urcu-yield.o \
- subdirs
-
-checkarch:
-ifeq (${ARCHTYPE},)
- @echo "Architecture ${HOSTTYPE} is currently unsupported by liburcu"
- @exit 1
-endif
-
-urcu/arch.h: urcu/arch_${ARCHTYPE}.h
- cp -f urcu/arch_${ARCHTYPE}.h urcu/arch.h
-
-urcu/arch_uatomic.h: urcu/arch_uatomic_${ARCHTYPE}.h
- cp -f urcu/arch_uatomic_${ARCHTYPE}.h urcu/arch_uatomic.h
-
-urcu.h: urcu/arch.h urcu/arch_uatomic.h
-
-urcu-qsbr.h: urcu/arch.h urcu/arch_uatomic.h
-
-urcu-pointer.o: urcu-pointer.c urcu-pointer.h urcu-pointer-static.h
- $(CC) -fPIC ${CFLAGS} -c -o $@ $(SRC_DEP)
-
-urcu.o: urcu.c urcu.h
- $(CC) -fPIC ${CFLAGS} -c -o $@ $(SRC_DEP)
-
-urcu-mb.o: urcu.c urcu.h
- $(CC) -fPIC -DURCU_MB ${CFLAGS} -c -o $@ $(SRC_DEP)
-
-urcu-bp.o: urcu-bp.c urcu-bp.h
- $(CC) -fPIC ${CFLAGS} -c -o $@ $(SRC_DEP)
-
-urcu-qsbr.o: urcu-qsbr.c urcu-qsbr.h
- $(CC) -fPIC ${CFLAGS} -c -o $@ $(SRC_DEP)
-
-urcu-defer.o: urcu-defer.c urcu-defer.h
- $(CC) -fPIC ${CFLAGS} -c -o $@ $(SRC_DEP)
-
-liburcu.so: urcu.o urcu-pointer.o
- $(CC) ${LDFLAGS} -fPIC -shared -o $@ $<
-
-liburcu-qsbr.so: urcu-qsbr.o urcu-pointer.o
- $(CC) ${LDFLAGS} -fPIC -shared -o $@ $<
-
-liburcu-mb.so: urcu-mb.o urcu-pointer.o
- $(CC) ${LDFLAGS} -fPIC -shared -o $@ $<
-
-liburcu-bp.so: urcu-bp.o urcu-pointer.o
- $(CC) ${LDFLAGS} -fPIC -shared -o $@ $<
-
-liburcu-defer.so: urcu-defer.o
- $(CC) ${LDFLAGS} -fPIC -shared -o $@ $<
-
-urcu-yield.o: urcu.c urcu.h
- $(CC) -DDEBUG_YIELD ${CFLAGS} -c -o $@ $(SRC_DEP)
-
-.PHONY: clean install checkarch tests
-
-subdirs:
- -for d in ${DIRS}; do cd $${d}; ${MAKE} ${MFLAGS} ${EXTRAMFLAGS}; done
-
-install: liburcu.so
- cp -f liburcu.so liburcu-mb.so liburcu-qsbr.so liburcu-defer.so \
- liburcu-bp.so \
- /usr/lib/
- mkdir -p /usr/include/urcu
- cp -f urcu/arch.h urcu/arch_uatomic.h urcu/compiler.h \
- urcu/system.h urcu/list.h urcu/rculist.h urcu/hlist.h \
- /usr/include/urcu/
- cp -f urcu.h urcu-static.h \
- urcu-qsbr.h urcu-qsbr-static.h \
- urcu-bp.h urcu-bp-static.h \
- urcu-defer.h urcu-defer-static.h \
- urcu-pointer.h urcu-pointer-static.h \
- /usr/include/
-
-clean:
- rm -f *.o *.so urcu/arch.h urcu/arch_uatomic.h
- -for d in ${DIRS}; do cd $${d}; ${MAKE} clean; done
+++ /dev/null
-include Makefile.inc
-
-EXTRAMFLAGS=-f Makefile32
-CFLAGS+=-m32
-LDFLAGS+=-m32
+++ /dev/null
-include Makefile.inc
-
-EXTRAMFLAGS=-f Makefile64
-CFLAGS+=-m64
-LDFLAGS+=-m64
BUILDING
--------
+ ./bootstrap (skip if using tarball)
+ ./configure
make
- #force 32-bit build with: make -f Makefile32
- #force 64-bit build with: make -f Makefile64
make install
--- /dev/null
+#! /bin/sh
+
+set -x
+if [ ! -e config ]; then
+ mkdir config
+fi
+aclocal
+libtoolize --force --copy
+autoheader
+automake --add-missing --copy
+autoconf
+
--- /dev/null
+# -*- Autoconf -*-
+# Process this file with autoconf to produce a configure script.
+
+
+AC_PREREQ([2.63])
+AC_INIT([userspace-rcu], [0.2.2], [mathieu dot desnoyers at polymtl dot ca])
+AC_CONFIG_AUX_DIR([config])
+AC_CONFIG_MACRO_DIR([m4])
+AM_INIT_AUTOMAKE([foreign dist-bzip2 no-dist-gzip])
+AC_CONFIG_SRCDIR([urcu.h])
+AC_CONFIG_HEADERS([config.h])
+
+# Checks for programs.
+AC_PROG_CC
+AC_PROG_MAKE_SET
+AC_PROG_LIBTOOL
+
+# Checks for typedefs, structures, and compiler characteristics.
+AC_C_INLINE
+AC_TYPE_PID_T
+AC_TYPE_SIZE_T
+
+# Checks for library functions.
+AC_FUNC_MALLOC
+AC_FUNC_MMAP
+AC_CHECK_FUNCS([bzero gettimeofday munmap strtoul])
+
+# Find arch type
+case $host_cpu in
+ x86_64) ARCHTYPE="x86";;
+ i586) ARCHTYPE="x86" ;;
+ i686) ARCHTYPE="x86" ;;
+ powerpc) ARCHTYPE="ppc" ;;
+ ppc64) ARCHTYPE="ppc" ;;
+ ppc) ARCHTYPE="ppc" ;;
+ s390) ARCHTYPE="s390" ;;
+ s390x) ARCHTYPE="s390" ;;
+ *) ARCHTYPE="unknown";;
+esac
+
+if test "$ARCHTYPE" = "unknown"; then
+ AC_MSG_ERROR([Unable to detect the architecture.])
+fi
+AC_SUBST(ARCHTYPE)
+
+AM_CONDITIONAL([GCC_API], [test "x$ARCHTYPE" != "xx86" -a "x$ARCHTYPE" != "xppc"])
+
+
+AC_CONFIG_FILES([
+ Makefile
+ tests/Makefile
+])
+AC_OUTPUT
+++ /dev/null
-include Makefile.inc
--- /dev/null
+AM_LDFLAGS=-lpthread
+AM_CFLAGS=-I$(top_srcdir)
+
+noinst_PROGRAMS = test_urcu test_urcu_dynamic_link test_urcu_timing \
+ test_rwlock_timing test_rwlock test_perthreadlock_timing \
+ test_perthreadlock test_urcu_yield test_urcu_mb \
+ test_qsbr_timing test_qsbr urcutorture \
+ urcutorture-yield test_mutex test_looplen test_urcu_gc \
+ test_urcu_gc_mb test_qsbr_gc test_qsbr_lgc test_urcu_lgc \
+ test_urcu_lgc_mb test_qsbr_dynamic_link test_urcu_mb_defer \
+ test_uatomic test_urcu_assign test_urcu_assign_dynamic_link \
+ test_urcu_bp test_urcu_bp_dynamic_link
+
+noinst_HEADERS = rcutorture.h
+
+URCU_SIGNAL=$(top_builddir)/urcu.c $(top_builddir)/urcu-pointer.c
+# URCU_SIGNAL_YIELD uses urcu.c but -DDEBUG_YIELD must be defined
+URCU_SIGNAL_YIELD=$(top_builddir)/urcu.c $(top_builddir)/urcu-pointer.c
+# URCU_MB uses urcu.c but -DURCU_MB must be defined
+URCU_MB=$(top_builddir)/urcu.c $(top_builddir)/urcu-pointer.c
+URCU_BP=$(top_builddir)/urcu-bp.c $(top_builddir)/urcu-pointer.c
+URCU_QSBR=$(top_builddir)/urcu-qsbr.c $(top_builddir)/urcu-pointer.c
+# -DURCU_MB must be defined
+URCU_MB_DEFER=$(top_builddir)/urcu.c $(top_builddir)/urcu-defer.c $(top_builddir)/urcu-pointer.c
+
+
+if GCC_API
+APIHEADER=api_gcc.h
+else
+APIHEADER=api_@ARCHTYPE@.h
+endif
+
+EXTRA_DIST = $(top_srcdir)/tests/api_*.h
+
+
+test_urcu_SOURCES = test_urcu.c $(URCU_SIGNAL)
+
+test_urcu_dynamic_link_SOURCES = test_urcu.c $(URCU_SIGNAL)
+test_urcu_dynamic_link_CFLAGS = -DDYNAMIC_LINK_TEST $(AM_CFLAGS)
+
+test_urcu_timing_SOURCES = test_urcu_timing.c $(URCU_SIGNAL)
+
+test_rwlock_timing_SOURCES = test_rwlock_timing.c $(URCU_SIGNAL)
+
+test_rwlock_SOURCES = test_rwlock.c $(URCU_SIGNAL)
+
+test_perthreadlock_timing_SOURCES = test_perthreadlock_timing.c $(URCU_SIGNAL)
+
+test_perthreadlock_SOURCES = test_perthreadlock.c $(URCU_SIGNAL)
+
+test_urcu_yield_SOURCES = test_urcu.c $(URCU_SIGNAL_YIELD)
+test_urcu_yield_CFLAGS = -DDEBUG_YIELD $(AM_CFLAGS)
+
+test_urcu_mb_SOURCES = test_urcu.c $(URCU_MB)
+test_urcu_mb_CFLAGS = -DURCU_MB $(AM_CFLAGS)
+
+test_qsbr_timing_SOURCES = test_qsbr_timing.c $(URCU_QSBR)
+
+test_qsbr_SOURCES = test_qsbr.c $(URCU_QSBR)
+
+urcutorture_SOURCES = api.h urcutorture.c $(URCU_SIGNAL)
+
+urcutorture_yield_SOURCES = api.h urcutorture.c $(URCU_SIGNAL_YIELD)
+urcutorture_yield_CFLAGS = -DDEBUG_YIELD $(AM_CFLAGS)
+
+test_mutex_SOURCES = test_mutex.c $(URCU_SIGNAL)
+
+test_looplen_SOURCES = test_looplen.c
+
+test_urcu_gc_SOURCES = test_urcu_gc.c $(URCU_SIGNAL)
+
+test_urcu_gc_mb_SOURCES = test_urcu_gc.c $(URCU_MB)
+test_urcu_gc_mb_CFLAGS = -DURCU_MB $(AM_CFLAGS)
+
+test_qsbr_gc_SOURCES = test_qsbr_gc.c $(URCU_QSBR)
+
+test_qsbr_lgc_SOURCES = test_qsbr_gc.c $(URCU_QSBR)
+test_qsbr_lgc_CFLAGS = -DTEST_LOCAL_GC $(AM_CFLAGS)
+
+test_urcu_lgc_SOURCES = test_urcu_gc.c $(URCU_SIGNAL)
+test_urcu_lgc_CFLAGS = -DTEST_LOCAL_GC $(AM_CFLAGS)
+
+test_urcu_lgc_mb_SOURCES = test_urcu_gc.c $(URCU_MB)
+test_urcu_lgc_mb_CFLAGS = -DTEST_LOCAL_GC -DURCU_MB $(AM_CFLAGS)
+
+test_qsbr_dynamic_link_SOURCES = test_qsbr.c $(URCU_QSBR)
+test_qsbr_dynamic_link_CFLAGS = -DDYNAMIC_LINK_TEST $(AM_CFLAGS)
+
+test_urcu_mb_defer_SOURCES = test_urcu_defer.c $(URCU_MB_DEFER)
+test_urcu_mb_defer_CFLAGS = -DURCU_MB $(AM_CFLAGS)
+
+test_uatomic_SOURCES = test_uatomic.c
+
+test_urcu_assign_SOURCES = test_urcu_assign.c $(URCU_SIGNAL)
+
+test_urcu_assign_dynamic_link_SOURCES = test_urcu_assign.c $(URCU_SIGNAL)
+test_urcu_assign_dynamic_link_CFLAGS = -DDYNAMIC_LINK_TEST $(AM_CFLAGS)
+
+test_urcu_bp_SOURCES = test_urcu_bp.c $(URCU_BP)
+
+test_urcu_bp_dynamic_link_SOURCES = test_urcu_bp.c $(URCU_BP)
+test_urcu_bp_dynamic_link_CFLAGS = -DDYNAMIC_LINK_TEST $(AM_CFLAGS)
+
+urcutorture.o: api.h
+
+api.h: $(APIHEADER)
+ cp -f $(srcdir)/$(APIHEADER) api.h
+
+clean-local:
+ rm -f api.h
+++ /dev/null
-
-include ../Makefile.build.inc
-
-ifeq ($(findstring ${ARCHTYPE},"x86 ppc"),)
-APIHEADER=api_gcc.h
-else
-APIHEADER=api_${ARCHTYPE}.h
-endif
-
-LIBDIR=..
-
-CFLAGS+=-I${LIBDIR}
-
-URCU_SIGNAL=${LIBDIR}/urcu.o ${LIBDIR}/urcu-pointer.o ${LIBDIR}/urcu.h
-URCU_SIGNAL_YIELD=${LIBDIR}/urcu-yield.o ${LIBDIR}/urcu-pointer.o ${LIBDIR}/urcu.h
-URCU_MB=${LIBDIR}/urcu-mb.o ${LIBDIR}/urcu-pointer.o ${LIBDIR}/urcu.h
-URCU_BP=${LIBDIR}/urcu-bp.o ${LIBDIR}/urcu-pointer.o ${LIBDIR}/urcu.h
-URCU_QSBR=${LIBDIR}/urcu-qsbr.o ${LIBDIR}/urcu-pointer.o ${LIBDIR}/urcu-qsbr.h
-URCU_MB_DEFER=${LIBDIR}/urcu-mb.o ${LIBDIR}/urcu-defer.o ${LIBDIR}/urcu-pointer.o ${LIBDIR}/urcu.h
-
-all: test_urcu test_urcu_dynamic_link test_urcu_timing \
- test_rwlock_timing test_rwlock test_perthreadlock_timing \
- test_perthreadlock test_urcu_yield test_urcu_mb \
- urcu-asm.S test_qsbr_timing test_qsbr urcu-asm.o urcutorture \
- urcutorture-yield test_mutex test_looplen test_urcu_gc \
- test_urcu_gc_mb test_qsbr_gc test_qsbr_lgc test_urcu_lgc \
- test_urcu_lgc_mb test_qsbr_dynamic_link test_urcu_mb_defer \
- test_uatomic test_urcu_assign test_urcu_assign_dynamic_link \
- test_urcu_bp test_urcu_bp_dynamic_link
-
-api.h: ${APIHEADER}
- cp -f ${APIHEADER} api.h
-
-test_looplen: test_looplen.c ${LIBDIR}/urcu.h
- $(CC) ${CFLAGS} $(LDFLAGS) -o $@ $(SRC_DEP)
-
-
-test_urcu: test_urcu.c ${URCU_SIGNAL}
- $(CC) ${CFLAGS} $(LDFLAGS) -o $@ $(SRC_DEP)
-
-test_urcu_gc: test_urcu_gc.c ${URCU_SIGNAL}
- $(CC) ${CFLAGS} $(LDFLAGS) -o $@ $(SRC_DEP)
-
-test_urcu_lgc: test_urcu_gc.c ${URCU_SIGNAL}
- $(CC) -DTEST_LOCAL_GC ${CFLAGS} $(LDFLAGS) -o $@ $(SRC_DEP)
-
-test_urcu_assign: test_urcu_assign.c ${URCU_SIGNAL}
- $(CC) ${CFLAGS} $(LDFLAGS) -o $@ $(SRC_DEP)
-
-test_urcu_assign_dynamic_link: test_urcu_assign.c ${URCU_SIGNAL}
- $(CC) ${CFLAGS} -DDYNAMIC_LINK_TEST $(LDFLAGS) -o $@ $(SRC_DEP)
-
-
-test_urcu_mb: test_urcu.c ${URCU_MB}
- $(CC) -DURCU_MB ${CFLAGS} $(LDFLAGS) -o $@ $(SRC_DEP)
-
-test_urcu_gc_mb: test_urcu_gc.c ${URCU_MB}
- $(CC) -DURCU_MB ${CFLAGS} $(LDFLAGS) -o $@ $(SRC_DEP)
-
-test_urcu_lgc_mb: test_urcu_gc.c ${URCU_MB}
- $(CC) -DTEST_LOCAL_GC -DURCU_MB ${CFLAGS} $(LDFLAGS) -o $@ $(SRC_DEP)
-
-test_urcu_mb_defer: test_urcu_defer.c ${URCU_MB_DEFER}
- $(CC) -DURCU_MB ${CFLAGS} $(LDFLAGS) -o $@ $(SRC_DEP)
-
-
-test_qsbr: test_qsbr.c ${URCU_QSBR}
- $(CC) ${CFLAGS} $(LDFLAGS) -o $@ $(SRC_DEP)
-
-test_qsbr_gc: test_qsbr_gc.c ${URCU_QSBR}
- $(CC) ${CFLAGS} $(LDFLAGS) -o $@ $(SRC_DEP)
-
-test_qsbr_lgc: test_qsbr_gc.c ${URCU_QSBR}
- $(CC) -DTEST_LOCAL_GC ${CFLAGS} $(LDFLAGS) -o $@ $(SRC_DEP)
-
-test_qsbr_dynamic_link: test_qsbr.c ${URCU_QSBR}
- $(CC) ${CFLAGS} -DDYNAMIC_LINK_TEST $(LDFLAGS) -o $@ $(SRC_DEP)
-
-
-test_rwlock: test_rwlock.c ${URCU_SIGNAL}
- $(CC) ${CFLAGS} $(LDFLAGS) -o $@ $(SRC_DEP)
-
-test_perthreadlock: test_perthreadlock.c ${URCU_SIGNAL}
- $(CC) ${CFLAGS} $(LDFLAGS) -o $@ $(SRC_DEP)
-
-test_mutex: test_mutex.c ${URCU_SIGNAL}
- $(CC) ${CFLAGS} $(LDFLAGS) -o $@ $(SRC_DEP)
-
-test_urcu_dynamic_link: test_urcu.c ${URCU_SIGNAL}
- $(CC) ${CFLAGS} -DDYNAMIC_LINK_TEST $(LDFLAGS) -o $@ $(SRC_DEP)
-
-
-test_urcu_bp: test_urcu_bp.c ${URCU_BP}
- $(CC) ${CFLAGS} $(LDFLAGS) -o $@ $(SRC_DEP)
-
-test_urcu_bp_dynamic_link: test_urcu_bp.c ${URCU_BP}
- $(CC) -DDYNAMIC_LINK_TEST ${CFLAGS} $(LDFLAGS) -o $@ $(SRC_DEP)
-
-
-test_urcu_yield: test_urcu.c ${URCU_SIGNAL_YIELD}
- $(CC) -DDEBUG_YIELD ${CFLAGS} $(LDFLAGS) -o $@ $(SRC_DEP)
-
-test_urcu_timing: test_urcu_timing.c ${URCU_SIGNAL}
- $(CC) ${CFLAGS} $(LDFLAGS) -o $@ $(SRC_DEP)
-
-test_qsbr_timing: test_qsbr_timing.c ${URCU_QSBR}
- $(CC) ${CFLAGS} $(LDFLAGS) -o $@ $(SRC_DEP)
-
-test_rwlock_timing: test_rwlock_timing.c ${URCU_SIGNAL}
- $(CC) ${CFLAGS} $(LDFLAGS) -o $@ $(SRC_DEP)
-
-test_perthreadlock_timing: test_perthreadlock_timing.c ${URCU_SIGNAL}
- $(CC) ${CFLAGS} $(LDFLAGS) -o $@ $(SRC_DEP)
-
-urcu-asm.S: urcu-asm.c ${LIBDIR}/urcu.h
- $(CC) ${CFLAGS} -S -o $@ $(SRC_DEP)
-
-urcu-asm.o: urcu-asm.c ${LIBDIR}/urcu.h
- $(CC) ${CFLAGS} -c -o $@ $(SRC_DEP)
-
-urcutorture: urcutorture.c rcutorture.h api.h ${URCU_SIGNAL}
- $(CC) ${CFLAGS} $(LDFLAGS) -o $@ $(SRC_DEP)
-
-urcutorture-yield: urcutorture.c ${URCU_SIGNAL_YIELD} rcutorture.h api.h
- $(CC) -DDEBUG_YIELD ${CFLAGS} $(LDFLAGS) -o $@ $(SRC_DEP)
-
-test_uatomic: test_uatomic.c ../urcu/arch_uatomic.h
- $(CC) ${CFLAGS} $(LDFLAGS) -o $@ $(SRC_DEP)
-
-,PHONY: clean
-
-clean:
- rm -f *.o test_urcu test_urcu_dynamic_link test_urcu_timing \
- test_rwlock_timing test_rwlock test_perthreadlock_timing \
- test_perthreadlock test_urcu_yield test_urcu_mb \
- urcu-asm.S test_qsbr_timing test_qsbr urcutorture \
- urcutorture-yield liburcu.so api.h \
- test_mutex test_urcu_gc test_urcu_gc_mb urcu-asm-1.S \
- test_qsbr_lgc test_qsbr_gc test_looplen test_urcu_lgc \
- test_urcu_lgc_mb test_qsbr_dynamic_link test_urcu_mb_defer \
- test_uatomic
+++ /dev/null
-include Makefile.inc
-
-EXTRAMFLAGS=-f Makefile32
-CFLAGS+=-m32
-LDFLAGS+=-m32
+++ /dev/null
-include Makefile.inc
-
-EXTRAMFLAGS=-f Makefile64
-CFLAGS+=-m64
-LDFLAGS+=-m64
#include <stdio.h>
#include <assert.h>
-#include <urcu/arch_uatomic.h>
+#include <urcu/uatomic_arch.h>
#if (defined(__i386__) || defined(__x86_64__))
#define HAS_ATOMIC_BYTE
#include "api.h"
#define _LGPL_SOURCE
#include <urcu.h>
-#include <urcu/arch_uatomic.h>
+#include <urcu/uatomic_arch.h>
#include <urcu/rculist.h>
#include "rcutorture.h"
#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <urcu/system.h>
-#include <urcu/arch_uatomic.h>
+#include <urcu/uatomic_arch.h>
#include <urcu/list.h>
/*
#include <urcu/compiler.h>
#include <urcu/arch.h>
-#include <urcu/arch_uatomic.h>
+#include <urcu/uatomic_arch.h>
#include <urcu/list.h>
#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <urcu/system.h>
-#include <urcu/arch_uatomic.h>
+#include <urcu/uatomic_arch.h>
/**
* _rcu_dereference - reads (copy) a RCU-protected pointer to a local variable
#include <urcu/compiler.h>
#include <urcu/arch.h>
-#include <urcu/arch_uatomic.h>
+#include <urcu/uatomic_arch.h>
#ifdef _LGPL_SOURCE
#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <urcu/system.h>
-#include <urcu/arch_uatomic.h>
+#include <urcu/uatomic_arch.h>
#include <urcu/list.h>
#define futex(...) syscall(__NR_futex, __VA_ARGS__)
#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <urcu/system.h>
-#include <urcu/arch_uatomic.h>
+#include <urcu/uatomic_arch.h>
#include <urcu/list.h>
#define futex(...) syscall(__NR_futex, __VA_ARGS__)
+++ /dev/null
-#ifndef _URCU_ARCH_UATOMIC_PPC_H
-#define _URCU_ARCH_UATOMIC_PPC_H
-
-/*
- * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
- * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
- * Copyright (c) 2009 Mathieu Desnoyers
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose, provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- *
- * Code inspired from libuatomic_ops-1.2, inherited in part from the
- * Boehm-Demers-Weiser conservative garbage collector.
- */
-
-#include <urcu/compiler.h>
-
-#ifndef __SIZEOF_LONG__
-#ifdef __powerpc64__
-#define __SIZEOF_LONG__ 8
-#else
-#define __SIZEOF_LONG__ 4
-#endif
-#endif
-
-#ifndef BITS_PER_LONG
-#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
-#endif
-
-#define ILLEGAL_INSTR ".long 0xd00d00"
-
-#define uatomic_set(addr, v) \
-do { \
- ACCESS_ONCE(*(addr)) = (v); \
-} while (0)
-
-#define uatomic_read(addr) ACCESS_ONCE(*(addr))
-
-/*
- * Using a isync as second barrier for exchange to provide acquire semantic.
- * According to uatomic_ops/sysdeps/gcc/powerpc.h, the documentation is "fairly
- * explicit that this also has acquire semantics."
- * Derived from AO_compare_and_swap(), but removed the comparison.
- */
-
-/* xchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
-{
- switch (len) {
- case 4:
- {
- unsigned int result;
-
- __asm__ __volatile__(
- "lwsync\n"
- "1:\t" "lwarx %0,0,%1\n" /* load and reserve */
- "stwcx. %2,0,%1\n" /* else store conditional */
- "bne- 1b\n" /* retry if lost reservation */
- "isync\n"
- : "=&r"(result)
- : "r"(addr), "r"(val)
- : "memory", "cc");
-
- return result;
- }
-#if (BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long result;
-
- __asm__ __volatile__(
- "lwsync\n"
- "1:\t" "ldarx %0,0,%1\n" /* load and reserve */
- "stdcx. %2,0,%1\n" /* else store conditional */
- "bne- 1b\n" /* retry if lost reservation */
- "isync\n"
- : "=&r"(result)
- : "r"(addr), "r"(val)
- : "memory", "cc");
-
- return result;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__(ILLEGAL_INSTR);
- return 0;
-}
-
-#define uatomic_xchg(addr, v) \
- ((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
- sizeof(*(addr))))
-/* cmpxchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
- unsigned long _new, int len)
-{
- switch (len) {
- case 4:
- {
- unsigned int old_val;
-
- __asm__ __volatile__(
- "lwsync\n"
- "1:\t" "lwarx %0,0,%1\n" /* load and reserve */
- "cmpd %0,%3\n" /* if load is not equal to */
- "bne 2f\n" /* old, fail */
- "stwcx. %2,0,%1\n" /* else store conditional */
- "bne- 1b\n" /* retry if lost reservation */
- "isync\n"
- "2:\n"
- : "=&r"(old_val)
- : "r"(addr), "r"((unsigned int)_new),
- "r"((unsigned int)old)
- : "memory", "cc");
-
- return old_val;
- }
-#if (BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long old_val;
-
- __asm__ __volatile__(
- "lwsync\n"
- "1:\t" "ldarx %0,0,%1\n" /* load and reserve */
- "cmpd %0,%3\n" /* if load is not equal to */
- "bne 2f\n" /* old, fail */
- "stdcx. %2,0,%1\n" /* else store conditional */
- "bne- 1b\n" /* retry if lost reservation */
- "isync\n"
- "2:\n"
- : "=&r"(old_val),
- : "r"(addr), "r"((unsigned long)_new),
- "r"((unsigned long)old)
- : "memory", "cc");
-
- return old_val;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__(ILLEGAL_INSTR);
- return 0;
-}
-
-
-#define uatomic_cmpxchg(addr, old, _new) \
- ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
- (unsigned long)(_new), \
- sizeof(*(addr))))
-
-/* uatomic_add_return */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_add_return(void *addr, unsigned long val,
- int len)
-{
- switch (len) {
- case 4:
- {
- unsigned int result;
-
- __asm__ __volatile__(
- "lwsync\n"
- "1:\t" "lwarx %0,0,%1\n" /* load and reserve */
- "add %0,%2,%0\n" /* add val to value loaded */
- "stwcx. %0,0,%1\n" /* store conditional */
- "bne- 1b\n" /* retry if lost reservation */
- "isync\n"
- : "=&r"(result)
- : "r"(addr), "r"(val)
- : "memory", "cc");
-
- return result;
- }
-#if (BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long result;
-
- __asm__ __volatile__(
- "lwsync\n"
- "1:\t" "ldarx %0,0,%1\n" /* load and reserve */
- "add %0,%2,%0\n" /* add val to value loaded */
- "stdcx. %0,0,%1\n" /* store conditional */
- "bne- 1b\n" /* retry if lost reservation */
- "isync\n"
- : "=&r"(result)
- : "r"(addr), "r"(val)
- : "memory", "cc");
-
- return result;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__(ILLEGAL_INSTR);
- return 0;
-}
-
-
-#define uatomic_add_return(addr, v) \
- ((__typeof__(*(addr))) _uatomic_add_return((addr), \
- (unsigned long)(v), \
- sizeof(*(addr))))
-
-/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */
-
-#define uatomic_sub_return(addr, v) uatomic_add_return((addr), -(v))
-
-#define uatomic_add(addr, v) (void)uatomic_add_return((addr), (v))
-#define uatomic_sub(addr, v) (void)uatomic_sub_return((addr), (v))
-
-#define uatomic_inc(addr) uatomic_add((addr), 1)
-#define uatomic_dec(addr) uatomic_add((addr), -1)
-
-#endif /* _URCU_ARCH_UATOMIC_PPC_H */
+++ /dev/null
-#ifndef _URCU_ARCH_ATOMIC_S390_H
-#define _URCU_ARCH_ATOMIC_S390_H
-
-/*
- * Atomic exchange operations for the S390 architecture. Based on information
- * taken from the Principles of Operation Appendix A "Conditional Swapping
- * Instructions (CS, CDS)".
- *
- * Copyright (c) 2009 Novell, Inc.
- * Author: Jan Blunck <jblunck@suse.de>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef __SIZEOF_LONG__
-#ifdef __s390x__
-#define __SIZEOF_LONG__ 8
-#else
-#define __SIZEOF_LONG__ 4
-#endif
-#endif
-
-#ifndef BITS_PER_LONG
-#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
-#endif
-
-#define uatomic_set(addr, v) \
-do { \
- ACCESS_ONCE(*(addr)) = (v); \
-} while (0)
-
-#define uatomic_read(addr) ACCESS_ONCE(*(addr))
-
-static inline __attribute__((always_inline))
-unsigned int uatomic_exchange_32(volatile unsigned int *addr, unsigned int val)
-{
- unsigned int result;
-
- __asm__ __volatile__(
- "0: cs %0,%2,%1\n"
- " brc 4,0b\n"
- : "=&r"(result), "=m" (*addr)
- : "r"(val), "m" (*addr)
- : "memory", "cc");
-
- return result;
-}
-
-#if (BITS_PER_LONG == 64)
-
-static inline __attribute__((always_inline))
-unsigned long uatomic_exchange_64(volatile unsigned long *addr,
- unsigned long val)
-{
- unsigned long result;
-
- __asm__ __volatile__(
- "0: csg %0,%2,%1\n"
- " brc 4,0b\n"
- : "=&r"(result), "=m" (*addr)
- : "r"(val), "m" (*addr)
- : "memory", "cc");
-
- return result;
-}
-
-#endif
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_exchange(volatile void *addr, unsigned long val, int len)
-{
- switch (len) {
- case 4:
- return uatomic_exchange_32(addr, val);
-#if (BITS_PER_LONG == 64)
- case 8:
- return uatomic_exchange_64(addr, val);
-#endif
- default:
- __asm__ __volatile__(".long 0xd00d00");
- }
-
- return 0;
-}
-
-#define uatomic_xchg(addr, v) \
- (__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
- sizeof(*(addr)))
-
-
-static inline __attribute__((always_inline))
-void uatomic_add_32(volatile unsigned int *addr, unsigned int val)
-{
- unsigned int result, old;
-
- __asm__ __volatile__(
- " l %0, %1\n"
- "0: lr %2, %0\n"
- " ar %2, %3\n"
- " cs %0,%2,%1\n"
- " brc 4,0b\n"
- : "=&r"(old), "+m" (*addr),
- "=&r"(result)
- : "r"(val)
- : "memory", "cc");
-}
-
-#if (BITS_PER_LONG == 64)
-
-static inline __attribute__((always_inline))
-void uatomic_add_64(volatile unsigned long *addr, unsigned long val)
-{
- unsigned long result, old;
-
- __asm__ __volatile__(
- " lg %0, %1\n"
- "0: lgr %2, %0\n"
- " agr %2, %3\n"
- " csg %0,%2,%1\n"
- " brc 4,0b\n"
- : "=&r"(old), "+m" (*addr),
- "=&r"(result)
- : "r"(val)
- : "memory", "cc");
-}
-
-#endif
-
-static inline __attribute__((always_inline))
-void _uatomic_add(void *addr, unsigned long val, int len)
-{
- switch (len) {
- case 4:
- uatomic_add_32(addr, val);
- return;
-#if (BITS_PER_LONG == 64)
- case 8:
- uatomic_add_64(addr, val);
- return;
-#endif
- default:
- __asm__ __volatile__(".long 0xd00d00");
- }
-
- return;
-}
-
-#define uatomic_add(addr, val) \
- _uatomic_add((addr), (unsigned long)(val), sizeof(*(addr)))
-
-static inline __attribute__((always_inline))
-unsigned int uatomic_cmpxchg_32(volatile unsigned int *addr, unsigned int old,
- unsigned int new)
-{
- __asm__ __volatile__(
- " cs %0,%2,%1\n"
- : "+r"(old), "+m"(*addr)
- : "r"(new)
- : "memory", "cc");
-
- return old;
-}
-
-#if (BITS_PER_LONG == 64)
-
-static inline __attribute__((always_inline))
-unsigned long uatomic_cmpxchg_64(volatile unsigned long *addr,
- unsigned long old, unsigned long new)
-{
- __asm__ __volatile__(
- " csg %0,%2,%1\n"
- : "+r"(old), "+m"(*addr)
- : "r"(new)
- : "memory", "cc");
-
- return old;
-}
-
-#endif
-
-unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
- unsigned long new, int len)
-{
- switch (len) {
- case 4:
- return uatomic_cmpxchg_32(addr, old, new);
-#if (BITS_PER_LONG == 64)
- case 8:
- return uatomic_cmpxchg_64(addr, old, new);
-#endif
- default:
- __asm__ __volatile__(".long 0xd00d00");
- }
-
- return 0;
-}
-
-#define uatomic_cmpxchg(addr, old, new) \
- (__typeof__(*(addr))) _uatomic_cmpxchg((addr), \
- (unsigned long)(old), \
- (unsigned long)(new), \
- sizeof(*(addr)))
-
-#endif /* _URCU_ARCH_ATOMIC_S390_H */
+++ /dev/null
-#ifndef _URCU_ARCH_UATOMIC_X86_H
-#define _URCU_ARCH_UATOMIC_X86_H
-
-/*
- * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
- * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
- * Copyright (c) 2009 Mathieu Desnoyers
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose, provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- *
- * Code inspired from libuatomic_ops-1.2, inherited in part from the
- * Boehm-Demers-Weiser conservative garbage collector.
- */
-
-#include <urcu/compiler.h>
-
-#ifndef BITS_PER_LONG
-#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
-#endif
-
-/*
- * Derived from AO_compare_and_swap() and AO_test_and_set_full().
- */
-
-struct __uatomic_dummy {
- unsigned long v[10];
-};
-#define __hp(x) ((struct __uatomic_dummy *)(x))
-
-#define uatomic_set(addr, v) \
-do { \
- ACCESS_ONCE(*(addr)) = (v); \
-} while (0)
-
-#define uatomic_read(addr) ACCESS_ONCE(*(addr))
-
-/* cmpxchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
- unsigned long _new, int len)
-{
- switch (len) {
- case 1:
- {
- unsigned char result = old;
-
- __asm__ __volatile__(
- "lock; cmpxchgb %2, %1"
- : "+a"(result), "+m"(*__hp(addr))
- : "q"((unsigned char)_new)
- : "memory");
- return result;
- }
- case 2:
- {
- unsigned short result = old;
-
- __asm__ __volatile__(
- "lock; cmpxchgw %2, %1"
- : "+a"(result), "+m"(*__hp(addr))
- : "r"((unsigned short)_new)
- : "memory");
- return result;
- }
- case 4:
- {
- unsigned int result = old;
-
- __asm__ __volatile__(
- "lock; cmpxchgl %2, %1"
- : "+a"(result), "+m"(*__hp(addr))
- : "r"((unsigned int)_new)
- : "memory");
- return result;
- }
-#if (BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long result = old;
-
- __asm__ __volatile__(
- "lock; cmpxchgq %2, %1"
- : "+a"(result), "+m"(*__hp(addr))
- : "r"((unsigned long)_new)
- : "memory");
- return result;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__("ud2");
- return 0;
-}
-
-#define uatomic_cmpxchg(addr, old, _new) \
- ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
- (unsigned long)(_new), \
- sizeof(*(addr))))
-
-/* xchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
-{
- /* Note: the "xchg" instruction does not need a "lock" prefix. */
- switch (len) {
- case 1:
- {
- unsigned char result;
- __asm__ __volatile__(
- "xchgb %0, %1"
- : "=q"(result), "+m"(*__hp(addr))
- : "0" ((unsigned char)val)
- : "memory");
- return result;
- }
- case 2:
- {
- unsigned short result;
- __asm__ __volatile__(
- "xchgw %0, %1"
- : "=r"(result), "+m"(*__hp(addr))
- : "0" ((unsigned short)val)
- : "memory");
- return result;
- }
- case 4:
- {
- unsigned int result;
- __asm__ __volatile__(
- "xchgl %0, %1"
- : "=r"(result), "+m"(*__hp(addr))
- : "0" ((unsigned int)val)
- : "memory");
- return result;
- }
-#if (BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long result;
- __asm__ __volatile__(
- "xchgq %0, %1"
- : "=r"(result), "+m"(*__hp(addr))
- : "0" ((unsigned long)val)
- : "memory");
- return result;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__("ud2");
- return 0;
-}
-
-#define uatomic_xchg(addr, v) \
- ((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
- sizeof(*(addr))))
-
-/* uatomic_add_return, uatomic_sub_return */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_add_return(void *addr, unsigned long val,
- int len)
-{
- switch (len) {
- case 1:
- {
- unsigned char result = val;
-
- __asm__ __volatile__(
- "lock; xaddb %1, %0"
- : "+m"(*__hp(addr)), "+q" (result)
- :
- : "memory");
- return result + (unsigned char)val;
- }
- case 2:
- {
- unsigned short result = val;
-
- __asm__ __volatile__(
- "lock; xaddw %1, %0"
- : "+m"(*__hp(addr)), "+r" (result)
- :
- : "memory");
- return result + (unsigned short)val;
- }
- case 4:
- {
- unsigned int result = val;
-
- __asm__ __volatile__(
- "lock; xaddl %1, %0"
- : "+m"(*__hp(addr)), "+r" (result)
- :
- : "memory");
- return result + (unsigned int)val;
- }
-#if (BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long result = val;
-
- __asm__ __volatile__(
- "lock; xaddq %1, %0"
- : "+m"(*__hp(addr)), "+r" (result)
- :
- : "memory");
- return result + (unsigned long)val;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__("ud2");
- return 0;
-}
-
-#define uatomic_add_return(addr, v) \
- ((__typeof__(*(addr))) _uatomic_add_return((addr), \
- (unsigned long)(v), \
- sizeof(*(addr))))
-
-#define uatomic_sub_return(addr, v) uatomic_add_return((addr), -(v))
-
-/* uatomic_add, uatomic_sub */
-
-static inline __attribute__((always_inline))
-void _uatomic_add(void *addr, unsigned long val, int len)
-{
- switch (len) {
- case 1:
- {
- __asm__ __volatile__(
- "lock; addb %1, %0"
- : "=m"(*__hp(addr))
- : "iq" ((unsigned char)val)
- : "memory");
- return;
- }
- case 2:
- {
- __asm__ __volatile__(
- "lock; addw %1, %0"
- : "=m"(*__hp(addr))
- : "ir" ((unsigned short)val)
- : "memory");
- return;
- }
- case 4:
- {
- __asm__ __volatile__(
- "lock; addl %1, %0"
- : "=m"(*__hp(addr))
- : "ir" ((unsigned int)val)
- : "memory");
- return;
- }
-#if (BITS_PER_LONG == 64)
- case 8:
- {
- __asm__ __volatile__(
- "lock; addq %1, %0"
- : "=m"(*__hp(addr))
- : "er" ((unsigned long)val)
- : "memory");
- return;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__("ud2");
- return;
-}
-
-#define uatomic_add(addr, v) \
- (_uatomic_add((addr), (unsigned long)(v), sizeof(*(addr))))
-
-#define uatomic_sub(addr, v) uatomic_add((addr), -(v))
-
-
-/* uatomic_inc */
-
-static inline __attribute__((always_inline))
-void _uatomic_inc(void *addr, int len)
-{
- switch (len) {
- case 1:
- {
- __asm__ __volatile__(
- "lock; incb %0"
- : "=m"(*__hp(addr))
- :
- : "memory");
- return;
- }
- case 2:
- {
- __asm__ __volatile__(
- "lock; incw %0"
- : "=m"(*__hp(addr))
- :
- : "memory");
- return;
- }
- case 4:
- {
- __asm__ __volatile__(
- "lock; incl %0"
- : "=m"(*__hp(addr))
- :
- : "memory");
- return;
- }
-#if (BITS_PER_LONG == 64)
- case 8:
- {
- __asm__ __volatile__(
- "lock; incq %0"
- : "=m"(*__hp(addr))
- :
- : "memory");
- return;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__("ud2");
- return;
-}
-
-#define uatomic_inc(addr) (_uatomic_inc((addr), sizeof(*(addr))))
-
-/* uatomic_dec */
-
-static inline __attribute__((always_inline))
-void _uatomic_dec(void *addr, int len)
-{
- switch (len) {
- case 1:
- {
- __asm__ __volatile__(
- "lock; decb %0"
- : "=m"(*__hp(addr))
- :
- : "memory");
- return;
- }
- case 2:
- {
- __asm__ __volatile__(
- "lock; decw %0"
- : "=m"(*__hp(addr))
- :
- : "memory");
- return;
- }
- case 4:
- {
- __asm__ __volatile__(
- "lock; decl %0"
- : "=m"(*__hp(addr))
- :
- : "memory");
- return;
- }
-#if (BITS_PER_LONG == 64)
- case 8:
- {
- __asm__ __volatile__(
- "lock; decq %0"
- : "=m"(*__hp(addr))
- :
- : "memory");
- return;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__("ud2");
- return;
-}
-
-#define uatomic_dec(addr) (_uatomic_dec((addr), sizeof(*(addr))))
-
-#endif /* _URCU_ARCH_UATOMIC_X86_H */
--- /dev/null
+#ifndef _URCU_ARCH_UATOMIC_PPC_H
+#define _URCU_ARCH_UATOMIC_PPC_H
+
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
+ * Copyright (c) 2009 Mathieu Desnoyers
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ * Code inspired from libuatomic_ops-1.2, inherited in part from the
+ * Boehm-Demers-Weiser conservative garbage collector.
+ */
+
+#include <urcu/compiler.h>
+
+#ifndef __SIZEOF_LONG__
+#ifdef __powerpc64__
+#define __SIZEOF_LONG__ 8
+#else
+#define __SIZEOF_LONG__ 4
+#endif
+#endif
+
+#ifndef BITS_PER_LONG
+#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
+#endif
+
+#define ILLEGAL_INSTR ".long 0xd00d00"
+
+#define uatomic_set(addr, v) \
+do { \
+ ACCESS_ONCE(*(addr)) = (v); \
+} while (0)
+
+#define uatomic_read(addr) ACCESS_ONCE(*(addr))
+
+/*
+ * Using a isync as second barrier for exchange to provide acquire semantic.
+ * According to uatomic_ops/sysdeps/gcc/powerpc.h, the documentation is "fairly
+ * explicit that this also has acquire semantics."
+ * Derived from AO_compare_and_swap(), but removed the comparison.
+ */
+
+/* xchg */
+
+static inline __attribute__((always_inline))
+unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
+{
+ switch (len) {
+ case 4:
+ {
+ unsigned int result;
+
+ __asm__ __volatile__(
+ "lwsync\n"
+ "1:\t" "lwarx %0,0,%1\n" /* load and reserve */
+ "stwcx. %2,0,%1\n" /* else store conditional */
+ "bne- 1b\n" /* retry if lost reservation */
+ "isync\n"
+ : "=&r"(result)
+ : "r"(addr), "r"(val)
+ : "memory", "cc");
+
+ return result;
+ }
+#if (BITS_PER_LONG == 64)
+ case 8:
+ {
+ unsigned long result;
+
+ __asm__ __volatile__(
+ "lwsync\n"
+ "1:\t" "ldarx %0,0,%1\n" /* load and reserve */
+ "stdcx. %2,0,%1\n" /* else store conditional */
+ "bne- 1b\n" /* retry if lost reservation */
+ "isync\n"
+ : "=&r"(result)
+ : "r"(addr), "r"(val)
+ : "memory", "cc");
+
+ return result;
+ }
+#endif
+ }
+ /* generate an illegal instruction. Cannot catch this with linker tricks
+ * when optimizations are disabled. */
+ __asm__ __volatile__(ILLEGAL_INSTR);
+ return 0;
+}
+
+#define uatomic_xchg(addr, v) \
+ ((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
+ sizeof(*(addr))))
+/* cmpxchg */
+
+static inline __attribute__((always_inline))
+unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
+			   unsigned long _new, int len)
+{
+	switch (len) {
+	case 4:
+	{
+		unsigned int old_val;
+
+		__asm__ __volatile__(
+			"lwsync\n"
+		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
+			"cmpw %0,%3\n"		/* if load is not equal to */
+			"bne 2f\n"		/* old, fail */
+			"stwcx. %2,0,%1\n"	/* else store conditional */
+			"bne- 1b\n"	 	/* retry if lost reservation */
+			"isync\n"
+		"2:\n"
+			: "=&r"(old_val)
+			: "r"(addr), "r"((unsigned int)_new),
+			  "r"((unsigned int)old)
+			: "memory", "cc");
+
+		return old_val;
+	}
+#if (BITS_PER_LONG == 64)
+	case 8:
+	{
+		unsigned long old_val;
+
+		__asm__ __volatile__(
+			"lwsync\n"
+		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
+			"cmpd %0,%3\n"		/* if load is not equal to */
+			"bne 2f\n"		/* old, fail */
+			"stdcx. %2,0,%1\n"	/* else store conditional */
+			"bne- 1b\n"	 	/* retry if lost reservation */
+			"isync\n"
+		"2:\n"
+			: "=&r"(old_val)
+			: "r"(addr), "r"((unsigned long)_new),
+			  "r"((unsigned long)old)
+			: "memory", "cc");
+
+		return old_val;
+	}
+#endif
+	}
+	/* generate an illegal instruction. Cannot catch this with linker tricks
+	 * when optimizations are disabled. */
+	__asm__ __volatile__(ILLEGAL_INSTR);
+	return 0;
+}
+
+
+#define uatomic_cmpxchg(addr, old, _new)				    \
+	((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
+						(unsigned long)(_new), 	    \
+						sizeof(*(addr))))
+
+/* uatomic_add_return */
+
+static inline __attribute__((always_inline))
+unsigned long _uatomic_add_return(void *addr, unsigned long val,
+ int len)
+{
+ switch (len) {
+ case 4:
+ {
+ unsigned int result;
+
+ __asm__ __volatile__(
+ "lwsync\n"
+ "1:\t" "lwarx %0,0,%1\n" /* load and reserve */
+ "add %0,%2,%0\n" /* add val to value loaded */
+ "stwcx. %0,0,%1\n" /* store conditional */
+ "bne- 1b\n" /* retry if lost reservation */
+ "isync\n"
+ : "=&r"(result)
+ : "r"(addr), "r"(val)
+ : "memory", "cc");
+
+ return result;
+ }
+#if (BITS_PER_LONG == 64)
+ case 8:
+ {
+ unsigned long result;
+
+ __asm__ __volatile__(
+ "lwsync\n"
+ "1:\t" "ldarx %0,0,%1\n" /* load and reserve */
+ "add %0,%2,%0\n" /* add val to value loaded */
+ "stdcx. %0,0,%1\n" /* store conditional */
+ "bne- 1b\n" /* retry if lost reservation */
+ "isync\n"
+ : "=&r"(result)
+ : "r"(addr), "r"(val)
+ : "memory", "cc");
+
+ return result;
+ }
+#endif
+ }
+ /* generate an illegal instruction. Cannot catch this with linker tricks
+ * when optimizations are disabled. */
+ __asm__ __volatile__(ILLEGAL_INSTR);
+ return 0;
+}
+
+
+#define uatomic_add_return(addr, v) \
+ ((__typeof__(*(addr))) _uatomic_add_return((addr), \
+ (unsigned long)(v), \
+ sizeof(*(addr))))
+
+/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */
+
+#define uatomic_sub_return(addr, v) uatomic_add_return((addr), -(v))
+
+#define uatomic_add(addr, v) (void)uatomic_add_return((addr), (v))
+#define uatomic_sub(addr, v) (void)uatomic_sub_return((addr), (v))
+
+#define uatomic_inc(addr) uatomic_add((addr), 1)
+#define uatomic_dec(addr) uatomic_add((addr), -1)
+
+#endif /* _URCU_ARCH_UATOMIC_PPC_H */
--- /dev/null
+#ifndef _URCU_ARCH_ATOMIC_S390_H
+#define _URCU_ARCH_ATOMIC_S390_H
+
+/*
+ * Atomic exchange operations for the S390 architecture. Based on information
+ * taken from the Principles of Operation Appendix A "Conditional Swapping
+ * Instructions (CS, CDS)".
+ *
+ * Copyright (c) 2009 Novell, Inc.
+ * Author: Jan Blunck <jblunck@suse.de>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <urcu/compiler.h>
+
+#ifndef __SIZEOF_LONG__
+#ifdef __s390x__
+#define __SIZEOF_LONG__ 8
+#else
+#define __SIZEOF_LONG__ 4
+#endif
+#endif
+
+#ifndef BITS_PER_LONG
+#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
+#endif
+
+#define uatomic_set(addr, v)				\
+do {							\
+	ACCESS_ONCE(*(addr)) = (v);			\
+} while (0)
+
+#define uatomic_read(addr)	ACCESS_ONCE(*(addr))
+
+static inline __attribute__((always_inline))
+unsigned int uatomic_exchange_32(volatile unsigned int *addr, unsigned int val)
+{
+ unsigned int result;
+
+ __asm__ __volatile__(
+ "0: cs %0,%2,%1\n"
+ " brc 4,0b\n"
+ : "=&r"(result), "=m" (*addr)
+ : "r"(val), "m" (*addr)
+ : "memory", "cc");
+
+ return result;
+}
+
+#if (BITS_PER_LONG == 64)
+
+static inline __attribute__((always_inline))
+unsigned long uatomic_exchange_64(volatile unsigned long *addr,
+ unsigned long val)
+{
+ unsigned long result;
+
+ __asm__ __volatile__(
+ "0: csg %0,%2,%1\n"
+ " brc 4,0b\n"
+ : "=&r"(result), "=m" (*addr)
+ : "r"(val), "m" (*addr)
+ : "memory", "cc");
+
+ return result;
+}
+
+#endif
+
+static inline __attribute__((always_inline))
+unsigned long _uatomic_exchange(volatile void *addr, unsigned long val, int len)
+{
+ switch (len) {
+ case 4:
+ return uatomic_exchange_32(addr, val);
+#if (BITS_PER_LONG == 64)
+ case 8:
+ return uatomic_exchange_64(addr, val);
+#endif
+ default:
+ __asm__ __volatile__(".long 0xd00d00");
+ }
+
+ return 0;
+}
+
+#define uatomic_xchg(addr, v) \
+ (__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
+ sizeof(*(addr)))
+
+
+static inline __attribute__((always_inline))
+void uatomic_add_32(volatile unsigned int *addr, unsigned int val)
+{
+ unsigned int result, old;
+
+ __asm__ __volatile__(
+ " l %0, %1\n"
+ "0: lr %2, %0\n"
+ " ar %2, %3\n"
+ " cs %0,%2,%1\n"
+ " brc 4,0b\n"
+ : "=&r"(old), "+m" (*addr),
+ "=&r"(result)
+ : "r"(val)
+ : "memory", "cc");
+}
+
+#if (BITS_PER_LONG == 64)
+
+static inline __attribute__((always_inline))
+void uatomic_add_64(volatile unsigned long *addr, unsigned long val)
+{
+ unsigned long result, old;
+
+ __asm__ __volatile__(
+ " lg %0, %1\n"
+ "0: lgr %2, %0\n"
+ " agr %2, %3\n"
+ " csg %0,%2,%1\n"
+ " brc 4,0b\n"
+ : "=&r"(old), "+m" (*addr),
+ "=&r"(result)
+ : "r"(val)
+ : "memory", "cc");
+}
+
+#endif
+
+static inline __attribute__((always_inline))
+void _uatomic_add(void *addr, unsigned long val, int len)
+{
+ switch (len) {
+ case 4:
+ uatomic_add_32(addr, val);
+ return;
+#if (BITS_PER_LONG == 64)
+ case 8:
+ uatomic_add_64(addr, val);
+ return;
+#endif
+ default:
+ __asm__ __volatile__(".long 0xd00d00");
+ }
+
+ return;
+}
+
+#define uatomic_add(addr, val) \
+ _uatomic_add((addr), (unsigned long)(val), sizeof(*(addr)))
+
+static inline __attribute__((always_inline))
+unsigned int uatomic_cmpxchg_32(volatile unsigned int *addr, unsigned int old,
+ unsigned int new)
+{
+ __asm__ __volatile__(
+ " cs %0,%2,%1\n"
+ : "+r"(old), "+m"(*addr)
+ : "r"(new)
+ : "memory", "cc");
+
+ return old;
+}
+
+#if (BITS_PER_LONG == 64)
+
+static inline __attribute__((always_inline))
+unsigned long uatomic_cmpxchg_64(volatile unsigned long *addr,
+ unsigned long old, unsigned long new)
+{
+ __asm__ __volatile__(
+ " csg %0,%2,%1\n"
+ : "+r"(old), "+m"(*addr)
+ : "r"(new)
+ : "memory", "cc");
+
+ return old;
+}
+
+#endif
+
+static inline __attribute__((always_inline))
+unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
+			       unsigned long new, int len)
+{
+	switch (len) {
+	case 4:
+		return uatomic_cmpxchg_32(addr, old, new);
+#if (BITS_PER_LONG == 64)
+	case 8:
+		return uatomic_cmpxchg_64(addr, old, new);
+#endif
+	default:
+		__asm__ __volatile__(".long 0xd00d00");
+	}
+
+	return 0;
+}
+
+#define uatomic_cmpxchg(addr, old, new)					\
+	(__typeof__(*(addr))) _uatomic_cmpxchg((addr),			\
+					       (unsigned long)(old),	\
+					       (unsigned long)(new),	\
+					       sizeof(*(addr)))
+
+#endif /* _URCU_ARCH_ATOMIC_S390_H */
--- /dev/null
+#ifndef _URCU_ARCH_UATOMIC_X86_H
+#define _URCU_ARCH_UATOMIC_X86_H
+
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
+ * Copyright (c) 2009 Mathieu Desnoyers
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ * Code inspired from libuatomic_ops-1.2, inherited in part from the
+ * Boehm-Demers-Weiser conservative garbage collector.
+ */
+
+#include <urcu/compiler.h>
+
+#ifndef BITS_PER_LONG
+#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
+#endif
+
+/*
+ * Derived from AO_compare_and_swap() and AO_test_and_set_full().
+ */
+
+struct __uatomic_dummy {
+ unsigned long v[10];
+};
+#define __hp(x) ((struct __uatomic_dummy *)(x))
+
+#define uatomic_set(addr, v) \
+do { \
+ ACCESS_ONCE(*(addr)) = (v); \
+} while (0)
+
+#define uatomic_read(addr) ACCESS_ONCE(*(addr))
+
+/* cmpxchg */
+
+static inline __attribute__((always_inline))
+unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
+ unsigned long _new, int len)
+{
+ switch (len) {
+ case 1:
+ {
+ unsigned char result = old;
+
+ __asm__ __volatile__(
+ "lock; cmpxchgb %2, %1"
+ : "+a"(result), "+m"(*__hp(addr))
+ : "q"((unsigned char)_new)
+ : "memory");
+ return result;
+ }
+ case 2:
+ {
+ unsigned short result = old;
+
+ __asm__ __volatile__(
+ "lock; cmpxchgw %2, %1"
+ : "+a"(result), "+m"(*__hp(addr))
+ : "r"((unsigned short)_new)
+ : "memory");
+ return result;
+ }
+ case 4:
+ {
+ unsigned int result = old;
+
+ __asm__ __volatile__(
+ "lock; cmpxchgl %2, %1"
+ : "+a"(result), "+m"(*__hp(addr))
+ : "r"((unsigned int)_new)
+ : "memory");
+ return result;
+ }
+#if (BITS_PER_LONG == 64)
+ case 8:
+ {
+ unsigned long result = old;
+
+ __asm__ __volatile__(
+ "lock; cmpxchgq %2, %1"
+ : "+a"(result), "+m"(*__hp(addr))
+ : "r"((unsigned long)_new)
+ : "memory");
+ return result;
+ }
+#endif
+ }
+ /* generate an illegal instruction. Cannot catch this with linker tricks
+ * when optimizations are disabled. */
+ __asm__ __volatile__("ud2");
+ return 0;
+}
+
+#define uatomic_cmpxchg(addr, old, _new) \
+ ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
+ (unsigned long)(_new), \
+ sizeof(*(addr))))
+
+/* xchg */
+
+static inline __attribute__((always_inline))
+unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
+{
+ /* Note: the "xchg" instruction does not need a "lock" prefix. */
+ switch (len) {
+ case 1:
+ {
+ unsigned char result;
+ __asm__ __volatile__(
+ "xchgb %0, %1"
+ : "=q"(result), "+m"(*__hp(addr))
+ : "0" ((unsigned char)val)
+ : "memory");
+ return result;
+ }
+ case 2:
+ {
+ unsigned short result;
+ __asm__ __volatile__(
+ "xchgw %0, %1"
+ : "=r"(result), "+m"(*__hp(addr))
+ : "0" ((unsigned short)val)
+ : "memory");
+ return result;
+ }
+ case 4:
+ {
+ unsigned int result;
+ __asm__ __volatile__(
+ "xchgl %0, %1"
+ : "=r"(result), "+m"(*__hp(addr))
+ : "0" ((unsigned int)val)
+ : "memory");
+ return result;
+ }
+#if (BITS_PER_LONG == 64)
+ case 8:
+ {
+ unsigned long result;
+ __asm__ __volatile__(
+ "xchgq %0, %1"
+ : "=r"(result), "+m"(*__hp(addr))
+ : "0" ((unsigned long)val)
+ : "memory");
+ return result;
+ }
+#endif
+ }
+ /* generate an illegal instruction. Cannot catch this with linker tricks
+ * when optimizations are disabled. */
+ __asm__ __volatile__("ud2");
+ return 0;
+}
+
+#define uatomic_xchg(addr, v) \
+ ((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
+ sizeof(*(addr))))
+
+/* uatomic_add_return, uatomic_sub_return */
+
+static inline __attribute__((always_inline))
+unsigned long _uatomic_add_return(void *addr, unsigned long val,
+ int len)
+{
+ switch (len) {
+ case 1:
+ {
+ unsigned char result = val;
+
+ __asm__ __volatile__(
+ "lock; xaddb %1, %0"
+ : "+m"(*__hp(addr)), "+q" (result)
+ :
+ : "memory");
+ return result + (unsigned char)val;
+ }
+ case 2:
+ {
+ unsigned short result = val;
+
+ __asm__ __volatile__(
+ "lock; xaddw %1, %0"
+ : "+m"(*__hp(addr)), "+r" (result)
+ :
+ : "memory");
+ return result + (unsigned short)val;
+ }
+ case 4:
+ {
+ unsigned int result = val;
+
+ __asm__ __volatile__(
+ "lock; xaddl %1, %0"
+ : "+m"(*__hp(addr)), "+r" (result)
+ :
+ : "memory");
+ return result + (unsigned int)val;
+ }
+#if (BITS_PER_LONG == 64)
+ case 8:
+ {
+ unsigned long result = val;
+
+ __asm__ __volatile__(
+ "lock; xaddq %1, %0"
+ : "+m"(*__hp(addr)), "+r" (result)
+ :
+ : "memory");
+ return result + (unsigned long)val;
+ }
+#endif
+ }
+ /* generate an illegal instruction. Cannot catch this with linker tricks
+ * when optimizations are disabled. */
+ __asm__ __volatile__("ud2");
+ return 0;
+}
+
+#define uatomic_add_return(addr, v) \
+ ((__typeof__(*(addr))) _uatomic_add_return((addr), \
+ (unsigned long)(v), \
+ sizeof(*(addr))))
+
+#define uatomic_sub_return(addr, v) uatomic_add_return((addr), -(v))
+
+/* uatomic_add, uatomic_sub */
+
+static inline __attribute__((always_inline))
+void _uatomic_add(void *addr, unsigned long val, int len)
+{
+ switch (len) {
+ case 1:
+ {
+ __asm__ __volatile__(
+ "lock; addb %1, %0"
+ : "=m"(*__hp(addr))
+ : "iq" ((unsigned char)val)
+ : "memory");
+ return;
+ }
+ case 2:
+ {
+ __asm__ __volatile__(
+ "lock; addw %1, %0"
+ : "=m"(*__hp(addr))
+ : "ir" ((unsigned short)val)
+ : "memory");
+ return;
+ }
+ case 4:
+ {
+ __asm__ __volatile__(
+ "lock; addl %1, %0"
+ : "=m"(*__hp(addr))
+ : "ir" ((unsigned int)val)
+ : "memory");
+ return;
+ }
+#if (BITS_PER_LONG == 64)
+ case 8:
+ {
+ __asm__ __volatile__(
+ "lock; addq %1, %0"
+ : "=m"(*__hp(addr))
+ : "er" ((unsigned long)val)
+ : "memory");
+ return;
+ }
+#endif
+ }
+ /* generate an illegal instruction. Cannot catch this with linker tricks
+ * when optimizations are disabled. */
+ __asm__ __volatile__("ud2");
+ return;
+}
+
+#define uatomic_add(addr, v) \
+ (_uatomic_add((addr), (unsigned long)(v), sizeof(*(addr))))
+
+#define uatomic_sub(addr, v) uatomic_add((addr), -(v))
+
+
+/* uatomic_inc */
+
+static inline __attribute__((always_inline))
+void _uatomic_inc(void *addr, int len)
+{
+ switch (len) {
+ case 1:
+ {
+ __asm__ __volatile__(
+ "lock; incb %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+ case 2:
+ {
+ __asm__ __volatile__(
+ "lock; incw %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+ case 4:
+ {
+ __asm__ __volatile__(
+ "lock; incl %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+#if (BITS_PER_LONG == 64)
+ case 8:
+ {
+ __asm__ __volatile__(
+ "lock; incq %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+#endif
+ }
+ /* generate an illegal instruction. Cannot catch this with linker tricks
+ * when optimizations are disabled. */
+ __asm__ __volatile__("ud2");
+ return;
+}
+
+#define uatomic_inc(addr) (_uatomic_inc((addr), sizeof(*(addr))))
+
+/* uatomic_dec */
+
+static inline __attribute__((always_inline))
+void _uatomic_dec(void *addr, int len)
+{
+ switch (len) {
+ case 1:
+ {
+ __asm__ __volatile__(
+ "lock; decb %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+ case 2:
+ {
+ __asm__ __volatile__(
+ "lock; decw %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+ case 4:
+ {
+ __asm__ __volatile__(
+ "lock; decl %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+#if (BITS_PER_LONG == 64)
+ case 8:
+ {
+ __asm__ __volatile__(
+ "lock; decq %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+#endif
+ }
+ /* generate an illegal instruction. Cannot catch this with linker tricks
+ * when optimizations are disabled. */
+ __asm__ __volatile__("ud2");
+ return;
+}
+
+#define uatomic_dec(addr) (_uatomic_dec((addr), sizeof(*(addr))))
+
+#endif /* _URCU_ARCH_UATOMIC_X86_H */