+++ /dev/null
-#ifndef _URCU_ARCH_PPC_H
-#define _URCU_ARCH_PPC_H
-
-/*
- * arch_ppc.h: trivial definitions for the powerpc architecture.
- *
- * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <urcu/compiler.h>
-
-#define CONFIG_HAVE_FENCE 1
-#define CONFIG_HAVE_MEM_COHERENCY
-
-/* Match the size of POWER5+ L3 cache lines: 256 bytes */
-#define CACHE_LINE_SIZE 256
-
-#ifndef BITS_PER_LONG
-#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
-#endif
-
-#define mb() asm volatile("sync":::"memory")
-#define rmb() asm volatile("sync":::"memory")
-#define wmb()   asm volatile("sync":::"memory")
-
-/*
- * Architectures without cache coherency need something like the following:
- *
- * #define mb() mc()
- * #define rmb() rmc()
- * #define wmb() wmc()
- * #define mc() arch_cache_flush()
- * #define rmc() arch_cache_flush_read()
- * #define wmc() arch_cache_flush_write()
- */
-
-#define mc() barrier()
-#define rmc() barrier()
-#define wmc() barrier()
-
-/* Assume an SMP machine, since we do not have this information */
-#define CONFIG_SMP 1
-
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#define smp_mc() mc()
-#define smp_rmc() rmc()
-#define smp_wmc() wmc()
-#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_mc() barrier()
-#define smp_rmc() barrier()
-#define smp_wmc() barrier()
-#endif
-
-/* Nop everywhere except on alpha. */
-#define smp_read_barrier_depends()
-
-static inline void cpu_relax(void)
-{
- barrier();
-}
-
-/*
- * Serialize core instruction execution. Also acts as a compiler barrier.
- */
-static inline void sync_core(void)
-{
- asm volatile("isync" : : : "memory");
-}
-
-#define mftbl() \
- ({ \
- unsigned long rval; \
- asm volatile("mftbl %0" : "=r" (rval)); \
- rval; \
- })
-
-#define mftbu() \
- ({ \
- unsigned long rval; \
- asm volatile("mftbu %0" : "=r" (rval)); \
- rval; \
- })
-
-typedef unsigned long long cycles_t;
-
-static inline cycles_t get_cycles(void)
-{
-	unsigned long h, l;
-
- for (;;) {
- h = mftbu();
- barrier();
- l = mftbl();
- barrier();
- if (mftbu() == h)
- return (((cycles_t) h) << 32) + l;
- }
-}
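-
-/*
- * Usage sketch (illustrative, not part of the original API): the
- * mftbu/mftbl retry loop above is needed because the 64-bit timebase is
- * read as two 32-bit halves; if the low half wraps between the two reads,
- * the high half has carried and the pair would be inconsistent. The
- * result is in timebase ticks, which advance at a fixed,
- * implementation-defined frequency rather than at the core clock.
- */
-static inline cycles_t time_busy_work(void (*busy_work)(void))
-{
-	cycles_t start, end;
-
-	start = get_cycles();
-	busy_work();		/* code under measurement */
-	end = get_cycles();
-	return end - start;
-}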
-
-#endif /* _URCU_ARCH_PPC_H */
+++ /dev/null
-#ifndef _ARCH_S390_H
-#define _ARCH_S390_H
-
-/*
- * Trivial definitions for the S390 architecture based on information from the
- * Principles of Operation "CPU Serialization" (5-91), "BRANCH ON CONDITION"
- * (7-25) and "STORE CLOCK" (7-169).
- *
- * Copyright (c) 2009 Novell, Inc.
- * Author: Jan Blunck <jblunck@suse.de>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <urcu/compiler.h>
-
-#define CONFIG_HAVE_MEM_COHERENCY
-/* Assume an SMP machine, since we do not have this information */
-#define CONFIG_SMP 1
-
-#ifndef BITS_PER_LONG
-#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
-#endif
-
-#define mb() __asm__ __volatile__("bcr 15,0" : : : "memory")
-#define rmb()    __asm__ __volatile__("bcr 15,0" : : : "memory")
-#define wmb()    __asm__ __volatile__("bcr 15,0" : : : "memory")
-#define mc() barrier()
-#define rmc() barrier()
-#define wmc() barrier()
-
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#define smp_mc() mc()
-#define smp_rmc() rmc()
-#define smp_wmc() wmc()
-
-/* Nop everywhere except on alpha. */
-#define smp_read_barrier_depends()
-
-static inline void cpu_relax(void)
-{
- barrier();
-}
-
-static inline void sync_core(void)
-{
- __asm__ __volatile__("bcr 15,0" : : : "memory");
-}
-
-typedef unsigned long long cycles_t;
-
-static inline cycles_t get_cycles(void)
-{
- cycles_t cycles;
-
- __asm__ __volatile__("stck %0" : "=m" (cycles) : : "cc", "memory" );
-
- return cycles;
-}
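-
-/*
- * Usage sketch (illustrative, not part of the original API): the classic
- * message-passing pattern the smp_wmb()/smp_rmb() pair supports. The
- * msg_data/msg_ready names are examples.
- */
-static inline void msg_send(int *msg_data, int *msg_ready, int v)
-{
-	*msg_data = v;
-	smp_wmb();	/* order the data store before the flag store */
-	*msg_ready = 1;
-}
-
-static inline int msg_recv(int *msg_data, int *msg_ready)
-{
-	while (!ACCESS_ONCE(*msg_ready))
-		cpu_relax();
-	smp_rmb();	/* order the flag load before the data load */
-	return *msg_data;
-}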
-
-#endif /* _ARCH_S390_H */
+++ /dev/null
-#ifndef _URCU_ARCH_UATOMIC_PPC_H
-#define _URCU_ARCH_UATOMIC_PPC_H
-
-/*
- * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
- * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
- * Copyright (c) 2009 Mathieu Desnoyers
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose, provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- *
- * Code inspired from libuatomic_ops-1.2, inherited in part from the
- * Boehm-Demers-Weiser conservative garbage collector.
- */
-
-#include <urcu/compiler.h>
-
-#ifndef __SIZEOF_LONG__
-#ifdef __powerpc64__
-#define __SIZEOF_LONG__ 8
-#else
-#define __SIZEOF_LONG__ 4
-#endif
-#endif
-
-#ifndef BITS_PER_LONG
-#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
-#endif
-
-#define ILLEGAL_INSTR ".long 0xd00d00"
-
-#define uatomic_set(addr, v) \
-do { \
- ACCESS_ONCE(*(addr)) = (v); \
-} while (0)
-
-#define uatomic_read(addr) ACCESS_ONCE(*(addr))
-
-/*
- * Use an isync as the second barrier in exchange to provide acquire
- * semantics. According to uatomic_ops/sysdeps/gcc/powerpc.h, the
- * documentation is "fairly explicit that this also has acquire semantics".
- * Derived from AO_compare_and_swap(), but with the comparison removed.
- */
-
-/* xchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
-{
- switch (len) {
- case 4:
- {
- unsigned int result;
-
- __asm__ __volatile__(
- "lwsync\n"
- "1:\t" "lwarx %0,0,%1\n" /* load and reserve */
-		"stwcx. %2,0,%1\n"	/* store conditional */
- "bne- 1b\n" /* retry if lost reservation */
- "isync\n"
- : "=&r"(result)
- : "r"(addr), "r"(val)
- : "memory", "cc");
-
- return result;
- }
-#if (BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long result;
-
- __asm__ __volatile__(
- "lwsync\n"
- "1:\t" "ldarx %0,0,%1\n" /* load and reserve */
-		"stdcx. %2,0,%1\n"	/* store conditional */
- "bne- 1b\n" /* retry if lost reservation */
- "isync\n"
- : "=&r"(result)
- : "r"(addr), "r"(val)
- : "memory", "cc");
-
- return result;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__(ILLEGAL_INSTR);
- return 0;
-}
-
-#define uatomic_xchg(addr, v) \
- ((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
- sizeof(*(addr))))
-/* cmpxchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
- unsigned long _new, int len)
-{
- switch (len) {
- case 4:
- {
- unsigned int old_val;
-
- __asm__ __volatile__(
- "lwsync\n"
- "1:\t" "lwarx %0,0,%1\n" /* load and reserve */
-		"cmpw %0,%3\n"		/* if load is not equal to */
- "bne 2f\n" /* old, fail */
- "stwcx. %2,0,%1\n" /* else store conditional */
- "bne- 1b\n" /* retry if lost reservation */
- "isync\n"
- "2:\n"
- : "=&r"(old_val)
- : "r"(addr), "r"((unsigned int)_new),
- "r"((unsigned int)old)
- : "memory", "cc");
-
- return old_val;
- }
-#if (BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long old_val;
-
- __asm__ __volatile__(
- "lwsync\n"
- "1:\t" "ldarx %0,0,%1\n" /* load and reserve */
- "cmpd %0,%3\n" /* if load is not equal to */
- "bne 2f\n" /* old, fail */
- "stdcx. %2,0,%1\n" /* else store conditional */
- "bne- 1b\n" /* retry if lost reservation */
- "isync\n"
- "2:\n"
-		: "=&r"(old_val)
- : "r"(addr), "r"((unsigned long)_new),
- "r"((unsigned long)old)
- : "memory", "cc");
-
- return old_val;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__(ILLEGAL_INSTR);
- return 0;
-}
-
-
-#define uatomic_cmpxchg(addr, old, _new) \
- ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
- (unsigned long)(_new), \
- sizeof(*(addr))))
-
-/* uatomic_add_return */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_add_return(void *addr, unsigned long val,
- int len)
-{
- switch (len) {
- case 4:
- {
- unsigned int result;
-
- __asm__ __volatile__(
- "lwsync\n"
- "1:\t" "lwarx %0,0,%1\n" /* load and reserve */
- "add %0,%2,%0\n" /* add val to value loaded */
- "stwcx. %0,0,%1\n" /* store conditional */
- "bne- 1b\n" /* retry if lost reservation */
- "isync\n"
- : "=&r"(result)
- : "r"(addr), "r"(val)
- : "memory", "cc");
-
- return result;
- }
-#if (BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long result;
-
- __asm__ __volatile__(
- "lwsync\n"
- "1:\t" "ldarx %0,0,%1\n" /* load and reserve */
- "add %0,%2,%0\n" /* add val to value loaded */
- "stdcx. %0,0,%1\n" /* store conditional */
- "bne- 1b\n" /* retry if lost reservation */
- "isync\n"
- : "=&r"(result)
- : "r"(addr), "r"(val)
- : "memory", "cc");
-
- return result;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__(ILLEGAL_INSTR);
- return 0;
-}
-
-
-#define uatomic_add_return(addr, v) \
- ((__typeof__(*(addr))) _uatomic_add_return((addr), \
- (unsigned long)(v), \
- sizeof(*(addr))))
-
-/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */
-
-#define uatomic_sub_return(addr, v) uatomic_add_return((addr), -(v))
-
-#define uatomic_add(addr, v) (void)uatomic_add_return((addr), (v))
-#define uatomic_sub(addr, v) (void)uatomic_sub_return((addr), (v))
-
-#define uatomic_inc(addr) uatomic_add((addr), 1)
-#define uatomic_dec(addr) uatomic_add((addr), -1)
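-
-/*
- * Usage sketch (illustrative, not part of the original API): richer
- * operations are built from uatomic_cmpxchg() with a read-modify-CAS
- * retry loop, here an atomic maximum.
- */
-static inline void uatomic_max_ulong(unsigned long *addr, unsigned long v)
-{
-	unsigned long old;
-
-	do {
-		old = uatomic_read(addr);
-		if (old >= v)
-			return;	/* already at least v; nothing to do */
-	} while (uatomic_cmpxchg(addr, old, v) != old);
-}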
-
-#endif /* _URCU_ARCH_UATOMIC_PPC_H */
+++ /dev/null
-#ifndef _URCU_ARCH_ATOMIC_S390_H
-#define _URCU_ARCH_ATOMIC_S390_H
-
-/*
- * Atomic exchange operations for the S390 architecture. Based on information
- * taken from the Principles of Operation Appendix A "Conditional Swapping
- * Instructions (CS, CDS)".
- *
- * Copyright (c) 2009 Novell, Inc.
- * Author: Jan Blunck <jblunck@suse.de>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef __SIZEOF_LONG__
-#ifdef __s390x__
-#define __SIZEOF_LONG__ 8
-#else
-#define __SIZEOF_LONG__ 4
-#endif
-#endif
-
-#ifndef BITS_PER_LONG
-#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
-#endif
-
-static inline __attribute__((always_inline))
-unsigned int uatomic_exchange_32(volatile unsigned int *addr, unsigned int val)
-{
- unsigned int result;
-
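-	/*
-	 * cs compares %0 with the word at *addr: on mismatch it loads the
-	 * current value into %0 and sets condition code 1, so the brc
-	 * retries. The first iteration therefore merely fetches the old
-	 * value; a later iteration performs the actual swap.
-	 */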
- __asm__ __volatile__(
- "0: cs %0,%2,%1\n"
- " brc 4,0b\n"
- : "=&r"(result), "=m" (*addr)
- : "r"(val), "m" (*addr)
- : "memory", "cc");
-
- return result;
-}
-
-#if (BITS_PER_LONG == 64)
-
-static inline __attribute__((always_inline))
-unsigned long uatomic_exchange_64(volatile unsigned long *addr,
- unsigned long val)
-{
- unsigned long result;
-
- __asm__ __volatile__(
- "0: csg %0,%2,%1\n"
- " brc 4,0b\n"
- : "=&r"(result), "=m" (*addr)
- : "r"(val), "m" (*addr)
- : "memory", "cc");
-
- return result;
-}
-
-#endif
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_exchange(volatile void *addr, unsigned long val, int len)
-{
- switch (len) {
- case 4:
- return uatomic_exchange_32(addr, val);
-#if (BITS_PER_LONG == 64)
- case 8:
- return uatomic_exchange_64(addr, val);
-#endif
- default:
- __asm__ __volatile__(".long 0xd00d00");
- }
-
- return 0;
-}
-
-#define uatomic_xchg(addr, v) \
-	((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
-						  sizeof(*(addr))))
-
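-/*
- * Usage sketch (illustrative, not part of the original API): an exchange
- * primitive is sufficient for a test-and-set spinlock. The lock type is
- * an example; a cpu_relax()-style pause from the matching arch header
- * would normally go in the spin loop.
- */
-struct uatomic_ts_lock {
-	unsigned int locked;	/* 0 = free, 1 = held */
-};
-
-static inline void uatomic_ts_lock(struct uatomic_ts_lock *l)
-{
-	/* uatomic_xchg() returns the previous value: spin until it was 0. */
-	while (uatomic_xchg(&l->locked, 1) != 0)
-		;	/* cpu_relax() would go here */
-}
-
-static inline void uatomic_ts_unlock(struct uatomic_ts_lock *l)
-{
-	/* cs serializes, so critical-section stores are visible first. */
-	uatomic_xchg(&l->locked, 0);
-}
-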
-#endif /* _URCU_ARCH_ATOMIC_S390_H */
+++ /dev/null
-#ifndef _URCU_ARCH_UATOMIC_X86_H
-#define _URCU_ARCH_UATOMIC_X86_H
-
-/*
- * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
- * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
- * Copyright (c) 2009 Mathieu Desnoyers
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose, provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- *
- * Code inspired from libuatomic_ops-1.2, inherited in part from the
- * Boehm-Demers-Weiser conservative garbage collector.
- */
-
-#include <urcu/compiler.h>
-
-#ifndef BITS_PER_LONG
-#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
-#endif
-
-/*
- * Derived from AO_compare_and_swap() and AO_test_and_set_full().
- */
-
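-/*
- * __hp() casts a pointer to an oversized dummy type so that the "m"
- * operands below cover a wide memory region rather than a single long;
- * the compiler then assumes the asm may touch the whole pointed-to object.
- */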
-struct __uatomic_dummy {
- unsigned long v[10];
-};
-#define __hp(x) ((struct __uatomic_dummy *)(x))
-
-#define uatomic_set(addr, v) \
-do { \
- ACCESS_ONCE(*(addr)) = (v); \
-} while (0)
-
-#define uatomic_read(addr) ACCESS_ONCE(*(addr))
-
-/* cmpxchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
- unsigned long _new, int len)
-{
- switch (len) {
- case 1:
- {
- unsigned char result = old;
-
- __asm__ __volatile__(
- "lock; cmpxchgb %2, %1"
- : "+a"(result), "+m"(*__hp(addr))
- : "q"((unsigned char)_new)
- : "memory");
- return result;
- }
- case 2:
- {
- unsigned short result = old;
-
- __asm__ __volatile__(
- "lock; cmpxchgw %2, %1"
- : "+a"(result), "+m"(*__hp(addr))
- : "r"((unsigned short)_new)
- : "memory");
- return result;
- }
- case 4:
- {
- unsigned int result = old;
-
- __asm__ __volatile__(
- "lock; cmpxchgl %2, %1"
- : "+a"(result), "+m"(*__hp(addr))
- : "r"((unsigned int)_new)
- : "memory");
- return result;
- }
-#if (BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long result = old;
-
- __asm__ __volatile__(
- "lock; cmpxchgq %2, %1"
- : "+a"(result), "+m"(*__hp(addr))
- : "r"((unsigned long)_new)
- : "memory");
- return result;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__("ud2");
- return 0;
-}
-
-#define uatomic_cmpxchg(addr, old, _new) \
- ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
- (unsigned long)(_new), \
- sizeof(*(addr))))
-
-/* xchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
-{
- /* Note: the "xchg" instruction does not need a "lock" prefix. */
- switch (len) {
- case 1:
- {
- unsigned char result;
- __asm__ __volatile__(
- "xchgb %0, %1"
- : "=q"(result), "+m"(*__hp(addr))
- : "0" ((unsigned char)val)
- : "memory");
- return result;
- }
- case 2:
- {
- unsigned short result;
- __asm__ __volatile__(
- "xchgw %0, %1"
- : "=r"(result), "+m"(*__hp(addr))
- : "0" ((unsigned short)val)
- : "memory");
- return result;
- }
- case 4:
- {
- unsigned int result;
- __asm__ __volatile__(
- "xchgl %0, %1"
- : "=r"(result), "+m"(*__hp(addr))
- : "0" ((unsigned int)val)
- : "memory");
- return result;
- }
-#if (BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long result;
- __asm__ __volatile__(
- "xchgq %0, %1"
- : "=r"(result), "+m"(*__hp(addr))
- : "0" ((unsigned long)val)
- : "memory");
- return result;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__("ud2");
- return 0;
-}
-
-#define uatomic_xchg(addr, v) \
- ((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
- sizeof(*(addr))))
-
-/* uatomic_add_return, uatomic_sub_return */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_add_return(void *addr, unsigned long val,
- int len)
-{
- switch (len) {
- case 1:
- {
- unsigned char result = val;
-
- __asm__ __volatile__(
- "lock; xaddb %1, %0"
- : "+m"(*__hp(addr)), "+q" (result)
- :
- : "memory");
- return result + (unsigned char)val;
- }
- case 2:
- {
- unsigned short result = val;
-
- __asm__ __volatile__(
- "lock; xaddw %1, %0"
- : "+m"(*__hp(addr)), "+r" (result)
- :
- : "memory");
- return result + (unsigned short)val;
- }
- case 4:
- {
- unsigned int result = val;
-
- __asm__ __volatile__(
- "lock; xaddl %1, %0"
- : "+m"(*__hp(addr)), "+r" (result)
- :
- : "memory");
- return result + (unsigned int)val;
- }
-#if (BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long result = val;
-
- __asm__ __volatile__(
- "lock; xaddq %1, %0"
- : "+m"(*__hp(addr)), "+r" (result)
- :
- : "memory");
- return result + (unsigned long)val;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__("ud2");
- return 0;
-}
-
-#define uatomic_add_return(addr, v) \
- ((__typeof__(*(addr))) _uatomic_add_return((addr), \
- (unsigned long)(v), \
- sizeof(*(addr))))
-
-#define uatomic_sub_return(addr, v) uatomic_add_return((addr), -(v))
-
-/* uatomic_add, uatomic_sub */
-
-static inline __attribute__((always_inline))
-void _uatomic_add(void *addr, unsigned long val, int len)
-{
- switch (len) {
- case 1:
- {
- __asm__ __volatile__(
- "lock; addb %1, %0"
- : "=m"(*__hp(addr))
- : "iq" ((unsigned char)val)
- : "memory");
- return;
- }
- case 2:
- {
- __asm__ __volatile__(
- "lock; addw %1, %0"
- : "=m"(*__hp(addr))
- : "ir" ((unsigned short)val)
- : "memory");
- return;
- }
- case 4:
- {
- __asm__ __volatile__(
- "lock; addl %1, %0"
- : "=m"(*__hp(addr))
- : "ir" ((unsigned int)val)
- : "memory");
- return;
- }
-#if (BITS_PER_LONG == 64)
- case 8:
- {
- __asm__ __volatile__(
- "lock; addq %1, %0"
- : "=m"(*__hp(addr))
- : "er" ((unsigned long)val)
- : "memory");
- return;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__("ud2");
- return;
-}
-
-#define uatomic_add(addr, v) \
- (_uatomic_add((addr), (unsigned long)(v), sizeof(*(addr))))
-
-#define uatomic_sub(addr, v) uatomic_add((addr), -(v))
-
-
-/* uatomic_inc */
-
-static inline __attribute__((always_inline))
-void _uatomic_inc(void *addr, int len)
-{
- switch (len) {
- case 1:
- {
- __asm__ __volatile__(
- "lock; incb %0"
- : "=m"(*__hp(addr))
- :
- : "memory");
- return;
- }
- case 2:
- {
- __asm__ __volatile__(
- "lock; incw %0"
- : "=m"(*__hp(addr))
- :
- : "memory");
- return;
- }
- case 4:
- {
- __asm__ __volatile__(
- "lock; incl %0"
- : "=m"(*__hp(addr))
- :
- : "memory");
- return;
- }
-#if (BITS_PER_LONG == 64)
- case 8:
- {
- __asm__ __volatile__(
- "lock; incq %0"
- : "=m"(*__hp(addr))
- :
- : "memory");
- return;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__("ud2");
- return;
-}
-
-#define uatomic_inc(addr) (_uatomic_inc((addr), sizeof(*(addr))))
-
-/* uatomic_dec */
-
-static inline __attribute__((always_inline))
-void _uatomic_dec(void *addr, int len)
-{
- switch (len) {
- case 1:
- {
- __asm__ __volatile__(
- "lock; decb %0"
- : "=m"(*__hp(addr))
- :
- : "memory");
- return;
- }
- case 2:
- {
- __asm__ __volatile__(
- "lock; decw %0"
- : "=m"(*__hp(addr))
- :
- : "memory");
- return;
- }
- case 4:
- {
- __asm__ __volatile__(
- "lock; decl %0"
- : "=m"(*__hp(addr))
- :
- : "memory");
- return;
- }
-#if (BITS_PER_LONG == 64)
- case 8:
- {
- __asm__ __volatile__(
- "lock; decq %0"
- : "=m"(*__hp(addr))
- :
- : "memory");
- return;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__("ud2");
- return;
-}
-
-#define uatomic_dec(addr) (_uatomic_dec((addr), sizeof(*(addr))))
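-
-/*
- * Usage sketch (illustrative, not part of the original API): a reference
- * count where exactly one caller must observe the drop to zero, which is
- * what the atomic return value of uatomic_add_return() provides.
- */
-struct uatomic_ref {
-	long count;
-};
-
-static inline void uatomic_ref_get(struct uatomic_ref *r)
-{
-	uatomic_inc(&r->count);
-}
-
-static inline int uatomic_ref_put(struct uatomic_ref *r)
-{
-	/* Returns nonzero to the single caller that dropped the last ref. */
-	return uatomic_add_return(&r->count, -1) == 0;
-}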
-
-#endif /* _URCU_ARCH_UATOMIC_X86_H */
+++ /dev/null
-#ifndef _URCU_ARCH_X86_H
-#define _URCU_ARCH_X86_H
-
-/*
- * arch_x86.h: trivial definitions for the x86 architecture.
- *
- * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <urcu/compiler.h>
-
-/* Assume P4 or newer */
-#define CONFIG_HAVE_FENCE 1
-#define CONFIG_HAVE_MEM_COHERENCY
-
-#define CACHE_LINE_SIZE 128
-
-#ifndef BITS_PER_LONG
-#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
-#endif
-
-#ifdef CONFIG_HAVE_FENCE
-#define mb() asm volatile("mfence":::"memory")
-#define rmb() asm volatile("lfence":::"memory")
-#define wmb()   asm volatile("sfence":::"memory")
-#else
-/*
- * Some non-Intel clones support out of order store. wmb() ceases to be a
- * nop for these.
- */
-#define mb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
-#define rmb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
-#define wmb()   asm volatile("lock; addl $0,0(%%esp)":::"memory")
-#endif
-
-/*
- * Architectures without cache coherency need something like the following:
- *
- * #define mb() mc()
- * #define rmb() rmc()
- * #define wmb() wmc()
- * #define mc() arch_cache_flush()
- * #define rmc() arch_cache_flush_read()
- * #define wmc() arch_cache_flush_write()
- */
-
-#define mc() barrier()
-#define rmc() barrier()
-#define wmc() barrier()
-
-/* Assume an SMP machine, since we do not have this information */
-#define CONFIG_SMP 1
-
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#define smp_mc() mc()
-#define smp_rmc() rmc()
-#define smp_wmc() wmc()
-#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_mc() barrier()
-#define smp_rmc() barrier()
-#define smp_wmc() barrier()
-#endif
-
-/* Nop everywhere except on alpha. */
-#define smp_read_barrier_depends()
-
-static inline void rep_nop(void)
-{
- asm volatile("rep; nop" : : : "memory");
-}
-
-static inline void cpu_relax(void)
-{
- rep_nop();
-}
-
-/*
- * Serialize core instruction execution. Also acts as a compiler barrier.
- */
-#ifdef __PIC__
-/*
- * Cannot use cpuid because it clobbers the ebx register and clashes
- * with -fPIC :
- * error: PIC register 'ebx' clobbered in 'asm'
- */
-static inline void sync_core(void)
-{
- mb();
-}
-#else
-static inline void sync_core(void)
-{
- asm volatile("cpuid" : : : "memory", "eax", "ebx", "ecx", "edx");
-}
-#endif
-
-#define rdtscll(val) \
- do { \
- unsigned int __a, __d; \
- asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
- (val) = ((unsigned long long)__a) \
- | (((unsigned long long)__d) << 32); \
-	} while (0)
-
-typedef unsigned long long cycles_t;
-
-static inline cycles_t get_cycles(void)
-{
- cycles_t ret = 0;
-
- rdtscll(ret);
- return ret;
-}
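-
-/*
- * Usage sketch (illustrative, not part of the original API): rdtsc is not
- * ordered against surrounding instructions on out-of-order cores, so a
- * precise measurement serializes with sync_core() before reading the TSC.
- */
-static inline cycles_t get_cycles_sync(void)
-{
-	sync_core();		/* drain in-flight instructions first */
-	return get_cycles();
-}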
-
-#endif /* _URCU_ARCH_X86_H */
+++ /dev/null
-#ifndef _URCU_COMPILER_H
-#define _URCU_COMPILER_H
-
-/*
- * compiler.h
- *
- * Compiler definitions.
- *
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose, provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- */
-
-#include <stddef.h> /* for offsetof */
-
-#define likely(x) __builtin_expect(!!(x), 1)
-#define unlikely(x) __builtin_expect(!!(x), 0)
-
-#define barrier() asm volatile("" : : : "memory")
-
-/*
- * Instruct the compiler to perform only a single access to a variable
- * (prohibits merging and refetching). The compiler is also forbidden from
- * reordering successive instances of ACCESS_ONCE(), but only when the
- * compiler is aware of some particular ordering. Compiler ordering can be
- * ensured, for example, by putting two ACCESS_ONCE() in separate C
- * statements.
- *
- * This macro does absolutely -nothing- to prevent the CPU from reordering,
- * merging, or refetching absolutely anything at any time. Its main intended
- * use is to mediate communication between process-level code and irq/NMI
- * handlers, all running on the same CPU.
- */
-#define ACCESS_ONCE(x)	(*(volatile typeof(x) *)&(x))
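-
-/*
- * Usage sketch (illustrative): without ACCESS_ONCE(), the compiler may
- * hoist the load out of the loop and spin forever on a stale value.
- */
-static inline void spin_until_set(int *flag)
-{
-	while (!ACCESS_ONCE(*flag))
-		;	/* each iteration performs a fresh volatile load */
-}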
-
-#endif /* _URCU_COMPILER_H */
+++ /dev/null
-#ifndef _KCOMPAT_HLIST_H
-#define _KCOMPAT_HLIST_H
-
-/*
- * Kernel-source-compatible lightweight list with a single-pointer head,
- * useful for implementing hash tables.
- *
- * Copyright (C) 2009 Novell Inc.
- *
- * Author: Jan Blunck <jblunck@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU Lesser General Public License version 2.1 as
- * published by the Free Software Foundation.
- */
-
-struct hlist_head
-{
- struct hlist_node *next;
-};
-
-struct hlist_node
-{
- struct hlist_node *next;
- struct hlist_node *prev;
-};
-
-/* Initialize a new list head. */
-static inline void INIT_HLIST_HEAD(struct hlist_head *ptr)
-{
- ptr->next = NULL;
-}
-
-/* Get typed element from list at a given position. */
-#define hlist_entry(ptr, type, member) \
- ((type *) ((char *) (ptr) - (unsigned long) (&((type *) 0)->member)))
-
-/* Add new element at the head of the list. */
-static inline void hlist_add_head (struct hlist_node *newp,
- struct hlist_head *head)
-{
- if (head->next)
- head->next->prev = newp;
-
- newp->next = head->next;
- newp->prev = (struct hlist_node *)head;
- head->next = newp;
-}
-
-/* Remove element from list. */
-static inline void hlist_del (struct hlist_node *elem)
-{
- if (elem->next)
- elem->next->prev = elem->prev;
-
- elem->prev->next = elem->next;
-}
-
-#define hlist_for_each_entry(entry, pos, head, member) \
- for (pos = (head)->next, \
- entry = hlist_entry(pos, typeof(*entry), member); \
- pos != NULL; \
- pos = pos->next, \
- entry = hlist_entry(pos, typeof(*entry), member))
-
-#define hlist_for_each_entry_safe(entry, pos, p, head, member) \
- for (pos = (head)->next, \
- entry = hlist_entry(pos, typeof(*entry), member); \
- (pos != NULL) && ({ p = pos->next; 1;}); \
- pos = p, \
- entry = hlist_entry(pos, typeof(*entry), member))
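-
-/*
- * Usage sketch (illustrative, not part of the original API): hlist heads
- * are a single pointer, which makes them a compact fit for hash table
- * buckets. The item type and lookup helper are examples.
- */
-struct hlist_example_item {
-	unsigned long key;
-	struct hlist_node node;		/* embedded linkage */
-};
-
-static inline struct hlist_example_item *
-hlist_example_lookup(struct hlist_head *bucket, unsigned long key)
-{
-	struct hlist_example_item *entry;
-	struct hlist_node *pos;
-
-	hlist_for_each_entry(entry, pos, bucket, node)
-		if (entry->key == key)
-			return entry;
-	return 0;	/* NULL; <stddef.h> is not included here */
-}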
-
-#endif /* _KCOMPAT_HLIST_H */
+++ /dev/null
-/* Copyright (C) 2002 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
- Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#ifndef _LIST_H
-#define _LIST_H 1
-
-/* The definitions in this file are adapted from those found in the
-   Linux kernel headers, so that people familiar with the latter can
-   find their way in these sources as well. */
-
-
-/* Basic type for the double-link list. */
-typedef struct list_head
-{
- struct list_head *next;
- struct list_head *prev;
-} list_t;
-
-
-/* Define a variable with the head and tail of the list. */
-#define LIST_HEAD(name) \
- list_t name = { &(name), &(name) }
-
-/* Initialize a new list head. */
-#define INIT_LIST_HEAD(ptr) \
- (ptr)->next = (ptr)->prev = (ptr)
-
-#define LIST_HEAD_INIT(name) { .prev = &(name), .next = &(name) }
-
-/* Add new element at the head of the list. */
-static inline void
-list_add (list_t *newp, list_t *head)
-{
- head->next->prev = newp;
- newp->next = head->next;
- newp->prev = head;
- head->next = newp;
-}
-
-
-/* Add new element at the tail of the list. */
-static inline void
-list_add_tail (list_t *newp, list_t *head)
-{
- head->prev->next = newp;
- newp->next = head;
- newp->prev = head->prev;
- head->prev = newp;
-}
-
-
-/* Remove element from list. */
-static inline void
-__list_del (list_t *prev, list_t *next)
-{
- next->prev = prev;
- prev->next = next;
-}
-
-/* Remove element from list. */
-static inline void
-list_del (list_t *elem)
-{
- __list_del (elem->prev, elem->next);
-}
-
-/* delete from list, add to another list as head */
-static inline void
-list_move (list_t *elem, list_t *head)
-{
- __list_del (elem->prev, elem->next);
- list_add (elem, head);
-}
-
-/* Join two lists. */
-static inline void
-list_splice (list_t *add, list_t *head)
-{
- /* Do nothing if the list which gets added is empty. */
- if (add != add->next)
- {
- add->next->prev = head;
- add->prev->next = head->next;
- head->next->prev = add->prev;
- head->next = add->next;
- }
-}
-
-
-/* Get typed element from list at a given position. */
-#define list_entry(ptr, type, member) \
- ((type *) ((char *) (ptr) - (unsigned long) (&((type *) 0)->member)))
-
-
-
-/* Iterate forward over the elements of the list. */
-#define list_for_each(pos, head) \
- for (pos = (head)->next; pos != (head); pos = pos->next)
-
-
-/* Iterate backwards over the elements of the list. */
-#define list_for_each_prev(pos, head) \
- for (pos = (head)->prev; pos != (head); pos = pos->prev)
-
-
-/* Iterate backwards over the elements of the list. The list elements can
-   be removed from the list while doing this. */
-#define list_for_each_prev_safe(pos, p, head) \
- for (pos = (head)->prev, p = pos->prev; \
- pos != (head); \
- pos = p, p = pos->prev)
-
-#define list_for_each_entry(pos, head, member) \
- for (pos = list_entry((head)->next, typeof(*pos), member); \
- &pos->member != (head); \
- pos = list_entry(pos->member.next, typeof(*pos), member))
-
-#define list_for_each_entry_reverse(pos, head, member) \
- for (pos = list_entry((head)->prev, typeof(*pos), member); \
- &pos->member != (head); \
- pos = list_entry(pos->member.prev, typeof(*pos), member))
-
-#define list_for_each_entry_safe(pos, p, head, member) \
- for (pos = list_entry((head)->next, typeof(*pos), member), \
- p = list_entry(pos->member.next,typeof(*pos), member); \
- &pos->member != (head); \
- pos = p, p = list_entry(pos->member.next, typeof(*pos), member))
-
-static inline int list_empty(list_t *head)
-{
- return head == head->next;
-}
-
-static inline void list_replace_init(list_t *old,
- list_t *new)
-{
- list_t *head = old->next;
- list_del(old);
- list_add_tail(new, head);
- INIT_LIST_HEAD(old);
-}
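-
-/* Usage sketch (illustrative, not part of the original API): embed a
-   list_head in the element type and use the typed iterators. The task
-   type is an example. */
-struct list_example_task {
-	int id;
-	list_t link;	/* embedded linkage */
-};
-
-static inline int list_example_sum_ids(list_t *queue)
-{
-	struct list_example_task *t;
-	int sum = 0;
-
-	list_for_each_entry(t, queue, link)
-		sum += t->id;
-	return sum;
-}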
-
-#endif /* list.h */
+++ /dev/null
-/* Copyright (C) 2002 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
- Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-
- Copyright (C) 2009 Pierre-Marc Fournier
- Conversion to RCU list.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#ifndef _URCU_RCULIST_H
-#define _URCU_RCULIST_H
-
-#include <urcu/list.h>
-#include <urcu.h>
-
-/* Add new element at the head of the list. */
-static inline void list_add_rcu(list_t *newp, list_t *head)
-{
- newp->next = head->next;
- newp->prev = head;
- smp_wmb();
- head->next->prev = newp;
- head->next = newp;
-}
-
-
-/* Remove element from list. */
-static inline void list_del_rcu(list_t *elem)
-{
- elem->next->prev = elem->prev;
- elem->prev->next = elem->next;
-}
-
-
-/* Iterate through elements of the list.
- * This must be done while rcu_read_lock() is held.
- */
-
-#define list_for_each_entry_rcu(pos, head, member) \
- for (pos = list_entry(rcu_dereference((head)->next), typeof(*pos), member); \
- &pos->member != (head); \
- pos = list_entry(rcu_dereference(pos->member.next), typeof(*pos), member))
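-
-/* Usage sketch (illustrative, not part of the original API): readers
-   traverse under rcu_read_lock(); updaters unlink with list_del_rcu()
-   and wait out a grace period with synchronize_rcu() (both from urcu.h)
-   before freeing. The entry type is an example. */
-struct rculist_example_entry {
-	int key;
-	list_t link;
-};
-
-static inline int rculist_example_key_present(list_t *head, int key)
-{
-	struct rculist_example_entry *e;
-	int found = 0;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(e, head, link)
-		if (e->key == key) {
-			found = 1;
-			break;
-		}
-	rcu_read_unlock();
-	return found;
-}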
-
-#endif /* _URCU_RCULIST_H */