/*
 * Check if NULL. Don't let users expect that they are taken into
 * account.
*/
assert(!timeout);
assert(!uaddr2);
/*
* Check if NULL. Don't let users expect that they are taken into
 * account.
*/
assert(!timeout);
assert(!uaddr2);
* To discuss these guarantees, we first define "read" operation as any
* of the the basic cds_lfht_lookup, cds_lfht_next_duplicate,
* cds_lfht_first, cds_lfht_next operation, as well as
 * cds_lfht_add_unique (failure).
*
* We define "read traversal" operation as any of the following
* group of operations
* shrink hash table from order 6 to 5: fini the index=6 bucket node table
*
* A bit of ascii art explanation:
 *
* The order index is the off-by-one compared to the actual power of 2
* because we use index 0 to deal with the 0 special-case.
 *
* This shows the nodes for a small table ordered by reversed bits:
 *
* bits reverse
* 0 000 000
* 4 100 001
* 5 101 101
* 3 011 110
* 7 111 111
 *
 * This shows the nodes in order of non-reversed bits, linked by
* reversed-bit order.
 *
* order bits reverse
* 0 0 000 000
* 1 | 1 001 100 <-
* Originally from Public Domain.
*/
-static const uint8_t BitReverseTable256[256] =
+static const uint8_t BitReverseTable256[256] =
{
#define R2(n) (n), (n) + 2*64, (n) + 1*64, (n) + 3*64
#define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
/*
 * Reverse the bit order of a 32-bit value.
 *
 * Reverses each of the four bytes with bit_reverse_u8() (table-driven
 * byte reversal declared earlier in this file) and swaps the byte
 * positions: the low byte, bit-reversed, becomes the high byte, etc.
 *
 * Note: the original span was corrupted by leftover unified-diff
 * markers that duplicated the return expression (three `-` lines
 * followed by three `+` lines), making it invalid C; this keeps a
 * single clean copy of the expression.
 */
static
uint32_t bit_reverse_u32(uint32_t v)
{
	return ((uint32_t) bit_reverse_u8(v) << 24) |
		((uint32_t) bit_reverse_u8(v >> 8) << 16) |
		((uint32_t) bit_reverse_u8(v >> 16) << 8) |
		((uint32_t) bit_reverse_u8(v >> 24));
}
#else
/*
 * Reverse the bit order of a 64-bit value.
 *
 * Same scheme as the 32-bit variant: each of the eight bytes is
 * bit-reversed with bit_reverse_u8() (declared earlier in this file)
 * and the byte positions are mirrored end-to-end.
 *
 * Note: the original span was corrupted by leftover unified-diff
 * markers that duplicated two pairs of the shift terms (`-` lines
 * immediately followed by their `+` replacements), making it invalid
 * C; this keeps a single clean copy of each term.
 */
static
uint64_t bit_reverse_u64(uint64_t v)
{
	return ((uint64_t) bit_reverse_u8(v) << 56) |
		((uint64_t) bit_reverse_u8(v >> 8) << 48) |
		((uint64_t) bit_reverse_u8(v >> 16) << 40) |
		((uint64_t) bit_reverse_u8(v >> 24) << 32) |
		((uint64_t) bit_reverse_u8(v >> 32) << 24) |
		((uint64_t) bit_reverse_u8(v >> 40) << 16) |
		((uint64_t) bit_reverse_u8(v >> 48) << 8) |
		((uint64_t) bit_reverse_u8(v >> 56));
}
{
}
-#ifdef __cplusplus
+#ifdef __cplusplus
}
#endif
void rcu_barrier(void);
-#ifdef __cplusplus
+#ifdef __cplusplus
}
#endif
/*
* Note: the defer_rcu() API is currently EXPERIMENTAL. It may change in the
* future.
 *
* Important !
*
* Each thread queuing memory reclamation must be registered with
extern void rcu_defer_barrier(void);
extern void rcu_defer_barrier_thread(void);
-#ifdef __cplusplus
+#ifdef __cplusplus
}
#endif
#ifdef __cplusplus
extern "C" {
-#endif
+#endif
#if defined(_LGPL_SOURCE) || defined(URCU_INLINE_SMALL_FUNCTIONS)
*/
#define rcu_assign_pointer(p, v) rcu_set_pointer((&p), (v))
-#ifdef __cplusplus
+#ifdef __cplusplus
}
#endif
#ifdef __cplusplus
extern "C" {
-#endif
+#endif
#include <urcu/map/urcu-qsbr.h>
extern void rcu_register_thread(void);
extern void rcu_unregister_thread(void);
-#ifdef __cplusplus
+#ifdef __cplusplus
}
#endif
#ifdef __cplusplus
extern "C" {
-#endif
+#endif
#include <urcu/map/urcu.h>
{
}
-#ifdef __cplusplus
+#ifdef __cplusplus
}
#endif
#ifdef __cplusplus
extern "C" {
-#endif
+#endif
#ifdef CONFIG_RCU_ARM_HAVE_DMB
#define cmm_mb() __asm__ __volatile__ ("dmb":::"memory")
#define __NR_membarrier 389
#endif
-#ifdef __cplusplus
+#ifdef __cplusplus
}
#endif
#ifdef __cplusplus
extern "C" {
-#endif
+#endif
#include <stdlib.h>
#include <sys/time.h>
-#ifdef __cplusplus
+#ifdef __cplusplus
}
#endif
/*
* Architectures without cache coherency need something like the following:
*
 * #define cmm_mc() arch_cache_flush()
* #define cmm_rmc() arch_cache_flush_read()
* #define cmm_wmc() arch_cache_flush_write()
*
#ifdef __cplusplus
extern "C" {
-#endif
+#endif
/* Include size of POWER5+ L3 cache lines: 256 bytes */
#define CAA_CACHE_LINE_SIZE 256
#define __NR_membarrier 365
#endif
-#ifdef __cplusplus
+#ifdef __cplusplus
}
#endif
#ifdef __cplusplus
extern "C" {
-#endif
+#endif
#define CAA_CACHE_LINE_SIZE 128
#define __NR_membarrier 356
#endif
-#ifdef __cplusplus
+#ifdef __cplusplus
}
#endif
#ifdef __cplusplus
extern "C" {
-#endif
+#endif
#define CAA_CACHE_LINE_SIZE 256
#define cmm_rmb() membar_safe("#LoadLoad")
#define cmm_wmb() membar_safe("#StoreStore")
-#ifdef __cplusplus
+#ifdef __cplusplus
}
#endif
#ifdef __cplusplus
extern "C" {
-#endif
+#endif
#define CAA_CACHE_LINE_SIZE 128
#endif
#endif
-#ifdef __cplusplus
+#ifdef __cplusplus
}
#endif
#ifdef __cplusplus
extern "C" {
-#endif
+#endif
#define FUTEX_WAIT 0
#define FUTEX_WAKE 1
#endif
-#ifdef __cplusplus
+#ifdef __cplusplus
}
#endif
return URCU_TLS(rcu_reader)->ctr & RCU_GP_CTR_NEST_MASK;
}
-#ifdef __cplusplus
+#ifdef __cplusplus
}
#endif
#ifdef __cplusplus
extern "C" {
-#endif
+#endif
/**
* _rcu_dereference - reads (copy) a RCU-protected pointer to a local variable
*/
#define _rcu_assign_pointer(p, v) _rcu_set_pointer(&(p), v)
-#ifdef __cplusplus
+#ifdef __cplusplus
}
#endif
#ifdef __cplusplus
extern "C" {
-#endif
+#endif
/*
* This code section can only be included in LGPL 2.1 compatible source code.
cmm_smp_mb();
}
-#ifdef __cplusplus
+#ifdef __cplusplus
}
#endif
#ifdef __cplusplus
extern "C" {
-#endif
+#endif
/* Default is RCU_MEMBARRIER */
#if !defined(RCU_MEMBARRIER) && !defined(RCU_MB) && !defined(RCU_SIGNAL)
#ifndef _URCU_ARCH_UATOMIC_ARM_H
#define _URCU_ARCH_UATOMIC_ARM_H
-/*
+/*
* Atomics for ARM. This approach is usable on kernels back to 2.6.15.
*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
#ifdef __cplusplus
extern "C" {
-#endif
+#endif
/* xchg */
#define uatomic_xchg(addr, v) __sync_lock_test_and_set(addr, v)
-#ifdef __cplusplus
+#ifdef __cplusplus
}
#endif
#ifndef _URCU_ARCH_UATOMIC_GCC_H
#define _URCU_ARCH_UATOMIC_GCC_H
-/*
+/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
#ifdef __cplusplus
extern "C" {
-#endif
+#endif
/*
* If your platform doesn't have a full set of atomics, you will need
#define UATOMIC_HAS_ATOMIC_BYTE
#define UATOMIC_HAS_ATOMIC_SHORT
-#ifdef __cplusplus
+#ifdef __cplusplus
}
#endif
#ifndef _URCU_ARCH_UATOMIC_PPC_H
#define _URCU_ARCH_UATOMIC_PPC_H
-/*
+/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
#ifdef __cplusplus
extern "C" {
-#endif
+#endif
#define ILLEGAL_INSTR ".long 0xd00d00"
caa_cast_long_keep_sign(v), \
sizeof(*(addr))))
-#ifdef __cplusplus
+#ifdef __cplusplus
}
#endif
#ifdef __cplusplus
extern "C" {
-#endif
+#endif
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
#define COMPILER_HAVE_SHORT_MEM_OPERAND
caa_cast_long_keep_sign(_new),\
sizeof(*(addr)))
-#ifdef __cplusplus
+#ifdef __cplusplus
}
#endif
#ifndef _URCU_ARCH_UATOMIC_SPARC64_H
#define _URCU_ARCH_UATOMIC_SPARC64_H
-/*
+/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
#ifdef __cplusplus
extern "C" {
-#endif
+#endif
/* cmpxchg */
caa_cast_long_keep_sign(_new), \
sizeof(*(addr))))
-#ifdef __cplusplus
+#ifdef __cplusplus
}
#endif
#ifndef _URCU_ARCH_UATOMIC_UNKNOWN_H
#define _URCU_ARCH_UATOMIC_UNKNOWN_H
-/*
+/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
#ifndef _URCU_ARCH_UATOMIC_X86_H
#define _URCU_ARCH_UATOMIC_X86_H
-/*
+/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
#ifdef __cplusplus
extern "C" {
-#endif
+#endif
/*
* Derived from AO_compare_and_swap() and AO_test_and_set_full().
#define cmm_smp_mb__before_uatomic_dec() cmm_barrier()
#define cmm_smp_mb__after_uatomic_dec() cmm_barrier()
-#ifdef __cplusplus
+#ifdef __cplusplus
}
#endif