theoretically yielding slightly better performance.
+### Usage of `--enable-cds-lfht-iter-debug`
+
+By default, the extra debugging checks for lock-free hash table iterator
+traversal are disabled.
+
+Building liburcu with `--enable-cds-lfht-iter-debug`, and rebuilding the
+application to match the resulting ABI change, allows detecting cases
+where a hash table iterator is re-purposed for a different hash table
+while it is still being used to iterate on the original one.
+
+This option alters the rculfhash ABI. Make sure to compile both library
+and application with matching configuration.
+
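+The following minimal sketch (not part of this change) illustrates the
+kind of mistake these checks catch; `match_cb`, `nested_lookup` and the
+hash table pointers are placeholder names:
+
+```c
+#include <urcu.h>		/* default RCU flavor */
+#include <urcu/rculfhash.h>
+
+/* Placeholder match callback, for illustration only. */
+static int match_cb(struct cds_lfht_node *node, const void *key)
+{
+	(void) node;
+	(void) key;
+	return 1;
+}
+
+/* The calling thread must be registered with the RCU flavor. */
+static void nested_lookup(struct cds_lfht *outer_ht, struct cds_lfht *inner_ht,
+		unsigned long key_hash, const void *key)
+{
+	struct cds_lfht_iter iter;
+	struct cds_lfht_node *node;
+
+	rcu_read_lock();
+	cds_lfht_for_each(outer_ht, &iter, node) {
+		struct cds_lfht_iter inner_iter;
+
+		/*
+		 * Correct: the nested lookup uses its own iterator.
+		 * Passing &iter here instead would re-purpose the outer
+		 * iterator for inner_ht; with the checks enabled, the
+		 * next cds_lfht_next() on outer_ht then fails an
+		 * assertion instead of silently corrupting the traversal.
+		 */
+		cds_lfht_lookup(inner_ht, key_hash, match_cb, key, &inner_iter);
+	}
+	rcu_read_unlock();
+}
+```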
+
Make targets
------------
AH_TEMPLATE([CONFIG_RCU_HAVE_CLOCK_GETTIME], [clock_gettime() is detected.])
AH_TEMPLATE([CONFIG_RCU_FORCE_SYS_MEMBARRIER], [Require the operating system to support the membarrier system call for default and bulletproof flavors.])
AH_TEMPLATE([CONFIG_RCU_DEBUG], [Enable internal debugging self-checks. Introduce performance penalty.])
+AH_TEMPLATE([CONFIG_CDS_LFHT_ITER_DEBUG], [Enable extra debugging checks for lock-free hash table iterator traversal. Alters the rculfhash ABI. Make sure to compile both library and application with matching configuration.])
# Allow requiring the operating system to support the membarrier system
# call. Applies to default and bulletproof flavors.
AC_DEFINE([CONFIG_RCU_DEBUG], [1])
])
+# rculfhash iterator debugging
+AC_ARG_ENABLE([cds-lfht-iter-debug],
+ AS_HELP_STRING([--enable-cds-lfht-iter-debug], [Enable extra debugging checks for lock-free hash table iterator traversal. Alters the rculfhash ABI. Make sure to compile both library and application with matching configuration.]))
+AS_IF([test "x$enable_cds_lfht_iter_debug" = "xyes"], [
+ AC_DEFINE([CONFIG_CDS_LFHT_ITER_DEBUG], [1])
+])
+
# From the sched_setaffinity(2)'s man page:
# ~~~~
# The CPU affinity system calls were introduced in Linux kernel 2.5.8.
test "x$enable_rcu_debug" = "xyes" && value=1 || value=0
PPRINT_PROP_BOOL([Internal debugging], $value)
+# rculfhash iterator debug enabled/disabled
+test "x$enable_cds_lfht_iter_debug" = "xyes" && value=1 || value=0
+PPRINT_PROP_BOOL([Lock-free hash table iterator debugging], $value)
+
PPRINT_PROP_BOOL([Multi-flavor support], 1)
report_bindir="`eval eval echo $bindir`"
/* Expose multi-flavor support */
#define CONFIG_RCU_MULTIFLAVOR 1
+
+/* Enable extra debugging checks for lock-free hash table iterator
+ traversal. */
+#undef CONFIG_CDS_LFHT_ITER_DEBUG
* _after_ including your URCU flavor.
*/
+#include <urcu/config.h>
#include <stdint.h>
#include <pthread.h>
#include <urcu/compiler.h>
extern "C" {
#endif
+struct cds_lfht;
+
/*
* cds_lfht_node: Contains the next pointers and reverse-hash
* value required for lookup and traversal of the hash table.
/* cds_lfht_iter: Used to track state while traversing a hash chain. */
struct cds_lfht_iter {
struct cds_lfht_node *node, *next;
+ /*
+ * For debugging purposes, build both API users and the rculfhash
+ * library with the --enable-cds-lfht-iter-debug configure option,
+ * which defines CONFIG_CDS_LFHT_ITER_DEBUG. This enables extra
+ * consistency checks for calls to cds_lfht_next() or
+ * cds_lfht_next_duplicate() made after the iterator has been
+ * re-purposed to iterate on a different hash table. This is a
+ * common programming mistake when a hash table lookup is nested
+ * within a hash table traversal.
+ */
+#ifdef CONFIG_CDS_LFHT_ITER_DEBUG
+ struct cds_lfht *lfht;
+#endif
};
static inline
return iter->node;
}
-struct cds_lfht;
struct rcu_flavor_struct;
/*
static void cds_lfht_init_worker(const struct rcu_flavor_struct *flavor);
static void cds_lfht_fini_worker(const struct rcu_flavor_struct *flavor);
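+
+/*
+ * Iterator debugging helpers: when CONFIG_CDS_LFHT_ITER_DEBUG is
+ * defined, cds_lfht_iter_debug_set_ht() records which hash table an
+ * iterator was initialized for, and cds_lfht_iter_debug_assert() is
+ * used to check that the iterator is passed back to that same hash
+ * table. Both compile to no-ops otherwise.
+ */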
+#ifdef CONFIG_CDS_LFHT_ITER_DEBUG
+
+static
+void cds_lfht_iter_debug_set_ht(struct cds_lfht *ht, struct cds_lfht_iter *iter)
+{
+ iter->lfht = ht;
+}
+
+#define cds_lfht_iter_debug_assert(...) assert(__VA_ARGS__)
+
+#else
+
+static
+void cds_lfht_iter_debug_set_ht(struct cds_lfht *ht, struct cds_lfht_iter *iter)
+{
+}
+
+#define cds_lfht_iter_debug_assert(...)
+
+#endif
+
/*
* Algorithm to reverse bits in a word by lookup table, extended to
* 64-bit words.
if (unique_ret
&& !is_bucket(next)
&& clear_flag(iter)->reverse_hash == node->reverse_hash) {
- struct cds_lfht_iter d_iter = { .node = node, .next = iter, };
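+ /*
+ * Set .lfht directly: this temporary iterator is passed to the
+ * duplicate-lookup code below, which performs the iterator
+ * debugging consistency check when it is enabled.
+ */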
+ struct cds_lfht_iter d_iter = {
+ .node = node,
+ .next = iter,
+#ifdef CONFIG_CDS_LFHT_ITER_DEBUG
+ .lfht = ht,
+#endif
+ };
/*
* uniquely adding inserts the node as the first
struct cds_lfht_node *node, *next, *bucket;
unsigned long reverse_hash, size;
+ cds_lfht_iter_debug_set_ht(ht, iter);
+
reverse_hash = bit_reverse_ulong(hash);
size = rcu_dereference(ht->size);
struct cds_lfht_node *node, *next;
unsigned long reverse_hash;
+ cds_lfht_iter_debug_assert(ht == iter->lfht);
node = iter->node;
reverse_hash = node->reverse_hash;
next = iter->next;
{
struct cds_lfht_node *node, *next;
+ cds_lfht_iter_debug_assert(ht == iter->lfht);
node = clear_flag(iter->next);
for (;;) {
if (caa_unlikely(is_end(node))) {
void cds_lfht_first(struct cds_lfht *ht, struct cds_lfht_iter *iter)
{
+ cds_lfht_iter_debug_set_ht(ht, iter);
/*
* Get next after first bucket node. The first bucket node is the
* first node of the linked list.