unsigned long resize_target;
int resize_initiated;
struct rcu_head head;
- struct _rcu_ht_node *tbl[0];
+ struct _cds_lfht_node *tbl[0];
};
-struct rcu_ht {
+struct cds_lfht {
struct rcu_table *t; /* shared */
- ht_hash_fct hash_fct;
- ht_compare_fct compare_fct;
+ cds_lfht_hash_fct hash_fct;
+ cds_lfht_compare_fct compare_fct;
unsigned long hash_seed;
pthread_mutex_t resize_mutex; /* resize mutex: add/del mutex */
unsigned int in_progress_resize, in_progress_destroy;
- void (*ht_call_rcu)(struct rcu_head *head,
+ void (*cds_lfht_call_rcu)(struct rcu_head *head,
void (*func)(struct rcu_head *head));
};
struct rcu_resize_work {
struct rcu_head head;
- struct rcu_ht *ht;
+ struct cds_lfht *ht;
};
/*
}
static
-void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth);
+void cds_lfht_resize_lazy(struct cds_lfht *ht, struct rcu_table *t, int growth);
static
-void check_resize(struct rcu_ht *ht, struct rcu_table *t,
+void check_resize(struct cds_lfht *ht, struct rcu_table *t,
uint32_t chain_len)
{
if (chain_len > 100)
dbg_printf("WARNING: large chain length: %u.\n",
chain_len);
if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
- ht_resize_lazy(ht, t,
+ cds_lfht_resize_lazy(ht, t,
get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
}
static
-struct rcu_ht_node *clear_flag(struct rcu_ht_node *node)
+struct cds_lfht_node *clear_flag(struct cds_lfht_node *node)
{
- return (struct rcu_ht_node *) (((unsigned long) node) & ~FLAGS_MASK);
+ return (struct cds_lfht_node *) (((unsigned long) node) & ~FLAGS_MASK);
}
static
-int is_removed(struct rcu_ht_node *node)
+int is_removed(struct cds_lfht_node *node)
{
return ((unsigned long) node) & REMOVED_FLAG;
}
static
-struct rcu_ht_node *flag_removed(struct rcu_ht_node *node)
+struct cds_lfht_node *flag_removed(struct cds_lfht_node *node)
{
- return (struct rcu_ht_node *) (((unsigned long) node) | REMOVED_FLAG);
+ return (struct cds_lfht_node *) (((unsigned long) node) | REMOVED_FLAG);
}
static
-int is_dummy(struct rcu_ht_node *node)
+int is_dummy(struct cds_lfht_node *node)
{
return ((unsigned long) node) & DUMMY_FLAG;
}
static
-struct rcu_ht_node *flag_dummy(struct rcu_ht_node *node)
+struct cds_lfht_node *flag_dummy(struct cds_lfht_node *node)
{
- return (struct rcu_ht_node *) (((unsigned long) node) | DUMMY_FLAG);
+ return (struct cds_lfht_node *) (((unsigned long) node) | DUMMY_FLAG);
}
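/*
 * The helpers above rely on flag definitions along these lines (a sketch;
 * the exact macros live earlier in this file):
 *
 *   REMOVED_FLAG == (1UL << 0)      marks a logically deleted node
 *   DUMMY_FLAG   == (1UL << 1)      marks a bucket dummy node
 *   FLAGS_MASK   == ((1UL << 2) - 1)
 *
 * Both bits fit in the low-order bits of a pointer to a 4-byte-aligned
 * structure, which is why the node structures carry that alignment
 * requirement.
 */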
static
* Remove all logically deleted nodes from a bucket up to a certain node key.
*/
static
-void _ht_gc_bucket(struct rcu_ht_node *dummy, struct rcu_ht_node *node)
+void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node)
{
- struct rcu_ht_node *iter_prev, *iter, *next, *new_next;
+ struct cds_lfht_node *iter_prev, *iter, *next, *new_next;
for (;;) {
iter_prev = dummy;
}
static
-struct rcu_ht_node *_ht_add(struct rcu_ht *ht, struct rcu_table *t,
- struct rcu_ht_node *node, int unique, int dummy)
+struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, struct rcu_table *t,
+ struct cds_lfht_node *node, int unique, int dummy)
{
- struct rcu_ht_node *iter_prev, *iter, *next, *new_node, *new_next,
+ struct cds_lfht_node *iter_prev, *iter, *next, *new_node, *new_next,
*dummy_node;
- struct _rcu_ht_node *lookup;
+ struct _cds_lfht_node *lookup;
unsigned long hash, index, order;
if (!t->size) {
index = hash & (t->size - 1);
order = get_count_order_ulong(index + 1);
lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
- iter_prev = (struct rcu_ht_node *) lookup;
+ iter_prev = (struct cds_lfht_node *) lookup;
/* We can always skip the dummy node initially */
iter = rcu_dereference(iter_prev->p.next);
assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
index = hash & (t->size - 1);
order = get_count_order_ulong(index + 1);
lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
- dummy_node = (struct rcu_ht_node *) lookup;
- _ht_gc_bucket(dummy_node, node);
+ dummy_node = (struct cds_lfht_node *) lookup;
+ _cds_lfht_gc_bucket(dummy_node, node);
return node;
}
static
-int _ht_remove(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
+int _cds_lfht_remove(struct cds_lfht *ht, struct rcu_table *t,
+ struct cds_lfht_node *node)
{
- struct rcu_ht_node *dummy, *next, *old;
- struct _rcu_ht_node *lookup;
+ struct cds_lfht_node *dummy, *next, *old;
+ struct _cds_lfht_node *lookup;
int flagged = 0;
unsigned long hash, index, order;
index = hash & (t->size - 1);
order = get_count_order_ulong(index + 1);
lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
- dummy = (struct rcu_ht_node *) lookup;
- _ht_gc_bucket(dummy, node);
+ dummy = (struct cds_lfht_node *) lookup;
+ _cds_lfht_gc_bucket(dummy, node);
end:
/*
* Only the flagging action indicated that we (and no other)
}
static
-void init_table(struct rcu_ht *ht, struct rcu_table *t,
+void init_table(struct cds_lfht *ht, struct rcu_table *t,
unsigned long first_order, unsigned long len_order)
{
unsigned long i, end_order;
len = !i ? 1 : 1UL << (i - 1);
dbg_printf("init order %lu len: %lu\n", i, len);
- t->tbl[i] = calloc(len, sizeof(struct _rcu_ht_node));
+ t->tbl[i] = calloc(len, sizeof(struct _cds_lfht_node));
for (j = 0; j < len; j++) {
dbg_printf("init entry: i %lu j %lu hash %lu\n",
i, j, !i ? 0 : (1UL << (i - 1)) + j);
- struct rcu_ht_node *new_node =
- (struct rcu_ht_node *) &t->tbl[i][j];
+ struct cds_lfht_node *new_node =
+ (struct cds_lfht_node *) &t->tbl[i][j];
new_node->p.reverse_hash =
bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
- (void) _ht_add(ht, t, new_node, 0, 1);
+ (void) _cds_lfht_add(ht, t, new_node, 0, 1);
if (CMM_LOAD_SHARED(ht->in_progress_destroy))
break;
}
t->resize_initiated = 0;
}
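/*
 * Worked example of the bucket layout built by init_table(): t->tbl[0]
 * holds one dummy node (bucket index 0) and t->tbl[i], for i > 0, holds
 * 2^(i-1) dummy nodes covering bucket indices [2^(i-1), 2^i).  The helper
 * below is purely illustrative (not part of the hash table API); it mirrors
 * the index-to-(order, offset) computation used at the lookup sites above,
 * with bucket index 0 guarded explicitly.
 */
static inline
void example_bucket_coords(unsigned long index,
		unsigned long *order, unsigned long *offset)
{
	*order = get_count_order_ulong(index + 1);
	*offset = *order ? index & ((1UL << (*order - 1)) - 1) : 0;
	/*
	 * e.g. index 5: order = 3, offset = 1, so its dummy node is
	 * t->tbl[3][1] (t->tbl[3] covers bucket indices 4..7).
	 */
}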
-struct rcu_ht *ht_new(ht_hash_fct hash_fct,
- ht_compare_fct compare_fct,
- unsigned long hash_seed,
- unsigned long init_size,
- void (*ht_call_rcu)(struct rcu_head *head,
- void (*func)(struct rcu_head *head)))
+struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
+ cds_lfht_compare_fct compare_fct,
+ unsigned long hash_seed,
+ unsigned long init_size,
+ void (*cds_lfht_call_rcu)(struct rcu_head *head,
+ void (*func)(struct rcu_head *head)))
{
- struct rcu_ht *ht;
+ struct cds_lfht *ht;
unsigned long order;
- ht = calloc(1, sizeof(struct rcu_ht));
+ ht = calloc(1, sizeof(struct cds_lfht));
ht->hash_fct = hash_fct;
ht->compare_fct = compare_fct;
ht->hash_seed = hash_seed;
- ht->ht_call_rcu = ht_call_rcu;
+ ht->cds_lfht_call_rcu = cds_lfht_call_rcu;
ht->in_progress_resize = 0;
/* this mutex should not nest in read-side C.S. */
pthread_mutex_init(&ht->resize_mutex, NULL);
order = get_count_order_ulong(max(init_size, 1)) + 1;
- ht->t = calloc(1, sizeof(struct rcu_table)
- + (order * sizeof(struct _rcu_ht_node *)));
+ ht->t = calloc(1, sizeof(struct rcu_table)
+ + (order * sizeof(struct _cds_lfht_node *)));
ht->t->size = 0;
pthread_mutex_lock(&ht->resize_mutex);
init_table(ht, ht->t, 0, order);
return ht;
}
-struct rcu_ht_node *ht_lookup(struct rcu_ht *ht, void *key, size_t key_len)
+struct cds_lfht_node *cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key_len)
{
struct rcu_table *t;
- struct rcu_ht_node *node, *next;
- struct _rcu_ht_node *lookup;
+ struct cds_lfht_node *node, *next;
+ struct _cds_lfht_node *lookup;
unsigned long hash, reverse_hash, index, order;
hash = ht->hash_fct(key, key_len, ht->hash_seed);
lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n",
hash, index, order, index & ((1UL << (order - 1)) - 1));
- node = (struct rcu_ht_node *) lookup;
+ node = (struct cds_lfht_node *) lookup;
for (;;) {
if (unlikely(!node))
break;
return node;
}
-void ht_add(struct rcu_ht *ht, struct rcu_ht_node *node)
+void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node)
{
struct rcu_table *t;
unsigned long hash;
node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
t = rcu_dereference(ht->t);
- (void) _ht_add(ht, t, node, 0, 0);
+ (void) _cds_lfht_add(ht, t, node, 0, 0);
}
-struct rcu_ht_node *ht_add_unique(struct rcu_ht *ht, struct rcu_ht_node *node)
+struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
+ struct cds_lfht_node *node)
{
struct rcu_table *t;
unsigned long hash;
node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
t = rcu_dereference(ht->t);
- return _ht_add(ht, t, node, 1, 0);
+ return _cds_lfht_add(ht, t, node, 1, 0);
}
-int ht_remove(struct rcu_ht *ht, struct rcu_ht_node *node)
+int cds_lfht_remove(struct cds_lfht *ht, struct cds_lfht_node *node)
{
struct rcu_table *t;
t = rcu_dereference(ht->t);
- return _ht_remove(ht, t, node);
+ return _cds_lfht_remove(ht, t, node);
}
static
-int ht_delete_dummy(struct rcu_ht *ht)
+int cds_lfht_delete_dummy(struct cds_lfht *ht)
{
struct rcu_table *t;
- struct rcu_ht_node *node;
- struct _rcu_ht_node *lookup;
+ struct cds_lfht_node *node;
+ struct _cds_lfht_node *lookup;
unsigned long order, i;
t = ht->t;
/* Check that the table is empty */
lookup = &t->tbl[0][0];
- node = (struct rcu_ht_node *) lookup;
+ node = (struct cds_lfht_node *) lookup;
do {
node = clear_flag(node)->p.next;
if (!is_dummy(node))
* Should only be called when no more concurrent readers nor writers can
* possibly access the table.
*/
-int ht_destroy(struct rcu_ht *ht)
+int cds_lfht_destroy(struct cds_lfht *ht)
{
int ret;
CMM_STORE_SHARED(ht->in_progress_destroy, 1);
while (uatomic_read(&ht->in_progress_resize))
poll(NULL, 0, 100); /* wait for 100ms */
- ret = ht_delete_dummy(ht);
+ ret = cds_lfht_delete_dummy(ht);
if (ret)
return ret;
free(ht->t);
return ret;
}
-void ht_count_nodes(struct rcu_ht *ht,
+void cds_lfht_count_nodes(struct cds_lfht *ht,
unsigned long *count,
unsigned long *removed)
{
struct rcu_table *t;
- struct rcu_ht_node *node, *next;
- struct _rcu_ht_node *lookup;
+ struct cds_lfht_node *node, *next;
+ struct _cds_lfht_node *lookup;
unsigned long nr_dummy = 0;
*count = 0;
t = rcu_dereference(ht->t);
/* Count non-dummy nodes in the table */
lookup = &t->tbl[0][0];
- node = (struct rcu_ht_node *) lookup;
+ node = (struct cds_lfht_node *) lookup;
do {
next = rcu_dereference(node->p.next);
if (is_removed(next)) {
}
static
-void ht_free_table_cb(struct rcu_head *head)
+void cds_lfht_free_table_cb(struct rcu_head *head)
{
struct rcu_table *t =
caa_container_of(head, struct rcu_table, head);
/* called with resize mutex held */
static
-void _do_ht_resize(struct rcu_ht *ht)
+void _do_cds_lfht_resize(struct cds_lfht *ht)
{
unsigned long new_size, old_size, old_order, new_order;
struct rcu_table *new_t, *old_t;
new_order = get_count_order_ulong(new_size) + 1;
dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
old_size, old_order, new_size, new_order);
- new_t = malloc(sizeof(struct rcu_table)
- + (new_order * sizeof(struct _rcu_ht_node *)));
+ new_t = malloc(sizeof(struct rcu_table)
+ + (new_order * sizeof(struct _cds_lfht_node *)));
assert(new_size > old_size);
memcpy(&new_t->tbl, &old_t->tbl,
- old_order * sizeof(struct _rcu_ht_node *));
+ old_order * sizeof(struct _cds_lfht_node *));
init_table(ht, new_t, old_order, new_order - old_order);
/* Changing table and size atomically wrt lookups */
rcu_assign_pointer(ht->t, new_t);
- ht->ht_call_rcu(&old_t->head, ht_free_table_cb);
+ ht->cds_lfht_call_rcu(&old_t->head, cds_lfht_free_table_cb);
}
static
t->size << growth_order);
}
-void ht_resize(struct rcu_ht *ht, int growth)
+void cds_lfht_resize(struct cds_lfht *ht, int growth)
{
struct rcu_table *t = rcu_dereference(ht->t);
unsigned long target_size;
if (t->size < target_size) {
CMM_STORE_SHARED(t->resize_initiated, 1);
pthread_mutex_lock(&ht->resize_mutex);
- _do_ht_resize(ht);
+ _do_cds_lfht_resize(ht);
pthread_mutex_unlock(&ht->resize_mutex);
}
}
{
struct rcu_resize_work *work =
caa_container_of(head, struct rcu_resize_work, head);
- struct rcu_ht *ht = work->ht;
+ struct cds_lfht *ht = work->ht;
pthread_mutex_lock(&ht->resize_mutex);
- _do_ht_resize(ht);
+ _do_cds_lfht_resize(ht);
pthread_mutex_unlock(&ht->resize_mutex);
free(work);
cmm_smp_mb(); /* finish resize before decrement */
}
static
-void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth)
+void cds_lfht_resize_lazy(struct cds_lfht *ht, struct rcu_table *t, int growth)
{
struct rcu_resize_work *work;
unsigned long target_size;
cmm_smp_mb(); /* increment resize count before calling it */
work = malloc(sizeof(*work));
work->ht = ht;
- ht->ht_call_rcu(&work->head, do_resize_cb);
+ ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
CMM_STORE_SHARED(t->resize_initiated, 1);
}
}
static unsigned long __thread lookup_fail;
static unsigned long __thread lookup_ok;
-static struct rcu_ht *test_ht;
+static struct cds_lfht *test_ht;
struct test_data {
int a;
void *thr_reader(void *_count)
{
unsigned long long *count = _count;
- struct rcu_ht_node *node;
+ struct cds_lfht_node *node;
printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n",
"reader", pthread_self(), (unsigned long)gettid());
for (;;) {
rcu_read_lock();
- node = ht_lookup(test_ht,
+ node = cds_lfht_lookup(test_ht,
(void *)(unsigned long)(rand_r(&rand_lookup) % rand_pool),
sizeof(void *));
if (node == NULL)
static
void free_node_cb(struct rcu_head *head)
{
- struct rcu_ht_node *node =
- caa_container_of(head, struct rcu_ht_node, head);
+ struct cds_lfht_node *node =
+ caa_container_of(head, struct cds_lfht_node, head);
free(node);
}
void *thr_writer(void *_count)
{
- struct rcu_ht_node *node, *ret_node;
+ struct cds_lfht_node *node, *ret_node;
struct wr_count *count = _count;
int ret;
for (;;) {
if (add_only || rand_r(&rand_lookup) & 1) {
- node = malloc(sizeof(struct rcu_ht_node));
+ node = malloc(sizeof(struct cds_lfht_node));
rcu_read_lock();
- ht_node_init(node,
+ cds_lfht_node_init(node,
(void *)(unsigned long)(rand_r(&rand_lookup) % rand_pool),
sizeof(void *));
if (add_unique)
- ret_node = ht_add_unique(test_ht, node);
+ ret_node = cds_lfht_add_unique(test_ht, node);
else
- ht_add(test_ht, node);
+ cds_lfht_add(test_ht, node);
rcu_read_unlock();
if (add_unique && ret_node != node) {
free(node);
} else {
/* May delete */
rcu_read_lock();
- node = ht_lookup(test_ht,
+ node = cds_lfht_lookup(test_ht,
(void *)(unsigned long)(rand_r(&rand_lookup) % rand_pool),
sizeof(void *));
if (node)
- ret = ht_remove(test_ht, node);
+ ret = cds_lfht_remove(test_ht, node);
else
ret = -ENOENT;
rcu_read_unlock();
tid_writer = malloc(sizeof(*tid_writer) * nr_writers);
count_reader = malloc(sizeof(*count_reader) * nr_readers);
count_writer = malloc(sizeof(*count_writer) * nr_writers);
- test_ht = ht_new(test_hash, test_compare, 0x42UL,
+ test_ht = cds_lfht_new(test_hash, test_compare, 0x42UL,
init_hash_size, call_rcu);
err = create_all_cpu_call_rcu_data(0);
}
printf("Counting nodes... ");
fflush(stdout);
- ht_count_nodes(test_ht, &count, &removed);
+ cds_lfht_count_nodes(test_ht, &count, &removed);
printf("done.\n");
if (count || removed)
printf("WARNING: nodes left in the hash table upon destroy: "
"%lu nodes + %lu logically removed.\n", count, removed);
- ret = ht_destroy(test_ht);
+ ret = cds_lfht_destroy(test_ht);
if (ret)
printf_verbose("final delete aborted\n");
#endif
/*
- * struct rcu_ht_node and struct _rcu_ht_node should be aligned on
+ * struct cds_lfht_node and struct _cds_lfht_node should be aligned on
* 4-byte boundaries because the two lower bits are used as flags.
*/
-struct _rcu_ht_node {
- struct rcu_ht_node *next; /* ptr | DUMMY_FLAG | REMOVED_FLAG */
+struct _cds_lfht_node {
+ struct cds_lfht_node *next; /* ptr | DUMMY_FLAG | REMOVED_FLAG */
unsigned long reverse_hash;
};
-struct rcu_ht_node {
+struct cds_lfht_node {
/* cache-hot for iteration */
- struct _rcu_ht_node p; /* needs to be first field */
+ struct _cds_lfht_node p; /* needs to be first field */
void *key;
unsigned int key_len;
/* cache-cold for iteration */
struct rcu_head head;
};
-struct rcu_ht;
+struct cds_lfht;
/*
* Caution !
* Ensure reader and writer threads are registered as urcu readers.
*/
-typedef unsigned long (*ht_hash_fct)(void *key, size_t length,
- unsigned long seed);
-typedef unsigned long (*ht_compare_fct)(void *key1, size_t key1_len,
- void *key2, size_t key2_len);
+typedef unsigned long (*cds_lfht_hash_fct)(void *key, size_t length,
+ unsigned long seed);
+typedef unsigned long (*cds_lfht_compare_fct)(void *key1, size_t key1_len,
+ void *key2, size_t key2_len);
/*
- * ht_node_init - initialize a hash table node
+ * cds_lfht_node_init - initialize a hash table node
*/
static inline
-void ht_node_init(struct rcu_ht_node *node, void *key,
- size_t key_len)
+void cds_lfht_node_init(struct cds_lfht_node *node, void *key,
+ size_t key_len)
{
node->key = key;
node->key_len = key_len;
}
/*
- * ht_new - allocate a hash table.
+ * cds_lfht_new - allocate a hash table.
*
* init_size must be a power of two.
*/
-struct rcu_ht *ht_new(ht_hash_fct hash_fct,
- ht_compare_fct compare_fct,
- unsigned long hash_seed,
- unsigned long init_size,
- void (*ht_call_rcu)(struct rcu_head *head,
+struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
+ cds_lfht_compare_fct compare_fct,
+ unsigned long hash_seed,
+ unsigned long init_size,
+ void (*cds_lfht_call_rcu)(struct rcu_head *head,
void (*func)(struct rcu_head *head)));
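/*
 * Minimal usage sketch: create a table wired to the urcu call_rcu service,
 * the way the test program does with its test_hash/test_compare pair.
 * my_hash and my_compare below are hypothetical caller-provided functions
 * matching cds_lfht_hash_fct and cds_lfht_compare_fct.
 */
static unsigned long my_hash(void *key, size_t length, unsigned long seed);
static unsigned long my_compare(void *key1, size_t key1_len,
		void *key2, size_t key2_len);

static struct cds_lfht *create_table(void)
{
	return cds_lfht_new(my_hash, my_compare, 0x42UL /* hash seed */,
			4096 /* init_size, must be a power of two */,
			call_rcu);
}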
/*
- * ht_destroy - destroy a hash table.
+ * cds_lfht_destroy - destroy a hash table.
*/
-int ht_destroy(struct rcu_ht *ht);
+int cds_lfht_destroy(struct cds_lfht *ht);
/*
- * ht_count_nodes - count the number of nodes in the hash table.
+ * cds_lfht_count_nodes - count the number of nodes in the hash table.
*
* Call with rcu_read_lock held.
*/
-void ht_count_nodes(struct rcu_ht *ht,
- unsigned long *count,
- unsigned long *removed);
+void cds_lfht_count_nodes(struct cds_lfht *ht,
+ unsigned long *count,
+ unsigned long *removed);
/*
- * ht_lookup - lookup a node by key.
+ * cds_lfht_lookup - lookup a node by key.
*
* Returns NULL if not found.
* Call with rcu_read_lock held.
*/
-struct rcu_ht_node *ht_lookup(struct rcu_ht *ht, void *key, size_t key_len);
+struct cds_lfht_node *cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key_len);
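/*
 * Usage sketch: lookups run under the RCU read-side lock, as in the test
 * program's reader thread.  key_is_present() is a hypothetical wrapper.
 */
static int key_is_present(struct cds_lfht *ht, void *key, size_t key_len)
{
	struct cds_lfht_node *node;

	rcu_read_lock();
	node = cds_lfht_lookup(ht, key, key_len);
	rcu_read_unlock();
	return node != NULL;	/* pointer only compared, never dereferenced */
}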
/*
- * ht_next - get the next item with same key (after a lookup).
+ * cds_lfht_next - get the next item with same key (after a lookup).
*
* Returns NULL if no following node exists with same key.
- * RCU read-side lock must be held across ht_lookup and ht_next calls, and also
- * between ht_next calls using the node returned by a previous ht_next.
+ * RCU read-side lock must be held across cds_lfht_lookup and cds_lfht_next calls, and also
+ * between cds_lfht_next calls using the node returned by a previous cds_lfht_next.
* Call with rcu_read_lock held.
*/
-struct rcu_ht_node *ht_next(struct rcu_ht *ht, struct rcu_ht_node *node);
+struct cds_lfht_node *cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_node *node);
/*
- * ht_add - add a node to the hash table.
+ * cds_lfht_add - add a node to the hash table.
*
* Call with rcu_read_lock held.
*/
-void ht_add(struct rcu_ht *ht, struct rcu_ht_node *node);
+void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node);
/*
- * ht_add_unique - add a node to hash table, if key is not present.
+ * cds_lfht_add_unique - add a node to hash table, if key is not present.
*
* Returns the node added upon success.
- * Returns the unique node already present upon failure. If ht_add_unique fails,
+ * Returns the unique node already present upon failure. If cds_lfht_add_unique fails,
* the node passed as parameter should be freed by the caller.
* Call with rcu_read_lock held.
*/
-struct rcu_ht_node *ht_add_unique(struct rcu_ht *ht, struct rcu_ht_node *node);
+struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht, struct cds_lfht_node *node);
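/*
 * Usage sketch mirroring the test program's writer thread: when the key is
 * already present, cds_lfht_add_unique returns the existing node, so the
 * caller frees the node it tried to insert.  add_key_once() is hypothetical.
 */
static void add_key_once(struct cds_lfht *ht, void *key, size_t key_len)
{
	struct cds_lfht_node *node, *ret_node;

	node = malloc(sizeof(struct cds_lfht_node));
	cds_lfht_node_init(node, key, key_len);
	rcu_read_lock();
	ret_node = cds_lfht_add_unique(ht, node);
	rcu_read_unlock();
	if (ret_node != node)
		free(node);	/* key was already present */
}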
/*
- * ht_remove - remove node from hash table.
+ * cds_lfht_remove - remove node from hash table.
*
- * Node can be looked up with ht_lookup. RCU read-side lock must be held between
+ * Node can be looked up with cds_lfht_lookup. RCU read-side lock must be held between
* lookup and removal.
* Call with rcu_read_lock held.
*/
-int ht_remove(struct rcu_ht *ht, struct rcu_ht_node *node);
+int cds_lfht_remove(struct cds_lfht *ht, struct cds_lfht_node *node);
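/*
 * Usage sketch: lookup and removal in the same read-side critical section,
 * then deferred reclaim through call_rcu once a grace period has elapsed.
 * free_node_cb is a caller-provided callback like the test program's
 * (caa_container_of on the embedded rcu_head, then free).
 */
static void free_node_cb(struct rcu_head *head);

static int del_key(struct cds_lfht *ht, void *key, size_t key_len)
{
	struct cds_lfht_node *node;
	int ret = -ENOENT;

	rcu_read_lock();
	node = cds_lfht_lookup(ht, key, key_len);
	if (node)
		ret = cds_lfht_remove(ht, node);
	rcu_read_unlock();
	if (!ret)
		call_rcu(&node->head, free_node_cb);
	return ret;
}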
/*
- * ht_resize - Force a hash table resize
+ * cds_lfht_resize - Force a hash table resize
* @growth: growth order (current size is multiplied by 2^growth)
*
* Currently, only expand operation is supported (growth >= 0).
*/
-void ht_resize(struct rcu_ht *ht, int growth);
+void cds_lfht_resize(struct cds_lfht *ht, int growth);
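/*
 * Example: cds_lfht_resize(ht, 2) multiplies the number of buckets by 4.
 * The call takes the resize mutex, so it must not nest inside an RCU
 * read-side critical section.
 */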
#ifdef __cplusplus
}