* In the unfortunate event that the number of CPUs reported is
* inaccurate, we use modulo arithmetic on the number of CPUs we got.
*/
-#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)
+#if defined(HAVE_SYSCONF)
static
void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
poison_free(count);
}
+#if defined(HAVE_SCHED_GETCPU)
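+/*
+ * Pick the split-counter index from the CPU the caller runs on. If
+ * sched_getcpu() fails, fall back to the node's hash so updates are
+ * still spread across the split counters, and so that the add and
+ * del for a given node land on the same counter.
+ */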
static
-int ht_get_split_count_index(void)
+int ht_get_split_count_index(unsigned long hash)
{
int cpu;
assert(split_count_mask >= 0);
cpu = sched_getcpu();
if (unlikely(cpu < 0))
- return cpu;
+ return hash & split_count_mask;
else
return cpu & split_count_mask;
}
+#else /* #if defined(HAVE_SCHED_GETCPU) */
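+/*
+ * Without sched_getcpu(), derive the split-counter index from the
+ * node's hash alone.
+ */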
+static
+int ht_get_split_count_index(unsigned long hash)
+{
+ return hash & split_count_mask;
+}
+#endif /* #else #if defined(HAVE_SCHED_GETCPU) */
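+/*
+ * Callers pass the node's hash so an index can be derived even when
+ * the current CPU number is unavailable.
+ */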
static
-void ht_count_add(struct cds_lfht *ht, unsigned long size)
+void ht_count_add(struct cds_lfht *ht, unsigned long size, unsigned long hash)
{
unsigned long split_count;
int index;
if (unlikely(!ht->split_count))
return;
- index = ht_get_split_count_index();
- if (unlikely(index < 0))
- return;
+ index = ht_get_split_count_index(hash);
split_count = uatomic_add_return(&ht->split_count[index].add, 1);
if (unlikely(!(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
long count;
}
static
-void ht_count_del(struct cds_lfht *ht, unsigned long size)
+void ht_count_del(struct cds_lfht *ht, unsigned long size, unsigned long hash)
{
unsigned long split_count;
int index;
if (unlikely(!ht->split_count))
return;
- index = ht_get_split_count_index();
- if (unlikely(index < 0))
- return;
+ index = ht_get_split_count_index(hash);
split_count = uatomic_add_return(&ht->split_count[index].del, 1);
if (unlikely(!(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
long count;
}
}
-#else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
+#else /* #if defined(HAVE_SYSCONF) */
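+/*
+ * Split-counters are unused when sysconf() is unavailable; -2 marks
+ * the masks as unsupported (as opposed to -1, not yet initialized).
+ */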
static const long nr_cpus_mask = -2;
static const long split_count_mask = -2;
}
static
-void ht_count_add(struct cds_lfht *ht, unsigned long size)
+void ht_count_add(struct cds_lfht *ht, unsigned long size, unsigned long hash)
{
}
static
-void ht_count_del(struct cds_lfht *ht, unsigned long size)
+void ht_count_del(struct cds_lfht *ht, unsigned long size, unsigned long hash)
{
}
-#endif /* #else #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
+#endif /* #else #if defined(HAVE_SYSCONF) */
static
size = rcu_dereference(ht->t.size);
_cds_lfht_add(ht, size, node, NULL, 0);
- ht_count_add(ht, size);
+ ht_count_add(ht, size, hash);
}
struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
size = rcu_dereference(ht->t.size);
_cds_lfht_add(ht, size, node, &iter, 0);
if (iter.node == node)
- ht_count_add(ht, size);
+ ht_count_add(ht, size, hash);
return iter.node;
}
for (;;) {
_cds_lfht_add(ht, size, node, &iter, 0);
if (iter.node == node) {
- ht_count_add(ht, size);
+ ht_count_add(ht, size, hash);
return NULL;
}
int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_iter *iter)
{
- unsigned long size;
+ unsigned long size, hash;
int ret;
size = rcu_dereference(ht->t.size);
ret = _cds_lfht_del(ht, size, iter->node, 0);
- if (!ret)
- ht_count_del(ht, size);
+ if (!ret) {
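+ /* Recover the node's hash by reversing its stored reverse_hash. */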
+ hash = bit_reverse_ulong(iter->node->p.reverse_hash);
+ ht_count_del(ht, size, hash);
+ }
return ret;
}
}
}
-#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)
+#if defined(HAVE_SYSCONF)
static
void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,