#include <urcu-defer.h>
#include <errno.h>
#include <urcu-ht.h>
+#include <urcu/jhash.h>
struct rcu_ht_node;
};
struct rcu_ht {
- struct rcu_ht_node *tbl[HASH_SIZE];
+ struct rcu_ht_node **tbl;
ht_hash_fct hash_fct;
void (*free_fct)(void *data); /* fct to free data */
+ struct ht_size {
+ unsigned long add;
+ unsigned long lookup;
+ } size;
};
-struct rcu_ht *ht_new(ht_hash_fct hash_fct, void (*free_fct)(void *data))
+struct rcu_ht *ht_new(ht_hash_fct hash_fct, void (*free_fct)(void *data),
+ unsigned long init_size)
{
struct rcu_ht *ht;
ht = calloc(1, sizeof(struct rcu_ht));
ht->hash_fct = hash_fct;
ht->free_fct = free_fct;
+ ht->size.add = init_size;
+ ht->size.lookup = init_size;
+ ht->tbl = calloc(init_size, sizeof(struct rcu_ht_node *));
return ht;
}
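For illustration, a minimal caller-side sketch of the new constructor. The single-argument hash signature is an assumption inferred from the ht->hash_fct(key) call sites below (the ht_hash_fct typedef itself is not shown in this patch), and my_hash and the bucket count are made up for the example:

#include <stdlib.h>
#include <urcu-ht.h>

/* Assumed hash adapter: one argument, reduced modulo the table size by
 * the hash table code itself. */
static unsigned long my_hash(void *key)
{
        return (unsigned long)key >> 3;
}

int main(void)
{
        /* 4096 initial buckets; stored data is released with free(). */
        struct rcu_ht *ht = ht_new(my_hash, free, 4096);

        /* ... lookups and updates from reader/writer threads would go
         * here ... */

        ht_destroy(ht);
        return 0;
}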
-/* delete all elements */
-void ht_delete_all(struct rcu_ht *ht)
-{
- unsigned long i;
- struct rcu_ht_node **prev, *node;
-
- for (i = 0; i < HASH_SIZE; i++) {
- rcu_read_lock();
-
- prev = &ht->tbl[i];
- node = rcu_dereference(*prev);
- /*
- * Cut the head, therefore whole bucket will be unused
- * after GP. (except for concurrent additions)
- */
- rcu_assign_pointer(prev, NULL);
- for (;;) {
- if (likely(!node)) {
- break;
- }
- prev = &node->next;
- /*
- * FIXME: calling call_rcu within a read-side C.S. is a
- * bug, because it may call synchronize_rcu() if the
- * callback queue is full.
- */
- if (node->data)
- call_rcu(ht->free_fct, node->data);
- call_rcu(free, node);
- node = rcu_dereference(*prev);
- }
- rcu_read_unlock();
- }
-}
-
-/*
- * Should only be called when no more concurrent readers nor writers can
- * possibly access the table.
- */
-void ht_destroy(struct rcu_ht *ht)
-{
- ht_delete_all(ht);
- free(ht);
-}
-
void *ht_lookup(struct rcu_ht *ht, void *key)
{
unsigned long hash;
struct rcu_ht_node *node;
void *ret;
- hash = ht->hash_fct(key);
+ hash = ht->hash_fct(key) % ht->size.lookup;
rcu_read_lock();
node = rcu_dereference(ht->tbl[hash]);
for (;;) {
if (likely(!node)) {
ret = NULL;
new_head = calloc(1, sizeof(struct rcu_ht_node));
new_head->key = key;
new_head->data = data;
- hash = ht->hash_fct(key);
+ hash = ht->hash_fct(key) % ht->size.add;
/* here comes the fun and tricky part.
* Add at the beginning with a cmpxchg.
retry:
rcu_read_lock();
old_head = node = rcu_dereference(ht->tbl[hash]);
for (;;) {
if (likely(!node)) {
break;
}
node = rcu_dereference(node->next);
}
if (rcu_cmpxchg_pointer(&ht->tbl[hash], old_head, new_head) != old_head)
goto restart;
end:
rcu_read_unlock();
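The cmpxchg-based insertion above is the classic lock-free push: link the new node against the head that was observed, then publish it only if the head has not changed in the meantime; on failure, branch to the restart label (outside this hunk) and try again. A standalone sketch of the same retry pattern, using C11 atomics in place of rcu_cmpxchg_pointer() so it compiles on its own (stack_push and struct snode are illustrative names, not part of the patch):

#include <stdatomic.h>
#include <stdlib.h>

struct snode {
        struct snode *next;
        void *data;
};

static _Atomic(struct snode *) head;    /* plays the role of one bucket */

static int stack_push(void *data)
{
        struct snode *n = calloc(1, sizeof(*n));
        struct snode *old;

        if (!n)
                return -1;
        n->data = data;
        old = atomic_load(&head);
        do {
                /* Link against the head we last observed. */
                n->next = old;
                /* Publish; on failure "old" is refreshed and we retry. */
        } while (!atomic_compare_exchange_weak(&head, &old, n));
        return 0;
}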
unsigned long hash;
void *data;
- hash = ht->hash_fct(key);
+ hash = ht->hash_fct(key) % ht->size.lookup;
retry:
rcu_read_lock();
prev = &ht->tbl[hash];
node = rcu_dereference(*prev);
for (;;) {
if (likely(!node)) {
}
}
-unsigned long stupid_hash(void *key)
+/* Delete all old elements. Allow concurrent writer accesses. */
+void ht_delete_all(struct rcu_ht *ht)
+{
+ unsigned long i;
+ struct rcu_ht_node **prev, *node, *inext;
+
+ for (i = 0; i < ht->size.lookup; i++) {
+ rcu_read_lock();
+ prev = &ht->tbl[i];
+ /*
+ * Cut the head. After that, we own the first element.
+ */
+ node = rcu_xchg_pointer(prev, NULL);
+ if (!node) {
+ rcu_read_unlock();
+ continue;
+ }
+ /*
+ * We manage a list shared with concurrent writers and readers.
+ * Note that a concurrently added node may or may not be deleted by
+ * us, depending on whether it arrives before or after the head is
+ * cut. "node" points to our first node. Remove the elements one by
+ * one, starting from the head.
+ */
+ for (;;) {
+ prev = &node->next;
+ inext = rcu_xchg_pointer(prev, NULL);
+ /*
+ * "node" is the first element of the list we have cut.
+ * We therefore own it, no concurrent writer may delete
+ * it. There can only be concurrent lookups. Concurrent
+ * add can only be done on a bucket head, but we've cut
+ * it already. inext is also owned by us, because we
+ * have exchanged it for "NULL". It will therefore be
+ * safe to use it after a G.P.
+ */
+ rcu_read_unlock();
+ if (node->data)
+ call_rcu(ht->free_fct, node->data);
+ call_rcu(free, node);
+ if (likely(!inext))
+ break;
+ rcu_read_lock();
+ node = inext;
+ }
+ }
+}
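The comments above describe the key trick: atomically exchanging the bucket head with NULL detaches the whole chain in one step, after which the deleting thread owns it and only pre-existing readers can still see the nodes (hence the call_rcu() deferral). Below is a simplified standalone sketch of that head-cut idea, with C11 atomics standing in for rcu_xchg_pointer(); it frees immediately instead of deferring and omits the per-node next-pointer exchange, so it is only valid when no readers are present (bucket_drain and struct lnode are illustrative names):

#include <stdatomic.h>
#include <stdlib.h>

struct lnode {
        struct lnode *next;
        void *data;
};

static void bucket_drain(_Atomic(struct lnode *) *bucket)
{
        /* Detach the whole chain atomically; concurrent pushes now land
         * on the fresh NULL head and are unaffected. */
        struct lnode *node = atomic_exchange(bucket, NULL);

        while (node) {
                struct lnode *next = node->next;        /* chain is private now */
                free(node->data);
                free(node);
                node = next;
        }
}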
+
+/*
+ * Should only be called when no concurrent readers or writers can
+ * still access the table.
+ */
+void ht_destroy(struct rcu_ht *ht)
+{
+ ht_delete_all(ht);
+ free(ht->tbl);
+ free(ht);
+}
+
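As the comment above ht_destroy() notes, teardown is only safe once nothing can touch the table anymore. A hedged sketch of the ordering a test program might use (the worker-thread bookkeeping is illustrative; how pending call_rcu() callbacks are flushed depends on the urcu flavour in use and is only indicated by a comment here):

#include <pthread.h>
#include <urcu-ht.h>

void table_teardown(struct rcu_ht *ht, pthread_t *workers, int nr_workers)
{
        int i;

        /* 1) No reader or writer thread may still use the table. */
        for (i = 0; i < nr_workers; i++)
                pthread_join(workers[i], NULL);

        /* 2) Deferred frees queued via call_rcu() must have run as well
         *    (flavour-specific barrier, not shown here). */

        /* 3) Only now reclaim the remaining nodes, the bucket array and
         *    the table itself. */
        ht_destroy(ht);
}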
+uint32_t ht_jhash(void *key, uint32_t length, uint32_t initval)
{
- return (unsigned long)key % HASH_SIZE;
+ return jhash(key, length, initval);
}
--- /dev/null
+#ifndef _KCOMPAT_JHASH_H
+#define _KCOMPAT_JHASH_H
+
+/* jhash.h: Jenkins hash support.
+ *
+ * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net)
+ *
+ * http://burtleburtle.net/bob/hash/
+ *
+ * These are the credits from Bob's sources:
+ *
+ * lookup2.c, by Bob Jenkins, December 1996, Public Domain.
+ * hash(), hash2(), hash3, and mix() are externally useful functions.
+ * Routines to test the hash are included if SELF_TEST is defined.
+ * You can use this free for any purpose. It has no warranty.
+ *
+ * Copyright (C) 2003 David S. Miller (davem@redhat.com)
+ *
+ * I've modified Bob's hash to be useful in the Linux kernel, and
+ * any bugs present are surely my fault. -DaveM
+ */
+
+#include <stdint.h>
+
+typedef uint32_t u32;
+typedef uint8_t u8;
+
+/* NOTE: Arguments are modified. */
+#define __jhash_mix(a, b, c) \
+{ \
+ a -= b; a -= c; a ^= (c>>13); \
+ b -= c; b -= a; b ^= (a<<8); \
+ c -= a; c -= b; c ^= (b>>13); \
+ a -= b; a -= c; a ^= (c>>12); \
+ b -= c; b -= a; b ^= (a<<16); \
+ c -= a; c -= b; c ^= (b>>5); \
+ a -= b; a -= c; a ^= (c>>3); \
+ b -= c; b -= a; b ^= (a<<10); \
+ c -= a; c -= b; c ^= (b>>15); \
+}
+
+/* The golden ratio: an arbitrary value */
+#define JHASH_GOLDEN_RATIO 0x9e3779b9
+
+/* The most generic version, hashes an arbitrary sequence
+ * of bytes. No alignment or length assumptions are made about
+ * the input key.
+ */
+static inline u32 jhash(const void *key, u32 length, u32 initval)
+{
+ u32 a, b, c, len;
+ const u8 *k = key;
+
+ len = length;
+ a = b = JHASH_GOLDEN_RATIO;
+ c = initval;
+
+ while (len >= 12) {
+ a += (k[0] +((u32)k[1]<<8) +((u32)k[2]<<16) +((u32)k[3]<<24));
+ b += (k[4] +((u32)k[5]<<8) +((u32)k[6]<<16) +((u32)k[7]<<24));
+ c += (k[8] +((u32)k[9]<<8) +((u32)k[10]<<16)+((u32)k[11]<<24));
+
+ __jhash_mix(a,b,c);
+
+ k += 12;
+ len -= 12;
+ }
+
+ c += length;
+ switch (len) {
+ case 11: c += ((u32)k[10]<<24);
+ case 10: c += ((u32)k[9]<<16);
+ case 9 : c += ((u32)k[8]<<8);
+ case 8 : b += ((u32)k[7]<<24);
+ case 7 : b += ((u32)k[6]<<16);
+ case 6 : b += ((u32)k[5]<<8);
+ case 5 : b += k[4];
+ case 4 : a += ((u32)k[3]<<24);
+ case 3 : a += ((u32)k[2]<<16);
+ case 2 : a += ((u32)k[1]<<8);
+ case 1 : a += k[0];
+ };
+
+ __jhash_mix(a,b,c);
+
+ return c;
+}
+
+#endif /* _KCOMPAT_JHASH_H */
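A small usage sketch for the header above: hash a string key with jhash() and fold the result onto a bucket index, much like the hash table does with its size.lookup/size.add fields. The 4096-bucket table size and the choice of JHASH_GOLDEN_RATIO as initval are arbitrary for the example:

#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <urcu/jhash.h>

int main(void)
{
        const char *key = "example-key";
        uint32_t hash = jhash(key, (uint32_t) strlen(key), JHASH_GOLDEN_RATIO);

        printf("hash=%08" PRIx32 " bucket=%" PRIu32 "\n", hash, hash % 4096);
        return 0;
}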