/*
 * Copyright (C) 2006 Bob Jenkins
 * Copyright (C) 2011 EfficiOS Inc.
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

/*
 * These are functions for producing 32-bit hashes for hash table lookup.
 * hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final() are
 * externally useful functions. Routines to test the hash are included if
 * SELF_TEST is defined. You can use this free for any purpose. It's in the
 * public domain. It has no warranty.
 *
 * You probably want to use hashlittle(). hashlittle() and hashbig() hash byte
 * arrays. hashlittle() is faster than hashbig() on little-endian machines.
 * Intel and AMD are little-endian machines. On second thought, you probably
 * want hashlittle2(), which is identical to hashlittle() except it returns two
 * 32-bit hashes for the price of one. You could implement hashbig2() if you
 * wanted, but I haven't bothered here.
 *
 * If you want to find a hash of, say, exactly 7 integers, do
 *   a = i1; b = i2; c = i3;
 *   mix(a,b,c);
 *   a += i4; b += i5; c += i6;
 *   mix(a,b,c);
 *   a += i7;
 *   final(a,b,c);
 * then use c as the hash value. If you have a variable length array of
 * 4-byte integers to hash, use hashword(). If you have a byte array (like
 * a character string), use hashlittle(). If you have several byte arrays, or
 * a mix of things, see the comments above hashlittle().
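 *
 * For illustration, a minimal sketch of the hashword() route; the seed of 0
 * and the 10-bit table size below are arbitrary choices, not requirements:
 *   uint32_t vals[5] = { 10, 20, 30, 40, 50 };
 *   uint32_t h = hashword(vals, 5, 0);   (length is counted in uint32_t's)
 *   uint32_t bucket = h & hashmask(10);  (a table of hashsize(10) entries)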
 *
 * Why is this so big? I read 12 bytes at a time into 3 4-byte integers, then
 * mix those integers. This is fast (you can do a lot more thorough mixing
 * with 12*3 instructions on 3 integers than you can with 3 instructions on 1
 * byte), but shoehorning those bytes into integers efficiently is messy.
 */

#include <stdint.h>	/* defines uint32_t etc */
#include <stdio.h>	/* defines printf for tests */
#include <string.h>	/* defines strlen and strcmp */
#include <sys/param.h>	/* attempt to define endianness */
#include <time.h>	/* defines time_t for timings in the test */
#include <urcu/compiler.h>

#include <common/compat/endian.h>	/* attempt to define endianness */
#include <common/common.h>
#include <common/hashtable/hashtable.h>

/*
 * My best guess at if you are big-endian or little-endian. This may
 * need to be tweaked.
 */
#if (defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && \
	__BYTE_ORDER == __LITTLE_ENDIAN) || \
	(defined(i386) || defined(__i386__) || defined(__i486__) || \
	defined(__i586__) || defined(__i686__) || defined(vax) || defined(MIPSEL))
# define HASH_LITTLE_ENDIAN 1
# define HASH_BIG_ENDIAN 0
#elif (defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && \
	__BYTE_ORDER == __BIG_ENDIAN) || \
	(defined(sparc) || defined(POWERPC) || defined(mc68000) || defined(sel))
# define HASH_LITTLE_ENDIAN 0
# define HASH_BIG_ENDIAN 1
#else
# define HASH_LITTLE_ENDIAN 0
# define HASH_BIG_ENDIAN 0
#endif

#define hashsize(n) ((uint32_t)1<<(n))
#define hashmask(n) (hashsize(n)-1)
#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))
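
/*
 * For illustration only: rot(x,k) is a 32-bit left rotation, so
 * rot(0x80000001, 4) == 0x00000018, and hashmask(10) == 0x3ff, the mask that
 * folds a hash into a table of hashsize(10) == 1024 entries.
 */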

/*
 * mix -- mix 3 32-bit values reversibly.
 *
 * This is reversible, so any information in (a,b,c) before mix() is
 * still in (a,b,c) after mix().
 *
 * If four pairs of (a,b,c) inputs are run through mix(), or through
 * mix() in reverse, there are at least 32 bits of the output that
 * are sometimes the same for one pair and different for another pair.
 * This was tested for:
 * * pairs that differed by one bit, by two bits, in any combination
 * of top bits of (a,b,c), or in any combination of bottom bits of
 * (a,b,c).
 * * "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
 * the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
 * is commonly produced by subtraction) looks like a single 1-bit
 * difference.
 * * the base values were pseudorandom, all zero but one bit set, or
 * all zero plus a counter that starts at zero.
 *
 * Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement satisfy
 * this. Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing
 * for "differ" defined as + with a one-bit base and a two-bit delta. I
 * used http://burtleburtle.net/bob/hash/avalanche.html to choose
 * the operations, constants, and arrangements of the variables.
 *
 * This does not achieve avalanche. There are input bits of (a,b,c)
 * that fail to affect some output bits of (a,b,c), especially of a. The
 * most thoroughly mixed value is c, but it doesn't really even achieve
 * avalanche in c.
 *
 * This allows some parallelism. Read-after-writes are good at doubling
 * the number of bits affected, so the goal of mixing pulls in the opposite
 * direction as the goal of parallelism. I did what I could. Rotates
 * seem to cost as much as shifts on every machine I could lay my hands
 * on, and rotates are much kinder to the top and bottom bits, so I used
 * rotates.
 */
#define mix(a,b,c) \
{ \
	a -= c; a ^= rot(c, 4); c += b; \
	b -= a; b ^= rot(a, 6); a += c; \
	c -= b; c ^= rot(b, 8); b += a; \
	a -= c; a ^= rot(c,16); c += b; \
	b -= a; b ^= rot(a,19); a += c; \
	c -= b; c ^= rot(b, 4); b += a; \
}

/*
 * final -- final mixing of 3 32-bit values (a,b,c) into c
 *
 * Pairs of (a,b,c) values differing in only a few bits will usually
 * produce values of c that look totally different. This was tested for
 * * pairs that differed by one bit, by two bits, in any combination
 * of top bits of (a,b,c), or in any combination of bottom bits of
 * (a,b,c).
 * * "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
 * the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
 * is commonly produced by subtraction) looks like a single 1-bit
 * difference.
 * * the base values were pseudorandom, all zero but one bit set, or
 * all zero plus a counter that starts at zero.
 *
 * These constants passed:
 *   14 11 25 16 4 14 24
 *   12 14 25 16 4 14 24
 * and these came close.
 */
#define final(a,b,c) \
{ \
	c ^= b; c -= rot(b,14); \
	a ^= c; a -= rot(c,11); \
	b ^= a; b -= rot(a,25); \
	c ^= b; c -= rot(b,16); \
	a ^= c; a -= rot(c,4); \
	b ^= a; b -= rot(a,14); \
	c ^= b; c -= rot(b,24); \
}

/*
 * k - the key, an array of uint32_t values
 * length - the length of the key, in uint32_ts
 * initval - the previous hash, or an arbitrary value
 */
static uint32_t __attribute__((unused)) hashword(const uint32_t *k,
		size_t length, uint32_t initval)
{
	/* Set up the internal state */
	a = b = c = 0xdeadbeef + (((uint32_t) length) << 2) + initval;

	/*----------------------------------------- handle most of the key */

	/*----------------------------------- handle the last 3 uint32_t's */
	switch (length) {	/* all the case statements fall through */
	case 0:		/* case 0: nothing left to add */

	/*---------------------------------------------- report the result */

/*
 * hashword2() -- same as hashword(), but take two seeds and return two 32-bit
 * values. pc and pb must both be nonnull, and *pc and *pb must both be
 * initialized with seeds. If you pass in (*pb)==0, the output (*pc) will be
 * the same as the return value from hashword().
 */
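
/*
 * For illustration, one way a caller can fold hashword2()'s two outputs into
 * a single 64-bit hash; the seeds of 0 are arbitrary:
 *   uint32_t key[2] = { 0x12345678, 0x9abcdef0 };
 *   uint32_t pc = 0, pb = 0;
 *   hashword2(key, 2, &pc, &pb);
 *   uint64_t h64 = ((uint64_t) pb << 32) | pc;
 */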
static void __attribute__((unused)) hashword2(const uint32_t *k, size_t length,
		uint32_t *pc, uint32_t *pb)
{
	/* Set up the internal state */
	a = b = c = 0xdeadbeef + ((uint32_t) (length << 2)) + *pc;
	c += *pb;

	case 0:		/* case 0: nothing left to add */

/*
 * hashlittle() -- hash a variable-length key into a 32-bit value
 * k : the key (the unaligned variable-length array of bytes)
 * length : the length of the key, counting by bytes
 * initval : can be any 4-byte value
 * Returns a 32-bit value. Every bit of the key affects every bit of
 * the return value. Two keys differing by one or two bits will have
 * totally different hash values.
 *
 * The best hash table sizes are powers of 2. There is no need to do
 * mod a prime (mod is sooo slow!). If you need less than 32 bits,
 * use a bitmask. For example, if you need only 10 bits, do
 *   h = (h & hashmask(10));
 * In which case, the hash table should have hashsize(10) elements.
 *
 * If you are hashing n strings (uint8_t **)k, do it like this:
 *   for (i = 0, h = 0; i < n; ++i) h = hashlittle(k[i], len[i], h);
 *
 * By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this
 * code any way you wish, private, educational, or commercial. It's free.
 *
 * Use for hash table lookup, or anything where one collision in 2^^32 is
 * acceptable. Do NOT use for cryptographic purposes.
 */
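
/*
 * For illustration, a minimal sketch of a string lookup; the seed of 0 and
 * the 10-bit table size are arbitrary:
 *   const char *name = "example";
 *   uint32_t h = hashlittle(name, strlen(name), 0);
 *   uint32_t bucket = h & hashmask(10);
 */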
static uint32_t __attribute__((unused)) hashlittle(const void *key,
		size_t length, uint32_t initval)
{
	uint32_t a, b, c;
	union {
		const void *ptr;
		size_t i;
	} u;	/* needed for Mac Powerbook G4 */

	/* Set up the internal state */
	a = b = c = 0xdeadbeef + ((uint32_t) length) + initval;

	u.ptr = key;
	if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
		const uint32_t *k = (const uint32_t *) key;	/* read 32-bit chunks */

		/*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
		while (length > 12) {
294 * "k[2]&0xffffff" actually reads beyond the end of the string, but
295 * then masks off the part it's not allowed to read. Because the
296 * string is aligned, the masked-off tail is in the same word as the
297 * rest of the string. Every machine with memory protection I've seen
298 * does it on word boundaries, so is OK with this. But VALGRIND will
299 * still catch it and complain. The masking trick does make the hash
300 * noticably faster for short strings (like English words).
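		/*
		 * For example: with a 9-byte aligned key, the code below adds k[0]
		 * and k[1] whole and then only "k[2] & 0xff", so the three bytes
		 * read past the end of the key are masked off and never affect the
		 * hash.
		 */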
#ifndef VALGRIND
		switch (length) {
		case 12: c += k[2]; b += k[1]; a += k[0]; break;
		case 11: c += k[2] & 0xffffff; b += k[1]; a += k[0]; break;
		case 10: c += k[2] & 0xffff; b += k[1]; a += k[0]; break;
		case 9: c += k[2] & 0xff; b += k[1]; a += k[0]; break;
		case 8: b += k[1]; a += k[0]; break;
		case 7: b += k[1] & 0xffffff; a += k[0]; break;
		case 6: b += k[1] & 0xffff; a += k[0]; break;
		case 5: b += k[1] & 0xff; a += k[0]; break;
		case 4: a += k[0]; break;
		case 3: a += k[0] & 0xffffff; break;
		case 2: a += k[0] & 0xffff; break;
		case 1: a += k[0] & 0xff; break;
		case 0: return c;	/* zero length strings require no mixing */
		}
#else /* make valgrind happy */
		k8 = (const uint8_t *) k;
		switch (length) {
		case 12: c += k[2]; b += k[1]; a += k[0]; break;
		case 11: c += ((uint32_t) k8[10]) << 16;	/* fall through */
		case 10: c += ((uint32_t) k8[9]) << 8;	/* fall through */
		case 9: c += k8[8];	/* fall through */
		case 8: b += k[1]; a += k[0]; break;
		case 7: b += ((uint32_t) k8[6]) << 16;	/* fall through */
		case 6: b += ((uint32_t) k8[5]) << 8;	/* fall through */
		case 5: b += k8[4];	/* fall through */
		case 4: a += k[0]; break;
		case 3: a += ((uint32_t) k8[2]) << 16;	/* fall through */
		case 2: a += ((uint32_t) k8[1]) << 8;	/* fall through */
		case 1: a += k8[0]; break;
#endif /* !valgrind */
	} else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
		const uint16_t *k = (const uint16_t *) key;	/* read 16-bit chunks */

		/*--------------- all but last block: aligned reads and different mixing */
		while (length > 12) {
			a += k[0] + (((uint32_t) k[1]) << 16);
			b += k[2] + (((uint32_t) k[3]) << 16);
			c += k[4] + (((uint32_t) k[5]) << 16);
		k8 = (const uint8_t *) k;
		switch (length) {
		case 12: c += k[4] + (((uint32_t) k[5]) << 16);
			b += k[2] + (((uint32_t) k[3]) << 16);
			a += k[0] + (((uint32_t) k[1]) << 16);
			break;
		case 11: c += ((uint32_t) k8[10]) << 16;	/* fall through */
		case 10: c += k[4];
			b += k[2] + (((uint32_t) k[3]) << 16);
			a += k[0] + (((uint32_t) k[1]) << 16);
			break;
		case 9: c += k8[8];	/* fall through */
		case 8: b += k[2] + (((uint32_t) k[3]) << 16);
			a += k[0] + (((uint32_t) k[1]) << 16);
			break;
		case 7: b += ((uint32_t) k8[6]) << 16;	/* fall through */
		case 6: b += k[2];
			a += k[0] + (((uint32_t) k[1]) << 16);
			break;
		case 5: b += k8[4];	/* fall through */
		case 4: a += k[0] + (((uint32_t) k[1]) << 16);
			break;
		case 3: a += ((uint32_t) k8[2]) << 16;	/* fall through */
		case 2: a += k[0];
			break;
		case 1: a += k8[0];
			break;
		case 0: return c;	/* zero length requires no mixing */
		}
	} else {	/* need to read the key one byte at a time */
		const uint8_t *k = (const uint8_t *) key;

		while (length > 12) {
			a += k[0];
			a += ((uint32_t) k[1]) << 8;
			a += ((uint32_t) k[2]) << 16;
			a += ((uint32_t) k[3]) << 24;
			b += k[4];
			b += ((uint32_t) k[5]) << 8;
			b += ((uint32_t) k[6]) << 16;
			b += ((uint32_t) k[7]) << 24;
			c += k[8];
			c += ((uint32_t) k[9]) << 8;
			c += ((uint32_t) k[10]) << 16;
			c += ((uint32_t) k[11]) << 24;

		switch (length) {	/* all the case statements fall through */
		case 12: c += ((uint32_t) k[11]) << 24;
		case 11: c += ((uint32_t) k[10]) << 16;
		case 10: c += ((uint32_t) k[9]) << 8;
		case 9: c += k[8];
		case 8: b += ((uint32_t) k[7]) << 24;
		case 7: b += ((uint32_t) k[6]) << 16;
		case 6: b += ((uint32_t) k[5]) << 8;
		case 5: b += k[4];
		case 4: a += ((uint32_t) k[3]) << 24;
		case 3: a += ((uint32_t) k[2]) << 16;
		case 2: a += ((uint32_t) k[1]) << 8;

unsigned long hash_key_u64(const void *_key, unsigned long seed)
{
	v.v64 = (uint64_t) seed;
	key.v64 = *(const uint64_t *) _key;
	hashword2(key.v32, 2, &v.v32[0], &v.v32[1]);

#if (CAA_BITS_PER_LONG == 64)
/*
 * Hash function for number value.
 * Pass the value itself as the key, not its address.
 */
unsigned long hash_key_ulong(const void *_key, unsigned long seed)
{
	uint64_t __key = (uint64_t) _key;

	return (unsigned long) hash_key_u64(&__key, seed);
}
#else
/*
 * Hash function for number value.
 * Pass the value itself as the key, not its address.
 */
unsigned long hash_key_ulong(const void *_key, unsigned long seed)
{
	uint32_t key = (uint32_t) _key;

	return hashword(&key, 1, seed);
}
#endif /* CAA_BITS_PER_LONG */
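
/*
 * For illustration: because the number itself is passed as the key (not its
 * address), a caller hashes the value 42 like this, with an arbitrary seed:
 *   unsigned long h = hash_key_ulong((void *) 42UL, seed);
 */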

/*
 * Hash function for string.
 */
unsigned long hash_key_str(const void *key, unsigned long seed)
{
	return hashlittle(key, strlen((const char *) key), seed);
}

/*
 * Hash function for two uint64_t.
 */
unsigned long hash_key_two_u64(const void *key, unsigned long seed)
{
	const struct lttng_ht_two_u64 *k =
			(const struct lttng_ht_two_u64 *) key;

	return hash_key_u64(&k->key1, seed) ^ hash_key_u64(&k->key2, seed);
}
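
/*
 * For illustration, hashing a composite key made of two 64-bit values, with
 * an arbitrary seed:
 *   struct lttng_ht_two_u64 k = { .key1 = 7, .key2 = 9 };
 *   unsigned long h = hash_key_two_u64(&k, seed);
 */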

/*
 * Hash compare function for number value.
 */
int hash_match_key_ulong(const void *key1, const void *key2)

/*
 * Hash compare function for number value.
 */
int hash_match_key_u64(const void *key1, const void *key2)
{
	if (*(const uint64_t *) key1 == *(const uint64_t *) key2) {

/*
 * Hash compare function for string.
 */
int hash_match_key_str(const void *key1, const void *key2)
{
	if (strcmp(key1, key2) == 0) {

/*
 * Hash compare function for two uint64_t.
 */
int hash_match_key_two_u64(const void *key1, const void *key2)
{
	const struct lttng_ht_two_u64 *k1 =
			(const struct lttng_ht_two_u64 *) key1;
	const struct lttng_ht_two_u64 *k2 =
			(const struct lttng_ht_two_u64 *) key2;

	if (hash_match_key_u64(&k1->key1, &k2->key1) &&
			hash_match_key_u64(&k1->key2, &k2->key2)) {