/*
 * Copyright (C) - Bob Jenkins, May 2006
 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
 * Copyright (C) 2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; only version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307, USA.
 */
/*
 * These are functions for producing 32-bit hashes for hash table lookup.
 * hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final() are
 * externally useful functions. Routines to test the hash are included if
 * SELF_TEST is defined. You can use this free for any purpose. It's in the
 * public domain. It has no warranty.
 *
 * You probably want to use hashlittle(). hashlittle() and hashbig() hash byte
 * arrays. hashlittle() is faster than hashbig() on little-endian machines.
 * Intel and AMD are little-endian machines. On second thought, you probably
 * want hashlittle2(), which is identical to hashlittle() except it returns two
 * 32-bit hashes for the price of one. You could implement hashbig2() if you
 * wanted but I haven't bothered here.
 *
 * If you want to find a hash of, say, exactly 7 integers, do
 *   a = i1; b = i2; c = i3;
 *   mix(a, b, c);
 *   a += i4; b += i5; c += i6;
 *   mix(a, b, c);
 *   a += i7;
 *   final(a, b, c);
 * then use c as the hash value. If you have a variable length array of
 * 4-byte integers to hash, use hashword(). If you have a byte array (like
 * a character string), use hashlittle(). If you have several byte arrays, or
 * a mix of things, see the comments above hashlittle().
 *
 * Why is this so big? I read 12 bytes at a time into 3 4-byte integers, then
 * mix those integers. This is fast (you can do a lot more thorough mixing
 * with 12*3 instructions on 3 integers than you can with 3 instructions on 1
 * byte), but shoehorning those bytes into integers efficiently is messy.
 */
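
/*
 * For example (illustrative only; the arrays and seed values are arbitrary),
 * hashing an array of uint32_t with hashword() versus hashing raw bytes with
 * hashlittle():
 *
 *   uint32_t ints[4] = { 1, 2, 3, 4 };
 *   uint32_t h1 = hashword(ints, 4, 0);        (four uint32_t's)
 *   uint32_t h2 = hashlittle("hello", 5, 0);   (five bytes)
 */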
#include <stdint.h>	/* defines uint32_t etc */
#include <stdio.h>	/* defines printf for tests */
#include <string.h>	/* strlen() and strcmp() used by the key helpers below */
#include <sys/param.h>	/* attempt to define endianness */
#include <time.h>	/* defines time_t for timings in the test */
#include <urcu/compiler.h>

#include <common/compat/endian.h>	/* attempt to define endianness */
/*
 * My best guess at if you are big-endian or little-endian. This may
 * need adjustment.
 */
#if (defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && \
	__BYTE_ORDER == __LITTLE_ENDIAN) || \
	(defined(i386) || defined(__i386__) || defined(__i486__) || \
	defined(__i586__) || defined(__i686__) || defined(vax) || defined(MIPSEL))
# define HASH_LITTLE_ENDIAN 1
# define HASH_BIG_ENDIAN 0
#elif (defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && \
	__BYTE_ORDER == __BIG_ENDIAN) || \
	(defined(sparc) || defined(POWERPC) || defined(mc68000) || defined(sel))
# define HASH_LITTLE_ENDIAN 0
# define HASH_BIG_ENDIAN 1
#else
# define HASH_LITTLE_ENDIAN 0
# define HASH_BIG_ENDIAN 0
#endif
#define hashsize(n) ((uint32_t)1<<(n))
#define hashmask(n) (hashsize(n)-1)
#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))
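
/*
 * For instance, with uint32_t operands rot(0x80000001, 1) yields 0x00000003
 * (a plain 32-bit left rotation), and a table of hashsize(10) == 1024 buckets
 * can be indexed with:
 *
 *   idx = h & hashmask(10);
 */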
/*
 * mix -- mix 3 32-bit values reversibly.
 *
 * This is reversible, so any information in (a,b,c) before mix() is
 * still in (a,b,c) after mix().
 *
 * If four pairs of (a,b,c) inputs are run through mix(), or through
 * mix() in reverse, there are at least 32 bits of the output that
 * are sometimes the same for one pair and different for another pair.
 * This was tested for:
 * * pairs that differed by one bit, by two bits, in any combination
 *   of top bits of (a,b,c), or in any combination of bottom bits of
 *   (a,b,c).
 * * "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
 *   the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
 *   is commonly produced by subtraction) look like a single 1-bit
 *   difference.
 * * the base values were pseudorandom, all zero but one bit set, or
 *   all zero plus a counter that starts at zero.
 *
 * Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that
 * satisfy this are
 *     4  6  8 16 19  4
 *     9 15  3 18 27 15
 *    14  9  3  7 17  3
 * Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing
 * for "differ" defined as + with a one-bit base and a two-bit delta. I
 * used http://burtleburtle.net/bob/hash/avalanche.html to choose
 * the operations, constants, and arrangements of the variables.
 *
 * This does not achieve avalanche. There are input bits of (a,b,c)
 * that fail to affect some output bits of (a,b,c), especially of a. The
 * most thoroughly mixed value is c, but it doesn't really even achieve
 * avalanche in c.
 *
 * This allows some parallelism. Read-after-writes are good at doubling
 * the number of bits affected, so the goal of mixing pulls in the opposite
 * direction as the goal of parallelism. I did what I could. Rotates
 * seem to cost as much as shifts on every machine I could lay my hands
 * on, and rotates are much kinder to the top and bottom bits, so I used
 * rotates.
 */
#define mix(a,b,c) \
{ \
	a -= c; a ^= rot(c, 4); c += b; \
	b -= a; b ^= rot(a, 6); a += c; \
	c -= b; c ^= rot(b, 8); b += a; \
	a -= c; a ^= rot(c,16); c += b; \
	b -= a; b ^= rot(a,19); a += c; \
	c -= b; c ^= rot(b, 4); b += a; \
}
/*
 * final -- final mixing of 3 32-bit values (a,b,c) into c
 *
 * Pairs of (a,b,c) values differing in only a few bits will usually
 * produce values of c that look totally different. This was tested for
 * * pairs that differed by one bit, by two bits, in any combination
 *   of top bits of (a,b,c), or in any combination of bottom bits of
 *   (a,b,c).
 * * "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
 *   the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
 *   is commonly produced by subtraction) look like a single 1-bit
 *   difference.
 * * the base values were pseudorandom, all zero but one bit set, or
 *   all zero plus a counter that starts at zero.
 *
 * These constants passed:
 *   14 11 25 16 4 14 24
 *   12 14 25 16 4 14 24
 * and these came close:
 *    4  8 15 26 3 22 24
 *   10  8 15 26 3 22 24
 *   11  8 15 26 3 22 24
 */
#define final(a,b,c) \
{ \
	c ^= b; c -= rot(b,14); \
	a ^= c; a -= rot(c,11); \
	b ^= a; b -= rot(a,25); \
	c ^= b; c -= rot(b,16); \
	a ^= c; a -= rot(c,4);  \
	b ^= a; b -= rot(a,14); \
	c ^= b; c -= rot(b,24); \
}
/*
 * hashword() -- hash an array of uint32_t into a 32-bit value
 * k - the key, an array of uint32_t values
 * length - the length of the key, in uint32_ts
 * initval - the previous hash, or an arbitrary value
 */
static uint32_t __attribute__((unused)) hashword(const uint32_t *k,
		size_t length, uint32_t initval)
{
	uint32_t a, b, c;

	/* Set up the internal state */
	a = b = c = 0xdeadbeef + (((uint32_t) length) << 2) + initval;

	/*----------------------------------------- handle most of the key */
	while (length > 3) {
		a += k[0];
		b += k[1];
		c += k[2];
		mix(a, b, c);
		length -= 3;
		k += 3;
	}

	/*----------------------------------- handle the last 3 uint32_t's */
	switch (length) {	/* all the case statements fall through */
	case 3: c += k[2];	/* fall through */
	case 2: b += k[1];	/* fall through */
	case 1: a += k[0];
		final(a, b, c);
		/* fall through */
	case 0:			/* case 0: nothing left to add */
		break;
	}
	/*---------------------------------------------- report the result */
	return c;
}
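
/*
 * Example (illustrative; the variable names are made up): hashing three
 * uint32_t values and chaining a previous hash as the seed.
 *
 *   uint32_t words[3] = { id, gen, flags };
 *   uint32_t h = hashword(words, 3, prev_hash);
 */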
/*
 * hashword2() -- same as hashword(), but take two seeds and return two 32-bit
 * values. pc and pb must both be nonnull, and *pc and *pb must both be
 * initialized with seeds. If you pass in (*pb)==0, the output (*pc) will be
 * the same as the return value from hashword().
 */
static void __attribute__((unused)) hashword2(const uint32_t *k, size_t length,
		uint32_t *pc, uint32_t *pb)
{
	uint32_t a, b, c;

	/* Set up the internal state */
	a = b = c = 0xdeadbeef + ((uint32_t) (length << 2)) + *pc;
	c += *pb;

	while (length > 3) {
		a += k[0];
		b += k[1];
		c += k[2];
		mix(a, b, c);
		length -= 3;
		k += 3;
	}

	switch (length) {	/* all the case statements fall through */
	case 3: c += k[2];	/* fall through */
	case 2: b += k[1];	/* fall through */
	case 1: a += k[0];
		final(a, b, c);
		/* fall through */
	case 0:			/* case 0: nothing left to add */
		break;
	}

	*pc = c;
	*pb = b;
}
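
/*
 * Sketch: deriving a 64-bit value from hashword2() by seeding both outputs
 * and concatenating them; this mirrors what hash_key_ulong() does below on
 * 64-bit builds. The variable names are illustrative.
 *
 *   uint32_t pc = seed_lo, pb = seed_hi;
 *   hashword2(words, n_words, &pc, &pb);
 *   uint64_t h64 = ((uint64_t) pb << 32) | pc;
 */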
/*
 * hashlittle() -- hash a variable-length key into a 32-bit value
 *   k       : the key (the unaligned variable-length array of bytes)
 *   length  : the length of the key, counting by bytes
 *   initval : can be any 4-byte value
 * Returns a 32-bit value. Every bit of the key affects every bit of
 * the return value. Two keys differing by one or two bits will have
 * totally different hash values.
 *
 * The best hash table sizes are powers of 2. There is no need to do
 * mod a prime (mod is sooo slow!). If you need less than 32 bits,
 * use a bitmask. For example, if you need only 10 bits, do
 *   h = (h & hashmask(10));
 * In which case, the hash table should have hashsize(10) elements.
 *
 * If you are hashing n strings (uint8_t **)k, do it like this:
 *   for (i = 0, h = 0; i < n; ++i) h = hashlittle(k[i], len[i], h);
 *
 * By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this
 * code any way you wish, private, educational, or commercial. It's free.
 *
 * Use for hash table lookup, or anything where one collision in 2^^32 is
 * acceptable. Do NOT use for cryptographic purposes.
 */
static uint32_t __attribute__((unused)) hashlittle(const void *key,
		size_t length, uint32_t initval)
{
	uint32_t a, b, c;
	union {
		const void *ptr;
		size_t i;
	} u;	/* needed for Mac Powerbook G4 */

	/* Set up the internal state */
	a = b = c = 0xdeadbeef + ((uint32_t) length) + initval;

	u.ptr = key;
	if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
		const uint32_t *k = (const uint32_t *) key;	/* read 32-bit chunks */
		/*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
		while (length > 12) {
			a += k[0];
			b += k[1];
			c += k[2];
			mix(a, b, c);
			length -= 12;
			k += 3;
		}

		/*
		 * "k[2]&0xffffff" actually reads beyond the end of the string, but
		 * then masks off the part it's not allowed to read. Because the
		 * string is aligned, the masked-off tail is in the same word as the
		 * rest of the string. Every machine with memory protection I've seen
		 * does it on word boundaries, so is OK with this. But VALGRIND will
		 * still catch it and complain. The masking trick does make the hash
		 * noticeably faster for short strings (like English words).
		 */
#ifndef VALGRIND
		switch (length) {
		case 12: c += k[2]; b += k[1]; a += k[0]; break;
		case 11: c += k[2]&0xffffff; b += k[1]; a += k[0]; break;
		case 10: c += k[2]&0xffff; b += k[1]; a += k[0]; break;
		case 9: c += k[2]&0xff; b += k[1]; a += k[0]; break;
		case 8: b += k[1]; a += k[0]; break;
		case 7: b += k[1]&0xffffff; a += k[0]; break;
		case 6: b += k[1]&0xffff; a += k[0]; break;
		case 5: b += k[1]&0xff; a += k[0]; break;
		case 4: a += k[0]; break;
		case 3: a += k[0]&0xffffff; break;
		case 2: a += k[0]&0xffff; break;
		case 1: a += k[0]&0xff; break;
		case 0: return c;	/* zero length strings require no mixing */
		}
#else /* make valgrind happy */
		const uint8_t *k8;

		k8 = (const uint8_t *) k;
		switch (length) {
		case 12: c += k[2]; b += k[1]; a += k[0]; break;
		case 11: c += ((uint32_t) k8[10])<<16;	/* fall through */
		case 10: c += ((uint32_t) k8[9])<<8;	/* fall through */
		case 9: c += k8[8];			/* fall through */
		case 8: b += k[1]; a += k[0]; break;
		case 7: b += ((uint32_t) k8[6])<<16;	/* fall through */
		case 6: b += ((uint32_t) k8[5])<<8;	/* fall through */
		case 5: b += k8[4];			/* fall through */
		case 4: a += k[0]; break;
		case 3: a += ((uint32_t) k8[2])<<16;	/* fall through */
		case 2: a += ((uint32_t) k8[1])<<8;	/* fall through */
		case 1: a += k8[0]; break;
		case 0: return c;
		}
#endif /* !valgrind */
	} else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
		const uint16_t *k = (const uint16_t *) key;	/* read 16-bit chunks */
		const uint8_t *k8;
		/*--------------- all but last block: aligned reads and different mixing */
		while (length > 12) {
			a += k[0] + (((uint32_t) k[1])<<16);
			b += k[2] + (((uint32_t) k[3])<<16);
			c += k[4] + (((uint32_t) k[5])<<16);
			mix(a, b, c);
			length -= 12;
			k += 6;
		}

		/*---------------------------- handle the last (probably partial) block */
		k8 = (const uint8_t *) k;
		switch (length) {
		case 12:
			c += k[4] + (((uint32_t) k[5])<<16);
			b += k[2] + (((uint32_t) k[3])<<16);
			a += k[0] + (((uint32_t) k[1])<<16);
			break;
		case 11:
			c += ((uint32_t) k8[10])<<16;	/* fall through */
		case 10:
			c += k[4];
			b += k[2] + (((uint32_t) k[3])<<16);
			a += k[0] + (((uint32_t) k[1])<<16);
			break;
		case 9:
			c += k8[8];	/* fall through */
		case 8:
			b += k[2] + (((uint32_t) k[3])<<16);
			a += k[0] + (((uint32_t) k[1])<<16);
			break;
		case 7:
			b += ((uint32_t) k8[6])<<16;	/* fall through */
		case 6:
			b += k[2];
			a += k[0] + (((uint32_t) k[1])<<16);
			break;
		case 5:
			b += k8[4];	/* fall through */
		case 4:
			a += k[0] + (((uint32_t) k[1])<<16);
			break;
		case 3:
			a += ((uint32_t) k8[2])<<16;	/* fall through */
		case 2:
			a += k[0];
			break;
		case 1:
			a += k8[0];
			break;
		case 0:
			return c;	/* zero length requires no mixing */
		}
	} else {	/* need to read the key one byte at a time */
		const uint8_t *k = (const uint8_t *) key;

		/*----------- all but the last block: affect some 32 bits of (a,b,c) */
		while (length > 12) {
			a += k[0];
			a += ((uint32_t) k[1])<<8;
			a += ((uint32_t) k[2])<<16;
			a += ((uint32_t) k[3])<<24;
			b += k[4];
			b += ((uint32_t) k[5])<<8;
			b += ((uint32_t) k[6])<<16;
			b += ((uint32_t) k[7])<<24;
			c += k[8];
			c += ((uint32_t) k[9])<<8;
			c += ((uint32_t) k[10])<<16;
			c += ((uint32_t) k[11])<<24;
			mix(a, b, c);
			length -= 12;
			k += 12;
		}
		/*------------------------------- last block: affect all 32 bits of (c) */
		switch (length) {	/* all the case statements fall through */
		case 12: c += ((uint32_t) k[11])<<24;
		case 11: c += ((uint32_t) k[10])<<16;
		case 10: c += ((uint32_t) k[9])<<8;
		case 9: c += k[8];
		case 8: b += ((uint32_t) k[7])<<24;
		case 7: b += ((uint32_t) k[6])<<16;
		case 6: b += ((uint32_t) k[5])<<8;
		case 5: b += k[4];
		case 4: a += ((uint32_t) k[3])<<24;
		case 3: a += ((uint32_t) k[2])<<16;
		case 2: a += ((uint32_t) k[1])<<8;
		case 1: a += k[0];
			break;
		case 0: return c;
		}
	}

	final(a, b, c);
	return c;
}
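
/*
 * Minimal usage sketch for hashlittle(): hash a NUL-terminated string and
 * keep only 10 bits for a table of hashsize(10) buckets, as the comment
 * above hashlittle() suggests.
 *
 *   const char *s = "example";
 *   uint32_t h = hashlittle(s, strlen(s), 0) & hashmask(10);
 */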
#if (CAA_BITS_PER_LONG == 64)
/*
 * Hash function for number value.
 */
unsigned long hash_key_ulong(void *_key, unsigned long seed)
{
	union {
		uint64_t v64;
		uint32_t v32[2];
	} v;
	union {
		uint64_t v64;
		uint32_t v32[2];
	} key;

	v.v64 = (uint64_t) seed;
	key.v64 = (uint64_t) _key;
	hashword2(key.v32, 2, &v.v32[0], &v.v32[1]);
	return v.v64;
}
#else
/*
 * Hash function for number value.
 */
unsigned long hash_key_ulong(void *_key, unsigned long seed)
{
	uint32_t key = (uint32_t) _key;

	return hashword(&key, 1, seed);
}
#endif /* CAA_BITS_PER_LONG */
/*
 * Hash function for string.
 */
unsigned long hash_key_str(void *key, unsigned long seed)
{
	return hashlittle(key, strlen((char *) key), seed);
}
/*
 * Hash function compare for number value.
 */
int hash_match_key_ulong(void *key1, void *key2)
{
	if (key1 == key2) {
		return 1;
	}

	return 0;
}
/*
 * Hash compare function for string.
 */
int hash_match_key_str(void *key1, void *key2)
{
	if (strcmp(key1, key2) == 0) {
		return 1;
	}

	return 0;
}
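
/*
 * Sketch of how the key and match helpers pair up for a simple lookup; the
 * "buckets" array and "nr_buckets" (a power of two) are hypothetical and only
 * illustrate the intended calling convention.
 *
 *   unsigned long b = hash_key_str(name, seed) & (nr_buckets - 1);
 *   if (hash_match_key_str(buckets[b].key, name)) {
 *           ... found ...
 *   }
 */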