Initialize lttng_ht_seed on hashtable creation
[lttng-tools.git] / src / common / hashtable / utils.c
CommitLineData
819dc7d4 1/*
66c60361 2 * Copyright (C) - Bob Jenkins, May 2006
819dc7d4 3 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
bec39940 4 * Copyright (C) 2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
819dc7d4 5 *
d14d33bf
AM
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2 only,
8 * as published by the Free Software Foundation.
66c60361
DG
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
d14d33bf
AM
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
66c60361
DG
18 */
19
20/*
819dc7d4
DG
21 * These are functions for producing 32-bit hashes for hash table lookup.
22 * hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final() are
23 * externally useful functions. Routines to test the hash are included if
24 * SELF_TEST is defined. You can use this free for any purpose. It's in the
25 * public domain. It has no warranty.
26 *
27 * You probably want to use hashlittle(). hashlittle() and hashbig() hash byte
 28 * arrays. hashlittle() is faster than hashbig() on little-endian machines.
29 * Intel and AMD are little-endian machines. On second thought, you probably
30 * want hashlittle2(), which is identical to hashlittle() except it returns two
31 * 32-bit hashes for the price of one. You could implement hashbig2() if you
32 * wanted but I haven't bothered here.
33 *
34 * If you want to find a hash of, say, exactly 7 integers, do
35 * a = i1; b = i2; c = i3;
36 * mix(a,b,c);
37 * a += i4; b += i5; c += i6;
38 * mix(a,b,c);
39 * a += i7;
40 * final(a,b,c);
41 * then use c as the hash value. If you have a variable length array of
42 * 4-byte integers to hash, use hashword(). If you have a byte array (like
43 * a character string), use hashlittle(). If you have several byte arrays, or
44 * a mix of things, see the comments above hashlittle().
45 *
46 * Why is this so big? I read 12 bytes at a time into 3 4-byte integers, then
47 * mix those integers. This is fast (you can do a lot more thorough mixing
48 * with 12*3 instructions on 3 integers than you can with 3 instructions on 1
49 * byte), but shoehorning those bytes into integers efficiently is messy.
50 */
890d8fe4 51
6c1c0768 52#define _LGPL_SOURCE
bec39940 53#include <assert.h>
bec39940
DG
54#include <stdint.h> /* defines uint32_t etc */
55#include <stdio.h> /* defines printf for tests */
819dc7d4 56#include <string.h>
bec39940
DG
57#include <sys/param.h> /* attempt to define endianness */
58#include <time.h> /* defines time_t for timings in the test */
0df502fd 59#include <urcu/compiler.h>
819dc7d4 60
bec39940 61#include "utils.h"
eb71a0aa 62#include <common/compat/endian.h> /* attempt to define endianness */
90e535ef 63#include <common/common.h>
3c4599b9 64#include <common/hashtable/hashtable.h>
bec39940 65
819dc7d4
DG
66/*
67 * My best guess at if you are big-endian or little-endian. This may
68 * need adjustment.
69 */
70#if (defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && \
71 __BYTE_ORDER == __LITTLE_ENDIAN) || \
72 (defined(i386) || defined(__i386__) || defined(__i486__) || \
73 defined(__i586__) || defined(__i686__) || defined(vax) || defined(MIPSEL))
74# define HASH_LITTLE_ENDIAN 1
75# define HASH_BIG_ENDIAN 0
76#elif (defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && \
77 __BYTE_ORDER == __BIG_ENDIAN) || \
78 (defined(sparc) || defined(POWERPC) || defined(mc68000) || defined(sel))
79# define HASH_LITTLE_ENDIAN 0
80# define HASH_BIG_ENDIAN 1
81#else
82# define HASH_LITTLE_ENDIAN 0
83# define HASH_BIG_ENDIAN 0
84#endif
85
86#define hashsize(n) ((uint32_t)1<<(n))
87#define hashmask(n) (hashsize(n)-1)
88#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))
89
90/*
91 * mix -- mix 3 32-bit values reversibly.
92 *
93 * This is reversible, so any information in (a,b,c) before mix() is
94 * still in (a,b,c) after mix().
95 *
96 * If four pairs of (a,b,c) inputs are run through mix(), or through
97 * mix() in reverse, there are at least 32 bits of the output that
98 * are sometimes the same for one pair and different for another pair.
99 * This was tested for:
100 * * pairs that differed by one bit, by two bits, in any combination
101 * of top bits of (a,b,c), or in any combination of bottom bits of
102 * (a,b,c).
103 * * "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
104 * the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
105 * is commonly produced by subtraction) look like a single 1-bit
106 * difference.
107 * * the base values were pseudorandom, all zero but one bit set, or
108 * all zero plus a counter that starts at zero.
109 *
110 * Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that
111 * satisfy this are
112 * 4 6 8 16 19 4
113 * 9 15 3 18 27 15
114 * 14 9 3 7 17 3
115 * Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing
116 * for "differ" defined as + with a one-bit base and a two-bit delta. I
117 * used http://burtleburtle.net/bob/hash/avalanche.html to choose
118 * the operations, constants, and arrangements of the variables.
119 *
120 * This does not achieve avalanche. There are input bits of (a,b,c)
121 * that fail to affect some output bits of (a,b,c), especially of a. The
122 * most thoroughly mixed value is c, but it doesn't really even achieve
123 * avalanche in c.
124 *
125 * This allows some parallelism. Read-after-writes are good at doubling
126 * the number of bits affected, so the goal of mixing pulls in the opposite
127 * direction as the goal of parallelism. I did what I could. Rotates
128 * seem to cost as much as shifts on every machine I could lay my hands
129 * on, and rotates are much kinder to the top and bottom bits, so I used
130 * rotates.
131 */
132#define mix(a,b,c) \
133{ \
134 a -= c; a ^= rot(c, 4); c += b; \
135 b -= a; b ^= rot(a, 6); a += c; \
136 c -= b; c ^= rot(b, 8); b += a; \
137 a -= c; a ^= rot(c,16); c += b; \
138 b -= a; b ^= rot(a,19); a += c; \
139 c -= b; c ^= rot(b, 4); b += a; \
140}
141
142/*
143 * final -- final mixing of 3 32-bit values (a,b,c) into c
144 *
145 * Pairs of (a,b,c) values differing in only a few bits will usually
146 * produce values of c that look totally different. This was tested for
147 * * pairs that differed by one bit, by two bits, in any combination
148 * of top bits of (a,b,c), or in any combination of bottom bits of
149 * (a,b,c).
150 * * "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
151 * the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
152 * is commonly produced by subtraction) look like a single 1-bit
153 * difference.
154 * * the base values were pseudorandom, all zero but one bit set, or
155 * all zero plus a counter that starts at zero.
156 *
157 * These constants passed:
158 * 14 11 25 16 4 14 24
159 * 12 14 25 16 4 14 24
160 * and these came close:
161 * 4 8 15 26 3 22 24
162 * 10 8 15 26 3 22 24
163 * 11 8 15 26 3 22 24
164 */
165#define final(a,b,c) \
166{ \
167 c ^= b; c -= rot(b,14); \
168 a ^= c; a -= rot(c,11); \
169 b ^= a; b -= rot(a,25); \
170 c ^= b; c -= rot(b,16); \
171 a ^= c; a -= rot(c,4); \
172 b ^= a; b -= rot(a,14); \
173 c ^= b; c -= rot(b,24); \
174}
175
bec39940
DG
176/*
177 * k - the key, an array of uint32_t values
178 * length - the length of the key, in uint32_ts
179 * initval - the previous hash, or an arbitrary value
180 */
181static uint32_t __attribute__((unused)) hashword(const uint32_t *k,
182 size_t length, uint32_t initval)
0df502fd
MD
183{
184 uint32_t a, b, c;
185
186 /* Set up the internal state */
187 a = b = c = 0xdeadbeef + (((uint32_t) length) << 2) + initval;
188
189 /*----------------------------------------- handle most of the key */
190 while (length > 3) {
191 a += k[0];
192 b += k[1];
193 c += k[2];
194 mix(a, b, c);
195 length -= 3;
196 k += 3;
197 }
198
199 /*----------------------------------- handle the last 3 uint32_t's */
200 switch (length) { /* all the case statements fall through */
201 case 3: c += k[2];
202 case 2: b += k[1];
203 case 1: a += k[0];
204 final(a, b, c);
205 case 0: /* case 0: nothing left to add */
206 break;
207 }
208 /*---------------------------------------------- report the result */
209 return c;
210}
211
212
819dc7d4
DG
213/*
214 * hashword2() -- same as hashword(), but take two seeds and return two 32-bit
215 * values. pc and pb must both be nonnull, and *pc and *pb must both be
216 * initialized with seeds. If you pass in (*pb)==0, the output (*pc) will be
217 * the same as the return value from hashword().
218 */
bec39940 219static void __attribute__((unused)) hashword2(const uint32_t *k, size_t length,
819dc7d4
DG
220 uint32_t *pc, uint32_t *pb)
221{
222 uint32_t a, b, c;
223
224 /* Set up the internal state */
225 a = b = c = 0xdeadbeef + ((uint32_t) (length << 2)) + *pc;
226 c += *pb;
227
228 while (length > 3) {
229 a += k[0];
230 b += k[1];
231 c += k[2];
232 mix(a, b, c);
233 length -= 3;
234 k += 3;
235 }
236
237 switch (length) {
238 case 3 :
239 c += k[2];
240 case 2 :
241 b += k[1];
242 case 1 :
243 a += k[0];
244 final(a, b, c);
245 case 0: /* case 0: nothing left to add */
246 break;
247 }
248
249 *pc = c;
250 *pb = b;
251}
252
253/*
254 * hashlittle() -- hash a variable-length key into a 32-bit value
255 * k : the key (the unaligned variable-length array of bytes)
256 * length : the length of the key, counting by bytes
257 * initval : can be any 4-byte value
258 * Returns a 32-bit value. Every bit of the key affects every bit of
259 * the return value. Two keys differing by one or two bits will have
260 * totally different hash values.
261 *
262 * The best hash table sizes are powers of 2. There is no need to do
263 * mod a prime (mod is sooo slow!). If you need less than 32 bits,
264 * use a bitmask. For example, if you need only 10 bits, do
265 * h = (h & hashmask(10));
266 * In which case, the hash table should have hashsize(10) elements.
267 *
268 * If you are hashing n strings (uint8_t **)k, do it like this:
269 * for (i=0, h=0; i<n; ++i) h = hashlittle( k[i], len[i], h);
270 *
271 * By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this
272 * code any way you wish, private, educational, or commercial. It's free.
273 *
274 * Use for hash table lookup, or anything where one collision in 2^^32 is
275 * acceptable. Do NOT use for cryptographic purposes.
276 */
bec39940
DG
277static uint32_t __attribute__((unused)) hashlittle(const void *key,
278 size_t length, uint32_t initval)
819dc7d4
DG
279{
280 uint32_t a,b,c;
281 union {
282 const void *ptr;
283 size_t i;
284 } u; /* needed for Mac Powerbook G4 */
285
286 /* Set up the internal state */
287 a = b = c = 0xdeadbeef + ((uint32_t)length) + initval;
288
289 u.ptr = key;
290 if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
291 const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */
292
293 /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
294 while (length > 12) {
295 a += k[0];
296 b += k[1];
297 c += k[2];
298 mix(a,b,c);
299 length -= 12;
300 k += 3;
301 }
302
303 /*
304 * "k[2]&0xffffff" actually reads beyond the end of the string, but
305 * then masks off the part it's not allowed to read. Because the
306 * string is aligned, the masked-off tail is in the same word as the
307 * rest of the string. Every machine with memory protection I've seen
308 * does it on word boundaries, so is OK with this. But VALGRIND will
309 * still catch it and complain. The masking trick does make the hash
310 * noticably faster for short strings (like English words).
311 */
312#ifndef VALGRIND
313
314 switch (length) {
315 case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
316 case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break;
317 case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break;
318 case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break;
319 case 8 : b+=k[1]; a+=k[0]; break;
320 case 7 : b+=k[1]&0xffffff; a+=k[0]; break;
321 case 6 : b+=k[1]&0xffff; a+=k[0]; break;
322 case 5 : b+=k[1]&0xff; a+=k[0]; break;
323 case 4 : a+=k[0]; break;
324 case 3 : a+=k[0]&0xffffff; break;
325 case 2 : a+=k[0]&0xffff; break;
326 case 1 : a+=k[0]&0xff; break;
327 case 0 : return c; /* zero length strings require no mixing */
328 }
329#else /* make valgrind happy */
330 const uint8_t *k8;
331
332 k8 = (const uint8_t *)k;
333 switch (length) {
334 case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
335 case 11: c+=((uint32_t)k8[10])<<16; /* fall through */
336 case 10: c+=((uint32_t)k8[9])<<8; /* fall through */
337 case 9 : c+=k8[8]; /* fall through */
338 case 8 : b+=k[1]; a+=k[0]; break;
339 case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */
340 case 6 : b+=((uint32_t)k8[5])<<8; /* fall through */
341 case 5 : b+=k8[4]; /* fall through */
342 case 4 : a+=k[0]; break;
343 case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */
344 case 2 : a+=((uint32_t)k8[1])<<8; /* fall through */
345 case 1 : a+=k8[0]; break;
346 case 0 : return c;
347 }
348#endif /* !valgrind */
349 } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
350 const uint16_t *k = (const uint16_t *)key; /* read 16-bit chunks */
351 const uint8_t *k8;
352
353 /*--------------- all but last block: aligned reads and different mixing */
354 while (length > 12) {
355 a += k[0] + (((uint32_t)k[1])<<16);
356 b += k[2] + (((uint32_t)k[3])<<16);
357 c += k[4] + (((uint32_t)k[5])<<16);
358 mix(a,b,c);
359 length -= 12;
360 k += 6;
361 }
362
363 k8 = (const uint8_t *)k;
364 switch (length) {
365 case 12:
366 c+=k[4]+(((uint32_t)k[5])<<16);
367 b+=k[2]+(((uint32_t)k[3])<<16);
368 a+=k[0]+(((uint32_t)k[1])<<16);
369 break;
370 case 11:
371 c+=((uint32_t)k8[10])<<16; /* fall through */
372 case 10:
373 c+=k[4];
374 b+=k[2]+(((uint32_t)k[3])<<16);
375 a+=k[0]+(((uint32_t)k[1])<<16);
376 break;
377 case 9:
378 c+=k8[8]; /* fall through */
379 case 8:
380 b+=k[2]+(((uint32_t)k[3])<<16);
381 a+=k[0]+(((uint32_t)k[1])<<16);
382 break;
383 case 7:
384 b+=((uint32_t)k8[6])<<16; /* fall through */
385 case 6:
386 b+=k[2];
387 a+=k[0]+(((uint32_t)k[1])<<16);
388 break;
389 case 5:
390 b+=k8[4]; /* fall through */
391 case 4:
392 a+=k[0]+(((uint32_t)k[1])<<16);
393 break;
394 case 3:
395 a+=((uint32_t)k8[2])<<16; /* fall through */
396 case 2:
397 a+=k[0];
398 break;
399 case 1:
400 a+=k8[0];
401 break;
402 case 0:
403 return c; /* zero length requires no mixing */
404 }
405
406 } else { /* need to read the key one byte at a time */
407 const uint8_t *k = (const uint8_t *)key;
408
409 while (length > 12) {
410 a += k[0];
411 a += ((uint32_t)k[1])<<8;
412 a += ((uint32_t)k[2])<<16;
413 a += ((uint32_t)k[3])<<24;
414 b += k[4];
415 b += ((uint32_t)k[5])<<8;
416 b += ((uint32_t)k[6])<<16;
417 b += ((uint32_t)k[7])<<24;
418 c += k[8];
419 c += ((uint32_t)k[9])<<8;
420 c += ((uint32_t)k[10])<<16;
421 c += ((uint32_t)k[11])<<24;
422 mix(a,b,c);
423 length -= 12;
424 k += 12;
425 }
426
427 switch(length) { /* all the case statements fall through */
428 case 12: c+=((uint32_t)k[11])<<24;
429 case 11: c+=((uint32_t)k[10])<<16;
430 case 10: c+=((uint32_t)k[9])<<8;
431 case 9: c+=k[8];
432 case 8: b+=((uint32_t)k[7])<<24;
433 case 7: b+=((uint32_t)k[6])<<16;
434 case 6: b+=((uint32_t)k[5])<<8;
435 case 5: b+=k[4];
436 case 4: a+=((uint32_t)k[3])<<24;
437 case 3: a+=((uint32_t)k[2])<<16;
438 case 2: a+=((uint32_t)k[1])<<8;
439 case 1:
440 a+=k[0];
441 break;
442 case 0:
443 return c;
444 }
445 }
446
447 final(a,b,c);
448 return c;
449}
450
90e535ef 451LTTNG_HIDDEN
d88aee68 452unsigned long hash_key_u64(void *_key, unsigned long seed)
0df502fd
MD
453{
454 union {
455 uint64_t v64;
456 uint32_t v32[2];
457 } v;
458 union {
459 uint64_t v64;
460 uint32_t v32[2];
461 } key;
462
0df502fd 463 v.v64 = (uint64_t) seed;
d88aee68 464 key.v64 = *(uint64_t *) _key;
0df502fd
MD
465 hashword2(key.v32, 2, &v.v32[0], &v.v32[1]);
466 return v.v64;
467}
d88aee68
DG
468
469#if (CAA_BITS_PER_LONG == 64)
470/*
471 * Hash function for number value.
472 */
473LTTNG_HIDDEN
474unsigned long hash_key_ulong(void *_key, unsigned long seed)
475{
476 uint64_t __key = (uint64_t) _key;
477 return (unsigned long) hash_key_u64(&__key, seed);
478}
0df502fd 479#else
819dc7d4
DG
480/*
481 * Hash function for number value.
482 */
90e535ef 483LTTNG_HIDDEN
bec39940 484unsigned long hash_key_ulong(void *_key, unsigned long seed)
819dc7d4 485{
8da9ba32 486 uint32_t key = (uint32_t) _key;
0df502fd 487
0df502fd 488 return hashword(&key, 1, seed);
819dc7d4 489}
bec39940 490#endif /* CAA_BITS_PER_LONG */
819dc7d4
DG
491
492/*
493 * Hash function for string.
494 */
90e535ef 495LTTNG_HIDDEN
bec39940 496unsigned long hash_key_str(void *key, unsigned long seed)
819dc7d4 497{
bec39940 498 return hashlittle(key, strlen((char *) key), seed);
819dc7d4
DG
499}
500
3c4599b9
JD
501/*
502 * Hash function for two uint64_t.
503 */
504LTTNG_HIDDEN
505unsigned long hash_key_two_u64(void *key, unsigned long seed)
506{
507 struct lttng_ht_two_u64 *k = (struct lttng_ht_two_u64 *) key;
508
509 return hash_key_u64(&k->key1, seed) ^ hash_key_u64(&k->key2, seed);
510}
511
819dc7d4
DG
512/*
513 * Hash function compare for number value.
514 */
90e535ef 515LTTNG_HIDDEN
bec39940 516int hash_match_key_ulong(void *key1, void *key2)
819dc7d4 517{
819dc7d4 518 if (key1 == key2) {
bec39940 519 return 1;
819dc7d4
DG
520 }
521
bec39940 522 return 0;
819dc7d4
DG
523}
524
d88aee68
DG
525/*
526 * Hash function compare for number value.
527 */
528LTTNG_HIDDEN
529int hash_match_key_u64(void *key1, void *key2)
530{
531 if (*(uint64_t *) key1 == *(uint64_t *) key2) {
532 return 1;
533 }
534
535 return 0;
536}
537
819dc7d4
DG
538/*
539 * Hash compare function for string.
540 */
90e535ef 541LTTNG_HIDDEN
bec39940 542int hash_match_key_str(void *key1, void *key2)
819dc7d4 543{
bec39940
DG
544 if (strcmp(key1, key2) == 0) {
545 return 1;
819dc7d4
DG
546 }
547
bec39940 548 return 0;
819dc7d4 549}
3c4599b9
JD
550
551/*
552 * Hash function compare two uint64_t.
553 */
554LTTNG_HIDDEN
555int hash_match_key_two_u64(void *key1, void *key2)
556{
557 struct lttng_ht_two_u64 *k1 = (struct lttng_ht_two_u64 *) key1;
558 struct lttng_ht_two_u64 *k2 = (struct lttng_ht_two_u64 *) key2;
559
560 if (hash_match_key_u64(&k1->key1, &k2->key1) &&
561 hash_match_key_u64(&k1->key2, &k2->key2)) {
562 return 1;
563 }
564
565 return 0;
566}
This page took 0.061484 seconds and 4 git commands to generate.