1 /*
2 * Copyright (C) 2006 Bob Jenkins
3 * Copyright (C) 2011 EfficiOS Inc.
4 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * SPDX-License-Identifier: LGPL-2.1-only
7 *
8 */
9
10 /*
11 * These are functions for producing 32-bit hashes for hash table lookup.
12 * hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final() are
13 * externally useful functions. Routines to test the hash are included if
14 * SELF_TEST is defined. You can use this free for any purpose. It's in the
15 * public domain. It has no warranty.
16 *
17 * You probably want to use hashlittle(). hashlittle() and hashbig() hash byte
 * arrays. hashlittle() is faster than hashbig() on little-endian machines.
19 * Intel and AMD are little-endian machines. On second thought, you probably
20 * want hashlittle2(), which is identical to hashlittle() except it returns two
21 * 32-bit hashes for the price of one. You could implement hashbig2() if you
22 * wanted but I haven't bothered here.
23 *
24 * If you want to find a hash of, say, exactly 7 integers, do
25 * a = i1; b = i2; c = i3;
26 * mix(a,b,c);
27 * a += i4; b += i5; c += i6;
28 * mix(a,b,c);
29 * a += i7;
30 * final(a,b,c);
31 * then use c as the hash value. If you have a variable length array of
32 * 4-byte integers to hash, use hashword(). If you have a byte array (like
33 * a character string), use hashlittle(). If you have several byte arrays, or
34 * a mix of things, see the comments above hashlittle().
35 *
36 * Why is this so big? I read 12 bytes at a time into 3 4-byte integers, then
37 * mix those integers. This is fast (you can do a lot more thorough mixing
38 * with 12*3 instructions on 3 integers than you can with 3 instructions on 1
39 * byte), but shoehorning those bytes into integers efficiently is messy.
40 */
41
42 #define _LGPL_SOURCE
43 #include "utils.hpp"
44
45 #include <common/common.hpp>
46 #include <common/compat/endian.hpp> /* attempt to define endianness */
47 #include <common/hashtable/hashtable.hpp>
48
49 #include <stdint.h> /* defines uint32_t etc */
50 #include <stdio.h> /* defines printf for tests */
51 #include <string.h>
52 #include <sys/param.h> /* attempt to define endianness */
53 #include <time.h> /* defines time_t for timings in the test */
54 #include <urcu/compiler.h>
55
56 /*
57 * My best guess at if you are big-endian or little-endian. This may
58 * need adjustment.
59 */
60 #if (defined(BYTE_ORDER) && defined(LITTLE_ENDIAN) && BYTE_ORDER == LITTLE_ENDIAN) || \
61 (defined(i386) || defined(__i386__) || defined(__i486__) || defined(__i586__) || \
62 defined(__i686__) || defined(vax) || defined(MIPSEL))
63 #define HASH_LITTLE_ENDIAN 1
64 #define HASH_BIG_ENDIAN 0
65 #elif (defined(BYTE_ORDER) && defined(BIG_ENDIAN) && BYTE_ORDER == BIG_ENDIAN) || \
66 (defined(sparc) || defined(POWERPC) || defined(mc68000) || defined(sel))
67 #define HASH_LITTLE_ENDIAN 0
68 #define HASH_BIG_ENDIAN 1
69 #else
70 #define HASH_LITTLE_ENDIAN 0
71 #define HASH_BIG_ENDIAN 0
72 #endif
73
74 #define hashsize(n) ((uint32_t) 1 << (n))
75 #define hashmask(n) (hashsize(n) - 1)
76 #define rot(x, k) (((x) << (k)) | ((x) >> (32 - (k))))
77
78 /*
79 * mix -- mix 3 32-bit values reversibly.
80 *
81 * This is reversible, so any information in (a,b,c) before mix() is
82 * still in (a,b,c) after mix().
83 *
84 * If four pairs of (a,b,c) inputs are run through mix(), or through
85 * mix() in reverse, there are at least 32 bits of the output that
86 * are sometimes the same for one pair and different for another pair.
87 * This was tested for:
88 * * pairs that differed by one bit, by two bits, in any combination
89 * of top bits of (a,b,c), or in any combination of bottom bits of
90 * (a,b,c).
91 * * "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
92 * the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
93 * is commonly produced by subtraction) look like a single 1-bit
94 * difference.
95 * * the base values were pseudorandom, all zero but one bit set, or
96 * all zero plus a counter that starts at zero.
97 *
98 * Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that
99 * satisfy this are
100 * 4 6 8 16 19 4
101 * 9 15 3 18 27 15
102 * 14 9 3 7 17 3
103 * Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing
104 * for "differ" defined as + with a one-bit base and a two-bit delta. I
105 * used http://burtleburtle.net/bob/hash/avalanche.html to choose
106 * the operations, constants, and arrangements of the variables.
107 *
108 * This does not achieve avalanche. There are input bits of (a,b,c)
109 * that fail to affect some output bits of (a,b,c), especially of a. The
110 * most thoroughly mixed value is c, but it doesn't really even achieve
111 * avalanche in c.
112 *
113 * This allows some parallelism. Read-after-writes are good at doubling
114 * the number of bits affected, so the goal of mixing pulls in the opposite
115 * direction as the goal of parallelism. I did what I could. Rotates
116 * seem to cost as much as shifts on every machine I could lay my hands
117 * on, and rotates are much kinder to the top and bottom bits, so I used
118 * rotates.
119 */
/* mix(a,b,c): reversibly mix three 32-bit values; see the rationale above. */
#define mix(a, b, c)                \
	{                           \
		(a) -= (c);         \
		(a) ^= rot(c, 4);   \
		(c) += (b);         \
		(b) -= (a);         \
		(b) ^= rot(a, 6);   \
		(a) += (c);         \
		(c) -= (b);         \
		(c) ^= rot(b, 8);   \
		(b) += (a);         \
		(a) -= (c);         \
		(a) ^= rot(c, 16);  \
		(c) += (b);         \
		(b) -= (a);         \
		(b) ^= rot(a, 19);  \
		(a) += (c);         \
		(c) -= (b);         \
		(c) ^= rot(b, 4);   \
		(b) += (a);         \
	}
141
142 /*
143 * final -- final mixing of 3 32-bit values (a,b,c) into c
144 *
145 * Pairs of (a,b,c) values differing in only a few bits will usually
146 * produce values of c that look totally different. This was tested for
147 * * pairs that differed by one bit, by two bits, in any combination
148 * of top bits of (a,b,c), or in any combination of bottom bits of
149 * (a,b,c).
150 * * "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
151 * the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
152 * is commonly produced by subtraction) look like a single 1-bit
153 * difference.
154 * * the base values were pseudorandom, all zero but one bit set, or
155 * all zero plus a counter that starts at zero.
156 *
157 * These constants passed:
158 * 14 11 25 16 4 14 24
159 * 12 14 25 16 4 14 24
160 * and these came close:
161 * 4 8 15 26 3 22 24
162 * 10 8 15 26 3 22 24
163 * 11 8 15 26 3 22 24
164 */
/* final(a,b,c): irreversibly fold (a,b,c) down into c; constants documented above. */
#define final(a, b, c)              \
	{                           \
		(c) ^= (b);         \
		(c) -= rot(b, 14);  \
		(a) ^= (c);         \
		(a) -= rot(c, 11);  \
		(b) ^= (a);         \
		(b) -= rot(a, 25);  \
		(c) ^= (b);         \
		(c) -= rot(b, 16);  \
		(a) ^= (c);         \
		(a) -= rot(c, 4);   \
		(b) ^= (a);         \
		(b) -= rot(a, 14);  \
		(c) ^= (b);         \
		(c) -= rot(b, 24);  \
	}
182
183 /*
184 * k - the key, an array of uint32_t values
185 * length - the length of the key, in uint32_ts
186 * initval - the previous hash, or an arbitrary value
187 */
static uint32_t __attribute__((unused)) hashword(const uint32_t *k, size_t length, uint32_t initval)
{
	uint32_t a, b, c;

	/* Set up the internal state (seeded from the length and initval). */
	a = b = c = 0xdeadbeef + (((uint32_t) length) << 2) + initval;

	/*----------------------------------------- handle most of the key */
	while (length > 3) {
		a += k[0];
		b += k[1];
		c += k[2];
		mix(a, b, c);
		length -= 3;
		k += 3;
	}

	/*----------------------------------- handle the last 3 uint32_t's */
	switch (length) { /* all the case statements fall through */
	case 3:
		c += k[2]; /* fall through */
	case 2:
		b += k[1]; /* fall through */
	case 1:
		a += k[0];
		final(a, b, c);
		/* fall through */
	case 0: /* case 0: nothing left to add */
		break;
	}
	/*---------------------------------------------- report the result */
	return c;
}
220
221 /*
222 * hashword2() -- same as hashword(), but take two seeds and return two 32-bit
223 * values. pc and pb must both be nonnull, and *pc and *pb must both be
224 * initialized with seeds. If you pass in (*pb)==0, the output (*pc) will be
225 * the same as the return value from hashword().
226 */
227 static void __attribute__((unused))
228 hashword2(const uint32_t *k, size_t length, uint32_t *pc, uint32_t *pb)
229 {
230 uint32_t a, b, c;
231
232 /* Set up the internal state */
233 a = b = c = 0xdeadbeef + ((uint32_t) (length << 2)) + *pc;
234 c += *pb;
235
236 while (length > 3) {
237 a += k[0];
238 b += k[1];
239 c += k[2];
240 mix(a, b, c);
241 length -= 3;
242 k += 3;
243 }
244
245 switch (length) {
246 case 3:
247 c += k[2];
248 /* fall through */
249 case 2:
250 b += k[1];
251 /* fall through */
252 case 1:
253 a += k[0];
254 final(a, b, c);
255 /* fall through */
256 case 0: /* case 0: nothing left to add */
257 break;
258 }
259
260 *pc = c;
261 *pb = b;
262 }
263
264 /*
265 * hashlittle() -- hash a variable-length key into a 32-bit value
266 * k : the key (the unaligned variable-length array of bytes)
267 * length : the length of the key, counting by bytes
268 * initval : can be any 4-byte value
269 * Returns a 32-bit value. Every bit of the key affects every bit of
270 * the return value. Two keys differing by one or two bits will have
271 * totally different hash values.
272 *
273 * The best hash table sizes are powers of 2. There is no need to do
274 * mod a prime (mod is sooo slow!). If you need less than 32 bits,
275 * use a bitmask. For example, if you need only 10 bits, do
276 * h = (h & hashmask(10));
277 * In which case, the hash table should have hashsize(10) elements.
278 *
279 * If you are hashing n strings (uint8_t **)k, do it like this:
280 * for (i=0, h=0; i<n; ++i) h = hashlittle( k[i], len[i], h);
281 *
282 * By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this
283 * code any way you wish, private, educational, or commercial. It's free.
284 *
285 * Use for hash table lookup, or anything where one collision in 2^^32 is
286 * acceptable. Do NOT use for cryptographic purposes.
287 */
LTTNG_NO_SANITIZE_ADDRESS
__attribute__((unused)) static uint32_t hashlittle(const void *key, size_t length, uint32_t initval)
{
	uint32_t a, b, c;
	union {
		const void *ptr;
		size_t i;
	} u; /* needed for Mac Powerbook G4 */

	/* Set up the internal state */
	a = b = c = 0xdeadbeef + ((uint32_t) length) + initval;

	u.ptr = key;
	if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
		/* Fast path: key is 4-byte aligned on a little-endian machine. */
		const uint32_t *k = (const uint32_t *) key; /* read 32-bit chunks */

		/*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
		while (length > 12) {
			a += k[0];
			b += k[1];
			c += k[2];
			mix(a, b, c);
			length -= 12;
			k += 3;
		}

		/*
		 * "k[2]&0xffffff" actually reads beyond the end of the string, but
		 * then masks off the part it's not allowed to read. Because the
		 * string is aligned, the masked-off tail is in the same word as the
		 * rest of the string. Every machine with memory protection I've seen
		 * does it on word boundaries, so is OK with this. But VALGRIND will
		 * still catch it and complain. The masking trick does make the hash
		 * noticeably faster for short strings (like English words).
		 */
#ifndef VALGRIND

		switch (length) {
		case 12:
			c += k[2];
			b += k[1];
			a += k[0];
			break;
		case 11:
			c += k[2] & 0xffffff;
			b += k[1];
			a += k[0];
			break;
		case 10:
			c += k[2] & 0xffff;
			b += k[1];
			a += k[0];
			break;
		case 9:
			c += k[2] & 0xff;
			b += k[1];
			a += k[0];
			break;
		case 8:
			b += k[1];
			a += k[0];
			break;
		case 7:
			b += k[1] & 0xffffff;
			a += k[0];
			break;
		case 6:
			b += k[1] & 0xffff;
			a += k[0];
			break;
		case 5:
			b += k[1] & 0xff;
			a += k[0];
			break;
		case 4:
			a += k[0];
			break;
		case 3:
			a += k[0] & 0xffffff;
			break;
		case 2:
			a += k[0] & 0xffff;
			break;
		case 1:
			a += k[0] & 0xff;
			break;
		case 0:
			return c; /* zero length strings require no mixing */
		}
#else /* make valgrind happy: byte-exact reads of the tail, no over-read */
		const uint8_t *k8;

		k8 = (const uint8_t *) k;
		switch (length) {
		case 12:
			c += k[2];
			b += k[1];
			a += k[0];
			break;
		case 11:
			c += ((uint32_t) k8[10]) << 16; /* fall through */
		case 10:
			c += ((uint32_t) k8[9]) << 8; /* fall through */
		case 9:
			c += k8[8]; /* fall through */
		case 8:
			b += k[1];
			a += k[0];
			break;
		case 7:
			b += ((uint32_t) k8[6]) << 16; /* fall through */
		case 6:
			b += ((uint32_t) k8[5]) << 8; /* fall through */
		case 5:
			b += k8[4]; /* fall through */
		case 4:
			a += k[0];
			break;
		case 3:
			a += ((uint32_t) k8[2]) << 16; /* fall through */
		case 2:
			a += ((uint32_t) k8[1]) << 8; /* fall through */
		case 1:
			a += k8[0];
			break;
		case 0:
			return c;
		}
#endif /* !valgrind */
	} else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
		/* Key is only 2-byte aligned on a little-endian machine. */
		const uint16_t *k = (const uint16_t *) key; /* read 16-bit chunks */
		const uint8_t *k8;

		/*--------------- all but last block: aligned reads and different mixing */
		while (length > 12) {
			a += k[0] + (((uint32_t) k[1]) << 16);
			b += k[2] + (((uint32_t) k[3]) << 16);
			c += k[4] + (((uint32_t) k[5]) << 16);
			mix(a, b, c);
			length -= 12;
			k += 6;
		}

		/*----------------------------- handle the last (probably partial) block */
		k8 = (const uint8_t *) k;
		switch (length) {
		case 12:
			c += k[4] + (((uint32_t) k[5]) << 16);
			b += k[2] + (((uint32_t) k[3]) << 16);
			a += k[0] + (((uint32_t) k[1]) << 16);
			break;
		case 11:
			c += ((uint32_t) k8[10]) << 16; /* fall through */
		case 10:
			c += k[4];
			b += k[2] + (((uint32_t) k[3]) << 16);
			a += k[0] + (((uint32_t) k[1]) << 16);
			break;
		case 9:
			c += k8[8]; /* fall through */
		case 8:
			b += k[2] + (((uint32_t) k[3]) << 16);
			a += k[0] + (((uint32_t) k[1]) << 16);
			break;
		case 7:
			b += ((uint32_t) k8[6]) << 16; /* fall through */
		case 6:
			b += k[2];
			a += k[0] + (((uint32_t) k[1]) << 16);
			break;
		case 5:
			b += k8[4]; /* fall through */
		case 4:
			a += k[0] + (((uint32_t) k[1]) << 16);
			break;
		case 3:
			a += ((uint32_t) k8[2]) << 16; /* fall through */
		case 2:
			a += k[0];
			break;
		case 1:
			a += k8[0];
			break;
		case 0:
			return c; /* zero length requires no mixing */
		}

	} else { /* need to read the key one byte at a time */
		const uint8_t *k = (const uint8_t *) key;

		/*--------------- all but the last block: affect some 32 bits of (a,b,c) */
		while (length > 12) {
			a += k[0];
			a += ((uint32_t) k[1]) << 8;
			a += ((uint32_t) k[2]) << 16;
			a += ((uint32_t) k[3]) << 24;
			b += k[4];
			b += ((uint32_t) k[5]) << 8;
			b += ((uint32_t) k[6]) << 16;
			b += ((uint32_t) k[7]) << 24;
			c += k[8];
			c += ((uint32_t) k[9]) << 8;
			c += ((uint32_t) k[10]) << 16;
			c += ((uint32_t) k[11]) << 24;
			mix(a, b, c);
			length -= 12;
			k += 12;
		}

		switch (length) { /* all the case statements fall through */
		case 12:
			c += ((uint32_t) k[11]) << 24; /* fall through */
		case 11:
			c += ((uint32_t) k[10]) << 16; /* fall through */
		case 10:
			c += ((uint32_t) k[9]) << 8; /* fall through */
		case 9:
			c += k[8]; /* fall through */
		case 8:
			b += ((uint32_t) k[7]) << 24; /* fall through */
		case 7:
			b += ((uint32_t) k[6]) << 16; /* fall through */
		case 6:
			b += ((uint32_t) k[5]) << 8; /* fall through */
		case 5:
			b += k[4]; /* fall through */
		case 4:
			a += ((uint32_t) k[3]) << 24; /* fall through */
		case 3:
			a += ((uint32_t) k[2]) << 16; /* fall through */
		case 2:
			a += ((uint32_t) k[1]) << 8; /* fall through */
		case 1:
			a += k[0];
			break;
		case 0:
			return c;
		}
	}

	final(a, b, c);
	return c;
}
529
530 unsigned long hash_key_u64(const void *_key, unsigned long seed)
531 {
532 union {
533 uint64_t v64;
534 uint32_t v32[2];
535 } v;
536 union {
537 uint64_t v64;
538 uint32_t v32[2];
539 } key;
540
541 v.v64 = (uint64_t) seed;
542 key.v64 = *(const uint64_t *) _key;
543 hashword2(key.v32, 2, &v.v32[0], &v.v32[1]);
544 return v.v64;
545 }
546
547 #if (CAA_BITS_PER_LONG == 64)
548 /*
549 * Hash function for number value.
550 * Pass the value itself as the key, not its address.
551 */
552 unsigned long hash_key_ulong(const void *_key, unsigned long seed)
553 {
554 uint64_t __key = (uint64_t) _key;
555 return (unsigned long) hash_key_u64(&__key, seed);
556 }
557 #else
558 /*
559 * Hash function for number value.
560 * Pass the value itself as the key, not its address.
561 */
562 unsigned long hash_key_ulong(const void *_key, unsigned long seed)
563 {
564 uint32_t key = (uint32_t) _key;
565
566 return hashword(&key, 1, seed);
567 }
568 #endif /* CAA_BITS_PER_LONG */
569
570 /*
571 * Hash function for string.
572 */
573 unsigned long hash_key_str(const void *key, unsigned long seed)
574 {
575 return hashlittle(key, strlen((const char *) key), seed);
576 }
577
578 /*
579 * Hash function for two uint64_t.
580 */
581 unsigned long hash_key_two_u64(const void *key, unsigned long seed)
582 {
583 const struct lttng_ht_two_u64 *k = (const struct lttng_ht_two_u64 *) key;
584
585 return hash_key_u64(&k->key1, seed) ^ hash_key_u64(&k->key2, seed);
586 }
587
588 /*
589 * Hash function compare for number value.
590 */
/*
 * Keys are stored directly in the pointer value, so matching is a plain
 * pointer comparison. Returns 1 on match, 0 otherwise.
 */
int hash_match_key_ulong(const void *key1, const void *key2)
{
	return (key1 == key2) ? 1 : 0;
}
599
600 /*
601 * Hash function compare for number value.
602 */
/*
 * Compare the two uint64_t values the keys point to.
 * Returns 1 on match, 0 otherwise.
 */
int hash_match_key_u64(const void *key1, const void *key2)
{
	const uint64_t lhs = *(const uint64_t *) key1;
	const uint64_t rhs = *(const uint64_t *) key2;

	return (lhs == rhs) ? 1 : 0;
}
611
612 /*
613 * Hash compare function for string.
614 */
/*
 * Compare the two NUL-terminated strings the keys point to.
 * Returns 1 on match, 0 otherwise.
 */
int hash_match_key_str(const void *key1, const void *key2)
{
	const char *str1 = (const char *) key1;
	const char *str2 = (const char *) key2;

	return (strcmp(str1, str2) == 0) ? 1 : 0;
}
623
624 /*
625 * Hash function compare two uint64_t.
626 */
627 int hash_match_key_two_u64(const void *key1, const void *key2)
628 {
629 const struct lttng_ht_two_u64 *k1 = (const struct lttng_ht_two_u64 *) key1;
630 const struct lttng_ht_two_u64 *k2 = (const struct lttng_ht_two_u64 *) key2;
631
632 if (hash_match_key_u64(&k1->key1, &k2->key1) && hash_match_key_u64(&k1->key2, &k2->key2)) {
633 return 1;
634 }
635
636 return 0;
637 }