1 /*
2 * vim: filetype=c:tabstop=4:ai:expandtab
3 * SPDX-License-Identifier: BSD-1-Clause
4 * scspell-id: 570dd12a-f630-11ec-9b96-80ee73e9b8e7
5 *
6 * ---------------------------------------------------------------------------
7 *
8 * Copyright (c) 2003-2021 Troy D. Hanson
9 * https://troydhanson.github.io/uthash/
10 * Copyright (c) 2021-2022 The DPS8M Development Team
11 *
12 * All rights reserved.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are met:
16 *
17 * * Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
21 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
23 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
24 * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
25 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
26 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
27 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
28 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
29 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
30 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 * ---------------------------------------------------------------------------
33 */
34
35 #ifndef UTHASH_H
36 # define UTHASH_H
37
38 # include <string.h> /* memcmp,strlen */
39 # include <signal.h> /* raise */
40 # include <stddef.h> /* ptrdiff_t */
41 # include <stdlib.h>
42
43 /*
44 * These macros use decltype or the earlier __typeof GNU extension.
45 * As decltype is only available in newer compilers (VS2010 or gcc 4.3+
46 * when compiling c++ source) this code uses whatever method is needed
47 * or, for VS2008 where neither is available, uses casting workarounds.
48 */
49
50 # ifdef _MSC_VER
51 # if _MSC_VER >= 1600 && defined(__cplusplus)
52 # define DECLTYPE(x) (decltype(x))
53 # else
54 # define NO_DECLTYPE
55 # define DECLTYPE(x)
56 # endif
57 # else
58 # define DECLTYPE(x) (__typeof(x))
59 # endif
60
61 # ifdef NO_DECLTYPE
62 # define DECLTYPE_ASSIGN(dst,src) \
63 do { \
64 char **_da_dst = (char**)(&(dst)); \
65 *_da_dst = (char*)(src); \
66 } while(0)
67 # else
68 # define DECLTYPE_ASSIGN(dst,src) \
69 do { \
70 (dst) = DECLTYPE(dst)(src); \
71 } while(0)
72 # endif
73
74 /*
75 * a number of the hash function use
76 * uint32_t which isn't defined on win32
77 */
78
79 # ifdef _MSC_VER
80 typedef unsigned int uint32_t;
81 typedef unsigned char uint8_t;
82 # else
83 # include <inttypes.h> /* uint32_t */
84 # endif
85
86 # define UTHASH_VERSION 21.9.8
87
# undef FREE
# ifdef TESTING
#  define FREE(p) free(p)
# else
/*
 * FREE: release p and then reset it to NULL, so a stale copy of the
 * pointer cannot be double-freed or dereferenced after the free.
 * (TESTING builds use a plain free(p) with no pointer reset.)
 */
#  define FREE(p) do \
   { \
     free((p)); \
     (p) = NULL; \
   } while(0)
# endif /* ifdef TESTING */
98
99 # ifndef uthash_fatal
100 # define uthash_fatal(msg) abort() /* fatal error (out of memory,etc) */
101 # endif
102 # ifndef uthash_malloc
103 # define uthash_malloc(sz) malloc(sz) /* malloc fcn */
104 # endif
105 # ifndef uthash_free
106 # define uthash_free(ptr,sz) FREE(ptr) /* free fcn */
107 # endif
108
109 # ifndef uthash_noexpand_fyi
110 # define uthash_noexpand_fyi(tbl) /* can be defined to log noexpand */
111 # endif
112 # ifndef uthash_expand_fyi
113 # define uthash_expand_fyi(tbl) /* can be defined to log expands */
114 # endif
115
116 /* initial number of buckets */
117 # define HASH_INITIAL_NUM_BUCKETS 32 /* initial number of buckets */
118 # define HASH_INITIAL_NUM_BUCKETS_LOG2 5 /* lg2 of initial number of buckets */
119 # define HASH_BKT_CAPACITY_THRESH 10 /* expand when bucket count reaches */
120
/* calculate the element whose hash handle address is hhp */
# define ELMT_FROM_HH(tbl,hhp) ((void*)(((char*)(hhp)) - ((tbl)->hho)))
123
/*
 * HASH_FIND: look up the item whose key matches (keyptr,keylen) and store
 * it in "out" (NULL if absent or the hash is empty). Hashes the key with
 * HASH_FCN, consults the optional bloom filter to skip definite misses,
 * then walks the single bucket the key can live in.
 */
# define HASH_FIND(hh,head,keyptr,keylen,out)                                 \
do {                                                                          \
  unsigned _hf_bkt,_hf_hashv;                                                 \
  out=NULL;                                                                   \
  if (head) {                                                                 \
    HASH_FCN(keyptr,keylen, (head)->hh.tbl->num_buckets, _hf_hashv, _hf_bkt); \
    if (HASH_BLOOM_TEST((head)->hh.tbl, _hf_hashv)) {                         \
      HASH_FIND_IN_BKT((head)->hh.tbl, hh, (head)->hh.tbl->buckets[ _hf_bkt ], \
                       keyptr,keylen,out);                                    \
    }                                                                         \
  }                                                                           \
} while (0)
136
137 # ifdef HASH_BLOOM
/*
 * Bloom filter sizing: bit length is 2^HASH_BLOOM; byte length is
 * ceil(bits/8). HASH_BLOOM_BYTELEN is fully parenthesized so its
 * expansion is safe inside any surrounding expression (the previous
 * form "(a/8) + (b?1:0)" mis-grouped under operator precedence, e.g.
 * in "2 * HASH_BLOOM_BYTELEN").
 */
#  define HASH_BLOOM_BITLEN (1ULL << HASH_BLOOM)
#  define HASH_BLOOM_BYTELEN ((HASH_BLOOM_BITLEN/8) + ((HASH_BLOOM_BITLEN%8) ? 1:0))
140 # define HASH_BLOOM_MAKE(tbl) \
141 do { \
142 (tbl)->bloom_nbits = HASH_BLOOM; \
143 (tbl)->bloom_bv = (uint8_t*)uthash_malloc(HASH_BLOOM_BYTELEN); \
144 if (!((tbl)->bloom_bv)) { uthash_fatal( "out of memory"); } \
145 memset((tbl)->bloom_bv, 0, HASH_BLOOM_BYTELEN); \
146 (tbl)->bloom_sig = HASH_BLOOM_SIGNATURE; \
147 } while (0)
148
149 # define HASH_BLOOM_FREE(tbl) \
150 do { \
151 uthash_free((tbl)->bloom_bv, HASH_BLOOM_BYTELEN); \
152 } while (0)
153
154 # define HASH_BLOOM_BITSET(bv,idx) (bv[(idx)/8] |= (1U << ((idx)%8)))
155 # define HASH_BLOOM_BITTEST(bv,idx) (bv[(idx)/8] & (1U << ((idx)%8)))
156
157 # define HASH_BLOOM_ADD(tbl,hashv) \
158 HASH_BLOOM_BITSET((tbl)->bloom_bv, (hashv & (uint32_t)((1ULL << (tbl)->bloom_nbits) - 1)))
159
160 # define HASH_BLOOM_TEST(tbl,hashv) \
161 HASH_BLOOM_BITTEST((tbl)->bloom_bv, (hashv & (uint32_t)((1ULL << (tbl)->bloom_nbits) - 1)))
162
163 # else
164 # define HASH_BLOOM_MAKE(tbl)
165 # define HASH_BLOOM_FREE(tbl)
166 # define HASH_BLOOM_ADD(tbl,hashv)
167 # define HASH_BLOOM_TEST(tbl,hashv) (1)
168 # define HASH_BLOOM_BYTELEN 0
169 # endif
170
/*
 * HASH_MAKE_TABLE: allocate and initialize the shared UT_hash_table for a
 * hash whose first item is "head". Invoked once, when the first item is
 * added. Allocates the zeroed bucket array (HASH_INITIAL_NUM_BUCKETS
 * entries) and the optional bloom filter, and records hho, the byte
 * offset of the hash handle within an element (used by ELMT_FROM_HH to
 * map a handle back to its element).
 */
# define HASH_MAKE_TABLE(hh,head)                                             \
do {                                                                          \
  (head)->hh.tbl = (UT_hash_table*)uthash_malloc(                             \
                  sizeof(UT_hash_table));                                     \
  if (!((head)->hh.tbl)) { uthash_fatal( "out of memory"); }                  \
  memset((head)->hh.tbl, 0, sizeof(UT_hash_table));                           \
  (head)->hh.tbl->tail = &((head)->hh);                                       \
  (head)->hh.tbl->num_buckets = HASH_INITIAL_NUM_BUCKETS;                     \
  (head)->hh.tbl->log2_num_buckets = HASH_INITIAL_NUM_BUCKETS_LOG2;           \
  (head)->hh.tbl->hho = (char*)(&(head)->hh) - (char*)(head);                 \
  (head)->hh.tbl->buckets = (UT_hash_bucket*)uthash_malloc(                   \
          HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket));            \
  if (! (head)->hh.tbl->buckets) { uthash_fatal( "out of memory"); }          \
  memset((head)->hh.tbl->buckets, 0,                                          \
          HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket));            \
  HASH_BLOOM_MAKE((head)->hh.tbl);                                            \
  (head)->hh.tbl->signature = HASH_SIGNATURE;                                 \
} while(0)
189
190 # define HASH_ADD(hh,head,fieldname,keylen_in,add) \
191 HASH_ADD_KEYPTR(hh,head,&((add)->fieldname),keylen_in,add)
192
/*
 * HASH_REPLACE: add "add", first deleting any existing item with the same
 * key. "replaced" receives the displaced item, or NULL if there was none;
 * the displaced item is NOT freed here. (A stray ";" after the if-block,
 * which expanded to an empty statement, has been removed.)
 */
# define HASH_REPLACE(hh,head,fieldname,keylen_in,add,replaced)               \
do {                                                                          \
  replaced=NULL;                                                              \
  HASH_FIND(hh,head,&((add)->fieldname),keylen_in,replaced);                  \
  if (replaced!=NULL) {                                                       \
    HASH_DELETE(hh,head,replaced);                                            \
  }                                                                           \
  HASH_ADD(hh,head,fieldname,keylen_in,add);                                  \
} while(0)
202
/*
 * HASH_ADD_KEYPTR: add item "add" whose key lives at an arbitrary address
 * (keyptr,keylen_in) rather than inside the item. On the first add the
 * table is created; otherwise the item is appended at the tail of the
 * app-order doubly-linked list. The item is then hashed into its bucket
 * and (if enabled) recorded in the bloom filter. No duplicate-key check
 * is performed here.
 */
# define HASH_ADD_KEYPTR(hh,head,keyptr,keylen_in,add)                        \
do {                                                                          \
 unsigned _ha_bkt;                                                            \
 (add)->hh.next = NULL;                                                       \
 (add)->hh.key = (const void*)keyptr;                                         \
 (add)->hh.keylen = (unsigned)keylen_in;                                      \
 if (!(head)) {                                                               \
    head = (add);                                                             \
    (head)->hh.prev = NULL;                                                   \
    HASH_MAKE_TABLE(hh,head);                                                 \
 } else {                                                                     \
    (head)->hh.tbl->tail->next = (add);                                       \
    (add)->hh.prev = ELMT_FROM_HH((head)->hh.tbl, (head)->hh.tbl->tail);      \
    (head)->hh.tbl->tail = &((add)->hh);                                      \
 }                                                                            \
 (head)->hh.tbl->num_items++;                                                 \
 (add)->hh.tbl = (head)->hh.tbl;                                              \
 HASH_FCN(keyptr,keylen_in, (head)->hh.tbl->num_buckets,                      \
         (add)->hh.hashv, _ha_bkt);                                           \
 HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt],&(add)->hh);                \
 HASH_BLOOM_ADD((head)->hh.tbl,(add)->hh.hashv);                              \
 HASH_EMIT_KEY(hh,head,keyptr,keylen_in);                                     \
 HASH_FSCK(hh,head);                                                          \
} while(0)
227
/*
 * HASH_TO_BKT: map a hash value to a bucket index. Relies on the bucket
 * count always being a power of two, so masking with (num_bkts-1) is
 * equivalent to (and cheaper than) a modulo.
 */
# define HASH_TO_BKT( hashv, num_bkts, bkt )                                  \
do {                                                                          \
  bkt = ((hashv) & ((num_bkts) - 1));                                         \
} while(0)
232
233 /*
234 * delete "delptr" from the hash table.
235 * "the usual" patch-up process for the app-order doubly-linked-list.
236 * The use of _hd_hh_del below deserves special explanation.
237 * These used to be expressed using (delptr) but that led to a bug
238 * if someone used the same symbol for the head and deletee, like
239 * HASH_DELETE(hh,users,users);
240 * We want that to work, but by changing the head (users) below
241 * we were forfeiting our ability to further refer to the deletee (users)
242 * in the patch-up process. Solution: use scratch space to
243 * copy the deletee pointer, then the latter references are via that
244 * scratch pointer rather than through the repointed (users) symbol.
245 */
246
/*
 * HASH_DELETE: unlink "delptr" from the hash (see the explanation above
 * regarding the _hd_hh_del scratch pointer). If it is the sole remaining
 * item the whole table is freed and head becomes NULL; otherwise the
 * app-order list, the tail pointer (if needed), and the bucket chain are
 * all patched around it. The item itself is NOT freed.
 */
# define HASH_DELETE(hh,head,delptr)                                          \
do {                                                                          \
    unsigned _hd_bkt;                                                         \
    struct UT_hash_handle *_hd_hh_del;                                        \
    if ( ((delptr)->hh.prev == NULL) && ((delptr)->hh.next == NULL) ) {       \
        /* deleting the only item: discard the entire table */                \
        uthash_free((head)->hh.tbl->buckets,                                  \
                    (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket) ); \
        HASH_BLOOM_FREE((head)->hh.tbl);                                      \
        uthash_free((head)->hh.tbl, sizeof(UT_hash_table));                   \
        head = NULL;                                                          \
    } else {                                                                  \
        _hd_hh_del = &((delptr)->hh);                                         \
        if ((delptr) == ELMT_FROM_HH((head)->hh.tbl,(head)->hh.tbl->tail)) {  \
            /* deleting the tail: new tail is the predecessor's handle */     \
            (head)->hh.tbl->tail =                                            \
                (UT_hash_handle*)((ptrdiff_t)((delptr)->hh.prev) +            \
                (head)->hh.tbl->hho);                                         \
        }                                                                     \
        if ((delptr)->hh.prev) {                                              \
            ((UT_hash_handle*)((ptrdiff_t)((delptr)->hh.prev) +               \
                (head)->hh.tbl->hho))->next = (delptr)->hh.next;              \
        } else {                                                              \
            /* deleting the head: repoint it (hence the scratch copy) */      \
            DECLTYPE_ASSIGN(head,(delptr)->hh.next);                          \
        }                                                                     \
        if (_hd_hh_del->next) {                                               \
            ((UT_hash_handle*)((ptrdiff_t)_hd_hh_del->next +                  \
                (head)->hh.tbl->hho))->prev =                                 \
                _hd_hh_del->prev;                                             \
        }                                                                     \
        HASH_TO_BKT( _hd_hh_del->hashv, (head)->hh.tbl->num_buckets, _hd_bkt); \
        HASH_DEL_IN_BKT(hh,(head)->hh.tbl->buckets[_hd_bkt], _hd_hh_del);     \
        (head)->hh.tbl->num_items--;                                          \
    }                                                                         \
    HASH_FSCK(hh,head);                                                       \
} while (0)
281
282 /*
283 * convenience forms of
284 * HASH_FIND/HASH_ADD/HASH_DEL
285 */
286
287 # define HASH_FIND_STR(head,findstr,out) \
288 HASH_FIND(hh,head,findstr,strlen(findstr),out)
289 # define HASH_ADD_STR(head,strfield,add) \
290 HASH_ADD(hh,head,strfield,strlen(add->strfield),add)
291 # define HASH_REPLACE_STR(head,strfield,add,replaced) \
292 HASH_REPLACE(hh,head,strfield,strlen(add->strfield),add,replaced)
293 # define HASH_FIND_INT(head,findint,out) \
294 HASH_FIND(hh,head,findint,sizeof(int),out)
295 # define HASH_ADD_INT(head,intfield,add) \
296 HASH_ADD(hh,head,intfield,sizeof(int),add)
297 # define HASH_REPLACE_INT(head,intfield,add,replaced) \
298 HASH_REPLACE(hh,head,intfield,sizeof(int),add,replaced)
299 # define HASH_FIND_PTR(head,findptr,out) \
300 HASH_FIND(hh,head,findptr,sizeof(void *),out)
301 # define HASH_ADD_PTR(head,ptrfield,add) \
302 HASH_ADD(hh,head,ptrfield,sizeof(void *),add)
/*
 * HASH_REPLACE_PTR: now takes "replaced" as a parameter like the other
 * HASH_REPLACE_* convenience macros. Previously the macro expanded the
 * bare identifier "replaced" without declaring it as a parameter, so it
 * only compiled if the call site happened to have a variable literally
 * named "replaced" in scope.
 */
# define HASH_REPLACE_PTR(head,ptrfield,add,replaced)                         \
    HASH_REPLACE(hh,head,ptrfield,sizeof(void *),add,replaced)
305 # define HASH_DEL(head,delptr) \
306 HASH_DELETE(hh,head,delptr)
307
308 /*
309 * HASH_FSCK checks hash integrity on every
310 * add/delete when HASH_DEBUG is defined.
311 *
312 * This is for uthash developer only; it compiles
313 * away if HASH_DEBUG isn't defined.
314 */
315
316 # ifdef HASH_DEBUG
317 # define HASH_OOPS(...) do { fprintf(stderr,__VA_ARGS__); abort(); } while (0)
318 # define HASH_FSCK(hh,head) \
319 do { \
320 unsigned _bkt_i; \
321 unsigned _count, _bkt_count; \
322 char *_prev; \
323 struct UT_hash_handle *_thh; \
324 if (head) { \
325 _count = 0; \
326 for( _bkt_i = 0; _bkt_i < (head)->hh.tbl->num_buckets; _bkt_i++) { \
327 _bkt_count = 0; \
328 _thh = (head)->hh.tbl->buckets[_bkt_i].hh_head; \
329 _prev = NULL; \
330 while (_thh) { \
331 if (_prev != (char*)(_thh->hh_prev)) { \
332 HASH_OOPS("invalid hh_prev %p, actual %p\n", \
333 _thh->hh_prev, _prev ); \
334 } \
335 _bkt_count++; \
336 _prev = (char*)(_thh); \
337 _thh = _thh->hh_next; \
338 } \
339 _count += _bkt_count; \
340 if ((head)->hh.tbl->buckets[_bkt_i].count != _bkt_count) { \
341 HASH_OOPS("invalid bucket count %d, actual %d\n", \
342 (head)->hh.tbl->buckets[_bkt_i].count, _bkt_count); \
343 } \
344 } \
345 if (_count != (head)->hh.tbl->num_items) { \
346 HASH_OOPS("invalid hh item count %d, actual %d\n", \
347 (head)->hh.tbl->num_items, _count ); \
348 } \
349 /* traverse hh in app order; check next/prev integrity, count */ \
350 _count = 0; \
351 _prev = NULL; \
352 _thh = &(head)->hh; \
353 while (_thh) { \
354 _count++; \
355 if (_prev !=(char*)(_thh->prev)) { \
356 HASH_OOPS("invalid prev %p, actual %p\n", \
357 _thh->prev, _prev ); \
358 } \
359 _prev = (char*)ELMT_FROM_HH((head)->hh.tbl, _thh); \
360 _thh = ( _thh->next ? (UT_hash_handle*)((char*)(_thh->next) + \
361 (head)->hh.tbl->hho) : NULL ); \
362 } \
363 if (_count != (head)->hh.tbl->num_items) { \
364 HASH_OOPS("invalid app item count %d, actual %d\n", \
365 (head)->hh.tbl->num_items, _count ); \
366 } \
367 } \
368 } while (0)
369 # else
370 # define HASH_FSCK(hh,head)
371 # endif
372
373 /*
374 * When compiled with -DHASH_EMIT_KEYS,
375 * length-prefixed keys are emitted to
376 * the descriptor to which this macro is
377 * defined for tuning the hash function.
378 * The app can #include <unistd.h> to
379 * get the prototype for write(2).
380 */
381
382 # ifdef HASH_EMIT_KEYS
383 # define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) \
384 do { \
385 unsigned _klen = fieldlen; \
386 write(HASH_EMIT_KEYS, &_klen, sizeof(_klen)); \
387 write(HASH_EMIT_KEYS, keyptr, fieldlen); \
388 } while (0)
389 # else
390 # define HASH_EMIT_KEY(hh,head,keyptr,fieldlen)
391 # endif
392
393 /*
394 * default to Jenkin's hash unless overridden
395 * e.g. DHASH_FUNCTION=HASH_SAX
396 */
397
398 # ifdef HASH_FUNCTION
399 # define HASH_FCN HASH_FUNCTION
400 # else
401 # define HASH_FCN HASH_JEN
402 # endif
403
404 /*
405 * The Bernstein hash function,
406 * used in Perl prior to v5.6
407 */
408
409 # define HASH_BER(key,keylen,num_bkts,hashv,bkt) \
410 do { \
411 unsigned _hb_keylen=keylen; \
412 char *_hb_key=(char*)(key); \
413 (hashv) = 0; \
414 while (_hb_keylen--) { (hashv) = ((hashv) * 33) + *_hb_key++; } \
415 bkt = (hashv) & (num_bkts-1); \
416 } while (0)
417
418 /*
419 * SAX/FNV/OAT/JEN hash functions are macro variants of those listed at:
420 * https://web.archive.org/web/20190310205940/http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx
421 */
422
423 # define HASH_SAX(key,keylen,num_bkts,hashv,bkt) \
424 do { \
425 unsigned _sx_i; \
426 char *_hs_key=(char*)(key); \
427 hashv = 0; \
428 for(_sx_i=0; _sx_i < keylen; _sx_i++) \
429 hashv ^= (hashv << 5) + (hashv >> 2) + _hs_key[_sx_i]; \
430 bkt = hashv & (num_bkts-1); \
431 } while (0)
432
433 # define HASH_FNV(key,keylen,num_bkts,hashv,bkt) \
434 do { \
435 unsigned _fn_i; \
436 char *_hf_key=(char*)(key); \
437 hashv = 2166136261UL; \
438 for(_fn_i=0; _fn_i < keylen; _fn_i++) \
439 hashv = (hashv * 16777619) ^ _hf_key[_fn_i]; \
440 bkt = hashv & (num_bkts-1); \
441 } while(0)
442
443 # define HASH_OAT(key,keylen,num_bkts,hashv,bkt) \
444 do { \
445 unsigned _ho_i; \
446 char *_ho_key=(char*)(key); \
447 hashv = 0; \
448 for(_ho_i=0; _ho_i < keylen; _ho_i++) { \
449 hashv += _ho_key[_ho_i]; \
450 hashv += (hashv << 10); \
451 hashv ^= (hashv >> 6); \
452 } \
453 hashv += (hashv << 3); \
454 hashv ^= (hashv >> 11); \
455 hashv += (hashv << 15); \
456 bkt = hashv & (num_bkts-1); \
457 } while(0)
458
/* mixing step used by HASH_JEN: repeatedly scrambles a, b and c together */
# define HASH_JEN_MIX(a,b,c)                                                  \
do {                                                                          \
  a -= b; a -= c; a ^= ( c >> 13 );                                           \
  b -= c; b -= a; b ^= ( a << 8 );                                            \
  c -= a; c -= b; c ^= ( b >> 13 );                                           \
  a -= b; a -= c; a ^= ( c >> 12 );                                           \
  b -= c; b -= a; b ^= ( a << 16 );                                           \
  c -= a; c -= b; c ^= ( b >> 5 );                                            \
  a -= b; a -= c; a ^= ( c >> 3 );                                            \
  b -= c; b -= a; b ^= ( a << 10 );                                           \
  c -= a; c -= b; c ^= ( b >> 15 );                                           \
} while (0)
471
/*
 * HASH_JEN: Bob Jenkins's hash. Mixes the key 12 bytes at a time, then
 * folds in the 0-11 remaining bytes via the switch below, whose cases
 * deliberately fall through so every trailing byte is accumulated.
 */
# define HASH_JEN(key,keylen,num_bkts,hashv,bkt)                              \
do {                                                                          \
  unsigned _hj_i,_hj_j,_hj_k;                                                 \
  unsigned char *_hj_key=(unsigned char*)(key);                               \
  hashv = 0xfeedbeef;                                                         \
  _hj_i = _hj_j = 0x9e3779b9;                                                 \
  _hj_k = (unsigned)keylen;                                                   \
  while (_hj_k >= 12) {                                                       \
    _hj_i += (_hj_key[0] + ( (unsigned)_hj_key[1] << 8 )                      \
        + ( (unsigned)_hj_key[2] << 16 )                                      \
        + ( (unsigned)_hj_key[3] << 24 ) );                                   \
    _hj_j += (_hj_key[4] + ( (unsigned)_hj_key[5] << 8 )                      \
        + ( (unsigned)_hj_key[6] << 16 )                                      \
        + ( (unsigned)_hj_key[7] << 24 ) );                                   \
    hashv += (_hj_key[8] + ( (unsigned)_hj_key[9] << 8 )                      \
        + ( (unsigned)_hj_key[10] << 16 )                                     \
        + ( (unsigned)_hj_key[11] << 24 ) );                                  \
                                                                              \
     HASH_JEN_MIX(_hj_i, _hj_j, hashv);                                       \
                                                                              \
     _hj_key += 12;                                                           \
     _hj_k -= 12;                                                             \
  }                                                                           \
  hashv += keylen;                                                            \
  switch ( _hj_k ) {                                                          \
     case 11: hashv += ( (unsigned)_hj_key[10] << 24 ); /* FALLTHROUGH */     \
     case 10: hashv += ( (unsigned)_hj_key[9] << 16 );  /* FALLTHROUGH */     \
     case 9:  hashv += ( (unsigned)_hj_key[8] << 8 );   /* FALLTHROUGH */     \
     case 8:  _hj_j += ( (unsigned)_hj_key[7] << 24 );  /* FALLTHROUGH */     \
     case 7:  _hj_j += ( (unsigned)_hj_key[6] << 16 );  /* FALLTHROUGH */     \
     case 6:  _hj_j += ( (unsigned)_hj_key[5] << 8 );   /* FALLTHROUGH */     \
     case 5:  _hj_j += _hj_key[4];                      /* FALLTHROUGH */     \
     case 4:  _hj_i += ( (unsigned)_hj_key[3] << 24 );  /* FALLTHROUGH */     \
     case 3:  _hj_i += ( (unsigned)_hj_key[2] << 16 );  /* FALLTHROUGH */     \
     case 2:  _hj_i += ( (unsigned)_hj_key[1] << 8 );   /* FALLTHROUGH */     \
     case 1:  _hj_i += _hj_key[0];                                            \
  }                                                                           \
  HASH_JEN_MIX(_hj_i, _hj_j, hashv);                                          \
  bkt = hashv & (num_bkts-1);                                                 \
} while(0)
512
513 /*
514 * The Paul Hsieh
515 * hash function
516 */
517
518 # undef get16bits
519 # if ( defined (__GNUC__) && defined (__i386__) )
520 # define get16bits(d) (*((const uint16_t *) (d)))
521 # endif
522
523 # if !defined (get16bits)
524 # define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8) \
525 +(uint32_t)(((const uint8_t *)(d))[0]) )
526 # endif
527 # define HASH_SFH(key,keylen,num_bkts,hashv,bkt) \
528 do { \
529 unsigned char *_sfh_key=(unsigned char*)(key); \
530 uint32_t _sfh_tmp, _sfh_len = keylen; \
531 \
532 int _sfh_rem = _sfh_len & 3; \
533 _sfh_len >>= 2; \
534 hashv = 0xcafebabe; \
535 \
536 /* Main loop */ \
537 for (;_sfh_len > 0; _sfh_len--) { \
538 hashv += get16bits (_sfh_key); \
539 _sfh_tmp = (uint32_t)(get16bits (_sfh_key+2)) << 11 ^ hashv; \
540 hashv = (hashv << 16) ^ _sfh_tmp; \
541 _sfh_key += 2*sizeof (uint16_t); \
542 hashv += hashv >> 11; \
543 } \
544 \
545 /* Handle end cases */ \
546 switch (_sfh_rem) { \
547 case 3: hashv += get16bits (_sfh_key); \
548 hashv ^= hashv << 16; \
549 hashv ^= (uint32_t)(_sfh_key[sizeof (uint16_t)] << 18); \
550 hashv += hashv >> 11; \
551 break; \
552 case 2: hashv += get16bits (_sfh_key); \
553 hashv ^= hashv << 11; \
554 hashv += hashv >> 17; \
555 break; \
556 case 1: hashv += *_sfh_key; \
557 hashv ^= hashv << 10; \
558 hashv += hashv >> 1; \
559 } \
560 \
561 /* Force "avalanching" of final 127 bits */ \
562 hashv ^= hashv << 3; \
563 hashv += hashv >> 5; \
564 hashv ^= hashv << 4; \
565 hashv += hashv >> 17; \
566 hashv ^= hashv << 25; \
567 hashv += hashv >> 6; \
568 bkt = hashv & (num_bkts-1); \
569 } while(0)
570
571 # ifdef HASH_USING_NO_STRICT_ALIASING
572
573 /*
574 * The MurmurHash exploits some CPU's
575 * (x86,x86_64) tolerance for unaligned reads.
576 *
577 * For other types of CPU's (e.g. SPARC) an
578 * unaligned read causes a bus error.
579 *
580 * MurmurHash uses the faster approach only
581 * on CPU's where we know it's safe.
582 */
583
584 # if (defined(__i386__) || defined(__x86_64__) || defined(_M_IX86))
585 # define MUR_GETBLOCK(p,i) p[i]
586 # else /* non intel */
587 # define MUR_PLUS0_ALIGNED(p) (((unsigned long)p & 0x3) == 0)
588 # define MUR_PLUS1_ALIGNED(p) (((unsigned long)p & 0x3) == 1)
589 # define MUR_PLUS2_ALIGNED(p) (((unsigned long)p & 0x3) == 2)
590 # define MUR_PLUS3_ALIGNED(p) (((unsigned long)p & 0x3) == 3)
591 # define WP(p) ((uint32_t*)((unsigned long)(p) & ~3UL))
592 # if (defined(__BIG_ENDIAN__) || defined(SPARC) || defined(__ppc__) || defined(__ppc64__))
593 # define MUR_THREE_ONE(p) ((((*WP(p))&0x00ffffff) << 8) | (((*(WP(p)+1))&0xff000000) >> 24))
594 # define MUR_TWO_TWO(p) ((((*WP(p))&0x0000ffff) <<16) | (((*(WP(p)+1))&0xffff0000) >> 16))
595 # define MUR_ONE_THREE(p) ((((*WP(p))&0x000000ff) <<24) | (((*(WP(p)+1))&0xffffff00) >> 8))
596 # else /* assume little endian non-intel */
597 # define MUR_THREE_ONE(p) ((((*WP(p))&0xffffff00) >> 8) | (((*(WP(p)+1))&0x000000ff) << 24))
598 # define MUR_TWO_TWO(p) ((((*WP(p))&0xffff0000) >>16) | (((*(WP(p)+1))&0x0000ffff) << 16))
599 # define MUR_ONE_THREE(p) ((((*WP(p))&0xff000000) >>24) | (((*(WP(p)+1))&0x00ffffff) << 8))
600 # endif
601 # define MUR_GETBLOCK(p,i) (MUR_PLUS0_ALIGNED(p) ? ((p)[i]) : \
602 (MUR_PLUS1_ALIGNED(p) ? MUR_THREE_ONE(p) : \
603 (MUR_PLUS2_ALIGNED(p) ? MUR_TWO_TWO(p) : \
604 MUR_ONE_THREE(p))))
605 # endif
606 # define MUR_ROTL32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
/*
 * MurmurHash3 32-bit finalization mix: forces full avalanche of the final
 * hash value. The multipliers are the canonical fmix32 constants; the
 * second previously carried a stray lowercase 'l' (long) suffix, which is
 * easy to misread as the digit 1 and needlessly widened the operand --
 * the constants are now explicitly unsigned.
 */
#  define MUR_FMIX(_h)                                                        \
do {                                                                          \
  _h ^= _h >> 16;                                                             \
  _h *= 0x85ebca6bu;                                                          \
  _h ^= _h >> 13;                                                             \
  _h *= 0xc2b2ae35u;                                                          \
  _h ^= _h >> 16;                                                             \
} while(0)
615
616 # define HASH_MUR(key,keylen,num_bkts,hashv,bkt) \
617 do { \
618 const uint8_t *_mur_data = (const uint8_t*)(key); \
619 const int _mur_nblocks = (keylen) / 4; \
620 uint32_t _mur_h1 = 0xf88D5353; \
621 uint32_t _mur_c1 = 0xcc9e2d51; \
622 uint32_t _mur_c2 = 0x1b873593; \
623 uint32_t _mur_k1 = 0; \
624 const uint8_t *_mur_tail; \
625 const uint32_t *_mur_blocks = (const uint32_t*)(_mur_data+_mur_nblocks*4); \
626 int _mur_i; \
627 for(_mur_i = -_mur_nblocks; _mur_i; _mur_i++) { \
628 _mur_k1 = MUR_GETBLOCK(_mur_blocks,_mur_i); \
629 _mur_k1 *= _mur_c1; \
630 _mur_k1 = MUR_ROTL32(_mur_k1,15); \
631 _mur_k1 *= _mur_c2; \
632 \
633 _mur_h1 ^= _mur_k1; \
634 _mur_h1 = MUR_ROTL32(_mur_h1,13); \
635 _mur_h1 = _mur_h1*5+0xe6546b64; \
636 } \
637 _mur_tail = (const uint8_t*)(_mur_data + _mur_nblocks*4); \
638 _mur_k1=0; \
639 switch((keylen) & 3) { \
640 case 3: _mur_k1 ^= _mur_tail[2] << 16; \
641 case 2: _mur_k1 ^= _mur_tail[1] << 8; \
642 case 1: _mur_k1 ^= _mur_tail[0]; \
643 _mur_k1 *= _mur_c1; \
644 _mur_k1 = MUR_ROTL32(_mur_k1,15); \
645 _mur_k1 *= _mur_c2; \
646 _mur_h1 ^= _mur_k1; \
647 } \
648 _mur_h1 ^= (keylen); \
649 MUR_FMIX(_mur_h1); \
650 hashv = _mur_h1; \
651 bkt = hashv & (num_bkts-1); \
652 } while(0)
653 # endif /* HASH_USING_NO_STRICT_ALIASING */
654
655 /*
656 * key comparison function;
657 * return 0 if keys equal
658 */
659
660 # define HASH_KEYCMP(a,b,len) memcmp(a,b,len)
661
662 /*
663 * iterate over items in a known
664 * bucket to find desired item
665 */
666
# define HASH_FIND_IN_BKT(tbl,hh,head,keyptr,keylen_in,out)                   \
do {                                                                          \
  /* start at the bucket's chain head (out = NULL if the bucket is empty) */  \
  if (head.hh_head) DECLTYPE_ASSIGN(out,ELMT_FROM_HH(tbl,head.hh_head));      \
  else out=NULL;                                                              \
  while (out) {                                                               \
    /* compare lengths first: cheap filter before the memcmp */               \
    if ((out)->hh.keylen == keylen_in) {                                      \
      if ((HASH_KEYCMP((out)->hh.key,keyptr,keylen_in)) == 0) break;          \
    }                                                                         \
    if ((out)->hh.hh_next)                                                    \
      DECLTYPE_ASSIGN(out,ELMT_FROM_HH(tbl,(out)->hh.hh_next));               \
    else out = NULL;                                                          \
  }                                                                           \
} while(0)
680
681 /*
682 * add an item
683 * to a bucket
684 */
685
# define HASH_ADD_TO_BKT(head,addhh)                                          \
do {                                                                          \
 /* push the new handle onto the front of the bucket's chain */               \
 head.count++;                                                                \
 (addhh)->hh_next = head.hh_head;                                             \
 (addhh)->hh_prev = NULL;                                                     \
 if (head.hh_head) { (head).hh_head->hh_prev = (addhh); }                     \
 (head).hh_head=addhh;                                                        \
 /* expand when this chain crosses its (possibly raised, see expand_mult) */  \
 /* threshold, unless expansion was declared ineffective for this table   */  \
 if (head.count >= ((head.expand_mult+1) * HASH_BKT_CAPACITY_THRESH)          \
     && (addhh)->tbl->noexpand != 1) {                                        \
       HASH_EXPAND_BUCKETS((addhh)->tbl);                                     \
 }                                                                            \
} while(0)
698
699 /*
700 * remove an item from
701 * a given bucket
702 */
703
704 # define HASH_DEL_IN_BKT(hh,head,hh_del) \
705 (head).count--; \
706 if ((head).hh_head == hh_del) { \
707 (head).hh_head = hh_del->hh_next; \
708 } \
709 if (hh_del->hh_prev) { \
710 hh_del->hh_prev->hh_next = hh_del->hh_next; \
711 } \
712 if (hh_del->hh_next) { \
713 hh_del->hh_next->hh_prev = hh_del->hh_prev; \
714 }
715
716 /*
717 * Bucket expansion has the effect of doubling the number of buckets
718 * and redistributing the items into the new buckets. Ideally the
719 * items will distribute more or less evenly into the new buckets
720 * (the extent to which this is true is a measure of the quality of
721 * the hash function as it applies to the key domain).
722 *
723 * With the items distributed into more buckets, the chain length
724 * (item count) in each bucket is reduced. Thus by expanding buckets
725 * the hash keeps a bound on the chain length. This bounded chain
726 * length is the essence of how a hash provides constant time lookup.
727 *
728 * The calculation of tbl->ideal_chain_maxlen below deserves some
729 * explanation. First, keep in mind that we're calculating the ideal
730 * maximum chain length based on the *new* (doubled) bucket count.
731 * In fractions this is just n/b (n=number of items,b=new num buckets).
732 * Since the ideal chain length is an integer, we want to calculate
733 * ceil(n/b). We don't depend on floating point arithmetic in this
734 * hash, so to calculate ceil(n/b) with integers we could write
735 *
736 * ceil(n/b) = (n/b) + ((n%b)?1:0)
737 *
738 * and in fact a previous version of this hash did just that.
739 * But now we have improved things a bit by recognizing that b is
740 * always a power of two. We keep its base 2 log handy (call it lb),
741 * so now we can write this with a bit shift and logical AND:
742 *
743 * ceil(n/b) = (n>>lb) + ( (n & (b-1)) ? 1:0)
744 */
745
746 # define HASH_EXPAND_BUCKETS(tbl) \
747 do { \
748 unsigned _he_bkt; \
749 unsigned _he_bkt_i; \
750 struct UT_hash_handle *_he_thh, *_he_hh_nxt; \
751 UT_hash_bucket *_he_new_buckets, *_he_newbkt; \
752 _he_new_buckets = (UT_hash_bucket*)uthash_malloc( \
753 2 * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \
754 if (!_he_new_buckets) { uthash_fatal( "out of memory"); } \
755 memset(_he_new_buckets, 0, \
756 2 * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \
757 tbl->ideal_chain_maxlen = \
758 (tbl->num_items >> (tbl->log2_num_buckets+1)) + \
759 ((tbl->num_items & ((tbl->num_buckets*2)-1)) ? 1 : 0); \
760 tbl->nonideal_items = 0; \
761 for(_he_bkt_i = 0; _he_bkt_i < tbl->num_buckets; _he_bkt_i++) \
762 { \
763 _he_thh = tbl->buckets[ _he_bkt_i ].hh_head; \
764 while (_he_thh) { \
765 _he_hh_nxt = _he_thh->hh_next; \
766 HASH_TO_BKT( _he_thh->hashv, tbl->num_buckets*2, _he_bkt); \
767 _he_newbkt = &(_he_new_buckets[ _he_bkt ]); \
768 if (++(_he_newbkt->count) > tbl->ideal_chain_maxlen) { \
769 tbl->nonideal_items++; \
770 _he_newbkt->expand_mult = _he_newbkt->count / \
771 tbl->ideal_chain_maxlen; \
772 } \
773 _he_thh->hh_prev = NULL; \
774 _he_thh->hh_next = _he_newbkt->hh_head; \
775 if (_he_newbkt->hh_head) _he_newbkt->hh_head->hh_prev = \
776 _he_thh; \
777 _he_newbkt->hh_head = _he_thh; \
778 _he_thh = _he_hh_nxt; \
779 } \
780 } \
781 uthash_free( tbl->buckets, tbl->num_buckets*sizeof(struct UT_hash_bucket) ); \
782 tbl->num_buckets *= 2; \
783 tbl->log2_num_buckets++; \
784 tbl->buckets = _he_new_buckets; \
785 tbl->ineff_expands = (tbl->nonideal_items > (tbl->num_items >> 1)) ? \
786 (tbl->ineff_expands+1) : 0; \
787 if (tbl->ineff_expands > 1) { \
788 tbl->noexpand=1; \
789 uthash_noexpand_fyi(tbl); \
790 } \
791 uthash_expand_fyi(tbl); \
792 } while(0)
793
794 /*
795 * This is an adaptation of Simon
796 * Tatham's O(n log(n)) mergesort
797 */
798
799 /*
800 * Note that HASH_SORT assumes the
801 * hash handle name to be hh.
802 *
803 * HASH_SRT was added to allow the
804 * hash handle name to be passed in.
805 */
806
807 # define HASH_SORT(head,cmpfcn) HASH_SRT(hh,head,cmpfcn)
808 # define HASH_SRT(hh,head,cmpfcn) \
809 do { \
810 unsigned _hs_i; \
811 unsigned _hs_looping,_hs_nmerges,_hs_insize,_hs_psize,_hs_qsize; \
812 struct UT_hash_handle *_hs_p, *_hs_q, *_hs_e, *_hs_list, *_hs_tail; \
813 if (head) { \
814 _hs_insize = 1; \
815 _hs_looping = 1; \
816 _hs_list = &((head)->hh); \
817 while (_hs_looping) { \
818 _hs_p = _hs_list; \
819 _hs_list = NULL; \
820 _hs_tail = NULL; \
821 _hs_nmerges = 0; \
822 while (_hs_p) { \
823 _hs_nmerges++; \
824 _hs_q = _hs_p; \
825 _hs_psize = 0; \
826 for ( _hs_i = 0; _hs_i < _hs_insize; _hs_i++ ) { \
827 _hs_psize++; \
828 _hs_q = (UT_hash_handle*)((_hs_q->next) ? \
829 ((void*)((char*)(_hs_q->next) + \
830 (head)->hh.tbl->hho)) : NULL); \
831 if (! (_hs_q) ) break; \
832 } \
833 _hs_qsize = _hs_insize; \
834 while ((_hs_psize > 0) || ((_hs_qsize > 0) && _hs_q )) { \
835 if (_hs_psize == 0) { \
836 _hs_e = _hs_q; \
837 _hs_q = (UT_hash_handle*)((_hs_q->next) ? \
838 ((void*)((char*)(_hs_q->next) + \
839 (head)->hh.tbl->hho)) : NULL); \
840 _hs_qsize--; \
841 } else if ( (_hs_qsize == 0) || !(_hs_q) ) { \
842 _hs_e = _hs_p; \
843 _hs_p = (UT_hash_handle*)((_hs_p->next) ? \
844 ((void*)((char*)(_hs_p->next) + \
845 (head)->hh.tbl->hho)) : NULL); \
846 _hs_psize--; \
847 } else if (( \
848 cmpfcn(DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_p)), \
849 DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_q))) \
850 ) <= 0) { \
851 _hs_e = _hs_p; \
852 _hs_p = (UT_hash_handle*)((_hs_p->next) ? \
853 ((void*)((char*)(_hs_p->next) + \
854 (head)->hh.tbl->hho)) : NULL); \
855 _hs_psize--; \
856 } else { \
857 _hs_e = _hs_q; \
858 _hs_q = (UT_hash_handle*)((_hs_q->next) ? \
859 ((void*)((char*)(_hs_q->next) + \
860 (head)->hh.tbl->hho)) : NULL); \
861 _hs_qsize--; \
862 } \
863 if ( _hs_tail ) { \
864 _hs_tail->next = ((_hs_e) ? \
865 ELMT_FROM_HH((head)->hh.tbl,_hs_e) : NULL); \
866 } else { \
867 _hs_list = _hs_e; \
868 } \
869 _hs_e->prev = ((_hs_tail) ? \
870 ELMT_FROM_HH((head)->hh.tbl,_hs_tail) : NULL); \
871 _hs_tail = _hs_e; \
872 } \
873 _hs_p = _hs_q; \
874 } \
875 _hs_tail->next = NULL; \
876 if ( _hs_nmerges <= 1 ) { \
877 _hs_looping=0; \
878 (head)->hh.tbl->tail = _hs_tail; \
879 DECLTYPE_ASSIGN(head,ELMT_FROM_HH((head)->hh.tbl, _hs_list)); \
880 } \
881 _hs_insize *= 2; \
882 } \
883 HASH_FSCK(hh,head); \
884 } \
885 } while (0)
886
887 /*
888 * This function selects items from one hash into another hash.
889 * The end result is that the selected items have dual presence
890 * in both hashes. There is no copy of the items made; rather
891 * they are added into the new hash through a secondary hash
892 * hash handle that must be present in the structure.
893 */
894
895 # define HASH_SELECT(hh_dst, dst, hh_src, src, cond) \
896 do { \
897 unsigned _src_bkt, _dst_bkt; \
898 void *_last_elt=NULL, *_elt; \
899 UT_hash_handle *_src_hh, *_dst_hh, *_last_elt_hh=NULL; \
900 ptrdiff_t _dst_hho = ((char*)(&(dst)->hh_dst) - (char*)(dst)); \
901 if (src) { \
902 for(_src_bkt=0; _src_bkt < (src)->hh_src.tbl->num_buckets; _src_bkt++) { \
903 for(_src_hh = (src)->hh_src.tbl->buckets[_src_bkt].hh_head; \
904 _src_hh; \
905 _src_hh = _src_hh->hh_next) { \
906 _elt = ELMT_FROM_HH((src)->hh_src.tbl, _src_hh); \
907 if (cond(_elt)) { \
908 _dst_hh = (UT_hash_handle*)(((char*)_elt) + _dst_hho); \
909 _dst_hh->key = _src_hh->key; \
910 _dst_hh->keylen = _src_hh->keylen; \
911 _dst_hh->hashv = _src_hh->hashv; \
912 _dst_hh->prev = _last_elt; \
913 _dst_hh->next = NULL; \
914 if (_last_elt_hh) { _last_elt_hh->next = _elt; } \
915 if (!dst) { \
916 DECLTYPE_ASSIGN(dst,_elt); \
917 HASH_MAKE_TABLE(hh_dst,dst); \
918 } else { \
919 _dst_hh->tbl = (dst)->hh_dst.tbl; \
920 } \
921 HASH_TO_BKT(_dst_hh->hashv, _dst_hh->tbl->num_buckets, _dst_bkt); \
922 HASH_ADD_TO_BKT(_dst_hh->tbl->buckets[_dst_bkt],_dst_hh); \
923 (dst)->hh_dst.tbl->num_items++; \
924 _last_elt = _elt; \
925 _last_elt_hh = _dst_hh; \
926 } \
927 } \
928 } \
929 } \
930 HASH_FSCK(hh_dst,dst); \
931 } while (0)
932
/*
 * HASH_CLEAR: release the bucket array, the bloom filter and the table
 * itself, then set head to NULL. The items themselves are NOT freed here.
 */
# define HASH_CLEAR(hh,head)                                                  \
do {                                                                          \
  if (head) {                                                                 \
    uthash_free((head)->hh.tbl->buckets,                                      \
                (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket));   \
    HASH_BLOOM_FREE((head)->hh.tbl);                                          \
    uthash_free((head)->hh.tbl, sizeof(UT_hash_table));                       \
    (head)=NULL;                                                              \
  }                                                                           \
} while(0)
943
/*
 * HASH_OVERHEAD: bytes of bookkeeping memory used by the hash (the hash
 * handles, the bucket array, the table struct and the bloom filter) --
 * not the size of the application items themselves. head must be non-NULL.
 */
# define HASH_OVERHEAD(hh,head)                                               \
 (size_t)((((head)->hh.tbl->num_items * sizeof(UT_hash_handle)) +             \
           ((head)->hh.tbl->num_buckets * sizeof(UT_hash_bucket)) +           \
           (sizeof(UT_hash_table)) +                                          \
           (HASH_BLOOM_BYTELEN)))
949
/*
 * HASH_ITER(hh, head, el, tmp):
 *   Iterate over all elements in application (insertion) order. The next
 *   element is fetched into tmp before the body runs, so the body may
 *   safely delete (and free) el. Two variants: without decltype/__typeof,
 *   tmp is assigned through a char** alias of its address to sidestep the
 *   missing typed cast; otherwise DECLTYPE supplies the cast directly.
 */
# ifdef NO_DECLTYPE
#  define HASH_ITER(hh,head,el,tmp)                                              \
for((el)=(head), (*(char**)(&(tmp)))=(char*)((head)?(head)->hh.next:NULL);       \
  el; (el)=(tmp),(*(char**)(&(tmp)))=(char*)((tmp)?(tmp)->hh.next:NULL))
# else
#  define HASH_ITER(hh,head,el,tmp)                                              \
for((el)=(head),(tmp)=DECLTYPE(el)((head)?(head)->hh.next:NULL);                 \
  el; (el)=(tmp),(tmp)=DECLTYPE(el)((tmp)?(tmp)->hh.next:NULL))
# endif
959
/*
 * Obtain a count of items in the hash; NULL-safe (0 for an empty hash).
 * HASH_COUNT assumes the conventional handle name "hh"; HASH_CNT takes
 * the handle name explicitly.
 */
# define HASH_COUNT(head) HASH_CNT(hh,head)
# define HASH_CNT(hh,head) ((head)?((head)->hh.tbl->num_items):0)
963
/*
 * One bucket of the hash table: the head of a doubly-linked chain of
 * hash handles (linked via hh_next/hh_prev) plus its current length.
 */
typedef struct UT_hash_bucket {
   struct UT_hash_handle *hh_head;  /* first handle in this bucket's chain */
   unsigned count;                  /* number of handles in the chain      */

  /*
   * expand_mult is normally set to 0. In this situation, the max chain length
   * threshold is enforced at its default value, HASH_BKT_CAPACITY_THRESH. (If
   * the bucket's chain exceeds this length, bucket expansion is triggered).
   * However, setting expand_mult to a non-zero value delays bucket expansion
   * (that would be triggered by additions to this particular bucket)
   * until its chain length reaches a *multiple* of HASH_BKT_CAPACITY_THRESH.
   * (The multiplier is simply expand_mult+1). The whole idea of this
   * multiplier is to reduce bucket expansions, since they are expensive, in
   * situations where we know that a particular bucket tends to be overused.
   * It is better to let its chain length grow to a longer yet-still-bounded
   * value, than to do an O(n) bucket expansion too often.
   */

   unsigned expand_mult;

} UT_hash_bucket;
985
/*
 * Random signatures stored in UT_hash_table; used only so external
 * analysis tools (e.g. debugger scripts) can locate live hash tables
 * and their bloom filters by scanning memory. Not used at runtime.
 */
# define HASH_SIGNATURE 0xa0111fe1
# define HASH_BLOOM_SIGNATURE 0xb12220f2
989
/*
 * Per-hash bookkeeping structure, allocated by HASH_MAKE_TABLE and shared
 * by every element's hash handle (hh.tbl). Freed by HASH_CLEAR/deletion of
 * the last item.
 */
typedef struct UT_hash_table {
   UT_hash_bucket *buckets;                 /* bucket array; always a power of two long */
   unsigned num_buckets, log2_num_buckets;  /* log2 kept so bucket masking stays cheap  */
   unsigned num_items;
   struct UT_hash_handle *tail; /* tail hh in app order, for fast append */
   ptrdiff_t hho; /* hash handle offset (byte pos of hash handle in element) */

  /*
   * in an ideal situation (all buckets used equally), no bucket would have
   * more than ceil(#items/#buckets) items. that's the ideal chain length.
   */

   unsigned ideal_chain_maxlen;

  /*
   * nonideal_items is the number of items in the hash whose chain position
   * exceeds the ideal chain maxlen. these items pay the penalty for an uneven
   * hash distribution; reaching them in a chain traversal takes >ideal steps
   */

   unsigned nonideal_items;

  /*
   * ineffective expands occur when a bucket doubling was performed, but
   * afterward, more than half the items in the hash had nonideal chain
   * positions. If this happens on two consecutive expansions we inhibit any
   * further expansion, as it's not helping; this happens when the hash
   * function isn't a good fit for the key domain. When expansion is inhibited
   * the hash will still work, albeit no longer in constant time.
   */

   unsigned ineff_expands, noexpand;

   uint32_t signature; /* used only to find hash tables in external analysis */
# ifdef HASH_BLOOM
   uint32_t bloom_sig; /* used only to test bloom exists in external analysis */
   uint8_t *bloom_bv;  /* bloom filter bitmap, 2^bloom_nbits bits long        */
   char bloom_nbits;
# endif

} UT_hash_table;
1031
/*
 * The hash handle embedded in each user structure. Links the element into
 * two lists at once: the application-order list (prev/next, which point at
 * the enclosing user structs) and the per-bucket chain (hh_prev/hh_next,
 * which point at handles). The key is referenced, never copied.
 */
typedef struct UT_hash_handle {
   struct UT_hash_table *tbl;      /* shared table bookkeeping               */
   void *prev;                     /* prev element in app order              */
   void *next;                     /* next element in app order              */
   struct UT_hash_handle *hh_prev; /* previous hh in bucket order            */
   struct UT_hash_handle *hh_next; /* next hh in bucket order                */
   const void *key;                /* ptr to enclosing struct's key          */
   unsigned keylen;                /* enclosing struct's key len             */
   unsigned hashv;                 /* result of hash-fcn(key), cached        */
} UT_hash_handle;
1042
1043 #endif /* UTHASH_H */