/*
 * Various storage structures (pool allocation, vector, hash table)
 *
 * Copyright (C) 1993, Eric Youngdale.
 *               2004, Eric Pouech
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "dbghelp_private.h"

#ifdef USE_STATS
#include <math.h>
#endif

WINE_DEFAULT_DEBUG_CHANNEL(dbghelp);

struct pool_arena
{
    struct list entry;
    char       *current;
    char       *end;
};

void pool_init(struct pool* a, size_t arena_size)
{
    list_init( &a->arena_list );
    list_init( &a->arena_full );
    a->arena_size = arena_size;
}

void pool_destroy(struct pool* pool)
{
    struct pool_arena*  arena;
    struct pool_arena*  next;

#ifdef USE_STATS
    size_t alloc, used, num;

    alloc = used = num = 0;
    LIST_FOR_EACH_ENTRY( arena, &pool->arena_list, struct pool_arena, entry )
    {
        alloc += arena->end - (char*)arena;
        used += arena->current - (char*)arena;
        num++;
    }
    LIST_FOR_EACH_ENTRY( arena, &pool->arena_full, struct pool_arena, entry )
    {
        alloc += arena->end - (char*)arena;
        used += arena->current - (char*)arena;
        num++;
    }
    if (alloc == 0) alloc = 1;      /* avoid division by zero */
    FIXME("STATS: pool %p has allocated %u kbytes, used %u kbytes in %u arenas, non-allocation ratio: %.2f%%\n",
          pool, (unsigned)(alloc >> 10), (unsigned)(used >> 10), (unsigned)num,
          100.0 - (float)used / (float)alloc * 100.0);
#endif

    LIST_FOR_EACH_ENTRY_SAFE( arena, next, &pool->arena_list, struct pool_arena, entry )
    {
        list_remove( &arena->entry );
        HeapFree(GetProcessHeap(), 0, arena);
    }
    LIST_FOR_EACH_ENTRY_SAFE( arena, next, &pool->arena_full, struct pool_arena, entry )
    {
        list_remove( &arena->entry );
        HeapFree(GetProcessHeap(), 0, arena);
    }
}

void* pool_alloc(struct pool* pool, size_t len)
{
    struct pool_arena*  arena;
    void*               ret;
    size_t              size;

    len = (len + 3) & ~3; /* round up size on DWORD boundary */

    LIST_FOR_EACH_ENTRY( arena, &pool->arena_list, struct pool_arena, entry )
    {
        if (arena->end - arena->current >= len)
        {
            ret = arena->current;
            arena->current += len;
            if (arena->current + 16 >= arena->end)
            {
                list_remove( &arena->entry );
                list_add_tail( &pool->arena_full, &arena->entry );
            }
            return ret;
        }
    }

    size = max( pool->arena_size, len );
    arena = HeapAlloc(GetProcessHeap(), 0, size + sizeof(struct pool_arena));
    if (!arena) return NULL;

    ret = arena + 1;
    arena->current = (char*)ret + len;
    arena->end = (char*)ret + size;
    if (arena->current + 16 >= arena->end)
        list_add_tail( &pool->arena_full, &arena->entry );
    else
        list_add_head( &pool->arena_list, &arena->entry );
    return ret;
}

char* pool_strdup(struct pool* pool, const char* str)
{
    char* ret;
    if ((ret = pool_alloc(pool, strlen(str) + 1))) strcpy(ret, str);
    return ret;
}
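
/* Usage sketch for the pool allocator above (hypothetical caller; everything
 * except the pool_* functions is illustrative only):
 *
 *     struct pool p;
 *     pool_init(&p, 65536);                    // carve allocations out of 64kB arenas
 *     char* s = pool_strdup(&p, "some name");  // valid until the pool is destroyed
 *     void* blob = pool_alloc(&p, 128);        // may be NULL on out-of-memory
 *     ...
 *     pool_destroy(&p);                        // releases every arena in one pass
 *
 * Individual allocations are never freed: the whole pool is torn down at
 * once, which is why pool_alloc needs no per-allocation bookkeeping.
 */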

void vector_init(struct vector* v, unsigned esz, unsigned bucket_sz)
{
    v->buckets = NULL;
    /* align size on DWORD boundaries */
    v->elt_size = (esz + 3) & ~3;
    switch (bucket_sz)
    {
    case    2: v->shift =  1; break;
    case    4: v->shift =  2; break;
    case    8: v->shift =  3; break;
    case   16: v->shift =  4; break;
    case   32: v->shift =  5; break;
    case   64: v->shift =  6; break;
    case  128: v->shift =  7; break;
    case  256: v->shift =  8; break;
    case  512: v->shift =  9; break;
    case 1024: v->shift = 10; break;
    default: assert(0);
    }
    v->num_buckets = 0;
    v->buckets_allocated = 0;
    v->num_elts = 0;
}

unsigned vector_length(const struct vector* v)
{
    return v->num_elts;
}

void* vector_at(const struct vector* v, unsigned pos)
{
    unsigned o;

    if (pos >= v->num_elts) return NULL;
    o = pos & ((1 << v->shift) - 1);
    return (char*)v->buckets[pos >> v->shift] + o * v->elt_size;
}

void* vector_add(struct vector* v, struct pool* pool)
{
    unsigned ncurr = v->num_elts++;

    /* check that we don't wrap around */
    assert(v->num_elts > ncurr);
    if (ncurr == (v->num_buckets << v->shift))
    {
        if (v->num_buckets == v->buckets_allocated)
        {
            /* double the bucket cache, so it scales well with big vectors */
            unsigned new_reserved;
            void* new;

            new_reserved = 2 * v->buckets_allocated;
            if (new_reserved == 0) new_reserved = 1;

            /* don't even try to resize the existing array: the pool
             * allocator never frees, so reallocs would be very wasteful */
            new = pool_alloc(pool, new_reserved * sizeof(void*));
            memcpy(new, v->buckets, v->buckets_allocated * sizeof(void*));
            v->buckets = new;
            v->buckets_allocated = new_reserved;
        }
        v->buckets[v->num_buckets] = pool_alloc(pool, v->elt_size << v->shift);
        return v->buckets[v->num_buckets++];
    }
    return vector_at(v, ncurr);
}
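
/* Usage sketch for the vector above (hypothetical caller; 'pool' is a
 * struct pool the caller owns):
 *
 *     struct vector v;
 *     int* slot;
 *     vector_init(&v, sizeof(int), 16);       // 16 elements per bucket
 *     slot = vector_add(&v, pool);            // returns an uninitialized slot
 *     if (slot) *slot = 42;
 *     slot = vector_at(&v, vector_length(&v) - 1);
 *
 * Buckets never move once allocated, so pointers handed out by vector_at /
 * vector_add remain valid while the vector grows; only the small table of
 * bucket pointers is copied when it fills up.
 */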

/* We construct the sparse array as two vectors (of equal size).
 * The first vector (key2index) is the lookup table between a key and
 * an index in the second vector (elements).
 * When inserting an element, it is always appended to the second vector
 * (and never moved in memory later on); only the first vector is reordered.
 */
struct key2index
{
    unsigned long       key;
    unsigned            index;
};

void sparse_array_init(struct sparse_array* sa, unsigned elt_sz, unsigned bucket_sz)
{
    vector_init(&sa->key2index, sizeof(struct key2index), bucket_sz);
    vector_init(&sa->elements, elt_sz, bucket_sz);
}

/******************************************************************
 *        sparse_array_lookup
 *
 * Returns the first entry whose key is >= the passed key.
 */
static struct key2index* sparse_array_lookup(const struct sparse_array* sa,
                                             unsigned long key, unsigned* idx)
{
    struct key2index*   pk2i;
    unsigned            low, high;

    if (!sa->elements.num_elts)
    {
        *idx = 0;
        return NULL;
    }
    high = sa->elements.num_elts;
    pk2i = vector_at(&sa->key2index, high - 1);
    if (pk2i->key < key)
    {
        *idx = high;
        return NULL;
    }
    if (pk2i->key == key)
    {
        *idx = high - 1;
        return pk2i;
    }
    low = 0;
    pk2i = vector_at(&sa->key2index, low);
    if (pk2i->key >= key)
    {
        *idx = 0;
        return pk2i;
    }
    /* now we have: sa(lowest key) < key < sa(highest key) */
    while (low < high)
    {
        *idx = (low + high) / 2;
        pk2i = vector_at(&sa->key2index, *idx);
        if (pk2i->key > key)        high = *idx;
        else if (pk2i->key < key)   low = *idx + 1;
        else return pk2i;
    }
    /* the binary search may have stopped just below the insertion point;
     * adjust *idx so it designates the first entry whose key is above
     * the passed key
     */
    if (pk2i->key < key)
        pk2i = vector_at(&sa->key2index, ++(*idx));
    return pk2i;
}

void* sparse_array_find(const struct sparse_array* sa, unsigned long key)
{
    unsigned            idx;
    struct key2index*   pk2i;

    if ((pk2i = sparse_array_lookup(sa, key, &idx)) && pk2i->key == key)
        return vector_at(&sa->elements, pk2i->index);
    return NULL;
}

void* sparse_array_add(struct sparse_array* sa, unsigned long key,
                       struct pool* pool)
{
    unsigned            idx, i;
    struct key2index*   pk2i;
    struct key2index*   to;

    pk2i = sparse_array_lookup(sa, key, &idx);
    if (pk2i && pk2i->key == key)
    {
        FIXME("re-adding an existing key\n");
        return NULL;
    }
    to = vector_add(&sa->key2index, pool);
    if (pk2i)
    {
        /* we need to shift the vector's content... */
        /* let's do it brute force... (FIXME) */
        assert(sa->key2index.num_elts >= 2);
        for (i = sa->key2index.num_elts - 1; i > idx; i--)
        {
            pk2i = vector_at(&sa->key2index, i - 1);
            *to = *pk2i;
            to = pk2i;
        }
    }

    to->key = key;
    to->index = sa->elements.num_elts;

    return vector_add(&sa->elements, pool);
}

unsigned sparse_array_length(const struct sparse_array* sa)
{
    return sa->elements.num_elts;
}
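
/* Usage sketch for the sparse array (hypothetical caller and payload type):
 *
 *     struct sparse_array sa;
 *     struct payload { int x; } *p;
 *     sparse_array_init(&sa, sizeof(struct payload), 16);
 *     p = sparse_array_add(&sa, 0x1000, pool);   // uninitialized slot, or NULL
 *     if (p) p->x = 7;
 *     p = sparse_array_find(&sa, 0x1000);        // NULL if the key is absent
 *
 * Lookup is a binary search over key2index (O(log n)); insertion shifts the
 * key2index entries above the new key one slot up (O(n)), but the elements
 * themselves are only ever appended and never move.
 */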

static unsigned hash_table_hash(const char* name, unsigned num_buckets)
{
    unsigned    hash = 0;
    while (*name)
    {
        hash += *name++;
        hash += (hash << 10);
        hash ^= (hash >> 6);
    }
    hash += (hash << 3);
    hash ^= (hash >> 11);
    hash += (hash << 15);
    return hash % num_buckets;
}
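
/* The mixing above is Bob Jenkins' "one-at-a-time" hash, which spreads even
 * short, similar strings (such as symbol names) well across the buckets;
 * only the final modulo folds the result onto the bucket range.
 */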

void hash_table_init(struct pool* pool, struct hash_table* ht, unsigned num_buckets)
{
    ht->num_elts = 0;
    ht->num_buckets = num_buckets;
    ht->pool = pool;
    ht->buckets = NULL;
}

void hash_table_destroy(struct hash_table* ht)
{
#if defined(USE_STATS)
    int                     i;
    unsigned                len;
    unsigned                min = 0xffffffff, max = 0, sq = 0;
    struct hash_table_elt*  elt;
    double                  mean, variance;

    for (i = 0; i < ht->num_buckets; i++)
    {
        for (len = 0, elt = ht->buckets[i].first; elt; elt = elt->next) len++;
        if (len < min) min = len;
        if (len > max) max = len;
        sq += len * len;
    }
    mean = (double)ht->num_elts / ht->num_buckets;
    variance = (double)sq / ht->num_buckets - mean * mean;
    FIXME("STATS: elts[num:%-4u size:%u mean:%f] buckets[min:%-4u variance:%+f max:%-4u]\n",
          ht->num_elts, ht->num_buckets, mean, min, variance, max);
#if 1
    for (i = 0; i < ht->num_buckets; i++)
    {
        for (len = 0, elt = ht->buckets[i].first; elt; elt = elt->next) len++;
        if (len == max)
        {
            FIXME("Longest bucket:\n");
            for (elt = ht->buckets[i].first; elt; elt = elt->next)
                FIXME("\t%s\n", elt->name);
            break;
        }
    }
#endif
#endif
}

void hash_table_add(struct hash_table* ht, struct hash_table_elt* elt)
{
    unsigned hash = hash_table_hash(elt->name, ht->num_buckets);

    if (!ht->buckets)
    {
        ht->buckets = pool_alloc(ht->pool, ht->num_buckets * sizeof(struct hash_table_bucket));
        assert(ht->buckets);
        memset(ht->buckets, 0, ht->num_buckets * sizeof(struct hash_table_bucket));
    }

    /* in some cases, we need to get back the symbols of the same name in the
     * order in which they've been inserted, so insert new elements at the end
     * of the list
     */
    if (!ht->buckets[hash].first)
    {
        ht->buckets[hash].first = elt;
    }
    else
    {
        ht->buckets[hash].last->next = elt;
    }
    ht->buckets[hash].last = elt;
    elt->next = NULL;
    ht->num_elts++;
}
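
/* Usage sketch for the intrusive hash table (struct my_sym is hypothetical):
 * entries embed a struct hash_table_elt, so insertion allocates nothing.
 *
 *     struct my_sym { struct hash_table_elt hte; int value; };
 *     struct my_sym* s = pool_alloc(pool, sizeof(*s));
 *     s->hte.name = pool_strdup(pool, "foo");
 *     s->value = 1;
 *     hash_table_add(&ht, &s->hte);
 */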

void hash_table_iter_init(const struct hash_table* ht,
                          struct hash_table_iter* hti, const char* name)
{
    hti->ht = ht;
    if (name)
    {
        hti->last = hash_table_hash(name, ht->num_buckets);
        hti->index = hti->last - 1;
    }
    else
    {
        hti->last = ht->num_buckets - 1;
        hti->index = -1;
    }
    hti->element = NULL;
}

void* hash_table_iter_up(struct hash_table_iter* hti)
{
    if (!hti->ht->buckets) return NULL;

    if (hti->element) hti->element = hti->element->next;
    while (!hti->element && hti->index < hti->last)
        hti->element = hti->ht->buckets[++hti->index].first;
    return hti->element;
}
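
/* Iteration sketch: passing a name to hash_table_iter_init restricts the walk
 * to that name's bucket, but different names can share a bucket, so callers
 * must still compare names (handle_sym below is hypothetical):
 *
 *     struct hash_table_iter hti;
 *     void* ptr;
 *     hash_table_iter_init(&ht, &hti, "foo");
 *     while ((ptr = hash_table_iter_up(&hti)))
 *     {
 *         struct hash_table_elt* elt = ptr;
 *         if (!strcmp(elt->name, "foo")) handle_sym(elt);
 *     }
 *
 * Passing NULL instead iterates over every element in the table.
 */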