1 /*
2
3 B G E T
4
5 Buffer allocator
6
7 Designed and implemented in April of 1972 by John Walker, based on the
8 Case Algol OPRO$ algorithm implemented in 1966.
9
10 Reimplemented in 1975 by John Walker for the Interdata 70.
11 Reimplemented in 1977 by John Walker for the Marinchip 9900.
12 Reimplemented in 1982 by Duff Kurland for the Intel 8080.
13
14 Portable C version implemented in September of 1990 by an older, wiser
15 instance of the original implementor.
16
17 Souped up and/or weighed down slightly shortly thereafter by Greg
18 Lutz.
19
20 AMIX edition, including the new compaction call-back option, prepared
21 by John Walker in July of 1992.
22
23 Bug in built-in test program fixed, ANSI compiler warnings eradicated,
24 buffer pool validator implemented, and guaranteed repeatable test
25 added by John Walker in October of 1995.
26
27 This program is in the public domain.
28
29 1. This is the book of the generations of Adam. In the day that God
30 created man, in the likeness of God made he him;
31 2. Male and female created he them; and blessed them, and called
32 their name Adam, in the day when they were created.
33 3. And Adam lived an hundred and thirty years, and begat a son in
34 his own likeness, and after his image; and called his name Seth:
35 4. And the days of Adam after he had begotten Seth were eight
36 hundred years: and he begat sons and daughters:
37 5. And all the days that Adam lived were nine hundred and thirty
38 years: and he died.
39 6. And Seth lived an hundred and five years, and begat Enos:
40 7. And Seth lived after he begat Enos eight hundred and seven years,
41 and begat sons and daughters:
42 8. And all the days of Seth were nine hundred and twelve years: and
43 he died.
44 9. And Enos lived ninety years, and begat Cainan:
45 10. And Enos lived after he begat Cainan eight hundred and fifteen
46 years, and begat sons and daughters:
47 11. And all the days of Enos were nine hundred and five years: and
48 he died.
49 12. And Cainan lived seventy years and begat Mahalaleel:
50 13. And Cainan lived after he begat Mahalaleel eight hundred and
51 forty years, and begat sons and daughters:
52 14. And all the days of Cainan were nine hundred and ten years: and
53 he died.
54 15. And Mahalaleel lived sixty and five years, and begat Jared:
55 16. And Mahalaleel lived after he begat Jared eight hundred and
56 thirty years, and begat sons and daughters:
57 17. And all the days of Mahalaleel were eight hundred ninety and
58 five years: and he died.
59 18. And Jared lived an hundred sixty and two years, and he begat
60 Enoch:
61 19. And Jared lived after he begat Enoch eight hundred years, and
62 begat sons and daughters:
63 20. And all the days of Jared were nine hundred sixty and two years:
64 and he died.
65 21. And Enoch lived sixty and five years, and begat Methuselah:
66 22. And Enoch walked with God after he begat Methuselah three
67 hundred years, and begat sons and daughters:
68 23. And all the days of Enoch were three hundred sixty and five
69 years:
70 24. And Enoch walked with God: and he was not; for God took him.
71 25. And Methuselah lived an hundred eighty and seven years, and
72 begat Lamech.
73 26. And Methuselah lived after he begat Lamech seven hundred eighty
74 and two years, and begat sons and daughters:
75 27. And all the days of Methuselah were nine hundred sixty and nine
76 years: and he died.
77 28. And Lamech lived an hundred eighty and two years, and begat a
78 son:
79 29. And he called his name Noah, saying, This same shall comfort us
80 concerning our work and toil of our hands, because of the ground
81 which the LORD hath cursed.
82 30. And Lamech lived after he begat Noah five hundred ninety and
83 five years, and begat sons and daughters:
84 31. And all the days of Lamech were seven hundred seventy and seven
85 years: and he died.
86 32. And Noah was five hundred years old: and Noah begat Shem, Ham,
87 and Japheth.
88
89 And buffers begat buffers, and links begat links, and buffer pools
90 begat links to chains of buffer pools containing buffers, and lo the
91 buffers and links and pools of buffers and pools of links to chains of
92 pools of buffers were fruitful and they multiplied and the Operating
93 System looked down upon them and said that it was Good.
94
95
96 INTRODUCTION
97 ============
98
99 BGET is a comprehensive memory allocation package which is easily
100 configured to the needs of an application. BGET is efficient in
101 both the time needed to allocate and release buffers and in the
102 memory overhead required for buffer pool management. It
103 automatically consolidates contiguous space to minimise
104 fragmentation.  BGET is configured by compile-time definitions.
105 Major options include:
106
107 * A built-in test program to exercise BGET and
108 demonstrate how the various functions are used.
109
110 * Allocation by either the "first fit" or "best fit"
111 method.
112
113 * Wiping buffers at release time to catch code which
114 references previously released storage.
115
116 * Built-in routines to dump individual buffers or the
117 entire buffer pool.
118
119 * Retrieval of allocation and pool size statistics.
120
121 * Quantisation of buffer sizes to a power of two to
122 satisfy hardware alignment constraints.
123
124 * Automatic pool compaction, growth, and shrinkage by
125 means of call-backs to user defined functions.
126
127 Applications of BGET can range from storage management in
128 ROM-based embedded programs to providing the framework upon which
129 a multitasking system incorporating garbage collection is
130 constructed. BGET incorporates extensive internal consistency
131 checking using the <assert.h> mechanism; all these checks can be
132 turned off by compiling with NDEBUG defined, yielding a version of
133 BGET with minimal size and maximum speed.
134
135 The basic algorithm underlying BGET has withstood the test of
136 time; more than 25 years have passed since the first
137 implementation of this code. And yet, it is substantially more
138 efficient than the native allocation schemes of many operating
139 systems: the Macintosh and Microsoft Windows to name two, on which
140 programs have obtained substantial speed-ups by layering BGET as
141 an application level memory manager atop the underlying system's.
142
143 BGET has been implemented on the largest mainframes and the lowest
144 of microprocessors. It has served as the core for multitasking
145 operating systems, multi-thread applications, embedded software in
146 data network switching processors, and a host of C programs. And
147 while it has accreted flexibility and additional options over the
148 years, it remains fast, memory efficient, portable, and easy to
149 integrate into your program.
150
151
152 BGET IMPLEMENTATION ASSUMPTIONS
153 ===============================
154
155 BGET is written in as portable a dialect of C as possible. The
156 only fundamental assumption about the underlying hardware
157 architecture is that memory is allocated in a linear array which
158 can be addressed as a vector of C "char" objects. On segmented
159 address space architectures, this generally means that BGET should
160 be used to allocate storage within a single segment (although some
161 compilers simulate linear address spaces on segmented
162 architectures). On segmented architectures, then, BGET buffer
163 pools may not be larger than a segment, but since BGET allows any
164 number of separate buffer pools, there is no limit on the total
165 storage which can be managed, only on the largest individual
166 object which can be allocated. Machines with a linear address
167 architecture, such as the VAX, 680x0, Sparc, MIPS, or the Intel
168 80386 and above in native mode, may use BGET without restriction.
169
170
171 GETTING STARTED WITH BGET
172 =========================
173
174 Although BGET can be configured in a multitude of fashions, there
175 are three basic ways of working with BGET. The functions
176 mentioned below are documented in the following section. Please
177 excuse the forward references which are made in the interest of
178 providing a roadmap to guide you to the BGET functions you're
179 likely to need.
180
181 Embedded Applications
182 ---------------------
183
184 Embedded applications typically have a fixed area of memory
185 dedicated to buffer allocation (often in a separate RAM address
186 space distinct from the ROM that contains the executable code).
187 To use BGET in such an environment, simply call bpool() with the
188 start address and length of the buffer pool area in RAM, then
189 allocate buffers with bget() and release them with brel().
190 Embedded applications with very limited RAM but abundant CPU speed
191 may benefit by configuring BGET for BestFit allocation (which is
192 usually not worth it in other environments).
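
A minimal sketch of that pattern (the pool_area array, its size, and the
function names below are illustrative, not part of BGET):

    static char pool_area[16384];        // dedicated allocation region in RAM

    void heap_init(void)
    {
        bpool(pool_area, (bufsize) sizeof(pool_area));
    }

    void heap_demo(void)
    {
        void *p = bget(128);             // allocate a 128-byte buffer
        if (p != NULL) {
            brel(p);                     // return it to the pool
        }
    }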
193
194 Malloc() Emulation
195 ------------------
196
197 If the C library malloc() function is too slow, not present in
198 your development environment (for example, a native Windows or
199 Macintosh program), or otherwise unsuitable, you can replace it
200 with BGET. Initially define a buffer pool of an appropriate size
201 with bpool()--usually obtained by making a call to the operating
202 system's low-level memory allocator. Then allocate buffers with
203 bget(), bgetz(), and bgetr() (the last two permit the allocation
204 of buffers initialised to zero and [inefficient] re-allocation of
205 existing buffers for compatibility with C library functions).
206 Release buffers by calling brel(). If a buffer allocation request
207 fails, obtain more storage from the underlying operating system,
208 add it to the buffer pool by another call to bpool(), and continue
209 execution.
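
As a hedged sketch (xmalloc/xfree, os_alloc(), and POOL_CHUNK are
illustrative names standing in for your wrapper functions and the system's
low-level allocator; they are not part of BGET):

    void *xmalloc(size_t len)
    {
        void *p = bget((bufsize) len);
        if (p == NULL) {
            // Pool exhausted: obtain another chunk from the OS and retry.
            void *chunk = os_alloc(POOL_CHUNK);    // hypothetical OS call
            if (chunk == NULL)
                return NULL;
            bpool(chunk, (bufsize) POOL_CHUNK);
            p = bget((bufsize) len);
        }
        return p;
    }

    void xfree(void *p)
    {
        brel(p);
    }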
210
211 Automatic Storage Management
212 ----------------------------
213
214 You can use BGET as your application's native memory manager and
215 implement automatic storage pool expansion, contraction, and
216 optionally application-specific memory compaction by compiling
217 BGET with the BECtl variable defined, then calling bectl() and
218 supplying functions for storage compaction, acquisition, and
219 release, as well as a standard pool expansion increment. All of
220 these functions are optional (although it doesn't make much sense
221 to provide a release function without an acquisition function,
222 does it?). Once the call-back functions have been defined with
223 bectl(), you simply use bget() and brel() to allocate and release
224 storage as before. You can supply an initial buffer pool with
225 bpool() or rely on automatic allocation to acquire the entire
226 pool. When a call on bget() cannot be satisfied, BGET first
227 checks if a compaction function has been supplied. If so, it is
228 called (with the space required to satisfy the allocation request
229 and a sequence number to allow the compaction routine to be called
230 successively without looping). If the compaction function is able
231 to free any storage (it needn't know whether the storage it freed
232 was adequate) it should return a nonzero value, whereupon BGET
233 will retry the allocation request and, if it fails again, call the
234 compaction function again with the next-higher sequence number.
235
236 If the compaction function returns zero, indicating failure to
237 free space, or no compaction function is defined, BGET next tests
238 whether a non-NULL allocation function was supplied to bectl().
239 If so, that function is called with an argument indicating how
240 many bytes of additional space are required. This will be the
241 standard pool expansion increment supplied in the call to bectl()
242 unless the original bget() call requested a buffer larger than
243 this; buffers larger than the standard pool block can be managed
244 "off the books" by BGET in this mode. If the allocation function
245 succeeds in obtaining the storage, it returns a pointer to the new
246 block and BGET expands the buffer pool; if it fails, the
247 allocation request fails and returns NULL to the caller. If a
248 non-NULL release function is supplied, expansion blocks which
249 become totally empty are released to the global free pool by
250 passing their addresses to the release function.
251
252 Equipped with appropriate allocation, release, and compaction
253 functions, BGET can be used as part of very sophisticated memory
254 management strategies, including garbage collection. (Note,
255 however, that BGET is *not* a garbage collector by itself, and
256 that developing such a system requires much additional logic and
257 careful design of the application's memory allocation strategy.)
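
For example, one plausible way of wiring BGET to a host allocator (malloc()
and free() stand in for whatever low-level allocator is available, and
EXP_INCR is an illustrative expansion block size):

    #define EXP_INCR 32768                   // illustrative constant

    static void *pool_acquire(bufsize size)
    {
        return malloc((size_t) size);        // obtain one expansion block
    }

    static void pool_release(void *buf)
    {
        free(buf);                           // hand an empty block back
    }

    void heap_setup(void)
    {
        // NULL compaction function: rely on expansion and contraction only.
        bectl(NULL, pool_acquire, pool_release, (bufsize) EXP_INCR);
    }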
258
259
260 BGET FUNCTION DESCRIPTIONS
261 ==========================
262
263 Functions implemented in this file (some are enabled by certain of
264 the optional settings below):
265
266 void bpool(void *buffer, bufsize len);
267
268 Create a buffer pool of <len> bytes, using the storage starting at
269 <buffer>. You can call bpool() subsequently to contribute
270 additional storage to the overall buffer pool.
271
272 void *bget(bufsize size);
273
274 Allocate a buffer of <size> bytes. The address of the buffer is
275 returned, or NULL if insufficient memory was available to allocate
276 the buffer.
277
278 void *bgetz(bufsize size);
279
280 Allocate a buffer of <size> bytes and clear it to all zeroes. The
281 address of the buffer is returned, or NULL if insufficient memory
282 was available to allocate the buffer.
283
284 void *bgetr(void *buffer, bufsize newsize);
285
286 Reallocate a buffer previously allocated by bget(), changing its
287 size to <newsize> and preserving all existing data. NULL is
288 returned if insufficient memory is available to reallocate the
289 buffer, in which case the original buffer remains intact.
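
For instance (a small sketch; the failure branch shows why the original
pointer must be retained until bgetr() succeeds):

    void *p = bget(64);
    void *q = bgetr(p, 256);     // grow to 256 bytes; existing data preserved
    if (q == NULL) {
        brel(p);                 // reallocation failed; p is still valid
    } else {
        p = q;                   // on success bgetr() has released the old buffer
    }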
290
291 void brel(void *buf);
292
293 Return the buffer <buf>, previously allocated by bget(), to the
294 free space pool.
295
296 void bectl(int (*compact)(bufsize sizereq, int sequence),
297 void *(*acquire)(bufsize size),
298 void (*release)(void *buf),
299 bufsize pool_incr);
300
301 Expansion control: specify functions through which the package may
302 compact storage (or take other appropriate action) when an
303 allocation request fails, and optionally automatically acquire
304 storage for expansion blocks when necessary, and release such
305 blocks when they become empty. If <compact> is non-NULL, whenever
306 a buffer allocation request fails, the <compact> function will be
307 called with arguments specifying the number of bytes (total buffer
308 size, including header overhead) required to satisfy the
309 allocation request, and a sequence number indicating the number of
310 consecutive calls on <compact> attempting to satisfy this
311 allocation request. The sequence number is 1 for the first call
312 on <compact> for a given allocation request, and increments on
313 subsequent calls, permitting the <compact> function to take
314 increasingly dire measures in an attempt to free up storage. If
315 the <compact> function returns a nonzero value, the allocation
316 attempt is re-tried. If <compact> returns 0 (as it must if it
317 isn't able to release any space or add storage to the buffer
318 pool), the allocation request fails, which can trigger automatic
319 pool expansion if the <acquire> argument is non-NULL. At the time
320 the <compact> function is called, the state of the buffer
321 allocator is identical to that at the moment the allocation
322 request was made; consequently, the <compact> function may call
323 brel(), bpool(), bstats(), and/or directly manipulate the buffer
324 pool in any manner which would be valid were the application in
325 control. This does not, however, relieve the <compact> function
326 of the need to ensure that whatever actions it takes do not change
327 things underneath the application that made the allocation
328 request. For example, a <compact> function that released a buffer
329 in the process of being reallocated with bgetr() would lead to
330 disaster. Implementing a safe and effective <compact> mechanism
331 requires careful design of an application's memory architecture,
332 and cannot generally be easily retrofitted into existing code.
333
334 If <acquire> is non-NULL, that function will be called whenever an
335 allocation request fails. If the <acquire> function succeeds in
336 allocating the requested space and returns a pointer to the new
337 area, allocation will proceed using the expanded buffer pool. If
338 <acquire> cannot obtain the requested space, it should return NULL
339 and the entire allocation process will fail. <pool_incr>
340 specifies the normal expansion block size. Providing an <acquire>
341 function will cause subsequent bget() requests for buffers too
342 large to be managed in the linked-block scheme (in other words,
343 larger than <pool_incr> minus the buffer overhead) to be satisfied
344 directly by calls to the <acquire> function. Automatic release of
345 empty pool blocks will occur only if all pool blocks in the system
346 are the size given by <pool_incr>.
347
348 void bstats(bufsize *curalloc, bufsize *totfree,
349 bufsize *maxfree, long *nget, long *nrel);
350
351 The amount of space currently allocated is stored into the
352 variable pointed to by <curalloc>. The total free space (sum of
353 all free blocks in the pool) is stored into the variable pointed
354 to by <totfree>, and the size of the largest single block in the
355 free space pool is stored into the variable pointed to by
356 <maxfree>. The variables pointed to by <nget> and <nrel> are
357 filled, respectively, with the number of successful (non-NULL
358 return) bget() calls and the number of brel() calls.
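
For example (a sketch; the report format is purely illustrative):

    bufsize curalloc, totfree, maxfree;
    long nget, nrel;

    bstats(&curalloc, &totfree, &maxfree, &nget, &nrel);
    printf("in use %ld, free %ld, largest free %ld, %ld gets, %ld releases\n",
           (long) curalloc, (long) totfree, (long) maxfree, nget, nrel);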
359
360 void bstatse(bufsize *pool_incr, long *npool,
361 long *npget, long *nprel,
362 long *ndget, long *ndrel);
363
364 Extended statistics: The expansion block size will be stored into
365 the variable pointed to by <pool_incr>, or the negative thereof if
366 automatic expansion block releases are disabled. The number of
367 currently active pool blocks will be stored into the variable
368 pointed to by <npool>. The variables pointed to by <npget> and
369 <nprel> will be filled with, respectively, the number of expansion
370 block acquisitions and releases which have occurred. The
371 variables pointed to by <ndget> and <ndrel> will be filled with
372 the number of bget() and brel() calls, respectively, managed
373 through blocks directly allocated by the acquisition and release
374 functions.
375
376 void bufdump(void *buf);
377
378 The buffer pointed to by <buf> is dumped on standard output.
379
380 void bpoold(void *pool, int dumpalloc, int dumpfree);
381
382 All buffers in the buffer pool <pool>, previously initialised by a
383 call on bpool(), are listed in ascending memory address order. If
384 <dumpalloc> is nonzero, the contents of allocated buffers are
385 dumped; if <dumpfree> is nonzero, the contents of free blocks are
386 dumped.
387
388 int bpoolv(void *pool);
389
390 The named buffer pool, previously initialised by a call on
391 bpool(), is validated for bad pointers, overwritten data, etc. If
392 compiled with NDEBUG not defined, any error generates an assertion
393 failure. Otherwise 1 is returned if the pool is valid, 0 if an
394 error is found.
395
396
397 BGET CONFIGURATION
398 ==================
399 */
400
401 /*#define TestProg 20000*/ /* Generate built-in test program
402 if defined. The value specifies
403 how many buffer allocation attempts
404 the test program should make. */
405
406 #define SizeQuant 4 /* Buffer allocation size quantum:
407 all buffers allocated are a
408 multiple of this size. This
409 MUST be a power of two. */
410
411 #define BufDump 1 /* Define this symbol to enable the
412 bpoold() function which dumps the
413 buffers in a buffer pool. */
414
415 #define BufValid 1 /* Define this symbol to enable the
416 bpoolv() function for validating
417 a buffer pool. */
418
419 #define DumpData 1 /* Define this symbol to enable the
420 bufdump() function which allows
421 dumping the contents of an allocated
422 or free buffer. */
423
424 #define BufStats 1 /* Define this symbol to enable the
425 bstats() function which calculates
426 the total free space in the buffer
427 pool, the largest available
428 buffer, and the total space
429 currently allocated. */
430
431 #define FreeWipe 1 /* Wipe free buffers to a guaranteed
432 pattern of garbage to trip up
433 miscreants who attempt to use
434 pointers into released buffers. */
435
436 #define BestFit 1 /* Use a best fit algorithm when
437 searching for space for an
438 allocation request. This uses
439 memory more efficiently, but
440 allocation will be much slower. */
441
442 #define BECtl 1 /* Define this symbol to enable the
443 bectl() function for automatic
444 pool space control. */
445
446 #include <stdio.h>
447
448 int TuiPrintf(const char *format, ... );
449 #define printf TuiPrintf
450
451 #ifdef lint
452 #define NDEBUG /* Exits in asserts confuse lint */
453 /* LINTLIBRARY */ /* Don't complain about def, no ref */
454 extern char *sprintf(); /* Sun includes don't define sprintf */
455 #endif
456
457 #define NDEBUG
458
459 #include <assert.h>
460 #include <memory.h>
461
462 #ifdef BufDump /* BufDump implies DumpData */
463 #ifndef DumpData
464 #define DumpData 1
465 #endif
466 #endif
467
468 #ifdef DumpData
469 #include <ctype.h>
470 #endif
471
472 /* Declare the interface, including the requested buffer size type,
473 bufsize. */
474
475 #include "bget.h"
476
477 #define MemSize int /* Type for size arguments to memxxx()
478 functions such as memcmp(). */
479
480 /* Queue links */
481
482 struct qlinks {
483 struct bfhead *flink; /* Forward link */
484 struct bfhead *blink; /* Backward link */
485 };
486
487 /* Header in allocated and free buffers */
488
489 struct bhead {
490 bufsize prevfree; /* Relative link back to previous
491 free buffer in memory or 0 if
492 previous buffer is allocated. */
493 bufsize bsize; /* Buffer size: positive if free,
494 negative if allocated. */
495 };
496 #define BH(p) ((struct bhead *) (p))
497
498 /* Header in directly allocated buffers (by acqfcn) */
499
500 struct bdhead {
501 bufsize tsize; /* Total size, including overhead */
502 struct bhead bh; /* Common header */
503 };
504 #define BDH(p) ((struct bdhead *) (p))
505
506 /* Header in free buffers */
507
508 struct bfhead {
509 struct bhead bh; /* Common allocated/free header */
510 struct qlinks ql; /* Links on free list */
511 };
512 #define BFH(p) ((struct bfhead *) (p))
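
/* Informal layout sketch (not normative): each buffer in a pool begins with
   a struct bhead, and the pointer handed to the caller by bget() points just
   past that header.

       allocated buffer                    free buffer
   +----------------------+           +----------------------+
   | bhead: prevfree      |           | bhead: prevfree      |
   |        bsize   (< 0) |           |        bsize   (> 0) |
   +----------------------+  bget()   +----------------------+
   | user data ...        | <-result  | qlinks: flink, blink |
   |                      |           | (rest unused/wiped)  |
   +----------------------+           +----------------------+

   prevfree holds the size of an immediately preceding free buffer, or 0 if
   the preceding buffer is allocated; bsize is negated while a buffer is
   allocated (and 0 for buffers acquired directly through acqfcn). */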
513
514 static struct bfhead freelist = { /* List of free buffers */
515 {0, 0},
516 {&freelist, &freelist}
517 };
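
/* The list head's links point back at the head itself, so an empty free list
   consists of the head alone; the head is also the sentinel that terminates
   the free-list scan in bget(). */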
518
519
520 #ifdef BufStats
521 static bufsize totalloc = 0; /* Total space currently allocated */
522 static long numget = 0, numrel = 0; /* Number of bget() and brel() calls */
523 #ifdef BECtl
524 static long numpblk = 0; /* Number of pool blocks */
525 static long numpget = 0, numprel = 0; /* Number of block gets and rels */
526 static long numdget = 0, numdrel = 0; /* Number of direct gets and rels */
527 #endif /* BECtl */
528 #endif /* BufStats */
529
530 #ifdef BECtl
531
532 /* Automatic expansion block management functions */
533
534 static int (*compfcn) _((bufsize sizereq, int sequence)) = NULL;
535 static void *(*acqfcn) _((bufsize size)) = NULL;
536 static void (*relfcn) _((void *buf)) = NULL;
537
538 static bufsize exp_incr = 0; /* Expansion block size */
539 static bufsize pool_len = 0; /* 0: no bpool calls have been made
540 -1: not all pool blocks are
541 the same size
542 >0: (common) block size for all
543 bpool calls made so far
544 */
545 #endif
546
547 /* Minimum allocation quantum: */
548
549 #define QLSize (sizeof(struct qlinks))
550 #define SizeQ ((SizeQuant > QLSize) ? SizeQuant : QLSize)
551
552 #define V (void) /* To denote unwanted returned values */
553
554 /* End sentinel: value placed in bsize field of dummy block delimiting
555 end of pool block. The most negative number which will fit in a
556 bufsize, defined in a way that the compiler will accept. */
557
558 #define ESent ((bufsize) (-(((1L << (sizeof(bufsize) * 8 - 2)) - 1) * 2) - 2))
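
/* With a 32-bit bufsize, for example, this folds to -(((1L << 30) - 1) * 2) - 2
   = -2147483648, the most negative 32-bit value, obtained without writing the
   positive literal 2147483648, which would not fit in a signed constant. */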
559
560 /* BGET -- Allocate a buffer. */
561
562 void *bget(requested_size)
563 bufsize requested_size;
564 {
565 bufsize size = requested_size;
566 struct bfhead *b;
567 #ifdef BestFit
568 struct bfhead *best;
569 #endif
570 void *buf;
571 #ifdef BECtl
572 int compactseq = 0;
573 #endif
574
575 assert(size > 0);
576
577 if (size < SizeQ) { /* Need at least room for the */
578 size = SizeQ; /* queue links. */
579 }
580 #ifdef SizeQuant
581 #if SizeQuant > 1
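    /* Round the request up to the next multiple of SizeQuant; e.g. with the
       default SizeQuant of 4, a request for 13 bytes becomes 16. */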
582 size = (size + (SizeQuant - 1)) & (~(SizeQuant - 1));
583 #endif
584 #endif
585
586 size += sizeof(struct bhead); /* Add overhead in allocated buffer
587 to size required. */
588
589 #ifdef BECtl
590 /* If a compact function was provided in the call to bectl(), wrap
591 a loop around the allocation process to allow compaction to
592 intervene in case we don't find a suitable buffer in the chain. */
593
594 while (1) {
595 #endif
596 b = freelist.ql.flink;
597 #ifdef BestFit
598 best = &freelist;
599 #endif
600
601
602 /* Scan the free list searching for the first buffer big enough
603 to hold the requested size buffer. */
604
605 #ifdef BestFit
606 while (b != &freelist) {
607 if (b->bh.bsize >= size) {
608 if ((best == &freelist) || (b->bh.bsize < best->bh.bsize)) {
609 best = b;
610 }
611 }
612 b = b->ql.flink; /* Link to next buffer */
613 }
614 b = best;
615 #endif /* BestFit */
616
617 while (b != &freelist) {
618 if ((bufsize) b->bh.bsize >= size) {
619
620 /* Buffer is big enough to satisfy the request. Allocate it
621 to the caller. We must decide whether the buffer is large
622 enough to split into the part given to the caller and a
623 free buffer that remains on the free list, or whether the
624 entire buffer should be removed from the free list and
625 given to the caller in its entirety. We only split the
626 buffer if enough room remains for a header plus the minimum
627 quantum of allocation. */
628
629 if ((b->bh.bsize - size) > (SizeQ + (sizeof(struct bhead)))) {
630 struct bhead *ba, *bn;
631
632 ba = BH(((char *) b) + (b->bh.bsize - size));
633 bn = BH(((char *) ba) + size);
634 assert(bn->prevfree == b->bh.bsize);
635 /* Subtract size from length of free block. */
636 b->bh.bsize -= size;
637 /* Link allocated buffer to the previous free buffer. */
638 ba->prevfree = b->bh.bsize;
639 /* Plug negative size into user buffer. */
640 ba->bsize = -(bufsize) size;
641 /* Mark buffer after this one not preceded by free block. */
642 bn->prevfree = 0;
643
644 #ifdef BufStats
645 totalloc += size;
646 numget++; /* Increment number of bget() calls */
647 #endif
648 buf = (void *) ((((char *) ba) + sizeof(struct bhead)));
649 return buf;
650 } else {
651 struct bhead *ba;
652
653 ba = BH(((char *) b) + b->bh.bsize);
654 assert(ba->prevfree == b->bh.bsize);
655
656 /* The buffer isn't big enough to split. Give the whole
657 shebang to the caller and remove it from the free list. */
658
659 assert(b->ql.blink->ql.flink == b);
660 assert(b->ql.flink->ql.blink == b);
661 b->ql.blink->ql.flink = b->ql.flink;
662 b->ql.flink->ql.blink = b->ql.blink;
663
664 #ifdef BufStats
665 totalloc += b->bh.bsize;
666 numget++; /* Increment number of bget() calls */
667 #endif
668 /* Negate size to mark buffer allocated. */
669 b->bh.bsize = -(b->bh.bsize);
670
671 /* Zero the back pointer in the next buffer in memory
672 to indicate that this buffer is allocated. */
673 ba->prevfree = 0;
674
675 /* Give user buffer starting at queue links. */
676 buf = (void *) &(b->ql);
677 return buf;
678 }
679 }
680 b = b->ql.flink; /* Link to next buffer */
681 }
682 #ifdef BECtl
683
684 /* We failed to find a buffer. If there's a compact function
685 defined, notify it of the size requested. If it returns
686 TRUE, try the allocation again. */
687
688 if ((compfcn == NULL) || (!(*compfcn)(size, ++compactseq))) {
689 break;
690 }
691 }
692
693 /* No buffer available with requested size free. */
694
695 /* Don't give up yet -- look in the reserve supply. */
696
697 if (acqfcn != NULL) {
698 if (size > exp_incr - sizeof(struct bhead)) {
699
700 /* Request is too large to fit in a single expansion
701 block.  Try to satisfy it by a direct buffer acquisition. */
702
703 struct bdhead *bdh;
704
705 size += sizeof(struct bdhead) - sizeof(struct bhead);
706 if ((bdh = BDH((*acqfcn)((bufsize) size))) != NULL) {
707
708 /* Mark the buffer special by setting the size field
709 of its header to zero. */
710 bdh->bh.bsize = 0;
711 bdh->bh.prevfree = 0;
712 bdh->tsize = size;
713 #ifdef BufStats
714 totalloc += size;
715 numget++; /* Increment number of bget() calls */
716 numdget++; /* Direct bget() call count */
717 #endif
718 buf = (void *) (bdh + 1);
719 return buf;
720 }
721
722 } else {
723
724 /* Try to obtain a new expansion block */
725
726 void *newpool;
727
728 if ((newpool = (*acqfcn)((bufsize) exp_incr)) != NULL) {
729 bpool(newpool, exp_incr);
730 buf = bget(requested_size); /* This can't, I say, can't
731 get into a loop. */
732 return buf;
733 }
734 }
735 }
736
737 /* Still no buffer available */
738
739 #endif /* BECtl */
740
741 return NULL;
742 }
743
744 /* BGETZ -- Allocate a buffer and clear its contents to zero. We clear
745 the entire contents of the buffer to zero, not just the
746 region requested by the caller. */
747
748 void *bgetz(size)
749 bufsize size;
750 {
751 char *buf = (char *) bget(size);
752
753 if (buf != NULL) {
754 struct bhead *b;
755 bufsize rsize;
756
757 b = BH(buf - sizeof(struct bhead));
758 rsize = -(b->bsize);
759 if (rsize == 0) {
760 struct bdhead *bd;
761
762 bd = BDH(buf - sizeof(struct bdhead));
763 rsize = bd->tsize - sizeof(struct bdhead);
764 } else {
765 rsize -= sizeof(struct bhead);
766 }
767 assert(rsize >= size);
768 V memset(buf, 0, (MemSize) rsize);
769 }
770 return ((void *) buf);
771 }
772
773 /* BGETR -- Reallocate a buffer. This is a minimal implementation,
774 simply in terms of brel() and bget(). It could be
775 enhanced to allow the buffer to grow into adjacent free
776 blocks and to avoid moving data unnecessarily. */
777
778 void *bgetr(buf, size)
779 void *buf;
780 bufsize size;
781 {
782 void *nbuf;
783 bufsize osize; /* Old size of buffer */
784 struct bhead *b;
785
786 if ((nbuf = bget(size)) == NULL) { /* Acquire new buffer */
787 return NULL;
788 }
789 if (buf == NULL) {
790 return nbuf;
791 }
792 b = BH(((char *) buf) - sizeof(struct bhead));
793 osize = -b->bsize;
794 #ifdef BECtl
795 if (osize == 0) {
796 /* Buffer acquired directly through acqfcn. */
797 struct bdhead *bd;
798
799 bd = BDH(((char *) buf) - sizeof(struct bdhead));
800 osize = bd->tsize - sizeof(struct bdhead);
801 } else
802 #endif
803 osize -= sizeof(struct bhead);
804 assert(osize > 0);
805 V memcpy((char *) nbuf, (char *) buf, /* Copy the data */
806 (MemSize) ((size < osize) ? size : osize));
807 brel(buf);
808 return nbuf;
809 }
810
811 /* BREL -- Release a buffer. */
812
813 void brel(buf)
814 void *buf;
815 {
816 struct bfhead *b, *bn;
817
818 b = BFH(((char *) buf) - sizeof(struct bhead));
819 #ifdef BufStats
820 numrel++; /* Increment number of brel() calls */
821 #endif
822 assert(buf != NULL);
823
824 #ifdef BECtl
825 if (b->bh.bsize == 0) { /* Directly-acquired buffer? */
826 struct bdhead *bdh;
827
828 bdh = BDH(((char *) buf) - sizeof(struct bdhead));
829 assert(b->bh.prevfree == 0);
830 #ifdef BufStats
831 totalloc -= bdh->tsize;
832 assert(totalloc >= 0);
833 numdrel++; /* Number of direct releases */
834 #endif /* BufStats */
835 #ifdef FreeWipe
836 V memset((char *) buf, 0x55,
837 (MemSize) (bdh->tsize - sizeof(struct bdhead)));
838 #endif /* FreeWipe */
839 assert(relfcn != NULL);
840 (*relfcn)((void *) bdh); /* Release it directly. */
841 return;
842 }
843 #endif /* BECtl */
844
845 /* Buffer size must be negative, indicating that the buffer is
846 allocated. */
847
848 if (b->bh.bsize >= 0) {
849 bn = NULL;
850 }
851 assert(b->bh.bsize < 0);
852
853 /* Back pointer in next buffer must be zero, indicating the
854 same thing: */
855
856 assert(BH((char *) b - b->bh.bsize)->prevfree == 0);
857
858 #ifdef BufStats
859 totalloc += b->bh.bsize;
860 assert(totalloc >= 0);
861 #endif
862
863 /* If the back link is nonzero, the previous buffer is free. */
864
865 if (b->bh.prevfree != 0) {
866
867 /* The previous buffer is free. Consolidate this buffer with it
868 by adding the length of this buffer to the previous free
869 buffer. Note that we subtract the size in the buffer being
870 released, since it's negative to indicate that the buffer is
871 allocated. */
872
873 register bufsize size = b->bh.bsize;
874
875 /* Make the previous buffer the one we're working on. */
876 assert(BH((char *) b - b->bh.prevfree)->bsize == b->bh.prevfree);
877 b = BFH(((char *) b) - b->bh.prevfree);
878 b->bh.bsize -= size;
879 } else {
880
881 /* The previous buffer is allocated.  Insert this buffer
882 on the free list as an isolated free block. */
883
884 assert(freelist.ql.blink->ql.flink == &freelist);
885 assert(freelist.ql.flink->ql.blink == &freelist);
886 b->ql.flink = &freelist;
887 b->ql.blink = freelist.ql.blink;
888 freelist.ql.blink = b;
889 b->ql.blink->ql.flink = b;
890 b->bh.bsize = -b->bh.bsize;
891 }
892
893 /* Now we look at the next buffer in memory, located by advancing from
894 the start of this buffer by its size, to see if that buffer is
895 free. If it is, we combine this buffer with the next one in
896 memory, dechaining the second buffer from the free list. */
897
898 bn = BFH(((char *) b) + b->bh.bsize);
899 if (bn->bh.bsize > 0) {
900
901 /* The buffer is free. Remove it from the free list and add
902 its size to that of our buffer. */
903
904 assert(BH((char *) bn + bn->bh.bsize)->prevfree == bn->bh.bsize);
905 assert(bn->ql.blink->ql.flink == bn);
906 assert(bn->ql.flink->ql.blink == bn);
907 bn->ql.blink->ql.flink = bn->ql.flink;
908 bn->ql.flink->ql.blink = bn->ql.blink;
909 b->bh.bsize += bn->bh.bsize;
910
911 /* Finally, advance to the buffer that follows the newly
912 consolidated free block. We must set its backpointer to the
913 head of the consolidated free block. We know the next block
914 must be an allocated block because the process of recombination
915 guarantees that two free blocks will never be contiguous in
916 memory. */
917
918 bn = BFH(((char *) b) + b->bh.bsize);
919 }
920 #ifdef FreeWipe
921 V memset(((char *) b) + sizeof(struct bfhead), 0x55,
922 (MemSize) (b->bh.bsize - sizeof(struct bfhead)));
923 #endif
924 assert(bn->bh.bsize < 0);
925
926 /* The next buffer is allocated. Set the backpointer in it to point
927 to this buffer; the previous free buffer in memory. */
928
929 bn->bh.prevfree = b->bh.bsize;
930
931 #ifdef BECtl
932
933 /* If a block-release function is defined, and this free buffer
934 constitutes the entire block, release it. Note that pool_len
935 is defined in such a way that the test will fail unless all
936 pool blocks are the same size. */
937
938 if (relfcn != NULL &&
939 ((bufsize) b->bh.bsize) == (pool_len - sizeof(struct bhead))) {
940
941 assert(b->bh.prevfree == 0);
942 assert(BH((char *) b + b->bh.bsize)->bsize == ESent);
943 assert(BH((char *) b + b->bh.bsize)->prevfree == b->bh.bsize);
944 /* Unlink the buffer from the free list */
945 b->ql.blink->ql.flink = b->ql.flink;
946 b->ql.flink->ql.blink = b->ql.blink;
947
948 (*relfcn)(b);
949 #ifdef BufStats
950 numprel++; /* Nr of expansion block releases */
951 numpblk--; /* Total number of blocks */
952 assert(numpblk == numpget - numprel);
953 #endif /* BufStats */
954 }
955 #endif /* BECtl */
956 }
957
958 #ifdef BECtl
959
960 /* BECTL -- Establish automatic pool expansion control */
961
962 void bectl(compact, acquire, release, pool_incr)
963 int (*compact) _((bufsize sizereq, int sequence));
964 void *(*acquire) _((bufsize size));
965 void (*release) _((void *buf));
966 bufsize pool_incr;
967 {
968 compfcn = compact;
969 acqfcn = acquire;
970 relfcn = release;
971 exp_incr = pool_incr;
972 }
973 #endif
974
975 /* BPOOL -- Add a region of memory to the buffer pool. */
976
977 void bpool(buf, len)
978 void *buf;
979 bufsize len;
980 {
981 struct bfhead *b = BFH(buf);
982 struct bhead *bn;
983
984 #ifdef SizeQuant
985 len &= ~(SizeQuant - 1);
986 #endif
987 #ifdef BECtl
988 if (pool_len == 0) {
989 pool_len = len;
990 } else if (len != pool_len) {
991 pool_len = -1;
992 }
993 #ifdef BufStats
994 numpget++; /* Number of block acquisitions */
995 numpblk++; /* Number of blocks total */
996 assert(numpblk == numpget - numprel);
997 #endif /* BufStats */
998 #endif /* BECtl */
999
1000 /* Since the block is initially occupied by a single free buffer,
1001 it had better not be (much) larger than the largest buffer
1002 whose size we can store in bhead.bsize. */
1003
1004 assert(len - sizeof(struct bhead) <= -((bufsize) ESent + 1));
1005
1006 /* Clear the backpointer at the start of the block to indicate that
1007 there is no free block prior to this one. That blocks
1008 recombination when the first block in memory is released. */
1009
1010 b->bh.prevfree = 0;
1011
1012 /* Chain the new block to the free list. */
1013
1014 assert(freelist.ql.blink->ql.flink == &freelist);
1015 assert(freelist.ql.flink->ql.blink == &freelist);
1016 b->ql.flink = &freelist;
1017 b->ql.blink = freelist.ql.blink;
1018 freelist.ql.blink = b;
1019 b->ql.blink->ql.flink = b;
1020
1021 /* Create a dummy allocated buffer at the end of the pool. This dummy
1022 buffer is seen when a buffer at the end of the pool is released and
1023 blocks recombination of the last buffer with the dummy buffer at
1024 the end. The length in the dummy buffer is set to the largest
1025 negative number to denote the end of the pool for diagnostic
1026 routines (this specific value is not counted on by the actual
1027 allocation and release functions). */
1028
1029 len -= sizeof(struct bhead);
1030 b->bh.bsize = (bufsize) len;
1031 #ifdef FreeWipe
1032 V memset(((char *) b) + sizeof(struct bfhead), 0x55,
1033 (MemSize) (len - sizeof(struct bfhead)));
1034 #endif
1035 bn = BH(((char *) b) + len);
1036 bn->prevfree = (bufsize) len;
1037 /* Definition of ESent assumes two's complement! */
1038 assert((~0) == -1);
1039 bn->bsize = ESent;
1040 }
1041
1042 #ifdef BufStats
1043
1044 /* BSTATS -- Return buffer allocation free space statistics. */
1045
1046 void bstats(curalloc, totfree, maxfree, nget, nrel)
1047 bufsize *curalloc, *totfree, *maxfree;
1048 long *nget, *nrel;
1049 {
1050 struct bfhead *b = freelist.ql.flink;
1051
1052 *nget = numget;
1053 *nrel = numrel;
1054 *curalloc = totalloc;
1055 *totfree = 0;
1056 *maxfree = -1;
1057 while (b != &freelist) {
1058 assert(b->bh.bsize > 0);
1059 *totfree += b->bh.bsize;
1060 if (b->bh.bsize > *maxfree) {
1061 *maxfree = b->bh.bsize;
1062 }
1063 b = b->ql.flink; /* Link to next buffer */
1064 }
1065 }
1066
1067 #ifdef BECtl
1068
1069 /* BSTATSE -- Return extended statistics */
1070
1071 void bstatse(pool_incr, npool, npget, nprel, ndget, ndrel)
1072 bufsize *pool_incr;
1073 long *npool, *npget, *nprel, *ndget, *ndrel;
1074 {
1075 *pool_incr = (pool_len < 0) ? -exp_incr : exp_incr;
1076 *npool = numpblk;
1077 *npget = numpget;
1078 *nprel = numprel;
1079 *ndget = numdget;
1080 *ndrel = numdrel;
1081 }
1082 #endif /* BECtl */
1083 #endif /* BufStats */
1084
1085 #ifdef DumpData
1086
1087 /* BUFDUMP -- Dump the data in a buffer. This is called with the user
1088 data pointer, and backs up to the buffer header. It will
1089 dump either a free block or an allocated one. */
1090
1091 void bufdump(buf)
1092 void *buf;
1093 {
1094 struct bfhead *b;
1095 unsigned char *bdump;
1096 bufsize bdlen;
1097
1098 b = BFH(((char *) buf) - sizeof(struct bhead));
1099 assert(b->bh.bsize != 0);
1100 if (b->bh.bsize < 0) {
1101 bdump = (unsigned char *) buf;
1102 bdlen = (-b->bh.bsize) - sizeof(struct bhead);
1103 } else {
1104 bdump = (unsigned char *) (((char *) b) + sizeof(struct bfhead));
1105 bdlen = b->bh.bsize - sizeof(struct bfhead);
1106 }
1107
1108 while (bdlen > 0) {
1109 int i, dupes = 0;
1110 bufsize l = bdlen;
1111 char bhex[50], bascii[20];
1112
1113 if (l > 16) {
1114 l = 16;
1115 }
1116
1117 for (i = 0; i < l; i++) {
1118 V sprintf(bhex + i * 3, "%02X ", bdump[i]);
1119 bascii[i] = isprint(bdump[i]) ? bdump[i] : ' ';
1120 }
1121 bascii[i] = 0;
1122 V printf("%-48s %s\n", bhex, bascii);
1123 bdump += l;
1124 bdlen -= l;
1125 while ((bdlen > 16) && (memcmp((char *) (bdump - 16),
1126 (char *) bdump, 16) == 0)) {
1127 dupes++;
1128 bdump += 16;
1129 bdlen -= 16;
1130 }
1131 if (dupes > 1) {
1132 V printf(
1133 " (%d lines [%d bytes] identical to above line skipped)\n",
1134 dupes, dupes * 16);
1135 } else if (dupes == 1) {
1136 bdump -= 16;
1137 bdlen += 16;
1138 }
1139 }
1140 }
1141 #endif
1142
1143 #ifdef BufDump
1144
1145 /* BPOOLD -- Dump a buffer pool. The buffer headers are always listed.
1146 If DUMPALLOC is nonzero, the contents of allocated buffers
1147 are dumped. If DUMPFREE is nonzero, free blocks are
1148 dumped as well. If FreeWipe checking is enabled, free
1149 blocks which have been clobbered will always be dumped. */
1150
1151 void bpoold(buf, dumpalloc, dumpfree)
1152 void *buf;
1153 int dumpalloc, dumpfree;
1154 {
1155 struct bfhead *b = BFH(buf);
1156
1157 while (b->bh.bsize != ESent) {
1158 bufsize bs = b->bh.bsize;
1159
1160 if (bs < 0) {
1161 bs = -bs;
1162 V printf("Allocated buffer: size %6ld bytes.\n", (long) bs);
1163 if (dumpalloc) {
1164 bufdump((void *) (((char *) b) + sizeof(struct bhead)));
1165 }
1166 } else {
1167 char *lerr = "";
1168
1169 assert(bs > 0);
1170 if ((b->ql.blink->ql.flink != b) ||
1171 (b->ql.flink->ql.blink != b)) {
1172 lerr = " (Bad free list links)";
1173 }
1174 V printf("Free block: size %6ld bytes.%s\n",
1175 (long) bs, lerr);
1176 #ifdef FreeWipe
1177 lerr = ((char *) b) + sizeof(struct bfhead);
1178 if ((bs > sizeof(struct bfhead)) && ((*lerr != 0x55) ||
1179 (memcmp(lerr, lerr + 1,
1180 (MemSize) (bs - (sizeof(struct bfhead) + 1))) != 0))) {
1181 V printf(
1182 "(Contents of above free block have been overstored.)\n");
1183 bufdump((void *) (((char *) b) + sizeof(struct bhead)));
1184 } else
1185 #endif
1186 if (dumpfree) {
1187 bufdump((void *) (((char *) b) + sizeof(struct bhead)));
1188 }
1189 }
1190 b = BFH(((char *) b) + bs);
1191 }
1192 }
1193 #endif /* BufDump */
1194
1195 #ifdef BufValid
1196
1197 /* BPOOLV -- Validate a buffer pool. If NDEBUG isn't defined,
1198 any error generates an assertion failure. */
1199
1200 int bpoolv(buf)
1201 void *buf;
1202 {
1203 struct bfhead *b = BFH(buf);
1204
1205 while (b->bh.bsize != ESent) {
1206 bufsize bs = b->bh.bsize;
1207
1208 if (bs < 0) {
1209 bs = -bs;
1210 } else {
1211 char *lerr = "";
1212
1213 assert(bs > 0);
1214 if (bs <= 0) {
1215 return 0;
1216 }
1217 if ((b->ql.blink->ql.flink != b) ||
1218 (b->ql.flink->ql.blink != b)) {
1219 V printf("Free block: size %6ld bytes. (Bad free list links)\n",
1220 (long) bs);
1221 assert(0);
1222 return 0;
1223 }
1224 #ifdef FreeWipe
1225 lerr = ((char *) b) + sizeof(struct bfhead);
1226 if ((bs > sizeof(struct bfhead)) && ((*lerr != 0x55) ||
1227 (memcmp(lerr, lerr + 1,
1228 (MemSize) (bs - (sizeof(struct bfhead) + 1))) != 0))) {
1229 V printf(
1230 "(Contents of above free block have been overstored.)\n");
1231 bufdump((void *) (((char *) b) + sizeof(struct bhead)));
1232 assert(0);
1233 return 0;
1234 }
1235 #endif
1236 }
1237 b = BFH(((char *) b) + bs);
1238 }
1239 return 1;
1240 }
1241 #endif /* BufValid */
1242
1243 /***********************\
1244 * *
1245 * Built-in test program *
1246 * *
1247 \***********************/
1248
1249 #ifdef TestProg
1250
1251 #define Repeatable 1 /* Repeatable pseudorandom sequence */
1252 /* If Repeatable is not defined, a
1253 time-seeded pseudorandom sequence
1254 is generated, exercising BGET with
1255 a different pattern of calls on each
1256 run. */
1257 #define OUR_RAND /* Use our own built-in version of
1258 rand() to guarantee the test is
1259 100% repeatable. */
1260
1261 #ifdef BECtl
1262 #define PoolSize 300000 /* Test buffer pool size */
1263 #else
1264 #define PoolSize 50000 /* Test buffer pool size */
1265 #endif
1266 #define ExpIncr 32768 /* Test expansion block size */
1267 #define CompactTries 10 /* Maximum tries at compacting */
1268
1269 #define dumpAlloc 0 /* Dump allocated buffers ? */
1270 #define dumpFree 0 /* Dump free buffers ? */
1271
1272 #ifndef Repeatable
1273 extern long time();
1274 #endif
1275
1276 extern char *malloc();
1277 extern int free _((char *));
1278
1279 static char *bchain = NULL; /* Our private buffer chain */
1280 static char *bp = NULL; /* Our initial buffer pool */
1281
1282 #include <math.h>
1283
1284 #ifdef OUR_RAND
1285
1286 static unsigned long int next = 1;
1287
1288 /* Return next random integer */
1289
1290 int rand()
1291 {
1292 next = next * 1103515245L + 12345;
1293 return (unsigned int) (next / 65536L) % 32768L;
1294 }
1295
1296 /* Set seed for random generator */
1297
1298 void srand(seed)
1299 unsigned int seed;
1300 {
1301 next = seed;
1302 }
1303 #endif
1304
1305 /* STATS -- Edit statistics returned by bstats() or bstatse(). */
1306
1307 static void stats(when)
1308 char *when;
1309 {
1310 bufsize cural, totfree, maxfree;
1311 long nget, nfree;
1312 #ifdef BECtl
1313 bufsize pincr;
1314 long totblocks, npget, nprel, ndget, ndrel;
1315 #endif
1316
1317 bstats(&cural, &totfree, &maxfree, &nget, &nfree);
1318 V printf(
1319 "%s: %ld gets, %ld releases. %ld in use, %ld free, largest = %ld\n",
1320 when, nget, nfree, (long) cural, (long) totfree, (long) maxfree);
1321 #ifdef BECtl
1322 bstatse(&pincr, &totblocks, &npget, &nprel, &ndget, &ndrel);
1323 V printf(
1324 " Blocks: size = %ld, %ld (%ld bytes) in use, %ld gets, %ld frees\n",
1325 (long)pincr, totblocks, pincr * totblocks, npget, nprel);
1326 V printf(" %ld direct gets, %ld direct frees\n", ndget, ndrel);
1327 #endif /* BECtl */
1328 }
1329
1330 #ifdef BECtl
1331 static int protect = 0; /* Disable compaction during bgetr() */
1332
1333 /* BCOMPACT -- Compaction call-back function. */
1334
1335 static int bcompact(bsize, seq)
1336 bufsize bsize;
1337 int seq;
1338 {
1339 #ifdef CompactTries
1340 char *bc = bchain;
1341 int i = rand() & 0x3;
1342
1343 #ifdef COMPACTRACE
1344 V printf("Compaction requested. %ld bytes needed, sequence %d.\n",
1345 (long) bsize, seq);
1346 #endif
1347
1348 if (protect || (seq > CompactTries)) {
1349 #ifdef COMPACTRACE
1350 V printf("Compaction gave up.\n");
1351 #endif
1352 return 0;
1353 }
1354
1355 /* Based on a random cast, release a random buffer in the list
1356 of allocated buffers. */
1357
1358 while (i > 0 && bc != NULL) {
1359 bc = *((char **) bc);
1360 i--;
1361 }
1362 if (bc != NULL) {
1363 char *fb;
1364
1365 fb = *((char **) bc);
1366 if (fb != NULL) {
1367 *((char **) bc) = *((char **) fb);
1368 brel((void *) fb);
1369 return 1;
1370 }
1371 }
1372
1373 #ifdef COMPACTRACE
1374 V printf("Compaction bailed out.\n");
1375 #endif
1376 #endif /* CompactTries */
1377 return 0;
1378 }
1379
1380 /* BEXPAND -- Expand pool call-back function. */
1381
1382 static void *bexpand(size)
1383 bufsize size;
1384 {
1385 void *np = NULL;
1386 bufsize cural, totfree, maxfree;
1387 long nget, nfree;
1388
1389 /* Don't expand beyond the total allocated size given by PoolSize. */
1390
1391 bstats(&cural, &totfree, &maxfree, &nget, &nfree);
1392
1393 if (cural < PoolSize) {
1394 np = (void *) malloc((unsigned) size);
1395 }
1396 #ifdef EXPTRACE
1397 V printf("Expand pool by %ld -- %s.\n", (long) size,
1398 np == NULL ? "failed" : "succeeded");
1399 #endif
1400 return np;
1401 }
1402
1403 /* BSHRINK -- Shrink buffer pool call-back function. */
1404
1405 static void bshrink(buf)
1406 void *buf;
1407 {
1408 if (((char *) buf) == bp) {
1409 #ifdef EXPTRACE
1410 V printf("Initial pool released.\n");
1411 #endif
1412 bp = NULL;
1413 }
1414 #ifdef EXPTRACE
1415 V printf("Shrink pool.\n");
1416 #endif
1417 free((char *) buf);
1418 }
1419
1420 #endif /* BECtl */
1421
1422 /* Restrict buffer requests to those large enough to contain our pointer and
1423 small enough for the CPU architecture. */
1424
1425 static bufsize blimit(bs)
1426 bufsize bs;
1427 {
1428 if (bs < sizeof(char *)) {
1429 bs = sizeof(char *);
1430 }
1431
1432 /* This is written out in this ugly fashion because the
1433 cool expression in sizeof(int) that auto-configured
1434 to any length int befuddled some compilers. */
1435
1436 if (sizeof(int) == 2) {
1437 if (bs > 32767) {
1438 bs = 32767;
1439 }
1440 } else {
1441 if (bs > 200000) {
1442 bs = 200000;
1443 }
1444 }
1445 return bs;
1446 }
1447
1448 int main()
1449 {
1450 int i;
1451 double x;
1452
1453 /* Seed the random number generator. If Repeatable is defined, we
1454 always use the same seed. Otherwise, we seed from the clock to
1455 shake things up from run to run. */
1456
1457 #ifdef Repeatable
1458 V srand(1234);
1459 #else
1460 V srand((int) time((long *) NULL));
1461 #endif
1462
1463 /* Compute x such that pow(x, p) ranges between 1 and 4*ExpIncr as
1464 p ranges from 0 to ExpIncr-1, with a concentration in the lower
1465 numbers. */
1466
1467 x = 4.0 * ExpIncr;
1468 x = log(x);
1469 x = exp(log(4.0 * ExpIncr) / (ExpIncr - 1.0));
1470
1471 #ifdef BECtl
1472 bectl(bcompact, bexpand, bshrink, (bufsize) ExpIncr);
1473 bp = malloc(ExpIncr);
1474 assert(bp != NULL);
1475 bpool((void *) bp, (bufsize) ExpIncr);
1476 #else
1477 bp = malloc(PoolSize);
1478 assert(bp != NULL);
1479 bpool((void *) bp, (bufsize) PoolSize);
1480 #endif
1481
1482 stats("Create pool");
1483 V bpoolv((void *) bp);
1484 bpoold((void *) bp, dumpAlloc, dumpFree);
1485
1486 for (i = 0; i < TestProg; i++) {
1487 char *cb;
1488 bufsize bs = pow(x, (double) (rand() & (ExpIncr - 1)));
1489
1490 assert(bs <= (((bufsize) 4) * ExpIncr));
1491 bs = blimit(bs);
1492 if (rand() & 0x400) {
1493 cb = (char *) bgetz(bs);
1494 } else {
1495 cb = (char *) bget(bs);
1496 }
1497 if (cb == NULL) {
1498 #ifdef EasyOut
1499 break;
1500 #else
1501 char *bc = bchain;
1502
1503 if (bc != NULL) {
1504 char *fb;
1505
1506 fb = *((char **) bc);
1507 if (fb != NULL) {
1508 *((char **) bc) = *((char **) fb);
1509 brel((void *) fb);
1510 }
1511 continue;
1512 }
1513 #endif
1514 }
1515 *((char **) cb) = (char *) bchain;
1516 bchain = cb;
1517
1518 /* Based on a random cast, release a random buffer in the list
1519 of allocated buffers. */
1520
1521 if ((rand() & 0x10) == 0) {
1522 char *bc = bchain;
1523 int i = rand() & 0x3;
1524
1525 while (i > 0 && bc != NULL) {
1526 bc = *((char **) bc);
1527 i--;
1528 }
1529 if (bc != NULL) {
1530 char *fb;
1531
1532 fb = *((char **) bc);
1533 if (fb != NULL) {
1534 *((char **) bc) = *((char **) fb);
1535 brel((void *) fb);
1536 }
1537 }
1538 }
1539
1540 /* Based on a random cast, reallocate a random buffer in the list
1541 to a random size */
1542
1543 if ((rand() & 0x20) == 0) {
1544 char *bc = bchain;
1545 int i = rand() & 0x3;
1546
1547 while (i > 0 && bc != NULL) {
1548 bc = *((char **) bc);
1549 i--;
1550 }
1551 if (bc != NULL) {
1552 char *fb;
1553
1554 fb = *((char **) bc);
1555 if (fb != NULL) {
1556 char *newb;
1557
1558 bs = pow(x, (double) (rand() & (ExpIncr - 1)));
1559 bs = blimit(bs);
1560 #ifdef BECtl
1561 protect = 1; /* Protect against compaction */
1562 #endif
1563 newb = (char *) bgetr((void *) fb, bs);
1564 #ifdef BECtl
1565 protect = 0;
1566 #endif
1567 if (newb != NULL) {
1568 *((char **) bc) = newb;
1569 }
1570 }
1571 }
1572 }
1573 }
1574 stats("\nAfter allocation");
1575 if (bp != NULL) {
1576 V bpoolv((void *) bp);
1577 bpoold((void *) bp, dumpAlloc, dumpFree);
1578 }
1579
1580 while (bchain != NULL) {
1581 char *buf = bchain;
1582
1583 bchain = *((char **) buf);
1584 brel((void *) buf);
1585 }
1586 stats("\nAfter release");
1587 #ifndef BECtl
1588 if (bp != NULL) {
1589 V bpoolv((void *) bp);
1590 bpoold((void *) bp, dumpAlloc, dumpFree);
1591 }
1592 #endif
1593
1594 return 0;
1595 }
1596 #endif