completely rewritten PagedPool.
[reactos.git] / reactos / ntoskrnl / mm / RPoolMgr.h
1 /* $Id: RPoolMgr.h,v 1.1 2004/12/17 13:20:05 royce Exp $
2 *
3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/RPoolMgr.h
6 * PURPOSE: A semi-generic reuseable Pool implementation
7 * PROGRAMMER: Royce Mitchell III
8 * UPDATE HISTORY:
9 */
10
11 #ifndef RPOOLMGR_H
12 #define RPOOLMGR_H
13
14 typedef unsigned long rulong;
15
// True when ptr falls inside the pool's user area. Fully parenthesized
// (BUGFIX) so the macro composes safely in larger expressions such as
// !R_IS_POOL_PTR(...) or chained &&/||, and so the 'pool' argument is
// protected against operator-precedence surprises.
#define R_IS_POOL_PTR(pool,ptr) ((void*)(ptr) >= (pool)->UserBase && (char*)(ptr) < ((char*)(pool)->UserBase+(pool)->UserSize))
17 #define R_ASSERT_PTR(pool,ptr) ASSERT( R_IS_POOL_PTR(pool,ptr) )
18 #define R_ASSERT_SIZE(pool,sz) ASSERT( sz > (sizeof(R_USED)+2*R_RZ) && sz >= sizeof(R_FREE) && sz < pool->UserSize )
19
20 #ifndef R_ROUND_UP
21 #define R_ROUND_UP(x,s) ((PVOID)(((rulong)(x)+(s)-1) & ~((s)-1)))
22 #endif//R_ROUND_UP
23
24 #ifndef R_ROUND_DOWN
25 #define R_ROUND_DOWN(x,s) ((PVOID)(((rulong)(x)) & ~((s)-1)))
26 #endif//R_ROUND_DOWN
27
28 #ifndef R_QUEMIN
29 // R_QUEMIN is the minimum number of entries to keep in a que
30 #define R_QUEMIN 0
31 #endif//R_QUEMIN
32
33 #ifndef R_QUECOUNT
34 // 16, 32, 64, 128, 256, 512
35 #define R_QUECOUNT 6
36 #endif//R_QUECOUNT
37
38 #ifndef R_RZ
39 // R_RZ is the redzone size
40 #define R_RZ 4
41 #endif//R_RZ
42
43 #ifndef R_RZ_LOVALUE
44 #define R_RZ_LOVALUE 0x87
45 #endif//R_RZ_LOVALUE
46
47 #ifndef R_RZ_HIVALUE
48 #define R_RZ_HIVALUE 0xA5
49 #endif//R_RZ_HIVALUE
50
51 #ifndef R_STACK
52 // R_STACK is the number of stack entries to store in blocks for debug purposes
53 #define R_STACK 3
54 #endif//R_STACK
55
56 #ifndef R_TAG
57 // R_TAG do we keep track of tags on a per-memory block basis?
58 #define R_TAG 0
59 #endif//R_TAG
60
61 #ifdef R_MAGIC
62 # ifndef R_FREE_MAGIC
63 # define R_FREE_MAGIC (rulong)(('F'<<0) + ('r'<<8) + ('E'<<16) + ('e'<<24))
64 # endif//R_FREE_MAGIC
65 # ifndef R_USED_MAGIC
66 # define R_USED_MAGIC (rulong)(('u'<<0) + ('S'<<8) + ('e'<<16) + ('D'<<24))
67 # endif//R_USED_MAGIC
68 #endif//R_MAGIC
69
70 // **IMPORTANT NOTE** Magic, PrevSize, Size and Status must be at same offset
71 // in both the R_FREE and R_USED structures
72
// Header for a block on the pool's free list.
// Magic, PrevSize, Size and Status share offsets with R_USED (see note
// above) so a block's state can be inspected before it is known.
typedef struct _R_FREE
{
#ifdef R_MAGIC
	rulong FreeMagic;       // R_FREE_MAGIC while the block is free
#endif//R_MAGIC
	rulong PrevSize : 30;   // size of the physically preceding block (0 = first block)
	rulong Status : 2;      // 0 = free, 1 = allocated, 2 = parked in a que
	rulong Size;            // total size of this block in bytes, header included
#if R_STACK
	rulong LastOwnerStack[R_STACK]; // return addresses captured when the block was freed
#endif//R_STACK
	struct _R_FREE* NextFree;   // doubly-linked free-list links
	struct _R_FREE* PrevFree;
}
R_FREE, *PR_FREE;
88
// Header preceding every allocated (or que-parked) user block.
// The first four fields must stay layout-compatible with R_FREE.
typedef struct _R_USED
{
#ifdef R_MAGIC
	rulong UsedMagic;       // R_USED_MAGIC while the block is allocated
#endif//R_MAGIC
	rulong PrevSize : 30;   // size of the physically preceding block (0 = first block)
	rulong Status : 2;      // 0 = free, 1 = allocated, 2 = parked in a que
	rulong Size;            // total block size in bytes, header + redzones included
#if R_STACK
	rulong LastOwnerStack[R_STACK]; // return addresses captured at allocation time
#endif//R_STACK
	struct _R_USED* NextUsed;   // singly-linked link while parked in a que
#if R_RZ
	rulong UserSize; // how many bytes the user actually asked for...
#endif//R_RZ
	rulong Tag;      // 4-character allocation tag for tracking
}
R_USED, *PR_USED;
107
// A "que" is a FIFO cache of recently freed fixed-size blocks, kept so
// same-sized allocations can be served without walking the free list.
typedef struct _R_QUE
{
	rulong Count;          // number of blocks currently parked here
	PR_USED First, Last;   // FIFO: removed from First, appended at Last
}
R_QUE, *PR_QUE;
114
// Pool control structure; lives at the very start of the pool memory,
// with the user-visible heap (UserBase/UserSize) immediately after it.
typedef struct _R_POOL
{
	void* PoolBase;    // base of the whole pool region (this struct included)
	rulong PoolSize;   // size of the whole region in bytes
	void* UserBase;    // first byte available for blocks (PoolBase + sizeof(R_POOL))
	rulong UserSize;   // bytes available for blocks (PoolSize - sizeof(R_POOL))
	rulong Alignments[3];         // selectable allocation alignments, indexed by 'align'
	PR_FREE FirstFree, LastFree;  // doubly-linked list of free blocks
	R_QUE Que[R_QUECOUNT][3];     // [size class][alignment class] caches of freed blocks
	R_MUTEX Mutex;                // guards all pool structures (R_MUTEX supplied by includer)
}
R_POOL, *PR_POOL;
127
128 static int
129 RQueWhich ( rulong size )
130 {
131 rulong que, quesize;
132 for ( que=0, quesize=16; que < R_QUECOUNT; que++, quesize<<=1 )
133 {
134 if ( quesize >= size )
135 {
136 return que;
137 }
138 }
139 return -1;
140 }
141
142 static void
143 RQueInit ( PR_QUE que )
144 {
145 que->Count = 0;
146 que->First = NULL;
147 que->Last = NULL;
148 }
149
150 static void
151 RQueAdd ( PR_QUE que, PR_USED Item )
152 {
153 ASSERT(Item);
154 Item->Status = 2;
155 Item->NextUsed = NULL;
156 ++que->Count;
157 if ( !que->Last )
158 {
159 que->First = que->Last = Item;
160 return;
161 }
162 ASSERT(!que->Last->NextUsed);
163 que->Last->NextUsed = Item;
164 que->Last = Item;
165 }
166
167 static PR_USED
168 RQueRemove ( PR_QUE que )
169 {
170 PR_USED Item;
171 #if R_QUEMIN
172 if ( que->count < R_QUEMIN )
173 return NULL;
174 #endif
175 if ( !que->First )
176 return NULL;
177 Item = que->First;
178 que->First = Item->NextUsed;
179 if ( !--que->Count )
180 {
181 ASSERT ( !que->First );
182 que->Last = NULL;
183 }
184 Item->Status = 0;
185 return Item;
186 }
187
188 static void
189 RPoolAddFree ( PR_POOL pool, PR_FREE Item )
190 {
191 ASSERT(pool);
192 ASSERT(Item);
193 if ( !pool->FirstFree )
194 {
195 pool->FirstFree = pool->LastFree = Item;
196 Item->NextFree = NULL;
197 }
198 else
199 {
200 pool->FirstFree->PrevFree = Item;
201 Item->NextFree = pool->FirstFree;
202 pool->FirstFree = Item;
203 }
204 Item->PrevFree = NULL;
205 }
206
207 static void
208 RPoolRemoveFree ( PR_POOL pool, PR_FREE Item )
209 {
210 ASSERT(pool);
211 ASSERT(Item);
212 if ( Item->NextFree )
213 Item->NextFree->PrevFree = Item->PrevFree;
214 else
215 {
216 ASSERT ( pool->LastFree == Item );
217 pool->LastFree = Item->PrevFree;
218 }
219 if ( Item->PrevFree )
220 Item->PrevFree->NextFree = Item->NextFree;
221 else
222 {
223 ASSERT ( pool->FirstFree == Item );
224 pool->FirstFree = Item->NextFree;
225 }
226 #if defined(DBG) || defined(KDBG)
227 Item->NextFree = Item->PrevFree = (PR_FREE)0xDEADBEEF;
228 #endif//DBG || KDBG
229 }
230
// this function is used to walk up a stack trace... it returns
// the pointer to the next return address above the pointer to the
// return address pointed to by Frame...
//
// Layout assumption (x86, frame pointers enabled): Frame points at a
// return-address slot sitting just above a saved EBP, so Frame[-1] is
// the caller's saved EBP and the next return address lives right above
// that.
// NOTE(review): the 0xDEADBEAF sentinel here differs from the
// 0xDEADBEEF poison value used elsewhere in this file -- possibly a
// deliberate distinct stack marker, possibly a typo; confirm before
// changing either.
static rulong*
RNextStackFrame ( rulong* Frame )
{
	// stop on a NULL frame, a zero return address, or the sentinel
	if ( !Frame || !*Frame || *Frame == 0xDEADBEAF )
		return NULL;
	return (rulong*)( Frame[-1] ) + 1;
}
241
// this function returns a pointer to the address the
// caller will return to. Use RNextStackFrame() above to walk
// further up the stack.
static rulong*
RStackFrame()
{
	rulong* Frame;
	// grab the current EBP; Frame+1 then points at this function's own
	// return-address slot, which RNextStackFrame() steps over to yield
	// the caller's slot. x86-specific; requires the compiler to emit
	// frame pointers (no -fomit-frame-pointer).
#if defined __GNUC__
	__asm__("mov %%ebp, %%ebx" : "=b" (Frame) : );
#elif defined(_MSC_VER)
	__asm mov [Frame], ebp
#endif
	return RNextStackFrame ( Frame + 1 );
}
256
// Record a partial stack trace of whoever freed this block into
// free->LastOwnerStack for post-mortem diagnostics. Skips the pool's
// own internal frames before capturing up to R_STACK return addresses.
// NOTE(review): R_EXTRA_STACK_UP is not defined anywhere in this
// header -- the including file must define it (number of extra caller
// frames to skip); confirm.
static void
RFreeFillStack ( PR_FREE free )
{
	rulong* Frame = RStackFrame();
	int i;
	memset ( free->LastOwnerStack, 0, sizeof(free->LastOwnerStack) );
	Frame = RNextStackFrame ( Frame ); // step out of RFreeInit()
	Frame = RNextStackFrame ( Frame ); // step out of RFreeSplit()/RPoolReclaim()
	Frame = RNextStackFrame ( Frame ); // step out of RPoolFree()
	for ( i = 0; i < R_EXTRA_STACK_UP; i++ )
		Frame = RNextStackFrame ( Frame );
	// RNextStackFrame() returns NULL at the top of the stack, which
	// ends the capture early; unfilled slots stay zero from the memset
	for ( i = 0; i < R_STACK && Frame; i++ )
	{
		free->LastOwnerStack[i] = *Frame;
		Frame = RNextStackFrame ( Frame );
	}
}
274
// Record a partial stack trace of the allocating caller into
// used->LastOwnerStack for leak/corruption diagnostics. Skips the
// pool's own internal frames before capturing up to R_STACK return
// addresses.
// NOTE(review): R_EXTRA_STACK_UP must be defined by the including
// file -- it is not defined in this header; confirm.
static void
RUsedFillStack ( PR_USED used )
{
	rulong* Frame = RStackFrame();
	int i;
	memset ( used->LastOwnerStack, 0, sizeof(used->LastOwnerStack) );
	Frame = RNextStackFrame ( Frame ); // step out of RUsedInit()
	Frame = RNextStackFrame ( Frame ); // step out of RPoolAlloc()
	for ( i = 0; i < R_EXTRA_STACK_UP; i++ )
		Frame = RNextStackFrame ( Frame );
	// capture ends early (slots stay zeroed) if the walk tops out
	for ( i = 0; i < R_STACK && Frame; i++ )
	{
		used->LastOwnerStack[i] = *Frame;
		Frame = RNextStackFrame ( Frame );
	}
}
291
292 static PR_FREE
293 RFreeInit ( void* memory )
294 {
295 PR_FREE block = (PR_FREE)memory;
296 #if R_FREEMAGIC
297 block->FreeMagic = R_FREE_MAGIC;
298 #endif//R_FREEMAGIC
299 block->Status = 0;
300 RFreeFillStack ( block );
301 #if defined(DBG) || defined(KDBG)
302 block->PrevFree = block->NextFree = (PR_FREE)0xDEADBEEF;
303 #endif//DBG || KDBG
304 return block;
305 }
306
// Carve an R_POOL out of a caller-supplied memory region: the control
// structure lives at PoolBase, and everything after it becomes one big
// free block spanning the whole user area. align1..align3 become the
// three alignments selectable via the 'align' argument of RPoolAlloc.
// NOTE(review): there is no validation that PoolSize > sizeof(R_POOL)
// (a tiny region would wrap UserSize), and pool->Mutex is not
// initialized here -- presumably the includer's R_*_MUTEX macros
// handle that; confirm.
PR_POOL
RPoolInit ( void* PoolBase, rulong PoolSize, int align1, int align2, int align3 )
{
	int align, que;
	PR_POOL pool = (PR_POOL)PoolBase;

	pool->PoolBase = PoolBase;
	pool->PoolSize = PoolSize;
	pool->UserBase = (char*)pool->PoolBase + sizeof(R_POOL);
	pool->UserSize = PoolSize - sizeof(R_POOL);
	pool->Alignments[0] = align1;
	pool->Alignments[1] = align2;
	pool->Alignments[2] = align3;
	pool->FirstFree = pool->LastFree = NULL;

	// the entire user area starts life as a single free block
	RPoolAddFree ( pool,
		RFreeInit ( pool->UserBase ));

	pool->FirstFree->PrevSize = 0;
	pool->FirstFree->Size = pool->UserSize;

	// start with every que empty
	for ( que = 0; que < R_QUECOUNT; que++ )
	{
		for ( align = 0; align < 3; align++ )
		{
			RQueInit ( &pool->Que[que][align] );
		}
	}
	return pool;
}
337
338 static const char*
339 RFormatTag ( rulong Tag, char* buf )
340 {
341 int i;
342 *(rulong*)&buf[0] = Tag;
343 buf[4] = 0;
344 for ( i = 0; i < 4; i++ )
345 {
346 if ( !buf[i] )
347 buf[i] = ' ';
348 }
349 return buf;
350 }
351
352 #if !R_RZ
353 #define RUsedRedZoneCheck(pUsed,Addr,file,line)
354 #else//R_RZ
// Report a corrupt or misused block and halt: dumps everything known
// about it (magic, tag, sizes, optionally both redzones, and the
// recorded owner stack), then calls R_PANIC() -- does not return.
static void
RiBadBlock ( PR_USED pUsed, char* Addr, const char* violation, const char* file, int line, int printzone )
{
	char tag[5];
	int i;

	R_DEBUG("%s(%i): %s detected for paged pool address 0x%x\n",
		file, line, violation, Addr );

#ifdef R_MAGIC
	R_DEBUG ( "UsedMagic 0x%x, ", pUsed->UsedMagic );
#endif//R_MAGIC
	R_DEBUG ( "Tag %s(%X), Size %i, UserSize %i",
		RFormatTag(pUsed->Tag,tag),
		pUsed->Tag,
		pUsed->Size,
		pUsed->UserSize );

	if ( printzone )
	{
		// dump the raw guard bytes on both sides of the user area so
		// the over/underrun pattern is visible in the log
		unsigned char* HiZone = Addr + pUsed->UserSize;
		unsigned char* LoZone = Addr - R_RZ; // this is to simplify indexing below...
		R_DEBUG ( ", LoZone " );
		for ( i = 0; i < R_RZ; i++ )
			R_DEBUG ( "%02x", LoZone[i] );
		R_DEBUG ( ", HiZone " );
		for ( i = 0; i < R_RZ; i++ )
			R_DEBUG ( "%02x", HiZone[i] );
	}
	R_DEBUG ( "\n" );

	R_DEBUG ( "First few Stack Frames:" );
	for ( i = 0; i < R_STACK; i++ )
	{
		if ( pUsed->LastOwnerStack[i] != 0xDEADBEEF )
		{
			R_DEBUG(" ");
			// R_PRINT_ADDRESS returning zero means no symbol was
			// printed; fall back to the raw address
			if (!R_PRINT_ADDRESS ((PVOID)pUsed->LastOwnerStack[i]) )
			{
				R_DEBUG("<%X>", pUsed->LastOwnerStack[i] );
			}
		}
	}
	R_DEBUG ( "\n" );

	R_PANIC();
}
402 static void
403 RUsedRedZoneCheck ( PR_POOL pool, PR_USED pUsed, char* Addr, const char* file, int line )
404 {
405 int i;
406 unsigned char *LoZone, *HiZone;
407 int bLow = 1;
408 int bHigh = 1;
409
410 ASSERT ( Addr >= (char*)pool->UserBase && Addr < ((char*)pool->UserBase + pool->UserSize - 16) );
411 #ifdef R_MAGIC
412 if ( pUsed->UsedMagic == MM_PPOOL_FREEMAGIC )
413 {
414 pUsed->UserSize = 0; // just to keep from confusion, MmpBadBlock() doesn't return...
415 RiBadBlock ( pUsed, Addr, "double-free", file, line, 0 );
416 }
417 if ( pUsed->UsedMagic != MM_PPOOL_USEDMAGIC )
418 {
419 RiBadBlock ( pUsed, Addr, "bad magic", file, line, 0 );
420 }
421 #endif//R_MAGIC
422 if ( pUsed->Size > pool->PoolSize || pUsed->Size == 0 )
423 {
424 RiBadBlock ( pUsed, Addr, "invalid size", file, line, 0 );
425 }
426 if ( pUsed->UserSize > pool->PoolSize || pUsed->UserSize == 0 )
427 {
428 RiBadBlock ( pUsed, Addr, "invalid user size", file, line, 0 );
429 }
430 HiZone = Addr + pUsed->UserSize;
431 LoZone = Addr - R_RZ; // this is to simplify indexing below...
432 for ( i = 0; i < R_RZ && bLow && bHigh; i++ )
433 {
434 bLow = bLow && ( LoZone[i] == R_RZ_LOVALUE );
435 bHigh = bHigh && ( HiZone[i] == R_RZ_HIVALUE );
436 }
437 if ( !bLow || !bHigh )
438 {
439 const char* violation = "High and Low-side redzone";
440 if ( bHigh ) // high is okay, so it was just low failed
441 violation = "Low-side redzone";
442 else if ( bLow ) // low side is okay, so it was just high failed
443 violation = "High-side redzone";
444 RiBadBlock ( pUsed, Addr, violation, file, line, 1 );
445 }
446 }
447 #endif//R_RZ
448
449 PR_FREE
450 RPreviousBlock ( PR_FREE Block )
451 {
452 if ( Block->PrevSize > 0 )
453 return (PR_FREE)( (char*)Block - Block->PrevSize );
454 return NULL;
455 }
456
457 PR_FREE
458 RNextBlock ( PR_POOL pool, PR_FREE Block )
459 {
460 PR_FREE NextBlock = (PR_FREE)( (char*)Block + Block->Size );
461 if ( (char*)NextBlock >= (char*)pool->UserBase + pool->UserSize )
462 NextBlock = NULL;
463 return NextBlock;
464 }
465
466 inline static void*
467 RHdrToBody ( void* blk )
468 /*
469 * FUNCTION: Translate a block header address to the corresponding block
470 * address (internal)
471 */
472 {
473 return ( (void *) ((char*)blk + sizeof(R_USED) + R_RZ) );
474 }
475
476 inline static PR_USED
477 RBodyToHdr ( void* addr )
478 {
479 return (PR_USED)
480 ( ((char*)addr) - sizeof(R_USED) - R_RZ );
481 }
482
483 static int
484 RiInFreeChain ( PR_POOL pool, PR_FREE Block )
485 {
486 PR_FREE Free;
487 Free = pool->FirstFree;
488 if ( Free == Block )
489 return 1;
490 while ( Free != Block )
491 {
492 Free = Free->NextFree;
493 if ( !Free )
494 return 0;
495 }
496 return 1;
497 }
498
// Full-pool consistency sweep (debug aid). Pass 1 walks every block by
// size-jumping from UserBase: free blocks must be linked into the free
// chain, allocated blocks get a redzone check, and each block's
// PrevSize must agree with its predecessor's Size. Pass 2 walks the
// free list and verifies every entry is reachable by size-jumping,
// i.e. the list contains no stray pointers. Any violation ASSERTs or
// panics; caller must hold the pool mutex.
static void
RPoolRedZoneCheck ( PR_POOL pool, const char* file, int line )
{
	{
		PR_USED Block = (PR_USED)pool->UserBase;
		PR_USED NextBlock;

		for ( ;; )
		{
			switch ( Block->Status )
			{
			case 0: // block is in chain
				ASSERT ( RiInFreeChain ( pool, (PR_FREE)Block ) );
				break;
			case 1: // block is allocated
				RUsedRedZoneCheck ( pool, Block, RHdrToBody(Block), file, line );
				break;
			case 2: // block is in que
				// nothing to verify here yet
				break;
			default:
				ASSERT ( !"invalid status in memory block found in pool!" );
			}
			NextBlock = (PR_USED)RNextBlock(pool,(PR_FREE)Block);
			if ( !NextBlock )
				break;
			// neighbour bookkeeping must be symmetric
			ASSERT ( NextBlock->PrevSize == Block->Size );
			Block = NextBlock;
		}
	}
	{
		// now let's step through the list of free pointers and verify
		// each one can be found by size-jumping...
		PR_FREE Free = (PR_FREE)pool->FirstFree;
		while ( Free )
		{
			PR_FREE NextFree = (PR_FREE)pool->UserBase;
			if ( Free != NextFree )
			{
				while ( NextFree != Free )
				{
					NextFree = RNextBlock ( pool, NextFree );
					ASSERT(NextFree);
				}
			}
			Free = Free->NextFree;
		}
	}
}
548
549 static void
550 RSetSize ( PR_POOL pool, PR_FREE Block, rulong NewSize, PR_FREE NextBlock )
551 {
552 R_ASSERT_PTR(pool,Block);
553 ASSERT ( NewSize < pool->UserSize );
554 ASSERT ( NewSize >= sizeof(R_FREE) );
555 Block->Size = NewSize;
556 if ( !NextBlock )
557 NextBlock = RNextBlock ( pool, Block );
558 if ( NextBlock )
559 NextBlock->PrevSize = NewSize;
560 }
561
// Split the first NewSize bytes off of Block; the remainder becomes a
// fresh free block that is initialized and pushed onto the free list.
// Returns the new (second) block.
// Statement order matters: the tail block's size is computed while
// Block->Size still holds the original total, and only then is Block
// itself shrunk.
static PR_FREE
RFreeSplit ( PR_POOL pool, PR_FREE Block, rulong NewSize )
{
	PR_FREE NewBlock = (PR_FREE)((char*)Block + NewSize);
	RSetSize ( pool, NewBlock, Block->Size - NewSize, NULL );
	RSetSize ( pool, Block, NewSize, NewBlock );
	RFreeInit ( NewBlock );
	RPoolAddFree ( pool, NewBlock );
	return NewBlock;
}
572
573 static void
574 RFreeMerge ( PR_POOL pool, PR_FREE First, PR_FREE Second )
575 {
576 ASSERT ( RPreviousBlock(Second) == First );
577 ASSERT ( First->Size == Second->PrevSize );
578 RPoolRemoveFree ( pool, Second );
579 RSetSize ( pool, First, First->Size + Second->Size, NULL );
580 }
581
// Return a block to the general free pool: re-stamp it as free, link
// it into the free list, then coalesce with the physically adjacent
// next/previous blocks when they are free too (Status == 0).
static void
RPoolReclaim ( PR_POOL pool, PR_FREE FreeBlock )
{
	PR_FREE NextBlock, PreviousBlock;

	RFreeInit ( FreeBlock );
	RPoolAddFree ( pool, FreeBlock );

	// TODO FIXME - don't merge and always insert freed blocks at the end for debugging purposes...

	/*
	 * If the next block is immediately adjacent to the newly freed one then
	 * merge them.
	 * PLEASE DO NOT WIPE OUT 'MAGIC' OR 'LASTOWNER' DATA FOR MERGED FREE BLOCKS
	 */
	NextBlock = RNextBlock ( pool, FreeBlock );
	if ( NextBlock != NULL && !NextBlock->Status )
	{
		RFreeMerge ( pool, FreeBlock, NextBlock );
	}

	/*
	 * If the previous block is adjacent to the newly freed one then
	 * merge them.
	 * PLEASE DO NOT WIPE OUT 'MAGIC' OR 'LASTOWNER' DATA FOR MERGED FREE BLOCKS
	 */
	PreviousBlock = RPreviousBlock ( FreeBlock );
	if ( PreviousBlock != NULL && !PreviousBlock->Status )
	{
		// FreeBlock disappears into PreviousBlock here
		RFreeMerge ( pool, PreviousBlock, FreeBlock );
	}
}
614
615 static void
616 RiUsedInit ( PR_USED Block, rulong Tag )
617 {
618 Block->Status = 1;
619 RUsedFillStack ( Block );
620 #if R_MAGIC
621 Block->UsedMagic = R_USED_MAGIC;
622 #endif//R_MAGIC
623 //ASSERT_SIZE ( Block->Size );
624
625 // now add the block to the used block list
626 #if defined(DBG) || defined(KDBG)
627 Block->NextUsed = (PR_USED)0xDEADBEEF;
628 #endif//R_USED_LIST
629
630 Block->Tag = Tag;
631 }
632
633 #if !R_RZ
634 #define RiUsedInitRedZone(Block,UserSize)
635 #else//R_RZ
636 static void
637 RiUsedInitRedZone ( PR_USED Block, rulong UserSize )
638 {
639 // write out buffer-overrun detection bytes
640 char* Addr = (char*)RHdrToBody(Block);
641 Block->UserSize = UserSize;
642 memset ( Addr - R_RZ, R_RZ_LOVALUE, R_RZ );
643 memset ( Addr + Block->UserSize, R_RZ_HIVALUE, R_RZ );
644 #if defined(DBG) || defined(KDBG)
645 memset ( Addr, 0xCD, UserSize );
646 #endif//DBG || KDBG
647 }
648 #endif//R_RZ
649
650 static void*
651 RPoolAlloc ( PR_POOL pool, rulong NumberOfBytes, rulong Tag, rulong align )
652 {
653 PR_USED NewBlock;
654 PR_FREE BestBlock,
655 NextBlock,
656 PreviousBlock,
657 BestPreviousBlock,
658 CurrentBlock;
659 void* BestAlignedAddr;
660 int que,
661 queBytes = NumberOfBytes;
662 rulong BlockSize,
663 Alignment;
664 int que_reclaimed = 0;
665
666 ASSERT ( pool );
667 ASSERT ( align < 3 );
668
669 R_ACQUIRE_MUTEX(pool);
670
671 if ( !NumberOfBytes )
672 {
673 R_DEBUG("0 bytes requested - initiating pool verification\n");
674 RPoolRedZoneCheck ( pool, __FILE__, __LINE__ );
675 R_RELEASE_MUTEX(pool);
676 return NULL;
677 }
678 if ( NumberOfBytes > pool->PoolSize )
679 {
680 if ( R_IS_POOL_PTR(pool,NumberOfBytes) )
681 {
682 R_DEBUG("red zone verification requested for block 0x%X\n", NumberOfBytes );
683 RUsedRedZoneCheck(pool,RBodyToHdr((void*)NumberOfBytes), (char*)NumberOfBytes, __FILE__, __LINE__ );
684 R_RELEASE_MUTEX(pool);
685 return NULL;
686 }
687 R_DEBUG("Invalid allocation request: %i bytes\n", NumberOfBytes );
688 R_RELEASE_MUTEX(pool);
689 return NULL;
690 }
691
692 que = RQueWhich ( NumberOfBytes );
693 if ( que >= 0 )
694 {
695 if ( (NewBlock = RQueRemove ( &pool->Que[que][align] )) )
696 {
697 R_RELEASE_MUTEX(pool);
698 RiUsedInit ( NewBlock, Tag );
699 RiUsedInitRedZone ( NewBlock, NumberOfBytes );
700 return RHdrToBody(NewBlock);
701 }
702 queBytes = 16 << que;
703 }
704
705 /*
706 * Calculate the total number of bytes we will need.
707 */
708 BlockSize = queBytes + sizeof(R_USED) + 2*R_RZ;
709 if (BlockSize < sizeof(R_FREE))
710 {
711 /* At least we need the size of the free block header. */
712 BlockSize = sizeof(R_FREE);
713 }
714
715 try_again:
716 /*
717 * Find the best-fitting block.
718 */
719 BestBlock = NULL;
720 Alignment = pool->Alignments[align];
721 PreviousBlock = NULL;
722 BestPreviousBlock = NULL,
723 CurrentBlock = pool->FirstFree;
724 BestAlignedAddr = NULL;
725
726 while ( CurrentBlock != NULL )
727 {
728 PVOID Addr = RHdrToBody(CurrentBlock);
729 PVOID CurrentBlockEnd = (char*)CurrentBlock + CurrentBlock->Size;
730 /* calculate last size-aligned address available within this block */
731 PVOID AlignedAddr = R_ROUND_DOWN((char*)CurrentBlockEnd-queBytes-R_RZ, Alignment);
732 ASSERT ( (char*)AlignedAddr+queBytes+R_RZ <= (char*)CurrentBlockEnd );
733
734 /* special case, this address is already size-aligned, and the right size */
735 if ( Addr == AlignedAddr )
736 {
737 BestAlignedAddr = AlignedAddr;
738 BestPreviousBlock = PreviousBlock;
739 BestBlock = CurrentBlock;
740 break;
741 }
742 // if we carve out a size-aligned block... is it still past the end of this
743 // block's free header?
744 else if ( (char*)RBodyToHdr(AlignedAddr)
745 >= (char*)CurrentBlock+sizeof(R_FREE) )
746 {
747 /*
748 * there's enough room to allocate our size-aligned memory out
749 * of this block, see if it's a better choice than any previous
750 * finds
751 */
752 if ( BestBlock == NULL
753 || BestBlock->Size > CurrentBlock->Size )
754 {
755 BestAlignedAddr = AlignedAddr;
756 BestPreviousBlock = PreviousBlock;
757 BestBlock = CurrentBlock;
758 }
759 }
760
761 PreviousBlock = CurrentBlock;
762 CurrentBlock = CurrentBlock->NextFree;
763 }
764
765 /*
766 * We didn't find anything suitable at all.
767 */
768 if (BestBlock == NULL)
769 {
770 if ( !que_reclaimed )
771 {
772 // reclaim que
773 int i, j;
774 for ( i = 0; i < R_QUECOUNT; i++ )
775 {
776 for ( j = 0; j < 3; j++ )
777 {
778 while ( (BestBlock = (PR_FREE)RQueRemove ( &pool->Que[i][j] )) )
779 {
780 RPoolReclaim ( pool, BestBlock );
781 }
782 }
783 }
784
785 que_reclaimed = 1;
786 goto try_again;
787 }
788 /*DPRINT1("Trying to allocate %lu bytes from paged pool - nothing suitable found, returning NULL\n",
789 queBytes );*/
790 return NULL;
791 }
792 /*
793 * we found a best block. If Addr isn't already aligned, we've pre-qualified that
794 * there's room at the beginning of the block for a free block...
795 */
796 {
797 void* Addr = RHdrToBody(BestBlock);
798 if ( BestAlignedAddr != Addr )
799 {
800 PR_FREE NewFreeBlock = RFreeSplit (
801 pool,
802 BestBlock,
803 (char*)RBodyToHdr(BestAlignedAddr) - (char*)BestBlock );
804 ASSERT ( BestAlignedAddr > Addr );
805
806 //DPRINT ( "breaking off preceding bytes into their own block...\n" );
807 /*DPRINT ( "NewFreeBlock 0x%x Size %lu (Old Block's new size %lu) NextFree 0x%x\n",
808 NewFreeBlock, NewFreeBlock->Size, BestBlock->Size, BestBlock->NextFree );*/
809
810 /* we want the following code to use our size-aligned block */
811 BestPreviousBlock = BestBlock;
812 BestBlock = NewFreeBlock;
813
814 //VerifyPagedPool();
815 }
816 }
817 /*
818 * Is there enough space to create a second block from the unused portion.
819 */
820 if ( (BestBlock->Size - BlockSize) > sizeof(R_FREE) )
821 {
822 /*DPRINT("BestBlock 0x%x Size 0x%x BlockSize 0x%x NewSize 0x%x\n",
823 BestBlock, BestBlock->Size, BlockSize, NewSize );*/
824
825 /*
826 * Create the new free block.
827 */
828 NextBlock = RFreeSplit ( pool, BestBlock, BlockSize );
829 //ASSERT_SIZE ( NextBlock->Size );
830 }
831 /*
832 * Remove the selected block from the list of free blocks.
833 */
834 //DPRINT ( "Removing selected block from free block list\n" );
835 RPoolRemoveFree ( pool, BestBlock );
836 /*
837 * Create the new used block header.
838 */
839 NewBlock = (PR_USED)BestBlock;
840 RiUsedInit ( NewBlock, Tag );
841
842 R_RELEASE_MUTEX(pool);
843
844 /* RtlZeroMemory(RHdrToBody(NewBlock), NumberOfBytes);*/
845
846 RiUsedInitRedZone ( NewBlock, NumberOfBytes );
847
848 return RHdrToBody(NewBlock);
849 }
850
851 static void
852 RPoolFree ( PR_POOL pool, void* Addr )
853 {
854 PR_USED UsedBlock;
855 rulong UsedSize;
856 PR_FREE FreeBlock;
857 rulong UserSize;
858 int que;
859
860 ASSERT(pool);
861 if ( !Addr )
862 {
863 R_DEBUG("Attempt to free NULL ptr, initiating Red Zone Check\n" );
864 R_ACQUIRE_MUTEX(pool);
865 RPoolRedZoneCheck ( pool, __FILE__, __LINE__ );
866 R_RELEASE_MUTEX(pool);
867 return;
868 }
869 R_ASSERT_PTR(pool,Addr);
870
871 UsedBlock = RBodyToHdr(Addr);
872 UsedSize = UsedBlock->Size;
873 FreeBlock = (PR_FREE)UsedBlock;
874 #if R_RZ
875 UserSize = UsedBlock->UserSize;
876 #else
877 UserSize = UsedSize - sizeof(R_USED) - 2*R_RZ;
878 #endif//R_RZ
879
880 RUsedRedZoneCheck ( pool, UsedBlock, Addr, __FILE__, __LINE__ );
881
882 #if R_RZ
883 memset ( Addr, 0xCD, UsedBlock->UserSize );
884 #endif
885
886 que = RQueWhich ( UserSize );
887 if ( que >= 0 )
888 {
889 int queBytes = 16 << que;
890 ASSERT( queBytes >= UserSize );
891 if ( que >= 0 )
892 {
893 int align = 0;
894 if ( R_ROUND_UP(Addr,pool->Alignments[2]) == Addr )
895 align = 2;
896 else if ( R_ROUND_UP(Addr,pool->Alignments[1]) == Addr )
897 align = 1;
898 R_ACQUIRE_MUTEX(pool);
899 RQueAdd ( &pool->Que[que][align], UsedBlock );
900 R_RELEASE_MUTEX(pool);
901 return;
902 }
903 }
904
905 R_ACQUIRE_MUTEX(pool);
906 RPoolReclaim ( pool, FreeBlock );
907 R_RELEASE_MUTEX(pool);
908 }
909
// Debug dump: list every allocated block carrying the given 4-byte
// tag, then print the total count.
static void
RPoolDumpByTag ( PR_POOL pool, rulong Tag )
{
	PR_USED Block = (PR_USED)pool->UserBase;
	PR_USED NextBlock;
	int count = 0;
	char tag[5];

	// TODO FIXME - should we validate params or ASSERT_IRQL?
	R_DEBUG ( "PagedPool Dump by tag '%s'\n", RFormatTag(Tag,tag) );
	R_DEBUG ( " -BLOCK-- --SIZE--\n" );

	R_ACQUIRE_MUTEX(pool);
	for ( ;; )
	{
		// Status 1 == allocated; free and que-parked blocks are skipped
		if ( Block->Status == 1 && Block->Tag == Tag )
		{
			R_DEBUG ( " %08X %08X\n", Block, Block->Size );
			++count;
		}
		NextBlock = (PR_USED)RNextBlock(pool,(PR_FREE)Block);
		if ( !NextBlock )
			break;
		// neighbour bookkeeping must be symmetric
		ASSERT ( NextBlock->PrevSize == Block->Size );
		Block = NextBlock;
	}
	R_RELEASE_MUTEX(pool);

	// 'tag' still holds the printable form from RFormatTag() above
	R_DEBUG ( "Entries found for tag '%s': %i\n", tag, count );
}
940
941 rulong
942 RPoolQueryTag ( void* Addr )
943 {
944 PR_USED Block = RBodyToHdr(Addr);
945 // TODO FIXME - should we validate params?
946 #if R_MAGIC
947 if ( Block->UsedMagic != R_USED_MAGIC )
948 return 0xDEADBEEF;
949 #endif//R_MAGIC
950 if ( Block->Status != 1 )
951 return 0xDEADBEEF;
952 return Block->Tag;
953 }
954
955 void
956 RPoolStats ( PR_POOL pool )
957 {
958 int free=0, used=0, qued=0;
959 PR_USED Block = (PR_USED)pool->UserBase;
960
961 R_ACQUIRE_MUTEX(pool);
962 while ( Block )
963 {
964 switch ( Block->Status )
965 {
966 case 0:
967 ++free;
968 break;
969 case 1:
970 ++used;
971 break;
972 case 2:
973 ++qued;
974 break;
975 default:
976 ASSERT ( !"Invalid Status for Block in pool!" );
977 }
978 Block = (PR_USED)RNextBlock(pool,(PR_FREE)Block);
979 }
980 R_RELEASE_MUTEX(pool);
981
982 R_DEBUG ( "Pool Stats: Free=%i, Used=%i, Qued=%i, Total=%i\n", free, used, qued, (free+used+qued) );
983 }
984
985 #ifdef R_LARGEST_ALLOC_POSSIBLE
986 static rulong
987 RPoolLargestAllocPossible ( PR_POOL pool, int align )
988 {
989 int Alignment = pool->Alignments[align];
990 rulong LargestUserSize = 0;
991 PR_FREE Block = (PR_FREE)pool->UserBase;
992 while ( Block )
993 {
994 if ( Block->Status != 1 )
995 {
996 void* Addr, *AlignedAddr;
997 rulong BlockMaxUserSize;
998 int cue, cueBytes;
999
1000 Addr = (char*)Block + sizeof(R_USED) + R_RZ;
1001 AlignedAddr = R_ROUND_UP(Addr,Alignment);
1002 if ( Addr != AlignedAddr )
1003 Addr = R_ROUND_UP((char*)Block + sizeof(R_FREE) + sizeof(R_USED) + R_RZ, Alignment );
1004 BlockMaxUserSize = (char*)Block + Block->Size - (char*)Addr - R_RZ;
1005 cue = RQueWhich ( BlockMaxUserSize );
1006 if ( cue >= 0 )
1007 {
1008 cueBytes = 16 << cue;
1009 if ( cueBytes > BlockMaxUserSize );
1010 {
1011 if ( !cue )
1012 BlockMaxUserSize = 0;
1013 else
1014 BlockMaxUserSize = 16 << (cue-1);
1015 }
1016 }
1017 if ( BlockMaxUserSize > LargestUserSize )
1018 LargestUserSize = BlockMaxUserSize;
1019 }
1020 Block = RNextBlock ( pool, Block );
1021 }
1022 return LargestUserSize;
1023 }
1024 #endif//R_LARGEST_ALLOC_POSSIBLE
1025
1026 #endif//RPOOLMGR_H