1 /* $Id: RPoolMgr.h,v 1.1 2004/12/17 13:20:05 royce Exp $
3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/RPoolMgr.h
6 * PURPOSE: A semi-generic reuseable Pool implementation
7 * PROGRAMMER: Royce Mitchell III
// Pool-wide unsigned integer type, used for sizes and for address
// arithmetic (see R_ROUND_UP/R_ROUND_DOWN, which cast pointers to it).
typedef unsigned long rulong;
// R_IS_POOL_PTR: does ptr point inside the pool's user area?
// bugfix: the expansion is now fully parenthesized -- the original
// leaked a bare `a && b` expression, so `!R_IS_POOL_PTR(...)` applied
// the `!` to the first comparison only, and the `pool` argument was
// expanded unguarded.
#define R_IS_POOL_PTR(pool,ptr) ((void*)(ptr) >= (pool)->UserBase && (char*)(ptr) < ((char*)(pool)->UserBase+(pool)->UserSize))
// Assertion helpers used throughout the allocator.
#define R_ASSERT_PTR(pool,ptr) ASSERT( R_IS_POOL_PTR(pool,ptr) )
#define R_ASSERT_SIZE(pool,sz) ASSERT( (sz) > (sizeof(R_USED)+2*R_RZ) && (sz) >= sizeof(R_FREE) && (sz) < (pool)->UserSize )
// Round x up/down to a multiple of s (s must be a power of two). The
// mask is built in rulong so a narrower unsigned `s` cannot zero-extend
// and clip the high bits of the address.
#define R_ROUND_UP(x,s)   ((PVOID)(((rulong)(x)+(s)-1) & ~((rulong)(s)-1)))
#define R_ROUND_DOWN(x,s) ((PVOID)(((rulong)(x)) & ~((rulong)(s)-1)))
29 // R_QUEMIN is the minimum number of entries to keep in a que
34 // 16, 32, 64, 128, 256, 512
39 // R_RZ is the redzone size
44 #define R_RZ_LOVALUE 0x87
48 #define R_RZ_HIVALUE 0xA5
52 // R_STACK is the number of stack entries to store in blocks for debug purposes
57 // R_TAG do we keep track of tags on a per-memory block basis?
63 # define R_FREE_MAGIC (rulong)(('F'<<0) + ('r'<<8) + ('E'<<16) + ('e'<<24))
66 # define R_USED_MAGIC (rulong)(('u'<<0) + ('S'<<8) + ('e'<<16) + ('D'<<24))
70 // **IMPORTANT NOTE** Magic, PrevSize, Size and Status must be at same offset
71 // in both the R_FREE and R_USED structures
// Header for a free block. NOTE(review): this extraction is missing the
// struct's opening brace and several members (the shared Magic/PrevSize/
// Size/Status prefix mentioned in the comment above) -- confirm against
// the full file.
73 typedef struct _R_FREE
// last R_STACK return addresses of the code path that freed this block
82 rulong LastOwnerStack
[R_STACK
];
// doubly-linked free-list links (see RPoolAddFree/RPoolRemoveFree)
84 struct _R_FREE
* NextFree
;
85 struct _R_FREE
* PrevFree
;
// Header for an allocated (or qued) block. NOTE(review): the opening
// brace, the common Magic/PrevSize/Size/Status prefix and the Tag member
// are missing from this extraction -- confirm against the full file.
89 typedef struct _R_USED
// last R_STACK return addresses of the code path that allocated this block
98 rulong LastOwnerStack
[R_STACK
];
// singly-linked link used while the block sits in a delayed-free que
100 struct _R_USED
* NextUsed
;
102 rulong UserSize
; // how many bytes the user actually asked for...
// R_QUE: a que of recently freed blocks kept for quick reuse
// (body missing from this extraction).
108 typedef struct _R_QUE
// R_POOL: pool control header, placed at the very start of pool memory.
// NOTE(review): several members (PoolBase/PoolSize/UserBase/UserSize,
// mutex) are missing from this extraction -- confirm.
115 typedef struct _R_POOL
// the three allocation alignments this pool serves (see RPoolInit)
121 rulong Alignments
[3];
// head/tail of the doubly-linked free-block list
122 PR_FREE FirstFree
, LastFree
;
// delayed-free ques: one per size class, per alignment
123 R_QUE Que
[R_QUECOUNT
][3];
129 RQueWhich ( rulong size
)
132 for ( que
=0, quesize
=16; que
< R_QUECOUNT
; que
++, quesize
<<=1 )
134 if ( quesize
>= size
)
// Reset a que to empty. NOTE(review): the body is missing from this
// extraction; presumably it clears First/Last/count -- confirm.
143 RQueInit ( PR_QUE que
)
// Append Item to the tail of a delayed-free que.
151 RQueAdd ( PR_QUE que
, PR_USED Item
)
// a new tail never has a successor
155 Item
->NextUsed
= NULL
;
// empty que: Item becomes both head and tail
159 que
->First
= que
->Last
= Item
;
// non-empty que: the old tail must not already have a successor
162 ASSERT(!que
->Last
->NextUsed
);
163 que
->Last
->NextUsed
= Item
;
// NOTE(review): the lines advancing que->Last (and any count update)
// are missing from this extraction -- confirm against the full file.
// Pop the head of a delayed-free que, but only once the que holds at
// least R_QUEMIN entries (keeps a reserve of recently freed blocks).
168 RQueRemove ( PR_QUE que
)
// keep at least R_QUEMIN entries qued; yield nothing until then
172 if ( que
->count
< R_QUEMIN
)
// unlink the head. NOTE(review): the line assigning Item (presumably
// Item = que->First) is missing from this extraction -- confirm.
178 que
->First
= Item
->NextUsed
;
// when the que becomes empty, First must now be NULL
181 ASSERT ( !que
->First
);
189 RPoolAddFree ( PR_POOL pool
, PR_FREE Item
)
193 if ( !pool
->FirstFree
)
195 pool
->FirstFree
= pool
->LastFree
= Item
;
196 Item
->NextFree
= NULL
;
200 pool
->FirstFree
->PrevFree
= Item
;
201 Item
->NextFree
= pool
->FirstFree
;
202 pool
->FirstFree
= Item
;
204 Item
->PrevFree
= NULL
;
208 RPoolRemoveFree ( PR_POOL pool
, PR_FREE Item
)
212 if ( Item
->NextFree
)
213 Item
->NextFree
->PrevFree
= Item
->PrevFree
;
216 ASSERT ( pool
->LastFree
== Item
);
217 pool
->LastFree
= Item
->PrevFree
;
219 if ( Item
->PrevFree
)
220 Item
->PrevFree
->NextFree
= Item
->NextFree
;
223 ASSERT ( pool
->FirstFree
== Item
);
224 pool
->FirstFree
= Item
->NextFree
;
226 #if defined(DBG) || defined(KDBG)
227 Item
->NextFree
= Item
->PrevFree
= (PR_FREE
)0xDEADBEEF;
231 // this function is used to walk up a stack trace... it returns
232 // the pointer to the next return address above the pointer to the
233 // return address pointed to by Frame...
235 RNextStackFrame ( rulong
* Frame
)
237 if ( !Frame
|| !*Frame
|| *Frame
== 0xDEADBEAF )
239 return (rulong
*)( Frame
[-1] ) + 1;
242 // this function returns a pointer to the address the
243 // caller will return to. Use RNextStackFrame() above to walk
244 // further up the stack.
// GCC path: copy the saved frame pointer (EBP) out of the register.
// NOTE(review): x86-only, and assumes frame pointers are not omitted.
250 __asm__("mov %%ebp, %%ebx" : "=b" (Frame
) : );
251 #elif defined(_MSC_VER)
252 __asm mov
[Frame
], ebp
// Frame+1 skips the saved-EBP slot, yielding a pointer to our own
// return address for RNextStackFrame() to start from.
254 return RNextStackFrame ( Frame
+ 1 );
258 RFreeFillStack ( PR_FREE free
)
260 rulong
* Frame
= RStackFrame();
262 memset ( free
->LastOwnerStack
, 0, sizeof(free
->LastOwnerStack
) );
263 Frame
= RNextStackFrame ( Frame
); // step out of RFreeInit()
264 Frame
= RNextStackFrame ( Frame
); // step out of RFreeSplit()/RPoolReclaim()
265 Frame
= RNextStackFrame ( Frame
); // step out of RPoolFree()
266 for ( i
= 0; i
< R_EXTRA_STACK_UP
; i
++ )
267 Frame
= RNextStackFrame ( Frame
);
268 for ( i
= 0; i
< R_STACK
&& Frame
; i
++ )
270 free
->LastOwnerStack
[i
] = *Frame
;
271 Frame
= RNextStackFrame ( Frame
);
276 RUsedFillStack ( PR_USED used
)
278 rulong
* Frame
= RStackFrame();
280 memset ( used
->LastOwnerStack
, 0, sizeof(used
->LastOwnerStack
) );
281 Frame
= RNextStackFrame ( Frame
); // step out of RUsedInit()
282 Frame
= RNextStackFrame ( Frame
); // step out of RPoolAlloc()
283 for ( i
= 0; i
< R_EXTRA_STACK_UP
; i
++ )
284 Frame
= RNextStackFrame ( Frame
);
285 for ( i
= 0; i
< R_STACK
&& Frame
; i
++ )
287 used
->LastOwnerStack
[i
] = *Frame
;
288 Frame
= RNextStackFrame ( Frame
);
// Stamp raw memory as a free-block header: magic value, owner stack,
// poisoned list links. NOTE(review): the return statement is missing
// from this extraction; the call site in RPoolInit appears to consume
// its return value -- confirm it returns the block pointer.
293 RFreeInit ( void* memory
)
295 PR_FREE block
= (PR_FREE
)memory
;
297 block
->FreeMagic
= R_FREE_MAGIC
;
// record who freed this block for post-mortem debugging
300 RFreeFillStack ( block
);
301 #if defined(DBG) || defined(KDBG)
// poison the list links so use of an unlinked block is obvious
302 block
->PrevFree
= block
->NextFree
= (PR_FREE
)0xDEADBEEF;
308 RPoolInit ( void* PoolBase
, rulong PoolSize
, int align1
, int align2
, int align3
)
311 PR_POOL pool
= (PR_POOL
)PoolBase
;
313 pool
->PoolBase
= PoolBase
;
314 pool
->PoolSize
= PoolSize
;
315 pool
->UserBase
= (char*)pool
->PoolBase
+ sizeof(R_POOL
);
316 pool
->UserSize
= PoolSize
- sizeof(R_POOL
);
317 pool
->Alignments
[0] = align1
;
318 pool
->Alignments
[1] = align2
;
319 pool
->Alignments
[2] = align3
;
320 pool
->FirstFree
= pool
->LastFree
= NULL
;
323 RFreeInit ( pool
->UserBase
));
325 pool
->FirstFree
->PrevSize
= 0;
326 pool
->FirstFree
->Size
= pool
->UserSize
;
328 for ( que
= 0; que
< R_QUECOUNT
; que
++ )
330 for ( align
= 0; align
< 3; align
++ )
332 RQueInit ( &pool
->Que
[que
][align
] );
// Render a 4-byte pool tag as printable text in buf.
339 RFormatTag ( rulong Tag
, char* buf
)
// store the tag's 4 bytes directly into the buffer.
// NOTE(review): punning the char buffer through rulong* assumes
// little-endian byte order and adequate alignment -- x86-specific.
342 *(rulong
*)&buf
[0] = Tag
;
// sanitize each of the 4 characters (the loop body that replaces
// non-printable bytes is missing from this extraction)
344 for ( i
= 0; i
< 4; i
++ )
353 #define RUsedRedZoneCheck(pUsed,Addr,file,line)
356 RiBadBlock ( PR_USED pUsed
, char* Addr
, const char* violation
, const char* file
, int line
, int printzone
)
361 R_DEBUG("%s(%i): %s detected for paged pool address 0x%x\n",
362 file
, line
, violation
, Addr
);
365 R_DEBUG ( "UsedMagic 0x%x, ", pUsed
->UsedMagic
);
367 R_DEBUG ( "Tag %s(%X), Size %i, UserSize %i",
368 RFormatTag(pUsed
->Tag
,tag
),
375 unsigned char* HiZone
= Addr
+ pUsed
->UserSize
;
376 unsigned char* LoZone
= Addr
- R_RZ
; // this is to simplify indexing below...
377 R_DEBUG ( ", LoZone " );
378 for ( i
= 0; i
< R_RZ
; i
++ )
379 R_DEBUG ( "%02x", LoZone
[i
] );
380 R_DEBUG ( ", HiZone " );
381 for ( i
= 0; i
< R_RZ
; i
++ )
382 R_DEBUG ( "%02x", HiZone
[i
] );
386 R_DEBUG ( "First few Stack Frames:" );
387 for ( i
= 0; i
< R_STACK
; i
++ )
389 if ( pUsed
->LastOwnerStack
[i
] != 0xDEADBEEF )
392 if (!R_PRINT_ADDRESS ((PVOID
)pUsed
->LastOwnerStack
[i
]) )
394 R_DEBUG("<%X>", pUsed
->LastOwnerStack
[i
] );
403 RUsedRedZoneCheck ( PR_POOL pool
, PR_USED pUsed
, char* Addr
, const char* file
, int line
)
406 unsigned char *LoZone
, *HiZone
;
410 ASSERT ( Addr
>= (char*)pool
->UserBase
&& Addr
< ((char*)pool
->UserBase
+ pool
->UserSize
- 16) );
412 if ( pUsed
->UsedMagic
== MM_PPOOL_FREEMAGIC
)
414 pUsed
->UserSize
= 0; // just to keep from confusion, MmpBadBlock() doesn't return...
415 RiBadBlock ( pUsed
, Addr
, "double-free", file
, line
, 0 );
417 if ( pUsed
->UsedMagic
!= MM_PPOOL_USEDMAGIC
)
419 RiBadBlock ( pUsed
, Addr
, "bad magic", file
, line
, 0 );
422 if ( pUsed
->Size
> pool
->PoolSize
|| pUsed
->Size
== 0 )
424 RiBadBlock ( pUsed
, Addr
, "invalid size", file
, line
, 0 );
426 if ( pUsed
->UserSize
> pool
->PoolSize
|| pUsed
->UserSize
== 0 )
428 RiBadBlock ( pUsed
, Addr
, "invalid user size", file
, line
, 0 );
430 HiZone
= Addr
+ pUsed
->UserSize
;
431 LoZone
= Addr
- R_RZ
; // this is to simplify indexing below...
432 for ( i
= 0; i
< R_RZ
&& bLow
&& bHigh
; i
++ )
434 bLow
= bLow
&& ( LoZone
[i
] == R_RZ_LOVALUE
);
435 bHigh
= bHigh
&& ( HiZone
[i
] == R_RZ_HIVALUE
);
437 if ( !bLow
|| !bHigh
)
439 const char* violation
= "High and Low-side redzone";
440 if ( bHigh
) // high is okay, so it was just low failed
441 violation
= "Low-side redzone";
442 else if ( bLow
) // low side is okay, so it was just high failed
443 violation
= "High-side redzone";
444 RiBadBlock ( pUsed
, Addr
, violation
, file
, line
, 1 );
450 RPreviousBlock ( PR_FREE Block
)
452 if ( Block
->PrevSize
> 0 )
453 return (PR_FREE
)( (char*)Block
- Block
->PrevSize
);
458 RNextBlock ( PR_POOL pool
, PR_FREE Block
)
460 PR_FREE NextBlock
= (PR_FREE
)( (char*)Block
+ Block
->Size
);
461 if ( (char*)NextBlock
>= (char*)pool
->UserBase
+ pool
->UserSize
)
467 RHdrToBody ( void* blk
)
469 * FUNCTION: Translate a block header address to the corresponding block
473 return ( (void *) ((char*)blk
+ sizeof(R_USED
) + R_RZ
) );
476 inline static PR_USED
477 RBodyToHdr ( void* addr
)
480 ( ((char*)addr
) - sizeof(R_USED
) - R_RZ
);
// Debug helper: walk the free list looking for Block; used by
// RPoolRedZoneCheck to verify a status-0 block really is chained.
// NOTE(review): the termination/return logic is missing from this
// extraction -- as shown, the walk would dereference NULL if Block is
// absent from the chain. Confirm against the full file.
484 RiInFreeChain ( PR_POOL pool
, PR_FREE Block
)
487 Free
= pool
->FirstFree
;
490 while ( Free
!= Block
)
492 Free
= Free
->NextFree
;
500 RPoolRedZoneCheck ( PR_POOL pool
, const char* file
, int line
)
503 PR_USED Block
= (PR_USED
)pool
->UserBase
;
508 switch ( Block
->Status
)
510 case 0: // block is in chain
511 ASSERT ( RiInFreeChain ( pool
, (PR_FREE
)Block
) );
513 case 1: // block is allocated
514 RUsedRedZoneCheck ( pool
, Block
, RHdrToBody(Block
), file
, line
);
516 case 2: // block is in que
517 // nothing to verify here yet
520 ASSERT ( !"invalid status in memory block found in pool!" );
522 NextBlock
= (PR_USED
)RNextBlock(pool
,(PR_FREE
)Block
);
525 ASSERT ( NextBlock
->PrevSize
== Block
->Size
);
530 // now let's step through the list of free pointers and verify
531 // each one can be found by size-jumping...
532 PR_FREE Free
= (PR_FREE
)pool
->FirstFree
;
535 PR_FREE NextFree
= (PR_FREE
)pool
->UserBase
;
536 if ( Free
!= NextFree
)
538 while ( NextFree
!= Free
)
540 NextFree
= RNextBlock ( pool
, NextFree
);
544 Free
= Free
->NextFree
;
550 RSetSize ( PR_POOL pool
, PR_FREE Block
, rulong NewSize
, PR_FREE NextBlock
)
552 R_ASSERT_PTR(pool
,Block
);
553 ASSERT ( NewSize
< pool
->UserSize
);
554 ASSERT ( NewSize
>= sizeof(R_FREE
) );
555 Block
->Size
= NewSize
;
557 NextBlock
= RNextBlock ( pool
, Block
);
559 NextBlock
->PrevSize
= NewSize
;
563 RFreeSplit ( PR_POOL pool
, PR_FREE Block
, rulong NewSize
)
565 PR_FREE NewBlock
= (PR_FREE
)((char*)Block
+ NewSize
);
566 RSetSize ( pool
, NewBlock
, Block
->Size
- NewSize
, NULL
);
567 RSetSize ( pool
, Block
, NewSize
, NewBlock
);
568 RFreeInit ( NewBlock
);
569 RPoolAddFree ( pool
, NewBlock
);
574 RFreeMerge ( PR_POOL pool
, PR_FREE First
, PR_FREE Second
)
576 ASSERT ( RPreviousBlock(Second
) == First
);
577 ASSERT ( First
->Size
== Second
->PrevSize
);
578 RPoolRemoveFree ( pool
, Second
);
579 RSetSize ( pool
, First
, First
->Size
+ Second
->Size
, NULL
);
583 RPoolReclaim ( PR_POOL pool
, PR_FREE FreeBlock
)
585 PR_FREE NextBlock
, PreviousBlock
;
587 RFreeInit ( FreeBlock
);
588 RPoolAddFree ( pool
, FreeBlock
);
590 // TODO FIXME - don't merge and always insert freed blocks at the end for debugging purposes...
593 * If the next block is immediately adjacent to the newly freed one then
595 * PLEASE DO NOT WIPE OUT 'MAGIC' OR 'LASTOWNER' DATA FOR MERGED FREE BLOCKS
597 NextBlock
= RNextBlock ( pool
, FreeBlock
);
598 if ( NextBlock
!= NULL
&& !NextBlock
->Status
)
600 RFreeMerge ( pool
, FreeBlock
, NextBlock
);
604 * If the previous block is adjacent to the newly freed one then
606 * PLEASE DO NOT WIPE OUT 'MAGIC' OR 'LASTOWNER' DATA FOR MERGED FREE BLOCKS
608 PreviousBlock
= RPreviousBlock ( FreeBlock
);
609 if ( PreviousBlock
!= NULL
&& !PreviousBlock
->Status
)
611 RFreeMerge ( pool
, PreviousBlock
, FreeBlock
);
// Stamp a block header as allocated: owner stack, magic value, poisoned
// que link. NOTE(review): the Status and Tag assignments are missing
// from this extraction -- confirm against the full file.
616 RiUsedInit ( PR_USED Block
, rulong Tag
)
// record the allocating call stack for leak/corruption forensics
619 RUsedFillStack ( Block
);
621 Block
->UsedMagic
= R_USED_MAGIC
;
623 //ASSERT_SIZE ( Block->Size );
625 // now add the block to the used block list
626 #if defined(DBG) || defined(KDBG)
// poison the que link so stale use is obvious
627 Block
->NextUsed
= (PR_USED
)0xDEADBEEF;
634 #define RiUsedInitRedZone(Block,UserSize)
637 RiUsedInitRedZone ( PR_USED Block
, rulong UserSize
)
639 // write out buffer-overrun detection bytes
640 char* Addr
= (char*)RHdrToBody(Block
);
641 Block
->UserSize
= UserSize
;
642 memset ( Addr
- R_RZ
, R_RZ_LOVALUE
, R_RZ
);
643 memset ( Addr
+ Block
->UserSize
, R_RZ_HIVALUE
, R_RZ
);
644 #if defined(DBG) || defined(KDBG)
645 memset ( Addr
, 0xCD, UserSize
);
651 RPoolAlloc ( PR_POOL pool
, rulong NumberOfBytes
, rulong Tag
, rulong align
)
659 void* BestAlignedAddr
;
661 queBytes
= NumberOfBytes
;
664 int que_reclaimed
= 0;
667 ASSERT ( align
< 3 );
669 R_ACQUIRE_MUTEX(pool
);
671 if ( !NumberOfBytes
)
673 R_DEBUG("0 bytes requested - initiating pool verification\n");
674 RPoolRedZoneCheck ( pool
, __FILE__
, __LINE__
);
675 R_RELEASE_MUTEX(pool
);
678 if ( NumberOfBytes
> pool
->PoolSize
)
680 if ( R_IS_POOL_PTR(pool
,NumberOfBytes
) )
682 R_DEBUG("red zone verification requested for block 0x%X\n", NumberOfBytes
);
683 RUsedRedZoneCheck(pool
,RBodyToHdr((void*)NumberOfBytes
), (char*)NumberOfBytes
, __FILE__
, __LINE__
);
684 R_RELEASE_MUTEX(pool
);
687 R_DEBUG("Invalid allocation request: %i bytes\n", NumberOfBytes
);
688 R_RELEASE_MUTEX(pool
);
692 que
= RQueWhich ( NumberOfBytes
);
695 if ( (NewBlock
= RQueRemove ( &pool
->Que
[que
][align
] )) )
697 R_RELEASE_MUTEX(pool
);
698 RiUsedInit ( NewBlock
, Tag
);
699 RiUsedInitRedZone ( NewBlock
, NumberOfBytes
);
700 return RHdrToBody(NewBlock
);
702 queBytes
= 16 << que
;
706 * Calculate the total number of bytes we will need.
708 BlockSize
= queBytes
+ sizeof(R_USED
) + 2*R_RZ
;
709 if (BlockSize
< sizeof(R_FREE
))
711 /* At least we need the size of the free block header. */
712 BlockSize
= sizeof(R_FREE
);
717 * Find the best-fitting block.
720 Alignment
= pool
->Alignments
[align
];
721 PreviousBlock
= NULL
;
722 BestPreviousBlock
= NULL
,
723 CurrentBlock
= pool
->FirstFree
;
724 BestAlignedAddr
= NULL
;
726 while ( CurrentBlock
!= NULL
)
728 PVOID Addr
= RHdrToBody(CurrentBlock
);
729 PVOID CurrentBlockEnd
= (char*)CurrentBlock
+ CurrentBlock
->Size
;
730 /* calculate last size-aligned address available within this block */
731 PVOID AlignedAddr
= R_ROUND_DOWN((char*)CurrentBlockEnd
-queBytes
-R_RZ
, Alignment
);
732 ASSERT ( (char*)AlignedAddr
+queBytes
+R_RZ
<= (char*)CurrentBlockEnd
);
734 /* special case, this address is already size-aligned, and the right size */
735 if ( Addr
== AlignedAddr
)
737 BestAlignedAddr
= AlignedAddr
;
738 BestPreviousBlock
= PreviousBlock
;
739 BestBlock
= CurrentBlock
;
742 // if we carve out a size-aligned block... is it still past the end of this
743 // block's free header?
744 else if ( (char*)RBodyToHdr(AlignedAddr
)
745 >= (char*)CurrentBlock
+sizeof(R_FREE
) )
748 * there's enough room to allocate our size-aligned memory out
749 * of this block, see if it's a better choice than any previous
752 if ( BestBlock
== NULL
753 || BestBlock
->Size
> CurrentBlock
->Size
)
755 BestAlignedAddr
= AlignedAddr
;
756 BestPreviousBlock
= PreviousBlock
;
757 BestBlock
= CurrentBlock
;
761 PreviousBlock
= CurrentBlock
;
762 CurrentBlock
= CurrentBlock
->NextFree
;
766 * We didn't find anything suitable at all.
768 if (BestBlock
== NULL
)
770 if ( !que_reclaimed
)
774 for ( i
= 0; i
< R_QUECOUNT
; i
++ )
776 for ( j
= 0; j
< 3; j
++ )
778 while ( (BestBlock
= (PR_FREE
)RQueRemove ( &pool
->Que
[i
][j
] )) )
780 RPoolReclaim ( pool
, BestBlock
);
788 /*DPRINT1("Trying to allocate %lu bytes from paged pool - nothing suitable found, returning NULL\n",
793 * we found a best block. If Addr isn't already aligned, we've pre-qualified that
794 * there's room at the beginning of the block for a free block...
797 void* Addr
= RHdrToBody(BestBlock
);
798 if ( BestAlignedAddr
!= Addr
)
800 PR_FREE NewFreeBlock
= RFreeSplit (
803 (char*)RBodyToHdr(BestAlignedAddr
) - (char*)BestBlock
);
804 ASSERT ( BestAlignedAddr
> Addr
);
806 //DPRINT ( "breaking off preceding bytes into their own block...\n" );
807 /*DPRINT ( "NewFreeBlock 0x%x Size %lu (Old Block's new size %lu) NextFree 0x%x\n",
808 NewFreeBlock, NewFreeBlock->Size, BestBlock->Size, BestBlock->NextFree );*/
810 /* we want the following code to use our size-aligned block */
811 BestPreviousBlock
= BestBlock
;
812 BestBlock
= NewFreeBlock
;
818 * Is there enough space to create a second block from the unused portion.
820 if ( (BestBlock
->Size
- BlockSize
) > sizeof(R_FREE
) )
822 /*DPRINT("BestBlock 0x%x Size 0x%x BlockSize 0x%x NewSize 0x%x\n",
823 BestBlock, BestBlock->Size, BlockSize, NewSize );*/
826 * Create the new free block.
828 NextBlock
= RFreeSplit ( pool
, BestBlock
, BlockSize
);
829 //ASSERT_SIZE ( NextBlock->Size );
832 * Remove the selected block from the list of free blocks.
834 //DPRINT ( "Removing selected block from free block list\n" );
835 RPoolRemoveFree ( pool
, BestBlock
);
837 * Create the new used block header.
839 NewBlock
= (PR_USED
)BestBlock
;
840 RiUsedInit ( NewBlock
, Tag
);
842 R_RELEASE_MUTEX(pool
);
844 /* RtlZeroMemory(RHdrToBody(NewBlock), NumberOfBytes);*/
846 RiUsedInitRedZone ( NewBlock
, NumberOfBytes
);
848 return RHdrToBody(NewBlock
);
852 RPoolFree ( PR_POOL pool
, void* Addr
)
863 R_DEBUG("Attempt to free NULL ptr, initiating Red Zone Check\n" );
864 R_ACQUIRE_MUTEX(pool
);
865 RPoolRedZoneCheck ( pool
, __FILE__
, __LINE__
);
866 R_RELEASE_MUTEX(pool
);
869 R_ASSERT_PTR(pool
,Addr
);
871 UsedBlock
= RBodyToHdr(Addr
);
872 UsedSize
= UsedBlock
->Size
;
873 FreeBlock
= (PR_FREE
)UsedBlock
;
875 UserSize
= UsedBlock
->UserSize
;
877 UserSize
= UsedSize
- sizeof(R_USED
) - 2*R_RZ
;
880 RUsedRedZoneCheck ( pool
, UsedBlock
, Addr
, __FILE__
, __LINE__
);
883 memset ( Addr
, 0xCD, UsedBlock
->UserSize
);
886 que
= RQueWhich ( UserSize
);
889 int queBytes
= 16 << que
;
890 ASSERT( queBytes
>= UserSize
);
894 if ( R_ROUND_UP(Addr
,pool
->Alignments
[2]) == Addr
)
896 else if ( R_ROUND_UP(Addr
,pool
->Alignments
[1]) == Addr
)
898 R_ACQUIRE_MUTEX(pool
);
899 RQueAdd ( &pool
->Que
[que
][align
], UsedBlock
);
900 R_RELEASE_MUTEX(pool
);
905 R_ACQUIRE_MUTEX(pool
);
906 RPoolReclaim ( pool
, FreeBlock
);
907 R_RELEASE_MUTEX(pool
);
911 RPoolDumpByTag ( PR_POOL pool
, rulong Tag
)
913 PR_USED Block
= (PR_USED
)pool
->UserBase
;
918 // TODO FIXME - should we validate params or ASSERT_IRQL?
919 R_DEBUG ( "PagedPool Dump by tag '%s'\n", RFormatTag(Tag
,tag
) );
920 R_DEBUG ( " -BLOCK-- --SIZE--\n" );
922 R_ACQUIRE_MUTEX(pool
);
925 if ( Block
->Status
== 1 && Block
->Tag
== Tag
)
927 R_DEBUG ( " %08X %08X\n", Block
, Block
->Size
);
930 NextBlock
= (PR_USED
)RNextBlock(pool
,(PR_FREE
)Block
);
933 ASSERT ( NextBlock
->PrevSize
== Block
->Size
);
936 R_RELEASE_MUTEX(pool
);
938 R_DEBUG ( "Entries found for tag '%s': %i\n", tag
, count
);
// Look up the tag of an allocation from its user address; the magic and
// status checks reject pointers that are not live allocations.
// NOTE(review): the return statements (error sentinel and Block->Tag)
// are missing from this extraction -- confirm against the full file.
942 RPoolQueryTag ( void* Addr
)
944 PR_USED Block
= RBodyToHdr(Addr
);
945 // TODO FIXME - should we validate params?
947 if ( Block
->UsedMagic
!= R_USED_MAGIC
)
950 if ( Block
->Status
!= 1 )
956 RPoolStats ( PR_POOL pool
)
958 int free
=0, used
=0, qued
=0;
959 PR_USED Block
= (PR_USED
)pool
->UserBase
;
961 R_ACQUIRE_MUTEX(pool
);
964 switch ( Block
->Status
)
976 ASSERT ( !"Invalid Status for Block in pool!" );
978 Block
= (PR_USED
)RNextBlock(pool
,(PR_FREE
)Block
);
980 R_RELEASE_MUTEX(pool
);
982 R_DEBUG ( "Pool Stats: Free=%i, Used=%i, Qued=%i, Total=%i\n", free
, used
, qued
, (free
+used
+qued
) );
985 #ifdef R_LARGEST_ALLOC_POSSIBLE
// Walk every non-allocated block and compute the largest user-visible
// allocation the pool could currently satisfy at the given alignment.
987 RPoolLargestAllocPossible ( PR_POOL pool
, int align
)
989 int Alignment
= pool
->Alignments
[align
];
990 rulong LargestUserSize
= 0;
991 PR_FREE Block
= (PR_FREE
)pool
->UserBase
;
// only consider blocks that are not live allocations (free or qued)
994 if ( Block
->Status
!= 1 )
996 void* Addr
, *AlignedAddr
;
997 rulong BlockMaxUserSize
;
// body address if this block were allocated: header plus low red zone
1000 Addr
= (char*)Block
+ sizeof(R_USED
) + R_RZ
;
1001 AlignedAddr
= R_ROUND_UP(Addr
,Alignment
);
// if the natural body address isn't aligned, a leading free block
// would have to be split off before the aligned allocation
1002 if ( Addr
!= AlignedAddr
)
1003 Addr
= R_ROUND_UP((char*)Block
+ sizeof(R_FREE
) + sizeof(R_USED
) + R_RZ
, Alignment
)
// usable bytes between the body address and the high red zone
1004 BlockMaxUserSize
= (char*)Block
+ Block
->Size
- (char*)Addr
- R_RZ
;
1005 cue
= RQueWhich ( BlockMaxUserSize
);
1008 cueBytes
= 16 << cue
;
// NOTE(review): the semicolon terminating this `if` looks like a bug
// (it makes the condition a no-op); the surrounding lines are missing
// from this extraction, so confirm the intended rounding-down logic
// against the full file before changing it.
1009 if ( cueBytes
> BlockMaxUserSize
);
1012 BlockMaxUserSize
= 0;
1014 BlockMaxUserSize
= 16 << (cue
-1);
// track the best candidate seen so far
1017 if ( BlockMaxUserSize
> LargestUserSize
)
1018 LargestUserSize
= BlockMaxUserSize
;
1020 Block
= RNextBlock ( pool
, Block
);
1022 return LargestUserSize
;
1024 #endif//R_LARGEST_ALLOC_POSSIBLE