 * COPYRIGHT:   See COPYING in the top level directory
 * PROJECT:     ReactOS kernel
 * FILE:        ntoskrnl/mm/RPoolMgr.h
 * PURPOSE:     A semi-generic reusable Pool implementation
 * PROGRAMMER:  Royce Mitchell III
// Unsigned integer type used throughout the pool implementation for
// sizes, tags, counters and magic values.
typedef unsigned long rulong;
// TRUE iff ptr lies inside pool's user area [UserBase, UserBase+UserSize).
// Fixed macro hygiene: the whole expansion and every use of 'pool' are now
// parenthesized so the macro composes safely inside larger expressions.
#define R_IS_POOL_PTR(pool,ptr) (((void*)(ptr) >= (pool)->UserBase) && ((ULONG_PTR)(ptr) < ((ULONG_PTR)(pool)->UserBase + (pool)->UserSize)))
// Debug checks: pointer must lie inside the pool; a block size must be large
// enough for either header form and smaller than the whole user area.
// Fixed macro hygiene: 'sz' and 'pool' are parenthesized at every use.
#define R_ASSERT_PTR(pool,ptr) ASSERT( R_IS_POOL_PTR(pool,ptr) )
#define R_ASSERT_SIZE(pool,sz) ASSERT( (sz) > (sizeof(R_USED)+2*R_RZ) && (sz) >= sizeof(R_FREE) && (sz) < (pool)->UserSize )
// Round x up / down to a multiple of s.
// NOTE: the bit-mask trick requires s to be a power of two; callers pass
// pool alignments, which are assumed to satisfy that - TODO confirm.
#define R_ROUND_UP(x,s)   ((PVOID)(((ULONG_PTR)(x)+(s)-1) & ~((ULONG_PTR)(s)-1)))
#define R_ROUND_DOWN(x,s) ((PVOID)(((ULONG_PTR)(x)) & ~((ULONG_PTR)(s)-1)))
// R_QUEMIN is the minimum number of entries to keep in a que
// que block sizes: 16, 32, 64, 128, 256, 512
// R_RZ is the redzone size
44 #define R_RZ_LOVALUE 0x87
48 #define R_RZ_HIVALUE 0xA5
// R_STACK is the number of stack entries to store in blocks for debug purposes
// R_TAG: do we keep track of tags on a per-memory-block basis?
63 # define R_FREE_MAGIC (rulong)(('F'<<0) + ('r'<<8) + ('E'<<16) + ('e'<<24))
66 # define R_USED_MAGIC (rulong)(('u'<<0) + ('S'<<8) + ('e'<<16) + ('D'<<24))
// **IMPORTANT NOTE** Magic, PrevSize, Size and Status must be at the same offset
// in both the R_FREE and R_USED structures
73 typedef struct _R_FREE
82 ULONG_PTR LastOwnerStack
[R_STACK
];
84 struct _R_FREE
* NextFree
;
85 struct _R_FREE
* PrevFree
;
89 typedef struct _R_USED
98 ULONG_PTR LastOwnerStack
[R_STACK
];
100 struct _R_USED
* NextUsed
;
102 rulong UserSize
; // how many bytes the user actually asked for...
108 typedef struct _R_QUE
115 typedef struct _R_POOL
121 rulong Alignments
[3];
122 PR_FREE FirstFree
, LastFree
;
123 R_QUE Que
[R_QUECOUNT
][3];
129 #define RiPrintLastOwner(Block)
132 RiPrintLastOwner ( PR_USED Block
)
135 for ( i
= 0; i
< R_STACK
; i
++ )
137 if ( Block
->LastOwnerStack
[i
] != 0xDEADBEEF )
140 if (!R_PRINT_ADDRESS ((PVOID
)Block
->LastOwnerStack
[i
]) )
142 R_DEBUG("<%X>", Block
->LastOwnerStack
[i
] );
150 RQueWhich ( rulong size
)
153 for ( que
=0, quesize
=16; que
< R_QUECOUNT
; que
++, quesize
<<=1 )
155 if ( quesize
>= size
)
164 RQueInit ( PR_QUE que
)
172 RQueAdd ( PR_QUE que
, PR_USED Item
)
176 Item
->NextUsed
= NULL
;
180 que
->First
= que
->Last
= Item
;
183 ASSERT(!que
->Last
->NextUsed
);
184 que
->Last
->NextUsed
= Item
;
189 RQueRemove ( PR_QUE que
)
193 if ( que
->count
< R_QUEMIN
)
199 que
->First
= Item
->NextUsed
;
202 ASSERT ( !que
->First
);
210 RPoolAddFree ( PR_POOL pool
, PR_FREE Item
)
214 if ( !pool
->FirstFree
)
216 pool
->FirstFree
= pool
->LastFree
= Item
;
217 Item
->NextFree
= NULL
;
221 pool
->FirstFree
->PrevFree
= Item
;
222 Item
->NextFree
= pool
->FirstFree
;
223 pool
->FirstFree
= Item
;
225 Item
->PrevFree
= NULL
;
229 RPoolRemoveFree ( PR_POOL pool
, PR_FREE Item
)
233 if ( Item
->NextFree
)
234 Item
->NextFree
->PrevFree
= Item
->PrevFree
;
237 ASSERT ( pool
->LastFree
== Item
);
238 pool
->LastFree
= Item
->PrevFree
;
240 if ( Item
->PrevFree
)
241 Item
->PrevFree
->NextFree
= Item
->NextFree
;
244 ASSERT ( pool
->FirstFree
== Item
);
245 pool
->FirstFree
= Item
->NextFree
;
247 #if defined(DBG) || defined(KDBG)
248 Item
->NextFree
= Item
->PrevFree
= (PR_FREE
)(ULONG_PTR
)0xDEADBEEF;
253 RFreeFillStack ( PR_FREE free
)
256 ULONG stack
[R_EXTRA_STACK_UP
+3]; // need to skip 3 known levels of stack trace
257 memset ( stack
, 0xCD, sizeof(stack
) );
258 R_GET_STACK_FRAMES ( stack
, R_EXTRA_STACK_UP
+3 );
259 for ( i
= 0; i
< R_EXTRA_STACK_UP
; i
++ )
260 free
->LastOwnerStack
[i
] = stack
[i
+3];
264 RUsedFillStack ( PR_USED used
)
267 ULONG stack
[R_EXTRA_STACK_UP
+2]; // need to skip 2 known levels of stack trace
268 memset ( stack
, 0xCD, sizeof(stack
) );
269 R_GET_STACK_FRAMES ( stack
, R_EXTRA_STACK_UP
+2 );
270 for ( i
= 0; i
< R_EXTRA_STACK_UP
; i
++ )
271 used
->LastOwnerStack
[i
] = stack
[i
+2];
275 RFreeInit ( void* memory
)
277 PR_FREE block
= (PR_FREE
)memory
;
279 block
->FreeMagic
= R_FREE_MAGIC
;
282 RFreeFillStack ( block
);
283 #if defined(DBG) || defined(KDBG)
284 block
->PrevFree
= block
->NextFree
= (PR_FREE
)(ULONG_PTR
)0xDEADBEEF;
290 RPoolInit ( void* PoolBase
, rulong PoolSize
, int align1
, int align2
, int align3
)
293 PR_POOL pool
= (PR_POOL
)PoolBase
;
295 pool
->PoolBase
= PoolBase
;
296 pool
->PoolSize
= PoolSize
;
297 pool
->UserBase
= (char*)pool
->PoolBase
+ sizeof(R_POOL
);
298 pool
->UserSize
= PoolSize
- sizeof(R_POOL
);
299 pool
->Alignments
[0] = align1
;
300 pool
->Alignments
[1] = align2
;
301 pool
->Alignments
[2] = align3
;
302 pool
->FirstFree
= pool
->LastFree
= NULL
;
305 RFreeInit ( pool
->UserBase
));
307 pool
->FirstFree
->PrevSize
= 0;
308 pool
->FirstFree
->Size
= pool
->UserSize
;
310 for ( que
= 0; que
< R_QUECOUNT
; que
++ )
312 for ( align
= 0; align
< 3; align
++ )
314 RQueInit ( &pool
->Que
[que
][align
] );
321 RFormatTag ( rulong Tag
, char* buf
)
324 *(rulong
*)&buf
[0] = Tag
;
326 for ( i
= 0; i
< 4; i
++ )
335 #define RUsedRedZoneCheck(pUsed,Addr,file,line)
338 RiBadBlock ( PR_USED pUsed
, char* Addr
, const char* violation
, const char* file
, int line
, int printzone
)
343 R_DEBUG("%s(%i): %s detected for paged pool address 0x%x\n",
344 file
, line
, violation
, Addr
);
347 R_DEBUG ( "UsedMagic 0x%x, ", pUsed
->UsedMagic
);
349 R_DEBUG ( "Tag %s(%X), Size %i, UserSize %i",
350 RFormatTag(pUsed
->Tag
,tag
),
357 unsigned char* HiZone
= (unsigned char*)Addr
+ pUsed
->UserSize
;
358 unsigned char* LoZone
= (unsigned char*)Addr
- R_RZ
; // this is to simplify indexing below...
359 R_DEBUG ( ", LoZone " );
360 for ( i
= 0; i
< R_RZ
; i
++ )
361 R_DEBUG ( "%02x", LoZone
[i
] );
362 R_DEBUG ( ", HiZone " );
363 for ( i
= 0; i
< R_RZ
; i
++ )
364 R_DEBUG ( "%02x", HiZone
[i
] );
368 R_DEBUG ( "First few Stack Frames:" );
369 RiPrintLastOwner ( pUsed
);
372 R_DEBUG ( "Contents of Block:\n" );
373 for ( i
= 0; i
< 8*16 && i
< pUsed
->UserSize
; i
+= 16 )
376 R_DEBUG ( "%04X ", i
);
377 for ( j
= 0; j
< 16; j
++ )
379 if ( i
+j
< pUsed
->UserSize
)
381 R_DEBUG ( "%02X ", (unsigned)(unsigned char)Addr
[i
+j
] );
389 for ( j
= 0; j
< 16; j
++ )
391 if ( i
+j
< pUsed
->UserSize
)
394 if ( c
< 0x20 || c
> 0x7E )
408 RUsedRedZoneCheck ( PR_POOL pool
, PR_USED pUsed
, char* Addr
, const char* file
, int line
)
411 unsigned char *LoZone
, *HiZone
;
415 ASSERT ( Addr
>= (char*)pool
->UserBase
&& Addr
< ((char*)pool
->UserBase
+ pool
->UserSize
- 16) );
417 if ( pUsed
->UsedMagic
== MM_PPOOL_FREEMAGIC
)
419 pUsed
->UserSize
= 0; // just to keep from confusion, MmpBadBlock() doesn't return...
420 RiBadBlock ( pUsed
, Addr
, "double-free", file
, line
, 0 );
422 if ( pUsed
->UsedMagic
!= MM_PPOOL_USEDMAGIC
)
424 RiBadBlock ( pUsed
, Addr
, "bad magic", file
, line
, 0 );
427 switch ( pUsed
->Status
)
429 case 0: // freed into main pool
431 RiBadBlock ( pUsed
, Addr
, "double-free", file
, line
, 0 );
432 // no need for break here - RiBadBlock doesn't return
433 case 1: // allocated - this is okay
436 RiBadBlock ( pUsed
, Addr
, "corrupt status", file
, line
, 0 );
438 if ( pUsed
->Status
!= 1 )
440 RiBadBlock ( pUsed
, Addr
, "double-free", file
, line
, 0 );
442 if ( pUsed
->Size
> pool
->PoolSize
|| pUsed
->Size
== 0 )
444 RiBadBlock ( pUsed
, Addr
, "invalid size", file
, line
, 0 );
446 if ( pUsed
->UserSize
> pool
->PoolSize
|| pUsed
->UserSize
== 0 )
448 RiBadBlock ( pUsed
, Addr
, "invalid user size", file
, line
, 0 );
450 HiZone
= (unsigned char*)Addr
+ pUsed
->UserSize
;
451 LoZone
= (unsigned char*)Addr
- R_RZ
; // this is to simplify indexing below...
452 for ( i
= 0; i
< R_RZ
&& bLow
&& bHigh
; i
++ )
454 bLow
= bLow
&& ( LoZone
[i
] == R_RZ_LOVALUE
);
455 bHigh
= bHigh
&& ( HiZone
[i
] == R_RZ_HIVALUE
);
457 if ( !bLow
|| !bHigh
)
459 const char* violation
= "High and Low-side redzone overwrite";
460 if ( bHigh
) // high is okay, so it was just low failed
461 violation
= "Low-side redzone overwrite";
462 else if ( bLow
) // low side is okay, so it was just high failed
463 violation
= "High-side redzone overwrite";
464 RiBadBlock ( pUsed
, Addr
, violation
, file
, line
, 1 );
470 RPreviousBlock ( PR_FREE Block
)
472 if ( Block
->PrevSize
> 0 )
473 return (PR_FREE
)( (char*)Block
- Block
->PrevSize
);
478 RNextBlock ( PR_POOL pool
, PR_FREE Block
)
480 PR_FREE NextBlock
= (PR_FREE
)( (char*)Block
+ Block
->Size
);
481 if ( (char*)NextBlock
>= (char*)pool
->UserBase
+ pool
->UserSize
)
487 RHdrToBody ( void* blk
)
489 * FUNCTION: Translate a block header address to the corresponding block
493 return ( (void *) ((char*)blk
+ sizeof(R_USED
) + R_RZ
) );
496 inline static PR_USED
497 RBodyToHdr ( void* addr
)
500 ( ((char*)addr
) - sizeof(R_USED
) - R_RZ
);
504 RiInFreeChain ( PR_POOL pool
, PR_FREE Block
)
507 Free
= pool
->FirstFree
;
510 while ( Free
!= Block
)
512 Free
= Free
->NextFree
;
520 RPoolRedZoneCheck ( PR_POOL pool
, const char* file
, int line
)
523 PR_USED Block
= (PR_USED
)pool
->UserBase
;
528 switch ( Block
->Status
)
530 case 0: // block is in chain
531 ASSERT ( RiInFreeChain ( pool
, (PR_FREE
)Block
) );
533 case 1: // block is allocated
534 RUsedRedZoneCheck ( pool
, Block
, RHdrToBody(Block
), file
, line
);
536 case 2: // block is in que
537 // nothing to verify here yet
540 ASSERT ( !"invalid status in memory block found in pool!" );
542 NextBlock
= (PR_USED
)RNextBlock(pool
,(PR_FREE
)Block
);
545 ASSERT ( NextBlock
->PrevSize
== Block
->Size
);
550 // now let's step through the list of free pointers and verify
551 // each one can be found by size-jumping...
552 PR_FREE Free
= (PR_FREE
)pool
->FirstFree
;
555 PR_FREE NextFree
= (PR_FREE
)pool
->UserBase
;
556 if ( Free
!= NextFree
)
558 while ( NextFree
!= Free
)
560 NextFree
= RNextBlock ( pool
, NextFree
);
564 Free
= Free
->NextFree
;
570 RSetSize ( PR_POOL pool
, PR_FREE Block
, rulong NewSize
, PR_FREE NextBlock
)
572 R_ASSERT_PTR(pool
,Block
);
573 ASSERT ( NewSize
< pool
->UserSize
);
574 ASSERT ( NewSize
>= sizeof(R_FREE
) );
575 Block
->Size
= NewSize
;
577 NextBlock
= RNextBlock ( pool
, Block
);
579 NextBlock
->PrevSize
= NewSize
;
583 RFreeSplit ( PR_POOL pool
, PR_FREE Block
, rulong NewSize
)
585 PR_FREE NewBlock
= (PR_FREE
)((char*)Block
+ NewSize
);
586 RSetSize ( pool
, NewBlock
, Block
->Size
- NewSize
, NULL
);
587 RSetSize ( pool
, Block
, NewSize
, NewBlock
);
588 RFreeInit ( NewBlock
);
589 RPoolAddFree ( pool
, NewBlock
);
594 RFreeMerge ( PR_POOL pool
, PR_FREE First
, PR_FREE Second
)
596 ASSERT ( RPreviousBlock(Second
) == First
);
597 ASSERT ( First
->Size
== Second
->PrevSize
);
598 RPoolRemoveFree ( pool
, Second
);
599 RSetSize ( pool
, First
, First
->Size
+ Second
->Size
, NULL
);
603 RPoolReclaim ( PR_POOL pool
, PR_FREE FreeBlock
)
605 PR_FREE NextBlock
, PreviousBlock
;
607 RFreeInit ( FreeBlock
);
608 RPoolAddFree ( pool
, FreeBlock
);
610 // TODO FIXME - don't merge and always insert freed blocks at the end for debugging purposes...
613 * If the next block is immediately adjacent to the newly freed one then
615 * PLEASE DO NOT WIPE OUT 'MAGIC' OR 'LASTOWNER' DATA FOR MERGED FREE BLOCKS
617 NextBlock
= RNextBlock ( pool
, FreeBlock
);
618 if ( NextBlock
!= NULL
&& !NextBlock
->Status
)
620 RFreeMerge ( pool
, FreeBlock
, NextBlock
);
624 * If the previous block is adjacent to the newly freed one then
626 * PLEASE DO NOT WIPE OUT 'MAGIC' OR 'LASTOWNER' DATA FOR MERGED FREE BLOCKS
628 PreviousBlock
= RPreviousBlock ( FreeBlock
);
629 if ( PreviousBlock
!= NULL
&& !PreviousBlock
->Status
)
631 RFreeMerge ( pool
, PreviousBlock
, FreeBlock
);
636 RiUsedInit ( PR_USED Block
, rulong Tag
)
639 RUsedFillStack ( Block
);
641 Block
->UsedMagic
= R_USED_MAGIC
;
643 //ASSERT_SIZE ( Block->Size );
645 // now add the block to the used block list
646 #if defined(DBG) || defined(KDBG)
647 Block
->NextUsed
= (PR_USED
)(ULONG_PTR
)0xDEADBEEF;
654 #define RiUsedInitRedZone(Block,UserSize)
657 RiUsedInitRedZone ( PR_USED Block
, rulong UserSize
)
659 // write out buffer-overrun detection bytes
660 char* Addr
= (char*)RHdrToBody(Block
);
661 Block
->UserSize
= UserSize
;
662 memset ( Addr
- R_RZ
, R_RZ_LOVALUE
, R_RZ
);
663 memset ( Addr
+ Block
->UserSize
, R_RZ_HIVALUE
, R_RZ
);
664 #if defined(DBG) || defined(KDBG)
665 memset ( Addr
, 0xCD, UserSize
);
671 RPoolAlloc ( PR_POOL pool
, rulong NumberOfBytes
, rulong Tag
, rulong align
)
679 void* BestAlignedAddr
;
681 queBytes
= NumberOfBytes
;
684 int que_reclaimed
= 0;
687 ASSERT ( align
< 3 );
689 R_ACQUIRE_MUTEX(pool
);
691 if ( !NumberOfBytes
)
693 R_DEBUG("0 bytes requested - initiating pool verification\n");
694 RPoolRedZoneCheck ( pool
, __FILE__
, __LINE__
);
695 R_RELEASE_MUTEX(pool
);
698 if ( NumberOfBytes
> pool
->PoolSize
)
700 if ( R_IS_POOL_PTR(pool
,NumberOfBytes
) )
702 R_DEBUG("red zone verification requested for block 0x%X\n", NumberOfBytes
);
703 RUsedRedZoneCheck(pool
,RBodyToHdr((void*)NumberOfBytes
), (char*)NumberOfBytes
, __FILE__
, __LINE__
);
704 R_RELEASE_MUTEX(pool
);
707 R_DEBUG("Invalid allocation request: %i bytes\n", NumberOfBytes
);
708 R_RELEASE_MUTEX(pool
);
712 que
= RQueWhich ( NumberOfBytes
);
715 if ( (NewBlock
= RQueRemove ( &pool
->Que
[que
][align
] )) )
717 R_RELEASE_MUTEX(pool
);
718 RiUsedInit ( NewBlock
, Tag
);
719 RiUsedInitRedZone ( NewBlock
, NumberOfBytes
);
720 return RHdrToBody(NewBlock
);
722 queBytes
= 16 << que
;
726 * Calculate the total number of bytes we will need.
728 BlockSize
= queBytes
+ sizeof(R_USED
) + 2*R_RZ
;
729 if (BlockSize
< sizeof(R_FREE
))
731 /* At least we need the size of the free block header. */
732 BlockSize
= sizeof(R_FREE
);
737 * Find the best-fitting block.
740 Alignment
= pool
->Alignments
[align
];
741 PreviousBlock
= NULL
;
742 BestPreviousBlock
= NULL
,
743 CurrentBlock
= pool
->FirstFree
;
744 BestAlignedAddr
= NULL
;
746 while ( CurrentBlock
!= NULL
)
748 PVOID Addr
= RHdrToBody(CurrentBlock
);
749 PVOID CurrentBlockEnd
= (char*)CurrentBlock
+ CurrentBlock
->Size
;
750 /* calculate last size-aligned address available within this block */
751 PVOID AlignedAddr
= R_ROUND_DOWN((char*)CurrentBlockEnd
-queBytes
-R_RZ
, Alignment
);
752 ASSERT ( (char*)AlignedAddr
+queBytes
+R_RZ
<= (char*)CurrentBlockEnd
);
754 /* special case, this address is already size-aligned, and the right size */
755 if ( Addr
== AlignedAddr
)
757 BestAlignedAddr
= AlignedAddr
;
758 BestPreviousBlock
= PreviousBlock
;
759 BestBlock
= CurrentBlock
;
762 // if we carve out a size-aligned block... is it still past the end of this
763 // block's free header?
764 else if ( (char*)RBodyToHdr(AlignedAddr
)
765 >= (char*)CurrentBlock
+sizeof(R_FREE
) )
768 * there's enough room to allocate our size-aligned memory out
769 * of this block, see if it's a better choice than any previous
772 if ( BestBlock
== NULL
773 || BestBlock
->Size
> CurrentBlock
->Size
)
775 BestAlignedAddr
= AlignedAddr
;
776 BestPreviousBlock
= PreviousBlock
;
777 BestBlock
= CurrentBlock
;
781 PreviousBlock
= CurrentBlock
;
782 CurrentBlock
= CurrentBlock
->NextFree
;
786 * We didn't find anything suitable at all.
788 if (BestBlock
== NULL
)
790 if ( !que_reclaimed
)
794 for ( i
= 0; i
< R_QUECOUNT
; i
++ )
796 for ( j
= 0; j
< 3; j
++ )
798 while ( (BestBlock
= (PR_FREE
)RQueRemove ( &pool
->Que
[i
][j
] )) )
800 RPoolReclaim ( pool
, BestBlock
);
808 DPRINT1("Trying to allocate %lu bytes from paged pool - nothing suitable found, returning NULL\n",
810 R_RELEASE_MUTEX(pool
);
814 * we found a best block. If Addr isn't already aligned, we've pre-qualified that
815 * there's room at the beginning of the block for a free block...
818 void* Addr
= RHdrToBody(BestBlock
);
819 if ( BestAlignedAddr
!= Addr
)
821 PR_FREE NewFreeBlock
= RFreeSplit (
824 (char*)RBodyToHdr(BestAlignedAddr
) - (char*)BestBlock
);
825 ASSERT ( BestAlignedAddr
> Addr
);
827 //DPRINT ( "breaking off preceding bytes into their own block...\n" );
828 /*DPRINT ( "NewFreeBlock 0x%x Size %lu (Old Block's new size %lu) NextFree 0x%x\n",
829 NewFreeBlock, NewFreeBlock->Size, BestBlock->Size, BestBlock->NextFree );*/
831 /* we want the following code to use our size-aligned block */
832 BestPreviousBlock
= BestBlock
;
833 BestBlock
= NewFreeBlock
;
839 * Is there enough space to create a second block from the unused portion.
841 if ( (BestBlock
->Size
- BlockSize
) > sizeof(R_FREE
) )
843 /*DPRINT("BestBlock 0x%x Size 0x%x BlockSize 0x%x NewSize 0x%x\n",
844 BestBlock, BestBlock->Size, BlockSize, NewSize );*/
847 * Create the new free block.
849 NextBlock
= RFreeSplit ( pool
, BestBlock
, BlockSize
);
850 //ASSERT_SIZE ( NextBlock->Size );
853 * Remove the selected block from the list of free blocks.
855 //DPRINT ( "Removing selected block from free block list\n" );
856 RPoolRemoveFree ( pool
, BestBlock
);
858 * Create the new used block header.
860 NewBlock
= (PR_USED
)BestBlock
;
861 RiUsedInit ( NewBlock
, Tag
);
863 R_RELEASE_MUTEX(pool
);
865 /* RtlZeroMemory(RHdrToBody(NewBlock), NumberOfBytes);*/
867 RiUsedInitRedZone ( NewBlock
, NumberOfBytes
);
869 return RHdrToBody(NewBlock
);
873 RPoolFree ( PR_POOL pool
, void* Addr
)
884 R_DEBUG("Attempt to free NULL ptr, initiating Red Zone Check\n" );
885 R_ACQUIRE_MUTEX(pool
);
886 RPoolRedZoneCheck ( pool
, __FILE__
, __LINE__
);
887 R_RELEASE_MUTEX(pool
);
890 R_ASSERT_PTR(pool
,Addr
);
892 UsedBlock
= RBodyToHdr(Addr
);
893 UsedSize
= UsedBlock
->Size
;
894 FreeBlock
= (PR_FREE
)UsedBlock
;
896 UserSize
= UsedBlock
->UserSize
;
898 UserSize
= UsedSize
- sizeof(R_USED
) - 2*R_RZ
;
901 RUsedRedZoneCheck ( pool
, UsedBlock
, Addr
, __FILE__
, __LINE__
);
904 memset ( Addr
, 0xCD, UsedBlock
->UserSize
);
907 que
= RQueWhich ( UserSize
);
910 int queBytes
= 16 << que
;
911 ASSERT( queBytes
>= UserSize
);
915 if ( R_ROUND_UP(Addr
,pool
->Alignments
[2]) == Addr
)
917 else if ( R_ROUND_UP(Addr
,pool
->Alignments
[1]) == Addr
)
919 R_ACQUIRE_MUTEX(pool
);
920 RQueAdd ( &pool
->Que
[que
][align
], UsedBlock
);
921 R_RELEASE_MUTEX(pool
);
926 R_ACQUIRE_MUTEX(pool
);
927 RPoolReclaim ( pool
, FreeBlock
);
928 R_RELEASE_MUTEX(pool
);
932 RPoolDumpByTag ( PR_POOL pool
, rulong Tag
)
934 PR_USED Block
= (PR_USED
)pool
->UserBase
;
939 // TODO FIXME - should we validate params or ASSERT_IRQL?
940 R_DEBUG ( "PagedPool Dump by tag '%s'\n", RFormatTag(Tag
,tag
) );
941 R_DEBUG ( " -BLOCK-- --SIZE--\n" );
943 R_ACQUIRE_MUTEX(pool
);
946 if ( Block
->Status
== 1 && Block
->Tag
== Tag
)
948 R_DEBUG ( " %08X %08X\n", Block
, Block
->Size
);
951 NextBlock
= (PR_USED
)RNextBlock(pool
,(PR_FREE
)Block
);
954 ASSERT ( NextBlock
->PrevSize
== Block
->Size
);
957 R_RELEASE_MUTEX(pool
);
959 R_DEBUG ( "Entries found for tag '%s': %i\n", tag
, count
);
963 RPoolQueryTag ( void* Addr
)
965 PR_USED Block
= RBodyToHdr(Addr
);
966 // TODO FIXME - should we validate params?
968 if ( Block
->UsedMagic
!= R_USED_MAGIC
)
971 if ( Block
->Status
!= 1 )
977 RPoolStats ( PR_POOL pool
)
979 int free
=0, used
=0, qued
=0;
980 PR_USED Block
= (PR_USED
)pool
->UserBase
;
982 R_ACQUIRE_MUTEX(pool
);
985 switch ( Block
->Status
)
997 ASSERT ( !"Invalid Status for Block in pool!" );
999 Block
= (PR_USED
)RNextBlock(pool
,(PR_FREE
)Block
);
1001 R_RELEASE_MUTEX(pool
);
1003 R_DEBUG ( "Pool Stats: Free=%i, Used=%i, Qued=%i, Total=%i\n", free
, used
, qued
, (free
+used
+qued
) );
#ifdef R_LARGEST_ALLOC_POSSIBLE
/*
 * Scan every non-allocated block and compute the largest user size a
 * single allocation could obtain at the given alignment, accounting for
 * header, redzones, alignment padding, and que-size rounding.
 * FIX: removed the stray semicolon in "if ( cueBytes > BlockMaxUserSize );"
 * which made the following clamp block execute unconditionally.
 * NOTE(review): reconstructed from a damaged dump - the outer while loop
 * condition and the 'cue >= 0' guard are inferred; confirm upstream.
 */
static rulong
RPoolLargestAllocPossible ( PR_POOL pool, int align )
{
	int Alignment = pool->Alignments[align];
	rulong LargestUserSize = 0;
	PR_FREE Block = (PR_FREE)pool->UserBase;

	while ( (char*)Block < (char*)pool->UserBase + pool->UserSize )
	{
		if ( Block->Status != 1 )
		{
			void* Addr, *AlignedAddr;
			rulong BlockMaxUserSize;
			int cue, cueBytes;

			Addr = (char*)Block + sizeof(R_USED) + R_RZ;
			AlignedAddr = R_ROUND_UP(Addr,Alignment);
			if ( Addr != AlignedAddr )
				Addr = R_ROUND_UP((char*)Block + sizeof(R_FREE) + sizeof(R_USED) + R_RZ, Alignment );
			BlockMaxUserSize = (char*)Block + Block->Size - (char*)Addr - R_RZ;
			cue = RQueWhich ( BlockMaxUserSize );
			if ( cue >= 0 )
			{
				cueBytes = 16 << cue;
				if ( cueBytes > BlockMaxUserSize ) // BUG FIX: was "if (...);"
				{
					// can't serve this que size: drop to the next one down
					if ( !cue )
						BlockMaxUserSize = 0;
					else
						BlockMaxUserSize = 16 << (cue-1);
				}
			}
			if ( BlockMaxUserSize > LargestUserSize )
				LargestUserSize = BlockMaxUserSize;
		}
		Block = RNextBlock ( pool, Block );
	}
	return LargestUserSize;
}
#endif//R_LARGEST_ALLOC_POSSIBLE