54a5fb50f96dccf9d5a3cb02acef28d9f73776bd
 * COPYRIGHT:   See COPYING in the top level directory
 * PROJECT:     ReactOS kernel
 * FILE:        ntoskrnl/mm/RPoolMgr.h
 * PURPOSE:     A semi-generic reusable Pool implementation
 * PROGRAMMER:  Royce Mitchell III
/* Pool-local shorthand for an unsigned machine-word counter/size type. */
typedef unsigned long rulong;
/*
 * R_IS_POOL_PTR: nonzero iff ptr lies inside pool's user area.
 * FIX: the expansion and the macro arguments are now fully parenthesized;
 * the original expanded to `a >= b && c < d` with no outer parens, so
 * `!R_IS_POOL_PTR(p,x)` applied the `!` to the first cast only.
 */
#define R_IS_POOL_PTR(pool,ptr) \
	( (void*)(ptr) >= (pool)->UserBase \
	  && (ULONG_PTR)(ptr) < ((ULONG_PTR)(pool)->UserBase + (pool)->UserSize) )

/* Assert that ptr points into pool's user area. */
#define R_ASSERT_PTR(pool,ptr) ASSERT( R_IS_POOL_PTR(pool,ptr) )

/* Assert that sz is a plausible block size for this pool
 * (big enough for a used header + red zones and a free header,
 * and smaller than the whole user area). `sz` is parenthesized
 * so expressions like `a + b` work as an argument. */
#define R_ASSERT_SIZE(pool,sz) \
	ASSERT( (sz) > (sizeof(R_USED)+2*R_RZ) && (sz) >= sizeof(R_FREE) && (sz) < (pool)->UserSize )
/*
 * Round x down / up to a multiple of s.
 * NOTE: s must be a power of two - these operate by bit masking
 * ((ULONG_PTR)(s)-1 is the low-bit mask).
 */
#define R_ROUND_DOWN(x,s) ((PVOID)((ULONG_PTR)(x) - ((ULONG_PTR)(x) & ((ULONG_PTR)(s)-1))))
#define R_ROUND_UP(x,s)   R_ROUND_DOWN((ULONG_PTR)(x)+(s)-1, s)
29 // R_QUEMIN is the minimum number of entries to keep in a que
34 // 16, 32, 64, 128, 256, 512
39 // R_RZ is the redzone size
44 #define R_RZ_LOVALUE 0x87
48 #define R_RZ_HIVALUE 0xA5
52 // R_STACK is the number of stack entries to store in blocks for debug purposes
55 #if R_STACK > 0 && R_STACK < 6
56 /* Increase the frame depth to get a reasonable back trace */
59 #endif // R_STACK > 0 && R_STACK < 6
63 // R_TAG do we keep track of tags on a per-memory block basis?
69 # define R_FREE_MAGIC (rulong)(('F'<<0) + ('r'<<8) + ('E'<<16) + ('e'<<24))
72 # define R_USED_MAGIC (rulong)(('u'<<0) + ('S'<<8) + ('e'<<16) + ('D'<<24))
76 // **IMPORTANT NOTE** Magic, PrevSize, Size and Status must be at same offset
77 // in both the R_FREE and R_USED structures
79 typedef struct _R_FREE
88 ULONG_PTR LastOwnerStack
[R_STACK
];
90 struct _R_FREE
* NextFree
;
91 struct _R_FREE
* PrevFree
;
95 typedef struct _R_USED
100 rulong PrevSize
: 30;
104 ULONG_PTR LastOwnerStack
[R_STACK
];
106 struct _R_USED
* NextUsed
;
108 rulong UserSize
; // how many bytes the user actually asked for...
114 typedef struct _R_QUE
121 typedef struct _R_POOL
127 rulong Alignments
[3];
128 PR_FREE FirstFree
, LastFree
;
129 R_QUE Que
[R_QUECOUNT
][3];
135 #define RiPrintLastOwner(Block)
138 RiPrintLastOwner ( PR_USED Block
)
141 for ( i
= 0; i
< R_STACK
; i
++ )
143 if ( Block
->LastOwnerStack
[i
] != 0xDEADBEEF )
146 if (!R_PRINT_ADDRESS ((PVOID
)Block
->LastOwnerStack
[i
]) )
148 R_DEBUG("<%X>", Block
->LastOwnerStack
[i
] );
156 RQueWhich ( rulong size
)
159 for ( que
=0, quesize
=16; que
< R_QUECOUNT
; que
++, quesize
<<=1 )
161 if ( quesize
>= size
)
170 RQueInit ( PR_QUE que
)
178 RQueAdd ( PR_QUE que
, PR_USED Item
)
182 Item
->NextUsed
= NULL
;
186 que
->First
= que
->Last
= Item
;
189 ASSERT(!que
->Last
->NextUsed
);
190 que
->Last
->NextUsed
= Item
;
195 RQueRemove ( PR_QUE que
)
199 if ( que
->count
< R_QUEMIN
)
205 que
->First
= Item
->NextUsed
;
208 ASSERT ( !que
->First
);
216 RPoolAddFree ( PR_POOL pool
, PR_FREE Item
)
220 if ( !pool
->FirstFree
)
222 pool
->FirstFree
= pool
->LastFree
= Item
;
223 Item
->NextFree
= NULL
;
227 pool
->FirstFree
->PrevFree
= Item
;
228 Item
->NextFree
= pool
->FirstFree
;
229 pool
->FirstFree
= Item
;
231 Item
->PrevFree
= NULL
;
235 RPoolRemoveFree ( PR_POOL pool
, PR_FREE Item
)
239 if ( Item
->NextFree
)
240 Item
->NextFree
->PrevFree
= Item
->PrevFree
;
243 ASSERT ( pool
->LastFree
== Item
);
244 pool
->LastFree
= Item
->PrevFree
;
246 if ( Item
->PrevFree
)
247 Item
->PrevFree
->NextFree
= Item
->NextFree
;
250 ASSERT ( pool
->FirstFree
== Item
);
251 pool
->FirstFree
= Item
->NextFree
;
254 Item
->NextFree
= Item
->PrevFree
= (PR_FREE
)(ULONG_PTR
)0xDEADBEEF;
259 #define RFreeFillStack(free)
260 #define RUsedFillStack(used)
263 RFreeFillStack ( PR_FREE free
)
266 ULONG stack
[R_STACK
+3]; // need to skip 3 known levels of stack trace
267 memset ( stack
, 0xCD, sizeof(stack
) );
268 R_GET_STACK_FRAMES ( stack
, R_STACK
+3 );
269 for ( i
= 0; i
< R_STACK
; i
++ )
270 free
->LastOwnerStack
[i
] = stack
[i
+3];
274 RUsedFillStack ( PR_USED used
)
277 ULONG stack
[R_STACK
+2]; // need to skip 2 known levels of stack trace
278 memset ( stack
, 0xCD, sizeof(stack
) );
279 R_GET_STACK_FRAMES ( stack
, R_STACK
+2 );
280 for ( i
= 0; i
< R_STACK
; i
++ )
281 used
->LastOwnerStack
[i
] = stack
[i
+2];
286 RFreeInit ( void* memory
)
288 PR_FREE block
= (PR_FREE
)memory
;
290 block
->FreeMagic
= R_FREE_MAGIC
;
293 RFreeFillStack ( block
);
295 block
->PrevFree
= block
->NextFree
= (PR_FREE
)(ULONG_PTR
)0xDEADBEEF;
301 RPoolInit ( void* PoolBase
, rulong PoolSize
, int align1
, int align2
, int align3
)
304 PR_POOL pool
= (PR_POOL
)PoolBase
;
306 pool
->PoolBase
= PoolBase
;
307 pool
->PoolSize
= PoolSize
;
308 pool
->UserBase
= (char*)pool
->PoolBase
+ sizeof(R_POOL
);
309 pool
->UserSize
= PoolSize
- sizeof(R_POOL
);
310 pool
->Alignments
[0] = align1
;
311 pool
->Alignments
[1] = align2
;
312 pool
->Alignments
[2] = align3
;
313 pool
->FirstFree
= pool
->LastFree
= NULL
;
316 RFreeInit ( pool
->UserBase
));
318 pool
->FirstFree
->PrevSize
= 0;
319 pool
->FirstFree
->Size
= pool
->UserSize
;
321 for ( que
= 0; que
< R_QUECOUNT
; que
++ )
323 for ( align
= 0; align
< 3; align
++ )
325 RQueInit ( &pool
->Que
[que
][align
] );
332 RFormatTag ( rulong Tag
, char* buf
)
335 *(rulong
*)&buf
[0] = Tag
;
337 for ( i
= 0; i
< 4; i
++ )
346 #define RUsedRedZoneCheck(pUsed,Addr,file,line, printzone)
349 RiBadBlock ( PR_USED pUsed
, char* Addr
, const char* violation
, const char* file
, int line
, int printzone
)
354 R_DEBUG("%s(%i): %s detected for paged pool address 0x%x\n",
355 file
, line
, violation
, Addr
);
358 R_DEBUG ( "UsedMagic 0x%x, ", pUsed
->UsedMagic
);
360 R_DEBUG ( "Tag %s(%X), Size %i, UserSize %i",
361 RFormatTag(pUsed
->Tag
,tag
),
368 unsigned char* HiZone
= (unsigned char*)Addr
+ pUsed
->UserSize
;
369 unsigned char* LoZone
= (unsigned char*)Addr
- R_RZ
; // this is to simplify indexing below...
370 R_DEBUG ( ", LoZone " );
371 for ( i
= 0; i
< R_RZ
; i
++ )
372 R_DEBUG ( "%02x", LoZone
[i
] );
373 R_DEBUG ( ", HiZone " );
374 for ( i
= 0; i
< R_RZ
; i
++ )
375 R_DEBUG ( "%02x", HiZone
[i
] );
379 R_DEBUG ( "First few Stack Frames:" );
380 RiPrintLastOwner ( pUsed
);
383 R_DEBUG ( "Contents of Block:\n" );
384 for ( i
= 0; i
< 8*16 && i
< pUsed
->UserSize
; i
+= 16 )
387 R_DEBUG ( "%04X ", i
);
388 for ( j
= 0; j
< 16; j
++ )
390 if ( i
+j
< pUsed
->UserSize
)
392 R_DEBUG ( "%02X ", (unsigned)(unsigned char)Addr
[i
+j
] );
400 for ( j
= 0; j
< 16; j
++ )
402 if ( i
+j
< pUsed
->UserSize
)
405 if ( c
< 0x20 || c
> 0x7E )
419 RUsedRedZoneCheck ( PR_POOL pool
, PR_USED pUsed
, char* Addr
, const char* file
, int line
)
422 unsigned char *LoZone
, *HiZone
;
426 ASSERT ( Addr
>= (char*)pool
->UserBase
&& Addr
< ((char*)pool
->UserBase
+ pool
->UserSize
- 16) );
428 if ( pUsed
->UsedMagic
== MM_PPOOL_FREEMAGIC
)
430 pUsed
->UserSize
= 0; // just to keep from confusion, MmpBadBlock() doesn't return...
431 RiBadBlock ( pUsed
, Addr
, "double-free", file
, line
, 0 );
433 if ( pUsed
->UsedMagic
!= MM_PPOOL_USEDMAGIC
)
435 RiBadBlock ( pUsed
, Addr
, "bad magic", file
, line
, 0 );
438 switch ( pUsed
->Status
)
440 case 0: // freed into main pool
442 RiBadBlock ( pUsed
, Addr
, "double-free", file
, line
, 0 );
443 // no need for break here - RiBadBlock doesn't return
444 case 1: // allocated - this is okay
447 RiBadBlock ( pUsed
, Addr
, "corrupt status", file
, line
, 0 );
449 if ( pUsed
->Status
!= 1 )
451 RiBadBlock ( pUsed
, Addr
, "double-free", file
, line
, 0 );
453 if ( pUsed
->Size
> pool
->PoolSize
|| pUsed
->Size
== 0 )
455 RiBadBlock ( pUsed
, Addr
, "invalid size", file
, line
, 0 );
457 if ( pUsed
->UserSize
> pool
->PoolSize
|| pUsed
->UserSize
== 0 )
459 RiBadBlock ( pUsed
, Addr
, "invalid user size", file
, line
, 0 );
461 HiZone
= (unsigned char*)Addr
+ pUsed
->UserSize
;
462 LoZone
= (unsigned char*)Addr
- R_RZ
; // this is to simplify indexing below...
463 for ( i
= 0; i
< R_RZ
&& bLow
&& bHigh
; i
++ )
465 bLow
= bLow
&& ( LoZone
[i
] == R_RZ_LOVALUE
);
466 bHigh
= bHigh
&& ( HiZone
[i
] == R_RZ_HIVALUE
);
468 if ( !bLow
|| !bHigh
)
470 const char* violation
= "High and Low-side redzone overwrite";
471 if ( bHigh
) // high is okay, so it was just low failed
472 violation
= "Low-side redzone overwrite";
473 else if ( bLow
) // low side is okay, so it was just high failed
474 violation
= "High-side redzone overwrite";
475 RiBadBlock ( pUsed
, Addr
, violation
, file
, line
, 1 );
481 RPreviousBlock ( PR_FREE Block
)
483 if ( Block
->PrevSize
> 0 )
484 return (PR_FREE
)( (char*)Block
- Block
->PrevSize
);
489 RNextBlock ( PR_POOL pool
, PR_FREE Block
)
491 PR_FREE NextBlock
= (PR_FREE
)( (char*)Block
+ Block
->Size
);
492 if ( (char*)NextBlock
>= (char*)pool
->UserBase
+ pool
->UserSize
)
498 RHdrToBody ( void* blk
)
500 * FUNCTION: Translate a block header address to the corresponding block
504 return ( (void *) ((char*)blk
+ sizeof(R_USED
) + R_RZ
) );
507 inline static PR_USED
508 RBodyToHdr ( void* addr
)
511 ( ((char*)addr
) - sizeof(R_USED
) - R_RZ
);
515 RiInFreeChain ( PR_POOL pool
, PR_FREE Block
)
518 Free
= pool
->FirstFree
;
521 while ( Free
!= Block
)
523 Free
= Free
->NextFree
;
531 RPoolRedZoneCheck ( PR_POOL pool
, const char* file
, int line
)
534 PR_USED Block
= (PR_USED
)pool
->UserBase
;
539 switch ( Block
->Status
)
541 case 0: // block is in chain
542 ASSERT ( RiInFreeChain ( pool
, (PR_FREE
)Block
) );
544 case 1: // block is allocated
545 RUsedRedZoneCheck ( pool
, Block
, RHdrToBody(Block
), file
, line
);
547 case 2: // block is in que
548 // nothing to verify here yet
551 ASSERT ( !"invalid status in memory block found in pool!" );
553 NextBlock
= (PR_USED
)RNextBlock(pool
,(PR_FREE
)Block
);
556 ASSERT ( NextBlock
->PrevSize
== Block
->Size
);
561 // now let's step through the list of free pointers and verify
562 // each one can be found by size-jumping...
563 PR_FREE Free
= (PR_FREE
)pool
->FirstFree
;
566 PR_FREE NextFree
= (PR_FREE
)pool
->UserBase
;
567 if ( Free
!= NextFree
)
569 while ( NextFree
!= Free
)
571 NextFree
= RNextBlock ( pool
, NextFree
);
575 Free
= Free
->NextFree
;
581 RSetSize ( PR_POOL pool
, PR_FREE Block
, rulong NewSize
, PR_FREE NextBlock
)
583 R_ASSERT_PTR(pool
,Block
);
584 ASSERT ( NewSize
< pool
->UserSize
);
585 ASSERT ( NewSize
>= sizeof(R_FREE
) );
586 Block
->Size
= NewSize
;
588 NextBlock
= RNextBlock ( pool
, Block
);
590 NextBlock
->PrevSize
= NewSize
;
594 RFreeSplit ( PR_POOL pool
, PR_FREE Block
, rulong NewSize
)
596 PR_FREE NewBlock
= (PR_FREE
)((char*)Block
+ NewSize
);
597 RSetSize ( pool
, NewBlock
, Block
->Size
- NewSize
, NULL
);
598 RSetSize ( pool
, Block
, NewSize
, NewBlock
);
599 RFreeInit ( NewBlock
);
600 RPoolAddFree ( pool
, NewBlock
);
605 RFreeMerge ( PR_POOL pool
, PR_FREE First
, PR_FREE Second
)
607 ASSERT ( RPreviousBlock(Second
) == First
);
608 ASSERT ( First
->Size
== Second
->PrevSize
);
609 RPoolRemoveFree ( pool
, Second
);
610 RSetSize ( pool
, First
, First
->Size
+ Second
->Size
, NULL
);
614 RPoolReclaim ( PR_POOL pool
, PR_FREE FreeBlock
)
616 PR_FREE NextBlock
, PreviousBlock
;
618 RFreeInit ( FreeBlock
);
619 RPoolAddFree ( pool
, FreeBlock
);
621 // TODO FIXME - don't merge and always insert freed blocks at the end for debugging purposes...
624 * If the next block is immediately adjacent to the newly freed one then
626 * PLEASE DO NOT WIPE OUT 'MAGIC' OR 'LASTOWNER' DATA FOR MERGED FREE BLOCKS
628 NextBlock
= RNextBlock ( pool
, FreeBlock
);
629 if ( NextBlock
!= NULL
&& !NextBlock
->Status
)
631 RFreeMerge ( pool
, FreeBlock
, NextBlock
);
635 * If the previous block is adjacent to the newly freed one then
637 * PLEASE DO NOT WIPE OUT 'MAGIC' OR 'LASTOWNER' DATA FOR MERGED FREE BLOCKS
639 PreviousBlock
= RPreviousBlock ( FreeBlock
);
640 if ( PreviousBlock
!= NULL
&& !PreviousBlock
->Status
)
642 RFreeMerge ( pool
, PreviousBlock
, FreeBlock
);
647 RiUsedInit ( PR_USED Block
, rulong Tag
)
650 RUsedFillStack ( Block
);
652 Block
->UsedMagic
= R_USED_MAGIC
;
654 //ASSERT_SIZE ( Block->Size );
656 // now add the block to the used block list
658 Block
->NextUsed
= (PR_USED
)(ULONG_PTR
)0xDEADBEEF;
665 #define RiUsedInitRedZone(Block,UserSize)
668 RiUsedInitRedZone ( PR_USED Block
, rulong UserSize
)
670 // write out buffer-overrun detection bytes
671 char* Addr
= (char*)RHdrToBody(Block
);
672 Block
->UserSize
= UserSize
;
673 memset ( Addr
- R_RZ
, R_RZ_LOVALUE
, R_RZ
);
674 memset ( Addr
+ Block
->UserSize
, R_RZ_HIVALUE
, R_RZ
);
676 memset ( Addr
, 0xCD, UserSize
);
682 RPoolAlloc ( PR_POOL pool
, rulong NumberOfBytes
, rulong Tag
, rulong align
)
690 void* BestAlignedAddr
;
692 queBytes
= NumberOfBytes
;
695 int que_reclaimed
= 0;
698 ASSERT ( align
< 3 );
700 R_ACQUIRE_MUTEX(pool
);
702 if ( !NumberOfBytes
)
704 R_DEBUG("0 bytes requested - initiating pool verification\n");
705 RPoolRedZoneCheck ( pool
, __FILE__
, __LINE__
);
706 R_RELEASE_MUTEX(pool
);
709 if ( NumberOfBytes
> pool
->PoolSize
)
711 if ( R_IS_POOL_PTR(pool
,NumberOfBytes
) )
713 R_DEBUG("red zone verification requested for block 0x%X\n", NumberOfBytes
);
714 RUsedRedZoneCheck(pool
,RBodyToHdr((void*)NumberOfBytes
), (char*)NumberOfBytes
, __FILE__
, __LINE__
);
715 R_RELEASE_MUTEX(pool
);
718 R_DEBUG("Invalid allocation request: %i bytes\n", NumberOfBytes
);
719 R_RELEASE_MUTEX(pool
);
723 que
= RQueWhich ( NumberOfBytes
);
726 if ( (NewBlock
= RQueRemove ( &pool
->Que
[que
][align
] )) )
728 RiUsedInit ( NewBlock
, Tag
);
729 RiUsedInitRedZone ( NewBlock
, NumberOfBytes
);
730 R_RELEASE_MUTEX(pool
);
731 return RHdrToBody(NewBlock
);
733 queBytes
= 16 << que
;
737 * Calculate the total number of bytes we will need.
739 BlockSize
= queBytes
+ sizeof(R_USED
) + 2*R_RZ
;
740 if (BlockSize
< sizeof(R_FREE
))
742 /* At least we need the size of the free block header. */
743 BlockSize
= sizeof(R_FREE
);
748 * Find the best-fitting block.
751 Alignment
= pool
->Alignments
[align
];
752 PreviousBlock
= NULL
;
753 BestPreviousBlock
= NULL
,
754 CurrentBlock
= pool
->FirstFree
;
755 BestAlignedAddr
= NULL
;
757 while ( CurrentBlock
!= NULL
)
759 PVOID Addr
= RHdrToBody(CurrentBlock
);
760 PVOID CurrentBlockEnd
= (char*)CurrentBlock
+ CurrentBlock
->Size
;
761 /* calculate last size-aligned address available within this block */
762 PVOID AlignedAddr
= R_ROUND_DOWN((char*)CurrentBlockEnd
-queBytes
-R_RZ
, Alignment
);
763 ASSERT ( (char*)AlignedAddr
+queBytes
+R_RZ
<= (char*)CurrentBlockEnd
);
765 /* special case, this address is already size-aligned, and the right size */
766 if ( Addr
== AlignedAddr
)
768 BestAlignedAddr
= AlignedAddr
;
769 BestPreviousBlock
= PreviousBlock
;
770 BestBlock
= CurrentBlock
;
773 // if we carve out a size-aligned block... is it still past the end of this
774 // block's free header?
775 else if ( (char*)RBodyToHdr(AlignedAddr
)
776 >= (char*)CurrentBlock
+sizeof(R_FREE
) )
779 * there's enough room to allocate our size-aligned memory out
780 * of this block, see if it's a better choice than any previous
783 if ( BestBlock
== NULL
784 || BestBlock
->Size
> CurrentBlock
->Size
)
786 BestAlignedAddr
= AlignedAddr
;
787 BestPreviousBlock
= PreviousBlock
;
788 BestBlock
= CurrentBlock
;
792 PreviousBlock
= CurrentBlock
;
793 CurrentBlock
= CurrentBlock
->NextFree
;
797 * We didn't find anything suitable at all.
799 if (BestBlock
== NULL
)
801 if ( !que_reclaimed
)
805 for ( i
= 0; i
< R_QUECOUNT
; i
++ )
807 for ( j
= 0; j
< 3; j
++ )
809 while ( (BestBlock
= (PR_FREE
)RQueRemove ( &pool
->Que
[i
][j
] )) )
811 RPoolReclaim ( pool
, BestBlock
);
819 DPRINT1("Trying to allocate %lu bytes from paged pool - nothing suitable found, returning NULL\n",
821 R_RELEASE_MUTEX(pool
);
825 * we found a best block. If Addr isn't already aligned, we've pre-qualified that
826 * there's room at the beginning of the block for a free block...
829 void* Addr
= RHdrToBody(BestBlock
);
830 if ( BestAlignedAddr
!= Addr
)
832 PR_FREE NewFreeBlock
= RFreeSplit (
835 (char*)RBodyToHdr(BestAlignedAddr
) - (char*)BestBlock
);
836 ASSERT ( BestAlignedAddr
> Addr
);
838 //DPRINT ( "breaking off preceding bytes into their own block...\n" );
839 /*DPRINT ( "NewFreeBlock 0x%x Size %lu (Old Block's new size %lu) NextFree 0x%x\n",
840 NewFreeBlock, NewFreeBlock->Size, BestBlock->Size, BestBlock->NextFree );*/
842 /* we want the following code to use our size-aligned block */
843 BestPreviousBlock
= BestBlock
;
844 BestBlock
= NewFreeBlock
;
850 * Is there enough space to create a second block from the unused portion.
852 if ( (BestBlock
->Size
- BlockSize
) > sizeof(R_FREE
) )
854 /*DPRINT("BestBlock 0x%x Size 0x%x BlockSize 0x%x NewSize 0x%x\n",
855 BestBlock, BestBlock->Size, BlockSize, NewSize );*/
858 * Create the new free block.
860 NextBlock
= RFreeSplit ( pool
, BestBlock
, BlockSize
);
861 //ASSERT_SIZE ( NextBlock->Size );
864 * Remove the selected block from the list of free blocks.
866 //DPRINT ( "Removing selected block from free block list\n" );
867 RPoolRemoveFree ( pool
, BestBlock
);
869 * Create the new used block header.
871 NewBlock
= (PR_USED
)BestBlock
;
872 RiUsedInit ( NewBlock
, Tag
);
874 /* RtlZeroMemory(RHdrToBody(NewBlock), NumberOfBytes);*/
876 RiUsedInitRedZone ( NewBlock
, NumberOfBytes
);
877 R_RELEASE_MUTEX(pool
);
879 return RHdrToBody(NewBlock
);
883 RPoolFree ( PR_POOL pool
, void* Addr
)
894 R_DEBUG("Attempt to free NULL ptr, initiating Red Zone Check\n" );
895 R_ACQUIRE_MUTEX(pool
);
896 RPoolRedZoneCheck ( pool
, __FILE__
, __LINE__
);
897 R_RELEASE_MUTEX(pool
);
900 R_ASSERT_PTR(pool
,Addr
);
902 UsedBlock
= RBodyToHdr(Addr
);
903 UsedSize
= UsedBlock
->Size
;
904 FreeBlock
= (PR_FREE
)UsedBlock
;
906 UserSize
= UsedBlock
->UserSize
;
908 UserSize
= UsedSize
- sizeof(R_USED
) - 2*R_RZ
;
911 RUsedRedZoneCheck ( pool
, UsedBlock
, Addr
, __FILE__
, __LINE__
);
914 memset ( Addr
, 0xCD, UsedBlock
->UserSize
);
917 que
= RQueWhich ( UserSize
);
920 int queBytes
= 16 << que
;
921 ASSERT( (rulong
)queBytes
>= UserSize
);
925 if ( R_ROUND_UP(Addr
,pool
->Alignments
[2]) == Addr
)
927 else if ( R_ROUND_UP(Addr
,pool
->Alignments
[1]) == Addr
)
929 R_ACQUIRE_MUTEX(pool
);
930 RQueAdd ( &pool
->Que
[que
][align
], UsedBlock
);
931 R_RELEASE_MUTEX(pool
);
936 R_ACQUIRE_MUTEX(pool
);
937 RPoolReclaim ( pool
, FreeBlock
);
938 R_RELEASE_MUTEX(pool
);
943 RPoolDumpByTag ( PR_POOL pool
, rulong Tag
)
945 PR_USED Block
= (PR_USED
)pool
->UserBase
;
950 // TODO FIXME - should we validate params or ASSERT_IRQL?
951 R_DEBUG ( "PagedPool Dump by tag '%s'\n", RFormatTag(Tag
,tag
) );
952 R_DEBUG ( " -BLOCK-- --SIZE--\n" );
954 R_ACQUIRE_MUTEX(pool
);
957 if ( Block
->Status
== 1 && Block
->Tag
== Tag
)
959 R_DEBUG ( " %08X %08X\n", Block
, Block
->Size
);
962 NextBlock
= (PR_USED
)RNextBlock(pool
,(PR_FREE
)Block
);
965 ASSERT ( NextBlock
->PrevSize
== Block
->Size
);
968 R_RELEASE_MUTEX(pool
);
970 R_DEBUG ( "Entries found for tag '%s': %i\n", tag
, count
);
975 RPoolQueryTag ( void* Addr
)
977 PR_USED Block
= RBodyToHdr(Addr
);
978 // TODO FIXME - should we validate params?
980 if ( Block
->UsedMagic
!= R_USED_MAGIC
)
983 if ( Block
->Status
!= 1 )
989 RPoolStats ( PR_POOL pool
)
991 int free
=0, used
=0, qued
=0;
992 PR_USED Block
= (PR_USED
)pool
->UserBase
;
994 R_ACQUIRE_MUTEX(pool
);
997 switch ( Block
->Status
)
1009 ASSERT ( !"Invalid Status for Block in pool!" );
1011 Block
= (PR_USED
)RNextBlock(pool
,(PR_FREE
)Block
);
1013 R_RELEASE_MUTEX(pool
);
1015 R_DEBUG ( "Pool Stats: Free=%i, Used=%i, Qued=%i, Total=%i\n", free
, used
, qued
, (free
+used
+qued
) );
1018 #ifdef R_LARGEST_ALLOC_POSSIBLE
1020 RPoolLargestAllocPossible ( PR_POOL pool
, int align
)
1022 int Alignment
= pool
->Alignments
[align
];
1023 rulong LargestUserSize
= 0;
1024 PR_FREE Block
= (PR_FREE
)pool
->UserBase
;
1027 if ( Block
->Status
!= 1 )
1029 void* Addr
, *AlignedAddr
;
1030 rulong BlockMaxUserSize
;
1033 Addr
= (char*)Block
+ sizeof(R_USED
) + R_RZ
;
1034 AlignedAddr
= R_ROUND_UP(Addr
,Alignment
);
1035 if ( Addr
!= AlignedAddr
)
1036 Addr
= R_ROUND_UP((char*)Block
+ sizeof(R_FREE
) + sizeof(R_USED
) + R_RZ
, Alignment
);
1037 BlockMaxUserSize
= (char*)Block
+ Block
->Size
- (char*)Addr
- R_RZ
;
1038 cue
= RQueWhich ( BlockMaxUserSize
);
1041 cueBytes
= 16 << cue
;
1042 if ( cueBytes
> BlockMaxUserSize
);
1045 BlockMaxUserSize
= 0;
1047 BlockMaxUserSize
= 16 << (cue
-1);
1050 if ( BlockMaxUserSize
> LargestUserSize
)
1051 LargestUserSize
= BlockMaxUserSize
;
1053 Block
= RNextBlock ( pool
, Block
);
1055 return LargestUserSize
;
1057 #endif//R_LARGEST_ALLOC_POSSIBLE