3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/RPoolMgr.h
6 * PURPOSE: A semi-generic reuseable Pool implementation
7 * PROGRAMMER: Royce Mitchell III
14 typedef unsigned long rulong
;
// TRUE when ptr lies inside the pool's user area [UserBase, UserBase+UserSize).
// BUGFIX: the whole expansion and the 'pool' argument are now parenthesized;
// the unparenthesized original mis-bound under operators like '!' at the use site.
#define R_IS_POOL_PTR(pool,ptr) ( (void*)(ptr) >= (pool)->UserBase && (ULONG_PTR)(ptr) < ((ULONG_PTR)(pool)->UserBase + (pool)->UserSize) )
// Assert that ptr belongs to this pool.
#define R_ASSERT_PTR(pool,ptr) ASSERT( R_IS_POOL_PTR(pool,ptr) )
// Assert that sz is a plausible block size for this pool: big enough to hold
// a used header plus both red zones, big enough for a free header, and
// smaller than the whole user area.
// BUGFIX: 'sz' and 'pool' are now parenthesized inside the expansion so
// compound expressions passed as arguments bind correctly.
#define R_ASSERT_SIZE(pool,sz) ASSERT( (sz) > (sizeof(R_USED)+2*R_RZ) && (sz) >= sizeof(R_FREE) && (sz) < (pool)->UserSize )
// Round x up (resp. down) to a multiple of s and return it as PVOID.
// NOTE(review): the mask trick ~((s)-1) is only correct when s is a power of
// two — all callers in this file pass pool->Alignments[] entries; confirm
// those are always powers of two.
#define R_ROUND_UP(x,s) ((PVOID)(((ULONG_PTR)(x)+(s)-1) & ~((ULONG_PTR)(s)-1)))
#define R_ROUND_DOWN(x,s) ((PVOID)(((ULONG_PTR)(x)) & ~((ULONG_PTR)(s)-1)))
29 // R_QUEMIN is the minimum number of entries to keep in a que
34 // 16, 32, 64, 128, 256, 512
39 // R_RZ is the redzone size
44 #define R_RZ_LOVALUE 0x87
48 #define R_RZ_HIVALUE 0xA5
52 // R_STACK is the number of stack entries to store in blocks for debug purposes
55 #if R_STACK > 0 && R_STACK < 6
56 /* Increase the frame depth to get a reasonable back trace */
59 #endif // R_STACK > 0 && R_STACK < 6
63 // R_TAG do we keep track of tags on a per-memory block basis?
69 # define R_FREE_MAGIC (rulong)(('F'<<0) + ('r'<<8) + ('E'<<16) + ('e'<<24))
72 # define R_USED_MAGIC (rulong)(('u'<<0) + ('S'<<8) + ('e'<<16) + ('D'<<24))
76 // **IMPORTANT NOTE** Magic, PrevSize, Size and Status must be at same offset
77 // in both the R_FREE and R_USED structures
79 typedef struct _R_FREE
88 ULONG_PTR LastOwnerStack
[R_STACK
];
90 struct _R_FREE
* NextFree
;
91 struct _R_FREE
* PrevFree
;
95 typedef struct _R_USED
100 rulong PrevSize
: 30;
104 ULONG_PTR LastOwnerStack
[R_STACK
];
106 struct _R_USED
* NextUsed
;
108 rulong UserSize
; // how many bytes the user actually asked for...
114 typedef struct _R_QUE
121 typedef struct _R_POOL
127 rulong Alignments
[3];
128 PR_FREE FirstFree
, LastFree
;
129 R_QUE Que
[R_QUECOUNT
][3];
135 #define RiPrintLastOwner(Block)
138 RiPrintLastOwner ( PR_USED Block
)
141 for ( i
= 0; i
< R_STACK
; i
++ )
143 if ( Block
->LastOwnerStack
[i
] != 0xDEADBEEF )
146 if (!R_PRINT_ADDRESS ((PVOID
)Block
->LastOwnerStack
[i
]) )
148 R_DEBUG("<%X>", Block
->LastOwnerStack
[i
] );
156 RQueWhich ( rulong size
)
159 for ( que
=0, quesize
=16; que
< R_QUECOUNT
; que
++, quesize
<<=1 )
161 if ( quesize
>= size
)
170 RQueInit ( PR_QUE que
)
178 RQueAdd ( PR_QUE que
, PR_USED Item
)
182 Item
->NextUsed
= NULL
;
186 que
->First
= que
->Last
= Item
;
189 ASSERT(!que
->Last
->NextUsed
);
190 que
->Last
->NextUsed
= Item
;
195 RQueRemove ( PR_QUE que
)
199 if ( que
->count
< R_QUEMIN
)
205 que
->First
= Item
->NextUsed
;
208 ASSERT ( !que
->First
);
216 RPoolAddFree ( PR_POOL pool
, PR_FREE Item
)
220 if ( !pool
->FirstFree
)
222 pool
->FirstFree
= pool
->LastFree
= Item
;
223 Item
->NextFree
= NULL
;
227 pool
->FirstFree
->PrevFree
= Item
;
228 Item
->NextFree
= pool
->FirstFree
;
229 pool
->FirstFree
= Item
;
231 Item
->PrevFree
= NULL
;
235 RPoolRemoveFree ( PR_POOL pool
, PR_FREE Item
)
239 if ( Item
->NextFree
)
240 Item
->NextFree
->PrevFree
= Item
->PrevFree
;
243 ASSERT ( pool
->LastFree
== Item
);
244 pool
->LastFree
= Item
->PrevFree
;
246 if ( Item
->PrevFree
)
247 Item
->PrevFree
->NextFree
= Item
->NextFree
;
250 ASSERT ( pool
->FirstFree
== Item
);
251 pool
->FirstFree
= Item
->NextFree
;
254 Item
->NextFree
= Item
->PrevFree
= (PR_FREE
)(ULONG_PTR
)0xDEADBEEF;
259 #define RFreeFillStack(free)
260 #define RUsedFillStack(used)
263 RFreeFillStack ( PR_FREE free
)
266 ULONG stack
[R_STACK
+3]; // need to skip 3 known levels of stack trace
267 memset ( stack
, 0xCD, sizeof(stack
) );
268 R_GET_STACK_FRAMES ( stack
, R_STACK
+3 );
269 for ( i
= 0; i
< R_STACK
; i
++ )
270 free
->LastOwnerStack
[i
] = stack
[i
+3];
274 RUsedFillStack ( PR_USED used
)
277 ULONG stack
[R_STACK
+2]; // need to skip 2 known levels of stack trace
278 memset ( stack
, 0xCD, sizeof(stack
) );
279 R_GET_STACK_FRAMES ( stack
, R_STACK
+2 );
280 for ( i
= 0; i
< R_STACK
; i
++ )
281 used
->LastOwnerStack
[i
] = stack
[i
+2];
286 RFreeInit ( void* memory
)
288 PR_FREE block
= (PR_FREE
)memory
;
290 block
->FreeMagic
= R_FREE_MAGIC
;
293 RFreeFillStack ( block
);
295 block
->PrevFree
= block
->NextFree
= (PR_FREE
)(ULONG_PTR
)0xDEADBEEF;
301 RPoolInit ( void* PoolBase
, rulong PoolSize
, int align1
, int align2
, int align3
)
304 PR_POOL pool
= (PR_POOL
)PoolBase
;
306 pool
->PoolBase
= PoolBase
;
307 pool
->PoolSize
= PoolSize
;
308 pool
->UserBase
= (char*)pool
->PoolBase
+ sizeof(R_POOL
);
309 pool
->UserSize
= PoolSize
- sizeof(R_POOL
);
310 pool
->Alignments
[0] = align1
;
311 pool
->Alignments
[1] = align2
;
312 pool
->Alignments
[2] = align3
;
313 pool
->FirstFree
= pool
->LastFree
= NULL
;
316 RFreeInit ( pool
->UserBase
));
318 pool
->FirstFree
->PrevSize
= 0;
319 pool
->FirstFree
->Size
= pool
->UserSize
;
321 for ( que
= 0; que
< R_QUECOUNT
; que
++ )
323 for ( align
= 0; align
< 3; align
++ )
325 RQueInit ( &pool
->Que
[que
][align
] );
333 RFormatTag ( rulong Tag
, char* buf
)
336 *(rulong
*)&buf
[0] = Tag
;
338 for ( i
= 0; i
< 4; i
++ )
348 #define RUsedRedZoneCheck(pUsed,Addr,file,line, printzone)
351 RiBadBlock ( PR_USED pUsed
, char* Addr
, const char* violation
, const char* file
, int line
, int printzone
)
356 R_DEBUG("%s(%i): %s detected for paged pool address 0x%x\n",
357 file
, line
, violation
, Addr
);
360 R_DEBUG ( "UsedMagic 0x%x, ", pUsed
->UsedMagic
);
362 R_DEBUG ( "Tag %s(%X), Size %i, UserSize %i",
363 RFormatTag(pUsed
->Tag
,tag
),
370 unsigned char* HiZone
= (unsigned char*)Addr
+ pUsed
->UserSize
;
371 unsigned char* LoZone
= (unsigned char*)Addr
- R_RZ
; // this is to simplify indexing below...
372 R_DEBUG ( ", LoZone " );
373 for ( i
= 0; i
< R_RZ
; i
++ )
374 R_DEBUG ( "%02x", LoZone
[i
] );
375 R_DEBUG ( ", HiZone " );
376 for ( i
= 0; i
< R_RZ
; i
++ )
377 R_DEBUG ( "%02x", HiZone
[i
] );
381 R_DEBUG ( "First few Stack Frames:" );
382 RiPrintLastOwner ( pUsed
);
385 R_DEBUG ( "Contents of Block:\n" );
386 for ( i
= 0; i
< 8*16 && i
< pUsed
->UserSize
; i
+= 16 )
389 R_DEBUG ( "%04X ", i
);
390 for ( j
= 0; j
< 16; j
++ )
392 if ( i
+j
< pUsed
->UserSize
)
394 R_DEBUG ( "%02X ", (unsigned)(unsigned char)Addr
[i
+j
] );
402 for ( j
= 0; j
< 16; j
++ )
404 if ( i
+j
< pUsed
->UserSize
)
407 if ( c
< 0x20 || c
> 0x7E )
421 RUsedRedZoneCheck ( PR_POOL pool
, PR_USED pUsed
, char* Addr
, const char* file
, int line
)
424 unsigned char *LoZone
, *HiZone
;
428 ASSERT ( Addr
>= (char*)pool
->UserBase
&& Addr
< ((char*)pool
->UserBase
+ pool
->UserSize
- 16) );
430 if ( pUsed
->UsedMagic
== MM_PPOOL_FREEMAGIC
)
432 pUsed
->UserSize
= 0; // just to keep from confusion, MmpBadBlock() doesn't return...
433 RiBadBlock ( pUsed
, Addr
, "double-free", file
, line
, 0 );
435 if ( pUsed
->UsedMagic
!= MM_PPOOL_USEDMAGIC
)
437 RiBadBlock ( pUsed
, Addr
, "bad magic", file
, line
, 0 );
440 switch ( pUsed
->Status
)
442 case 0: // freed into main pool
444 RiBadBlock ( pUsed
, Addr
, "double-free", file
, line
, 0 );
445 // no need for break here - RiBadBlock doesn't return
446 case 1: // allocated - this is okay
449 RiBadBlock ( pUsed
, Addr
, "corrupt status", file
, line
, 0 );
451 if ( pUsed
->Status
!= 1 )
453 RiBadBlock ( pUsed
, Addr
, "double-free", file
, line
, 0 );
455 if ( pUsed
->Size
> pool
->PoolSize
|| pUsed
->Size
== 0 )
457 RiBadBlock ( pUsed
, Addr
, "invalid size", file
, line
, 0 );
459 if ( pUsed
->UserSize
> pool
->PoolSize
|| pUsed
->UserSize
== 0 )
461 RiBadBlock ( pUsed
, Addr
, "invalid user size", file
, line
, 0 );
463 HiZone
= (unsigned char*)Addr
+ pUsed
->UserSize
;
464 LoZone
= (unsigned char*)Addr
- R_RZ
; // this is to simplify indexing below...
465 for ( i
= 0; i
< R_RZ
&& bLow
&& bHigh
; i
++ )
467 bLow
= bLow
&& ( LoZone
[i
] == R_RZ_LOVALUE
);
468 bHigh
= bHigh
&& ( HiZone
[i
] == R_RZ_HIVALUE
);
470 if ( !bLow
|| !bHigh
)
472 const char* violation
= "High and Low-side redzone overwrite";
473 if ( bHigh
) // high is okay, so it was just low failed
474 violation
= "Low-side redzone overwrite";
475 else if ( bLow
) // low side is okay, so it was just high failed
476 violation
= "High-side redzone overwrite";
477 RiBadBlock ( pUsed
, Addr
, violation
, file
, line
, 1 );
483 RPreviousBlock ( PR_FREE Block
)
485 if ( Block
->PrevSize
> 0 )
486 return (PR_FREE
)( (char*)Block
- Block
->PrevSize
);
491 RNextBlock ( PR_POOL pool
, PR_FREE Block
)
493 PR_FREE NextBlock
= (PR_FREE
)( (char*)Block
+ Block
->Size
);
494 if ( (char*)NextBlock
>= (char*)pool
->UserBase
+ pool
->UserSize
)
499 static __inline
void*
500 RHdrToBody ( void* blk
)
502 * FUNCTION: Translate a block header address to the corresponding block
506 return ( (void *) ((char*)blk
+ sizeof(R_USED
) + R_RZ
) );
509 static __inline PR_USED
510 RBodyToHdr ( void* addr
)
513 ( ((char*)addr
) - sizeof(R_USED
) - R_RZ
);
517 RiInFreeChain ( PR_POOL pool
, PR_FREE Block
)
520 Free
= pool
->FirstFree
;
523 while ( Free
!= Block
)
525 Free
= Free
->NextFree
;
533 RPoolRedZoneCheck ( PR_POOL pool
, const char* file
, int line
)
536 PR_USED Block
= (PR_USED
)pool
->UserBase
;
541 switch ( Block
->Status
)
543 case 0: // block is in chain
544 ASSERT ( RiInFreeChain ( pool
, (PR_FREE
)Block
) );
546 case 1: // block is allocated
547 RUsedRedZoneCheck ( pool
, Block
, RHdrToBody(Block
), file
, line
);
549 case 2: // block is in que
550 // nothing to verify here yet
553 ASSERT ( !"invalid status in memory block found in pool!" );
555 NextBlock
= (PR_USED
)RNextBlock(pool
,(PR_FREE
)Block
);
558 ASSERT ( NextBlock
->PrevSize
== Block
->Size
);
563 // now let's step through the list of free pointers and verify
564 // each one can be found by size-jumping...
565 PR_FREE Free
= (PR_FREE
)pool
->FirstFree
;
568 PR_FREE NextFree
= (PR_FREE
)pool
->UserBase
;
569 if ( Free
!= NextFree
)
571 while ( NextFree
!= Free
)
573 NextFree
= RNextBlock ( pool
, NextFree
);
577 Free
= Free
->NextFree
;
583 RSetSize ( PR_POOL pool
, PR_FREE Block
, rulong NewSize
, PR_FREE NextBlock
)
585 R_ASSERT_PTR(pool
,Block
);
586 ASSERT ( NewSize
< pool
->UserSize
);
587 ASSERT ( NewSize
>= sizeof(R_FREE
) );
588 Block
->Size
= NewSize
;
590 NextBlock
= RNextBlock ( pool
, Block
);
592 NextBlock
->PrevSize
= NewSize
;
596 RFreeSplit ( PR_POOL pool
, PR_FREE Block
, rulong NewSize
)
598 PR_FREE NewBlock
= (PR_FREE
)((char*)Block
+ NewSize
);
599 RSetSize ( pool
, NewBlock
, Block
->Size
- NewSize
, NULL
);
600 RSetSize ( pool
, Block
, NewSize
, NewBlock
);
601 RFreeInit ( NewBlock
);
602 RPoolAddFree ( pool
, NewBlock
);
607 RFreeMerge ( PR_POOL pool
, PR_FREE First
, PR_FREE Second
)
609 ASSERT ( RPreviousBlock(Second
) == First
);
610 ASSERT ( First
->Size
== Second
->PrevSize
);
611 RPoolRemoveFree ( pool
, Second
);
612 RSetSize ( pool
, First
, First
->Size
+ Second
->Size
, NULL
);
616 RPoolReclaim ( PR_POOL pool
, PR_FREE FreeBlock
)
618 PR_FREE NextBlock
, PreviousBlock
;
620 RFreeInit ( FreeBlock
);
621 RPoolAddFree ( pool
, FreeBlock
);
623 // TODO FIXME - don't merge and always insert freed blocks at the end for debugging purposes...
626 * If the next block is immediately adjacent to the newly freed one then
628 * PLEASE DO NOT WIPE OUT 'MAGIC' OR 'LASTOWNER' DATA FOR MERGED FREE BLOCKS
630 NextBlock
= RNextBlock ( pool
, FreeBlock
);
631 if ( NextBlock
!= NULL
&& !NextBlock
->Status
)
633 RFreeMerge ( pool
, FreeBlock
, NextBlock
);
637 * If the previous block is adjacent to the newly freed one then
639 * PLEASE DO NOT WIPE OUT 'MAGIC' OR 'LASTOWNER' DATA FOR MERGED FREE BLOCKS
641 PreviousBlock
= RPreviousBlock ( FreeBlock
);
642 if ( PreviousBlock
!= NULL
&& !PreviousBlock
->Status
)
644 RFreeMerge ( pool
, PreviousBlock
, FreeBlock
);
649 RiUsedInit ( PR_USED Block
, rulong Tag
)
652 RUsedFillStack ( Block
);
654 Block
->UsedMagic
= R_USED_MAGIC
;
656 //ASSERT_SIZE ( Block->Size );
658 // now add the block to the used block list
660 Block
->NextUsed
= (PR_USED
)(ULONG_PTR
)0xDEADBEEF;
667 #define RiUsedInitRedZone(Block,UserSize)
670 RiUsedInitRedZone ( PR_USED Block
, rulong UserSize
)
672 // write out buffer-overrun detection bytes
673 char* Addr
= (char*)RHdrToBody(Block
);
674 Block
->UserSize
= UserSize
;
675 memset ( Addr
- R_RZ
, R_RZ_LOVALUE
, R_RZ
);
676 memset ( Addr
+ Block
->UserSize
, R_RZ_HIVALUE
, R_RZ
);
678 memset ( Addr
, 0xCD, UserSize
);
684 RPoolAlloc ( PR_POOL pool
, rulong NumberOfBytes
, rulong Tag
, rulong align
)
692 void* BestAlignedAddr
;
694 queBytes
= NumberOfBytes
;
697 int que_reclaimed
= 0;
700 ASSERT ( align
< 3 );
702 R_ACQUIRE_MUTEX(pool
);
704 if ( !NumberOfBytes
)
706 R_DEBUG("0 bytes requested - initiating pool verification\n");
707 RPoolRedZoneCheck ( pool
, __FILE__
, __LINE__
);
708 R_RELEASE_MUTEX(pool
);
711 if ( NumberOfBytes
> pool
->PoolSize
)
713 if ( R_IS_POOL_PTR(pool
,NumberOfBytes
) )
715 R_DEBUG("red zone verification requested for block 0x%X\n", NumberOfBytes
);
716 RUsedRedZoneCheck(pool
,RBodyToHdr((void*)NumberOfBytes
), (char*)NumberOfBytes
, __FILE__
, __LINE__
);
717 R_RELEASE_MUTEX(pool
);
720 R_DEBUG("Invalid allocation request: %i bytes\n", NumberOfBytes
);
721 R_RELEASE_MUTEX(pool
);
725 que
= RQueWhich ( NumberOfBytes
);
728 if ( (NewBlock
= RQueRemove ( &pool
->Que
[que
][align
] )) )
730 RiUsedInit ( NewBlock
, Tag
);
731 RiUsedInitRedZone ( NewBlock
, NumberOfBytes
);
732 R_RELEASE_MUTEX(pool
);
733 return RHdrToBody(NewBlock
);
735 queBytes
= 16 << que
;
739 * Calculate the total number of bytes we will need.
741 BlockSize
= queBytes
+ sizeof(R_USED
) + 2*R_RZ
;
742 if (BlockSize
< sizeof(R_FREE
))
744 /* At least we need the size of the free block header. */
745 BlockSize
= sizeof(R_FREE
);
750 * Find the best-fitting block.
753 Alignment
= pool
->Alignments
[align
];
754 PreviousBlock
= NULL
;
755 BestPreviousBlock
= NULL
,
756 CurrentBlock
= pool
->FirstFree
;
757 BestAlignedAddr
= NULL
;
759 while ( CurrentBlock
!= NULL
)
761 PVOID Addr
= RHdrToBody(CurrentBlock
);
762 PVOID CurrentBlockEnd
= (char*)CurrentBlock
+ CurrentBlock
->Size
;
763 /* calculate last size-aligned address available within this block */
764 PVOID AlignedAddr
= R_ROUND_DOWN((char*)CurrentBlockEnd
-queBytes
-R_RZ
, Alignment
);
765 ASSERT ( (char*)AlignedAddr
+queBytes
+R_RZ
<= (char*)CurrentBlockEnd
);
767 /* special case, this address is already size-aligned, and the right size */
768 if ( Addr
== AlignedAddr
)
770 BestAlignedAddr
= AlignedAddr
;
771 BestPreviousBlock
= PreviousBlock
;
772 BestBlock
= CurrentBlock
;
775 // if we carve out a size-aligned block... is it still past the end of this
776 // block's free header?
777 else if ( (char*)RBodyToHdr(AlignedAddr
)
778 >= (char*)CurrentBlock
+sizeof(R_FREE
) )
781 * there's enough room to allocate our size-aligned memory out
782 * of this block, see if it's a better choice than any previous
785 if ( BestBlock
== NULL
786 || BestBlock
->Size
> CurrentBlock
->Size
)
788 BestAlignedAddr
= AlignedAddr
;
789 BestPreviousBlock
= PreviousBlock
;
790 BestBlock
= CurrentBlock
;
794 PreviousBlock
= CurrentBlock
;
795 CurrentBlock
= CurrentBlock
->NextFree
;
799 * We didn't find anything suitable at all.
801 if (BestBlock
== NULL
)
803 if ( !que_reclaimed
)
807 for ( i
= 0; i
< R_QUECOUNT
; i
++ )
809 for ( j
= 0; j
< 3; j
++ )
811 while ( (BestBlock
= (PR_FREE
)RQueRemove ( &pool
->Que
[i
][j
] )) )
813 RPoolReclaim ( pool
, BestBlock
);
821 DPRINT1("Trying to allocate %lu bytes from paged pool - nothing suitable found, returning NULL\n",
823 R_RELEASE_MUTEX(pool
);
827 * we found a best block. If Addr isn't already aligned, we've pre-qualified that
828 * there's room at the beginning of the block for a free block...
831 void* Addr
= RHdrToBody(BestBlock
);
832 if ( BestAlignedAddr
!= Addr
)
834 PR_FREE NewFreeBlock
= RFreeSplit (
837 (char*)RBodyToHdr(BestAlignedAddr
) - (char*)BestBlock
);
838 ASSERT ( BestAlignedAddr
> Addr
);
840 //DPRINT ( "breaking off preceding bytes into their own block...\n" );
841 /*DPRINT ( "NewFreeBlock 0x%x Size %lu (Old Block's new size %lu) NextFree 0x%x\n",
842 NewFreeBlock, NewFreeBlock->Size, BestBlock->Size, BestBlock->NextFree );*/
844 /* we want the following code to use our size-aligned block */
845 BestPreviousBlock
= BestBlock
;
846 BestBlock
= NewFreeBlock
;
852 * Is there enough space to create a second block from the unused portion.
854 if ( (BestBlock
->Size
- BlockSize
) > sizeof(R_FREE
) )
856 /*DPRINT("BestBlock 0x%x Size 0x%x BlockSize 0x%x NewSize 0x%x\n",
857 BestBlock, BestBlock->Size, BlockSize, NewSize );*/
860 * Create the new free block.
862 NextBlock
= RFreeSplit ( pool
, BestBlock
, BlockSize
);
863 //ASSERT_SIZE ( NextBlock->Size );
866 * Remove the selected block from the list of free blocks.
868 //DPRINT ( "Removing selected block from free block list\n" );
869 RPoolRemoveFree ( pool
, BestBlock
);
871 * Create the new used block header.
873 NewBlock
= (PR_USED
)BestBlock
;
874 RiUsedInit ( NewBlock
, Tag
);
876 /* RtlZeroMemory(RHdrToBody(NewBlock), NumberOfBytes);*/
878 RiUsedInitRedZone ( NewBlock
, NumberOfBytes
);
879 R_RELEASE_MUTEX(pool
);
881 return RHdrToBody(NewBlock
);
885 RPoolFree ( PR_POOL pool
, void* Addr
)
896 R_DEBUG("Attempt to free NULL ptr, initiating Red Zone Check\n" );
897 R_ACQUIRE_MUTEX(pool
);
898 RPoolRedZoneCheck ( pool
, __FILE__
, __LINE__
);
899 R_RELEASE_MUTEX(pool
);
902 R_ASSERT_PTR(pool
,Addr
);
904 UsedBlock
= RBodyToHdr(Addr
);
905 UsedSize
= UsedBlock
->Size
;
906 FreeBlock
= (PR_FREE
)UsedBlock
;
908 UserSize
= UsedBlock
->UserSize
;
910 UserSize
= UsedSize
- sizeof(R_USED
) - 2*R_RZ
;
913 RUsedRedZoneCheck ( pool
, UsedBlock
, Addr
, __FILE__
, __LINE__
);
916 memset ( Addr
, 0xCD, UsedBlock
->UserSize
);
919 que
= RQueWhich ( UserSize
);
922 int queBytes
= 16 << que
;
923 ASSERT( (rulong
)queBytes
>= UserSize
);
927 if ( R_ROUND_UP(Addr
,pool
->Alignments
[2]) == Addr
)
929 else if ( R_ROUND_UP(Addr
,pool
->Alignments
[1]) == Addr
)
931 R_ACQUIRE_MUTEX(pool
);
932 RQueAdd ( &pool
->Que
[que
][align
], UsedBlock
);
933 R_RELEASE_MUTEX(pool
);
938 R_ACQUIRE_MUTEX(pool
);
939 RPoolReclaim ( pool
, FreeBlock
);
940 R_RELEASE_MUTEX(pool
);
945 RPoolDumpByTag ( PR_POOL pool
, rulong Tag
)
947 PR_USED Block
= (PR_USED
)pool
->UserBase
;
952 // TODO FIXME - should we validate params or ASSERT_IRQL?
953 R_DEBUG ( "PagedPool Dump by tag '%s'\n", RFormatTag(Tag
,tag
) );
954 R_DEBUG ( " -BLOCK-- --SIZE--\n" );
956 R_ACQUIRE_MUTEX(pool
);
959 if ( Block
->Status
== 1 && Block
->Tag
== Tag
)
961 R_DEBUG ( " %08X %08X\n", Block
, Block
->Size
);
964 NextBlock
= (PR_USED
)RNextBlock(pool
,(PR_FREE
)Block
);
967 ASSERT ( NextBlock
->PrevSize
== Block
->Size
);
970 R_RELEASE_MUTEX(pool
);
972 R_DEBUG ( "Entries found for tag '%s': %i\n", tag
, count
);
977 RPoolQueryTag ( void* Addr
)
979 PR_USED Block
= RBodyToHdr(Addr
);
980 // TODO FIXME - should we validate params?
982 if ( Block
->UsedMagic
!= R_USED_MAGIC
)
985 if ( Block
->Status
!= 1 )
991 RPoolStats ( PR_POOL pool
)
993 int free
=0, used
=0, qued
=0;
994 PR_USED Block
= (PR_USED
)pool
->UserBase
;
996 R_ACQUIRE_MUTEX(pool
);
999 switch ( Block
->Status
)
1011 ASSERT ( !"Invalid Status for Block in pool!" );
1013 Block
= (PR_USED
)RNextBlock(pool
,(PR_FREE
)Block
);
1015 R_RELEASE_MUTEX(pool
);
1017 R_DEBUG ( "Pool Stats: Free=%i, Used=%i, Qued=%i, Total=%i\n", free
, used
, qued
, (free
+used
+qued
) );
1020 #ifdef R_LARGEST_ALLOC_POSSIBLE
1022 RPoolLargestAllocPossible ( PR_POOL pool
, int align
)
1024 int Alignment
= pool
->Alignments
[align
];
1025 rulong LargestUserSize
= 0;
1026 PR_FREE Block
= (PR_FREE
)pool
->UserBase
;
1029 if ( Block
->Status
!= 1 )
1031 void* Addr
, *AlignedAddr
;
1032 rulong BlockMaxUserSize
;
1035 Addr
= (char*)Block
+ sizeof(R_USED
) + R_RZ
;
1036 AlignedAddr
= R_ROUND_UP(Addr
,Alignment
);
1037 if ( Addr
!= AlignedAddr
)
1038 Addr
= R_ROUND_UP((char*)Block
+ sizeof(R_FREE
) + sizeof(R_USED
) + R_RZ
, Alignment
);
1039 BlockMaxUserSize
= (char*)Block
+ Block
->Size
- (char*)Addr
- R_RZ
;
1040 cue
= RQueWhich ( BlockMaxUserSize
);
1043 cueBytes
= 16 << cue
;
1044 if ( cueBytes
> BlockMaxUserSize
);
1047 BlockMaxUserSize
= 0;
1049 BlockMaxUserSize
= 16 << (cue
-1);
1052 if ( BlockMaxUserSize
> LargestUserSize
)
1053 LargestUserSize
= BlockMaxUserSize
;
1055 Block
= RNextBlock ( pool
, Block
);
1057 return LargestUserSize
;
1059 #endif//R_LARGEST_ALLOC_POSSIBLE