2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/mm/RPoolMgr.h
5 * PURPOSE: A semi-generic reusable Pool implementation
6 * PROGRAMMER: Royce Mitchell III
// Pool-local unsigned integer type used throughout this file for block
// sizes, tags, counters and bit patterns.
13 typedef unsigned long rulong
;
/* Pool pointer/size validation and alignment helpers.
 * Fix: every macro parameter is now fully parenthesized ('pool', 'sz')
 * so the macros stay correct when handed arbitrary expressions
 * (CERT PRE01-C); previously 'sz > ...' and 'pool->UserBase' would
 * mis-expand for compound arguments. */

/* TRUE iff ptr falls inside pool's user area [UserBase, UserBase+UserSize). */
#define R_IS_POOL_PTR(pool,ptr) (((void*)(ULONG_PTR)(ptr) >= (pool)->UserBase) && ((ULONG_PTR)(ptr) < ((ULONG_PTR)(pool)->UserBase + (pool)->UserSize)))

/* Assert that ptr is a valid pointer into pool's user area. */
#define R_ASSERT_PTR(pool,ptr) ASSERT( R_IS_POOL_PTR(pool,ptr) )

/* Assert that sz is a plausible block size: large enough for a used header
 * plus both redzones, at least a free header, and smaller than the whole
 * user area. */
#define R_ASSERT_SIZE(pool,sz) ASSERT( (sz) > (sizeof(R_USED)+2*R_RZ) && (sz) >= sizeof(R_FREE) && (sz) < (pool)->UserSize )

/* Round x up / down to a multiple of s (s must be a power of two). */
#define R_ROUND_UP(x,s) ((PVOID)(((ULONG_PTR)(x)+(s)-1) & ~((ULONG_PTR)(s)-1)))
#define R_ROUND_DOWN(x,s) ((PVOID)(((ULONG_PTR)(x)) & ~((ULONG_PTR)(s)-1)))
28 // R_QUEMIN is the minimum number of entries to keep in a que
33 // 16, 32, 64, 128, 256, 512
38 // R_RZ is the redzone size
// Fill patterns written into the low (before user data) and high (after
// user data) redzones; verified on free to detect buffer under/overruns.
43 #define R_RZ_LOVALUE 0x87
47 #define R_RZ_HIVALUE 0xA5
51 // R_STACK is the number of stack entries to store in blocks for debug purposes
54 #if R_STACK > 0 && R_STACK < 6
55 /* Increase the frame depth to get a reasonable back trace */
58 #endif // R_STACK > 0 && R_STACK < 6
62 // R_TAG do we keep track of tags on a per-memory block basis?
// Magic values stamped into block headers so corruption, double-frees and
// stale pointers can be recognized ('FrEe' / 'uSeD' as little-endian text).
68 # define R_FREE_MAGIC (rulong)(('F'<<0) + ('r'<<8) + ('E'<<16) + ('e'<<24))
71 # define R_USED_MAGIC (rulong)(('u'<<0) + ('S'<<8) + ('e'<<16) + ('D'<<24))
75 // **IMPORTANT NOTE** Magic, PrevSize, Size and Status must be at same offset
76 // in both the R_FREE and R_USED structures
78 typedef struct _R_FREE
87 ULONG_PTR LastOwnerStack
[R_STACK
];
89 struct _R_FREE
* NextFree
;
90 struct _R_FREE
* PrevFree
;
94 typedef struct _R_USED
103 ULONG_PTR LastOwnerStack
[R_STACK
];
105 struct _R_USED
* NextUsed
;
107 rulong UserSize
; // how many bytes the user actually asked for...
113 typedef struct _R_QUE
120 typedef struct _R_POOL
126 rulong Alignments
[3];
127 PR_FREE FirstFree
, LastFree
;
128 R_QUE Que
[R_QUECOUNT
][3];
134 #define RiPrintLastOwner(Block)
137 RiPrintLastOwner ( PR_USED Block
)
140 for ( i
= 0; i
< R_STACK
; i
++ )
142 if ( Block
->LastOwnerStack
[i
] != 0xDEADBEEF )
145 //if (!R_PRINT_ADDRESS ((PVOID)Block->LastOwnerStack[i]) )
147 R_DEBUG("<%X>", Block
->LastOwnerStack
[i
] );
155 RQueWhich ( rulong size
)
158 for ( que
=0, quesize
=16; que
< R_QUECOUNT
; que
++, quesize
<<=1 )
160 if ( quesize
>= size
)
169 RQueInit ( PR_QUE que
)
177 RQueAdd ( PR_QUE que
, PR_USED Item
)
181 Item
->NextUsed
= NULL
;
185 que
->First
= que
->Last
= Item
;
188 ASSERT(!que
->Last
->NextUsed
);
189 que
->Last
->NextUsed
= Item
;
194 RQueRemove ( PR_QUE que
)
198 if ( que
->count
< R_QUEMIN
)
204 que
->First
= Item
->NextUsed
;
207 ASSERT ( !que
->First
);
215 RPoolAddFree ( PR_POOL pool
, PR_FREE Item
)
219 if ( !pool
->FirstFree
)
221 pool
->FirstFree
= pool
->LastFree
= Item
;
222 Item
->NextFree
= NULL
;
226 pool
->FirstFree
->PrevFree
= Item
;
227 Item
->NextFree
= pool
->FirstFree
;
228 pool
->FirstFree
= Item
;
230 Item
->PrevFree
= NULL
;
234 RPoolRemoveFree ( PR_POOL pool
, PR_FREE Item
)
238 if ( Item
->NextFree
)
239 Item
->NextFree
->PrevFree
= Item
->PrevFree
;
242 ASSERT ( pool
->LastFree
== Item
);
243 pool
->LastFree
= Item
->PrevFree
;
245 if ( Item
->PrevFree
)
246 Item
->PrevFree
->NextFree
= Item
->NextFree
;
249 ASSERT ( pool
->FirstFree
== Item
);
250 pool
->FirstFree
= Item
->NextFree
;
253 Item
->NextFree
= Item
->PrevFree
= (PR_FREE
)(ULONG_PTR
)0xDEADBEEF;
258 #define RFreeFillStack(free)
259 #define RUsedFillStack(used)
262 RFreeFillStack ( PR_FREE free
)
265 ULONG stack
[R_STACK
+3]; // need to skip 3 known levels of stack trace
266 memset ( stack
, 0xCD, sizeof(stack
) );
267 R_GET_STACK_FRAMES ( stack
, R_STACK
+3 );
268 for ( i
= 0; i
< R_STACK
; i
++ )
269 free
->LastOwnerStack
[i
] = stack
[i
+3];
273 RUsedFillStack ( PR_USED used
)
276 ULONG stack
[R_STACK
+2]; // need to skip 2 known levels of stack trace
277 memset ( stack
, 0xCD, sizeof(stack
) );
278 R_GET_STACK_FRAMES ( stack
, R_STACK
+2 );
279 for ( i
= 0; i
< R_STACK
; i
++ )
280 used
->LastOwnerStack
[i
] = stack
[i
+2];
// Stamp a raw memory region as a free-block header: set the free magic,
// record the caller's stack (debug builds, via RFreeFillStack) and poison
// the free-list links.
// NOTE(review): extraction fragment - return type, braces and the final
// 'return block;' (plus any Status setup) are not visible here; confirm
// against the original file before relying on this.
285 RFreeInit ( void* memory
)
287 PR_FREE block
= (PR_FREE
)memory
;
289 block
->FreeMagic
= R_FREE_MAGIC
;
292 RFreeFillStack ( block
);
// Links are poisoned with 0xDEADBEEF until RPoolAddFree() threads the
// block into the free list - catches use of an unlinked block.
294 block
->PrevFree
= block
->NextFree
= (PR_FREE
)(ULONG_PTR
)0xDEADBEEF;
300 RPoolInit ( void* PoolBase
, rulong PoolSize
, int align1
, int align2
, int align3
)
303 PR_POOL pool
= (PR_POOL
)PoolBase
;
305 pool
->PoolBase
= PoolBase
;
306 pool
->PoolSize
= PoolSize
;
307 pool
->UserBase
= (char*)pool
->PoolBase
+ sizeof(R_POOL
);
308 pool
->UserSize
= PoolSize
- sizeof(R_POOL
);
309 pool
->Alignments
[0] = align1
;
310 pool
->Alignments
[1] = align2
;
311 pool
->Alignments
[2] = align3
;
312 pool
->FirstFree
= pool
->LastFree
= NULL
;
315 RFreeInit ( pool
->UserBase
));
317 pool
->FirstFree
->PrevSize
= 0;
318 pool
->FirstFree
->Size
= pool
->UserSize
;
320 for ( que
= 0; que
< R_QUECOUNT
; que
++ )
322 for ( align
= 0; align
< 3; align
++ )
324 RQueInit ( &pool
->Que
[que
][align
] );
332 RFormatTag ( rulong Tag
, char* buf
)
335 *(rulong
*)&buf
[0] = Tag
;
337 for ( i
= 0; i
< 4; i
++ )
347 #define RUsedRedZoneCheck(pUsed,Addr,file,line, printzone)
350 RiBadBlock ( PR_USED pUsed
, char* Addr
, const char* violation
, const char* file
, int line
, int printzone
)
355 R_DEBUG("%s(%i): %s detected for paged pool address 0x%x\n",
356 file
, line
, violation
, Addr
);
359 R_DEBUG ( "UsedMagic 0x%x, ", pUsed
->UsedMagic
);
361 R_DEBUG ( "Tag %s(%X), Size %i, UserSize %i",
362 RFormatTag(pUsed
->Tag
,tag
),
369 unsigned char* HiZone
= (unsigned char*)Addr
+ pUsed
->UserSize
;
370 unsigned char* LoZone
= (unsigned char*)Addr
- R_RZ
; // this is to simplify indexing below...
371 R_DEBUG ( ", LoZone " );
372 for ( i
= 0; i
< R_RZ
; i
++ )
373 R_DEBUG ( "%02x", LoZone
[i
] );
374 R_DEBUG ( ", HiZone " );
375 for ( i
= 0; i
< R_RZ
; i
++ )
376 R_DEBUG ( "%02x", HiZone
[i
] );
380 R_DEBUG ( "First few Stack Frames:" );
381 RiPrintLastOwner ( pUsed
);
384 R_DEBUG ( "Contents of Block:\n" );
385 for ( i
= 0; i
< 8*16 && i
< pUsed
->UserSize
; i
+= 16 )
388 R_DEBUG ( "%04X ", i
);
389 for ( j
= 0; j
< 16; j
++ )
391 if ( i
+j
< pUsed
->UserSize
)
393 R_DEBUG ( "%02X ", (unsigned)(unsigned char)Addr
[i
+j
] );
401 for ( j
= 0; j
< 16; j
++ )
403 if ( i
+j
< pUsed
->UserSize
)
406 if ( c
< 0x20 || c
> 0x7E )
420 RUsedRedZoneCheck ( PR_POOL pool
, PR_USED pUsed
, char* Addr
, const char* file
, int line
)
423 unsigned char *LoZone
, *HiZone
;
427 ASSERT ( Addr
>= (char*)pool
->UserBase
&& Addr
< ((char*)pool
->UserBase
+ pool
->UserSize
- 16) );
429 if ( pUsed
->UsedMagic
== R_FREE_MAGIC
)
431 pUsed
->UserSize
= 0; // just to keep from confusion, MmpBadBlock() doesn't return...
432 RiBadBlock ( pUsed
, Addr
, "double-free", file
, line
, 0 );
434 if ( pUsed
->UsedMagic
!= R_USED_MAGIC
)
436 RiBadBlock ( pUsed
, Addr
, "bad magic", file
, line
, 0 );
439 switch ( pUsed
->Status
)
441 case 0: // freed into main pool
443 RiBadBlock ( pUsed
, Addr
, "double-free", file
, line
, 0 );
444 // no need for break here - RiBadBlock doesn't return
445 case 1: // allocated - this is okay
448 RiBadBlock ( pUsed
, Addr
, "corrupt status", file
, line
, 0 );
450 if ( pUsed
->Status
!= 1 )
452 RiBadBlock ( pUsed
, Addr
, "double-free", file
, line
, 0 );
454 if ( pUsed
->Size
> pool
->PoolSize
|| pUsed
->Size
== 0 )
456 RiBadBlock ( pUsed
, Addr
, "invalid size", file
, line
, 0 );
458 if ( pUsed
->UserSize
> pool
->PoolSize
|| pUsed
->UserSize
== 0 )
460 RiBadBlock ( pUsed
, Addr
, "invalid user size", file
, line
, 0 );
462 HiZone
= (unsigned char*)Addr
+ pUsed
->UserSize
;
463 LoZone
= (unsigned char*)Addr
- R_RZ
; // this is to simplify indexing below...
464 for ( i
= 0; i
< R_RZ
&& bLow
&& bHigh
; i
++ )
466 bLow
= bLow
&& ( LoZone
[i
] == R_RZ_LOVALUE
);
467 bHigh
= bHigh
&& ( HiZone
[i
] == R_RZ_HIVALUE
);
469 if ( !bLow
|| !bHigh
)
471 const char* violation
= "High and Low-side redzone overwrite";
472 if ( bHigh
) // high is okay, so it was just low failed
473 violation
= "Low-side redzone overwrite";
474 else if ( bLow
) // low side is okay, so it was just high failed
475 violation
= "High-side redzone overwrite";
476 RiBadBlock ( pUsed
, Addr
, violation
, file
, line
, 1 );
// Return the block physically preceding Block in the pool, found by
// stepping back PrevSize bytes.
// NOTE(review): extraction fragment - the return type and the fall-through
// return for the PrevSize == 0 case (presumably NULL for the first block)
// are not visible here.
482 RPreviousBlock ( PR_FREE Block
)
// PrevSize == 0 marks the first block in the pool (no predecessor).
484 if ( Block
->PrevSize
> 0 )
485 return (PR_FREE
)( (char*)Block
- Block
->PrevSize
);
// Return the block physically following Block, found by stepping forward
// Block->Size bytes; the end of the pool's user area bounds the walk.
// NOTE(review): extraction fragment - the body of the end-of-pool check
// (presumably 'NextBlock = NULL;') and the return statement are missing
// from this view.
490 RNextBlock ( PR_POOL pool
, PR_FREE Block
)
492 PR_FREE NextBlock
= (PR_FREE
)( (char*)Block
+ Block
->Size
);
// Successor computed past the user area means Block was the last block.
493 if ( (char*)NextBlock
>= (char*)pool
->UserBase
+ pool
->UserSize
)
// Translate a used-block header address to the user-visible body address:
// skip the R_USED header plus the low-side redzone.
498 static __inline
void*
499 RHdrToBody ( void* blk
)
501 * FUNCTION: Translate a block header address to the corresponding block
505 return ( (void *) ((char*)blk
+ sizeof(R_USED
) + R_RZ
) );
// Inverse of RHdrToBody(): step back over the low redzone and the R_USED
// header to recover the block header from a user pointer.
// NOTE(review): extraction fragment - the 'return' keyword and the
// (PR_USED) cast wrapping this expression are not visible here.
508 static __inline PR_USED
509 RBodyToHdr ( void* addr
)
512 ( ((char*)addr
) - sizeof(R_USED
) - R_RZ
);
516 RiInFreeChain ( PR_POOL pool
, PR_FREE Block
)
519 Free
= pool
->FirstFree
;
522 while ( Free
!= Block
)
524 Free
= Free
->NextFree
;
532 RPoolRedZoneCheck ( PR_POOL pool
, const char* file
, int line
)
535 PR_USED Block
= (PR_USED
)pool
->UserBase
;
540 switch ( Block
->Status
)
542 case 0: // block is in chain
543 ASSERT ( RiInFreeChain ( pool
, (PR_FREE
)Block
) );
545 case 1: // block is allocated
546 RUsedRedZoneCheck ( pool
, Block
, RHdrToBody(Block
), file
, line
);
548 case 2: // block is in que
549 // nothing to verify here yet
552 ASSERT ( !"invalid status in memory block found in pool!" );
554 NextBlock
= (PR_USED
)RNextBlock(pool
,(PR_FREE
)Block
);
557 ASSERT ( NextBlock
->PrevSize
== Block
->Size
);
562 // now let's step through the list of free pointers and verify
563 // each one can be found by size-jumping...
564 PR_FREE Free
= (PR_FREE
)pool
->FirstFree
;
567 PR_FREE NextFree
= (PR_FREE
)pool
->UserBase
;
568 if ( Free
!= NextFree
)
570 while ( NextFree
!= Free
)
572 NextFree
= RNextBlock ( pool
, NextFree
);
576 Free
= Free
->NextFree
;
// Set Block's size and keep the physically-adjacent chain consistent by
// updating the successor's PrevSize to match.
// NOTE(review): extraction fragment - the guard lines (original 588/590,
// presumably 'if (!NextBlock)' / 'if (NextBlock)') are missing here, so
// the exact NULL-successor handling cannot be confirmed from this view.
582 RSetSize ( PR_POOL pool
, PR_FREE Block
, rulong NewSize
, PR_FREE NextBlock
)
584 R_ASSERT_PTR(pool
,Block
);
// Sanity: a block must hold at least a free header and fit in the pool.
585 ASSERT ( NewSize
< pool
->UserSize
);
586 ASSERT ( NewSize
>= sizeof(R_FREE
) );
587 Block
->Size
= NewSize
;
// Locate the successor ourselves when the caller didn't supply one.
589 NextBlock
= RNextBlock ( pool
, Block
);
591 NextBlock
->PrevSize
= NewSize
;
595 RFreeSplit ( PR_POOL pool
, PR_FREE Block
, rulong NewSize
)
597 PR_FREE NewBlock
= (PR_FREE
)((char*)Block
+ NewSize
);
598 RSetSize ( pool
, NewBlock
, Block
->Size
- NewSize
, NULL
);
599 RSetSize ( pool
, Block
, NewSize
, NewBlock
);
600 RFreeInit ( NewBlock
);
601 RPoolAddFree ( pool
, NewBlock
);
// Coalesce two physically adjacent free blocks: unlink Second from the
// free list and grow First to absorb it. Asserts verify the adjacency
// invariant (Second directly follows First and their size links agree)
// before mutating anything.
606 RFreeMerge ( PR_POOL pool
, PR_FREE First
, PR_FREE Second
)
608 ASSERT ( RPreviousBlock(Second
) == First
);
609 ASSERT ( First
->Size
== Second
->PrevSize
);
610 RPoolRemoveFree ( pool
, Second
);
// NULL lets RSetSize find the new successor itself.
611 RSetSize ( pool
, First
, First
->Size
+ Second
->Size
, NULL
);
615 RPoolReclaim ( PR_POOL pool
, PR_FREE FreeBlock
)
617 PR_FREE NextBlock
, PreviousBlock
;
619 RFreeInit ( FreeBlock
);
620 RPoolAddFree ( pool
, FreeBlock
);
622 // TODO FIXME - don't merge and always insert freed blocks at the end for debugging purposes...
625 * If the next block is immediately adjacent to the newly freed one then
627 * PLEASE DO NOT WIPE OUT 'MAGIC' OR 'LASTOWNER' DATA FOR MERGED FREE BLOCKS
629 NextBlock
= RNextBlock ( pool
, FreeBlock
);
630 if ( NextBlock
!= NULL
&& !NextBlock
->Status
)
632 RFreeMerge ( pool
, FreeBlock
, NextBlock
);
636 * If the previous block is adjacent to the newly freed one then
638 * PLEASE DO NOT WIPE OUT 'MAGIC' OR 'LASTOWNER' DATA FOR MERGED FREE BLOCKS
640 PreviousBlock
= RPreviousBlock ( FreeBlock
);
641 if ( PreviousBlock
!= NULL
&& !PreviousBlock
->Status
)
643 RFreeMerge ( pool
, PreviousBlock
, FreeBlock
);
648 RiUsedInit ( PR_USED Block
, rulong Tag
)
651 RUsedFillStack ( Block
);
653 Block
->UsedMagic
= R_USED_MAGIC
;
655 //ASSERT_SIZE ( Block->Size );
657 // now add the block to the used block list
659 Block
->NextUsed
= (PR_USED
)(ULONG_PTR
)0xDEADBEEF;
666 #define RiUsedInitRedZone(Block,UserSize)
669 RiUsedInitRedZone ( PR_USED Block
, rulong UserSize
)
671 // write out buffer-overrun detection bytes
672 char* Addr
= (char*)RHdrToBody(Block
);
673 Block
->UserSize
= UserSize
;
674 memset ( Addr
- R_RZ
, R_RZ_LOVALUE
, R_RZ
);
675 memset ( Addr
+ Block
->UserSize
, R_RZ_HIVALUE
, R_RZ
);
677 memset ( Addr
, 0xCD, UserSize
);
683 RPoolAlloc ( PR_POOL pool
, rulong NumberOfBytes
, rulong Tag
, rulong align
)
691 void* BestAlignedAddr
;
693 queBytes
= NumberOfBytes
;
696 int que_reclaimed
= 0;
699 ASSERT ( align
< 3 );
701 R_ACQUIRE_MUTEX(pool
);
703 if ( !NumberOfBytes
)
705 R_DEBUG("0 bytes requested - initiating pool verification\n");
706 RPoolRedZoneCheck ( pool
, __FILE__
, __LINE__
);
707 R_RELEASE_MUTEX(pool
);
710 if ( NumberOfBytes
> pool
->PoolSize
)
712 if ( R_IS_POOL_PTR(pool
,NumberOfBytes
) )
714 R_DEBUG("red zone verification requested for block 0x%X\n", NumberOfBytes
);
715 RUsedRedZoneCheck(pool
,RBodyToHdr((void*)(ULONG_PTR
)NumberOfBytes
), (char*)(ULONG_PTR
)NumberOfBytes
, __FILE__
, __LINE__
);
716 R_RELEASE_MUTEX(pool
);
719 R_DEBUG("Invalid allocation request: %i bytes\n", NumberOfBytes
);
720 R_RELEASE_MUTEX(pool
);
724 que
= RQueWhich ( NumberOfBytes
);
727 if ( (NewBlock
= RQueRemove ( &pool
->Que
[que
][align
] )) )
729 RiUsedInit ( NewBlock
, Tag
);
730 RiUsedInitRedZone ( NewBlock
, NumberOfBytes
);
731 R_RELEASE_MUTEX(pool
);
732 return RHdrToBody(NewBlock
);
734 queBytes
= 16 << que
;
738 * Calculate the total number of bytes we will need.
740 BlockSize
= queBytes
+ sizeof(R_USED
) + 2*R_RZ
;
741 if (BlockSize
< sizeof(R_FREE
))
743 /* At least we need the size of the free block header. */
744 BlockSize
= sizeof(R_FREE
);
749 * Find the best-fitting block.
752 Alignment
= pool
->Alignments
[align
];
753 PreviousBlock
= NULL
;
754 BestPreviousBlock
= NULL
,
755 CurrentBlock
= pool
->FirstFree
;
756 BestAlignedAddr
= NULL
;
758 while ( CurrentBlock
!= NULL
)
760 PVOID Addr
= RHdrToBody(CurrentBlock
);
761 PVOID CurrentBlockEnd
= (char*)CurrentBlock
+ CurrentBlock
->Size
;
762 /* calculate last size-aligned address available within this block */
763 PVOID AlignedAddr
= R_ROUND_DOWN((char*)CurrentBlockEnd
-queBytes
-R_RZ
, Alignment
);
764 ASSERT ( (char*)AlignedAddr
+queBytes
+R_RZ
<= (char*)CurrentBlockEnd
);
766 /* special case, this address is already size-aligned, and the right size */
767 if ( Addr
== AlignedAddr
)
769 BestAlignedAddr
= AlignedAddr
;
770 BestPreviousBlock
= PreviousBlock
;
771 BestBlock
= CurrentBlock
;
774 // if we carve out a size-aligned block... is it still past the end of this
775 // block's free header?
776 else if ( (char*)RBodyToHdr(AlignedAddr
)
777 >= (char*)CurrentBlock
+sizeof(R_FREE
) )
780 * there's enough room to allocate our size-aligned memory out
781 * of this block, see if it's a better choice than any previous
784 if ( BestBlock
== NULL
785 || BestBlock
->Size
> CurrentBlock
->Size
)
787 BestAlignedAddr
= AlignedAddr
;
788 BestPreviousBlock
= PreviousBlock
;
789 BestBlock
= CurrentBlock
;
793 PreviousBlock
= CurrentBlock
;
794 CurrentBlock
= CurrentBlock
->NextFree
;
798 * We didn't find anything suitable at all.
800 if (BestBlock
== NULL
)
802 if ( !que_reclaimed
)
806 for ( i
= 0; i
< R_QUECOUNT
; i
++ )
808 for ( j
= 0; j
< 3; j
++ )
810 while ( (BestBlock
= (PR_FREE
)RQueRemove ( &pool
->Que
[i
][j
] )) )
812 RPoolReclaim ( pool
, BestBlock
);
820 DPRINT1("Trying to allocate %lu bytes from paged pool - nothing suitable found, returning NULL\n",
822 R_RELEASE_MUTEX(pool
);
826 * we found a best block. If Addr isn't already aligned, we've pre-qualified that
827 * there's room at the beginning of the block for a free block...
830 void* Addr
= RHdrToBody(BestBlock
);
831 if ( BestAlignedAddr
!= Addr
)
833 PR_FREE NewFreeBlock
= RFreeSplit (
836 (char*)RBodyToHdr(BestAlignedAddr
) - (char*)BestBlock
);
837 ASSERT ( BestAlignedAddr
> Addr
);
839 //DPRINT ( "breaking off preceding bytes into their own block...\n" );
840 /*DPRINT ( "NewFreeBlock 0x%x Size %lu (Old Block's new size %lu) NextFree 0x%x\n",
841 NewFreeBlock, NewFreeBlock->Size, BestBlock->Size, BestBlock->NextFree );*/
843 /* we want the following code to use our size-aligned block */
844 BestPreviousBlock
= BestBlock
;
845 BestBlock
= NewFreeBlock
;
851 * Is there enough space to create a second block from the unused portion.
853 if ( (BestBlock
->Size
- BlockSize
) > sizeof(R_FREE
) )
855 /*DPRINT("BestBlock 0x%x Size 0x%x BlockSize 0x%x NewSize 0x%x\n",
856 BestBlock, BestBlock->Size, BlockSize, NewSize );*/
859 * Create the new free block.
861 NextBlock
= RFreeSplit ( pool
, BestBlock
, BlockSize
);
862 //ASSERT_SIZE ( NextBlock->Size );
865 * Remove the selected block from the list of free blocks.
867 //DPRINT ( "Removing selected block from free block list\n" );
868 RPoolRemoveFree ( pool
, BestBlock
);
870 * Create the new used block header.
872 NewBlock
= (PR_USED
)BestBlock
;
873 RiUsedInit ( NewBlock
, Tag
);
875 /* RtlZeroMemory(RHdrToBody(NewBlock), NumberOfBytes);*/
877 RiUsedInitRedZone ( NewBlock
, NumberOfBytes
);
878 R_RELEASE_MUTEX(pool
);
880 return RHdrToBody(NewBlock
);
884 RPoolFree ( PR_POOL pool
, void* Addr
)
895 R_DEBUG("Attempt to free NULL ptr, initiating Red Zone Check\n" );
896 R_ACQUIRE_MUTEX(pool
);
897 RPoolRedZoneCheck ( pool
, __FILE__
, __LINE__
);
898 R_RELEASE_MUTEX(pool
);
901 R_ASSERT_PTR(pool
,Addr
);
903 UsedBlock
= RBodyToHdr(Addr
);
904 UsedSize
= UsedBlock
->Size
;
905 FreeBlock
= (PR_FREE
)UsedBlock
;
907 UserSize
= UsedBlock
->UserSize
;
909 UserSize
= UsedSize
- sizeof(R_USED
) - 2*R_RZ
;
912 RUsedRedZoneCheck ( pool
, UsedBlock
, Addr
, __FILE__
, __LINE__
);
915 memset ( Addr
, 0xCD, UsedBlock
->UserSize
);
918 que
= RQueWhich ( UserSize
);
921 int queBytes
= 16 << que
;
922 ASSERT( (rulong
)queBytes
>= UserSize
);
926 if ( R_ROUND_UP(Addr
,pool
->Alignments
[2]) == Addr
)
928 else if ( R_ROUND_UP(Addr
,pool
->Alignments
[1]) == Addr
)
930 R_ACQUIRE_MUTEX(pool
);
931 RQueAdd ( &pool
->Que
[que
][align
], UsedBlock
);
932 R_RELEASE_MUTEX(pool
);
937 R_ACQUIRE_MUTEX(pool
);
938 RPoolReclaim ( pool
, FreeBlock
);
939 R_RELEASE_MUTEX(pool
);
944 RPoolDumpByTag ( PR_POOL pool
, rulong Tag
)
946 PR_USED Block
= (PR_USED
)pool
->UserBase
;
951 // TODO FIXME - should we validate params or ASSERT_IRQL?
952 R_DEBUG ( "PagedPool Dump by tag '%s'\n", RFormatTag(Tag
,tag
) );
953 R_DEBUG ( " -BLOCK-- --SIZE--\n" );
955 R_ACQUIRE_MUTEX(pool
);
958 if ( Block
->Status
== 1 && Block
->Tag
== Tag
)
960 R_DEBUG ( " %08X %08X\n", Block
, Block
->Size
);
963 NextBlock
= (PR_USED
)RNextBlock(pool
,(PR_FREE
)Block
);
966 ASSERT ( NextBlock
->PrevSize
== Block
->Size
);
969 R_RELEASE_MUTEX(pool
);
971 R_DEBUG ( "Entries found for tag '%s': %i\n", tag
, count
);
976 RPoolQueryTag ( void* Addr
)
978 PR_USED Block
= RBodyToHdr(Addr
);
979 // TODO FIXME - should we validate params?
981 if ( Block
->UsedMagic
!= R_USED_MAGIC
)
984 if ( Block
->Status
!= 1 )
990 RPoolStats ( PR_POOL pool
)
992 int free
=0, used
=0, qued
=0;
993 PR_USED Block
= (PR_USED
)pool
->UserBase
;
995 R_ACQUIRE_MUTEX(pool
);
998 switch ( Block
->Status
)
1010 ASSERT ( !"Invalid Status for Block in pool!" );
1012 Block
= (PR_USED
)RNextBlock(pool
,(PR_FREE
)Block
);
1014 R_RELEASE_MUTEX(pool
);
1016 R_DEBUG ( "Pool Stats: Free=%i, Used=%i, Qued=%i, Total=%i\n", free
, used
, qued
, (free
+used
+qued
) );
1019 #ifdef R_LARGEST_ALLOC_POSSIBLE
1021 RPoolLargestAllocPossible ( PR_POOL pool
, int align
)
1023 int Alignment
= pool
->Alignments
[align
];
1024 rulong LargestUserSize
= 0;
1025 PR_FREE Block
= (PR_FREE
)pool
->UserBase
;
1028 if ( Block
->Status
!= 1 )
1030 void* Addr
, *AlignedAddr
;
1031 rulong BlockMaxUserSize
;
1034 Addr
= (char*)Block
+ sizeof(R_USED
) + R_RZ
;
1035 AlignedAddr
= R_ROUND_UP(Addr
,Alignment
);
1036 if ( Addr
!= AlignedAddr
)
1037 Addr
= R_ROUND_UP((char*)Block
+ sizeof(R_FREE
) + sizeof(R_USED
) + R_RZ
, Alignment
);
1038 BlockMaxUserSize
= (char*)Block
+ Block
->Size
- (char*)Addr
- R_RZ
;
1039 cue
= RQueWhich ( BlockMaxUserSize
);
1042 cueBytes
= 16 << cue
;
1043 if ( cueBytes
> BlockMaxUserSize
);
1046 BlockMaxUserSize
= 0;
1048 BlockMaxUserSize
= 16 << (cue
-1);
1051 if ( BlockMaxUserSize
> LargestUserSize
)
1052 LargestUserSize
= BlockMaxUserSize
;
1054 Block
= RNextBlock ( pool
, Block
);
1056 return LargestUserSize
;
1058 #endif//R_LARGEST_ALLOC_POSSIBLE