/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/mm/ppool.c
 * PURPOSE:         Implements the paged pool
 * PROGRAMMERS:     David Welch (welch@mcmail.com)
 */
11 /* INCLUDES *****************************************************************/
17 #if defined (ALLOC_PRAGMA)
18 #pragma alloc_text(INIT, MmInitializePagedPool)
22 #define ASSERT(x) if (!(x)) {DbgPrint("Assertion "#x" failed at %s:%d\n", __FILE__,__LINE__); DbgBreakPoint(); }
26 #define R_MUTEX FAST_MUTEX
27 #define R_ACQUIRE_MUTEX(pool) /*DPRINT1("Acquiring PPool Mutex\n");*/ ExAcquireFastMutex(&pool->Mutex)
28 #define R_RELEASE_MUTEX(pool) /*DPRINT1("Releasing PPool Mutex\n");*/ ExReleaseFastMutex(&pool->Mutex)
29 #define R_PRINT_ADDRESS(addr) KeRosPrintAddress(addr)
30 #define R_PANIC() KeBugCheck(MEMORY_MANAGEMENT)
31 #define R_DEBUG DbgPrint
34 #define R_GET_STACK_FRAMES(ptr,cnt)
36 #define R_GET_STACK_FRAMES(ptr,cnt) RtlWalkFrameChain((PVOID*)ptr,cnt, 0)
39 /* GLOBALS ********************************************************************/
41 typedef unsigned long rulong
;
43 #define R_IS_POOL_PTR(pool,ptr) (((void*)(ULONG_PTR)(ptr) >= pool->UserBase) && ((ULONG_PTR)(ptr) < ((ULONG_PTR)pool->UserBase + pool->UserSize)))
44 #define R_ASSERT_PTR(pool,ptr) ASSERT( R_IS_POOL_PTR(pool,ptr) )
45 #define R_ASSERT_SIZE(pool,sz) ASSERT( sz > (sizeof(R_USED)+2*R_RZ) && sz >= sizeof(R_FREE) && sz < pool->UserSize )
48 #define R_ROUND_UP(x,s) ((PVOID)(((ULONG_PTR)(x)+(s)-1) & ~((ULONG_PTR)(s)-1)))
52 #define R_ROUND_DOWN(x,s) ((PVOID)(((ULONG_PTR)(x)) & ~((ULONG_PTR)(s)-1)))
56 // R_QUEMIN is the minimum number of entries to keep in a que
61 // 16, 32, 64, 128, 256, 512
66 // R_RZ is the redzone size
71 #define R_RZ_LOVALUE 0x87
75 #define R_RZ_HIVALUE 0xA5
79 // R_STACK is the number of stack entries to store in blocks for debug purposes
82 #if R_STACK > 0 && R_STACK < 6
83 /* Increase the frame depth to get a reasonable back trace */
86 #endif // R_STACK > 0 && R_STACK < 6
90 // R_TAG do we keep track of tags on a per-memory block basis?
96 # define R_FREE_MAGIC (rulong)(('F'<<0) + ('r'<<8) + ('E'<<16) + ('e'<<24))
99 # define R_USED_MAGIC (rulong)(('u'<<0) + ('S'<<8) + ('e'<<16) + ('D'<<24))
100 # endif//R_USED_MAGIC
103 // **IMPORTANT NOTE** Magic, PrevSize, Size and Status must be at same offset
104 // in both the R_FREE and R_USED structures
106 typedef struct _R_FREE
111 rulong PrevSize
: 30;
115 ULONG_PTR LastOwnerStack
[R_STACK
];
117 struct _R_FREE
* NextFree
;
118 struct _R_FREE
* PrevFree
;
122 typedef struct _R_USED
127 rulong PrevSize
: 30;
131 ULONG_PTR LastOwnerStack
[R_STACK
];
133 struct _R_USED
* NextUsed
;
135 rulong UserSize
; // how many bytes the user actually asked for...
141 typedef struct _R_QUE
148 typedef struct _R_POOL
154 rulong Alignments
[3];
155 PR_FREE FirstFree
, LastFree
;
156 R_QUE Que
[R_QUECOUNT
][3];
161 PVOID MmPagedPoolBase
;
162 ULONG MmPagedPoolSize
;
163 ULONG MmTotalPagedPoolQuota
= 0; // TODO FIXME commented out until we use it
164 static PR_POOL MmPagedPool
= NULL
;
166 /* FUNCTIONS*******************************************************************/
169 #define RiPrintLastOwner(Block)
172 RiPrintLastOwner ( PR_USED Block
)
175 for ( i
= 0; i
< R_STACK
; i
++ )
177 if ( Block
->LastOwnerStack
[i
] != 0xDEADBEEF )
180 //if (!R_PRINT_ADDRESS ((PVOID)Block->LastOwnerStack[i]) )
182 R_DEBUG("<%X>", Block
->LastOwnerStack
[i
] );
190 RQueWhich ( rulong size
)
193 for ( que
=0, quesize
=16; que
< R_QUECOUNT
; que
++, quesize
<<=1 )
195 if ( quesize
>= size
)
204 RQueInit ( PR_QUE que
)
212 RQueAdd ( PR_QUE que
, PR_USED Item
)
216 Item
->NextUsed
= NULL
;
220 que
->First
= que
->Last
= Item
;
223 ASSERT(!que
->Last
->NextUsed
);
224 que
->Last
->NextUsed
= Item
;
229 RQueRemove ( PR_QUE que
)
233 if ( que
->count
< R_QUEMIN
)
239 que
->First
= Item
->NextUsed
;
242 ASSERT ( !que
->First
);
250 RPoolAddFree ( PR_POOL pool
, PR_FREE Item
)
254 if ( !pool
->FirstFree
)
256 pool
->FirstFree
= pool
->LastFree
= Item
;
257 Item
->NextFree
= NULL
;
261 pool
->FirstFree
->PrevFree
= Item
;
262 Item
->NextFree
= pool
->FirstFree
;
263 pool
->FirstFree
= Item
;
265 Item
->PrevFree
= NULL
;
269 RPoolRemoveFree ( PR_POOL pool
, PR_FREE Item
)
273 if ( Item
->NextFree
)
274 Item
->NextFree
->PrevFree
= Item
->PrevFree
;
277 ASSERT ( pool
->LastFree
== Item
);
278 pool
->LastFree
= Item
->PrevFree
;
280 if ( Item
->PrevFree
)
281 Item
->PrevFree
->NextFree
= Item
->NextFree
;
284 ASSERT ( pool
->FirstFree
== Item
);
285 pool
->FirstFree
= Item
->NextFree
;
288 Item
->NextFree
= Item
->PrevFree
= (PR_FREE
)(ULONG_PTR
)0xDEADBEEF;
293 #define RFreeFillStack(free)
294 #define RUsedFillStack(used)
297 RFreeFillStack ( PR_FREE free
)
300 ULONG stack
[R_STACK
+3]; // need to skip 3 known levels of stack trace
301 memset ( stack
, 0xCD, sizeof(stack
) );
302 R_GET_STACK_FRAMES ( stack
, R_STACK
+3 );
303 for ( i
= 0; i
< R_STACK
; i
++ )
304 free
->LastOwnerStack
[i
] = stack
[i
+3];
308 RUsedFillStack ( PR_USED used
)
311 ULONG stack
[R_STACK
+2]; // need to skip 2 known levels of stack trace
312 memset ( stack
, 0xCD, sizeof(stack
) );
313 R_GET_STACK_FRAMES ( stack
, R_STACK
+2 );
314 for ( i
= 0; i
< R_STACK
; i
++ )
315 used
->LastOwnerStack
[i
] = stack
[i
+2];
320 RFreeInit ( void* memory
)
322 PR_FREE block
= (PR_FREE
)memory
;
324 block
->FreeMagic
= R_FREE_MAGIC
;
327 RFreeFillStack ( block
);
329 block
->PrevFree
= block
->NextFree
= (PR_FREE
)(ULONG_PTR
)0xDEADBEEF;
335 RPoolInit ( void* PoolBase
, rulong PoolSize
, int align1
, int align2
, int align3
)
338 PR_POOL pool
= (PR_POOL
)PoolBase
;
340 pool
->PoolBase
= PoolBase
;
341 pool
->PoolSize
= PoolSize
;
342 pool
->UserBase
= (char*)pool
->PoolBase
+ sizeof(R_POOL
);
343 pool
->UserSize
= PoolSize
- sizeof(R_POOL
);
344 pool
->Alignments
[0] = align1
;
345 pool
->Alignments
[1] = align2
;
346 pool
->Alignments
[2] = align3
;
347 pool
->FirstFree
= pool
->LastFree
= NULL
;
350 RFreeInit ( pool
->UserBase
));
352 pool
->FirstFree
->PrevSize
= 0;
353 pool
->FirstFree
->Size
= pool
->UserSize
;
355 for ( que
= 0; que
< R_QUECOUNT
; que
++ )
357 for ( align
= 0; align
< 3; align
++ )
359 RQueInit ( &pool
->Que
[que
][align
] );
367 RFormatTag ( rulong Tag
, char* buf
)
370 *(rulong
*)&buf
[0] = Tag
;
372 for ( i
= 0; i
< 4; i
++ )
382 #define RUsedRedZoneCheck(pUsed,Addr,file,line, printzone)
385 RiBadBlock ( PR_USED pUsed
, char* Addr
, const char* violation
, const char* file
, int line
, int printzone
)
390 R_DEBUG("%s(%i): %s detected for paged pool address 0x%x\n",
391 file
, line
, violation
, Addr
);
394 R_DEBUG ( "UsedMagic 0x%x, ", pUsed
->UsedMagic
);
396 R_DEBUG ( "Tag %s(%X), Size %i, UserSize %i",
397 RFormatTag(pUsed
->Tag
,tag
),
404 unsigned char* HiZone
= (unsigned char*)Addr
+ pUsed
->UserSize
;
405 unsigned char* LoZone
= (unsigned char*)Addr
- R_RZ
; // this is to simplify indexing below...
406 R_DEBUG ( ", LoZone " );
407 for ( i
= 0; i
< R_RZ
; i
++ )
408 R_DEBUG ( "%02x", LoZone
[i
] );
409 R_DEBUG ( ", HiZone " );
410 for ( i
= 0; i
< R_RZ
; i
++ )
411 R_DEBUG ( "%02x", HiZone
[i
] );
415 R_DEBUG ( "First few Stack Frames:" );
416 RiPrintLastOwner ( pUsed
);
419 R_DEBUG ( "Contents of Block:\n" );
420 for ( i
= 0; i
< 8*16 && i
< pUsed
->UserSize
; i
+= 16 )
423 R_DEBUG ( "%04X ", i
);
424 for ( j
= 0; j
< 16; j
++ )
426 if ( i
+j
< pUsed
->UserSize
)
428 R_DEBUG ( "%02X ", (unsigned)(unsigned char)Addr
[i
+j
] );
436 for ( j
= 0; j
< 16; j
++ )
438 if ( i
+j
< pUsed
->UserSize
)
441 if ( c
< 0x20 || c
> 0x7E )
455 RUsedRedZoneCheck ( PR_POOL pool
, PR_USED pUsed
, char* Addr
, const char* file
, int line
)
458 unsigned char *LoZone
, *HiZone
;
462 ASSERT ( Addr
>= (char*)pool
->UserBase
&& Addr
< ((char*)pool
->UserBase
+ pool
->UserSize
- 16) );
464 if ( pUsed
->UsedMagic
== R_FREE_MAGIC
)
466 pUsed
->UserSize
= 0; // just to keep from confusion, MmpBadBlock() doesn't return...
467 RiBadBlock ( pUsed
, Addr
, "double-free", file
, line
, 0 );
469 if ( pUsed
->UsedMagic
!= R_USED_MAGIC
)
471 RiBadBlock ( pUsed
, Addr
, "bad magic", file
, line
, 0 );
474 switch ( pUsed
->Status
)
476 case 0: // freed into main pool
478 RiBadBlock ( pUsed
, Addr
, "double-free", file
, line
, 0 );
479 // no need for break here - RiBadBlock doesn't return
480 case 1: // allocated - this is okay
483 RiBadBlock ( pUsed
, Addr
, "corrupt status", file
, line
, 0 );
485 if ( pUsed
->Status
!= 1 )
487 RiBadBlock ( pUsed
, Addr
, "double-free", file
, line
, 0 );
489 if ( pUsed
->Size
> pool
->PoolSize
|| pUsed
->Size
== 0 )
491 RiBadBlock ( pUsed
, Addr
, "invalid size", file
, line
, 0 );
493 if ( pUsed
->UserSize
> pool
->PoolSize
|| pUsed
->UserSize
== 0 )
495 RiBadBlock ( pUsed
, Addr
, "invalid user size", file
, line
, 0 );
497 HiZone
= (unsigned char*)Addr
+ pUsed
->UserSize
;
498 LoZone
= (unsigned char*)Addr
- R_RZ
; // this is to simplify indexing below...
499 for ( i
= 0; i
< R_RZ
&& bLow
&& bHigh
; i
++ )
501 bLow
= bLow
&& ( LoZone
[i
] == R_RZ_LOVALUE
);
502 bHigh
= bHigh
&& ( HiZone
[i
] == R_RZ_HIVALUE
);
504 if ( !bLow
|| !bHigh
)
506 const char* violation
= "High and Low-side redzone overwrite";
507 if ( bHigh
) // high is okay, so it was just low failed
508 violation
= "Low-side redzone overwrite";
509 else if ( bLow
) // low side is okay, so it was just high failed
510 violation
= "High-side redzone overwrite";
511 RiBadBlock ( pUsed
, Addr
, violation
, file
, line
, 1 );
517 RPreviousBlock ( PR_FREE Block
)
519 if ( Block
->PrevSize
> 0 )
520 return (PR_FREE
)( (char*)Block
- Block
->PrevSize
);
525 RNextBlock ( PR_POOL pool
, PR_FREE Block
)
527 PR_FREE NextBlock
= (PR_FREE
)( (char*)Block
+ Block
->Size
);
528 if ( (char*)NextBlock
>= (char*)pool
->UserBase
+ pool
->UserSize
)
533 static __inline
void*
534 RHdrToBody ( void* blk
)
536 * FUNCTION: Translate a block header address to the corresponding block
540 return ( (void *) ((char*)blk
+ sizeof(R_USED
) + R_RZ
) );
543 static __inline PR_USED
544 RBodyToHdr ( void* addr
)
547 ( ((char*)addr
) - sizeof(R_USED
) - R_RZ
);
551 RiInFreeChain ( PR_POOL pool
, PR_FREE Block
)
554 Free
= pool
->FirstFree
;
557 while ( Free
!= Block
)
559 Free
= Free
->NextFree
;
567 RPoolRedZoneCheck ( PR_POOL pool
, const char* file
, int line
)
570 PR_USED Block
= (PR_USED
)pool
->UserBase
;
575 switch ( Block
->Status
)
577 case 0: // block is in chain
578 ASSERT ( RiInFreeChain ( pool
, (PR_FREE
)Block
) );
580 case 1: // block is allocated
581 RUsedRedZoneCheck ( pool
, Block
, RHdrToBody(Block
), file
, line
);
583 case 2: // block is in que
584 // nothing to verify here yet
587 ASSERT ( !"invalid status in memory block found in pool!" );
589 NextBlock
= (PR_USED
)RNextBlock(pool
,(PR_FREE
)Block
);
592 ASSERT ( NextBlock
->PrevSize
== Block
->Size
);
597 // now let's step through the list of free pointers and verify
598 // each one can be found by size-jumping...
599 PR_FREE Free
= (PR_FREE
)pool
->FirstFree
;
602 PR_FREE NextFree
= (PR_FREE
)pool
->UserBase
;
603 if ( Free
!= NextFree
)
605 while ( NextFree
!= Free
)
607 NextFree
= RNextBlock ( pool
, NextFree
);
611 Free
= Free
->NextFree
;
617 RSetSize ( PR_POOL pool
, PR_FREE Block
, rulong NewSize
, PR_FREE NextBlock
)
619 R_ASSERT_PTR(pool
,Block
);
620 ASSERT ( NewSize
< pool
->UserSize
);
621 ASSERT ( NewSize
>= sizeof(R_FREE
) );
622 Block
->Size
= NewSize
;
624 NextBlock
= RNextBlock ( pool
, Block
);
626 NextBlock
->PrevSize
= NewSize
;
630 RFreeSplit ( PR_POOL pool
, PR_FREE Block
, rulong NewSize
)
632 PR_FREE NewBlock
= (PR_FREE
)((char*)Block
+ NewSize
);
633 RSetSize ( pool
, NewBlock
, Block
->Size
- NewSize
, NULL
);
634 RSetSize ( pool
, Block
, NewSize
, NewBlock
);
635 RFreeInit ( NewBlock
);
636 RPoolAddFree ( pool
, NewBlock
);
641 RFreeMerge ( PR_POOL pool
, PR_FREE First
, PR_FREE Second
)
643 ASSERT ( RPreviousBlock(Second
) == First
);
644 ASSERT ( First
->Size
== Second
->PrevSize
);
645 RPoolRemoveFree ( pool
, Second
);
646 RSetSize ( pool
, First
, First
->Size
+ Second
->Size
, NULL
);
650 RPoolReclaim ( PR_POOL pool
, PR_FREE FreeBlock
)
652 PR_FREE NextBlock
, PreviousBlock
;
654 RFreeInit ( FreeBlock
);
655 RPoolAddFree ( pool
, FreeBlock
);
657 // TODO FIXME - don't merge and always insert freed blocks at the end for debugging purposes...
660 * If the next block is immediately adjacent to the newly freed one then
662 * PLEASE DO NOT WIPE OUT 'MAGIC' OR 'LASTOWNER' DATA FOR MERGED FREE BLOCKS
664 NextBlock
= RNextBlock ( pool
, FreeBlock
);
665 if ( NextBlock
!= NULL
&& !NextBlock
->Status
)
667 RFreeMerge ( pool
, FreeBlock
, NextBlock
);
671 * If the previous block is adjacent to the newly freed one then
673 * PLEASE DO NOT WIPE OUT 'MAGIC' OR 'LASTOWNER' DATA FOR MERGED FREE BLOCKS
675 PreviousBlock
= RPreviousBlock ( FreeBlock
);
676 if ( PreviousBlock
!= NULL
&& !PreviousBlock
->Status
)
678 RFreeMerge ( pool
, PreviousBlock
, FreeBlock
);
683 RiUsedInit ( PR_USED Block
, rulong Tag
)
686 RUsedFillStack ( Block
);
688 Block
->UsedMagic
= R_USED_MAGIC
;
690 //ASSERT_SIZE ( Block->Size );
692 // now add the block to the used block list
694 Block
->NextUsed
= (PR_USED
)(ULONG_PTR
)0xDEADBEEF;
701 #define RiUsedInitRedZone(Block,UserSize)
704 RiUsedInitRedZone ( PR_USED Block
, rulong UserSize
)
706 // write out buffer-overrun detection bytes
707 char* Addr
= (char*)RHdrToBody(Block
);
708 Block
->UserSize
= UserSize
;
709 memset ( Addr
- R_RZ
, R_RZ_LOVALUE
, R_RZ
);
710 memset ( Addr
+ Block
->UserSize
, R_RZ_HIVALUE
, R_RZ
);
712 memset ( Addr
, 0xCD, UserSize
);
718 RPoolAlloc ( PR_POOL pool
, rulong NumberOfBytes
, rulong Tag
, rulong align
)
726 void* BestAlignedAddr
;
728 queBytes
= NumberOfBytes
;
731 int que_reclaimed
= 0;
734 ASSERT ( align
< 3 );
736 R_ACQUIRE_MUTEX(pool
);
738 if ( !NumberOfBytes
)
740 R_DEBUG("0 bytes requested - initiating pool verification\n");
741 RPoolRedZoneCheck ( pool
, __FILE__
, __LINE__
);
742 R_RELEASE_MUTEX(pool
);
745 if ( NumberOfBytes
> pool
->PoolSize
)
747 if ( R_IS_POOL_PTR(pool
,NumberOfBytes
) )
749 R_DEBUG("red zone verification requested for block 0x%X\n", NumberOfBytes
);
750 RUsedRedZoneCheck(pool
,RBodyToHdr((void*)(ULONG_PTR
)NumberOfBytes
), (char*)(ULONG_PTR
)NumberOfBytes
, __FILE__
, __LINE__
);
751 R_RELEASE_MUTEX(pool
);
754 R_DEBUG("Invalid allocation request: %i bytes\n", NumberOfBytes
);
755 R_RELEASE_MUTEX(pool
);
759 que
= RQueWhich ( NumberOfBytes
);
762 if ( (NewBlock
= RQueRemove ( &pool
->Que
[que
][align
] )) )
764 RiUsedInit ( NewBlock
, Tag
);
765 RiUsedInitRedZone ( NewBlock
, NumberOfBytes
);
766 R_RELEASE_MUTEX(pool
);
767 return RHdrToBody(NewBlock
);
769 queBytes
= 16 << que
;
773 * Calculate the total number of bytes we will need.
775 BlockSize
= queBytes
+ sizeof(R_USED
) + 2*R_RZ
;
776 if (BlockSize
< sizeof(R_FREE
))
778 /* At least we need the size of the free block header. */
779 BlockSize
= sizeof(R_FREE
);
784 * Find the best-fitting block.
787 Alignment
= pool
->Alignments
[align
];
788 PreviousBlock
= NULL
;
789 BestPreviousBlock
= NULL
,
790 CurrentBlock
= pool
->FirstFree
;
791 BestAlignedAddr
= NULL
;
793 while ( CurrentBlock
!= NULL
)
795 PVOID Addr
= RHdrToBody(CurrentBlock
);
796 PVOID CurrentBlockEnd
= (char*)CurrentBlock
+ CurrentBlock
->Size
;
797 /* calculate last size-aligned address available within this block */
798 PVOID AlignedAddr
= R_ROUND_DOWN((char*)CurrentBlockEnd
-queBytes
-R_RZ
, Alignment
);
799 ASSERT ( (char*)AlignedAddr
+queBytes
+R_RZ
<= (char*)CurrentBlockEnd
);
801 /* special case, this address is already size-aligned, and the right size */
802 if ( Addr
== AlignedAddr
)
804 BestAlignedAddr
= AlignedAddr
;
805 BestPreviousBlock
= PreviousBlock
;
806 BestBlock
= CurrentBlock
;
809 // if we carve out a size-aligned block... is it still past the end of this
810 // block's free header?
811 else if ( (char*)RBodyToHdr(AlignedAddr
)
812 >= (char*)CurrentBlock
+sizeof(R_FREE
) )
815 * there's enough room to allocate our size-aligned memory out
816 * of this block, see if it's a better choice than any previous
819 if ( BestBlock
== NULL
820 || BestBlock
->Size
> CurrentBlock
->Size
)
822 BestAlignedAddr
= AlignedAddr
;
823 BestPreviousBlock
= PreviousBlock
;
824 BestBlock
= CurrentBlock
;
828 PreviousBlock
= CurrentBlock
;
829 CurrentBlock
= CurrentBlock
->NextFree
;
833 * We didn't find anything suitable at all.
835 if (BestBlock
== NULL
)
837 if ( !que_reclaimed
)
841 for ( i
= 0; i
< R_QUECOUNT
; i
++ )
843 for ( j
= 0; j
< 3; j
++ )
845 while ( (BestBlock
= (PR_FREE
)RQueRemove ( &pool
->Que
[i
][j
] )) )
847 RPoolReclaim ( pool
, BestBlock
);
855 DPRINT1("Trying to allocate %lu bytes from paged pool - nothing suitable found, returning NULL\n",
857 R_RELEASE_MUTEX(pool
);
861 * we found a best block. If Addr isn't already aligned, we've pre-qualified that
862 * there's room at the beginning of the block for a free block...
865 void* Addr
= RHdrToBody(BestBlock
);
866 if ( BestAlignedAddr
!= Addr
)
868 PR_FREE NewFreeBlock
= RFreeSplit (
871 (char*)RBodyToHdr(BestAlignedAddr
) - (char*)BestBlock
);
872 ASSERT ( BestAlignedAddr
> Addr
);
874 //DPRINT ( "breaking off preceding bytes into their own block...\n" );
875 /*DPRINT ( "NewFreeBlock 0x%x Size %lu (Old Block's new size %lu) NextFree 0x%x\n",
876 NewFreeBlock, NewFreeBlock->Size, BestBlock->Size, BestBlock->NextFree );*/
878 /* we want the following code to use our size-aligned block */
879 BestPreviousBlock
= BestBlock
;
880 BestBlock
= NewFreeBlock
;
886 * Is there enough space to create a second block from the unused portion.
888 if ( (BestBlock
->Size
- BlockSize
) > sizeof(R_FREE
) )
890 /*DPRINT("BestBlock 0x%x Size 0x%x BlockSize 0x%x NewSize 0x%x\n",
891 BestBlock, BestBlock->Size, BlockSize, NewSize );*/
894 * Create the new free block.
896 NextBlock
= RFreeSplit ( pool
, BestBlock
, BlockSize
);
897 //ASSERT_SIZE ( NextBlock->Size );
900 * Remove the selected block from the list of free blocks.
902 //DPRINT ( "Removing selected block from free block list\n" );
903 RPoolRemoveFree ( pool
, BestBlock
);
905 * Create the new used block header.
907 NewBlock
= (PR_USED
)BestBlock
;
908 RiUsedInit ( NewBlock
, Tag
);
910 /* RtlZeroMemory(RHdrToBody(NewBlock), NumberOfBytes);*/
912 RiUsedInitRedZone ( NewBlock
, NumberOfBytes
);
913 R_RELEASE_MUTEX(pool
);
915 return RHdrToBody(NewBlock
);
919 RPoolFree ( PR_POOL pool
, void* Addr
)
930 R_DEBUG("Attempt to free NULL ptr, initiating Red Zone Check\n" );
931 R_ACQUIRE_MUTEX(pool
);
932 RPoolRedZoneCheck ( pool
, __FILE__
, __LINE__
);
933 R_RELEASE_MUTEX(pool
);
936 R_ASSERT_PTR(pool
,Addr
);
938 UsedBlock
= RBodyToHdr(Addr
);
939 UsedSize
= UsedBlock
->Size
;
940 FreeBlock
= (PR_FREE
)UsedBlock
;
942 UserSize
= UsedBlock
->UserSize
;
944 UserSize
= UsedSize
- sizeof(R_USED
) - 2*R_RZ
;
947 RUsedRedZoneCheck ( pool
, UsedBlock
, Addr
, __FILE__
, __LINE__
);
950 memset ( Addr
, 0xCD, UsedBlock
->UserSize
);
953 que
= RQueWhich ( UserSize
);
956 int queBytes
= 16 << que
;
957 ASSERT( (rulong
)queBytes
>= UserSize
);
961 if ( R_ROUND_UP(Addr
,pool
->Alignments
[2]) == Addr
)
963 else if ( R_ROUND_UP(Addr
,pool
->Alignments
[1]) == Addr
)
965 R_ACQUIRE_MUTEX(pool
);
966 RQueAdd ( &pool
->Que
[que
][align
], UsedBlock
);
967 R_RELEASE_MUTEX(pool
);
972 R_ACQUIRE_MUTEX(pool
);
973 RPoolReclaim ( pool
, FreeBlock
);
974 R_RELEASE_MUTEX(pool
);
980 MmInitializePagedPool(VOID
)
983 * We are still at a high IRQL level at this point so explicitly commit
984 * the first page of the paged pool before writing the first block header.
986 MmCommitPagedPoolAddress ( (PVOID
)MmPagedPoolBase
, FALSE
);
988 MmPagedPool
= RPoolInit ( MmPagedPoolBase
,
994 ExInitializeFastMutex(&MmPagedPool
->Mutex
);
998 ExAllocatePagedPoolWithTag (IN POOL_TYPE PoolType
,
999 IN ULONG NumberOfBytes
,
1004 if ( NumberOfBytes
>= PAGE_SIZE
)
1006 else if ( PoolType
& CACHE_ALIGNED_POOL_MASK
)
1011 ASSERT_IRQL_LESS_OR_EQUAL(APC_LEVEL
);
1013 return RPoolAlloc ( MmPagedPool
, NumberOfBytes
, Tag
, align
);
1017 ExFreePagedPool(IN PVOID Block
)
1019 ASSERT_IRQL_LESS_OR_EQUAL(APC_LEVEL
);
1020 RPoolFree ( MmPagedPool
, Block
);