Merge 36852, 37322, 37333, 37334, 43428, 43451, 44259, 46404 from amd64 branch.
[reactos.git] / reactos / ntoskrnl / mm / ppool.c
index 1e5dd3d..bfeb42b 100644 (file)
@@ -1,5 +1,4 @@
-/* $Id$
- *
+/*
  * COPYRIGHT:       See COPYING in the top level directory
  * PROJECT:         ReactOS kernel
  * FILE:            ntoskrnl/mm/ppool.c
 
 #include <ntoskrnl.h>
 #define NDEBUG
-#include <internal/debug.h>
+#include <debug.h>
 
 #if defined (ALLOC_PRAGMA)
 #pragma alloc_text(INIT, MmInitializePagedPool)
 #endif
 
 #undef ASSERT
-#define ASSERT(x) if (!(x)) {DbgPrint("Assertion "#x" failed at %s:%d\n", __FILE__,__LINE__); KeBugCheck(0); }
+#define ASSERT(x) if (!(x)) {DbgPrint("Assertion "#x" failed at %s:%d\n", __FILE__,__LINE__); DbgBreakPoint(); }
 
 // enable "magic"
 //#define R_MAGIC
 #define R_MUTEX FAST_MUTEX
 #define R_ACQUIRE_MUTEX(pool) /*DPRINT1("Acquiring PPool Mutex\n");*/ ExAcquireFastMutex(&pool->Mutex)
 #define R_RELEASE_MUTEX(pool) /*DPRINT1("Releasing PPool Mutex\n");*/ ExReleaseFastMutex(&pool->Mutex)
-#define R_PRINT_ADDRESS(addr) KiRosPrintAddress(addr)
-#define R_PANIC() KeBugCheck(0)
+#define R_PRINT_ADDRESS(addr) KeRosPrintAddress(addr)
+#define R_PANIC() KeBugCheck(MEMORY_MANAGEMENT)
 #define R_DEBUG DbgPrint
+
+#ifdef _ARM_
+#define R_GET_STACK_FRAMES(ptr,cnt) 
+#else
 #define R_GET_STACK_FRAMES(ptr,cnt) RtlWalkFrameChain((PVOID*)ptr,cnt, 0)
+#endif
 
-#include "rpoolmgr.h"
+/* GLOBALS ********************************************************************/
+
+typedef unsigned long rulong;
+
+#define R_IS_POOL_PTR(pool,ptr) (((void*)(ULONG_PTR)(ptr) >= pool->UserBase) && ((ULONG_PTR)(ptr) < ((ULONG_PTR)pool->UserBase + pool->UserSize)))
+#define R_ASSERT_PTR(pool,ptr) ASSERT( R_IS_POOL_PTR(pool,ptr) )
+#define R_ASSERT_SIZE(pool,sz) ASSERT( sz > (sizeof(R_USED)+2*R_RZ) && sz >= sizeof(R_FREE) && sz < pool->UserSize )
+
+#ifndef R_ROUND_UP
+#define R_ROUND_UP(x,s)    ((PVOID)(((ULONG_PTR)(x)+(s)-1) & ~((ULONG_PTR)(s)-1)))
+#endif//R_ROUND_UP
+
+#ifndef R_ROUND_DOWN
+#define R_ROUND_DOWN(x,s)  ((PVOID)(((ULONG_PTR)(x)) & ~((ULONG_PTR)(s)-1)))
+#endif//R_ROUND_DOWN
+
+#ifndef R_QUEMIN
+// R_QUEMIN is the minimum number of entries to keep in a que
+#define R_QUEMIN 0
+#endif//R_QUEMIN
+
+#ifndef R_QUECOUNT
+// 16, 32, 64, 128, 256, 512
+#define R_QUECOUNT 6
+#endif//R_QUECOUNT
+
+#ifndef R_RZ
+// R_RZ is the redzone size
+#define R_RZ 4
+#endif//R_RZ
+
+#ifndef R_RZ_LOVALUE
+#define R_RZ_LOVALUE 0x87
+#endif//R_RZ_LOVALUE
+
+#ifndef R_RZ_HIVALUE
+#define R_RZ_HIVALUE 0xA5
+#endif//R_RZ_HIVALUE
+
+#ifndef R_STACK
+// R_STACK is the number of stack entries to store in blocks for debug purposes
+#define R_STACK 6
+#else // R_STACK
+#if R_STACK > 0 && R_STACK < 6
+/* Increase the frame depth to get a reasonable back trace */
+#undef R_STACK
+#define R_STACK 6
+#endif // R_STACK > 0 && R_STACK < 6
+#endif//R_STACK
+
+#ifndef R_TAG
+// R_TAG do we keep track of tags on a per-memory block basis?
+#define R_TAG 0
+#endif//R_TAG
+
+#ifdef R_MAGIC
+#      ifndef R_FREE_MAGIC
+#              define R_FREE_MAGIC (rulong)(('F'<<0) + ('r'<<8) + ('E'<<16) + ('e'<<24))
+#      endif//R_FREE_MAGIC
+#      ifndef R_USED_MAGIC
+#              define R_USED_MAGIC (rulong)(('u'<<0) + ('S'<<8) + ('e'<<16) + ('D'<<24))
+#      endif//R_USED_MAGIC
+#endif//R_MAGIC
+
+// **IMPORTANT NOTE** Magic, PrevSize, Size and Status must be at same offset
+// in both the R_FREE and R_USED structures
+
+// Boundary-tag block headers. **IMPORTANT** Magic, PrevSize, Size and Status
+// must stay at the same offsets in both R_FREE and R_USED so a block can be
+// reinterpreted as either. Status values (grounded in RFreeInit/RiUsedInit/
+// RQueAdd below): 0 = free (on main free list), 1 = allocated, 2 = parked in
+// a lookaside que.
+typedef struct _R_FREE
+{
+#ifdef R_MAGIC
+	rulong FreeMagic;
+#endif//R_MAGIC
+	rulong PrevSize : 30;
+	rulong Status : 2;
+	rulong Size;
+#if R_STACK
+	ULONG_PTR LastOwnerStack[R_STACK];
+#endif//R_STACK
+	struct _R_FREE* NextFree;
+	struct _R_FREE* PrevFree;
+}
+R_FREE, *PR_FREE;
 
-/* GLOBALS *******************************************************************/
+// Header of an allocated (or que-parked) block; body starts at
+// (char*)hdr + sizeof(R_USED) + R_RZ (see RHdrToBody/RBodyToHdr).
+typedef struct _R_USED
+{
+#ifdef R_MAGIC
+	rulong UsedMagic;
+#endif//R_MAGIC
+	rulong PrevSize : 30;
+	rulong Status : 2;
+	rulong Size;
+#if R_STACK
+	ULONG_PTR LastOwnerStack[R_STACK];
+#endif//R_STACK
+	struct _R_USED* NextUsed;
+#if R_RZ
+	rulong UserSize; // how many bytes the user actually asked for...
+#endif//R_RZ
+	rulong Tag;
+}
+R_USED, *PR_USED;
+
+// FIFO lookaside que of recently freed fixed-size blocks.
+typedef struct _R_QUE
+{
+	rulong Count;
+	PR_USED First, Last;
+}
+R_QUE, *PR_QUE;
+
+// Pool descriptor, stored at the very start of the pool memory itself
+// (see RPoolInit); user-allocatable space follows at UserBase.
+typedef struct _R_POOL
+{
+	void* PoolBase;
+	rulong PoolSize;
+	void* UserBase;
+	rulong UserSize;
+	rulong Alignments[3];
+	PR_FREE FirstFree, LastFree;
+	R_QUE Que[R_QUECOUNT][3];
+	R_MUTEX Mutex;
+}
+R_POOL, *PR_POOL;
 
 PVOID MmPagedPoolBase;
-ULONG MmPagedPoolSize;
-ULONG MmTotalPagedPoolQuota = 0; // TODO FIXME commented out until we use it
+SIZE_T MmPagedPoolSize;
+SIZE_T MmTotalPagedPoolQuota = 0; // TODO FIXME commented out until we use it
 static PR_POOL MmPagedPool = NULL;
 
-/* FUNCTIONS *****************************************************************/
+/* FUNCTIONS ******************************************************************/
 
-VOID
-INIT_FUNCTION
-NTAPI
-MmInitializePagedPool(VOID)
+#if !R_STACK
+#define RiPrintLastOwner(Block)
+#else
+/*
+ * Dump the saved allocation back-trace of a block to the debug log.
+ * Slots still holding the 0xDEADBEEF fill pattern are unset and skipped.
+ */
+static void
+RiPrintLastOwner ( PR_USED Block )
 {
-	/*
-	 * We are still at a high IRQL level at this point so explicitly commit
-	 * the first page of the paged pool before writing the first block header.
-	 */
-	MmCommitPagedPoolAddress ( (PVOID)MmPagedPoolBase, FALSE );
+	int i;
+	for ( i = 0; i < R_STACK; i++ )
+	{
+		if ( Block->LastOwnerStack[i] != 0xDEADBEEF )
+		{
+			R_DEBUG(" ");
+			//if (!R_PRINT_ADDRESS ((PVOID)Block->LastOwnerStack[i]) )
+			{
+				R_DEBUG("<%X>", Block->LastOwnerStack[i] );
+			}
+		}
+	}
+}
+#endif//R_STACK
 
-       MmPagedPool = RPoolInit ( MmPagedPoolBase,
-               MmPagedPoolSize,
-               MM_POOL_ALIGNMENT,
-               MM_CACHE_LINE_SIZE,
-               PAGE_SIZE );
+/*
+ * Map an allocation size to the index of the smallest lookaside que
+ * that can hold it (ques are sized 16 << index, for R_QUECOUNT ques).
+ * Returns -1 when the size exceeds the largest que.
+ */
+static int
+RQueWhich ( rulong size )
+{
+	rulong que, quesize;
+	for ( que=0, quesize=16; que < R_QUECOUNT; que++, quesize<<=1 )
+	{
+		if ( quesize >= size )
+		{
+			return que;
+		}
+	}
+	return -1;
+}
 
-       ExInitializeFastMutex(&MmPagedPool->Mutex);
+/* Reset a lookaside que to the empty state. */
+static void
+RQueInit ( PR_QUE que )
+{
+	que->Count = 0;
+	que->First = NULL;
+	que->Last = NULL;
 
-/**********************************************************************
- * NAME       INTERNAL
- * ExAllocatePagedPoolWithTag@12
- *
- * DESCRIPTION
- *
- * ARGUMENTS
- *
- * RETURN VALUE
- */
-PVOID STDCALL
-ExAllocatePagedPoolWithTag (IN POOL_TYPE PoolType,
-                            IN ULONG  NumberOfBytes,
-                            IN ULONG  Tag)
+/*
+ * Append a freed block to the tail of a lookaside que (FIFO).
+ * Marks the block Status = 2 ("parked in que").
+ */
+static void
+RQueAdd ( PR_QUE que, PR_USED Item )
 {
-	int align;
+	ASSERT(Item);
+	Item->Status = 2;
+	Item->NextUsed = NULL;
+	++que->Count;
+	if ( !que->Last )
+	{
+		que->First = que->Last = Item;
+		return;
+	}
+	ASSERT(!que->Last->NextUsed);
+	que->Last->NextUsed = Item;
+	que->Last = Item;
+}
 
-       if ( NumberOfBytes >= PAGE_SIZE )
-               align = 2;
-       else if ( PoolType & CACHE_ALIGNED_POOL_MASK )
-               align = 1;
+/*
+ * Pop the oldest block (FIFO head) from a lookaside que; returns NULL when
+ * the que is empty or below its configured minimum fill level (R_QUEMIN).
+ * The returned block's Status is reset to 0 (free).
+ */
+static PR_USED
+RQueRemove ( PR_QUE que )
+{
+	PR_USED Item;
+#if R_QUEMIN
+	/* BUGFIX: the field is spelled 'Count' - 'count' broke the build
+	   whenever R_QUEMIN was configured non-zero (compiled out by default) */
+	if ( que->Count < R_QUEMIN )
+		return NULL;
+#endif
+	if ( !que->First )
+		return NULL;
+	Item = que->First;
+	que->First = Item->NextUsed;
+	if ( !--que->Count )
+	{
+		ASSERT ( !que->First );
+		que->Last = NULL;
+	}
+	Item->Status = 0;
+	return Item;
+}
+
+/* Push a free block onto the head of the pool's doubly-linked free list. */
+static void
+RPoolAddFree ( PR_POOL pool, PR_FREE Item )
+{
+	ASSERT(pool);
+	ASSERT(Item);
+	if ( !pool->FirstFree )
+	{
+		pool->FirstFree = pool->LastFree = Item;
+		Item->NextFree = NULL;
+	}
 	else
-		align = 0;
+	{
+		pool->FirstFree->PrevFree = Item;
+		Item->NextFree = pool->FirstFree;
+		pool->FirstFree = Item;
+	}
+	Item->PrevFree = NULL;
+}
 
-       ASSERT_IRQL(APC_LEVEL);
+/*
+ * Unlink a block from the pool's free list, fixing head/tail pointers.
+ * In debug builds the stale links are poisoned with 0xDEADBEEF so a
+ * use-after-unlink is obvious.
+ */
+static void
+RPoolRemoveFree ( PR_POOL pool, PR_FREE Item )
+{
+	ASSERT(pool);
+	ASSERT(Item);
+	if ( Item->NextFree )
+		Item->NextFree->PrevFree = Item->PrevFree;
+	else
+	{
+		ASSERT ( pool->LastFree == Item );
+		pool->LastFree = Item->PrevFree;
+	}
+	if ( Item->PrevFree )
+		Item->PrevFree->NextFree = Item->NextFree;
+	else
+	{
+		ASSERT ( pool->FirstFree == Item );
+		pool->FirstFree = Item->NextFree;
+	}
+#if DBG
+	Item->NextFree = Item->PrevFree = (PR_FREE)(ULONG_PTR)0xDEADBEEF;
+#endif//DBG
+}
 
-       return RPoolAlloc ( MmPagedPool, NumberOfBytes, Tag, align );
+#if !R_STACK
+#define RFreeFillStack(free)
+#define RUsedFillStack(used)
+#else
+/*
+ * Record the caller's back-trace into a free block header, skipping the
+ * innermost frames belonging to the allocator itself.
+ */
+static void
+RFreeFillStack ( PR_FREE free )
+{
+	int i;
+	ULONG stack[R_STACK+3]; // need to skip 3 known levels of stack trace
+	memset ( stack, 0xCD, sizeof(stack) );
+	R_GET_STACK_FRAMES ( stack, R_STACK+3 );
+	for ( i = 0; i < R_STACK; i++ )
+		free->LastOwnerStack[i] = stack[i+3];
 }
 
-VOID STDCALL
-ExFreePagedPool(IN PVOID Block)
+/*
+ * Record the caller's back-trace into a used block header; one fewer
+ * allocator frame to skip than in RFreeFillStack.
+ */
+static void
+RUsedFillStack ( PR_USED used )
 {
-	ASSERT_IRQL(APC_LEVEL);
-	RPoolFree ( MmPagedPool, Block );
+	int i;
+	ULONG stack[R_STACK+2]; // need to skip 2 known levels of stack trace
+	memset ( stack, 0xCD, sizeof(stack) );
+	R_GET_STACK_FRAMES ( stack, R_STACK+2 );
+	for ( i = 0; i < R_STACK; i++ )
+		used->LastOwnerStack[i] = stack[i+2];
+}
+#endif
+
+/*
+ * Stamp a raw memory range as a free block: sets Status = 0, records the
+ * freeing back-trace and (when magic checking is on) the free magic value.
+ * Size/PrevSize are NOT touched here - callers set them separately.
+ */
+static PR_FREE
+RFreeInit ( void* memory )
+{
+	PR_FREE block = (PR_FREE)memory;
+#ifdef R_MAGIC
+	/* BUGFIX: was '#if R_FREEMAGIC', a macro that is never defined, so the
+	   magic was never written and double-free detection could not work */
+	block->FreeMagic = R_FREE_MAGIC;
+#endif//R_MAGIC
+	block->Status = 0;
+	RFreeFillStack ( block );
+#if DBG
+	block->PrevFree = block->NextFree = (PR_FREE)(ULONG_PTR)0xDEADBEEF;
+#endif//DBG
+	return block;
+}
+
+/*
+ * Initialize a pool inside the caller-supplied memory range. The R_POOL
+ * descriptor lives at the start of the range; the remainder becomes one
+ * big free block. align1..align3 are the three selectable alignments
+ * (indexed by the 'align' argument of RPoolAlloc).
+ */
+PR_POOL
+RPoolInit ( void* PoolBase, rulong PoolSize, int align1, int align2, int align3 )
+{
+	int align, que;
+	PR_POOL pool = (PR_POOL)PoolBase;
+
+	pool->PoolBase = PoolBase;
+	pool->PoolSize = PoolSize;
+	pool->UserBase = (char*)pool->PoolBase + sizeof(R_POOL);
+	pool->UserSize = PoolSize - sizeof(R_POOL);
+	pool->Alignments[0] = align1;
+	pool->Alignments[1] = align2;
+	pool->Alignments[2] = align3;
+	pool->FirstFree = pool->LastFree = NULL;
+
+	RPoolAddFree ( pool,
+		RFreeInit ( pool->UserBase ));
+
+	pool->FirstFree->PrevSize = 0;
+	pool->FirstFree->Size = pool->UserSize;
+
+	for ( que = 0; que < R_QUECOUNT; que++ )
+	{
+		for ( align = 0; align < 3; align++ )
+		{
+			RQueInit ( &pool->Que[que][align] );
+		}
+	}
+	return pool;
+}
+
+#if R_RZ
+/*
+ * Render a 4-byte pool tag into buf (at least 5 bytes) as printable text,
+ * replacing NUL bytes with spaces.
+ * NOTE(review): the rulong store assumes buf is suitably aligned and byte
+ * order matches how tags are packed - fine for the diagnostic use here.
+ */
+static const char*
+RFormatTag ( rulong Tag, char* buf )
+{
+	int i;
+	*(rulong*)&buf[0] = Tag;
+	buf[4] = 0;
+	for ( i = 0; i < 4; i++ )
+	{
+		if ( !buf[i] )
+			buf[i] = ' ';
+	}
+	return buf;
+}
+#endif
+
+#if !R_RZ
+#define RUsedRedZoneCheck(pUsed,Addr,file,line, printzone)
+#else//R_RZ
+/*
+ * Report a corrupted/misused block: dumps tag, sizes, optionally the two
+ * red zones, the recorded owner back-trace and a hex/ASCII dump of the
+ * first 128 bytes of the body. Does NOT return - ends in R_PANIC()
+ * (KeBugCheck in kernel builds).
+ */
+static void
+RiBadBlock ( PR_USED pUsed, char* Addr, const char* violation, const char* file, int line, int printzone )
+{
+	char tag[5];
+	unsigned int i;
+
+	R_DEBUG("%s(%i): %s detected for paged pool address 0x%x\n",
+		file, line, violation, Addr );
+
+#ifdef R_MAGIC
+	R_DEBUG ( "UsedMagic 0x%x, ", pUsed->UsedMagic );
+#endif//R_MAGIC
+	R_DEBUG ( "Tag %s(%X), Size %i, UserSize %i",
+		RFormatTag(pUsed->Tag,tag),
+		pUsed->Tag,
+		pUsed->Size,
+		pUsed->UserSize );
+
+	if ( printzone )
+	{
+		unsigned char* HiZone = (unsigned char*)Addr + pUsed->UserSize;
+		unsigned char* LoZone = (unsigned char*)Addr - R_RZ; // this is to simplify indexing below...
+		R_DEBUG ( ", LoZone " );
+		for ( i = 0; i < R_RZ; i++ )
+			R_DEBUG ( "%02x", LoZone[i] );
+		R_DEBUG ( ", HiZone " );
+		for ( i = 0; i < R_RZ; i++ )
+			R_DEBUG ( "%02x", HiZone[i] );
+	}
+	R_DEBUG ( "\n" );
+
+	R_DEBUG ( "First few Stack Frames:" );
+	RiPrintLastOwner ( pUsed );
+	R_DEBUG ( "\n" );
+
+	R_DEBUG ( "Contents of Block:\n" );
+	/* hex + ASCII dump, 16 bytes per row, at most 8 rows */
+	for ( i = 0; i < 8*16 && i < pUsed->UserSize; i += 16 )
+	{
+		int j;
+		R_DEBUG ( "%04X ", i );
+		for ( j = 0; j < 16; j++ )
+		{
+			if ( i+j < pUsed->UserSize )
+			{
+				R_DEBUG ( "%02X ", (unsigned)(unsigned char)Addr[i+j] );
+			}
+			else
+			{
+				R_DEBUG ( "   " );
+			}
+		}
+		R_DEBUG(" ");
+		for ( j = 0; j < 16; j++ )
+		{
+			if ( i+j < pUsed->UserSize )
+			{
+				char c = Addr[i+j];
+				if ( c < 0x20 || c > 0x7E )
+					c = '.';
+				R_DEBUG ( "%c", c );
+			}
+			else
+			{
+				R_DEBUG ( " " );
+			}
+		}
+		R_DEBUG("\n");
+	}
+	R_PANIC();
+}
+/*
+ * Validate a single allocated block: magic values (double-free / corruption),
+ * Status, plausibility of Size/UserSize, and both red zones. Any violation
+ * ends in RiBadBlock(), which bug-checks and never returns.
+ */
+static void
+RUsedRedZoneCheck ( PR_POOL pool, PR_USED pUsed, char* Addr, const char* file, int line )
+{
+	int i;
+	unsigned char *LoZone, *HiZone;
+	int bLow = 1;
+	int bHigh = 1;
+
+	ASSERT ( Addr >= (char*)pool->UserBase && Addr < ((char*)pool->UserBase + pool->UserSize - 16) );
+#ifdef R_MAGIC
+	if ( pUsed->UsedMagic == R_FREE_MAGIC )
+	{
+		pUsed->UserSize = 0; // just to keep from confusion, MmpBadBlock() doesn't return...
+		RiBadBlock ( pUsed, Addr, "double-free", file, line, 0 );
+	}
+	if ( pUsed->UsedMagic != R_USED_MAGIC )
+	{
+		RiBadBlock ( pUsed, Addr, "bad magic", file, line, 0 );
+	}
+#endif//R_MAGIC
+	switch ( pUsed->Status )
+	{
+	case 0: // freed into main pool
+	case 2: // in ques
+		RiBadBlock ( pUsed, Addr, "double-free", file, line, 0 );
+		// no need for break here - RiBadBlock doesn't return
+	case 1: // allocated - this is okay
+		break;
+	default:
+		RiBadBlock ( pUsed, Addr, "corrupt status", file, line, 0 );
+	}
+	/* NOTE(review): this re-check is unreachable - the switch above already
+	   diverted every Status != 1 case into RiBadBlock */
+	if ( pUsed->Status != 1 )
+	{
+		RiBadBlock ( pUsed, Addr, "double-free", file, line, 0 );
+	}
+	if ( pUsed->Size > pool->PoolSize || pUsed->Size == 0 )
+	{
+		RiBadBlock ( pUsed, Addr, "invalid size", file, line, 0 );
+	}
+	if ( pUsed->UserSize > pool->PoolSize || pUsed->UserSize == 0 )
+	{
+		RiBadBlock ( pUsed, Addr, "invalid user size", file, line, 0 );
+	}
+	HiZone = (unsigned char*)Addr + pUsed->UserSize;
+	LoZone = (unsigned char*)Addr - R_RZ; // this is to simplify indexing below...
+	for ( i = 0; i < R_RZ && bLow && bHigh; i++ )
+	{
+		bLow = bLow && ( LoZone[i] == R_RZ_LOVALUE );
+		bHigh = bHigh && ( HiZone[i] == R_RZ_HIVALUE );
+	}
+	if ( !bLow || !bHigh )
+	{
+		const char* violation = "High and Low-side redzone overwrite";
+		if ( bHigh ) // high is okay, so it was just low failed
+			violation = "Low-side redzone overwrite";
+		else if ( bLow ) // low side is okay, so it was just high failed
+			violation = "High-side redzone overwrite";
+		RiBadBlock ( pUsed, Addr, violation, file, line, 1 );
+	}
 }
+#endif//R_RZ
 
+/*
+ * Boundary-tag walk backwards: return the header of the physically
+ * preceding block, or NULL when this is the first block (PrevSize == 0).
+ */
+PR_FREE
+RPreviousBlock ( PR_FREE Block )
+{
+	if ( Block->PrevSize > 0 )
+		return (PR_FREE)( (char*)Block - Block->PrevSize );
+	return NULL;
+}
 
-#ifdef PPOOL_UMODE_TEST
+/*
+ * Boundary-tag walk forwards: return the header of the physically
+ * following block, or NULL when Block is the last one in the pool.
+ */
+PR_FREE
+RNextBlock ( PR_POOL pool, PR_FREE Block )
+{
+	PR_FREE NextBlock = (PR_FREE)( (char*)Block + Block->Size );
+	if ( (char*)NextBlock >= (char*)pool->UserBase + pool->UserSize )
+		NextBlock = NULL;
+	return NextBlock;
+}
 
-PVOID TestAlloc ( ULONG Bytes )
+static __inline void*
+RHdrToBody ( void* blk )
+/*
+ * FUNCTION: Translate a block header address to the corresponding block
+ * address (internal)
+ */
 {
-	PVOID ret;
+	/* body = header + R_USED header + low-side red zone; must mirror RBodyToHdr */
+	return ( (void *) ((char*)blk + sizeof(R_USED) + R_RZ) );
+}
 
-       //printf ( "Allocating block: " ); RPoolStats ( MmPagedPool );
-       //RPoolRedZoneCheck ( MmPagedPool, __FILE__, __LINE__ );
+/* Inverse of RHdrToBody: recover the R_USED header from a body pointer. */
+static __inline PR_USED
+RBodyToHdr ( void* addr )
+{
+	return (PR_USED)
+	       ( ((char*)addr) - sizeof(R_USED) - R_RZ );
+}
 
-       ret = ExAllocatePagedPoolWithTag ( PagedPool, Bytes, 0 );
+/*
+ * Return 1 when Block is linked on the pool's free list, 0 otherwise.
+ * Debug helper used by RPoolRedZoneCheck.
+ */
+static int
+RiInFreeChain ( PR_POOL pool, PR_FREE Block )
+{
+	PR_FREE Free;
+	/* BUGFIX: walk with a NULL check up front - the old code entered the
+	   loop and dereferenced FirstFree->NextFree even when the free list
+	   was empty (FirstFree == NULL) */
+	for ( Free = pool->FirstFree; Free; Free = Free->NextFree )
+	{
+		if ( Free == Block )
+			return 1;
+	}
+	return 0;
+}
 
-       //printf ( "Block %x allocated: ", ret ); RPoolStats ( MmPagedPool );
-       //RPoolRedZoneCheck ( MmPagedPool, __FILE__, __LINE__ );
+/*
+ * Full-pool consistency walk: (1) step through every block by size-jumping,
+ * verifying its Status, boundary tags and (for allocated blocks) red zones;
+ * (2) verify every entry on the free list is reachable by that same walk.
+ * Any violation asserts or bug-checks via RUsedRedZoneCheck/RiBadBlock.
+ */
+static void
+RPoolRedZoneCheck ( PR_POOL pool, const char* file, int line )
+{
+	{
+		PR_USED Block = (PR_USED)pool->UserBase;
+		PR_USED NextBlock;
 
-	return ret;
+		for ( ;; )
+		{
+			switch ( Block->Status )
+			{
+			case 0: // block is in chain
+				ASSERT ( RiInFreeChain ( pool, (PR_FREE)Block ) );
+				break;
+			case 1: // block is allocated
+				RUsedRedZoneCheck ( pool, Block, RHdrToBody(Block), file, line );
+				break;
+			case 2: // block is in que
+				// nothing to verify here yet
+				break;
+			default:
+				ASSERT ( !"invalid status in memory block found in pool!" );
+			}
+			NextBlock = (PR_USED)RNextBlock(pool,(PR_FREE)Block);
+			if ( !NextBlock )
+				break;
+			ASSERT ( NextBlock->PrevSize == Block->Size );
+			Block = NextBlock;
+		}
+	}
+	{
+		// now let's step through the list of free pointers and verify
+		// each one can be found by size-jumping...
+		PR_FREE Free = (PR_FREE)pool->FirstFree;
+		while ( Free )
+		{
+			PR_FREE NextFree = (PR_FREE)pool->UserBase;
+			if ( Free != NextFree )
+			{
+				while ( NextFree != Free )
+				{
+					NextFree = RNextBlock ( pool, NextFree );
+					ASSERT(NextFree);
+				}
+			}
+			Free = Free->NextFree;
+		}
+	}
+}
+
+/*
+ * Set a block's Size and keep the boundary tags consistent by updating the
+ * successor's PrevSize. NextBlock may be passed in when the caller already
+ * knows it (RFreeSplit does); NULL means "look it up".
+ */
+static void
+RSetSize ( PR_POOL pool, PR_FREE Block, rulong NewSize, PR_FREE NextBlock )
+{
+	R_ASSERT_PTR(pool,Block);
+	ASSERT ( NewSize < pool->UserSize );
+	ASSERT ( NewSize >= sizeof(R_FREE) );
+	Block->Size = NewSize;
+	if ( !NextBlock )
+		NextBlock = RNextBlock ( pool, Block );
+	if ( NextBlock )
+		NextBlock->PrevSize = NewSize;
+}
+
+/*
+ * Split a free block in two at offset NewSize. The trailing remainder is
+ * initialized as a new free block, added to the free list, and returned;
+ * the original Block keeps the first NewSize bytes.
+ */
+static PR_FREE
+RFreeSplit ( PR_POOL pool, PR_FREE Block, rulong NewSize )
+{
+	PR_FREE NewBlock = (PR_FREE)((char*)Block + NewSize);
+	RSetSize ( pool, NewBlock, Block->Size - NewSize, NULL );
+	RSetSize ( pool, Block, NewSize, NewBlock );
+	RFreeInit ( NewBlock );
+	RPoolAddFree ( pool, NewBlock );
+	return NewBlock;
 }
 
-void TestFree ( PVOID ptr )
+/*
+ * Coalesce two physically adjacent free blocks: Second is unlinked from the
+ * free list and absorbed into First (First must immediately precede Second).
+ */
+static void
+RFreeMerge ( PR_POOL pool, PR_FREE First, PR_FREE Second )
 {
-	//printf ( "Freeing block %x: ", ptr ); RPoolStats ( MmPagedPool );
-	//RPoolRedZoneCheck ( MmPagedPool, __FILE__, __LINE__ );
-	ExFreePagedPool(ptr);
-	//printf ( "Block %x freed: ", ptr ); RPoolStats ( MmPagedPool );
-	//RPoolRedZoneCheck ( MmPagedPool, __FILE__, __LINE__ );
+	ASSERT ( RPreviousBlock(Second) == First );
+	ASSERT ( First->Size == Second->PrevSize );
+	RPoolRemoveFree ( pool, Second );
+	RSetSize ( pool, First, First->Size + Second->Size, NULL );
 }
 
-int main()
+/*
+ * Return a block to the main pool: re-initialize it as free, add it to the
+ * free list, then coalesce with the physically adjacent next/previous
+ * blocks when those are free too (Status == 0).
+ */
+static void
+RPoolReclaim ( PR_POOL pool, PR_FREE FreeBlock )
 {
-#define COUNT 100
-	int i, j;
-	char* keepers[COUNT];
-	char* trash[COUNT];
-	int AllocSize[] = { 15, 31, 63, 127, 255, 511, 1023, 2047 };
-	const int ALLOCS = sizeof(AllocSize) / sizeof(0[AllocSize]);
-	ULONG dwStart;
+	PR_FREE NextBlock, PreviousBlock;
 
-	MmPagedPoolSize = 1*1024*1024;
-	MmPagedPoolBase = malloc ( MmPagedPoolSize );
-	MmInitializePagedPool();
+	RFreeInit ( FreeBlock );
+	RPoolAddFree ( pool, FreeBlock );
 
-	dwStart = GetTickCount();
+	// TODO FIXME - don't merge and always insert freed blocks at the end for debugging purposes...
 
-	printf ( "test #1 phase #1\n" );
-	for ( i = 0; i < COUNT; i++ )
+	/*
+	 * If the next block is immediately adjacent to the newly freed one then
+	 * merge them.
+	 * PLEASE DO NOT WIPE OUT 'MAGIC' OR 'LASTOWNER' DATA FOR MERGED FREE BLOCKS
+	 */
+	NextBlock = RNextBlock ( pool, FreeBlock );
+	if ( NextBlock != NULL && !NextBlock->Status )
 	{
-		//printf ( "keeper %i) ", i );
-		keepers[i] = TestAlloc ( AllocSize[i%ALLOCS] );
-		if ( !keepers[i] ) printf ( "allocation failed\n" );
-		//printf ( "trash %i) ", i );
-		trash[i] = TestAlloc ( AllocSize[i%ALLOCS] );
-		if ( !trash[i] ) printf ( "allocation failed\n" );
+		RFreeMerge ( pool, FreeBlock, NextBlock );
 	}
 
-	printf ( "test #1 phase #2\n" );
-	for ( i = 0; i < COUNT; i++ )
+	/*
+	 * If the previous block is adjacent to the newly freed one then
+	 * merge them.
+	 * PLEASE DO NOT WIPE OUT 'MAGIC' OR 'LASTOWNER' DATA FOR MERGED FREE BLOCKS
+	 */
+	PreviousBlock = RPreviousBlock ( FreeBlock );
+	if ( PreviousBlock != NULL && !PreviousBlock->Status )
 	{
-		if ( i == 6 )
-			i = i;
-		//printf ( "%i) ", i );
-		TestFree ( trash[i] );
+		RFreeMerge ( pool, PreviousBlock, FreeBlock );
 	}
+}
 
-       printf ( "test #1 phase #3\n" );
-       for ( i = 0; i < 4; i++ )
+/*
+ * Stamp a block as allocated: Status = 1, owner back-trace, used-magic
+ * (when enabled) and the caller's Tag.
+ */
+static void
+RiUsedInit ( PR_USED Block, rulong Tag )
+{
+	Block->Status = 1;
+	RUsedFillStack ( Block );
+#ifdef R_MAGIC
+	Block->UsedMagic = R_USED_MAGIC;
+#endif//R_MAGIC
+	//ASSERT_SIZE ( Block->Size );
+
+	// now add the block to the used block list
+#if DBG
+	Block->NextUsed = (PR_USED)(ULONG_PTR)0xDEADBEEF;
+#endif//R_USED_LIST  /* NOTE(review): stale comment - the guard above is '#if DBG' */
+
+	Block->Tag = Tag;
+}
+
+#if !R_RZ
+#define RiUsedInitRedZone(Block,UserSize)
+#else//R_RZ
+/*
+ * Paint the low/high red zones around the user body with their sentinel
+ * values and record UserSize; debug builds also fill the body with 0xCD
+ * so reads of uninitialized memory are recognizable.
+ */
+static void
+RiUsedInitRedZone ( PR_USED Block, rulong UserSize )
+{
+	// write out buffer-overrun detection bytes
+	char* Addr = (char*)RHdrToBody(Block);
+	Block->UserSize = UserSize;
+	memset ( Addr - R_RZ, R_RZ_LOVALUE, R_RZ );
+	memset ( Addr + Block->UserSize, R_RZ_HIVALUE, R_RZ );
+#if DBG
+	memset ( Addr, 0xCD, UserSize );
+#endif//DBG
+}
+#endif//R_RZ
+
+/*
+ * Allocate NumberOfBytes from the pool with the requested alignment class
+ * (align indexes pool->Alignments, must be < 3). Strategy: try the matching
+ * lookaside que first; otherwise best-fit search of the free list, carving
+ * off leading/trailing remainders as new free blocks. Returns the body
+ * pointer, or NULL on failure.
+ *
+ * Debug back-doors (both return NULL): NumberOfBytes == 0 runs a full pool
+ * red-zone verification; NumberOfBytes that is itself a pointer into the
+ * pool red-zone-checks that single block.
+ */
+static void*
+RPoolAlloc ( PR_POOL pool, rulong NumberOfBytes, rulong Tag, rulong align )
+{
+	PR_USED NewBlock;
+	PR_FREE BestBlock,
+		NextBlock,
+		PreviousBlock,
+		BestPreviousBlock,
+		CurrentBlock;
+	void* BestAlignedAddr;
+	int que,
+		queBytes = NumberOfBytes;
+	rulong BlockSize,
+		Alignment;
+	int que_reclaimed = 0;
+
+	ASSERT ( pool );
+	ASSERT ( align < 3 );
+
+	R_ACQUIRE_MUTEX(pool);
+
+	if ( !NumberOfBytes )
+	{
+		R_DEBUG("0 bytes requested - initiating pool verification\n");
+		RPoolRedZoneCheck ( pool, __FILE__, __LINE__ );
+		R_RELEASE_MUTEX(pool);
+		return NULL;
+	}
+	if ( NumberOfBytes > pool->PoolSize )
 	{
-		//printf ( "%i) ", i );
-		keepers[i] = TestAlloc ( 4096 );
-		if ( !keepers[i] ) printf ( "allocation failed\n" );
+		if ( R_IS_POOL_PTR(pool,NumberOfBytes) )
+		{
+			R_DEBUG("red zone verification requested for block 0x%X\n", NumberOfBytes );
+			RUsedRedZoneCheck(pool,RBodyToHdr((void*)(ULONG_PTR)NumberOfBytes), (char*)(ULONG_PTR)NumberOfBytes, __FILE__, __LINE__ );
+			R_RELEASE_MUTEX(pool);
+			return NULL;
+		}
+		R_DEBUG("Invalid allocation request: %i bytes\n", NumberOfBytes );
+		R_RELEASE_MUTEX(pool);
+		return NULL;
 	}
 
-	printf ( "test #1 phase #4\n" );
-	for ( i = 0; i < 4; i++ )
+	/* fast path: pop a ready-made block from the matching lookaside que */
+	que = RQueWhich ( NumberOfBytes );
+	if ( que >= 0 )
+	{
+		if ( (NewBlock = RQueRemove ( &pool->Que[que][align] )) )
+		{
+			RiUsedInit ( NewBlock, Tag );
+			RiUsedInitRedZone ( NewBlock, NumberOfBytes );
+			R_RELEASE_MUTEX(pool);
+			return RHdrToBody(NewBlock);
+		}
+		queBytes = 16 << que;
+	}
+
+	/*
+	 * Calculate the total number of bytes we will need.
+	 */
+	BlockSize = queBytes + sizeof(R_USED) + 2*R_RZ;
+	if (BlockSize < sizeof(R_FREE))
 	{
-		//printf ( "%i) ", i );
-		TestFree ( keepers[i] );
+		/* At least we need the size of the free block header. */
+		BlockSize = sizeof(R_FREE);
 	}
 
-	printf ( "test #1 phase #5\n" );
-	srand(1);
-	for ( i = 0; i < COUNT; i++ )
+try_again:
+	/*
+	 * Find the best-fitting block.
+	 */
+	BestBlock = NULL;
+	Alignment = pool->Alignments[align];
+	PreviousBlock = NULL;
+	BestPreviousBlock = NULL,
+	CurrentBlock = pool->FirstFree;
+	BestAlignedAddr = NULL;
+
+	while ( CurrentBlock != NULL )
 	{
-		//printf ( "%i) ", i );
-		trash[i] = TestAlloc ( rand()%1024+1 );
-		if ( !trash[i] ) printf ( "allocation failed\n" );
+		PVOID Addr = RHdrToBody(CurrentBlock);
+		PVOID CurrentBlockEnd = (char*)CurrentBlock + CurrentBlock->Size;
+		/* calculate last size-aligned address available within this block */
+		PVOID AlignedAddr = R_ROUND_DOWN((char*)CurrentBlockEnd-queBytes-R_RZ, Alignment);
+		ASSERT ( (char*)AlignedAddr+queBytes+R_RZ <= (char*)CurrentBlockEnd );
+
+		/* special case, this address is already size-aligned, and the right size */
+		if ( Addr == AlignedAddr )
+		{
+			BestAlignedAddr = AlignedAddr;
+			BestPreviousBlock = PreviousBlock;
+			BestBlock = CurrentBlock;
+			break;
+		}
+		// if we carve out a size-aligned block... is it still past the end of this
+		// block's free header?
+		else if ( (char*)RBodyToHdr(AlignedAddr)
+			>= (char*)CurrentBlock+sizeof(R_FREE) )
+		{
+			/*
+			 * there's enough room to allocate our size-aligned memory out
+			 * of this block, see if it's a better choice than any previous
+			 * finds
+			 */
+			if ( BestBlock == NULL
+				|| BestBlock->Size > CurrentBlock->Size )
+			{
+				BestAlignedAddr = AlignedAddr;
+				BestPreviousBlock = PreviousBlock;
+				BestBlock = CurrentBlock;
+			}
+		}
+
+		PreviousBlock = CurrentBlock;
+		CurrentBlock = CurrentBlock->NextFree;
 	}
-	printf ( "test #1 phase #6\n" );
-	for ( i = 0; i < 10000; i++ )
+
+	/*
+	 * We didn't find anything suitable at all.
+	 */
+	if (BestBlock == NULL)
 	{
-		TestFree ( trash[i%COUNT] );
-		trash[i%COUNT] = TestAlloc ( rand()%1024+1 );
-		if ( !trash[i%COUNT] ) printf ( "allocation failed\n" );
+		if ( !que_reclaimed )
+		{
+			// reclaim que: flush every lookaside que back into the main
+			// pool (with coalescing) and retry the search once
+			int i, j;
+			for ( i = 0; i < R_QUECOUNT; i++ )
+			{
+				for ( j = 0; j < 3; j++ )
+				{
+					while ( (BestBlock = (PR_FREE)RQueRemove ( &pool->Que[i][j] )) )
+					{
+						RPoolReclaim ( pool, BestBlock );
+					}
+				}
+			}
+
+			que_reclaimed = 1;
+			goto try_again;
+		}
+		DPRINT1("Trying to allocate %lu bytes from paged pool - nothing suitable found, returning NULL\n",
+			queBytes );
+		R_RELEASE_MUTEX(pool);
+		return NULL;
 	}
-	printf ( "test #1 phase #7\n" );
-	j = 0;
-	for ( i = 0; i < COUNT; i++ )
+	/*
+	 * we found a best block. If Addr isn't already aligned, we've pre-qualified that
+	 * there's room at the beginning of the block for a free block...
+	 */
 	{
-		if ( trash[i] )
+		void* Addr = RHdrToBody(BestBlock);
+		if ( BestAlignedAddr != Addr )
 		{
-			TestFree ( trash[i] );
-			++j;
+			PR_FREE NewFreeBlock = RFreeSplit (
+				pool,
+				BestBlock,
+				(char*)RBodyToHdr(BestAlignedAddr) - (char*)BestBlock );
+			ASSERT ( BestAlignedAddr > Addr );
+
+			//DPRINT ( "breaking off preceding bytes into their own block...\n" );
+			/*DPRINT ( "NewFreeBlock 0x%x Size %lu (Old Block's new size %lu) NextFree 0x%x\n",
+				NewFreeBlock, NewFreeBlock->Size, BestBlock->Size, BestBlock->NextFree );*/
+
+			/* we want the following code to use our size-aligned block */
+			BestPreviousBlock = BestBlock;
+			BestBlock = NewFreeBlock;
+
+			//VerifyPagedPool();
 		}
 	}
-	printf ( "test #1 phase #8 ( freed %i of %i trash )\n", j, COUNT );
-	if ( !TestAlloc ( 2048 ) )
-		printf ( "Couldn't allocate 2048 bytes after freeing up a whole bunch of blocks\n" );
+	/*
+	 * Is there enough space to create a second block from the unused portion.
+	 */
+	if ( (BestBlock->Size - BlockSize) > sizeof(R_FREE) )
+	{
+		/*DPRINT("BestBlock 0x%x Size 0x%x BlockSize 0x%x NewSize 0x%x\n",
+			BestBlock, BestBlock->Size, BlockSize, NewSize );*/
+
+		/*
+		 * Create the new free block.
+		 */
+		NextBlock = RFreeSplit ( pool, BestBlock, BlockSize );
+		//ASSERT_SIZE ( NextBlock->Size );
+	}
+	/*
+	 * Remove the selected block from the list of free blocks.
+	 */
+	//DPRINT ( "Removing selected block from free block list\n" );
+	RPoolRemoveFree ( pool, BestBlock );
+	/*
+	 * Create the new used block header.
+	 */
+	NewBlock = (PR_USED)BestBlock;
+	RiUsedInit ( NewBlock, Tag );
 
-	free ( MmPagedPoolBase );
+	/*  RtlZeroMemory(RHdrToBody(NewBlock), NumberOfBytes);*/
 
-	printf ( "test time: %lu\n", GetTickCount() - dwStart );
+	RiUsedInitRedZone ( NewBlock, NumberOfBytes );
+	R_RELEASE_MUTEX(pool);
 
-	printf ( "test #2\n" );
+	return RHdrToBody(NewBlock);
+}
 
-       MmPagedPoolSize = 1024;
-       MmPagedPoolBase = malloc ( MmPagedPoolSize );
-       MmInitializePagedPool();
+static void
+RPoolFree ( PR_POOL pool, void* Addr )
+{
+       PR_USED UsedBlock;
+       rulong UsedSize;
+       PR_FREE FreeBlock;
+       rulong UserSize;
+       int que;
+
+       ASSERT(pool);
+       if ( !Addr )
+       {
+               R_DEBUG("Attempt to free NULL ptr, initiating Red Zone Check\n" );
+               R_ACQUIRE_MUTEX(pool);
+               RPoolRedZoneCheck ( pool, __FILE__, __LINE__ );
+               R_RELEASE_MUTEX(pool);
+               return;
+       }
+       R_ASSERT_PTR(pool,Addr);
+
+       UsedBlock = RBodyToHdr(Addr);
+       UsedSize = UsedBlock->Size;
+       FreeBlock = (PR_FREE)UsedBlock;
+#if R_RZ
+       UserSize = UsedBlock->UserSize;
+#else
+       UserSize = UsedSize - sizeof(R_USED) - 2*R_RZ;
+#endif//R_RZ
+
+       RUsedRedZoneCheck ( pool, UsedBlock, Addr, __FILE__, __LINE__ );
 
-       TestAlloc ( 512 );
-       i = RPoolLargestAllocPossible ( MmPagedPool, 0 );
-       if ( !TestAlloc ( i ) )
+#if R_RZ
+       memset ( Addr, 0xCD, UsedBlock->UserSize );
+#endif
+
+       que = RQueWhich ( UserSize );
+       if ( que >= 0 )
        {
-               printf ( "allocating last available block failed\n" );
+               int queBytes = 16 << que;
+               ASSERT( (rulong)queBytes >= UserSize );
+               if ( que >= 0 )
+               {
+                       int align = 0;
+                       if ( R_ROUND_UP(Addr,pool->Alignments[2]) == Addr )
+                               align = 2;
+                       else if ( R_ROUND_UP(Addr,pool->Alignments[1]) == Addr )
+                               align = 1;
+                       R_ACQUIRE_MUTEX(pool);
+                       RQueAdd ( &pool->Que[que][align], UsedBlock );
+                       R_RELEASE_MUTEX(pool);
+                       return;
+               }
        }
 
-       free ( MmPagedPoolBase );
+       R_ACQUIRE_MUTEX(pool);
+       RPoolReclaim ( pool, FreeBlock );
+       R_RELEASE_MUTEX(pool);
+}
 
-       printf ( "done!\n" );
-       return 0;
+VOID
+INIT_FUNCTION
+NTAPI
+MmInitializePagedPool(VOID)
+{   
+       /*
+        * We are still at a high IRQL level at this point so explicitly commit
+        * the first page of the paged pool before writing the first block header.
+        */
+       MmCommitPagedPoolAddress ( (PVOID)MmPagedPoolBase, FALSE );
+
+       MmPagedPool = RPoolInit ( MmPagedPoolBase,
+               MmPagedPoolSize,
+               MM_POOL_ALIGNMENT,
+               MM_CACHE_LINE_SIZE,
+               PAGE_SIZE );
+
+       ExInitializeFastMutex(&MmPagedPool->Mutex);
+}
+
+PVOID NTAPI
+ExAllocatePagedPoolWithTag (IN POOL_TYPE PoolType,
+                            IN ULONG  NumberOfBytes,
+                            IN ULONG  Tag)
+{
+       int align;
+
+       if ( NumberOfBytes >= PAGE_SIZE )
+               align = 2;
+       else if ( PoolType & CACHE_ALIGNED_POOL_MASK )
+               align = 1;
+       else
+               align = 0;
+
+       ASSERT_IRQL_LESS_OR_EQUAL(APC_LEVEL);
+
+       return RPoolAlloc ( MmPagedPool, NumberOfBytes, Tag, align );
+}
+
+VOID NTAPI
+ExFreePagedPool(IN PVOID Block)
+{
+       ASSERT_IRQL_LESS_OR_EQUAL(APC_LEVEL);
+       RPoolFree ( MmPagedPool, Block );
 }
-#endif//PPOOL_UMODE_TEST
 
 /* EOF */