completely rewritten PagedPool.
author     Royce Mitchell III <royce3@ev1.net>
           Fri, 17 Dec 2004 13:20:05 +0000 (13:20 +0000)
committer  Royce Mitchell III <royce3@ev1.net>
           Fri, 17 Dec 2004 13:20:05 +0000 (13:20 +0000)
- more than 800% faster than the previous implementation
- support for multiple pools (SpecialPool, etc., anyone?); see the usage sketch below
- can also be used for NPool if desired
- for allocations of <= 512 bytes, avoids immediately handing a just-freed block back out (helps catch memory abusers)
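
A minimal usage sketch of the new API (illustrative only: MySpecialPool and the
MySpecial* wrappers are hypothetical names; the glue macros mirror what the new
ppool.c sets up below):

    /* the glue macros must be defined before including RPoolMgr.h */
    #define R_MUTEX FAST_MUTEX
    #define R_ACQUIRE_MUTEX(pool) ExAcquireFastMutex(&pool->Mutex)
    #define R_RELEASE_MUTEX(pool) ExReleaseFastMutex(&pool->Mutex)
    #define R_PRINT_ADDRESS(addr) 0
    #define R_PANIC() KeBugCheck(0)
    #define R_DEBUG DbgPrint
    #define R_EXTRA_STACK_UP 2
    #include "RPoolMgr.h"

    static PR_POOL MySpecialPool; /* hypothetical second pool instance */

    VOID MyInitSpecialPool ( PVOID Base, ULONG Size )
    {
        /* alignments for align index 0, 1 and 2 respectively */
        MySpecialPool = RPoolInit ( Base, Size,
            MM_POOL_ALIGNMENT, MM_CACHE_LINE_SIZE, PAGE_SIZE );
        ExInitializeFastMutex ( &MySpecialPool->Mutex );
    }

    PVOID MySpecialAlloc ( ULONG NumberOfBytes, ULONG Tag )
    {
        /* the last parameter picks one of the three RPoolInit alignments */
        return RPoolAlloc ( MySpecialPool, NumberOfBytes, Tag, 0 );
    }

    VOID MySpecialFree ( PVOID Block )
    {
        RPoolFree ( MySpecialPool, Block );
    }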

svn path=/trunk/; revision=12165

reactos/ntoskrnl/mm/RPoolMgr.h [new file with mode: 0644]
reactos/ntoskrnl/mm/ppool.c

diff --git a/reactos/ntoskrnl/mm/RPoolMgr.h b/reactos/ntoskrnl/mm/RPoolMgr.h
new file mode 100644 (file)
index 0000000..df79e63
--- /dev/null
@@ -0,0 +1,1026 @@
+/* $Id: RPoolMgr.h,v 1.1 2004/12/17 13:20:05 royce Exp $
+ *
+ * COPYRIGHT:       See COPYING in the top level directory
+ * PROJECT:         ReactOS kernel
+ * FILE:            ntoskrnl/mm/RPoolMgr.h
+ * PURPOSE:         A semi-generic reusable Pool implementation
+ * PROGRAMMER:      Royce Mitchell III
+ * UPDATE HISTORY:
+ */
+
+#ifndef RPOOLMGR_H
+#define RPOOLMGR_H
+
+typedef unsigned long rulong;
+
+#define R_IS_POOL_PTR(pool,ptr) ( (void*)(ptr) >= (pool)->UserBase && (char*)(ptr) < ((char*)(pool)->UserBase+(pool)->UserSize) )
+#define R_ASSERT_PTR(pool,ptr) ASSERT( R_IS_POOL_PTR(pool,ptr) )
+#define R_ASSERT_SIZE(pool,sz) ASSERT( sz > (sizeof(R_USED)+2*R_RZ) && sz >= sizeof(R_FREE) && sz < pool->UserSize )
+
+#ifndef R_ROUND_UP
+#define R_ROUND_UP(x,s)    ((PVOID)(((rulong)(x)+(s)-1) & ~((s)-1)))
+#endif//R_ROUND_UP
+
+#ifndef R_ROUND_DOWN
+#define R_ROUND_DOWN(x,s)  ((PVOID)(((rulong)(x)) & ~((s)-1)))
+#endif//R_ROUND_DOWN
+
+#ifndef R_QUEMIN
+// R_QUEMIN is the minimum number of entries to keep in a que
+#define R_QUEMIN 0
+#endif//R_QUEMIN
+
+#ifndef R_QUECOUNT
+// 16, 32, 64, 128, 256, 512
+#define R_QUECOUNT 6
+#endif//R_QUECOUNT
+
+#ifndef R_RZ
+// R_RZ is the redzone size
+#define R_RZ 4
+#endif//R_RZ
+
+#ifndef R_RZ_LOVALUE
+#define R_RZ_LOVALUE 0x87
+#endif//R_RZ_LOVALUE
+
+#ifndef R_RZ_HIVALUE
+#define R_RZ_HIVALUE 0xA5
+#endif//R_RZ_HIVALUE
+
+#ifndef R_STACK
+// R_STACK is the number of stack entries to store in blocks for debug purposes
+#define R_STACK 3
+#endif//R_STACK
+
+#ifndef R_TAG
+// R_TAG - do we keep track of tags on a per-memory-block basis?
+#define R_TAG 0
+#endif//R_TAG
+
+#ifdef R_MAGIC
+#      ifndef R_FREE_MAGIC
+#              define R_FREE_MAGIC (rulong)(('F'<<0) + ('r'<<8) + ('E'<<16) + ('e'<<24))
+#      endif//R_FREE_MAGIC
+#      ifndef R_USED_MAGIC
+#              define R_USED_MAGIC (rulong)(('u'<<0) + ('S'<<8) + ('e'<<16) + ('D'<<24))
+#      endif//R_USED_MAGIC
+#endif//R_MAGIC
+
+// **IMPORTANT NOTE** Magic, PrevSize, Size and Status must be at same offset
+// in both the R_FREE and R_USED structures
+
+typedef struct _R_FREE
+{
+#ifdef R_MAGIC
+       rulong FreeMagic;
+#endif//R_MAGIC
+       rulong PrevSize : 30;
+       rulong Status : 2;
+       rulong Size;
+#if R_STACK
+       rulong LastOwnerStack[R_STACK];
+#endif//R_STACK
+       struct _R_FREE* NextFree;
+       struct _R_FREE* PrevFree;
+}
+R_FREE, *PR_FREE;
+
+typedef struct _R_USED
+{
+#ifdef R_MAGIC
+       rulong UsedMagic;
+#endif//R_MAGIC
+       rulong PrevSize : 30;
+       rulong Status : 2;
+       rulong Size;
+#if R_STACK
+       rulong LastOwnerStack[R_STACK];
+#endif//R_STACK
+       struct _R_USED* NextUsed;
+#if R_RZ
+       rulong UserSize; // how many bytes the user actually asked for...
+#endif//R_RZ
+       rulong Tag;
+}
+R_USED, *PR_USED;
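+
+// A compile-time check of the shared-offset note above could look like this
+// (illustrative sketch only; C_ASSERT/FIELD_OFFSET as provided by the DDK headers):
+//   C_ASSERT( FIELD_OFFSET(R_FREE,Size) == FIELD_OFFSET(R_USED,Size) );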
+
+typedef struct _R_QUE
+{
+       rulong Count;
+       PR_USED First, Last;
+}
+R_QUE, *PR_QUE;
+
+typedef struct _R_POOL
+{
+       void* PoolBase;
+       rulong PoolSize;
+       void* UserBase;
+       rulong UserSize;
+       rulong Alignments[3];
+       PR_FREE FirstFree, LastFree;
+       R_QUE Que[R_QUECOUNT][3];
+       R_MUTEX Mutex;
+}
+R_POOL, *PR_POOL;
+
+static int
+RQueWhich ( rulong size )
+{
+       rulong que, quesize;
+       for ( que=0, quesize=16; que < R_QUECOUNT; que++, quesize<<=1 )
+       {
+               if ( quesize >= size )
+               {
+                       return que;
+               }
+       }
+       return -1;
+}
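+
+// Example: RQueWhich(100) steps through 16, 32, 64 and stops at 128, so a
+// 100-byte request is served from (and later parked back on) the 128-byte
+// que; requests larger than 512 bytes return -1 and bypass the ques entirely.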
+
+static void
+RQueInit ( PR_QUE que )
+{
+       que->Count = 0;
+       que->First = NULL;
+       que->Last = NULL;
+}
+
+static void
+RQueAdd ( PR_QUE que, PR_USED Item )
+{
+       ASSERT(Item);
+       Item->Status = 2;
+       Item->NextUsed = NULL;
+       ++que->Count;
+       if ( !que->Last )
+       {
+               que->First = que->Last = Item;
+               return;
+       }
+       ASSERT(!que->Last->NextUsed);
+       que->Last->NextUsed = Item;
+       que->Last = Item;
+}
+
+static PR_USED
+RQueRemove ( PR_QUE que )
+{
+       PR_USED Item;
+#if R_QUEMIN
+	if ( que->Count < R_QUEMIN )
+               return NULL;
+#endif
+       if ( !que->First )
+               return NULL;
+       Item = que->First;
+       que->First = Item->NextUsed;
+       if ( !--que->Count )
+       {
+               ASSERT ( !que->First );
+               que->Last = NULL;
+       }
+       Item->Status = 0;
+       return Item;
+}
+
+static void
+RPoolAddFree ( PR_POOL pool, PR_FREE Item )
+{
+       ASSERT(pool);
+       ASSERT(Item);
+       if ( !pool->FirstFree )
+       {
+               pool->FirstFree = pool->LastFree = Item;
+               Item->NextFree = NULL;
+       }
+       else
+       {
+               pool->FirstFree->PrevFree = Item;
+               Item->NextFree = pool->FirstFree;
+               pool->FirstFree = Item;
+       }
+       Item->PrevFree = NULL;
+}
+
+static void
+RPoolRemoveFree ( PR_POOL pool, PR_FREE Item )
+{
+       ASSERT(pool);
+       ASSERT(Item);
+       if ( Item->NextFree )
+               Item->NextFree->PrevFree = Item->PrevFree;
+       else
+       {
+               ASSERT ( pool->LastFree == Item );
+               pool->LastFree = Item->PrevFree;
+       }
+       if ( Item->PrevFree )
+               Item->PrevFree->NextFree = Item->NextFree;
+       else
+       {
+               ASSERT ( pool->FirstFree == Item );
+               pool->FirstFree = Item->NextFree;
+       }
+#if defined(DBG) || defined(KDBG)
+       Item->NextFree = Item->PrevFree = (PR_FREE)0xDEADBEEF;
+#endif//DBG || KDBG
+}
+
+// walks up a stack trace: given a pointer to a return-address slot in a
+// stack frame, it returns a pointer to the return-address slot of the next
+// frame up, or NULL when the end of the chain is reached.
+static rulong*
+RNextStackFrame ( rulong* Frame )
+{
+	if ( !Frame || !*Frame || *Frame == 0xDEADBEEF )
+               return NULL;
+       return (rulong*)(  Frame[-1] ) + 1;
+}
+
+// this function returns a pointer to the address the
+// caller will return to. Use RNextStackFrame() above to walk
+// further up the stack.
+static rulong*
+RStackFrame()
+{
+       rulong* Frame;
+#if defined __GNUC__
+       __asm__("mov %%ebp, %%ebx" : "=b" (Frame) : );
+#elif defined(_MSC_VER)
+       __asm mov [Frame], ebp
+#endif
+       return RNextStackFrame ( Frame + 1 );
+}
+
+static void
+RFreeFillStack ( PR_FREE free )
+{
+       rulong* Frame = RStackFrame();
+       int i;
+       memset ( free->LastOwnerStack, 0, sizeof(free->LastOwnerStack) );
+       Frame = RNextStackFrame ( Frame ); // step out of RFreeInit()
+       Frame = RNextStackFrame ( Frame ); // step out of RFreeSplit()/RPoolReclaim()
+       Frame = RNextStackFrame ( Frame ); // step out of RPoolFree()
+       for ( i = 0; i < R_EXTRA_STACK_UP; i++ )
+               Frame = RNextStackFrame ( Frame );
+       for ( i = 0; i < R_STACK && Frame; i++ )
+       {
+               free->LastOwnerStack[i] = *Frame;
+               Frame = RNextStackFrame ( Frame );
+       }
+}
+
+static void
+RUsedFillStack ( PR_USED used )
+{
+       rulong* Frame = RStackFrame();
+       int i;
+       memset ( used->LastOwnerStack, 0, sizeof(used->LastOwnerStack) );
+       Frame = RNextStackFrame ( Frame ); // step out of RUsedInit()
+       Frame = RNextStackFrame ( Frame ); // step out of RPoolAlloc()
+       for ( i = 0; i < R_EXTRA_STACK_UP; i++ )
+               Frame = RNextStackFrame ( Frame );
+       for ( i = 0; i < R_STACK && Frame; i++ )
+       {
+               used->LastOwnerStack[i] = *Frame;
+               Frame = RNextStackFrame ( Frame );
+       }
+}
+
+static PR_FREE
+RFreeInit ( void* memory )
+{
+       PR_FREE block = (PR_FREE)memory;
+#ifdef R_MAGIC
+	block->FreeMagic = R_FREE_MAGIC;
+#endif//R_MAGIC
+       block->Status = 0;
+       RFreeFillStack ( block );
+#if defined(DBG) || defined(KDBG)
+       block->PrevFree = block->NextFree = (PR_FREE)0xDEADBEEF;
+#endif//DBG || KDBG
+       return block;
+}
+
+PR_POOL
+RPoolInit ( void* PoolBase, rulong PoolSize, int align1, int align2, int align3 )
+{
+       int align, que;
+       PR_POOL pool = (PR_POOL)PoolBase;
+
+       pool->PoolBase = PoolBase;
+       pool->PoolSize = PoolSize;
+       pool->UserBase = (char*)pool->PoolBase + sizeof(R_POOL);
+       pool->UserSize = PoolSize - sizeof(R_POOL);
+       pool->Alignments[0] = align1;
+       pool->Alignments[1] = align2;
+       pool->Alignments[2] = align3;
+       pool->FirstFree = pool->LastFree = NULL;
+
+       RPoolAddFree ( pool,
+               RFreeInit ( pool->UserBase ));
+
+       pool->FirstFree->PrevSize = 0;
+       pool->FirstFree->Size = pool->UserSize;
+
+       for ( que = 0; que < R_QUECOUNT; que++ )
+       {
+               for ( align = 0; align < 3; align++ )
+               {
+                       RQueInit ( &pool->Que[que][align] );
+               }
+       }
+       return pool;
+}
+
+static const char*
+RFormatTag ( rulong Tag, char* buf )
+{
+       int i;
+       *(rulong*)&buf[0] = Tag;
+       buf[4] = 0;
+       for ( i = 0; i < 4; i++ )
+       {
+               if ( !buf[i] )
+                       buf[i] = ' ';
+       }
+       return buf;
+}
+
+#if !R_RZ
+#define RUsedRedZoneCheck(pUsed,Addr,file,line)
+#else//R_RZ
+static void
+RiBadBlock ( PR_USED pUsed, char* Addr, const char* violation, const char* file, int line, int printzone )
+{
+       char tag[5];
+       int i;
+
+       R_DEBUG("%s(%i): %s detected for paged pool address 0x%x\n",
+               file, line, violation, Addr );
+
+#ifdef R_MAGIC
+       R_DEBUG ( "UsedMagic 0x%x, ", pUsed->UsedMagic );
+#endif//R_MAGIC
+       R_DEBUG ( "Tag %s(%X), Size %i, UserSize %i",
+               RFormatTag(pUsed->Tag,tag),
+               pUsed->Tag,
+               pUsed->Size,
+               pUsed->UserSize );
+
+       if ( printzone )
+       {
+               unsigned char* HiZone = Addr + pUsed->UserSize;
+               unsigned char* LoZone = Addr - R_RZ; // this is to simplify indexing below...
+               R_DEBUG ( ", LoZone " );
+               for ( i = 0; i < R_RZ; i++ )
+                       R_DEBUG ( "%02x", LoZone[i] );
+               R_DEBUG ( ", HiZone " );
+               for ( i = 0; i < R_RZ; i++ )
+                       R_DEBUG ( "%02x", HiZone[i] );
+       }
+       R_DEBUG ( "\n" );
+
+       R_DEBUG ( "First few Stack Frames:" );
+       for ( i = 0; i < R_STACK; i++ )
+       {
+               if ( pUsed->LastOwnerStack[i] != 0xDEADBEEF )
+               {
+                       R_DEBUG(" ");
+                       if (!R_PRINT_ADDRESS ((PVOID)pUsed->LastOwnerStack[i]) )
+                       {
+                               R_DEBUG("<%X>", pUsed->LastOwnerStack[i] );
+                       }
+               }
+       }
+       R_DEBUG ( "\n" );
+
+       R_PANIC();
+}
+static void
+RUsedRedZoneCheck ( PR_POOL pool, PR_USED pUsed, char* Addr, const char* file, int line )
+{
+       int i;
+       unsigned char *LoZone, *HiZone;
+       int bLow = 1;
+       int bHigh = 1;
+
+       ASSERT ( Addr >= (char*)pool->UserBase && Addr < ((char*)pool->UserBase + pool->UserSize - 16) );
+#ifdef R_MAGIC
+	if ( pUsed->UsedMagic == R_FREE_MAGIC )
+	{
+		pUsed->UserSize = 0; // just to avoid confusion, RiBadBlock() doesn't return...
+		RiBadBlock ( pUsed, Addr, "double-free", file, line, 0 );
+	}
+	if ( pUsed->UsedMagic != R_USED_MAGIC )
+       {
+               RiBadBlock ( pUsed, Addr, "bad magic", file, line, 0 );
+       }
+#endif//R_MAGIC
+       if ( pUsed->Size > pool->PoolSize || pUsed->Size == 0 )
+       {
+               RiBadBlock ( pUsed, Addr, "invalid size", file, line, 0 );
+       }
+       if ( pUsed->UserSize > pool->PoolSize || pUsed->UserSize == 0 )
+       {
+               RiBadBlock ( pUsed, Addr, "invalid user size", file, line, 0 );
+       }
+       HiZone = Addr + pUsed->UserSize;
+       LoZone = Addr - R_RZ; // this is to simplify indexing below...
+       for ( i = 0; i < R_RZ && bLow && bHigh; i++ )
+       {
+               bLow = bLow && ( LoZone[i] == R_RZ_LOVALUE );
+               bHigh = bHigh && ( HiZone[i] == R_RZ_HIVALUE );
+       }
+       if ( !bLow || !bHigh )
+       {
+               const char* violation = "High and Low-side redzone";
+               if ( bHigh ) // high is okay, so it was just low failed
+                       violation = "Low-side redzone";
+               else if ( bLow ) // low side is okay, so it was just high failed
+                       violation = "High-side redzone";
+               RiBadBlock ( pUsed, Addr, violation, file, line, 1 );
+       }
+}
+#endif//R_RZ
+
+PR_FREE
+RPreviousBlock ( PR_FREE Block )
+{
+       if ( Block->PrevSize > 0 )
+               return (PR_FREE)( (char*)Block - Block->PrevSize );
+       return NULL;
+}
+
+PR_FREE
+RNextBlock ( PR_POOL pool, PR_FREE Block )
+{
+       PR_FREE NextBlock = (PR_FREE)( (char*)Block + Block->Size );
+       if ( (char*)NextBlock >= (char*)pool->UserBase + pool->UserSize )
+               NextBlock = NULL;
+       return NextBlock;
+}
+
+inline static void*
+RHdrToBody ( void* blk )
+/*
+ * FUNCTION: Translate a block header address to the corresponding block
+ * address (internal)
+ */
+{
+       return ( (void *) ((char*)blk + sizeof(R_USED) + R_RZ) );
+}
+
+inline static PR_USED
+RBodyToHdr ( void* addr )
+{
+       return (PR_USED)
+              ( ((char*)addr) - sizeof(R_USED) - R_RZ );
+}
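+
+// With redzones enabled (R_RZ == 4) an allocated block is laid out as
+//   [R_USED header][R_RZ lo redzone][user data, UserSize bytes][R_RZ hi redzone]
+// so the two helpers above simply add or subtract sizeof(R_USED)+R_RZ.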
+
+static int
+RiInFreeChain ( PR_POOL pool, PR_FREE Block )
+{
+       PR_FREE Free;
+       Free = pool->FirstFree;
+       if ( Free == Block )
+               return 1;
+       while ( Free != Block )
+       {
+               Free = Free->NextFree;
+               if ( !Free )
+                       return 0;
+       }
+       return 1;
+}
+
+static void
+RPoolRedZoneCheck ( PR_POOL pool, const char* file, int line )
+{
+       {
+               PR_USED Block = (PR_USED)pool->UserBase;
+               PR_USED NextBlock;
+
+               for ( ;; )
+               {
+                       switch ( Block->Status )
+                       {
+                       case 0: // block is in chain
+                               ASSERT ( RiInFreeChain ( pool, (PR_FREE)Block ) );
+                               break;
+                       case 1: // block is allocated
+                               RUsedRedZoneCheck ( pool, Block, RHdrToBody(Block), file, line );
+                               break;
+                       case 2: // block is in que
+                               // nothing to verify here yet
+                               break;
+                       default:
+                               ASSERT ( !"invalid status in memory block found in pool!" );
+                       }
+                       NextBlock = (PR_USED)RNextBlock(pool,(PR_FREE)Block);
+                       if ( !NextBlock )
+                               break;
+                       ASSERT ( NextBlock->PrevSize == Block->Size );
+                       Block = NextBlock;
+               }
+       }
+       {
+               // now let's step through the list of free pointers and verify
+               // each one can be found by size-jumping...
+               PR_FREE Free = (PR_FREE)pool->FirstFree;
+               while ( Free )
+               {
+                       PR_FREE NextFree = (PR_FREE)pool->UserBase;
+                       if ( Free != NextFree )
+                       {
+                               while ( NextFree != Free )
+                               {
+                                       NextFree = RNextBlock ( pool, NextFree );
+                                       ASSERT(NextFree);
+                               }
+                       }
+                       Free = Free->NextFree;
+               }
+       }
+}
+
+static void
+RSetSize ( PR_POOL pool, PR_FREE Block, rulong NewSize, PR_FREE NextBlock )
+{
+       R_ASSERT_PTR(pool,Block);
+       ASSERT ( NewSize < pool->UserSize );
+       ASSERT ( NewSize >= sizeof(R_FREE) );
+       Block->Size = NewSize;
+       if ( !NextBlock )
+               NextBlock = RNextBlock ( pool, Block );
+       if ( NextBlock )
+               NextBlock->PrevSize = NewSize;
+}
+
+static PR_FREE
+RFreeSplit ( PR_POOL pool, PR_FREE Block, rulong NewSize )
+{
+       PR_FREE NewBlock = (PR_FREE)((char*)Block + NewSize);
+       RSetSize ( pool, NewBlock, Block->Size - NewSize, NULL );
+       RSetSize ( pool, Block, NewSize, NewBlock );
+       RFreeInit ( NewBlock );
+       RPoolAddFree ( pool, NewBlock );
+       return NewBlock;
+}
+
+static void
+RFreeMerge ( PR_POOL pool, PR_FREE First, PR_FREE Second )
+{
+       ASSERT ( RPreviousBlock(Second) == First );
+       ASSERT ( First->Size == Second->PrevSize );
+       RPoolRemoveFree ( pool, Second );
+       RSetSize ( pool, First, First->Size + Second->Size, NULL );
+}
+
+static void
+RPoolReclaim ( PR_POOL pool, PR_FREE FreeBlock )
+{
+       PR_FREE NextBlock, PreviousBlock;
+
+       RFreeInit ( FreeBlock );
+       RPoolAddFree ( pool, FreeBlock );
+
+       // TODO FIXME - don't merge and always insert freed blocks at the end for debugging purposes...
+
+       /*
+        * If the next block is immediately adjacent to the newly freed one then
+        * merge them.
+        * PLEASE DO NOT WIPE OUT 'MAGIC' OR 'LASTOWNER' DATA FOR MERGED FREE BLOCKS
+        */
+       NextBlock = RNextBlock ( pool, FreeBlock );
+       if ( NextBlock != NULL && !NextBlock->Status )
+       {
+               RFreeMerge ( pool, FreeBlock, NextBlock );
+       }
+
+       /*
+        * If the previous block is adjacent to the newly freed one then
+        * merge them.
+        * PLEASE DO NOT WIPE OUT 'MAGIC' OR 'LASTOWNER' DATA FOR MERGED FREE BLOCKS
+        */
+       PreviousBlock = RPreviousBlock ( FreeBlock );
+       if ( PreviousBlock != NULL && !PreviousBlock->Status )
+       {
+               RFreeMerge ( pool, PreviousBlock, FreeBlock );
+       }
+}
+
+static void
+RiUsedInit ( PR_USED Block, rulong Tag )
+{
+       Block->Status = 1;
+       RUsedFillStack ( Block );
+#ifdef R_MAGIC
+       Block->UsedMagic = R_USED_MAGIC;
+#endif//R_MAGIC
+       //ASSERT_SIZE ( Block->Size );
+
+	// poison NextUsed - the new implementation keeps no global used-block list
+#if defined(DBG) || defined(KDBG)
+	Block->NextUsed = (PR_USED)0xDEADBEEF;
+#endif//DBG || KDBG
+
+       Block->Tag = Tag;
+}
+
+#if !R_RZ
+#define RiUsedInitRedZone(Block,UserSize)
+#else//R_RZ
+static void
+RiUsedInitRedZone ( PR_USED Block, rulong UserSize )
+{
+       // write out buffer-overrun detection bytes
+       char* Addr = (char*)RHdrToBody(Block);
+       Block->UserSize = UserSize;
+       memset ( Addr - R_RZ, R_RZ_LOVALUE, R_RZ );
+       memset ( Addr + Block->UserSize, R_RZ_HIVALUE, R_RZ );
+#if defined(DBG) || defined(KDBG)
+       memset ( Addr, 0xCD, UserSize );
+#endif//DBG || KDBG
+}
+#endif//R_RZ
+
+static void*
+RPoolAlloc ( PR_POOL pool, rulong NumberOfBytes, rulong Tag, rulong align )
+{
+       PR_USED NewBlock;
+       PR_FREE BestBlock,
+               NextBlock,
+               PreviousBlock,
+               BestPreviousBlock,
+               CurrentBlock;
+       void* BestAlignedAddr;
+       int que,
+               queBytes = NumberOfBytes;
+       rulong BlockSize,
+               Alignment;
+       int que_reclaimed = 0;
+
+       ASSERT ( pool );
+       ASSERT ( align < 3 );
+
+       R_ACQUIRE_MUTEX(pool);
+
+       if ( !NumberOfBytes )
+       {
+               R_DEBUG("0 bytes requested - initiating pool verification\n");
+               RPoolRedZoneCheck ( pool, __FILE__, __LINE__ );
+               R_RELEASE_MUTEX(pool);
+               return NULL;
+       }
+       if ( NumberOfBytes > pool->PoolSize )
+       {
+               if ( R_IS_POOL_PTR(pool,NumberOfBytes) )
+               {
+                       R_DEBUG("red zone verification requested for block 0x%X\n", NumberOfBytes );
+                       RUsedRedZoneCheck(pool,RBodyToHdr((void*)NumberOfBytes), (char*)NumberOfBytes, __FILE__, __LINE__ );
+                       R_RELEASE_MUTEX(pool);
+                       return NULL;
+               }
+               R_DEBUG("Invalid allocation request: %i bytes\n", NumberOfBytes );
+               R_RELEASE_MUTEX(pool);
+               return NULL;
+       }
+
+       que = RQueWhich ( NumberOfBytes );
+       if ( que >= 0 )
+       {
+               if ( (NewBlock = RQueRemove ( &pool->Que[que][align] )) )
+               {
+                       R_RELEASE_MUTEX(pool);
+                       RiUsedInit ( NewBlock, Tag );
+                       RiUsedInitRedZone ( NewBlock, NumberOfBytes );
+                       return RHdrToBody(NewBlock);
+               }
+               queBytes = 16 << que;
+       }
+
+       /*
+        * Calculate the total number of bytes we will need.
+        */
+       BlockSize = queBytes + sizeof(R_USED) + 2*R_RZ;
+       if (BlockSize < sizeof(R_FREE))
+       {
+               /* At least we need the size of the free block header. */
+               BlockSize = sizeof(R_FREE);
+       }
+
+try_again:
+       /*
+        * Find the best-fitting block.
+        */
+       BestBlock = NULL;
+       Alignment = pool->Alignments[align];
+       PreviousBlock = NULL;
+	BestPreviousBlock = NULL;
+       CurrentBlock = pool->FirstFree;
+       BestAlignedAddr = NULL;
+
+       while ( CurrentBlock != NULL )
+       {
+               PVOID Addr = RHdrToBody(CurrentBlock);
+               PVOID CurrentBlockEnd = (char*)CurrentBlock + CurrentBlock->Size;
+               /* calculate last size-aligned address available within this block */
+               PVOID AlignedAddr = R_ROUND_DOWN((char*)CurrentBlockEnd-queBytes-R_RZ, Alignment);
+               ASSERT ( (char*)AlignedAddr+queBytes+R_RZ <= (char*)CurrentBlockEnd );
+
+               /* special case, this address is already size-aligned, and the right size */
+               if ( Addr == AlignedAddr )
+               {
+                       BestAlignedAddr = AlignedAddr;
+                       BestPreviousBlock = PreviousBlock;
+                       BestBlock = CurrentBlock;
+                       break;
+               }
+               // if we carve out a size-aligned block... is it still past the end of this
+               // block's free header?
+               else if ( (char*)RBodyToHdr(AlignedAddr)
+                       >= (char*)CurrentBlock+sizeof(R_FREE) )
+               {
+                       /*
+                        * there's enough room to allocate our size-aligned memory out
+                        * of this block, see if it's a better choice than any previous
+                        * finds
+                        */
+                       if ( BestBlock == NULL
+                               || BestBlock->Size > CurrentBlock->Size )
+                       {
+                               BestAlignedAddr = AlignedAddr;
+                               BestPreviousBlock = PreviousBlock;
+                               BestBlock = CurrentBlock;
+                       }
+               }
+
+               PreviousBlock = CurrentBlock;
+               CurrentBlock = CurrentBlock->NextFree;
+       }
+
+       /*
+        * We didn't find anything suitable at all.
+        */
+       if (BestBlock == NULL)
+       {
+               if ( !que_reclaimed )
+               {
+                       // reclaim que
+                       int i, j;
+                       for ( i = 0; i < R_QUECOUNT; i++ )
+                       {
+                               for ( j = 0; j < 3; j++ )
+                               {
+                                       while ( (BestBlock = (PR_FREE)RQueRemove ( &pool->Que[i][j] )) )
+                                       {
+                                               RPoolReclaim ( pool, BestBlock );
+                                       }
+                               }
+                       }
+
+                       que_reclaimed = 1;
+                       goto try_again;
+               }
+		/*DPRINT1("Trying to allocate %lu bytes from paged pool - nothing suitable found, returning NULL\n",
+			queBytes );*/
+		R_RELEASE_MUTEX(pool);
+		return NULL;
+       }
+       /*
+        * we found a best block. If Addr isn't already aligned, we've pre-qualified that
+        * there's room at the beginning of the block for a free block...
+        */
+       {
+               void* Addr = RHdrToBody(BestBlock);
+               if ( BestAlignedAddr != Addr )
+               {
+                       PR_FREE NewFreeBlock = RFreeSplit (
+                               pool,
+                               BestBlock,
+                               (char*)RBodyToHdr(BestAlignedAddr) - (char*)BestBlock );
+                       ASSERT ( BestAlignedAddr > Addr );
+
+                       //DPRINT ( "breaking off preceding bytes into their own block...\n" );
+                       /*DPRINT ( "NewFreeBlock 0x%x Size %lu (Old Block's new size %lu) NextFree 0x%x\n",
+                               NewFreeBlock, NewFreeBlock->Size, BestBlock->Size, BestBlock->NextFree );*/
+
+                       /* we want the following code to use our size-aligned block */
+                       BestPreviousBlock = BestBlock;
+                       BestBlock = NewFreeBlock;
+
+                       //VerifyPagedPool();
+               }
+       }
+       /*
+        * Is there enough space to create a second block from the unused portion.
+        */
+       if ( (BestBlock->Size - BlockSize) > sizeof(R_FREE) )
+       {
+               /*DPRINT("BestBlock 0x%x Size 0x%x BlockSize 0x%x NewSize 0x%x\n",
+                       BestBlock, BestBlock->Size, BlockSize, NewSize );*/
+
+               /*
+                * Create the new free block.
+                */
+               NextBlock = RFreeSplit ( pool, BestBlock, BlockSize );
+               //ASSERT_SIZE ( NextBlock->Size );
+       }
+       /*
+        * Remove the selected block from the list of free blocks.
+        */
+       //DPRINT ( "Removing selected block from free block list\n" );
+       RPoolRemoveFree ( pool, BestBlock );
+       /*
+        * Create the new used block header.
+        */
+       NewBlock = (PR_USED)BestBlock;
+       RiUsedInit ( NewBlock, Tag );
+
+       R_RELEASE_MUTEX(pool);
+
+       /*  RtlZeroMemory(RHdrToBody(NewBlock), NumberOfBytes);*/
+
+       RiUsedInitRedZone ( NewBlock, NumberOfBytes );
+
+       return RHdrToBody(NewBlock);
+}
+
+static void
+RPoolFree ( PR_POOL pool, void* Addr )
+{
+       PR_USED UsedBlock;
+       rulong UsedSize;
+       PR_FREE FreeBlock;
+       rulong UserSize;
+       int que;
+
+       ASSERT(pool);
+       if ( !Addr )
+       {
+               R_DEBUG("Attempt to free NULL ptr, initiating Red Zone Check\n" );
+               R_ACQUIRE_MUTEX(pool);
+               RPoolRedZoneCheck ( pool, __FILE__, __LINE__ );
+               R_RELEASE_MUTEX(pool);
+               return;
+       }
+       R_ASSERT_PTR(pool,Addr);
+
+       UsedBlock = RBodyToHdr(Addr);
+       UsedSize = UsedBlock->Size;
+       FreeBlock = (PR_FREE)UsedBlock;
+#if R_RZ
+       UserSize = UsedBlock->UserSize;
+#else
+       UserSize = UsedSize - sizeof(R_USED) - 2*R_RZ;
+#endif//R_RZ
+
+       RUsedRedZoneCheck ( pool, UsedBlock, Addr, __FILE__, __LINE__ );
+
+#if R_RZ
+       memset ( Addr, 0xCD, UsedBlock->UserSize );
+#endif
+
+       que = RQueWhich ( UserSize );
+       if ( que >= 0 )
+       {
+               int queBytes = 16 << que;
+               ASSERT( queBytes >= UserSize );
+               if ( que >= 0 )
+               {
+                       int align = 0;
+                       if ( R_ROUND_UP(Addr,pool->Alignments[2]) == Addr )
+                               align = 2;
+                       else if ( R_ROUND_UP(Addr,pool->Alignments[1]) == Addr )
+                               align = 1;
+                       R_ACQUIRE_MUTEX(pool);
+                       RQueAdd ( &pool->Que[que][align], UsedBlock );
+                       R_RELEASE_MUTEX(pool);
+                       return;
+               }
+       }
+
+       R_ACQUIRE_MUTEX(pool);
+       RPoolReclaim ( pool, FreeBlock );
+       R_RELEASE_MUTEX(pool);
+}
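+
+// Example: a freed 100-byte allocation does not go back on the free list
+// right away; RQueWhich(100) == 3, so the block is parked on Que[3][align] and
+// only reclaimed (and merged with its neighbours) once RPoolAlloc runs out of
+// suitable free blocks. Frees larger than 512 bytes are reclaimed immediately.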
+
+static void
+RPoolDumpByTag ( PR_POOL pool, rulong Tag )
+{
+       PR_USED Block = (PR_USED)pool->UserBase;
+       PR_USED NextBlock;
+       int count = 0;
+       char tag[5];
+
+       // TODO FIXME - should we validate params or ASSERT_IRQL?
+       R_DEBUG ( "PagedPool Dump by tag '%s'\n", RFormatTag(Tag,tag) );
+       R_DEBUG ( "  -BLOCK-- --SIZE--\n" );
+
+       R_ACQUIRE_MUTEX(pool);
+       for ( ;; )
+       {
+               if ( Block->Status == 1 && Block->Tag == Tag )
+               {
+                       R_DEBUG ( "  %08X %08X\n", Block, Block->Size );
+                       ++count;
+               }
+               NextBlock = (PR_USED)RNextBlock(pool,(PR_FREE)Block);
+               if ( !NextBlock )
+                       break;
+               ASSERT ( NextBlock->PrevSize == Block->Size );
+               Block = NextBlock;
+       }
+       R_RELEASE_MUTEX(pool);
+
+       R_DEBUG ( "Entries found for tag '%s': %i\n", tag, count );
+}
+
+rulong
+RPoolQueryTag ( void* Addr )
+{
+       PR_USED Block = RBodyToHdr(Addr);
+       // TODO FIXME - should we validate params?
+#ifdef R_MAGIC
+       if ( Block->UsedMagic != R_USED_MAGIC )
+               return 0xDEADBEEF;
+#endif//R_MAGIC
+       if ( Block->Status != 1 )
+               return 0xDEADBEEF;
+       return Block->Tag;
+}
+
+void
+RPoolStats ( PR_POOL pool )
+{
+       int free=0, used=0, qued=0;
+       PR_USED Block = (PR_USED)pool->UserBase;
+
+       R_ACQUIRE_MUTEX(pool);
+       while ( Block )
+       {
+               switch ( Block->Status )
+               {
+               case 0:
+                       ++free;
+                       break;
+               case 1:
+                       ++used;
+                       break;
+               case 2:
+                       ++qued;
+                       break;
+               default:
+                       ASSERT ( !"Invalid Status for Block in pool!" );
+               }
+               Block = (PR_USED)RNextBlock(pool,(PR_FREE)Block);
+       }
+       R_RELEASE_MUTEX(pool);
+
+       R_DEBUG ( "Pool Stats: Free=%i, Used=%i, Qued=%i, Total=%i\n", free, used, qued, (free+used+qued) );
+}
+
+#ifdef R_LARGEST_ALLOC_POSSIBLE
+static rulong
+RPoolLargestAllocPossible ( PR_POOL pool, int align )
+{
+       int Alignment = pool->Alignments[align];
+       rulong LargestUserSize = 0;
+       PR_FREE Block = (PR_FREE)pool->UserBase;
+       while ( Block )
+       {
+               if ( Block->Status != 1 )
+               {
+                       void* Addr, *AlignedAddr;
+                       rulong BlockMaxUserSize;
+                       int cue, cueBytes;
+
+                       Addr = (char*)Block + sizeof(R_USED) + R_RZ;
+                       AlignedAddr = R_ROUND_UP(Addr,Alignment);
+                       if ( Addr != AlignedAddr )
+                               Addr = R_ROUND_UP((char*)Block + sizeof(R_FREE) + sizeof(R_USED) + R_RZ, Alignment );
+                       BlockMaxUserSize = (char*)Block + Block->Size - (char*)Addr - R_RZ;
+                       cue = RQueWhich ( BlockMaxUserSize );
+                       if ( cue >= 0 )
+                       {
+                               cueBytes = 16 << cue;
+				if ( cueBytes > BlockMaxUserSize )
+                               {
+                                       if ( !cue )
+                                               BlockMaxUserSize = 0;
+                                       else
+                                               BlockMaxUserSize = 16 << (cue-1);
+                               }
+                       }
+                       if ( BlockMaxUserSize > LargestUserSize )
+                               LargestUserSize = BlockMaxUserSize;
+               }
+               Block = RNextBlock ( pool, Block );
+       }
+       return LargestUserSize;
+}
+#endif//R_LARGEST_ALLOC_POSSIBLE
+
+#endif//RPOOLMGR_H
diff --git a/reactos/ntoskrnl/mm/ppool.c b/reactos/ntoskrnl/mm/ppool.c
index 8eeeb66..ef93d52 100644 (file)
@@ -1,4 +1,4 @@
-/* $Id: ppool.c,v 1.36 2004/12/13 20:11:08 arty Exp $
+/* $Id: ppool.c,v 1.37 2004/12/17 13:20:05 royce Exp $
  *
  * COPYRIGHT:       See COPYING in the top level directory
  * PROJECT:         ReactOS kernel
  * PROGRAMMER:      David Welch (welch@mcmail.com)
  * UPDATE HISTORY:
  *                  Created 22/05/98
+ *                  Complete Rewrite Dec 2004 by Royce Mitchell III
  */
 
 /* INCLUDES *****************************************************************/
 
+#ifdef PPOOL_UMODE_TEST
+#include "ppool_umode.h"
+#else//PPOOL_UMODE_TEST
 #include <ntoskrnl.h>
 #define NDEBUG
 #include <internal/debug.h>
-
-/* GLOBALS *******************************************************************/
-
-/* Define to enable strict checking of the paged pool on every allocation */
-/* #define ENABLE_VALIDATE_POOL */
+#endif//PPOOL_UMODE_TEST
 
 #undef ASSERT
 #define ASSERT(x) if (!(x)) {DbgPrint("Assertion "#x" failed at %s:%d\n", __FILE__,__LINE__); KeBugCheck(0); }
-#define ASSERT_SIZE(n) ASSERT ( (n) <= MmPagedPoolSize && (n) > 0 )
-#define IS_PPOOL_PTR(p) ((size_t)(p)) >= ((size_t)MmPagedPoolBase) && ((size_t)(p)) < ((size_t)((size_t)MmPagedPoolBase+MmPagedPoolSize))
-#define ASSERT_PTR(p) ASSERT ( IS_PPOOL_PTR(p) )
-
-// to disable buffer over/under-run detection, set the following macro to 0
-#if !defined(DBG) && !defined(KDBG)
-#define MM_PPOOL_REDZONE_BYTES 0
-#else
-#define MM_PPOOL_REDZONE_BYTES 4
-#define MM_PPOOL_REDZONE_LOVALUE 0x87
-#define MM_PPOOL_REDZONE_HIVALUE 0xA5
-#define MM_PPOOL_FREEMAGIC (ULONG)(('F'<<0) + ('r'<<8) + ('E'<<16) + ('e'<<24))
-#define MM_PPOOL_USEDMAGIC (ULONG)(('u'<<0) + ('S'<<8) + ('e'<<16) + ('D'<<24))
-#define MM_PPOOL_LASTOWNER_ENTRIES 3
-#endif
-
-typedef struct _MM_PPOOL_FREE_BLOCK_HEADER
-{
-#if MM_PPOOL_REDZONE_BYTES
-   ULONG FreeMagic;
-#endif//MM_PPOOL_REDZONE_BYTES
-   ULONG Size;
-   struct _MM_PPOOL_FREE_BLOCK_HEADER* NextFree;
-#if MM_PPOOL_REDZONE_BYTES
-   ULONG LastOwnerStack[MM_PPOOL_LASTOWNER_ENTRIES];
-#endif//MM_PPOOL_REDZONE_BYTES
-}
-MM_PPOOL_FREE_BLOCK_HEADER, *PMM_PPOOL_FREE_BLOCK_HEADER;
 
-typedef struct _MM_PPOOL_USED_BLOCK_HEADER
-{
-#if MM_PPOOL_REDZONE_BYTES
-   ULONG UsedMagic;
-#endif//MM_PPOOL_REDZONE_BYTES
-   ULONG Size;
-#if MM_PPOOL_REDZONE_BYTES
-   ULONG UserSize; // how many bytes the user actually asked for...
-#endif//MM_PPOOL_REDZONE_BYTES
-   struct _MM_PPOOL_USED_BLOCK_HEADER* NextUsed;
-   ULONG Tag;
-#if MM_PPOOL_REDZONE_BYTES
-   ULONG LastOwnerStack[MM_PPOOL_LASTOWNER_ENTRIES];    
-#endif//MM_PPOOL_REDZONE_BYTES
-}
-MM_PPOOL_USED_BLOCK_HEADER, *PMM_PPOOL_USED_BLOCK_HEADER;
+// enable "magic"
+//#define R_MAGIC
+#define R_MUTEX FAST_MUTEX
+#define R_ACQUIRE_MUTEX(pool) /*DPRINT1("Acquiring PPool Mutex\n");*/ ExAcquireFastMutex(&pool->Mutex)
+#define R_RELEASE_MUTEX(pool) /*DPRINT1("Releasing PPool Mutex\n");*/ ExReleaseFastMutex(&pool->Mutex)
+#define R_PRINT_ADDRESS(addr) 0
+#define R_PANIC() KeBugCheck(0)
+#define R_DEBUG DbgPrint
+#define R_EXTRA_STACK_UP 2
+
+#include "RPoolMgr.h"
+
+/* GLOBALS *******************************************************************/
 
 PVOID MmPagedPoolBase;
 ULONG MmPagedPoolSize;
-ULONG MmTotalPagedPoolQuota = 0;
-static FAST_MUTEX MmPagedPoolLock;
-static PMM_PPOOL_FREE_BLOCK_HEADER MmPagedPoolFirstFreeBlock;
-static PMM_PPOOL_USED_BLOCK_HEADER MmPagedPoolFirstUsedBlock;
+ULONG MmTotalPagedPoolQuota = 0; // TODO FIXME - currently unused
+static PR_POOL MmPagedPool = NULL;
 
 /* FUNCTIONS *****************************************************************/
 
-inline static void* block_to_address ( PVOID blk )
-/*
- * FUNCTION: Translate a block header address to the corresponding block
- * address (internal)
- */
-{
-   return ( (void *) ((char*)blk + sizeof(MM_PPOOL_USED_BLOCK_HEADER) + MM_PPOOL_REDZONE_BYTES) );
-}
-
-inline static PMM_PPOOL_USED_BLOCK_HEADER address_to_block(PVOID addr)
-{
-   return (PMM_PPOOL_USED_BLOCK_HEADER)
-          ( ((char*)addr) - sizeof(MM_PPOOL_USED_BLOCK_HEADER) - MM_PPOOL_REDZONE_BYTES );
-}
-
 VOID INIT_FUNCTION
-MmInitializePagedPool(VOID)
-{
-   MmPagedPoolFirstFreeBlock = (PMM_PPOOL_FREE_BLOCK_HEADER)MmPagedPoolBase;
-   /*
-    * We are still at a high IRQL level at this point so explicitly commit
-    * the first page of the paged pool before writing the first block header.
-    */
-   MmCommitPagedPoolAddress((PVOID)MmPagedPoolFirstFreeBlock, FALSE);
-   MmPagedPoolFirstFreeBlock->Size = MmPagedPoolSize;
-   MmPagedPoolFirstFreeBlock->NextFree = NULL;
-
-#if MM_PPOOL_REDZONE_BYTES
-   MmPagedPoolFirstFreeBlock->FreeMagic = MM_PPOOL_FREEMAGIC;
-   {
-      int i;
-      for ( i = 0; i < MM_PPOOL_LASTOWNER_ENTRIES; i++ )
-         MmPagedPoolFirstFreeBlock->LastOwnerStack[i] = 0;
-   }
-
-   MmPagedPoolFirstUsedBlock = NULL;
-#endif//MM_PPOOL_REDZONE_BYTES
-
-   ExInitializeFastMutex(&MmPagedPoolLock);
-}
-
-#ifdef ENABLE_VALIDATE_POOL
-static void VerifyPagedPool ( int line )
+MmInitializePagedPool()
 {
-   PMM_PPOOL_FREE_BLOCK_HEADER p = MmPagedPoolFirstFreeBlock;
-   int count = 0;
-   DPRINT ( "VerifyPagedPool(%i):\n", line );
-   while ( p )
-   {
-      DPRINT ( "  0x%x: %lu bytes (next 0x%x)\n", p, p->Size, p->NextFree );
-#if MM_PPOOL_REDZONE_BYTES
-      ASSERT ( p->FreeMagic == MM_PPOOL_FREEMAGIC );
-#endif//MM_PPOOL_REDZONE_BYTES
-      ASSERT_PTR(p);
-      ASSERT_SIZE(p->Size);
-      count++;
-      p = p->NextFree;
-   }
-   DPRINT ( "VerifyPagedPool(%i): (%lu blocks)\n", line, count );
-}
-#define VerifyPagedPool() VerifyPagedPool(__LINE__)
-#else
-#define VerifyPagedPool()
-#endif
-
-BOOLEAN STDCALL
-KeRosPrintAddress(PVOID address);
-
-#if !MM_PPOOL_REDZONE_BYTES
-#define MmpRedZoneCheck(pUsed,Addr,file,line)
-#else//MM_PPOOL_REDZONE_BYTES
-static VOID FASTCALL
-MmpRedZoneCheck ( PMM_PPOOL_USED_BLOCK_HEADER pUsed, PUCHAR Addr, const char* file, int line )
-{
-   int i;
-   PUCHAR AddrEnd = Addr + pUsed->UserSize;
-   BOOL bLow = TRUE;
-   BOOL bHigh = TRUE;
-
-   ASSERT_PTR(Addr);
-   if ( pUsed->UsedMagic == MM_PPOOL_FREEMAGIC )
-   {
-      PMM_PPOOL_FREE_BLOCK_HEADER pFree = (PMM_PPOOL_FREE_BLOCK_HEADER)pUsed;
-      DPRINT1 ( "Double-free detected for Block 0x%x (kthread=0x%x)!\n", Addr, KeGetCurrentThread() );
-      DbgPrint ( "First Free Stack Frames:" );
-      for ( i = 0; i < MM_PPOOL_LASTOWNER_ENTRIES; i++ )
-         {
-                 if ( pFree->LastOwnerStack[i] != 0xDEADBEEF )
-                 {
-                       DbgPrint(" ");
-                       if (!KeRosPrintAddress ((PVOID)pFree->LastOwnerStack[i]) )
-                       {
-                               DbgPrint("<%X>", pFree->LastOwnerStack[i] );
-                       }
-                 }
-         }
-      DbgPrint ( "\n" );
-      KEBUGCHECK(BAD_POOL_HEADER);
-   }
-   if ( pUsed->UsedMagic != MM_PPOOL_USEDMAGIC )
-   {
-      DPRINT1 ( "Bad magic in Block 0x%x!\n", Addr );
-      KEBUGCHECK(BAD_POOL_HEADER);
-   }
-   ASSERT_SIZE(pUsed->Size);
-   ASSERT_SIZE(pUsed->UserSize);
-   ASSERT_PTR(AddrEnd);
-   Addr -= MM_PPOOL_REDZONE_BYTES; // this is to simplify indexing below...
-   for ( i = 0; i < MM_PPOOL_REDZONE_BYTES && bLow && bHigh; i++ )
-   {
-      bLow = bLow && ( Addr[i] == MM_PPOOL_REDZONE_LOVALUE );
-      bHigh = bHigh && ( AddrEnd[i] == MM_PPOOL_REDZONE_HIVALUE );
-   }
-   if ( !bLow || !bHigh )
-   {
-      const char* violation = "High and Low-side";
-      if ( bHigh ) // high is okay, so it was just low failed
-         violation = "Low-side";
-      else if ( bLow ) // low side is okay, so it was just high failed
-         violation = "High-side";
-      DbgPrint("%s(%i): %s redzone violation detected for paged pool address 0x%x\n",
-               file, line, violation, Addr );
-
-      DbgPrint ( "UsedMagic 0x%x, Tag 0x%x, LoZone ", 
-                pUsed->UsedMagic,
-                pUsed->Tag);
-
-      for ( i = 0; i < MM_PPOOL_REDZONE_BYTES; i++ )
-         DbgPrint ( "%02x", Addr[i] );
-      DbgPrint ( ", HiZone " );
-      for ( i = 0; i < MM_PPOOL_REDZONE_BYTES; i++ )
-         DbgPrint ( "%02x", AddrEnd[i] );
-      DbgPrint ( "\n" );
-
-      DbgPrint ( "First Free Stack Frames:" );
-      for ( i = 0; i < MM_PPOOL_LASTOWNER_ENTRIES; i++ )
-      {
-         if ( pUsed->LastOwnerStack[i] != 0xDEADBEEF )
-         {
-             DbgPrint(" ");
-             if (!KeRosPrintAddress ((PVOID)pUsed->LastOwnerStack[i]) )
-             {
-                 DbgPrint("<%X>", pUsed->LastOwnerStack[i] );
-             }
-         }
-      }
-      DbgPrint ( "\n" );
-
-      KEBUGCHECK(BAD_POOL_HEADER);
-   }
-}
-#endif//MM_PPOOL_REDZONE_BYTES
-
-VOID STDCALL
-MmDbgPagedPoolRedZoneCheck ( const char* file, int line )
-{
-#if MM_PPOOL_REDZONE_BYTES
-   PMM_PPOOL_USED_BLOCK_HEADER pUsed = MmPagedPoolFirstUsedBlock;
-
-   while ( pUsed )
-   {
-      MmpRedZoneCheck ( pUsed, block_to_address(pUsed), __FILE__, __LINE__ );
-      pUsed = pUsed->NextUsed;
-   }
-#endif//MM_PPOOL_REDZONE_BYTES
+       /*
+        * We are still at a high IRQL level at this point so explicitly commit
+        * the first page of the paged pool before writing the first block header.
+        */
+       MmCommitPagedPoolAddress ( (PVOID)MmPagedPoolBase, FALSE );
+
+       MmPagedPool = RPoolInit ( MmPagedPoolBase,
+               MmPagedPoolSize,
+               MM_POOL_ALIGNMENT,
+               MM_CACHE_LINE_SIZE,
+               PAGE_SIZE );
+
+       ExInitializeFastMutex(&MmPagedPool->Mutex);
 }
 
 /**********************************************************************
@@ -259,501 +77,169 @@ ExAllocatePagedPoolWithTag (IN POOL_TYPE PoolType,
                             IN ULONG  NumberOfBytes,
                             IN ULONG  Tag)
 {
-   PMM_PPOOL_FREE_BLOCK_HEADER BestBlock;
-   PMM_PPOOL_FREE_BLOCK_HEADER CurrentBlock;
-   ULONG BlockSize;
-   PMM_PPOOL_USED_BLOCK_HEADER NewBlock;
-   PMM_PPOOL_FREE_BLOCK_HEADER NextBlock;
-   PMM_PPOOL_FREE_BLOCK_HEADER PreviousBlock;
-   PMM_PPOOL_FREE_BLOCK_HEADER BestPreviousBlock;
-   PVOID BlockAddress;
-   ULONG Alignment;
-
-   ASSERT_IRQL(APC_LEVEL);
-
-   ExAcquireFastMutex(&MmPagedPoolLock);
-
-   /*
-    * Don't bother allocating anything for a zero-byte block.
-    */
-   if (NumberOfBytes == 0)
-   {
-      MmDbgPagedPoolRedZoneCheck(__FILE__,__LINE__);
-      ExReleaseFastMutex(&MmPagedPoolLock);
-      return(NULL);
-   }
-
-   DPRINT ( "ExAllocatePagedPoolWithTag(%i,%lu,%lu)\n", PoolType, NumberOfBytes, Tag );
-   VerifyPagedPool();
-
-   if (NumberOfBytes >= PAGE_SIZE)
-   {
-      Alignment = PAGE_SIZE;
-   }
-   else if (PoolType == PagedPoolCacheAligned)
-   {
-      Alignment = MM_CACHE_LINE_SIZE;
-   }
-   else
-   {
-      Alignment = MM_POOL_ALIGNMENT;
-   }
-
-   /*
-    * Calculate the total number of bytes we will need.
-    */
-   BlockSize = NumberOfBytes + sizeof(MM_PPOOL_USED_BLOCK_HEADER) + 2*MM_PPOOL_REDZONE_BYTES;
-   if (BlockSize < sizeof(MM_PPOOL_FREE_BLOCK_HEADER))
-   {
-      /* At least we need the size of the free block header. */
-      BlockSize = sizeof(MM_PPOOL_FREE_BLOCK_HEADER);
-   }
-
-
-   /*
-    * Find the best-fitting block.
-    */
-   PreviousBlock = NULL;
-   BestPreviousBlock = BestBlock = NULL;
-   CurrentBlock = MmPagedPoolFirstFreeBlock;
-   if ( Alignment > 0 )
-   {
-      PVOID BestAlignedAddr = NULL;
-      while ( CurrentBlock != NULL )
-      {
-         PVOID Addr = block_to_address(CurrentBlock);
-         PVOID CurrentBlockEnd = (char*)CurrentBlock + CurrentBlock->Size;
-         /* calculate last size-aligned address available within this block */
-         PVOID AlignedAddr = MM_ROUND_DOWN((char*)CurrentBlockEnd-NumberOfBytes-MM_PPOOL_REDZONE_BYTES, Alignment);
-         ASSERT ( (char*)AlignedAddr+NumberOfBytes+MM_PPOOL_REDZONE_BYTES <= (char*)CurrentBlockEnd );
-
-         /* special case, this address is already size-aligned, and the right size */
-         if ( Addr == AlignedAddr )
-         {
-            BestAlignedAddr = AlignedAddr;
-            BestPreviousBlock = PreviousBlock;
-            BestBlock = CurrentBlock;
-            break;
-         }
-         else if ( Addr < (PVOID)address_to_block(AlignedAddr) )
-         {
-            /*
-             * there's enough room to allocate our size-aligned memory out
-             * of this block, see if it's a better choice than any previous
-             * finds
-             */
-            if ( BestBlock == NULL || BestBlock->Size > CurrentBlock->Size )
-            {
-               BestAlignedAddr = AlignedAddr;
-               BestPreviousBlock = PreviousBlock;
-               BestBlock = CurrentBlock;
-            }
-         }
-
-         PreviousBlock = CurrentBlock;
-         CurrentBlock = CurrentBlock->NextFree;
-      }
-
-      /*
-       * we found a best block can/should we chop a few bytes off the beginning
-       * into a separate memory block?
-       */
-      if ( BestBlock != NULL )
-      {
-         PVOID Addr = block_to_address(BestBlock);
-         if ( BestAlignedAddr != Addr )
-         {
-            PMM_PPOOL_FREE_BLOCK_HEADER NewFreeBlock =
-               (PMM_PPOOL_FREE_BLOCK_HEADER)address_to_block(BestAlignedAddr);
-            ASSERT ( BestAlignedAddr > Addr );
-            NewFreeBlock->Size = (char*)Addr + BestBlock->Size - (char*)BestAlignedAddr;
-#if MM_PPOOL_REDZONE_BYTES
-           NewFreeBlock->FreeMagic = MM_PPOOL_FREEMAGIC;
-#endif//MM_PPOOL_REDZONE_BYTES
-            ASSERT_SIZE(NewFreeBlock->Size);
-            BestBlock->Size = (size_t)NewFreeBlock - (size_t)Addr;
-            ASSERT_SIZE(BestBlock->Size);
-
-            DPRINT ( "breaking off preceding bytes into their own block...\n" );
-            DPRINT ( "NewFreeBlock 0x%x Size %lu (Old Block's new size %lu) NextFree 0x%x\n",
-                     NewFreeBlock, NewFreeBlock->Size, BestBlock->Size, BestBlock->NextFree );
-
-            /* insert the new block into the chain */
-            NewFreeBlock->NextFree = BestBlock->NextFree;
-            BestBlock->NextFree = NewFreeBlock;
-
-            /* we want the following code to use our size-aligned block */
-            BestPreviousBlock = BestBlock;
-            BestBlock = NewFreeBlock;
-
-            //VerifyPagedPool();
-         }
-      }
-   }
-   /*
-    * non-size-aligned block search
-    */
-   else
-      while ( CurrentBlock != NULL )
-      {
-         if (    CurrentBlock->Size >= BlockSize
-                 && ( BestBlock == NULL || BestBlock->Size > CurrentBlock->Size )
-            )
-         {
-            BestPreviousBlock = PreviousBlock;
-            BestBlock = CurrentBlock;
-         }
-
-         PreviousBlock = CurrentBlock;
-         CurrentBlock = CurrentBlock->NextFree;
-      }
-
-   /*
-    * We didn't find anything suitable at all.
-    */
-   if (BestBlock == NULL)
-   {
-      DPRINT1("Trying to allocate %lu bytes from paged pool - nothing suitable found, returning NULL\n",
-              NumberOfBytes );
-      ExReleaseFastMutex(&MmPagedPoolLock);
-      return(NULL);
-   }
-
-   DPRINT("BestBlock 0x%x NextFree 0x%x\n", BestBlock, BestBlock->NextFree );
-
-   //VerifyPagedPool();
-
-   /*
-    * Is there enough space to create a second block from the unused portion.
-    */
-   if ( BestBlock->Size > BlockSize
-         && (BestBlock->Size - BlockSize) > sizeof(MM_PPOOL_FREE_BLOCK_HEADER)
-      )
-   {
-      ULONG NewSize = BestBlock->Size - BlockSize;
-      ASSERT_SIZE ( NewSize );
-
-      //DPRINT("creating 2nd block from unused portion\n");
-      DPRINT("BestBlock 0x%x Size 0x%x BlockSize 0x%x NewSize 0x%x\n",
-             BestBlock, BestBlock->Size, BlockSize, NewSize );
-
-      /*
-       * Create the new free block.
-       */
-      //DPRINT("creating the new free block");
-      NextBlock = (PMM_PPOOL_FREE_BLOCK_HEADER)((char*)BestBlock + BlockSize);
-      //DPRINT(".");
-      NextBlock->Size = NewSize;
-#if MM_PPOOL_REDZONE_BYTES
-      NextBlock->FreeMagic = MM_PPOOL_FREEMAGIC;
-#endif//MM_PPOOL_REDZONE_BYTES
-      ASSERT_SIZE ( NextBlock->Size );
-      //DPRINT(".");
-      NextBlock->NextFree = BestBlock->NextFree;
-      //DPRINT(".\n");
-
-      /*
-       * Replace the old free block with it.
-       */
-      //DPRINT("replacing old free block with it");
-      if (BestPreviousBlock == NULL)
-      {
-         //DPRINT("(from beginning)");
-         MmPagedPoolFirstFreeBlock = NextBlock;
-      }
-      else
-      {
-         //DPRINT("(from previous)");
-         BestPreviousBlock->NextFree = NextBlock;
-      }
-      //DPRINT(".\n");
-
-      /*
-       * Create the new used block header.
-       */
-      //DPRINT("create new used block header");
-      NewBlock = (PMM_PPOOL_USED_BLOCK_HEADER)BestBlock;
-      //DPRINT(".");
-      NewBlock->Size = BlockSize;
-#if MM_PPOOL_REDZONE_BYTES
-      {
-         PULONG Frame;
-         int i;
-#if defined __GNUC__
-         __asm__("mov %%ebp, %%ebx" : "=b" (Frame) : );
-#elif defined(_MSC_VER)
-         __asm mov [Frame], ebp
-#endif
-
-         NewBlock->UsedMagic = MM_PPOOL_USEDMAGIC;
-         
-         Frame = (PULONG)Frame[0]; // step out of ExFreePagedPool
-         for ( i = 0; i < MM_PPOOL_LASTOWNER_ENTRIES; i++ )
-         {
-             if ( Frame == 0 || (ULONG)Frame == 0xDEADBEEF )
-                 NewBlock->LastOwnerStack[i] = 0xDEADBEEF;
-             else
-             {
-                 //DbgPrint ( " 0x%x", Frame[1] );
-                 NewBlock->LastOwnerStack[i] = Frame[1];
-                 Frame = (PULONG)Frame[0];
-             }
-         }
-      }
-#endif//MM_PPOOL_REDZONE_BYTES
-      ASSERT_SIZE ( NewBlock->Size );
-      //DPRINT(".\n");
-   }
-   else
-   {
-      ULONG NewSize = BestBlock->Size;
-
-      /*
-       * Remove the selected block from the list of free blocks.
-       */
-      //DPRINT ( "Removing selected block from free block list\n" );
-      if (BestPreviousBlock == NULL)
-      {
-         MmPagedPoolFirstFreeBlock = BestBlock->NextFree;
-      }
-      else
-      {
-         BestPreviousBlock->NextFree = BestBlock->NextFree;
-      }
-
-      /*
-       * Set up the header of the new block
-       */
-      NewBlock = (PMM_PPOOL_USED_BLOCK_HEADER)BestBlock;
-      NewBlock->Size = NewSize;
-#if MM_PPOOL_REDZONE_BYTES
-      {
-         PULONG Frame;
-         int i;
-#if defined __GNUC__
-         __asm__("mov %%ebp, %%ebx" : "=b" (Frame) : );
-#elif defined(_MSC_VER)
-         __asm mov [Frame], ebp
-#endif
-
-         NewBlock->UsedMagic = MM_PPOOL_USEDMAGIC;
-         
-         Frame = (PULONG)Frame[0]; // step out of ExFreePagedPool
-         for ( i = 0; i < MM_PPOOL_LASTOWNER_ENTRIES; i++ )
-         {
-             if ( Frame == 0 || (ULONG)Frame == 0xDEADBEEF )
-                 NewBlock->LastOwnerStack[i] = 0xDEADBEEF;
-             else
-             {
-                 //DbgPrint ( " 0x%x", Frame[1] );
-                 NewBlock->LastOwnerStack[i] = Frame[1];
-                 Frame = (PULONG)Frame[0];
-             }
-         }
-      }
-#endif//MM_PPOOL_REDZONE_BYTES
-      ASSERT_SIZE ( NewBlock->Size );
-   }
-
-   // now add the block to the used block list
-   NewBlock->NextUsed = MmPagedPoolFirstUsedBlock;
-   MmPagedPoolFirstUsedBlock = NewBlock;
-
-   NewBlock->Tag = Tag;
-
-   VerifyPagedPool();
-
-   ExReleaseFastMutex(&MmPagedPoolLock);
-
-   BlockAddress = block_to_address ( NewBlock );
-   /*  RtlZeroMemory(BlockAddress, NumberOfBytes);*/
-
-#if MM_PPOOL_REDZONE_BYTES
-
-   NewBlock->UserSize = NumberOfBytes;
-   // write out buffer-overrun detection bytes
-   {
-      PUCHAR Addr = (PUCHAR)BlockAddress;
-      //DbgPrint ( "writing buffer-overrun detection bytes" );
-      memset ( Addr - MM_PPOOL_REDZONE_BYTES,
-               MM_PPOOL_REDZONE_LOVALUE, MM_PPOOL_REDZONE_BYTES );
-      memset ( Addr + NewBlock->UserSize, MM_PPOOL_REDZONE_HIVALUE,
-               MM_PPOOL_REDZONE_BYTES );
-   }
-
-#endif//MM_PPOOL_REDZONE_BYTES
-
-   return(BlockAddress);
+       int align;
+
+       if ( NumberOfBytes >= PAGE_SIZE )
+               align = 2;
+       else if ( PoolType == PagedPoolCacheAligned )
+               align = 1;
+       else
+               align = 0;
+
+       ASSERT_IRQL(APC_LEVEL);
+
+       return RPoolAlloc ( MmPagedPool, NumberOfBytes, Tag, align );
 }
 
 VOID STDCALL
 ExFreePagedPool(IN PVOID Block)
 {
-   PMM_PPOOL_FREE_BLOCK_HEADER PreviousBlock;
-   PMM_PPOOL_USED_BLOCK_HEADER UsedBlock = address_to_block(Block);
-   ULONG UsedSize = UsedBlock->Size;
-   PMM_PPOOL_FREE_BLOCK_HEADER FreeBlock =
-      (PMM_PPOOL_FREE_BLOCK_HEADER)UsedBlock;
-   PMM_PPOOL_FREE_BLOCK_HEADER NextBlock;
-   PMM_PPOOL_FREE_BLOCK_HEADER NextNextBlock;
-
-   ASSERT_IRQL(APC_LEVEL);
-
-   MmpRedZoneCheck ( UsedBlock, Block, __FILE__, __LINE__ );
-
-#if MM_PPOOL_REDZONE_BYTES
-   memset ( Block, 0xCD, UsedBlock->UserSize );
-#endif
-
-   ExAcquireFastMutex(&MmPagedPoolLock);
-
-   // remove from used list...
-   {
-      PMM_PPOOL_USED_BLOCK_HEADER pPrev = MmPagedPoolFirstUsedBlock;
-      if ( pPrev == UsedBlock )
-      {
-         // special-case, our freeing block is first in list...
-         MmPagedPoolFirstUsedBlock = pPrev->NextUsed;
-      }
-      else
-      {
-         while ( pPrev && pPrev->NextUsed != UsedBlock )
-            pPrev = pPrev->NextUsed;
-         // if this assert fails - memory has been corrupted
-         // ( or I have a logic error...! )
-         ASSERT ( pPrev->NextUsed == UsedBlock );
-         pPrev->NextUsed = UsedBlock->NextUsed;
-      }
-   }
-
-   /*
-    * Begin setting up the newly freed block's header.
-    */
-   FreeBlock->Size = UsedSize;
-#if MM_PPOOL_REDZONE_BYTES
-   FreeBlock->FreeMagic = MM_PPOOL_FREEMAGIC;
-   {
-      PULONG Frame;
-      int i;
-#if defined __GNUC__
-      __asm__("mov %%ebp, %%ebx" : "=b" (Frame) : );
-#elif defined(_MSC_VER)
-      __asm mov [Frame], ebp
-#endif
-      //DbgPrint ( "Stack Frames for Free Block 0x%x:", Block );
-      Frame = (PULONG)Frame[0]; // step out of ExFreePagedPool
-      for ( i = 0; i < MM_PPOOL_LASTOWNER_ENTRIES; i++ )
-      {
-         if ( Frame == 0 || (ULONG)Frame == 0xDEADBEEF )
-            FreeBlock->LastOwnerStack[i] = 0xDEADBEEF;
-         else
-         {
-            //DbgPrint ( " 0x%x", Frame[1] );
-            FreeBlock->LastOwnerStack[i] = Frame[1];
-            Frame = (PULONG)Frame[0];
-         }
-      }
-      //DbgPrint ( "\n" );
-      //KeRosDumpStackFrames ( NULL, 4 );
-   }
-#endif//MM_PPOOL_REDZONE_BYTES
-   ASSERT_SIZE ( FreeBlock->Size );
-
-   /*
-    * Find the blocks immediately before and after the newly freed block on the free list.
-    */
-   PreviousBlock = NULL;
-   NextBlock = MmPagedPoolFirstFreeBlock;
-   while (NextBlock != NULL && NextBlock < FreeBlock)
-   {
-      PreviousBlock = NextBlock;
-      NextBlock = NextBlock->NextFree;
-   }
-
-   /*
-    * Insert the freed block on the free list.
-    */
-   if (PreviousBlock == NULL)
-   {
-      FreeBlock->NextFree = MmPagedPoolFirstFreeBlock;
-      MmPagedPoolFirstFreeBlock = FreeBlock;
-   }
-   else
-   {
-      PreviousBlock->NextFree = FreeBlock;
-      FreeBlock->NextFree = NextBlock;
-   }
-
-   /*
-    * If the next block is immediately adjacent to the newly freed one then
-    * merge them.
-    * PLEASE DO NOT WIPE OUT 'MAGIC' OR 'LASTOWNER' DATA FOR MERGED FREE BLOCKS
-    */
-   if (NextBlock != NULL &&
-         ((char*)FreeBlock + FreeBlock->Size) == (char*)NextBlock)
-   {
-      FreeBlock->Size = FreeBlock->Size + NextBlock->Size;
-      ASSERT_SIZE ( FreeBlock->Size );
-      FreeBlock->NextFree = NextBlock->NextFree;
-      NextNextBlock = NextBlock->NextFree;
-   }
-   else
-   {
-      NextNextBlock = NextBlock;
-   }
-
-   /*
-    * If the previous block is adjacent to the newly freed one then
-    * merge them.
-    * PLEASE DO NOT WIPE OUT 'MAGIC' OR 'LASTOWNER' DATA FOR MERGED FREE BLOCKS
-    */
-   if (PreviousBlock != NULL &&
-         ((char*)PreviousBlock + PreviousBlock->Size) == (char*)FreeBlock)
-   {
-      PreviousBlock->Size = PreviousBlock->Size + FreeBlock->Size;
-      ASSERT_SIZE ( PreviousBlock->Size );
-      PreviousBlock->NextFree = NextNextBlock;
-   }
-
-   VerifyPagedPool();
-
-   ExReleaseFastMutex(&MmPagedPoolLock);
+       ASSERT_IRQL(APC_LEVEL);
+       RPoolFree ( MmPagedPool, Block );
 }
 
 VOID STDCALL
 ExRosDumpPagedPoolByTag ( ULONG Tag )
 {
-       PMM_PPOOL_USED_BLOCK_HEADER UsedBlock = MmPagedPoolFirstUsedBlock;
-       int count = 0;
-       char tag[5];
-
-       // TODO FIXME - should we validate params or ASSERT_IRQL?
-       *(ULONG*)&tag[0] = Tag;
-       tag[4] = 0;
-       DbgPrint ( "PagedPool Dump by tag '%s'\n", tag );
-       DbgPrint ( "  -BLOCK-- --SIZE--\n" );
-       while ( IS_PPOOL_PTR(UsedBlock) )
+       // TODO FIXME - should we ASSERT_IRQL?
+       RPoolDumpByTag ( MmPagedPool, Tag );
+}
+
+ULONG STDCALL
+ExRosQueryPagedPoolTag ( PVOID Addr )
+{
+       // TODO FIXME - should we ASSERT_IRQL?
+       return RPoolQueryTag ( Addr );
+}
+
+#ifdef PPOOL_UMODE_TEST
+
+PVOID TestAlloc ( ULONG Bytes )
+{
+       PVOID ret;
+
+       //printf ( "Allocating block: " ); RPoolStats ( MmPagedPool );
+       //RPoolRedZoneCheck ( MmPagedPool, __FILE__, __LINE__ );
+
+       ret = ExAllocatePagedPoolWithTag ( PagedPool, Bytes, 0 );
+
+       //printf ( "Block %x allocated: ", ret ); RPoolStats ( MmPagedPool );
+       //RPoolRedZoneCheck ( MmPagedPool, __FILE__, __LINE__ );
+
+       return ret;
+}
+
+void TestFree ( PVOID ptr )
+{
+       //printf ( "Freeing block %x: ", ptr ); RPoolStats ( MmPagedPool );
+       //RPoolRedZoneCheck ( MmPagedPool, __FILE__, __LINE__ );
+       ExFreePagedPool(ptr);
+       //printf ( "Block %x freed: ", ptr ); RPoolStats ( MmPagedPool );
+       //RPoolRedZoneCheck ( MmPagedPool, __FILE__, __LINE__ );
+}
+
+int main()
+{
+#define COUNT 100
+       int i, j;
+       char* keepers[COUNT];
+       char* trash[COUNT];
+       int AllocSize[] = { 15, 31, 63, 127, 255, 511, 1023, 2047 };
+       const int ALLOCS = sizeof(AllocSize) / sizeof(0[AllocSize]);
+       DWORD dwStart;
+
+       MmPagedPoolSize = 1*1024*1024;
+       MmPagedPoolBase = malloc ( MmPagedPoolSize );
+       MmInitializePagedPool();
+
+       dwStart = GetTickCount();
+
+       printf ( "test #1 phase #1\n" );
+       for ( i = 0; i < COUNT; i++ )
+       {
+               //printf ( "keeper %i) ", i );
+               keepers[i] = TestAlloc ( AllocSize[i%ALLOCS] );
+               if ( !keepers[i] ) printf ( "allocation failed\n" );
+               //printf ( "trash %i) ", i );
+               trash[i] = TestAlloc ( AllocSize[i%ALLOCS] );
+               if ( !trash[i] ) printf ( "allocation failed\n" );
+       }
+
+       printf ( "test #1 phase #2\n" );
+       for ( i = 0; i < COUNT; i++ )
+       {
+               if ( i == 6 )
+                       i = i;
+               //printf ( "%i) ", i );
+               TestFree ( trash[i] );
+       }
+
+       printf ( "test #1 phase #3\n" );
+       for ( i = 0; i < 4; i++ )
+       {
+               //printf ( "%i) ", i );
+               keepers[i] = TestAlloc ( 4096 );
+               if ( !keepers[i] ) printf ( "allocation failed\n" );
+       }
+
+       printf ( "test #1 phase #4\n" );
+       for ( i = 0; i < 4; i++ )
+       {
+               //printf ( "%i) ", i );
+               TestFree ( keepers[i] );
+       }
+
+       printf ( "test #1 phase #5\n" );
+       srand(1);
+       for ( i = 0; i < COUNT; i++ )
+       {
+               //printf ( "%i) ", i );
+               trash[i] = TestAlloc ( rand()%1024+1 );
+               if ( !trash[i] ) printf ( "allocation failed\n" );
+       }
+       printf ( "test #1 phase #6\n" );
+       for ( i = 0; i < 10000; i++ )
+       {
+               TestFree ( trash[i%COUNT] );
+               trash[i%COUNT] = TestAlloc ( rand()%1024+1 );
+               if ( !trash[i%COUNT] ) printf ( "allocation failed\n" );
+       }
+       printf ( "test #1 phase #7\n" );
+       j = 0;
+       for ( i = 0; i < COUNT; i++ )
        {
-               if ( UsedBlock->Tag == Tag )
+               if ( trash[i] )
                {
-                       DbgPrint ( "  %08X %08X\n", UsedBlock, UsedBlock->Size );
-                       ++count;
+                       TestFree ( trash[i] );
+                       ++j;
                }
-               UsedBlock = UsedBlock->NextUsed;
        }
-       if ( UsedBlock && !IS_PPOOL_PTR(UsedBlock) )
+       printf ( "test #1 phase #8 ( freed %i of %i trash )\n", j, COUNT );
+       if ( !TestAlloc ( 2048 ) )
+               printf ( "Couldn't allocate 2048 bytes after freeing up a whole bunch of blocks\n" );
+
+       free ( MmPagedPoolBase );
+
+       printf ( "test time: %lu\n", GetTickCount() - dwStart );
+
+       printf ( "test #2\n" );
+
+       MmPagedPoolSize = 1024;
+       MmPagedPoolBase = malloc ( MmPagedPoolSize );
+       MmInitializePagedPool();
+
+       TestAlloc ( 512 );
+       i = RPoolLargestAllocPossible ( MmPagedPool, 0 );
+       if ( !TestAlloc ( i ) )
        {
-               DPRINT1 ( "!!NextUsed took me to lala land: 0x%08X\n", UsedBlock );
+               printf ( "allocating last available block failed\n" );
        }
-       DbgPrint ( "Entries found for tag '%s': %i\n", tag, count );
-}
 
-ULONG STDCALL
-ExRosQueryPagedPoolTag ( PVOID Block )
-{
-       PMM_PPOOL_USED_BLOCK_HEADER UsedBlock = address_to_block(Block);
-       // TODO FIXME - should we validate params or ASSERT_IRQL?
-       return UsedBlock->Tag;
+       free ( MmPagedPoolBase );
+
+       printf ( "done!\n" );
+       return 0;
 }
+#endif//PPOOL_UMODE_TEST
 
 /* EOF */