[NTOS]: This is why you shouldn't let Antoine Dodson commit code.
[reactos.git] / reactos / ntoskrnl / mm / ARM3 / pool.c
index aa2603a..8156f67 100644
@@ -22,14 +22,252 @@ LIST_ENTRY MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];
 PFN_NUMBER MmNumberOfFreeNonPagedPool, MiExpansionPoolPagesInitialCharge;
 PVOID MmNonPagedPoolEnd0;
 PFN_NUMBER MiStartOfInitialPoolFrame, MiEndOfInitialPoolFrame;
-
+KGUARDED_MUTEX MmPagedPoolMutex;
 MM_PAGED_POOL_INFO MmPagedPoolInfo;
+SIZE_T MmAllocatedNonPagedPool;
+ULONG MmSpecialPoolTag;
+ULONG MmConsumedPoolPercentage;
+BOOLEAN MmProtectFreedNonPagedPool;
 
 /* PRIVATE FUNCTIONS **********************************************************/
 
 VOID
 NTAPI
-MiInitializeArmPool(VOID)
+MiProtectFreeNonPagedPool(IN PVOID VirtualAddress,
+                          IN ULONG PageCount)
+{
+    PMMPTE PointerPte, LastPte;
+    MMPTE TempPte;
+
+    /* If pool is physical, can't protect PTEs */
+    if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress)) return;
+
+    /* Get PTE pointers and loop */
+    PointerPte = MiAddressToPte(VirtualAddress);
+    LastPte = PointerPte + PageCount;
+    do
+    {
+        /* Capture the PTE for safety */
+        TempPte = *PointerPte;
+
+        /* Mark it as an invalid PTE, set proto bit to recognize it as pool */
+        TempPte.u.Hard.Valid = 0;
+        TempPte.u.Soft.Prototype = 1;
+        MI_WRITE_INVALID_PTE(PointerPte, TempPte);
+    } while (++PointerPte < LastPte);
+
+    /* Flush the TLB */
+    KeFlushEntireTb(TRUE, TRUE);
+}
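
Marking the PTE both invalid and Prototype works because, on x86, only the Valid bit (bit 0) is interpreted by the MMU in a not-present entry; the remaining bits, including the one NT uses for Prototype, are software-defined. Any touch of freed protected pool therefore faults immediately, and the fault handler can tell this tag apart from an ordinary paged-out PTE. A user-mode sketch of the round trip (this bitfield layout is a simplified stand-in for MMPTE, not the kernel's real definition):

    #include <stdio.h>
    #include <stdint.h>

    /* Simplified stand-in for the x86 PTE hardware/software overlay */
    typedef union _FAKE_PTE
    {
        struct
        {
            uint32_t Valid : 1;             /* hardware Present bit */
            uint32_t Reserved : 9;
            uint32_t Prototype : 1;         /* software-defined when Valid == 0 */
            uint32_t PageFrameNumber : 21;
        } u;
        uint32_t Long;
    } FAKE_PTE;

    int main(void)
    {
        FAKE_PTE Pte = { .Long = 0 };
        Pte.u.Valid = 1;
        Pte.u.PageFrameNumber = 0x1234;

        /* "Protect": the MMU now faults on any access, Prototype tags it as pool */
        Pte.u.Valid = 0;
        Pte.u.Prototype = 1;
        printf("protected PTE: %#x\n", (unsigned)Pte.Long);

        /* "Unprotect": recognize the tag and restore the mapping untouched */
        if ((Pte.u.Valid == 0) && (Pte.u.Prototype == 1))
        {
            Pte.u.Prototype = 0;
            Pte.u.Valid = 1;
        }
        printf("restored PFN: %#x\n", (unsigned)Pte.u.PageFrameNumber);
        return 0;
    }
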
+
+BOOLEAN
+NTAPI
+MiUnProtectFreeNonPagedPool(IN PVOID VirtualAddress,
+                            IN ULONG PageCount)
+{
+    PMMPTE PointerPte;
+    MMPTE TempPte;
+    PFN_NUMBER UnprotectedPages = 0;
+
+    /* If pool is physical, there are no PTEs to unprotect */
+    if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress)) return FALSE;
+    
+    /* Get, and capture the PTE */
+    PointerPte = MiAddressToPte(VirtualAddress);
+    TempPte = *PointerPte;
+    
+    /* Loop protected PTEs */
+    while ((TempPte.u.Hard.Valid == 0) && (TempPte.u.Soft.Prototype == 1))
+    {
+        /* Unprotect the PTE */
+        TempPte.u.Hard.Valid = 1;
+        TempPte.u.Soft.Prototype = 0;
+        MI_WRITE_VALID_PTE(PointerPte, TempPte);
+        
+        /* One more page */
+        if (++UnprotectedPages == PageCount) break;
+        
+        /* Capture next PTE */
+        TempPte = *(++PointerPte);
+    }
+    
+    /* Return if any pages were unprotected */
+    return UnprotectedPages ? TRUE : FALSE;
+}
+
+VOID
+FORCEINLINE
+MiProtectedPoolUnProtectLinks(IN PLIST_ENTRY Links,
+                              OUT PVOID* PoolFlink,
+                              OUT PVOID* PoolBlink)
+{
+    BOOLEAN Safe;
+    PVOID PoolVa;
+    
+    /* Initialize variables */
+    *PoolFlink = *PoolBlink = NULL;
+    
+    /* Check if the list has entries */
+    if (IsListEmpty(Links) == FALSE)
+    {
+        /* We are going to need the forward link to do an insert */
+        PoolVa = Links->Flink;
+        
+        /* So make it safe to access */
+        Safe = MiUnProtectFreeNonPagedPool(PoolVa, 1);
+        if (Safe) *PoolFlink = PoolVa;
+    }
+    
+    /* Are we going to need a backward link too? */
+    if (Links != Links->Blink)
+    {
+        /* Get the head's backward link for the insert */
+        PoolVa = Links->Blink;
+        
+        /* Make it safe to access */
+        Safe = MiUnProtectFreeNonPagedPool(PoolVa, 1);
+        if (Safe) *PoolBlink = PoolVa;
+    }
+}
+
+VOID
+FORCEINLINE
+MiProtectedPoolProtectLinks(IN PVOID PoolFlink,
+                            IN PVOID PoolBlink)
+{
+    /* Reprotect the pages, if they got unprotected earlier */
+    if (PoolFlink) MiProtectFreeNonPagedPool(PoolFlink, 1);
+    if (PoolBlink) MiProtectFreeNonPagedPool(PoolBlink, 1);
+}
+
+VOID
+NTAPI
+MiProtectedPoolInsertList(IN PLIST_ENTRY ListHead,
+                          IN PLIST_ENTRY Entry,
+                          IN BOOLEAN Critical)
+{
+    PVOID PoolFlink, PoolBlink;
+    
+    /* Make the list accessible */
+    MiProtectedPoolUnProtectLinks(ListHead, &PoolFlink, &PoolBlink);
+    
+    /* Now insert in the right position */
+    Critical ? InsertHeadList(ListHead, Entry) : InsertTailList(ListHead, Entry);
+    
+    /* And reprotect the pages containing the free links */
+    MiProtectedPoolProtectLinks(PoolFlink, PoolBlink);
+}
+
+VOID
+NTAPI
+MiProtectedPoolRemoveEntryList(IN PLIST_ENTRY Entry)
+{
+    PVOID PoolFlink, PoolBlink;
+    
+    /* Make the list accessible */
+    MiProtectedPoolUnProtectLinks(Entry, &PoolFlink, &PoolBlink);
+    
+    /* Now remove */
+    RemoveEntryList(Entry);
+    
+    /* And reprotect the pages containing the free links */
+    if (PoolFlink) MiProtectFreeNonPagedPool(PoolFlink, 1);
+    if (PoolBlink) MiProtectFreeNonPagedPool(PoolBlink, 1);
+}
+
+VOID
+NTAPI
+MiInitializeNonPagedPoolThresholds(VOID)
+{
+    PFN_NUMBER Size = MmMaximumNonPagedPoolInPages;
+
+    /* Default low threshold of 8MB or one third of nonpaged pool, whichever is smaller */
+    MiLowNonPagedPoolThreshold = (8 * _1MB) >> PAGE_SHIFT;
+    MiLowNonPagedPoolThreshold = min(MiLowNonPagedPoolThreshold, Size / 3);
+
+    /* Default high threshold of 20MB or half of nonpaged pool, whichever is smaller */
+    MiHighNonPagedPoolThreshold = (20 * _1MB) >> PAGE_SHIFT;
+    MiHighNonPagedPoolThreshold = min(MiHighNonPagedPoolThreshold, Size / 2);
+    ASSERT(MiLowNonPagedPoolThreshold < MiHighNonPagedPoolThreshold);
+}
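
Because min() takes the smaller bound, the 8 MB / 20 MB defaults only apply once the pool itself is large enough; on small pools the fractional limits win. A worked example for a hypothetical 96 MB maximum nonpaged pool with 4 KB pages (all values are illustrative only):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define _1MB (1024UL * 1024UL)
    #define MIN(a, b) (((a) < (b)) ? (a) : (b))

    int main(void)
    {
        unsigned long Size = (96UL * _1MB) >> PAGE_SHIFT;                /* 24576 pages */
        unsigned long Low  = MIN((8UL * _1MB) >> PAGE_SHIFT, Size / 3);  /* min(2048, 8192)  -> 2048 pages,  8 MB */
        unsigned long High = MIN((20UL * _1MB) >> PAGE_SHIFT, Size / 2); /* min(5120, 12288) -> 5120 pages, 20 MB */
        printf("low = %lu pages, high = %lu pages\n", Low, High);
        /* On a 12 MB pool the same math gives 1024 and 1536 pages instead */
        return 0;
    }
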
+
+VOID
+NTAPI
+MiInitializePoolEvents(VOID)
+{
+    KIRQL OldIrql;
+    PFN_NUMBER FreePoolInPages;
+
+    /* Lock paged pool */
+    KeAcquireGuardedMutex(&MmPagedPoolMutex);
+
+    /* The total size of the paged pool minus the allocated size is free */
+    FreePoolInPages = MmSizeOfPagedPoolInPages - MmPagedPoolInfo.AllocatedPagedPool;
+
+    /* Check the initial high state */
+    if (FreePoolInPages >= MiHighPagedPoolThreshold)
+    {
+        /* We have plenty of pool */
+        KeSetEvent(MiHighPagedPoolEvent, 0, FALSE);
+    }
+    else
+    {
+        /* We don't */
+        KeClearEvent(MiHighPagedPoolEvent);
+    }
+
+    /* Check the initial low state */
+    if (FreePoolInPages <= MiLowPagedPoolThreshold)
+    {
+        /* We're very low in free pool memory */
+        KeSetEvent(MiLowPagedPoolEvent, 0, FALSE);
+    }
+    else
+    {
+        /* We're not */
+        KeClearEvent(MiLowPagedPoolEvent);
+    }
+
+    /* Release the paged pool lock */
+    KeReleaseGuardedMutex(&MmPagedPoolMutex);
+
+    /* Now it's time for the nonpaged pool lock */
+    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
+
+    /* Free pages are the maximum minus what's been allocated */
+    FreePoolInPages = MmMaximumNonPagedPoolInPages - MmAllocatedNonPagedPool;
+
+    /* Check if we have plenty */
+    if (FreePoolInPages >= MiHighNonPagedPoolThreshold)
+    {
+        /* We do, set the event */
+        KeSetEvent(MiHighNonPagedPoolEvent, 0, FALSE);
+    }
+    else
+    {
+        /* We don't, clear the event */
+        KeClearEvent(MiHighNonPagedPoolEvent);
+    }
+
+    /* Check if we have very little */
+    if (FreePoolInPages <= MiLowNonPagedPoolThreshold)
+    {
+        /* We do, set the event */
+        KeSetEvent(MiLowNonPagedPoolEvent, 0, FALSE);
+    }
+    else
+    {
+        /* We don't, clear it */
+        KeClearEvent(MiLowNonPagedPoolEvent);
+    }
+
+    /* We're done, release the nonpaged pool lock */
+    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
+}
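
The four events primed here back the documented named events under \KernelObjects (LowPagedPoolCondition, HighPagedPoolCondition, LowNonPagedPoolCondition, HighNonPagedPoolCondition), which let a driver sleep until pool pressure changes. A minimal consumer sketch, assuming PASSIVE_LEVEL, with error handling trimmed:

    #include <ntddk.h>

    NTSTATUS WaitForLowNonPagedPool(VOID)
    {
        UNICODE_STRING Name =
            RTL_CONSTANT_STRING(L"\\KernelObjects\\LowNonPagedPoolCondition");
        OBJECT_ATTRIBUTES ObjectAttributes;
        HANDLE EventHandle;
        NTSTATUS Status;

        /* Open the standard event the memory manager signals */
        InitializeObjectAttributes(&ObjectAttributes, &Name, OBJ_KERNEL_HANDLE, NULL, NULL);
        Status = ZwOpenEvent(&EventHandle, SYNCHRONIZE, &ObjectAttributes);
        if (!NT_SUCCESS(Status)) return Status;

        /* Blocks until free nonpaged pool drops below the low threshold */
        Status = ZwWaitForSingleObject(EventHandle, FALSE, NULL);
        ZwClose(EventHandle);
        return Status;
    }
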
+
+VOID
+NTAPI
+MiInitializeNonPagedPool(VOID)
 {
     ULONG i;
     PFN_NUMBER PoolPages;
@@ -60,6 +298,7 @@ MiInitializeArmPool(VOID)
     FreeEntry = MmNonPagedPoolStart;
     FirstEntry = FreeEntry;
     FreeEntry->Size = PoolPages;
+    FreeEntry->Signature = MM_FREE_POOL_SIGNATURE;
     FreeEntry->Owner = FirstEntry;
 
     //
@@ -78,6 +317,7 @@ MiInitializeArmPool(VOID)
         //
         FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)FreeEntry + PAGE_SIZE);
         FreeEntry->Owner = FirstEntry;
+        FreeEntry->Signature = MM_FREE_POOL_SIGNATURE;
     }
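
Every page of a free run carries an MMFREE_POOL_ENTRY whose Owner points back to the run's first page, so a neighbor found during merging can locate the run head from any of its pages; the Signature added in this commit guards against treating corrupted or stale memory as a free entry. A sketch of the layout being relied on (field order is illustrative; see the ARM3 header for the real definition):

    typedef struct _MMFREE_POOL_ENTRY
    {
        LIST_ENTRY List;                    /* links the run into a size bucket */
        PFN_NUMBER Size;                    /* run length in pages, valid on the head */
        ULONG Signature;                    /* MM_FREE_POOL_SIGNATURE on every page */
        struct _MMFREE_POOL_ENTRY *Owner;   /* back-pointer to the run's first page */
    } MMFREE_POOL_ENTRY, *PMMFREE_POOL_ENTRY;
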
 
     //
@@ -137,16 +377,227 @@ MiAllocatePoolPages(IN POOL_TYPE PoolType,
     KIRQL OldIrql;
     PLIST_ENTRY NextEntry, NextHead, LastHead;
     PMMPTE PointerPte, StartPte;
+    PMMPDE PointerPde;
+    ULONG EndAllocation;
     MMPTE TempPte;
+    MMPDE TempPde;
     PMMPFN Pfn1;
-    PVOID BaseVa;
+    PVOID BaseVa, BaseVaStart;
     PMMFREE_POOL_ENTRY FreeEntry;
+    PKSPIN_LOCK_QUEUE LockQueue;
     
     //
     // Figure out how big the allocation is in pages
     //
     SizeInPages = BYTES_TO_PAGES(SizeInBytes);
     
+    //
+    // Handle paged pool
+    //
+    if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool)
+    {
+        //
+        // Lock the paged pool mutex
+        //
+        KeAcquireGuardedMutex(&MmPagedPoolMutex);
+        
+        //
+        // Find some empty allocation space
+        //
+        i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
+                                   SizeInPages,
+                                   MmPagedPoolInfo.PagedPoolHint);
+        if (i == 0xFFFFFFFF)
+        {
+            //
+            // Compute how many PDEs are needed to map this allocation
+            //
+            i = ((SizeInPages - 1) / PTE_COUNT) + 1;
+            DPRINT1("Paged pool expansion: %d %x\n", i, SizeInPages);
+            
+            //
+            // Check if there is enough paged pool expansion space left
+            //
+            if (MmPagedPoolInfo.NextPdeForPagedPoolExpansion >
+                MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
+            {
+                //
+                // Out of memory!
+                //
+                DPRINT1("OUT OF PAGED POOL!!!\n");
+                KeReleaseGuardedMutex(&MmPagedPoolMutex);
+                return NULL;
+            }
+            
+            //
+            // Check if we'll have to expand past the last PTE we have available
+            //            
+            if (((i - 1) + MmPagedPoolInfo.NextPdeForPagedPoolExpansion) >
+                 MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
+            {
+                //
+                // We can only support this much then
+                //
+                SizeInPages = MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool) - 
+                              MmPagedPoolInfo.NextPdeForPagedPoolExpansion +
+                              1;
+                ASSERT(SizeInPages < i);
+                i = SizeInPages;
+            }
+            else
+            {
+                //
+                // Otherwise, there is plenty of space left for this expansion
+                //
+                SizeInPages = i;
+            }
+            
+            //
+            // Get the template PDE we'll use to expand
+            //
+            TempPde = ValidKernelPde;
+            
+            //
+            // Get the first PTE in expansion space
+            //
+            PointerPde = MmPagedPoolInfo.NextPdeForPagedPoolExpansion;
+            BaseVa = MiPteToAddress(PointerPde);
+            BaseVaStart = BaseVa;
+            
+            //
+            // Lock the PFN database and loop pages
+            //            
+            OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);          
+            do
+            {
+                //
+                // It should not already be valid
+                //
+                ASSERT(PointerPde->u.Hard.Valid == 0);
+                
+                /* Request a page */
+                DPRINT1("Requesting %d PDEs\n", i);
+                PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());
+                TempPde.u.Hard.PageFrameNumber = PageFrameNumber;
+                DPRINT1("We have a PDE: %lx\n", PageFrameNumber);
+
+#if (_MI_PAGING_LEVELS >= 3)
+                /* On PAE/x64 systems, there's no double-buffering */
+                ASSERT(FALSE);
+#else
+                //
+                // Save it into our double-buffered system page directory
+                //
+                MmSystemPagePtes[((ULONG_PTR)PointerPde & (SYSTEM_PD_SIZE - 1)) / sizeof(MMPTE)] = TempPde;
+                                            
+                /* Initialize the PFN */
+                MiInitializePfnForOtherProcess(PageFrameNumber,
+                                               PointerPde,
+                                               MmSystemPageDirectory[(PointerPde - MiAddressToPde(NULL)) / PDE_COUNT]);
+                             
+                /* Write the actual PDE now */
+                MI_WRITE_VALID_PTE(PointerPde, TempPde);
+#endif                
+                //
+                // Move on to the next expansion address
+                //
+                PointerPde++;
+                BaseVa = (PVOID)((ULONG_PTR)BaseVa + PAGE_SIZE);
+                i--;
+            } while (i > 0);
+            
+            //
+            // Release the PFN database lock
+            //            
+            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
+                        
+            //
+            // These pages are now available, clear their availability bits
+            //
+            EndAllocation = (MmPagedPoolInfo.NextPdeForPagedPoolExpansion -
+                             MiAddressToPte(MmPagedPoolInfo.FirstPteForPagedPool)) *
+                             PTE_COUNT;
+            RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap,
+                         EndAllocation,
+                         SizeInPages * PTE_COUNT);
+                        
+            //
+            // Update the next expansion location
+            //
+            MmPagedPoolInfo.NextPdeForPagedPoolExpansion += SizeInPages;
+            
+            //
+            // Zero out the newly available memory
+            //
+            RtlZeroMemory(BaseVaStart, SizeInPages * PAGE_SIZE);
+            
+            //
+            // Now try consuming the pages again
+            //
+            SizeInPages = BYTES_TO_PAGES(SizeInBytes);
+            i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
+                                       SizeInPages,
+                                       0);
+            if (i == 0xFFFFFFFF) 
+            {
+                //
+                // Out of memory!
+                //
+                DPRINT1("OUT OF PAGED POOL!!!\n");
+                KeReleaseGuardedMutex(&MmPagedPoolMutex);
+                return NULL;
+            }
+        }
+        
+        //
+        // Update the pool hint if the request was just one page
+        //
+        if (SizeInPages == 1) MmPagedPoolInfo.PagedPoolHint = i + 1;
+        
+        //
+        // Update the end bitmap so we know the bounds of this allocation when
+        // the time comes to free it
+        //
+        EndAllocation = i + SizeInPages - 1;
+        RtlSetBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, EndAllocation);
+        
+        //
+        // Now we can release the lock (it mainly protects the bitmap)
+        //
+        KeReleaseGuardedMutex(&MmPagedPoolMutex);
+        
+        //
+        // Now figure out where this allocation starts
+        //
+        BaseVa = (PVOID)((ULONG_PTR)MmPagedPoolStart + (i << PAGE_SHIFT));
+        
+        //
+        // Flush the TLB
+        //
+        KeFlushEntireTb(TRUE, TRUE);
+        
+        /* Setup a demand-zero writable PTE */
+        MI_MAKE_SOFTWARE_PTE(&TempPte, MM_READWRITE);
+        
+        //
+        // Find the first and last PTE, then loop them all
+        //
+        PointerPte = MiAddressToPte(BaseVa);
+        StartPte = PointerPte + SizeInPages;
+        do
+        {
+            //
+            // Write the demand zero PTE and keep going
+            //
+            MI_WRITE_INVALID_PTE(PointerPte, TempPte);
+        } while (++PointerPte < StartPte);
+        
+        //
+        // Return the allocation address to the caller
+        //
+        return BaseVa;
+    }
+
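
The bitmap bookkeeping above is essentially the whole paged pool allocator: PagedPoolAllocationMap tracks which pool pages are in use, while EndOfPagedPoolBitmap records only the last page of each allocation, so a later free needs nothing but the starting address. A toy version built on the same Rtl primitives (the ToyPool names and the fixed 64-page pool are hypothetical; the matching free path is sketched after MiFreePoolPages' paged branch below):

    #include <ntddk.h>

    #define TOY_POOL_PAGES 64

    static RTL_BITMAP ToyAllocationMap;        /* 1 bit = 1 page in use */
    static RTL_BITMAP ToyEndOfAllocationMap;   /* set only on each allocation's last page */
    static ULONG ToyAllocationBits[TOY_POOL_PAGES / 32];
    static ULONG ToyEndBits[TOY_POOL_PAGES / 32];

    VOID ToyPoolInit(VOID)
    {
        RtlInitializeBitMap(&ToyAllocationMap, ToyAllocationBits, TOY_POOL_PAGES);
        RtlInitializeBitMap(&ToyEndOfAllocationMap, ToyEndBits, TOY_POOL_PAGES);
        RtlClearAllBits(&ToyAllocationMap);
        RtlClearAllBits(&ToyEndOfAllocationMap);
    }

    ULONG ToyPoolAlloc(ULONG SizeInPages)
    {
        /* Same primitive the allocator uses: find a clear run and claim it */
        ULONG i = RtlFindClearBitsAndSet(&ToyAllocationMap, SizeInPages, 0);
        if (i == 0xFFFFFFFF) return i;

        /* Record where the allocation ends, exactly like EndOfPagedPoolBitmap */
        RtlSetBit(&ToyEndOfAllocationMap, i + SizeInPages - 1);
        return i;
    }
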
     //
     // Allocations of less than 4 pages go into their individual buckets
     //
@@ -171,10 +622,18 @@ MiAllocatePoolPages(IN POOL_TYPE PoolType,
         NextEntry = NextHead->Flink;
         while (NextEntry != NextHead)
         {
+            /* Is freed nonpaged pool protection enabled? */
+            if (MmProtectFreedNonPagedPool)
+            {
+                /* We need to touch this page; a PageCount of 0 unprotects the whole run */
+                MiUnProtectFreeNonPagedPool(NextEntry, 0);
+            }
+            
             //
             // Grab the entry and see if it can handle our allocation
             //
             FreeEntry = CONTAINING_RECORD(NextEntry, MMFREE_POOL_ENTRY, List);
+            ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
             if (FreeEntry->Size >= SizeInPages)
             {
                 //
@@ -188,23 +647,31 @@ MiAllocatePoolPages(IN POOL_TYPE PoolType,
                 BaseVa = (PVOID)((ULONG_PTR)FreeEntry +
                                  (FreeEntry->Size  << PAGE_SHIFT));
                 
-                //
-                // This is not a free page segment anymore
-                //
-                RemoveEntryList(&FreeEntry->List);
+                /* Remove the item from the list, depending if pool is protected */
+                MmProtectFreedNonPagedPool ?
+                    MiProtectedPoolRemoveEntryList(&FreeEntry->List) :
+                    RemoveEntryList(&FreeEntry->List);
                 
                 //
                 // However, check if it's still got space left
                 //
                 if (FreeEntry->Size != 0)
                 {
-                    //
-                    // Insert it back into a different list, based on its pages
-                    //
+                    /* Check which list to insert this entry into */
                     i = FreeEntry->Size - 1;
                     if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
-                    InsertTailList (&MmNonPagedPoolFreeListHead[i],
-                                    &FreeEntry->List);
+
+                    /* Insert the entry into the free list head, check for prot. pool */
+                    MmProtectFreedNonPagedPool ?
+                        MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE) :
+                        InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
+                        
+                    /* Is freed non paged pool protected? */
+                    if (MmProtectFreedNonPagedPool)
+                    {
+                        /* Protect the freed pool! */
+                        MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
+                    }
                 }
                 
                 //
@@ -224,6 +691,10 @@ MiAllocatePoolPages(IN POOL_TYPE PoolType,
                 ASSERT(Pfn1->u3.e1.StartOfAllocation == 0);
                 Pfn1->u3.e1.StartOfAllocation = 1;
                 
+                /* Mark it as a verifier allocation if needed */
+                ASSERT(Pfn1->u4.VerifierAllocation == 0);
+                if (PoolType & 64) Pfn1->u4.VerifierAllocation = 1;
+                
                 //
                 // Check if the allocation is larger than one page
                 //
@@ -254,6 +725,13 @@ MiAllocatePoolPages(IN POOL_TYPE PoolType,
             // Try the next free page entry
             //
             NextEntry = FreeEntry->List.Flink;
+            
+            /* Is freed non paged pool protected? */
+            if (MmProtectFreedNonPagedPool)
+            {
+                /* Protect the freed pool! */
+                MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
+            }
         }
     } while (++NextHead < LastHead);
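
The bucket-selection math that recurs throughout this file maps a run of N free pages to free list min(N, MI_MAX_FREE_PAGE_LISTS) - 1: runs of 1 through MI_MAX_FREE_PAGE_LISTS - 1 pages each get a dedicated list, and anything larger shares the last one. Written out as a hypothetical helper (not present in the source):

    static ULONG MiFreeListIndex(PFN_NUMBER SizeInPages)
    {
        /* Clamp oversized runs into the final catch-all bucket */
        ULONG i = (ULONG)(SizeInPages - 1);
        return (i >= MI_MAX_FREE_PAGE_LISTS) ? (MI_MAX_FREE_PAGE_LISTS - 1) : i;
    }
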
     
@@ -285,31 +763,29 @@ MiAllocatePoolPages(IN POOL_TYPE PoolType,
     //
     // Lock the PFN database too
     //
-    //KeAcquireQueuedSpinLockAtDpcLevel(LockQueuePfnLock);
+    LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueuePfnLock];
+    KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);
     
     //
     // Loop the pages
     //
-    TempPte = HyperTemplatePte;
+    TempPte = ValidKernelPte;
     do
     {
-        //
-        // Allocate a page
-        //
-        PageFrameNumber = MmAllocPage(MC_NPPOOL, 0);
+        /* Allocate a page */
+        PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());
         
-        //
-        // Get the PFN entry for it
-        //
+        /* Get the PFN entry for it and fill it out */
         Pfn1 = MiGetPfnEntry(PageFrameNumber);
+        Pfn1->u3.e2.ReferenceCount = 1;
+        Pfn1->u2.ShareCount = 1;
+        Pfn1->PteAddress = PointerPte;
+        Pfn1->u3.e1.PageLocation = ActiveAndValid;
+        Pfn1->u4.VerifierAllocation = 0;
         
-        //
-        // Write the PTE for it
-        //
+        /* Write the PTE for it */
         TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
-        ASSERT(PointerPte->u.Hard.Valid == 0);
-        ASSERT(TempPte.u.Hard.Valid == 1);
-        *PointerPte++ = TempPte;
+        MI_WRITE_VALID_PTE(PointerPte++, TempPte);
     } while (--SizeInPages > 0);
     
     //
@@ -323,10 +799,14 @@ MiAllocatePoolPages(IN POOL_TYPE PoolType,
     Pfn1 = MiGetPfnEntry(StartPte->u.Hard.PageFrameNumber);
     Pfn1->u3.e1.StartOfAllocation = 1;
     
+    /* Mark it as a verifier allocation if needed */
+    ASSERT(Pfn1->u4.VerifierAllocation == 0);
+    if (PoolType & 64) Pfn1->u4.VerifierAllocation = 1;
+    
     //
     // Release the PFN and nonpaged pool lock
     //
-    //KeReleaseQueuedSpinLockFromDpcLevel(LockQueuePfnLock);
+    KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
     KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
     
     //
@@ -344,7 +824,62 @@ MiFreePoolPages(IN PVOID StartingVa)
     PFN_NUMBER FreePages, NumberOfPages;
     KIRQL OldIrql;
     PMMFREE_POOL_ENTRY FreeEntry, NextEntry, LastEntry;
-    ULONG i;
+    ULONG i, End;
+    
+    //
+    // Handle paged pool
+    //
+    if ((StartingVa >= MmPagedPoolStart) && (StartingVa <= MmPagedPoolEnd))
+    {
+        //
+        // Calculate the offset from the beginning of paged pool, and convert it
+        // into pages
+        //
+        i = ((ULONG_PTR)StartingVa - (ULONG_PTR)MmPagedPoolStart) >> PAGE_SHIFT;
+        End = i;
+        
+        //
+        // Now use the end bitmap to scan until we find a set bit, meaning that
+        // this allocation finishes here
+        //
+        while (!RtlTestBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End)) End++;
+        
+        //
+        // Now calculate the total number of pages this allocation spans
+        //
+        NumberOfPages = End - i + 1;
+        
+        /* Delete the actual pages */
+        PointerPte = MmPagedPoolInfo.FirstPteForPagedPool + i;
+        FreePages = MiDeleteSystemPageableVm(PointerPte, NumberOfPages, 0, NULL);
+        ASSERT(FreePages == NumberOfPages);
+        
+        //
+        // Acquire the paged pool lock
+        //
+        KeAcquireGuardedMutex(&MmPagedPoolMutex);
+        
+        //
+        // Clear the allocation and free bits
+        //
+        RtlClearBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, i);
+        RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, i, NumberOfPages);
+        
+        //
+        // Update the hint if we need to
+        //
+        if (i < MmPagedPoolInfo.PagedPoolHint) MmPagedPoolInfo.PagedPoolHint = i;
+        
+        //
+        // Release the lock protecting the bitmaps
+        //
+        KeReleaseGuardedMutex(&MmPagedPoolMutex);
+        
+        //
+        // And finally return the number of pages freed
+        //
+        return NumberOfPages;
+    }
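
This paged branch recovers the allocation size purely from the end bitmap: scan forward from the starting page until a set bit marks the last page. Continuing the hypothetical ToyPool sketch from the allocation path above:

    ULONG ToyPoolFree(ULONG StartPage)
    {
        /* Walk to the set bit that marks this allocation's last page */
        ULONG End = StartPage;
        while (!RtlTestBit(&ToyEndOfAllocationMap, End)) End++;

        /* Clear both the end marker and the in-use run, as above */
        RtlClearBit(&ToyEndOfAllocationMap, End);
        RtlClearBits(&ToyAllocationMap, StartPage, End - StartPage + 1);
        return End - StartPage + 1;
    }
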
     
     //
     // Get the first PTE and its corresponding PFN entry
@@ -402,12 +937,21 @@ MiFreePoolPages(IN PVOID StartingVa)
     }
     else
     {
+        /* Sanity check */
+        ASSERT((ULONG_PTR)StartingVa + (NumberOfPages << PAGE_SHIFT) <= (ULONG_PTR)MmNonPagedPoolEnd);
+        
+        /* Check if protected pool is enabled */
+        if (MmProtectFreedNonPagedPool)
+        {
+            /* The freed block will be merged, it must be made accessible */
+            MiUnProtectFreeNonPagedPool(MiPteToAddress(PointerPte), 0);
+        }
+        
         //
         // Otherwise, our entire allocation must've fit within the initial non 
         // paged pool, or the expansion nonpaged pool, so get the PFN entry of
         // the next allocation
         //
-        ASSERT((ULONG_PTR)StartingVa + NumberOfPages <= (ULONG_PTR)MmNonPagedPoolEnd);
         if (PointerPte->u.Hard.Valid == 1)
         {
             //
@@ -436,13 +980,16 @@ MiFreePoolPages(IN PVOID StartingVa)
         //
         FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa +
                                          (NumberOfPages << PAGE_SHIFT));
+        ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
         ASSERT(FreeEntry->Owner == FreeEntry);
         
-        //
-        // Consume this entry's pages, and remove it from its free list
-        //
+        /* Consume this entry's pages */
         FreePages += FreeEntry->Size;
-        RemoveEntryList (&FreeEntry->List);
+        
+        /* Remove the item from the list, depending if pool is protected */
+        MmProtectFreedNonPagedPool ?
+            MiProtectedPoolRemoveEntryList(&FreeEntry->List) :
+            RemoveEntryList(&FreeEntry->List);
     }
     
     //
@@ -466,6 +1013,15 @@ MiFreePoolPages(IN PVOID StartingVa)
         // Otherwise, get the PTE for the page right before our allocation
         //
         PointerPte -= NumberOfPages + 1;
+        
+        /* Check if protected pool is enabled */
+        if (MmProtectFreedNonPagedPool)
+        {
+            /* The freed block will be merged, it must be made accessible */
+            MiUnProtectFreeNonPagedPool(MiPteToAddress(PointerPte), 0);
+        }
+        
+        /* Check if this is valid pool, or a guard page */
         if (PointerPte->u.Hard.Valid == 1)
         {
             //
@@ -493,8 +1049,16 @@ MiFreePoolPages(IN PVOID StartingVa)
         // Get the free entry descriptor for that given page range
         //
         FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa - PAGE_SIZE);
+        ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
         FreeEntry = FreeEntry->Owner;
         
+        /* Check if protected pool is enabled */
+        if (MmProtectFreedNonPagedPool)
+        {
+            /* The freed block will be merged, it must be made accessible */
+            MiUnProtectFreeNonPagedPool(FreeEntry, 0);
+        }
+        
         //
         // Check if the entry is small enough to be indexed on a free list
         // If it is, we'll want to re-insert it, since we're about to
@@ -502,10 +1066,10 @@ MiFreePoolPages(IN PVOID StartingVa)
         //
         if (FreeEntry->Size < (MI_MAX_FREE_PAGE_LISTS - 1))
         {
-            //
-            // Remove the list from where it is now
-            //
-            RemoveEntryList(&FreeEntry->List);
+            /* Remove the item from the list, depending if pool is protected */
+            MmProtectFreedNonPagedPool ?
+                MiProtectedPoolRemoveEntryList(&FreeEntry->List) :
+                RemoveEntryList(&FreeEntry->List);
             
             //
             // Update its size
@@ -518,10 +1082,10 @@ MiFreePoolPages(IN PVOID StartingVa)
             i = (ULONG)(FreeEntry->Size - 1);
             if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
             
-            //
-            // Do it
-            //
-            InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
+            /* Insert the entry into the free list head, check for prot. pool */
+            MmProtectFreedNonPagedPool ?
+                MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE) :
+                InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
         }
         else
         {
@@ -549,10 +1113,10 @@ MiFreePoolPages(IN PVOID StartingVa)
         i = FreeEntry->Size - 1;
         if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
         
-        //
-        // And insert us
-        //
-        InsertTailList (&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
+        /* Insert the entry into the free list head, check for prot. pool */
+        MmProtectFreedNonPagedPool ?
+            MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE) :
+            InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
     }
     
     //
@@ -571,10 +1135,18 @@ MiFreePoolPages(IN PVOID StartingVa)
         //
         // Link back to the parent free entry, and keep going
         //
-        NextEntry->Owner = FreeEntry;    
+        NextEntry->Owner = FreeEntry;
+        NextEntry->Signature = MM_FREE_POOL_SIGNATURE;
         NextEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + PAGE_SIZE);
     } while (NextEntry != LastEntry);
     
+    /* Is freed non paged pool protected? */
+    if (MmProtectFreedNonPagedPool)
+    {
+        /* Protect the freed pool! */
+        MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
+    }
+    
     //
     // We're done, release the lock and let the caller know how much we freed
     //
@@ -582,4 +1154,44 @@ MiFreePoolPages(IN PVOID StartingVa)
     return NumberOfPages;
 }
 
+
+BOOLEAN
+NTAPI
+MiRaisePoolQuota(IN POOL_TYPE PoolType,
+                 IN ULONG CurrentMaxQuota,
+                 OUT PULONG NewMaxQuota)
+{
+    //
+    // Not implemented
+    //
+    UNIMPLEMENTED;
+    *NewMaxQuota = CurrentMaxQuota + 65536;
+    return TRUE;
+}
+
+/* PUBLIC FUNCTIONS ***********************************************************/
+
+/*
+ * @unimplemented
+ */
+PVOID
+NTAPI
+MmAllocateMappingAddress(IN SIZE_T NumberOfBytes,
+                         IN ULONG PoolTag)
+{
+    UNIMPLEMENTED;
+    return NULL;
+}
+
+/*
+ * @unimplemented
+ */
+VOID
+NTAPI
+MmFreeMappingAddress(IN PVOID BaseAddress,
+                     IN ULONG PoolTag)
+{
+    UNIMPLEMENTED;
+}
+
 /* EOF */