POOL_DESCRIPTOR NonPagedPoolDescriptor;
PPOOL_DESCRIPTOR PoolVector[2];
+PKGUARDED_MUTEX ExpPagedPoolMutex;
/* PRIVATE FUNCTIONS **********************************************************/
InitializePool(IN POOL_TYPE PoolType,
IN ULONG Threshold)
{
- ASSERT(PoolType == NonPagedPool);
+ PPOOL_DESCRIPTOR Descriptor;
//
- // Initialize the nonpaged pool descirptor
+ // Check what kind of pool this is
//
- PoolVector[PoolType] = &NonPagedPoolDescriptor;
- ExInitializePoolDescriptor(PoolVector[PoolType],
- PoolType,
- 0,
- Threshold,
- NULL);
+ if (PoolType == NonPagedPool)
+ {
+ //
+ // Initialize the nonpaged pool descriptor
+ //
+ PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
+ ExInitializePoolDescriptor(PoolVector[NonPagedPool],
+ NonPagedPool,
+ 0,
+ Threshold,
+ NULL);
+ }
+ else
+ {
+ //
+ // Allocate the pool descriptor
+ //
+ Descriptor = ExAllocatePoolWithTag(NonPagedPool,
+ sizeof(KGUARDED_MUTEX) +
+ sizeof(POOL_DESCRIPTOR),
+ 'looP');
+ if (!Descriptor)
+ {
+ //
+ // This is really bad...
+ //
+ KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
+ 0,
+ -1,
+ -1,
+ -1);
+ }
+
+ //
+ // Set up the vector and guarded mutex for paged pool
+ //
+ PoolVector[PagedPool] = Descriptor;
+ ExpPagedPoolMutex = (PKGUARDED_MUTEX)(Descriptor + 1);
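+ // (the guarded mutex lives in the same allocation, immediately after the
+ // POOL_DESCRIPTOR, which is why both sizes were added together above)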
+ KeInitializeGuardedMutex(ExpPagedPoolMutex);
+ ExInitializePoolDescriptor(Descriptor,
+ PagedPool,
+ 0,
+ Threshold,
+ ExpPagedPoolMutex);
+ }
+}
+
+FORCEINLINE
+KIRQL
+ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
+{
+ //
+ // Check if this is nonpaged pool
+ //
+ if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
+ {
+ //
+ // Use the queued spin lock
+ //
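+ // (this raises the processor to DISPATCH_LEVEL and returns the previous IRQL)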
+ return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
+ }
+ else
+ {
+ //
+ // Use the guarded mutex
+ //
+ KeAcquireGuardedMutex(Descriptor->LockAddress);
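+ // (the guarded mutex does not raise IRQL, so APC_LEVEL is returned only to
+ // give the caller a value to pass back to ExUnlockPool, which ignores it here)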
+ return APC_LEVEL;
+ }
}
+
+FORCEINLINE
+VOID
+ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor,
+ IN KIRQL OldIrql)
+{
+ //
+ // Check if this is nonpaged pool
+ //
+ if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
+ {
+ //
+ // Use the queued spin lock
+ //
+ KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
+ }
+ else
+ {
+ //
+ // Use the guarded mutex
+ //
+ KeReleaseGuardedMutex(Descriptor->LockAddress);
+ }
+}
+
+/* PUBLIC FUNCTIONS ***********************************************************/
+
PVOID
NTAPI
ExAllocateArmPoolWithTag(IN POOL_TYPE PoolType,
//
// Acquire the nonpaged pool lock now
//
- OldIrql = KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
+ OldIrql = ExLockPool(PoolDesc);
//
// And make sure the list still has entries
//
// Try again!
//
- KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
+ ExUnlockPool(PoolDesc, OldIrql);
ListHead++;
continue;
}
// and release the lock since we're done
//
Entry->PoolType = PoolType + 1;
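+ // (the block header stores the pool type biased by one, so that a zero value
+ // can identify a free block)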
- KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
+ ExUnlockPool(PoolDesc, OldIrql);
//
// Return the pool allocation
//
// Excellent -- acquire the nonpaged pool lock
//
- OldIrql = KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
+ OldIrql = ExLockPool(PoolDesc);
//
// And insert the free entry into the free list for this block size
//
// Release the nonpaged pool lock
//
- KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
+ ExUnlockPool(PoolDesc, OldIrql);
}
//
//
// Acquire the nonpaged pool lock
//
- OldIrql = KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
+ OldIrql = ExLockPool(PoolDesc);
//
// Check if the next allocation is at the end of the page
//
// In this case, release the nonpaged pool lock, and free the page
//
- KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
+ ExUnlockPool(PoolDesc, OldIrql);
MiFreePoolPages(Entry);
return;
}
// Insert this new free block, and release the nonpaged pool lock
//
InsertHeadList(&PoolDesc->ListHeads[BlockSize - 1], (PLIST_ENTRY)Entry + 1);
- KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
+ ExUnlockPool(PoolDesc, OldIrql);
}
VOID
//
Size = Size * 1024;
ASSERT(Size == MmSizeOfPagedPoolInPages);
- BitMapSize = sizeof(RTL_BITMAP) + (((Size + 31) / 32) * sizeof(ULONG));
+ BitMapSize = Size;
+ Size = sizeof(RTL_BITMAP) + (((Size + 31) / 32) * sizeof(ULONG));
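+ // (BitMapSize now holds the bit count, one bit per paged pool page, while Size
+ // is reused as the byte size of the RTL_BITMAP header plus its bit buffer)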
//
// Allocate the allocation bitmap, which tells us which regions have not yet
// been mapped into memory
//
MmPagedPoolInfo.PagedPoolAllocationMap = ExAllocatePoolWithTag(NonPagedPool,
- BitMapSize,
+ Size,
' mM');
ASSERT(MmPagedPoolInfo.PagedPoolAllocationMap);
// entire allocation is.
//
MmPagedPoolInfo.EndOfPagedPoolBitmap = ExAllocatePoolWithTag(NonPagedPool,
- BitMapSize,
+ Size,
' mM');
ASSERT(MmPagedPoolInfo.EndOfPagedPoolBitmap);
RtlInitializeBitMap(MmPagedPoolInfo.EndOfPagedPoolBitmap,
//
// Initialize paged pool.
//
- //InitializePool(PagedPool, 0);
+ InitializePool(PagedPool, 0);
+
+ //
+ // Initialize the paged pool mutex
+ //
+ KeInitializeGuardedMutex(&MmPagedPoolMutex);
}
NTSTATUS