#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"
+#undef ExAllocatePoolWithQuota
+#undef ExAllocatePoolWithQuotaTag
+
/* GLOBALS ********************************************************************/
+ULONG ExpNumberOfPagedPools;
POOL_DESCRIPTOR NonPagedPoolDescriptor;
+PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16 + 1];
PPOOL_DESCRIPTOR PoolVector[2];
+PVOID PoolTrackTable;
+PKGUARDED_MUTEX ExpPagedPoolMutex;
+/* Pool block/header/list access macros */
+#define POOL_ENTRY(x) (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
+#define POOL_FREE_BLOCK(x) (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
+#define POOL_BLOCK(x, i) (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
+#define POOL_NEXT_BLOCK(x) POOL_BLOCK((x), (x)->BlockSize)
+#define POOL_PREV_BLOCK(x) POOL_BLOCK((x), -(x)->PreviousSize)
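+
+/*
+ * Worked example for the macros above (a sketch, assuming the usual x86
+ * layout where POOL_BLOCK_SIZE == sizeof(POOL_HEADER) == 8 bytes):
+ *
+ *   PPOOL_HEADER Hdr = POOL_ENTRY(P);          // P - 8: header behind a body
+ *   PVOID Body = POOL_FREE_BLOCK(Hdr);         // Hdr + 8: back to the body
+ *   PPOOL_HEADER Next = POOL_NEXT_BLOCK(Hdr);  // forward BlockSize * 8 bytes
+ *   PPOOL_HEADER Prev = POOL_PREV_BLOCK(Hdr);  // back PreviousSize * 8 bytes
+ *
+ * BlockSize and PreviousSize are counted in POOL_BLOCK_SIZE units, so the
+ * headers on a pool page can be walked in both directions.
+ */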
+
/* PRIVATE FUNCTIONS **********************************************************/
VOID
//
NextEntry = PoolDescriptor->ListHeads;
LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
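+ //
+ // ListHeads[n] collects free blocks of exactly n + 1 units, which is why
+ // the allocation and free paths index it with BlockSize - 1
+ //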
- while (NextEntry < LastEntry) InitializeListHead(NextEntry++);
+ while (NextEntry < LastEntry)
+ {
+ InitializeListHead(NextEntry);
+ NextEntry++;
+ }
}
VOID
InitializePool(IN POOL_TYPE PoolType,
IN ULONG Threshold)
{
- ASSERT(PoolType == NonPagedPool);
+ PPOOL_DESCRIPTOR Descriptor;
//
- // Initialize the nonpaged pool descirptor
+ // Check what kind of pool this is
+ //
+ if (PoolType == NonPagedPool)
+ {
+ //
+ // Initialize the nonpaged pool descriptor
+ //
+ PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
+ ExInitializePoolDescriptor(PoolVector[NonPagedPool],
+ NonPagedPool,
+ 0,
+ Threshold,
+ NULL);
+ }
+ else
+ {
+ //
+ // Allocate the pool descriptor
+ //
+ Descriptor = ExAllocatePoolWithTag(NonPagedPool,
+ sizeof(KGUARDED_MUTEX) +
+ sizeof(POOL_DESCRIPTOR),
+ 'looP');
+ if (!Descriptor)
+ {
+ //
+ // This is really bad...
+ //
+ KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
+ 0,
+ -1,
+ -1,
+ -1);
+ }
+
+ //
+ // Setup the vector and guarded mutex for paged pool
+ //
+ PoolVector[PagedPool] = Descriptor;
+ ExpPagedPoolMutex = (PKGUARDED_MUTEX)(Descriptor + 1);
+ KeInitializeGuardedMutex(ExpPagedPoolMutex);
+ ExInitializePoolDescriptor(Descriptor,
+ PagedPool,
+ 0,
+ Threshold,
+ ExpPagedPoolMutex);
+ }
+}
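+
+/*
+ * Note that the paged pool descriptor and its guarded mutex are themselves
+ * carved out of nonpaged pool above, so the NonPagedPool pass through this
+ * routine must complete before the PagedPool pass runs.
+ */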
+
+FORCEINLINE
+KIRQL
+ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
+{
+ //
+ // Check if this is nonpaged pool
+ //
+ if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
+ {
+ //
+ // Use the queued spin lock
+ //
+ return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
+ }
+ else
+ {
+ //
+ // Use the guarded mutex
+ //
+ KeAcquireGuardedMutex(Descriptor->LockAddress);
+ return APC_LEVEL;
+ }
+}
+
+FORCEINLINE
+VOID
+ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor,
+ IN KIRQL OldIrql)
+{
+ //
+ // Check if this is nonpaged pool
//
- PoolVector[PoolType] = &NonPagedPoolDescriptor;
- ExInitializePoolDescriptor(PoolVector[PoolType],
- PoolType,
- 0,
- Threshold,
- NULL);
+ if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
+ {
+ //
+ // Use the queued spin lock
+ //
+ KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
+ }
+ else
+ {
+ //
+ // Use the guarded mutex
+ //
+ KeReleaseGuardedMutex(Descriptor->LockAddress);
+ }
}
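+
+/*
+ * Typical pairing, as used by the allocation and free paths below (a sketch):
+ *
+ *   OldIrql = ExLockPool(PoolDesc);
+ *   ... manipulate PoolDesc->ListHeads ...
+ *   ExUnlockPool(PoolDesc, OldIrql);
+ *
+ * For nonpaged pool the queued spinlock raises to DISPATCH_LEVEL and OldIrql
+ * is the caller's previous IRQL; for paged pool the guarded mutex is held
+ * instead and OldIrql is simply reported as APC_LEVEL.
+ */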
+/* PUBLIC FUNCTIONS ***********************************************************/
+
+/*
+ * @implemented
+ */
PVOID
NTAPI
-ExAllocateArmPoolWithTag(IN POOL_TYPE PoolType,
- IN SIZE_T NumberOfBytes,
- IN ULONG Tag)
+ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
+ IN SIZE_T NumberOfBytes,
+ IN ULONG Tag)
{
- POOL_TYPE PoolType;
PPOOL_DESCRIPTOR PoolDesc;
PLIST_ENTRY ListHead;
PPOOL_HEADER Entry, NextEntry, FragmentEntry;
KIRQL OldIrql;
ULONG BlockSize, i;
+ //
+ // Check for paged pool
+ //
+ if (PoolType == PagedPool)
+ {
+     return ExAllocatePagedPoolWithTag(PagedPool, NumberOfBytes, Tag);
+ }
+
//
// Some sanity checks
//
// request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
// the direct allocation of pages.
//
- i = (NumberOfBytes + sizeof(POOL_HEADER) + sizeof(LIST_ENTRY) - 1) /
- sizeof(POOL_HEADER);
+ i = (NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1)) / POOL_BLOCK_SIZE;
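+
+ //
+ // Example, assuming the 8-byte x86 block size: a 24-byte request gives
+ // i = (24 + 8 + 7) / 8 = 4 units, i.e. one header unit plus 24 bytes of
+ // data rounded up to whole POOL_BLOCK_SIZE units
+ //
+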
//
// Loop in the free lists looking for a block of this size. Start with the
if (!IsListEmpty(ListHead))
{
//
- // Acquire the nonpaged pool lock now
+ // Acquire the pool lock now
//
- OldIrql = KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
+ OldIrql = ExLockPool(PoolDesc);
//
// And make sure the list still has entries
//
// Try again!
//
- KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
+ ExUnlockPool(PoolDesc, OldIrql);
ListHead++;
continue;
}
// there is a guarantee that any block on this list will either be
// of the correct size, or perhaps larger.
//
- Entry = (PPOOL_HEADER)RemoveHeadList(ListHead) - 1;
+ Entry = POOL_ENTRY(RemoveHeadList(ListHead));
ASSERT(Entry->BlockSize >= i);
ASSERT(Entry->PoolType == 0);
// turn it into a fragment that contains the leftover data
// that we don't need to satisfy the caller's request
//
- FragmentEntry = Entry + i;
+ FragmentEntry = POOL_BLOCK(Entry, i);
FragmentEntry->BlockSize = Entry->BlockSize - i;
//
// Now get the block that follows the new fragment and check
// if it's still on the same page as us (and not at the end)
//
- NextEntry = FragmentEntry + FragmentEntry->BlockSize;
+ NextEntry = POOL_NEXT_BLOCK(FragmentEntry);
if (PAGE_ALIGN(NextEntry) != NextEntry)
{
//
// This is the entry that will actually end up holding the
// allocation!
//
- Entry += Entry->BlockSize;
+ Entry = POOL_NEXT_BLOCK(Entry);
Entry->PreviousSize = FragmentEntry->BlockSize;
//
// And now let's go to the entry after that one and check if
// it's still on the same page, and not at the end
//
- NextEntry = Entry + i;
+ NextEntry = POOL_BLOCK(Entry, i);
if (PAGE_ALIGN(NextEntry) != NextEntry)
{
//
// Insert the free entry into the free list for this size
//
InsertTailList(&PoolDesc->ListHeads[BlockSize - 1],
- (PLIST_ENTRY)FragmentEntry + 1);
+ POOL_FREE_BLOCK(FragmentEntry));
}
}
// and release the lock since we're done
//
Entry->PoolType = PoolType + 1;
- KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
+ ExUnlockPool(PoolDesc, OldIrql);
//
// Return the pool allocation
//
Entry->PoolTag = Tag;
- return ++Entry;
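+ //
+ // Clear the LIST_ENTRY that lived in this block's data area while it
+ // was free, so the caller doesn't receive stale free-list pointers
+ //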
+ (POOL_FREE_BLOCK(Entry))->Flink = NULL;
+ (POOL_FREE_BLOCK(Entry))->Blink = NULL;
+ return POOL_FREE_BLOCK(Entry);
}
} while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);
// There were no free entries left, so we have to allocate a fresh page
//
Entry = MiAllocatePoolPages(PoolType, PAGE_SIZE);
+ ASSERT(Entry != NULL);
Entry->Ulong1 = 0;
Entry->BlockSize = i;
Entry->PoolType = PoolType + 1;
// to create now. The free bytes are the whole page minus what was allocated
// and then converted into units of block headers.
//
- BlockSize = (PAGE_SIZE / sizeof(POOL_HEADER)) - i;
- FragmentEntry = Entry + i;
+ BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i;
+ FragmentEntry = POOL_BLOCK(Entry, i);
FragmentEntry->Ulong1 = 0;
FragmentEntry->BlockSize = BlockSize;
FragmentEntry->PreviousSize = i;
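+ //
+ // Example, assuming a 4KB page and 8-byte blocks: a fresh page spans
+ // 512 units, so an i = 4 allocation leaves a fragment with
+ // BlockSize = 508 and PreviousSize = 4. A one-unit fragment has no room
+ // for a LIST_ENTRY, hence the size check below
+ //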
if (FragmentEntry->BlockSize != 1)
{
//
- // Excellent -- acquire the nonpaged pool lock
+ // Excellent -- acquire the pool lock
//
- OldIrql = KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
+ OldIrql = ExLockPool(PoolDesc);
//
// And insert the free entry into the free list for this block size
//
InsertTailList(&PoolDesc->ListHeads[BlockSize - 1],
- (PLIST_ENTRY)FragmentEntry + 1);
-
+ POOL_FREE_BLOCK(FragmentEntry));
+
//
- // Release the nonpaged pool lock
+ // Release the pool lock
//
- KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
+ ExUnlockPool(PoolDesc, OldIrql);
}
//
// And return the pool allocation
//
Entry->PoolTag = Tag;
- return ++Entry;
+ return POOL_FREE_BLOCK(Entry);
}
+/*
+ * @implemented
+ */
PVOID
NTAPI
-ExAllocateArmPool(POOL_TYPE PoolType,
- SIZE_T NumberOfBytes)
+ExAllocatePool(POOL_TYPE PoolType,
+ SIZE_T NumberOfBytes)
{
//
// Use a default tag of "None"
return ExAllocatePoolWithTag(PoolType, NumberOfBytes, 'enoN');
}
+/*
+ * @implemented
+ */
VOID
NTAPI
-ExFreeArmPoolWithTag(IN PVOID P,
- IN ULONG TagToFree)
+ExFreePoolWithTag(IN PVOID P,
+ IN ULONG TagToFree)
{
PPOOL_HEADER Entry, NextEntry;
ULONG BlockSize;
POOL_TYPE PoolType;
PPOOL_DESCRIPTOR PoolDesc;
BOOLEAN Combined = FALSE;
+#if 1
+ //
+ // Check for paged pool
+ //
+ if ((P >= MmPagedPoolBase) &&
+ (P <= (PVOID)((ULONG_PTR)MmPagedPoolBase + MmPagedPoolSize)))
+ {
+ //
+ // Use old allocator
+ //
+ ExFreePagedPool(P);
+ return;
+ }
+#endif
//
// Quickly deal with big page allocations
//
- if (PAGE_ALIGN(P) == P) return (VOID)MiFreePoolPages(P);
+ if (PAGE_ALIGN(P) == P)
+ {
+ MiFreePoolPages(P);
+ return;
+ }
//
// Get the entry for this pool allocation
// for this pool type
//
BlockSize = Entry->BlockSize;
- PoolType = (Entry->PoolType & BASE_POOL_TYPE_MASK) - 1;
+ PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
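+ //
+ // (The stored value is the real pool type plus one -- see the allocation
+ // path -- so subtract before masking: for a paged pool block the stored
+ // value is 2, and masking first would give (2 & 1) - 1 = -1)
+ //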
PoolDesc = PoolVector[PoolType];
//
// Get the pointer to the next entry
//
- NextEntry = Entry + BlockSize;
+ NextEntry = POOL_BLOCK(Entry, BlockSize);
//
- // Acquire the nonpaged pool lock
+ // Acquire the pool lock
//
- OldIrql = KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
+ OldIrql = ExLockPool(PoolDesc);
//
// Check if the next allocation is at the end of the page
// The block is at least big enough to have a linked list, so go
// ahead and remove it
//
- RemoveEntryList((PLIST_ENTRY)NextEntry + 1);
+ RemoveEntryList(POOL_FREE_BLOCK(NextEntry));
}
//
//
// Great, grab that entry and check if it's free
//
- NextEntry = Entry - Entry->PreviousSize;
+ NextEntry = POOL_PREV_BLOCK(Entry);
if (NextEntry->PoolType == 0)
{
//
// The block is at least big enough to have a linked list, so go
// ahead and remove it
//
- RemoveEntryList((PLIST_ENTRY)NextEntry + 1);
+ RemoveEntryList(POOL_FREE_BLOCK(NextEntry));
}
//
// page, they could've all been combined).
//
if ((PAGE_ALIGN(Entry) == Entry) &&
- (PAGE_ALIGN(Entry + Entry->BlockSize) == Entry + Entry->BlockSize))
+ (PAGE_ALIGN(POOL_NEXT_BLOCK(Entry)) == POOL_NEXT_BLOCK(Entry)))
{
//
- // In this case, release the nonpaged pool lock, and free the page
+ // In this case, release the pool lock, and free the page
//
- KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
- return (VOID)MiFreePoolPages(Entry);
+ ExUnlockPool(PoolDesc, OldIrql);
+ MiFreePoolPages(Entry);
+ return;
}
//
// Get the first combined block (either our original to begin with, or
// the one after the original, depending if we combined with the previous)
//
- NextEntry = Entry + BlockSize;
+ NextEntry = POOL_NEXT_BLOCK(Entry);
//
// As long as the next block isn't on a page boundary, have it point
}
//
- // Insert this new free block, and release the nonpaged pool lock
+ // Insert this new free block, and release the pool lock
//
- InsertHeadList(&PoolDesc->ListHeads[BlockSize - 1], (PLIST_ENTRY)Entry + 1);
- KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
+ InsertHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
+ ExUnlockPool(PoolDesc, OldIrql);
}
+/*
+ * @implemented
+ */
VOID
NTAPI
-ExFreeArmPool(PVOID P)
+ExFreePool(PVOID P)
{
//
// Just free without checking for the tag
//
- ExFreeArmPoolWithTag(P, 0);
+ ExFreePoolWithTag(P, 0);
+}
+
+/*
+ * @unimplemented
+ */
+SIZE_T
+NTAPI
+ExQueryPoolBlockSize(IN PVOID PoolBlock,
+ OUT PBOOLEAN QuotaCharged)
+{
+ //
+ // Not implemented
+ //
+ UNIMPLEMENTED;
+ return 0;
+}
+
+/*
+ * @unimplemented
+ */
+PVOID
+NTAPI
+ExAllocatePoolWithQuota(IN POOL_TYPE PoolType,
+ IN SIZE_T NumberOfBytes)
+{
+ //
+ // Allocate the pool
+ //
+ return ExAllocatePoolWithQuotaTag(PoolType, NumberOfBytes, 'enoN');
+}
+
+/*
+ * @unimplemented
+ */
+PVOID
+NTAPI
+ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType,
+ IN SIZE_T NumberOfBytes,
+ IN ULONG Tag,
+ IN EX_POOL_PRIORITY Priority)
+{
+ //
+ // Priority is not honored yet -- fall back to a plain tagged allocation
+ //
+ UNIMPLEMENTED;
+ return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
+}
+
+/*
+ * @unimplemented
+ */
+PVOID
+NTAPI
+ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType,
+ IN SIZE_T NumberOfBytes,
+ IN ULONG Tag)
+{
+ //
+ // Quota is not charged yet -- fall back to a plain tagged allocation
+ //
+ UNIMPLEMENTED;
+ return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
}
/* EOF */