/* Returns the usable data area (a free-list LIST_ENTRY) just past the pool header */
#define POOL_FREE_BLOCK(x) (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
/* Returns the pool header located (i) pool blocks away from (x) */
#define POOL_BLOCK(x, i) (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
/* Header of the block immediately following (x): x->BlockSize blocks ahead */
#define POOL_NEXT_BLOCK(x) POOL_BLOCK((x), (x)->BlockSize)
/* Header of the block immediately preceding (x). The negated argument is
 * fully parenthesized so the expansion stays safe inside POOL_BLOCK. */
#define POOL_PREV_BLOCK(x) POOL_BLOCK((x), -((x)->PreviousSize))
/*
* Pool list access debug macros, similar to Arthur's pfnlist.c work.
* Microsoft actually implements similar checks in the Windows Server 2003 SP1
* pool code, but only for checked builds.
+ *
* As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
* that these checks are done even on retail builds, due to the increasing
* number of kernel-mode attacks which depend on dangling list pointers and other
* kinds of list-based attacks.
+ *
* For now, I will leave these checks on all the time, but later they are likely
* to be DBG-only, at least until there are enough kernel-mode security attacks
* against ReactOS to warrant the performance hit.
*
+ * For now, these are not made inline, so we can get good stack traces.
*/
-FORCEINLINE
PLIST_ENTRY
+NTAPI
ExpDecodePoolLink(IN PLIST_ENTRY Link)
{
return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
}
-FORCEINLINE
PLIST_ENTRY
+NTAPI
ExpEncodePoolLink(IN PLIST_ENTRY Link)
{
return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
}
-FORCEINLINE
VOID
+NTAPI
/*
 * Verifies the encoded links of a pool list head are self-consistent
 * (the first entry's Blink must point back at the head).
 * NOTE(review): this chunk is truncated — the second half of the `if`
 * condition (presumably the symmetric Blink-side check) and the failure
 * body (presumably a KeBugCheckEx) are missing from this view; restore
 * them from the complete source before applying.
 */
ExpCheckPoolLinks(IN PLIST_ENTRY ListHead)
{
    if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink) != ListHead) ||
}
}
-FORCEINLINE
VOID
+NTAPI
ExpInitializePoolListHead(IN PLIST_ENTRY ListHead)
{
ListHead->Flink = ListHead->Blink = ExpEncodePoolLink(ListHead);
}
-FORCEINLINE
BOOLEAN
+NTAPI
ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)
{
return (ExpDecodePoolLink(ListHead->Flink) == ListHead);
}
-FORCEINLINE
VOID
+NTAPI
ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
{
PLIST_ENTRY Blink, Flink;
Flink = ExpDecodePoolLink(Entry->Flink);
Blink = ExpDecodePoolLink(Entry->Blink);
- Blink->Flink = ExpEncodePoolLink(Flink);
Flink->Blink = ExpEncodePoolLink(Blink);
+ Blink->Flink = ExpEncodePoolLink(Flink);
}
-FORCEINLINE
PLIST_ENTRY
+NTAPI
ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
{
- PLIST_ENTRY Head;
- Head = ExpDecodePoolLink(ListHead->Flink);
- ExpRemovePoolEntryList(Head);
- return Head;
+ PLIST_ENTRY Entry, Flink;
+ Entry = ExpDecodePoolLink(ListHead->Flink);
+ Flink = ExpDecodePoolLink(Entry->Flink);
+ ListHead->Flink = ExpEncodePoolLink(Flink);
+ Flink->Blink = ExpEncodePoolLink(ListHead);
+ return Entry;
}
-FORCEINLINE
PLIST_ENTRY
+NTAPI
ExpRemovePoolTailList(IN PLIST_ENTRY ListHead)
{
- PLIST_ENTRY Tail;
- Tail = ExpDecodePoolLink(ListHead->Blink);
- ExpRemovePoolEntryList(Tail);
- return Tail;
+ PLIST_ENTRY Entry, Blink;
+ Entry = ExpDecodePoolLink(ListHead->Blink);
+ Blink = ExpDecodePoolLink(Entry->Blink);
+ ListHead->Blink = ExpEncodePoolLink(Blink);
+ Blink->Flink = ExpEncodePoolLink(ListHead);
+ return Entry;
}
-FORCEINLINE
VOID
+NTAPI
/*
 * Inserts Entry at the tail of an encoded pool free list.
 * NOTE(review): this chunk is truncated — only the leading link check
 * survives; the actual splice statements (setting Entry's links and the
 * old tail's / head's links, presumably mirroring ExpInsertPoolHeadList)
 * are missing from this view. Restore from the complete source.
 */
ExpInsertPoolTailList(IN PLIST_ENTRY ListHead,
                      IN PLIST_ENTRY Entry)
{
    ExpCheckPoolLinks(ListHead);
}
-FORCEINLINE
VOID
+NTAPI
ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead,
IN PLIST_ENTRY Entry)
{
PLIST_ENTRY Flink;
ExpCheckPoolLinks(ListHead);
- Flink = ExpDecodePoolLink(ListHead->Blink);
+ Flink = ExpDecodePoolLink(ListHead->Flink);
Entry->Flink = ExpEncodePoolLink(Flink);
Entry->Blink = ExpEncodePoolLink(ListHead);
Flink->Blink = ExpEncodePoolLink(Entry);
ExpCheckPoolLinks(ListHead);
}
+VOID
+NTAPI
+ExpCheckPoolHeader(IN PPOOL_HEADER Entry)
+{
+ PPOOL_HEADER PreviousEntry, NextEntry;
+
+ /* Is there a block before this one? */
+ if (Entry->PreviousSize)
+ {
+ /* Get it */
+ PreviousEntry = POOL_PREV_BLOCK(Entry);
+
+ /* The two blocks must be on the same page! */
+ if (PAGE_ALIGN(Entry) != PAGE_ALIGN(PreviousEntry))
+ {
+ /* Something is awry */
+ KeBugCheckEx(BAD_POOL_HEADER,
+ 6,
+ (ULONG_PTR)PreviousEntry,
+ __LINE__,
+ (ULONG_PTR)Entry);
+ }
+
+ /* This block should also indicate that it's as large as we think it is */
+ if (PreviousEntry->BlockSize != Entry->PreviousSize)
+ {
+ /* Otherwise, someone corrupted one of the sizes */
+ KeBugCheckEx(BAD_POOL_HEADER,
+ 5,
+ (ULONG_PTR)PreviousEntry,
+ __LINE__,
+ (ULONG_PTR)Entry);
+ }
+ }
+ else if (PAGE_ALIGN(Entry) != Entry)
+ {
+ /* If there's no block before us, we are the first block, so we should be on a page boundary */
+ KeBugCheckEx(BAD_POOL_HEADER,
+ 7,
+ 0,
+ __LINE__,
+ (ULONG_PTR)Entry);
+ }
+
+ /* This block must have a size */
+ if (!Entry->BlockSize)
+ {
+ /* Someone must've corrupted this field */
+ KeBugCheckEx(BAD_POOL_HEADER,
+ 8,
+ 0,
+ __LINE__,
+ (ULONG_PTR)Entry);
+ }
+
+ /* Okay, now get the next block */
+ NextEntry = POOL_NEXT_BLOCK(Entry);
+
+ /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
+ if (PAGE_ALIGN(NextEntry) != NextEntry)
+ {
+ /* The two blocks must be on the same page! */
+ if (PAGE_ALIGN(Entry) != PAGE_ALIGN(NextEntry))
+ {
+ /* Something is messed up */
+ KeBugCheckEx(BAD_POOL_HEADER,
+ 9,
+ (ULONG_PTR)NextEntry,
+ __LINE__,
+ (ULONG_PTR)Entry);
+ }
+
+ /* And this block should think we are as large as we truly are */
+ if (NextEntry->PreviousSize != Entry->BlockSize)
+ {
+ /* Otherwise, someone corrupted the field */
+ KeBugCheckEx(BAD_POOL_HEADER,
+ 5,
+ (ULONG_PTR)NextEntry,
+ __LINE__,
+ (ULONG_PTR)Entry);
+ }
+ }
+}
+
+VOID
+NTAPI
+ExpCheckPoolBlocks(IN PVOID Block)
+{
+ BOOLEAN FoundBlock = FALSE;
+ SIZE_T Size = 0;
+ PPOOL_HEADER Entry;
+
+ /* Get the first entry for this page, make sure it really is the first */
+ Entry = PAGE_ALIGN(Block);
+ ASSERT(Entry->PreviousSize == 0);
+
+ /* Now scan each entry */
+ while (TRUE)
+ {
+ /* When we actually found our block, remember this */
+ if (Entry == Block) FoundBlock = TRUE;
+
+ /* Now validate this block header */
+ ExpCheckPoolHeader(Entry);
+
+ /* And go to the next one, keeping track of our size */
+ Size += Entry->BlockSize;
+ Entry = POOL_NEXT_BLOCK(Entry);
+
+ /* If we hit the last block, stop */
+ if (Size >= (PAGE_SIZE / POOL_BLOCK_SIZE)) break;
+
+ /* If we hit the end of the page, stop */
+ if (PAGE_ALIGN(Entry) == Entry) break;
+ }
+
+ /* We must've found our block, and we must have hit the end of the page */
+ if ((PAGE_ALIGN(Entry) != Entry) || !(FoundBlock))
+ {
+ /* Otherwise, the blocks are messed up */
+ KeBugCheckEx(BAD_POOL_HEADER, 10, (ULONG_PTR)Block, __LINE__, (ULONG_PTR)Entry);
+ }
+}
+
/* PRIVATE FUNCTIONS **********************************************************/
VOID
NTAPI
+INIT_FUNCTION
ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor,
IN POOL_TYPE PoolType,
IN ULONG PoolIndex,
LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
while (NextEntry < LastEntry)
{
- InitializeListHead(NextEntry);
+ ExpInitializePoolListHead(NextEntry);
NextEntry++;
}
}
VOID
NTAPI
+INIT_FUNCTION
InitializePool(IN POOL_TYPE PoolType,
IN ULONG Threshold)
{
PPOOL_HEADER Entry, NextEntry, FragmentEntry;
KIRQL OldIrql;
ULONG BlockSize, i;
-
- //
- // Check for paged pool
- //
- if (PoolType == PagedPool) return ExAllocatePagedPoolWithTag(PagedPool, NumberOfBytes, Tag);
-
+
//
// Some sanity checks
//
//
// Are there any free entries available on this list?
//
- if (!IsListEmpty(ListHead))
+ if (!ExpIsPoolListEmpty(ListHead))
{
//
// Acquire the pool lock now
//
// And make sure the list still has entries
//
- if (IsListEmpty(ListHead))
+ if (ExpIsPoolListEmpty(ListHead))
{
//
// Someone raced us (and won) before we had a chance to acquire
// there is a guarantee that any block on this list will either be
// of the correct size, or perhaps larger.
//
- Entry = POOL_ENTRY(RemoveHeadList(ListHead));
+ ExpCheckPoolLinks(ListHead);
+ Entry = POOL_ENTRY(ExpRemovePoolHeadList(ListHead));
+ ExpCheckPoolLinks(ListHead);
+ ExpCheckPoolBlocks(Entry);
ASSERT(Entry->BlockSize >= i);
ASSERT(Entry->PoolType == 0);
// "full" entry, which contains enough bytes for a linked list
// and thus can be used for allocations (up to 8 bytes...)
//
+ ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
if (BlockSize != 1)
{
//
// Insert the free entry into the free list for this size
//
- InsertTailList(&PoolDesc->ListHeads[BlockSize - 1],
- POOL_FREE_BLOCK(FragmentEntry));
+ ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
+ POOL_FREE_BLOCK(FragmentEntry));
+ ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
}
}
// and release the lock since we're done
//
Entry->PoolType = PoolType + 1;
+ ExpCheckPoolBlocks(Entry);
ExUnlockPool(PoolDesc, OldIrql);
//
//
// And insert the free entry into the free list for this block size
//
- InsertTailList(&PoolDesc->ListHeads[BlockSize - 1],
- POOL_FREE_BLOCK(FragmentEntry));
+ ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
+ ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
+ POOL_FREE_BLOCK(FragmentEntry));
+ ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
//
// Release the pool lock
//
+ ExpCheckPoolBlocks(Entry);
ExUnlockPool(PoolDesc, OldIrql);
}
//
// And return the pool allocation
//
+ ExpCheckPoolBlocks(Entry);
Entry->PoolTag = Tag;
return POOL_FREE_BLOCK(Entry);
}
POOL_TYPE PoolType;
PPOOL_DESCRIPTOR PoolDesc;
BOOLEAN Combined = FALSE;
-#if 1
- //
- // Check for paged pool
- //
- if ((P >= MmPagedPoolBase) &&
- (P <= (PVOID)((ULONG_PTR)MmPagedPoolBase + MmPagedPoolSize)))
- {
- //
- // Use old allocator
- //
- ExFreePagedPool(P);
- return;
- }
-#endif
-
+
//
// Quickly deal with big page allocations
//
//
// Check if the next allocation is at the end of the page
//
+ ExpCheckPoolBlocks(Entry);
if (PAGE_ALIGN(NextEntry) != NextEntry)
{
//
// The block is at least big enough to have a linked list, so go
// ahead and remove it
//
- RemoveEntryList(POOL_FREE_BLOCK(NextEntry));
+ ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
+ ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
+ ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
+ ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
}
//
// The block is at least big enough to have a linked list, so go
// ahead and remove it
//
- RemoveEntryList(POOL_FREE_BLOCK(NextEntry));
+ ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
+ ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
+ ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
+ ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
}
//
//
// Insert this new free block, and release the pool lock
//
- InsertHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
+ ExpInsertPoolHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
+ ExpCheckPoolLinks(POOL_FREE_BLOCK(Entry));
ExUnlockPool(PoolDesc, OldIrql);
}