/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/expool.c
 * PURPOSE:         ARM Memory Manager Executive Pool Manager
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */
/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"

#undef ExAllocatePoolWithQuota
#undef ExAllocatePoolWithQuotaTag
/* GLOBALS ********************************************************************/

#define POOL_BIG_TABLE_ENTRY_FREE 0x1

typedef struct _POOL_DPC_CONTEXT
{
    PPOOL_TRACKER_TABLE PoolTrackTable;
    SIZE_T PoolTrackTableSize;
    PPOOL_TRACKER_TABLE PoolTrackTableExpansion;
    SIZE_T PoolTrackTableSizeExpansion;
} POOL_DPC_CONTEXT, *PPOOL_DPC_CONTEXT;

ULONG ExpNumberOfPagedPools;
POOL_DESCRIPTOR NonPagedPoolDescriptor;
PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16 + 1];
PPOOL_DESCRIPTOR PoolVector[2];
PKGUARDED_MUTEX ExpPagedPoolMutex;
SIZE_T PoolTrackTableSize, PoolTrackTableMask;
SIZE_T PoolBigPageTableSize, PoolBigPageTableHash;
PPOOL_TRACKER_TABLE PoolTrackTable;
PPOOL_TRACKER_BIG_PAGES PoolBigPageTable;
KSPIN_LOCK ExpTaggedPoolLock;
BOOLEAN ExStopBadTags;
KSPIN_LOCK ExpLargePoolTableLock;
ULONG ExpPoolBigEntriesInUse;
/* Pool block/header/list access macros */
#define POOL_ENTRY(x)       (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
#define POOL_FREE_BLOCK(x)  (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
#define POOL_BLOCK(x, i)    (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
#define POOL_NEXT_BLOCK(x)  POOL_BLOCK((x), (x)->BlockSize)
#define POOL_PREV_BLOCK(x)  POOL_BLOCK((x), -((x)->PreviousSize))
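
/*
 * Note (added for clarity, not in the original source): the BlockSize and
 * PreviousSize values used by these macros are expressed in units of
 * POOL_BLOCK_SIZE (the size of a POOL_HEADER -- typically 8 bytes on 32-bit
 * builds), not in bytes. With 8-byte units, a header whose BlockSize is 4
 * describes a 32-byte region including the header itself, so
 * POOL_NEXT_BLOCK(x) lands 32 bytes past x.
 */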
/*
 * Pool list access debug macros, similar to Arthur's pfnlist.c work.
 * Microsoft actually implements similar checks in the Windows Server 2003 SP1
 * pool code, but only for checked builds.
 *
 * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
 * that these checks are done even on retail builds, due to the increasing
 * number of kernel-mode attacks which depend on dangling list pointers and other
 * kinds of list-based attacks.
 *
 * For now, I will leave these checks on all the time, but later they are likely
 * to be DBG-only, at least until there are enough kernel-mode security attacks
 * against ReactOS to warrant the performance hit.
 *
 * For now, these are not made inline, so we can get good stack traces.
 */
PLIST_ENTRY
ExpDecodePoolLink(IN PLIST_ENTRY Link)
{
    return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
}

PLIST_ENTRY
ExpEncodePoolLink(IN PLIST_ENTRY Link)
{
    return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
}
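
/*
 * Illustration (added, not from the original source): pool list links are
 * stored with bit 0 set, so a link whose real value is 0x81234560 is kept as
 * 0x81234561 and must round-trip through ExpEncodePoolLink/ExpDecodePoolLink.
 * Any code that touches these lists without the helpers produces pointers
 * that fail the ExpCheckPoolLinks consistency test below.
 */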
VOID
ExpCheckPoolLinks(IN PLIST_ENTRY ListHead)
{
    if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink) != ListHead) ||
        (ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink) != ListHead))
    {
        KeBugCheckEx(BAD_POOL_HEADER,
                     (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink),
                     (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink));
    }
}

VOID
ExpInitializePoolListHead(IN PLIST_ENTRY ListHead)
{
    ListHead->Flink = ListHead->Blink = ExpEncodePoolLink(ListHead);
}

BOOLEAN
ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)
{
    return (ExpDecodePoolLink(ListHead->Flink) == ListHead);
}

VOID
ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Blink, Flink;
    Flink = ExpDecodePoolLink(Entry->Flink);
    Blink = ExpDecodePoolLink(Entry->Blink);
    Flink->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(Flink);
}

PLIST_ENTRY
ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
{
    PLIST_ENTRY Entry, Flink;
    Entry = ExpDecodePoolLink(ListHead->Flink);
    Flink = ExpDecodePoolLink(Entry->Flink);
    ListHead->Flink = ExpEncodePoolLink(Flink);
    Flink->Blink = ExpEncodePoolLink(ListHead);
    return Entry;
}

PLIST_ENTRY
ExpRemovePoolTailList(IN PLIST_ENTRY ListHead)
{
    PLIST_ENTRY Entry, Blink;
    Entry = ExpDecodePoolLink(ListHead->Blink);
    Blink = ExpDecodePoolLink(Entry->Blink);
    ListHead->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(ListHead);
    return Entry;
}

VOID
ExpInsertPoolTailList(IN PLIST_ENTRY ListHead,
                      IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Blink;
    ExpCheckPoolLinks(ListHead);
    Blink = ExpDecodePoolLink(ListHead->Blink);
    Entry->Flink = ExpEncodePoolLink(ListHead);
    Entry->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(Entry);
    ListHead->Blink = ExpEncodePoolLink(Entry);
    ExpCheckPoolLinks(ListHead);
}

VOID
ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead,
                      IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Flink;
    ExpCheckPoolLinks(ListHead);
    Flink = ExpDecodePoolLink(ListHead->Flink);
    Entry->Flink = ExpEncodePoolLink(Flink);
    Entry->Blink = ExpEncodePoolLink(ListHead);
    Flink->Blink = ExpEncodePoolLink(Entry);
    ListHead->Flink = ExpEncodePoolLink(Entry);
    ExpCheckPoolLinks(ListHead);
}
VOID
ExpCheckPoolHeader(IN PPOOL_HEADER Entry)
{
    PPOOL_HEADER PreviousEntry, NextEntry;

    /* Is there a block before this one? */
    if (Entry->PreviousSize)
    {
        PreviousEntry = POOL_PREV_BLOCK(Entry);

        /* The two blocks must be on the same page! */
        if (PAGE_ALIGN(Entry) != PAGE_ALIGN(PreviousEntry))
        {
            /* Something is awry */
            KeBugCheckEx(BAD_POOL_HEADER,
                         (ULONG_PTR)PreviousEntry);
        }

        /* This block should also indicate that it's as large as we think it is */
        if (PreviousEntry->BlockSize != Entry->PreviousSize)
        {
            /* Otherwise, someone corrupted one of the sizes */
            DPRINT1("PreviousEntry BlockSize %lu, tag %.4s. Entry PreviousSize %lu, tag %.4s\n",
                    PreviousEntry->BlockSize, (char *)&PreviousEntry->PoolTag,
                    Entry->PreviousSize, (char *)&Entry->PoolTag);
            KeBugCheckEx(BAD_POOL_HEADER,
                         (ULONG_PTR)PreviousEntry);
        }
    }
    else if (PAGE_ALIGN(Entry) != Entry)
    {
        /* If there's no block before us, we are the first block, so we should be on a page boundary */
        KeBugCheckEx(BAD_POOL_HEADER);
    }

    /* This block must have a size */
    if (!Entry->BlockSize)
    {
        /* Someone must've corrupted this field */
        if (Entry->PreviousSize)
        {
            PreviousEntry = POOL_PREV_BLOCK(Entry);
            DPRINT1("PreviousEntry tag %.4s. Entry tag %.4s\n",
                    (char *)&PreviousEntry->PoolTag,
                    (char *)&Entry->PoolTag);
        }
        else
        {
            DPRINT1("Entry tag %.4s\n",
                    (char *)&Entry->PoolTag);
        }
        KeBugCheckEx(BAD_POOL_HEADER);
    }

    /* Okay, now get the next block */
    NextEntry = POOL_NEXT_BLOCK(Entry);

    /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
    if (PAGE_ALIGN(NextEntry) != NextEntry)
    {
        /* The two blocks must be on the same page! */
        if (PAGE_ALIGN(Entry) != PAGE_ALIGN(NextEntry))
        {
            /* Something is messed up */
            KeBugCheckEx(BAD_POOL_HEADER,
                         (ULONG_PTR)NextEntry);
        }

        /* And this block should think we are as large as we truly are */
        if (NextEntry->PreviousSize != Entry->BlockSize)
        {
            /* Otherwise, someone corrupted the field */
            DPRINT1("Entry BlockSize %lu, tag %.4s. NextEntry PreviousSize %lu, tag %.4s\n",
                    Entry->BlockSize, (char *)&Entry->PoolTag,
                    NextEntry->PreviousSize, (char *)&NextEntry->PoolTag);
            KeBugCheckEx(BAD_POOL_HEADER,
                         (ULONG_PTR)NextEntry);
        }
    }
}
VOID
ExpCheckPoolAllocation(
    PVOID P,
    POOL_TYPE PoolType,
    ULONG Tag)
{
    PPOOL_HEADER Entry;
    ULONG i;
    KIRQL OldIrql;
    POOL_TYPE RealPoolType;

    /* Get the pool header */
    Entry = ((PPOOL_HEADER)P) - 1;

    /* Check if this is a large allocation */
    if (PAGE_ALIGN(P) == P)
    {
        /* Lock the pool table */
        KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);

        /* Find the pool tag */
        for (i = 0; i < PoolBigPageTableSize; i++)
        {
            /* Check if this is our allocation */
            if (PoolBigPageTable[i].Va == P)
            {
                /* Make sure the tag is ok */
                if (PoolBigPageTable[i].Key != Tag)
                {
                    KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, PoolBigPageTable[i].Key, Tag);
                }
                break;
            }
        }

        /* Release the lock */
        KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);

        if (i == PoolBigPageTableSize)
        {
            /* Did not find the allocation */
        }

        /* Get Pool type by address */
        RealPoolType = MmDeterminePoolType(P);
    }
    else
    {
        if (Entry->PoolTag != Tag)
        {
            DPRINT1("Allocation has wrong pool tag! Expected '%.4s', got '%.4s' (0x%08lx)\n",
                    &Tag, &Entry->PoolTag, Entry->PoolTag);
            KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Entry->PoolTag, Tag);
        }

        /* Check the rest of the header */
        ExpCheckPoolHeader(Entry);

        /* Get Pool type from entry */
        RealPoolType = (Entry->PoolType - 1);
    }

    /* Should we check the pool type? */

    /* Verify the pool type */
    if (RealPoolType != PoolType)
    {
        DPRINT1("Wrong pool type! Expected %s, got %s\n",
                PoolType & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool",
                (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool");
        KeBugCheckEx(BAD_POOL_CALLER, 0xCC, (ULONG_PTR)P, Entry->PoolTag, Tag);
    }
}
VOID
ExpCheckPoolBlocks(IN PVOID Block)
{
    BOOLEAN FoundBlock = FALSE;
    SIZE_T Size = 0;
    PPOOL_HEADER Entry;

    /* Get the first entry for this page, make sure it really is the first */
    Entry = PAGE_ALIGN(Block);
    ASSERT(Entry->PreviousSize == 0);

    /* Now scan each entry */
    while (TRUE)
    {
        /* When we actually found our block, remember this */
        if (Entry == Block) FoundBlock = TRUE;

        /* Now validate this block header */
        ExpCheckPoolHeader(Entry);

        /* And go to the next one, keeping track of our size */
        Size += Entry->BlockSize;
        Entry = POOL_NEXT_BLOCK(Entry);

        /* If we hit the last block, stop */
        if (Size >= (PAGE_SIZE / POOL_BLOCK_SIZE)) break;

        /* If we hit the end of the page, stop */
        if (PAGE_ALIGN(Entry) == Entry) break;
    }

    /* We must've found our block, and we must have hit the end of the page */
    if ((PAGE_ALIGN(Entry) != Entry) || !(FoundBlock))
    {
        /* Otherwise, the blocks are messed up */
        KeBugCheckEx(BAD_POOL_HEADER, 10, (ULONG_PTR)Block, __LINE__, (ULONG_PTR)Entry);
    }
}
VOID
ExpCheckPoolIrqlLevel(IN POOL_TYPE PoolType,
                      IN SIZE_T NumberOfBytes,
                      IN PVOID Entry)
{
    //
    // Validate IRQL: It must be APC_LEVEL or lower for Paged Pool, and it must
    // be DISPATCH_LEVEL or lower for Non Paged Pool
    //
    if (((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) ?
        (KeGetCurrentIrql() > APC_LEVEL) :
        (KeGetCurrentIrql() > DISPATCH_LEVEL))
    {
        //
        // Take the system down
        //
        KeBugCheckEx(BAD_POOL_CALLER,
                     !Entry ? POOL_ALLOC_IRQL_INVALID : POOL_FREE_IRQL_INVALID,
                     !Entry ? NumberOfBytes : (ULONG_PTR)Entry);
    }
}
ULONG
ExpComputeHashForTag(IN ULONG Tag,
                     IN SIZE_T BucketMask)
{
    //
    // Compute the hash by multiplying with a large prime number and then XORing
    // with the HIDWORD of the result.
    //
    // Finally, AND with the bucket mask to generate a valid index/bucket into
    // the table
    //
    ULONGLONG Result = (ULONGLONG)40543 * Tag;
    return (ULONG)BucketMask & ((ULONG)Result ^ (Result >> 32));
}
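
//
// Worked example (added for clarity, not in the original source): with
// BucketMask == 0x7FF, a 32-bit tag value T maps to
// 0x7FF & ((ULONG)(40543ULL * T) ^ (ULONG)((40543ULL * T) >> 32)),
// i.e. the low DWORD of the 64-bit product XORed with its high DWORD and
// truncated to 11 bits, giving a bucket index in [0, 2047].
//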
ULONG
ExpComputePartialHashForAddress(IN PVOID BaseAddress)
{
    ULONG Result;
    //
    // Compute the hash by converting the address into a page number, and then
    // XORing each nibble with the next one.
    //
    // We do *NOT* AND with the bucket mask at this point because big table expansion
    // might happen. Therefore, the final step of the hash must be performed
    // while holding the expansion pushlock, and this is why we call this a
    // "partial" hash only.
    //
    Result = (ULONG)((ULONG_PTR)BaseAddress >> PAGE_SHIFT);
    return (Result >> 24) ^ (Result >> 16) ^ (Result >> 8) ^ Result;
}
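
//
// Illustrative example (added, not from the original source): for a
// BaseAddress of 0xF0D1C000 with PAGE_SHIFT == 12, Result is the page number
// 0xF0D1C; the return value XORs Result with copies of itself shifted right
// by 8, 16 and 24 bits, and the caller later masks the result with
// PoolBigPageTableHash while holding ExpLargePoolTableLock.
//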
/* PRIVATE FUNCTIONS **********************************************************/

    ULONG i, Key, Hash, Index;
    PPOOL_TRACKER_TABLE TrackTable = PoolTrackTable;

    //
    // Loop all 64 hot tags
    //
    ASSERT((sizeof(TagList) / sizeof(ULONG)) == 64);
    for (i = 0; i < sizeof(TagList) / sizeof(ULONG); i++)
    {
        //
        // Get the current tag, and compute its hash in the tracker table
        //
        Key = TagList[i];
        Hash = ExpComputeHashForTag(Key, PoolTrackTableMask);

        //
        // Loop all the hashes in this index/bucket
        //
        Index = Hash;
        do
        {
            //
            // Find an empty entry, and make sure this isn't the last hash that
            // can fit.
            //
            // On checked builds, also make sure this is the first time we are
            // seeding this tag.
            //
            ASSERT(TrackTable[Hash].Key != Key);
            if (!(TrackTable[Hash].Key) && (Hash != PoolTrackTableSize - 1))
            {
                //
                // It has been seeded, move on to the next tag
                //
                TrackTable[Hash].Key = Key;
                break;
            }

            //
            // This entry was already taken, compute the next possible hash while
            // making sure we're not back at our initial index.
            //
            ASSERT(TrackTable[Hash].Key != Key);
            Hash = (Hash + 1) & PoolTrackTableMask;
            if (Hash == Index) break;
        } while (TRUE);
    }
VOID
ExpRemovePoolTracker(IN ULONG Key,
                     IN SIZE_T NumberOfBytes,
                     IN POOL_TYPE PoolType)
{
    ULONG Hash, Index;
    PPOOL_TRACKER_TABLE Table, TableEntry;
    SIZE_T TableMask, TableSize;

    //
    // Remove the PROTECTED_POOL flag which is not part of the tag
    //
    Key &= ~PROTECTED_POOL;

    //
    // With WinDBG you can set a tag you want to break on when an allocation is
    // made or freed
    //
    if (Key == PoolHitTag) DbgBreakPoint();

    //
    // Why the double indirection? Because normally this function is also used
    // when doing session pool allocations, which has another set of tables,
    // sizes, and masks that live in session pool. Now we don't support session
    // pool so we only ever use the regular tables, but I'm keeping the code this
    // way so that the day we DO support session pool, it won't require that
    // much rework
    //
    Table = PoolTrackTable;
    TableMask = PoolTrackTableMask;
    TableSize = PoolTrackTableSize;
    DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize);

    //
    // Compute the hash for this key, and loop all the possible buckets
    //
    Hash = ExpComputeHashForTag(Key, TableMask);
    Index = Hash;
    while (TRUE)
    {
        //
        // Have we found the entry for this tag?
        //
        TableEntry = &Table[Hash];
        if (TableEntry->Key == Key)
        {
            //
            // Decrement the counters depending on if this was paged or nonpaged
            // pool
            //
            if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
            {
                InterlockedIncrement(&TableEntry->NonPagedFrees);
                InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes,
                                            -(SSIZE_T)NumberOfBytes);
            }
            else
            {
                InterlockedIncrement(&TableEntry->PagedFrees);
                InterlockedExchangeAddSizeT(&TableEntry->PagedBytes,
                                            -(SSIZE_T)NumberOfBytes);
            }
            return;
        }

        //
        // We should have only ended up with an empty entry if we've reached
        // the last bucket
        //
        if (!TableEntry->Key)
        {
            DPRINT1("Empty item reached in tracker table. Hash=0x%lx, TableMask=0x%lx, Tag=0x%08lx, NumberOfBytes=%lu, PoolType=%d\n",
                    Hash, TableMask, Key, (ULONG)NumberOfBytes, PoolType);
            ASSERT(Hash == TableMask);
        }

        //
        // This path is hit when we don't have an entry, and the current bucket
        // is full, so we simply try the next one
        //
        Hash = (Hash + 1) & TableMask;
        if (Hash == Index) break;
    }

    //
    // And finally this path is hit when all the buckets are full, and we need
    // some expansion. This path is not yet supported in ReactOS and so we'll
    // ignore the tag
    //
    DPRINT1("Out of pool tag space, ignoring...\n");
}
VOID
ExpInsertPoolTracker(IN ULONG Key,
                     IN SIZE_T NumberOfBytes,
                     IN POOL_TYPE PoolType)
{
    ULONG Hash, Index;
    KIRQL OldIrql;
    PPOOL_TRACKER_TABLE Table, TableEntry;
    SIZE_T TableMask, TableSize;

    //
    // Remove the PROTECTED_POOL flag which is not part of the tag
    //
    Key &= ~PROTECTED_POOL;

    //
    // With WinDBG you can set a tag you want to break on when an allocation is
    // made or freed
    //
    if (Key == PoolHitTag) DbgBreakPoint();

    //
    // There is also an internal flag you can set to break on malformed tags
    //
    if (ExStopBadTags) ASSERT(Key & 0xFFFFFF00);

    //
    // ASSERT on ReactOS features not yet supported
    //
    ASSERT(!(PoolType & SESSION_POOL_MASK));
    ASSERT(KeGetCurrentProcessorNumber() == 0);

    //
    // Why the double indirection? Because normally this function is also used
    // when doing session pool allocations, which has another set of tables,
    // sizes, and masks that live in session pool. Now we don't support session
    // pool so we only ever use the regular tables, but I'm keeping the code this
    // way so that the day we DO support session pool, it won't require that
    // much rework
    //
    Table = PoolTrackTable;
    TableMask = PoolTrackTableMask;
    TableSize = PoolTrackTableSize;
    DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize);

    //
    // Compute the hash for this key, and loop all the possible buckets
    //
    Hash = ExpComputeHashForTag(Key, TableMask);
    Index = Hash;
    while (TRUE)
    {
        //
        // Do we already have an entry for this tag?
        //
        TableEntry = &Table[Hash];
        if (TableEntry->Key == Key)
        {
            //
            // Increment the counters depending on if this was paged or nonpaged
            // pool
            //
            if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
            {
                InterlockedIncrement(&TableEntry->NonPagedAllocs);
                InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes, NumberOfBytes);
            }
            else
            {
                InterlockedIncrement(&TableEntry->PagedAllocs);
                InterlockedExchangeAddSizeT(&TableEntry->PagedBytes, NumberOfBytes);
            }
            return;
        }

        //
        // We don't have an entry yet, but we've found a free bucket for it
        //
        if (!(TableEntry->Key) && (Hash != PoolTrackTableSize - 1))
        {
            //
            // We need to hold the lock while creating a new entry, since other
            // processors might be in this code path as well
            //
            ExAcquireSpinLock(&ExpTaggedPoolLock, &OldIrql);
            if (!PoolTrackTable[Hash].Key)
            {
                //
                // We've won the race, so now create this entry in the bucket
                //
                ASSERT(Table[Hash].Key == 0);
                PoolTrackTable[Hash].Key = Key;
                TableEntry->Key = Key;
            }
            ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);

            //
            // Now we force the loop to run again, and we should now end up in
            // the code path above which does the interlocked increments...
            //
            continue;
        }

        //
        // This path is hit when we don't have an entry, and the current bucket
        // is full, so we simply try the next one
        //
        Hash = (Hash + 1) & TableMask;
        if (Hash == Index) break;
    }

    //
    // And finally this path is hit when all the buckets are full, and we need
    // some expansion. This path is not yet supported in ReactOS and so we'll
    // ignore the tag
    //
    DPRINT1("Out of pool tag space, ignoring...\n");
}
VOID
ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor,
                           IN POOL_TYPE PoolType,
                           IN ULONG PoolIndex,
                           IN ULONG Threshold,
                           IN PVOID PoolLock)
{
    PLIST_ENTRY NextEntry, LastEntry;

    //
    // Setup the descriptor based on the caller's request
    //
    PoolDescriptor->PoolType = PoolType;
    PoolDescriptor->PoolIndex = PoolIndex;
    PoolDescriptor->Threshold = Threshold;
    PoolDescriptor->LockAddress = PoolLock;

    //
    // Initialize accounting data
    //
    PoolDescriptor->RunningAllocs = 0;
    PoolDescriptor->RunningDeAllocs = 0;
    PoolDescriptor->TotalPages = 0;
    PoolDescriptor->TotalBytes = 0;
    PoolDescriptor->TotalBigPages = 0;

    //
    // Nothing pending for now
    //
    PoolDescriptor->PendingFrees = NULL;
    PoolDescriptor->PendingFreeDepth = 0;

    //
    // Loop all the descriptor's allocation lists and initialize them
    //
    NextEntry = PoolDescriptor->ListHeads;
    LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
    while (NextEntry < LastEntry)
    {
        ExpInitializePoolListHead(NextEntry);
        NextEntry++;
    }

    //
    // Note that ReactOS does not support Session Pool Yet
    //
    ASSERT(PoolType != PagedPoolSession);
}
VOID
InitializePool(IN POOL_TYPE PoolType,
               IN ULONG Threshold)
{
    PPOOL_DESCRIPTOR Descriptor;
    SIZE_T TableSize;
    ULONG i;

    //
    // Check what kind of pool this is
    //
    if (PoolType == NonPagedPool)
    {
        //
        // Compute the track table size and convert it from a power of two to an
        // actual size
        //
        // NOTE: On checked builds, we'll assert if the registry table size was
        // invalid, while on retail builds we'll just break out of the loop at
        // that point
        //
        TableSize = min(PoolTrackTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
        for (i = 0; i < 32; i++)
        {
            if (TableSize & 1)
            {
                ASSERT((TableSize & ~1) == 0);
                if (!(TableSize & ~1)) break;
            }
            TableSize >>= 1;
        }

        //
        // If we hit bit 32, then no size was defined in the registry, so
        // we'll use the default size of 2048 entries.
        //
        // Otherwise, use the size from the registry, as long as it's not
        // smaller than 64 entries.
        //
        if (i == 32)
        {
            PoolTrackTableSize = 2048;
        }
        else
        {
            PoolTrackTableSize = max(1 << i, 64);
        }

        //
        // Loop trying with the biggest specified size first, and cut it down
        // by a power of two each iteration in case not enough memory exist
        //
        while (TRUE)
        {
            //
            // Do not allow overflow
            //
            if ((PoolTrackTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_TABLE)))
            {
                PoolTrackTableSize >>= 1;
                continue;
            }

            //
            // Allocate the tracker table and exit the loop if this worked
            //
            PoolTrackTable = MiAllocatePoolPages(NonPagedPool,
                                                 (PoolTrackTableSize + 1) *
                                                 sizeof(POOL_TRACKER_TABLE));
            if (PoolTrackTable) break;

            //
            // Otherwise, as long as we're not down to the last bit, keep
            // trying
            //
            if (PoolTrackTableSize == 1)
            {
                KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY);
            }
            PoolTrackTableSize >>= 1;
        }

        //
        // Add one entry, compute the hash, and zero the table
        //
        PoolTrackTableSize++;
        PoolTrackTableMask = PoolTrackTableSize - 2;
        RtlZeroMemory(PoolTrackTable,
                      PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));

        //
        // Finally, add the most used tags to speed up those allocations
        //

        //
        // We now do the exact same thing with the tracker table for big pages
        //
        TableSize = min(PoolBigPageTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
        for (i = 0; i < 32; i++)
        {
            if (TableSize & 1)
            {
                ASSERT((TableSize & ~1) == 0);
                if (!(TableSize & ~1)) break;
            }
            TableSize >>= 1;
        }

        //
        // For big pages, the default tracker table is 4096 entries, while the
        // minimum is still 64
        //
        if (i == 32)
        {
            PoolBigPageTableSize = 4096;
        }
        else
        {
            PoolBigPageTableSize = max(1 << i, 64);
        }

        //
        // Again, run the exact same loop we ran earlier, but this time for the
        // big pool tracker instead
        //
        while (TRUE)
        {
            if ((PoolBigPageTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_BIG_PAGES)))
            {
                PoolBigPageTableSize >>= 1;
                continue;
            }

            PoolBigPageTable = MiAllocatePoolPages(NonPagedPool,
                                                   PoolBigPageTableSize *
                                                   sizeof(POOL_TRACKER_BIG_PAGES));
            if (PoolBigPageTable) break;

            if (PoolBigPageTableSize == 1)
            {
                KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY);
            }
            PoolBigPageTableSize >>= 1;
        }

        //
        // An extra entry is not needed for the big pool tracker, so just
        // compute the hash and zero it
        //
        PoolBigPageTableHash = PoolBigPageTableSize - 1;
        RtlZeroMemory(PoolBigPageTable,
                      PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
        for (i = 0; i < PoolBigPageTableSize; i++) PoolBigPageTable[i].Va = (PVOID)1;

        //
        // During development, print this out so we can see what's happening
        //
        DPRINT("EXPOOL: Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
               PoolTrackTable, PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
        DPRINT("EXPOOL: Big Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
               PoolBigPageTable, PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));

        //
        // Insert the generic tracker for all of big pool
        //
        ExpInsertPoolTracker('looP',
                             ROUND_TO_PAGES(PoolBigPageTableSize *
                                            sizeof(POOL_TRACKER_BIG_PAGES)),
                             NonPagedPool);

        //
        // No support for NUMA systems at this time
        //
        ASSERT(KeNumberNodes == 1);

        //
        // Initialize the tag spinlock
        //
        KeInitializeSpinLock(&ExpTaggedPoolLock);

        //
        // Initialize the nonpaged pool descriptor
        //
        PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
        ExInitializePoolDescriptor(PoolVector[NonPagedPool],
                                   NonPagedPool,
                                   0,
                                   Threshold,
                                   NULL);
    }
    else
    {
        //
        // No support for NUMA systems at this time
        //
        ASSERT(KeNumberNodes == 1);

        //
        // Allocate the pool descriptor
        //
        Descriptor = ExAllocatePoolWithTag(NonPagedPool,
                                           sizeof(KGUARDED_MUTEX) +
                                           sizeof(POOL_DESCRIPTOR),
                                           'looP');
        if (!Descriptor)
        {
            //
            // This is really bad...
            //
            KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY);
        }

        //
        // Setup the vector and guarded mutex for paged pool
        //
        PoolVector[PagedPool] = Descriptor;
        ExpPagedPoolMutex = (PKGUARDED_MUTEX)(Descriptor + 1);
        ExpPagedPoolDescriptor[0] = Descriptor;
        KeInitializeGuardedMutex(ExpPagedPoolMutex);
        ExInitializePoolDescriptor(Descriptor,
                                   PagedPool,
                                   0,
                                   Threshold,
                                   ExpPagedPoolMutex);

        //
        // Insert the generic tracker for all of nonpaged pool
        //
        ExpInsertPoolTracker('looP',
                             ROUND_TO_PAGES(PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE)),
                             NonPagedPool);
    }
}
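
//
// Sizing note (added for clarity, not in the original source): with the default
// of 2048 track-table entries, the code above allocates 2048 + 1 entries, then
// bumps PoolTrackTableSize to 2049 and sets PoolTrackTableMask to 2047, so
// ExpComputeHashForTag() can never index the final "overflow" entry directly.
//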
KIRQL
NTAPI
ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
{
    //
    // Check if this is nonpaged pool
    //
    if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
    {
        //
        // Use the queued spin lock
        //
        return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
    }
    else
    {
        //
        // Use the guarded mutex
        //
        KeAcquireGuardedMutex(Descriptor->LockAddress);
        return APC_LEVEL;
    }
}
VOID
NTAPI
ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor,
             IN KIRQL OldIrql)
{
    //
    // Check if this is nonpaged pool
    //
    if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
    {
        //
        // Use the queued spin lock
        //
        KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
    }
    else
    {
        //
        // Use the guarded mutex
        //
        KeReleaseGuardedMutex(Descriptor->LockAddress);
    }
}
VOID
NTAPI
ExpGetPoolTagInfoTarget(IN PKDPC Dpc,
                        IN PVOID DeferredContext,
                        IN PVOID SystemArgument1,
                        IN PVOID SystemArgument2)
{
    PPOOL_DPC_CONTEXT Context = DeferredContext;
    UNREFERENCED_PARAMETER(Dpc);
    ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

    //
    // Make sure we win the race, and if we did, copy the data atomically
    //
    if (KeSignalCallDpcSynchronize(SystemArgument2))
    {
        RtlCopyMemory(Context->PoolTrackTable,
                      PoolTrackTable,
                      Context->PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));

        //
        // This is here because ReactOS does not yet support expansion
        //
        ASSERT(Context->PoolTrackTableSizeExpansion == 0);
    }

    //
    // Regardless of whether we won or not, we must now synchronize and then
    // decrement the barrier since this is one more processor that has completed
    // the callback
    //
    KeSignalCallDpcSynchronize(SystemArgument2);
    KeSignalCallDpcDone(SystemArgument1);
}
NTSTATUS
ExGetPoolTagInfo(IN PSYSTEM_POOLTAG_INFORMATION SystemInformation,
                 IN ULONG SystemInformationLength,
                 IN OUT PULONG ReturnLength OPTIONAL)
{
    ULONG TableSize, CurrentLength;
    ULONG EntryCount;
    NTSTATUS Status = STATUS_SUCCESS;
    PSYSTEM_POOLTAG TagEntry;
    PPOOL_TRACKER_TABLE Buffer, TrackerEntry;
    POOL_DPC_CONTEXT Context;
    ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);

    //
    // Keep track of how much data the caller's buffer must hold
    //
    CurrentLength = FIELD_OFFSET(SYSTEM_POOLTAG_INFORMATION, TagInfo);

    //
    // Initialize the caller's buffer
    //
    TagEntry = &SystemInformation->TagInfo[0];
    SystemInformation->Count = 0;

    //
    // Capture the number of entries, and the total size needed to make a copy
    // of the table
    //
    EntryCount = (ULONG)PoolTrackTableSize;
    TableSize = EntryCount * sizeof(POOL_TRACKER_TABLE);

    //
    // Allocate the "Generic DPC" temporary buffer
    //
    Buffer = ExAllocatePoolWithTag(NonPagedPool, TableSize, 'ofnI');
    if (!Buffer) return STATUS_INSUFFICIENT_RESOURCES;

    //
    // Do a "Generic DPC" to atomically retrieve the tag and allocation data
    //
    Context.PoolTrackTable = Buffer;
    Context.PoolTrackTableSize = PoolTrackTableSize;
    Context.PoolTrackTableExpansion = NULL;
    Context.PoolTrackTableSizeExpansion = 0;
    KeGenericCallDpc(ExpGetPoolTagInfoTarget, &Context);

    //
    // Now parse the results
    //
    for (TrackerEntry = Buffer; TrackerEntry < (Buffer + EntryCount); TrackerEntry++)
    {
        //
        // If the entry is empty, skip it
        //
        if (!TrackerEntry->Key) continue;

        //
        // Otherwise, add one more entry to the caller's buffer, and ensure that
        // enough space has been allocated in it
        //
        SystemInformation->Count++;
        CurrentLength += sizeof(*TagEntry);
        if (SystemInformationLength < CurrentLength)
        {
            //
            // The caller's buffer is too small, so set a failure code. The
            // caller will know the count, as well as how much space is needed.
            //
            // We do NOT break out of the loop, because we want to keep incrementing
            // the Count as well as CurrentLength so that the caller can know the
            // final sizes required
            //
            Status = STATUS_INFO_LENGTH_MISMATCH;
            continue;
        }

        //
        // Small sanity check that our accounting is working correctly
        //
        ASSERT(TrackerEntry->PagedAllocs >= TrackerEntry->PagedFrees);
        ASSERT(TrackerEntry->NonPagedAllocs >= TrackerEntry->NonPagedFrees);

        //
        // Return the data into the caller's buffer
        //
        TagEntry->TagUlong = TrackerEntry->Key;
        TagEntry->PagedAllocs = TrackerEntry->PagedAllocs;
        TagEntry->PagedFrees = TrackerEntry->PagedFrees;
        TagEntry->PagedUsed = TrackerEntry->PagedBytes;
        TagEntry->NonPagedAllocs = TrackerEntry->NonPagedAllocs;
        TagEntry->NonPagedFrees = TrackerEntry->NonPagedFrees;
        TagEntry->NonPagedUsed = TrackerEntry->NonPagedBytes;
        TagEntry++;
    }

    //
    // Free the "Generic DPC" temporary buffer, return the buffer length and status
    //
    ExFreePoolWithTag(Buffer, 'ofnI');
    if (ReturnLength) *ReturnLength = CurrentLength;
    return Status;
}
BOOLEAN
ExpAddTagForBigPages(IN PVOID Va,
                     IN ULONG Key,
                     IN ULONG NumberOfPages,
                     IN POOL_TYPE PoolType)
{
    ULONG Hash, i = 0;
    SIZE_T TableSize;
    KIRQL OldIrql;
    PVOID OldVa;
    PPOOL_TRACKER_BIG_PAGES Entry, EntryEnd, EntryStart;
    ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
    ASSERT(!(PoolType & SESSION_POOL_MASK));

    //
    // As the table is expandable, these values must only be read after acquiring
    // the lock to avoid a teared access during an expansion
    //
    Hash = ExpComputePartialHashForAddress(Va);
    KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
    Hash &= PoolBigPageTableHash;
    TableSize = PoolBigPageTableSize;

    //
    // We loop from the current hash bucket to the end of the table, and then
    // rollover to hash bucket 0 and keep going from there. If we return back
    // to the beginning, then we attempt expansion at the bottom of the loop
    //
    EntryStart = Entry = &PoolBigPageTable[Hash];
    EntryEnd = &PoolBigPageTable[TableSize];
    do
    {
        //
        // Make sure that this is a free entry and attempt to atomically make the
        // entry ours
        //
        OldVa = Entry->Va;
        if (((ULONG_PTR)OldVa & POOL_BIG_TABLE_ENTRY_FREE) &&
            (InterlockedCompareExchangePointer(&Entry->Va, Va, OldVa) == OldVa))
        {
            //
            // We now own this entry, write down the size and the pool tag
            //
            Entry->Key = Key;
            Entry->NumberOfPages = NumberOfPages;

            //
            // Add one more entry to the count, and see if we're getting within
            // 25% of the table size, at which point we'll do an expansion now
            // to avoid blocking too hard later on.
            //
            // Note that we only do this if it's also been the 16th time that we
            // keep losing the race or that we are not finding a free entry anymore,
            // which implies a massive number of concurrent big pool allocations.
            //
            InterlockedIncrementUL(&ExpPoolBigEntriesInUse);
            if ((i >= 16) && (ExpPoolBigEntriesInUse > (TableSize / 4)))
            {
                DPRINT("Should attempt expansion since we now have %lu entries\n",
                       ExpPoolBigEntriesInUse);
            }

            //
            // We have our entry, return
            //
            KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
            return TRUE;
        }

        //
        // We don't have our entry yet, so keep trying, making the entry list
        // circular if we reach the last entry. We'll eventually break out of
        // the loop once we've rolled over and returned back to our original
        // bucket
        //
        i++;
        if (++Entry >= EntryEnd) Entry = &PoolBigPageTable[0];
    } while (Entry != EntryStart);

    //
    // This means there's no free hash buckets whatsoever, so we would now have
    // to attempt expanding the table
    //
    DPRINT1("Big pool expansion needed, not implemented!\n");
    KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
    return FALSE;
}
ULONG
ExpFindAndRemoveTagBigPages(IN PVOID Va,
                            OUT PULONG_PTR BigPages,
                            IN POOL_TYPE PoolType)
{
    BOOLEAN FirstTry = TRUE;
    SIZE_T TableSize;
    KIRQL OldIrql;
    ULONG PoolTag, Hash;
    PPOOL_TRACKER_BIG_PAGES Entry;
    ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
    ASSERT(!(PoolType & SESSION_POOL_MASK));

    //
    // As the table is expandable, these values must only be read after acquiring
    // the lock to avoid a teared access during an expansion
    //
    Hash = ExpComputePartialHashForAddress(Va);
    KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
    Hash &= PoolBigPageTableHash;
    TableSize = PoolBigPageTableSize;

    //
    // Loop while trying to find this big page allocation
    //
    while (PoolBigPageTable[Hash].Va != Va)
    {
        //
        // Increment the size until we go past the end of the table
        //
        if (++Hash >= TableSize)
        {
            //
            // Is this the second time we've tried?
            //
            if (!FirstTry)
            {
                //
                // This means it was never inserted into the pool table and it
                // received the special "BIG" tag -- return that and return 0
                // so that the code can ask Mm for the page count instead
                //
                KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
                *BigPages = 0;
                return ' GIB';
            }

            //
            // The first time this happens, reset the hash index and try again
            //
            Hash = 0;
            FirstTry = FALSE;
        }
    }

    //
    // Now capture all the information we need from the entry, since after we
    // release the lock, the data can change
    //
    Entry = &PoolBigPageTable[Hash];
    *BigPages = Entry->NumberOfPages;
    PoolTag = Entry->Key;

    //
    // Set the free bit, and decrement the number of allocations. Finally, release
    // the lock and return the tag that was located
    //
    InterlockedIncrement((PLONG)&Entry->Va);
    InterlockedDecrementUL(&ExpPoolBigEntriesInUse);
    KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
    return PoolTag;
}
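
//
// Note (added for clarity, not part of the original source): because big-page
// allocations are page-aligned, bit 0 of the Va field doubles as the
// POOL_BIG_TABLE_ENTRY_FREE flag. The InterlockedIncrement on Va above sets
// that bit, so a later scan can test the slot roughly like this:
//
//     if ((ULONG_PTR)PoolBigPageTable[i].Va & POOL_BIG_TABLE_ENTRY_FREE)
//     {
//         /* slot is free for reuse */
//     }
//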
VOID
ExQueryPoolUsage(OUT PULONG PagedPoolPages,
                 OUT PULONG NonPagedPoolPages,
                 OUT PULONG PagedPoolAllocs,
                 OUT PULONG PagedPoolFrees,
                 OUT PULONG PagedPoolLookasideHits,
                 OUT PULONG NonPagedPoolAllocs,
                 OUT PULONG NonPagedPoolFrees,
                 OUT PULONG NonPagedPoolLookasideHits)
{
    ULONG i;
    PPOOL_DESCRIPTOR PoolDesc;

    //
    // Assume all failures
    //
    *PagedPoolPages = 0;
    *PagedPoolAllocs = 0;
    *PagedPoolFrees = 0;

    //
    // Tally up the totals for all the paged pools
    //
    for (i = 0; i < ExpNumberOfPagedPools + 1; i++)
    {
        PoolDesc = ExpPagedPoolDescriptor[i];
        *PagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
        *PagedPoolAllocs += PoolDesc->RunningAllocs;
        *PagedPoolFrees += PoolDesc->RunningDeAllocs;
    }

    //
    // The first non-paged pool has a hardcoded well-known descriptor name
    //
    PoolDesc = &NonPagedPoolDescriptor;
    *NonPagedPoolPages = PoolDesc->TotalPages + PoolDesc->TotalBigPages;
    *NonPagedPoolAllocs = PoolDesc->RunningAllocs;
    *NonPagedPoolFrees = PoolDesc->RunningDeAllocs;

    //
    // If the system has more than one non-paged pool, copy the other descriptor
    // totals as well
    //
    if (ExpNumberOfNonPagedPools > 1)
    {
        for (i = 0; i < ExpNumberOfNonPagedPools; i++)
        {
            PoolDesc = ExpNonPagedPoolDescriptor[i];
            *NonPagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
            *NonPagedPoolAllocs += PoolDesc->RunningAllocs;
            *NonPagedPoolFrees += PoolDesc->RunningDeAllocs;
        }
    }

    //
    // FIXME: Not yet supported
    //
    *NonPagedPoolLookasideHits += 0;
    *PagedPoolLookasideHits += 0;
}
/* PUBLIC FUNCTIONS ***********************************************************/

PVOID
NTAPI
ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
                      IN SIZE_T NumberOfBytes,
                      IN ULONG Tag)
{
    PPOOL_DESCRIPTOR PoolDesc;
    PLIST_ENTRY ListHead;
    PPOOL_HEADER Entry, NextEntry, FragmentEntry;
    KIRQL OldIrql;
    USHORT BlockSize, i;
    POOL_TYPE OriginalType;
    PKPRCB Prcb = KeGetCurrentPrcb();
    PGENERAL_LOOKASIDE LookasideList;

    //
    // Some sanity checks
    //
    ASSERT(Tag != ' GIB');
    ASSERT(NumberOfBytes != 0);
    ExpCheckPoolIrqlLevel(PoolType, NumberOfBytes, NULL);

    //
    // Not supported in ReactOS
    //
    ASSERT(!(PoolType & SESSION_POOL_MASK));

    //
    // Check if verifier or special pool is enabled
    //
    if (ExpPoolFlags & (POOL_FLAG_VERIFIER | POOL_FLAG_SPECIAL_POOL))
    {
        //
        // For verifier, we should call the verification routine
        //
        if (ExpPoolFlags & POOL_FLAG_VERIFIER)
        {
            DPRINT1("Driver Verifier is not yet supported\n");
        }

        //
        // For special pool, we check if this is a suitable allocation and do
        // the special allocation if needed
        //
        if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
        {
            //
            // Check if this is a special pool allocation
            //
            if (MmUseSpecialPool(NumberOfBytes, Tag))
            {
                //
                // Try to allocate using special pool
                //
                Entry = MmAllocateSpecialPool(NumberOfBytes, Tag, PoolType, 2);
                if (Entry) return Entry;
            }
        }
    }

    //
    // Get the pool type and its corresponding vector for this request
    //
    OriginalType = PoolType;
    PoolType = PoolType & BASE_POOL_TYPE_MASK;
    PoolDesc = PoolVector[PoolType];
    ASSERT(PoolDesc != NULL);

    //
    // Check if this is a big page allocation
    //
    if (NumberOfBytes > POOL_MAX_ALLOC)
    {
        //
        // Allocate pages for it
        //
        Entry = MiAllocatePoolPages(OriginalType, NumberOfBytes);
        if (!Entry)
        {
            //
            // Must succeed pool is deprecated, but still supported. These allocation
            // failures must cause an immediate bugcheck
            //
            if (OriginalType & MUST_SUCCEED_POOL_MASK)
            {
                KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                             NonPagedPoolDescriptor.TotalPages,
                             NonPagedPoolDescriptor.TotalBigPages);
            }

            //
            // Internal debugging
            //

            //
            // This flag requests printing failures, and can also further specify
            // breaking on failures
            //
            if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
            {
                DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
                        NumberOfBytes,
                        OriginalType);
                if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
            }

            //
            // Finally, this flag requests an exception, which we are more than
            // happy to raise!
            //
            if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
            {
                ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
            }
            return NULL;
        }

        //
        // Increment required counters
        //
        InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
                               (LONG)BYTES_TO_PAGES(NumberOfBytes));
        InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, NumberOfBytes);
        InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);

        //
        // Add a tag for the big page allocation and switch to the generic "BIG"
        // tag if we failed to do so, then insert a tracker for this allocation.
        //
        if (!ExpAddTagForBigPages(Entry,
                                  Tag,
                                  (ULONG)BYTES_TO_PAGES(NumberOfBytes),
                                  OriginalType))
        {
            Tag = ' GIB';
        }
        ExpInsertPoolTracker(Tag, ROUND_TO_PAGES(NumberOfBytes), OriginalType);
        return Entry;
    }

    //
    // Should never request 0 bytes from the pool, but since so many drivers do
    // it, we'll just assume they want 1 byte, based on NT's similar behavior
    //
    if (!NumberOfBytes) NumberOfBytes = 1;

    //
    // A pool allocation is defined by its data, a linked list to connect it to
    // the free list (if necessary), and a pool header to store accounting info.
    // Calculate this size, then convert it into a block size (units of pool
    // headers).
    //
    // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
    // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
    // the direct allocation of pages.
    //
    i = (USHORT)((NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1))
                 / POOL_BLOCK_SIZE);
    ASSERT(i < POOL_LISTS_PER_PAGE);
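
    //
    // Worked example (added for clarity, not in the original source): on a
    // build where POOL_BLOCK_SIZE and sizeof(POOL_HEADER) are both 8, a request
    // for NumberOfBytes == 24 gives i = (24 + 8 + 7) / 8 = 4, i.e. four 8-byte
    // units: one header plus 24 bytes of payload, rounded up to the block
    // granularity.
    //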
    //
    // Handle lookaside list optimization for both paged and nonpaged pool
    //
    if (i <= NUMBER_POOL_LOOKASIDE_LISTS)
    {
        //
        // Try popping it from the per-CPU lookaside list
        //
        LookasideList = (PoolType == PagedPool) ?
                        Prcb->PPPagedLookasideList[i - 1].P :
                        Prcb->PPNPagedLookasideList[i - 1].P;
        LookasideList->TotalAllocates++;
        Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
        if (!Entry)
        {
            //
            // We failed, try popping it from the global list
            //
            LookasideList = (PoolType == PagedPool) ?
                            Prcb->PPPagedLookasideList[i - 1].L :
                            Prcb->PPNPagedLookasideList[i - 1].L;
            LookasideList->TotalAllocates++;
            Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
        }

        //
        // If we were able to pop it, update the accounting and return the block
        //
        if (Entry)
        {
            LookasideList->AllocateHits++;

            //
            // Get the real entry, write down its pool type, and track it
            //
            Entry = POOL_ENTRY(Entry);
            Entry->PoolType = OriginalType + 1;
            ExpInsertPoolTracker(Tag,
                                 Entry->BlockSize * POOL_BLOCK_SIZE,
                                 OriginalType);

            //
            // Return the pool allocation
            //
            Entry->PoolTag = Tag;
            (POOL_FREE_BLOCK(Entry))->Flink = NULL;
            (POOL_FREE_BLOCK(Entry))->Blink = NULL;
            return POOL_FREE_BLOCK(Entry);
        }
    }
    //
    // Loop in the free lists looking for a block of this size. Start with the
    // list optimized for this kind of size lookup
    //
    ListHead = &PoolDesc->ListHeads[i];
    do
    {
        //
        // Are there any free entries available on this list?
        //
        if (!ExpIsPoolListEmpty(ListHead))
        {
            //
            // Acquire the pool lock now
            //
            OldIrql = ExLockPool(PoolDesc);

            //
            // And make sure the list still has entries
            //
            if (ExpIsPoolListEmpty(ListHead))
            {
                //
                // Someone raced us (and won) before we had a chance to acquire
                // the lock
                //
                ExUnlockPool(PoolDesc, OldIrql);
                continue;
            }

            //
            // Remove a free entry from the list
            // Note that due to the way we insert free blocks into multiple lists
            // there is a guarantee that any block on this list will either be
            // of the correct size, or perhaps larger.
            //
            ExpCheckPoolLinks(ListHead);
            Entry = POOL_ENTRY(ExpRemovePoolHeadList(ListHead));
            ExpCheckPoolLinks(ListHead);
            ExpCheckPoolBlocks(Entry);
            ASSERT(Entry->BlockSize >= i);
            ASSERT(Entry->PoolType == 0);

            //
            // Check if this block is larger that what we need. The block could
            // not possibly be smaller, due to the reason explained above (and
            // we would've asserted on a checked build if this was the case).
            //
            if (Entry->BlockSize != i)
            {
                //
                // Is there an entry before this one?
                //
                if (Entry->PreviousSize == 0)
                {
                    //
                    // There isn't anyone before us, so take the next block and
                    // turn it into a fragment that contains the leftover data
                    // that we don't need to satisfy the caller's request
                    //
                    FragmentEntry = POOL_BLOCK(Entry, i);
                    FragmentEntry->BlockSize = Entry->BlockSize - i;

                    //
                    // And make it point back to us
                    //
                    FragmentEntry->PreviousSize = i;

                    //
                    // Now get the block that follows the new fragment and check
                    // if it's still on the same page as us (and not at the end)
                    //
                    NextEntry = POOL_NEXT_BLOCK(FragmentEntry);
                    if (PAGE_ALIGN(NextEntry) != NextEntry)
                    {
                        //
                        // Adjust this next block to point to our newly created
                        // fragment block
                        //
                        NextEntry->PreviousSize = FragmentEntry->BlockSize;
                    }
                }
                else
                {
                    //
                    // There is a free entry before us, which we know is smaller
                    // so we'll make this entry the fragment instead
                    //
                    FragmentEntry = Entry;

                    //
                    // And then we'll remove from it the actual size required.
                    // Now the entry is a leftover free fragment
                    //
                    Entry->BlockSize -= i;

                    //
                    // Now let's go to the next entry after the fragment (which
                    // used to point to our original free entry) and make it
                    // reference the new fragment entry instead.
                    //
                    // This is the entry that will actually end up holding the
                    // allocation!
                    //
                    Entry = POOL_NEXT_BLOCK(Entry);
                    Entry->PreviousSize = FragmentEntry->BlockSize;

                    //
                    // And now let's go to the entry after that one and check if
                    // it's still on the same page, and not at the end
                    //
                    NextEntry = POOL_BLOCK(Entry, i);
                    if (PAGE_ALIGN(NextEntry) != NextEntry)
                    {
                        //
                        // Make it reference the allocation entry
                        //
                        NextEntry->PreviousSize = i;
                    }
                }

                //
                // Now our (allocation) entry is the right size
                //
                Entry->BlockSize = i;

                //
                // And the next entry is now the free fragment which contains
                // the remaining difference between how big the original entry
                // was, and the actual size the caller needs/requested.
                //
                FragmentEntry->PoolType = 0;
                BlockSize = FragmentEntry->BlockSize;

                //
                // Now check if enough free bytes remained for us to have a
                // "full" entry, which contains enough bytes for a linked list
                // and thus can be used for allocations (up to 8 bytes...)
                //
                if (BlockSize != 1)
                {
                    ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);

                    //
                    // Insert the free entry into the free list for this size
                    //
                    ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
                                          POOL_FREE_BLOCK(FragmentEntry));
                    ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
                }
            }

            //
            // We have found an entry for this allocation, so set the pool type
            // and release the lock since we're done
            //
            Entry->PoolType = OriginalType + 1;
            ExpCheckPoolBlocks(Entry);
            ExUnlockPool(PoolDesc, OldIrql);

            //
            // Increment required counters
            //
            InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
            InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);

            //
            // Track this allocation
            //
            ExpInsertPoolTracker(Tag,
                                 Entry->BlockSize * POOL_BLOCK_SIZE,
                                 OriginalType);

            //
            // Return the pool allocation
            //
            Entry->PoolTag = Tag;
            (POOL_FREE_BLOCK(Entry))->Flink = NULL;
            (POOL_FREE_BLOCK(Entry))->Blink = NULL;
            return POOL_FREE_BLOCK(Entry);
        }
    } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);
    //
    // There were no free entries left, so we have to allocate a new fresh page
    //
    Entry = MiAllocatePoolPages(OriginalType, PAGE_SIZE);
    if (!Entry)
    {
        //
        // Must succeed pool is deprecated, but still supported. These allocation
        // failures must cause an immediate bugcheck
        //
        if (OriginalType & MUST_SUCCEED_POOL_MASK)
        {
            KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                         NonPagedPoolDescriptor.TotalPages,
                         NonPagedPoolDescriptor.TotalBigPages);
        }

        //
        // Internal debugging
        //

        //
        // This flag requests printing failures, and can also further specify
        // breaking on failures
        //
        if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
        {
            DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
                    NumberOfBytes,
                    OriginalType);
            if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
        }

        //
        // Finally, this flag requests an exception, which we are more than
        // happy to raise!
        //
        if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
        {
            ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
        }

        //
        // Return NULL to the caller in all other cases
        //
        return NULL;
    }

    //
    // Setup the entry data
    //
    Entry->BlockSize = i;
    Entry->PoolType = OriginalType + 1;

    //
    // This page will have two entries -- one for the allocation (which we just
    // created above), and one for the remaining free bytes, which we're about
    // to create now. The free bytes are the whole page minus what was allocated
    // and then converted into units of block headers.
    //
    BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i;
    FragmentEntry = POOL_BLOCK(Entry, i);
    FragmentEntry->Ulong1 = 0;
    FragmentEntry->BlockSize = BlockSize;
    FragmentEntry->PreviousSize = i;

    //
    // Increment required counters
    //
    InterlockedIncrement((PLONG)&PoolDesc->TotalPages);
    InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);

    //
    // Now check if enough free bytes remained for us to have a "full" entry,
    // which contains enough bytes for a linked list and thus can be used for
    // allocations (up to 8 bytes...)
    //
    if (FragmentEntry->BlockSize != 1)
    {
        //
        // Excellent -- acquire the pool lock
        //
        OldIrql = ExLockPool(PoolDesc);

        //
        // And insert the free entry into the free list for this block size
        //
        ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
        ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
                              POOL_FREE_BLOCK(FragmentEntry));
        ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));

        //
        // Release the pool lock
        //
        ExpCheckPoolBlocks(Entry);
        ExUnlockPool(PoolDesc, OldIrql);
    }
    else
    {
        //
        // Simply do a sanity check
        //
        ExpCheckPoolBlocks(Entry);
    }

    //
    // Increment performance counters and track this allocation
    //
    InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
    ExpInsertPoolTracker(Tag,
                         Entry->BlockSize * POOL_BLOCK_SIZE,
                         OriginalType);

    //
    // And return the pool allocation
    //
    ExpCheckPoolBlocks(Entry);
    Entry->PoolTag = Tag;
    return POOL_FREE_BLOCK(Entry);
}
PVOID
NTAPI
ExAllocatePool(POOL_TYPE PoolType,
               SIZE_T NumberOfBytes)
{
    ULONG Tag = TAG_NONE;
    ULONG i;
    PLDR_DATA_TABLE_ENTRY LdrEntry;

    /* Use the first four letters of the driver name, or "None" if unavailable */
    LdrEntry = KeGetCurrentIrql() <= APC_LEVEL
                   ? MiLookupDataTableEntry(_ReturnAddress())
                   : NULL;
    if (LdrEntry)
    {
        for (i = 0; i < min(4, LdrEntry->BaseDllName.Length / sizeof(WCHAR)); i++)
            Tag = Tag >> 8 | (LdrEntry->BaseDllName.Buffer[i] & 0xff) << 24;
        for (; i < 4; i++)
            Tag = Tag >> 8 | ' ' << 24;
    }

    return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
}
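
/*
 * Illustrative example (added, not in the original source): for a call coming
 * from a driver whose BaseDllName is "mydriver.sys", the loops above pack the
 * first four characters into the tag 'mydr'; when the caller cannot be
 * resolved (IRQL too high or no loader entry), the allocation keeps the
 * default TAG_NONE ('None') tag.
 */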
VOID
NTAPI
ExFreePoolWithTag(IN PVOID P,
                  IN ULONG TagToFree)
{
    PPOOL_HEADER Entry, NextEntry;
    USHORT BlockSize;
    KIRQL OldIrql;
    POOL_TYPE PoolType;
    PPOOL_DESCRIPTOR PoolDesc;
    ULONG Tag;
    PEPROCESS Process;
    BOOLEAN Combined = FALSE;
    PFN_NUMBER PageCount, RealPageCount;
    PKPRCB Prcb = KeGetCurrentPrcb();
    PGENERAL_LOOKASIDE LookasideList;

    //
    // Check if any of the debug flags are enabled
    //
    if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
                        POOL_FLAG_CHECK_WORKERS |
                        POOL_FLAG_CHECK_RESOURCES |
                        POOL_FLAG_VERIFIER |
                        POOL_FLAG_CHECK_DEADLOCK |
                        POOL_FLAG_SPECIAL_POOL))
    {
        //
        // Check if special pool is enabled
        //
        if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
        {
            //
            // Check if it was allocated from a special pool
            //
            if (MmIsSpecialPoolAddress(P))
            {
                //
                // Was deadlock verification also enabled? We can do some extra
                // checks at this point
                //
                if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
                {
                    DPRINT1("Verifier not yet supported\n");
                }

                //
                // It is, so handle it via special pool free routine
                //
                MmFreeSpecialPool(P);
                return;
            }
        }

        //
        // For non-big page allocations, we'll do a bunch of checks in here
        //
        if (PAGE_ALIGN(P) != P)
        {
            //
            // Get the entry for this pool allocation
            // The pointer math here may look wrong or confusing, but it is quite right
            //
            Entry = P;
            Entry--;

            //
            // Get the pool type
            //
            PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;

            //
            // FIXME: Many other debugging checks go here
            //
            ExpCheckPoolIrqlLevel(PoolType, 0, P);
        }
    }

    //
    // Check if this is a big page allocation
    //
    if (PAGE_ALIGN(P) == P)
    {
        //
        // We need to find the tag for it, so first we need to find out what
        // kind of allocation this was (paged or nonpaged), then we can go
        // ahead and try finding the tag for it. Remember to get rid of the
        // PROTECTED_POOL tag if it's found.
        //
        // Note that if at insertion time, we failed to add the tag for a big
        // pool allocation, we used a special tag called 'BIG' to identify the
        // allocation, and we may get this tag back. In this scenario, we must
        // manually get the size of the allocation by actually counting through
        // the PFN database.
        //
        PoolType = MmDeterminePoolType(P);
        ExpCheckPoolIrqlLevel(PoolType, 0, P);
        Tag = ExpFindAndRemoveTagBigPages(P, &PageCount, PoolType);
        if (!PageCount)
        {
            DPRINT1("We do not know the size of this allocation. This is not yet supported\n");
            ASSERT(Tag == ' GIB');
            PageCount = 1; // We are going to lie! This might screw up accounting?
        }
        else if (Tag & PROTECTED_POOL)
        {
            Tag &= ~PROTECTED_POOL;
        }

        if (TagToFree && TagToFree != Tag)
        {
            DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
            KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
        }

        //
        // We have our tag and our page count, so we can go ahead and remove this
        // tracker now
        //
        ExpRemovePoolTracker(Tag, PageCount << PAGE_SHIFT, PoolType);

        //
        // Check if any of the debug flags are enabled
        //
        if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
                            POOL_FLAG_CHECK_WORKERS |
                            POOL_FLAG_CHECK_RESOURCES |
                            POOL_FLAG_CHECK_DEADLOCK))
        {
            //
            // Was deadlock verification also enabled? We can do some extra
            // checks at this point
            //
            if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
            {
                DPRINT1("Verifier not yet supported\n");
            }

            //
            // FIXME: Many debugging checks go here
            //
        }

        PoolDesc = PoolVector[PoolType];
        InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
        InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes,
                                    -(LONG_PTR)(PageCount << PAGE_SHIFT));

        //
        // Do the real free now and update the last counter with the big page count
        //
        RealPageCount = MiFreePoolPages(P);
        ASSERT(RealPageCount == PageCount);
        InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
                               -(LONG)RealPageCount);
        return;
    }
    //
    // Get the entry for this pool allocation
    // The pointer math here may look wrong or confusing, but it is quite right
    //
    Entry = P;
    Entry--;
    ASSERT((ULONG_PTR)Entry % POOL_BLOCK_SIZE == 0);

    //
    // Get the size of the entry, and its pool type, then load the descriptor
    // for this pool type
    //
    BlockSize = Entry->BlockSize;
    PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
    PoolDesc = PoolVector[PoolType];

    //
    // Make sure that the IRQL makes sense
    //
    ExpCheckPoolIrqlLevel(PoolType, 0, P);

    //
    // Get the pool tag and get rid of the PROTECTED_POOL flag
    //
    Tag = Entry->PoolTag;
    if (Tag & PROTECTED_POOL) Tag &= ~PROTECTED_POOL;

    if (TagToFree && TagToFree != Tag)
    {
        DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
        KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
    }

    //
    // Track the removal of this allocation
    //
    ExpRemovePoolTracker(Tag,
                         BlockSize * POOL_BLOCK_SIZE,
                         Entry->PoolType - 1);

    //
    // Release pool quota, if any
    //
    if ((Entry->PoolType - 1) & QUOTA_POOL_MASK)
    {
        Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
        ASSERT(Process != NULL);
        if (Process)
        {
            if (Process->Pcb.Header.Type != ProcessObject)
            {
                DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
                        Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
                KeBugCheckEx(BAD_POOL_CALLER,
                             (ULONG_PTR)Process);
            }
            PsReturnPoolQuota(Process, PoolType, BlockSize * POOL_BLOCK_SIZE);
            ObDereferenceObject(Process);
        }
    }

    //
    // Is this allocation small enough to have come from a lookaside list?
    //
    if (BlockSize <= NUMBER_POOL_LOOKASIDE_LISTS)
    {
        //
        // Try pushing it into the per-CPU lookaside list
        //
        LookasideList = (PoolType == PagedPool) ?
                        Prcb->PPPagedLookasideList[BlockSize - 1].P :
                        Prcb->PPNPagedLookasideList[BlockSize - 1].P;
        LookasideList->TotalFrees++;
        if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
        {
            LookasideList->FreeHits++;
            InterlockedPushEntrySList(&LookasideList->ListHead, P);
            return;
        }

        //
        // We failed, try to push it into the global lookaside list
        //
        LookasideList = (PoolType == PagedPool) ?
                        Prcb->PPPagedLookasideList[BlockSize - 1].L :
                        Prcb->PPNPagedLookasideList[BlockSize - 1].L;
        LookasideList->TotalFrees++;
        if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
        {
            LookasideList->FreeHits++;
            InterlockedPushEntrySList(&LookasideList->ListHead, P);
            return;
        }
    }

    //
    // Get the pointer to the next entry
    //
    NextEntry = POOL_BLOCK(Entry, BlockSize);

    //
    // Update performance counters
    //
    InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
    InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, -BlockSize * POOL_BLOCK_SIZE);

    //
    // Acquire the pool lock
    //
    OldIrql = ExLockPool(PoolDesc);

    //
    // Check if the next allocation is at the end of the page
    //
    ExpCheckPoolBlocks(Entry);
    if (PAGE_ALIGN(NextEntry) != NextEntry)
    {
        //
        // We may be able to combine the block if it's free
        //
        if (NextEntry->PoolType == 0)
        {
            //
            // The next block is free, so we'll do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header, so there's no linked list
            // for us to remove
            //
            if ((NextEntry->BlockSize != 1))
            {
                //
                // The block is at least big enough to have a linked list, so go
                // ahead and remove it
                //
                ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
                ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
            }

            //
            // Our entry is now combined with the next entry
            //
            Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
        }
    }

    //
    // Now check if there was a previous entry on the same page as us
    //
    if (Entry->PreviousSize)
    {
        //
        // Great, grab that entry and check if it's free
        //
        NextEntry = POOL_PREV_BLOCK(Entry);
        if (NextEntry->PoolType == 0)
        {
            //
            // It is, so we can do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header so there's no linked list
            // for us to remove
            //
            if ((NextEntry->BlockSize != 1))
            {
                //
                // The block is at least big enough to have a linked list, so go
                // ahead and remove it
                //
                ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
                ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
            }

            //
            // Combine our original block (which might've already been combined
            // with the next block), into the previous block
            //
            NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;

            //
            // And now we'll work with the previous block instead
            //
            Entry = NextEntry;
        }
    }

    //
    // By now, it may have been possible for our combined blocks to actually
    // have made up a full page (if there were only 2-3 allocations on the
    // page, they could've all been combined).
    //
    if ((PAGE_ALIGN(Entry) == Entry) &&
        (PAGE_ALIGN(POOL_NEXT_BLOCK(Entry)) == POOL_NEXT_BLOCK(Entry)))
    {
        //
        // In this case, release the pool lock, update the performance counter,
        // and free the page
        //
        ExUnlockPool(PoolDesc, OldIrql);
        InterlockedExchangeAdd((PLONG)&PoolDesc->TotalPages, -1);
        MiFreePoolPages(Entry);
        return;
    }

    //
    // Otherwise, we now have a free block (or a combination of 2 or 3)
    //
    Entry->PoolType = 0;
    BlockSize = Entry->BlockSize;
    ASSERT(BlockSize != 1);

    //
    // Check if we actually did combine it with anyone
    //
    if (Combined)
    {
        //
        // Get the first combined block (either our original to begin with, or
        // the one after the original, depending if we combined with the previous)
        //
        NextEntry = POOL_NEXT_BLOCK(Entry);

        //
        // As long as the next block isn't on a page boundary, have it point
        // back to us
        //
        if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
    }

    //
    // Insert this new free block, and release the pool lock
    //
    ExpInsertPoolHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
    ExpCheckPoolLinks(POOL_FREE_BLOCK(Entry));
    ExUnlockPool(PoolDesc, OldIrql);
}
VOID
NTAPI
ExFreePool(PVOID P)
{
    //
    // Just free without checking for the tag
    //
    ExFreePoolWithTag(P, 0);
}
SIZE_T
NTAPI
ExQueryPoolBlockSize(IN PVOID PoolBlock,
                     OUT PBOOLEAN QuotaCharged)
PVOID
NTAPI
ExAllocatePoolWithQuota(IN POOL_TYPE PoolType,
                        IN SIZE_T NumberOfBytes)
{
    //
    // Allocate the pool
    //
    return ExAllocatePoolWithQuotaTag(PoolType, NumberOfBytes, TAG_NONE);
}
PVOID
NTAPI
ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType,
                              IN SIZE_T NumberOfBytes,
                              IN ULONG Tag,
                              IN EX_POOL_PRIORITY Priority)
{
    //
    // Allocate the pool
    //
    return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
}
PVOID
NTAPI
ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType,
                           IN SIZE_T NumberOfBytes,
                           IN ULONG Tag)
{
    BOOLEAN Raise = TRUE;
    PVOID Buffer;
    PPOOL_HEADER Entry;
    NTSTATUS Status;
    PEPROCESS Process = PsGetCurrentProcess();

    //
    // Check if we should fail instead of raising an exception
    //
    if (PoolType & POOL_QUOTA_FAIL_INSTEAD_OF_RAISE)
    {
        Raise = FALSE;
        PoolType &= ~POOL_QUOTA_FAIL_INSTEAD_OF_RAISE;
    }

    //
    // Inject the pool quota mask
    //
    PoolType += QUOTA_POOL_MASK;

    //
    // Check if we have enough space to add the quota owner process, as long as
    // this isn't the system process, which never gets charged quota
    //
    ASSERT(NumberOfBytes != 0);
    if ((NumberOfBytes <= (PAGE_SIZE - POOL_BLOCK_SIZE - sizeof(PVOID))) &&
        (Process != PsInitialSystemProcess))
    {
        //
        // Add space for our EPROCESS pointer
        //
        NumberOfBytes += sizeof(PEPROCESS);
    }
    else
    {
        //
        // We won't be able to store the pointer, so don't use quota for this
        //
        PoolType -= QUOTA_POOL_MASK;
    }

    //
    // Allocate the pool buffer now
    //
    Buffer = ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);

    //
    // If the buffer is page-aligned, this is a large page allocation and we
    // won't touch it
    //
    if (PAGE_ALIGN(Buffer) != Buffer)
    {
        //
        // Also if special pool is enabled, and this was allocated from there,
        // we won't touch it either
        //
        if ((ExpPoolFlags & POOL_FLAG_SPECIAL_POOL) &&
            (MmIsSpecialPoolAddress(Buffer)))
        {
            return Buffer;
        }

        //
        // If it wasn't actually allocated with quota charges, ignore it too
        //
        if (!(PoolType & QUOTA_POOL_MASK)) return Buffer;

        //
        // If this is the system process, we don't charge quota, so ignore
        //
        if (Process == PsInitialSystemProcess) return Buffer;

        //
        // Actually go and charge quota for the process now
        //
        Entry = POOL_ENTRY(Buffer);
        Status = PsChargeProcessPoolQuota(Process,
                                          PoolType & BASE_POOL_TYPE_MASK,
                                          Entry->BlockSize * POOL_BLOCK_SIZE);
        if (!NT_SUCCESS(Status))
        {
            //
            // Quota failed, back out the allocation, clear the owner, and fail
            //
            ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
            ExFreePoolWithTag(Buffer, Tag);
            if (Raise) RtlRaiseStatus(Status);
            return NULL;
        }

        //
        // Quota worked, write the owner and then reference it before returning
        //
        ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = Process;
        ObReferenceObject(Process);
    }
    else if (!(Buffer) && (Raise))
    {
        //
        // The allocation failed, raise an error if we are in raise mode
        //
        RtlRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
    }

    //
    // Return the allocated buffer
    //
    return Buffer;
}
    ULONG_PTR Address = 0, Flags = 0;
    PVOID PoolPage;
    PPOOL_HEADER Entry;
    BOOLEAN ThisOne;
    PULONG Data;

    if (!KdbpGetHexNumber(Argv[1], &Address))
    {
        KdbpPrint("Invalid parameter: %s\n", Argv[0]);
    }
    if (!KdbpGetHexNumber(Argv[1], &Flags))
    {
        KdbpPrint("Invalid parameter: %s\n", Argv[0]);
    }

    /* Check if we got an address */
    if (Address)
    {
        /* Get the base page */
        PoolPage = PAGE_ALIGN(Address);
    }
    else
    {
        KdbpPrint("Heap is unimplemented\n");
    }

    /* No paging support! */
    if (!MmIsAddressValid(PoolPage))
    {
        KdbpPrint("Address not accessible!\n");
    }

    if ((Address >= (ULONG_PTR)MmPagedPoolStart) && (Address <= (ULONG_PTR)MmPagedPoolEnd))
        KdbpPrint("Allocation is from PagedPool region\n");
    else if ((Address >= (ULONG_PTR)MmNonPagedPoolStart) && (Address <= (ULONG_PTR)MmNonPagedPoolEnd))
        KdbpPrint("Allocation is from NonPagedPool region\n");
    else
        KdbpPrint("Address 0x%p is not within any pool!\n", (PVOID)Address);

    /* Loop all entries of that page */
    Entry = PoolPage;
    do
    {
        /* Check if the address is within that entry */
        ThisOne = ((Address >= (ULONG_PTR)Entry) &&
                   (Address < (ULONG_PTR)(Entry + Entry->BlockSize)));

        if (!(Flags & 1) || ThisOne)
        {
            /* Print the line */
            KdbpPrint("%c%p size: %4d previous size: %4d %s %.4s\n",
                      ThisOne ? '*' : ' ', Entry, Entry->BlockSize, Entry->PreviousSize,
                      (Flags & 0x80000000) ? "" : (Entry->PoolType ? "(Allocated)" : "(Free) "),
                      (Flags & 0x80000000) ? "" : (PCHAR)&Entry->PoolTag);
        }

        if (Flags & 0x80000000)
        {
            Data = (PULONG)(Entry + 1);
            KdbpPrint(" %p %08lx %08lx %08lx %08lx\n"
                      " %p %08lx %08lx %08lx %08lx\n",
                      &Data[0], Data[0], Data[1], Data[2], Data[3],
                      &Data[4], Data[4], Data[5], Data[6], Data[7]);
        }

        /* Go to next entry */
        Entry = POOL_BLOCK(Entry, Entry->BlockSize);
    }
    while ((Entry->BlockSize != 0) && ((ULONG_PTR)Entry < (ULONG_PTR)PoolPage + PAGE_SIZE));

#endif // DBG && KDBG