2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/expool.c
5 * PURPOSE: ARM Memory Manager Executive Pool Manager
6 * PROGRAMMERS: ReactOS Portable Systems Group
9 /* INCLUDES *******************************************************************/
15 #define MODULE_INVOLVED_IN_ARM3
16 #include <mm/ARM3/miarm.h>
18 #undef ExAllocatePoolWithQuota
19 #undef ExAllocatePoolWithQuotaTag
21 /* GLOBALS ********************************************************************/
//
// Bit 0 of a PoolBigPageTable entry's Va field marks the entry as free.
// (Entries are initialized to (PVOID)1 and tested with this mask before
// being claimed via InterlockedCompareExchangePointer.)
//
#define POOL_BIG_TABLE_ENTRY_FREE 0x1

//
// Context handed to the "Generic DPC" (ExpGetPoolTagInfoTarget) that
// ExGetPoolTagInfo broadcasts so the pool tag tracking table can be
// snapshotted atomically with respect to all processors.
//
typedef struct _POOL_DPC_CONTEXT
{
    PPOOL_TRACKER_TABLE PoolTrackTable;             // Destination buffer for the table copy
    SIZE_T PoolTrackTableSize;                      // Number of entries to copy
    PPOOL_TRACKER_TABLE PoolTrackTableExpansion;    // Expansion table (unused: expansion not supported yet)
    SIZE_T PoolTrackTableSizeExpansion;             // Expansion entry count (asserted to be 0)
} POOL_DPC_CONTEXT, *PPOOL_DPC_CONTEXT;
ULONG ExpNumberOfPagedPools;                        // Count of paged pool descriptors in use
POOL_DESCRIPTOR NonPagedPoolDescriptor;             // The single nonpaged pool descriptor
PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16 + 1];    // Paged pool descriptors ([0] set at init)
PPOOL_DESCRIPTOR PoolVector[2];                     // Indexed by base pool type (NonPagedPool/PagedPool)
PKGUARDED_MUTEX ExpPagedPoolMutex;                  // Guarded mutex protecting the paged pool descriptor
SIZE_T PoolTrackTableSize, PoolTrackTableMask;      // Tag tracker table entry count and hash mask
SIZE_T PoolBigPageTableSize, PoolBigPageTableHash;  // Big-page tracker table entry count and hash mask
PPOOL_TRACKER_TABLE PoolTrackTable;                 // Per-tag allocation/free accounting table
PPOOL_TRACKER_BIG_PAGES PoolBigPageTable;           // Tracker for whole-page ("big") allocations
KSPIN_LOCK ExpTaggedPoolLock;                       // Serializes creation of new tag tracker entries
BOOLEAN ExStopBadTags;                              // Debug switch: assert on malformed pool tags
KSPIN_LOCK ExpLargePoolTableLock;                   // Protects PoolBigPageTable and its size/hash
ULONG ExpPoolBigEntriesInUse;                       // Number of claimed entries in PoolBigPageTable
/* Pool block/header/list access macros */
//
// POOL_ENTRY:       header that precedes an allocation's user pointer
// POOL_FREE_BLOCK:  the LIST_ENTRY stored immediately after a free block's header
// POOL_BLOCK:       advance a header pointer by 'i' pool block units
// POOL_NEXT_BLOCK / POOL_PREV_BLOCK: neighbors located via the header size fields
//
#define POOL_ENTRY(x) (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
#define POOL_FREE_BLOCK(x) (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
#define POOL_BLOCK(x, i) (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
#define POOL_NEXT_BLOCK(x) POOL_BLOCK((x), (x)->BlockSize)
#define POOL_PREV_BLOCK(x) POOL_BLOCK((x), -((x)->PreviousSize))
58 * Pool list access debug macros, similar to Arthur's pfnlist.c work.
59 * Microsoft actually implements similar checks in the Windows Server 2003 SP1
60 * pool code, but only for checked builds.
62 * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
63 * that these checks are done even on retail builds, due to the increasing
64 * number of kernel-mode attacks which depend on dangling list pointers and other
65 * kinds of list-based attacks.
67 * For now, I will leave these checks on all the time, but later they are likely
68 * to be DBG-only, at least until there are enough kernel-mode security attacks
69 * against ReactOS to warrant the performance hit.
71 * For now, these are not made inline, so we can get good stack traces.
75 ExpDecodePoolLink(IN PLIST_ENTRY Link
)
77 return (PLIST_ENTRY
)((ULONG_PTR
)Link
& ~1);
82 ExpEncodePoolLink(IN PLIST_ENTRY Link
)
84 return (PLIST_ENTRY
)((ULONG_PTR
)Link
| 1);
89 ExpCheckPoolLinks(IN PLIST_ENTRY ListHead
)
91 if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead
->Flink
)->Blink
) != ListHead
) ||
92 (ExpDecodePoolLink(ExpDecodePoolLink(ListHead
->Blink
)->Flink
) != ListHead
))
94 KeBugCheckEx(BAD_POOL_HEADER
,
97 (ULONG_PTR
)ExpDecodePoolLink(ExpDecodePoolLink(ListHead
->Flink
)->Blink
),
98 (ULONG_PTR
)ExpDecodePoolLink(ExpDecodePoolLink(ListHead
->Blink
)->Flink
));
104 ExpInitializePoolListHead(IN PLIST_ENTRY ListHead
)
106 ListHead
->Flink
= ListHead
->Blink
= ExpEncodePoolLink(ListHead
);
111 ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead
)
113 return (ExpDecodePoolLink(ListHead
->Flink
) == ListHead
);
118 ExpRemovePoolEntryList(IN PLIST_ENTRY Entry
)
120 PLIST_ENTRY Blink
, Flink
;
121 Flink
= ExpDecodePoolLink(Entry
->Flink
);
122 Blink
= ExpDecodePoolLink(Entry
->Blink
);
123 Flink
->Blink
= ExpEncodePoolLink(Blink
);
124 Blink
->Flink
= ExpEncodePoolLink(Flink
);
129 ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead
)
131 PLIST_ENTRY Entry
, Flink
;
132 Entry
= ExpDecodePoolLink(ListHead
->Flink
);
133 Flink
= ExpDecodePoolLink(Entry
->Flink
);
134 ListHead
->Flink
= ExpEncodePoolLink(Flink
);
135 Flink
->Blink
= ExpEncodePoolLink(ListHead
);
141 ExpRemovePoolTailList(IN PLIST_ENTRY ListHead
)
143 PLIST_ENTRY Entry
, Blink
;
144 Entry
= ExpDecodePoolLink(ListHead
->Blink
);
145 Blink
= ExpDecodePoolLink(Entry
->Blink
);
146 ListHead
->Blink
= ExpEncodePoolLink(Blink
);
147 Blink
->Flink
= ExpEncodePoolLink(ListHead
);
153 ExpInsertPoolTailList(IN PLIST_ENTRY ListHead
,
154 IN PLIST_ENTRY Entry
)
157 ExpCheckPoolLinks(ListHead
);
158 Blink
= ExpDecodePoolLink(ListHead
->Blink
);
159 Entry
->Flink
= ExpEncodePoolLink(ListHead
);
160 Entry
->Blink
= ExpEncodePoolLink(Blink
);
161 Blink
->Flink
= ExpEncodePoolLink(Entry
);
162 ListHead
->Blink
= ExpEncodePoolLink(Entry
);
163 ExpCheckPoolLinks(ListHead
);
168 ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead
,
169 IN PLIST_ENTRY Entry
)
172 ExpCheckPoolLinks(ListHead
);
173 Flink
= ExpDecodePoolLink(ListHead
->Flink
);
174 Entry
->Flink
= ExpEncodePoolLink(Flink
);
175 Entry
->Blink
= ExpEncodePoolLink(ListHead
);
176 Flink
->Blink
= ExpEncodePoolLink(Entry
);
177 ListHead
->Flink
= ExpEncodePoolLink(Entry
);
178 ExpCheckPoolLinks(ListHead
);
183 ExpCheckPoolHeader(IN PPOOL_HEADER Entry
)
185 PPOOL_HEADER PreviousEntry
, NextEntry
;
187 /* Is there a block before this one? */
188 if (Entry
->PreviousSize
)
191 PreviousEntry
= POOL_PREV_BLOCK(Entry
);
193 /* The two blocks must be on the same page! */
194 if (PAGE_ALIGN(Entry
) != PAGE_ALIGN(PreviousEntry
))
196 /* Something is awry */
197 KeBugCheckEx(BAD_POOL_HEADER
,
199 (ULONG_PTR
)PreviousEntry
,
204 /* This block should also indicate that it's as large as we think it is */
205 if (PreviousEntry
->BlockSize
!= Entry
->PreviousSize
)
207 /* Otherwise, someone corrupted one of the sizes */
208 DPRINT1("PreviousEntry BlockSize %lu, tag %.4s. Entry PreviousSize %lu, tag %.4s\n",
209 PreviousEntry
->BlockSize
, (char *)&PreviousEntry
->PoolTag
,
210 Entry
->PreviousSize
, (char *)&Entry
->PoolTag
);
211 KeBugCheckEx(BAD_POOL_HEADER
,
213 (ULONG_PTR
)PreviousEntry
,
218 else if (PAGE_ALIGN(Entry
) != Entry
)
220 /* If there's no block before us, we are the first block, so we should be on a page boundary */
221 KeBugCheckEx(BAD_POOL_HEADER
,
228 /* This block must have a size */
229 if (!Entry
->BlockSize
)
231 /* Someone must've corrupted this field */
232 if (Entry
->PreviousSize
)
234 PreviousEntry
= POOL_PREV_BLOCK(Entry
);
235 DPRINT1("PreviousEntry tag %.4s. Entry tag %.4s\n",
236 (char *)&PreviousEntry
->PoolTag
,
237 (char *)&Entry
->PoolTag
);
241 DPRINT1("Entry tag %.4s\n",
242 (char *)&Entry
->PoolTag
);
244 KeBugCheckEx(BAD_POOL_HEADER
,
251 /* Okay, now get the next block */
252 NextEntry
= POOL_NEXT_BLOCK(Entry
);
254 /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
255 if (PAGE_ALIGN(NextEntry
) != NextEntry
)
257 /* The two blocks must be on the same page! */
258 if (PAGE_ALIGN(Entry
) != PAGE_ALIGN(NextEntry
))
260 /* Something is messed up */
261 KeBugCheckEx(BAD_POOL_HEADER
,
263 (ULONG_PTR
)NextEntry
,
268 /* And this block should think we are as large as we truly are */
269 if (NextEntry
->PreviousSize
!= Entry
->BlockSize
)
271 /* Otherwise, someone corrupted the field */
272 DPRINT1("Entry BlockSize %lu, tag %.4s. NextEntry PreviousSize %lu, tag %.4s\n",
273 Entry
->BlockSize
, (char *)&Entry
->PoolTag
,
274 NextEntry
->PreviousSize
, (char *)&NextEntry
->PoolTag
);
275 KeBugCheckEx(BAD_POOL_HEADER
,
277 (ULONG_PTR
)NextEntry
,
286 ExpCheckPoolAllocation(
294 POOL_TYPE RealPoolType
;
296 /* Get the pool header */
297 Entry
= ((PPOOL_HEADER
)P
) - 1;
299 /* Check if this is a large allocation */
300 if (PAGE_ALIGN(P
) == P
)
302 /* Lock the pool table */
303 KeAcquireSpinLock(&ExpLargePoolTableLock
, &OldIrql
);
305 /* Find the pool tag */
306 for (i
= 0; i
< PoolBigPageTableSize
; i
++)
308 /* Check if this is our allocation */
309 if (PoolBigPageTable
[i
].Va
== P
)
311 /* Make sure the tag is ok */
312 if (PoolBigPageTable
[i
].Key
!= Tag
)
314 KeBugCheckEx(BAD_POOL_CALLER
, 0x0A, (ULONG_PTR
)P
, PoolBigPageTable
[i
].Key
, Tag
);
321 /* Release the lock */
322 KeReleaseSpinLock(&ExpLargePoolTableLock
, OldIrql
);
324 if (i
== PoolBigPageTableSize
)
326 /* Did not find the allocation */
330 /* Get Pool type by address */
331 RealPoolType
= MmDeterminePoolType(P
);
336 if (Entry
->PoolTag
!= Tag
)
338 DPRINT1("Allocation has wrong pool tag! Expected '%.4s', got '%.4s' (0x%08lx)\n",
339 &Tag
, &Entry
->PoolTag
, Entry
->PoolTag
);
340 KeBugCheckEx(BAD_POOL_CALLER
, 0x0A, (ULONG_PTR
)P
, Entry
->PoolTag
, Tag
);
343 /* Check the rest of the header */
344 ExpCheckPoolHeader(Entry
);
346 /* Get Pool type from entry */
347 RealPoolType
= (Entry
->PoolType
- 1);
350 /* Should we check the pool type? */
353 /* Verify the pool type */
354 if (RealPoolType
!= PoolType
)
356 DPRINT1("Wrong pool type! Expected %s, got %s\n",
357 PoolType
& BASE_POOL_TYPE_MASK
? "PagedPool" : "NonPagedPool",
358 (Entry
->PoolType
- 1) & BASE_POOL_TYPE_MASK
? "PagedPool" : "NonPagedPool");
359 KeBugCheckEx(BAD_POOL_CALLER
, 0xCC, (ULONG_PTR
)P
, Entry
->PoolTag
, Tag
);
366 ExpCheckPoolBlocks(IN PVOID Block
)
368 BOOLEAN FoundBlock
= FALSE
;
372 /* Get the first entry for this page, make sure it really is the first */
373 Entry
= PAGE_ALIGN(Block
);
374 ASSERT(Entry
->PreviousSize
== 0);
376 /* Now scan each entry */
379 /* When we actually found our block, remember this */
380 if (Entry
== Block
) FoundBlock
= TRUE
;
382 /* Now validate this block header */
383 ExpCheckPoolHeader(Entry
);
385 /* And go to the next one, keeping track of our size */
386 Size
+= Entry
->BlockSize
;
387 Entry
= POOL_NEXT_BLOCK(Entry
);
389 /* If we hit the last block, stop */
390 if (Size
>= (PAGE_SIZE
/ POOL_BLOCK_SIZE
)) break;
392 /* If we hit the end of the page, stop */
393 if (PAGE_ALIGN(Entry
) == Entry
) break;
396 /* We must've found our block, and we must have hit the end of the page */
397 if ((PAGE_ALIGN(Entry
) != Entry
) || !(FoundBlock
))
399 /* Otherwise, the blocks are messed up */
400 KeBugCheckEx(BAD_POOL_HEADER
, 10, (ULONG_PTR
)Block
, __LINE__
, (ULONG_PTR
)Entry
);
406 ExpCheckPoolIrqlLevel(IN POOL_TYPE PoolType
,
407 IN SIZE_T NumberOfBytes
,
411 // Validate IRQL: It must be APC_LEVEL or lower for Paged Pool, and it must
412 // be DISPATCH_LEVEL or lower for Non Paged Pool
414 if (((PoolType
& BASE_POOL_TYPE_MASK
) == PagedPool
) ?
415 (KeGetCurrentIrql() > APC_LEVEL
) :
416 (KeGetCurrentIrql() > DISPATCH_LEVEL
))
419 // Take the system down
421 KeBugCheckEx(BAD_POOL_CALLER
,
422 !Entry
? POOL_ALLOC_IRQL_INVALID
: POOL_FREE_IRQL_INVALID
,
425 !Entry
? NumberOfBytes
: (ULONG_PTR
)Entry
);
431 ExpComputeHashForTag(IN ULONG Tag
,
432 IN SIZE_T BucketMask
)
435 // Compute the hash by multiplying with a large prime number and then XORing
436 // with the HIDWORD of the result.
438 // Finally, AND with the bucket mask to generate a valid index/bucket into
441 ULONGLONG Result
= (ULONGLONG
)40543 * Tag
;
442 return (ULONG
)BucketMask
& ((ULONG
)Result
^ (Result
>> 32));
447 ExpComputePartialHashForAddress(IN PVOID BaseAddress
)
451 // Compute the hash by converting the address into a page number, and then
452 // XORing each nibble with the next one.
454 // We do *NOT* AND with the bucket mask at this point because big table expansion
455 // might happen. Therefore, the final step of the hash must be performed
456 // while holding the expansion pushlock, and this is why we call this a
457 // "partial" hash only.
459 Result
= (ULONG
)((ULONG_PTR
)BaseAddress
>> PAGE_SHIFT
);
460 return (Result
>> 24) ^ (Result
>> 16) ^ (Result
>> 8) ^ Result
;
466 ExpTagAllowPrint(CHAR Tag
)
468 if ((Tag
>= 'a' && Tag
<= 'z') ||
469 (Tag
>= 'A' && Tag
<= 'Z') ||
470 (Tag
>= '0' && Tag
<= '9') ||
471 Tag
== ' ' || Tag
== '=' ||
472 Tag
== '?' || Tag
== '@')
481 #define MiDumperPrint(dbg, fmt, ...) \
482 if (dbg) KdbpPrint(fmt, ##__VA_ARGS__); \
483 else DPRINT1(fmt, ##__VA_ARGS__)
485 #define MiDumperPrint(dbg, fmt, ...) \
486 DPRINT1(fmt, ##__VA_ARGS__)
490 MiDumpPoolConsumers(BOOLEAN CalledFromDbg
, ULONG Tag
, ULONG Mask
, ULONG Flags
)
496 // Only print header if called from OOM situation
500 DPRINT1("---------------------\n");
501 DPRINT1("Out of memory dumper!\n");
506 KdbpPrint("Pool Used:\n");
511 // Remember whether we'll have to be verbose
512 // This is the only supported flag!
514 Verbose
= BooleanFlagOn(Flags
, 1);
517 // Print table header
521 MiDumperPrint(CalledFromDbg
, "\t\t\t\tNonPaged\t\t\t\t\t\t\tPaged\n");
522 MiDumperPrint(CalledFromDbg
, "Tag\t\tAllocs\t\tFrees\t\tDiff\t\tUsed\t\tAllocs\t\tFrees\t\tDiff\t\tUsed\n");
526 MiDumperPrint(CalledFromDbg
, "\t\tNonPaged\t\t\tPaged\n");
527 MiDumperPrint(CalledFromDbg
, "Tag\t\tAllocs\t\tUsed\t\tAllocs\t\tUsed\n");
531 // We'll extract allocations for all the tracked pools
533 for (i
= 0; i
< PoolTrackTableSize
; ++i
)
535 PPOOL_TRACKER_TABLE TableEntry
;
537 TableEntry
= &PoolTrackTable
[i
];
540 // We only care about tags which have allocated memory
542 if (TableEntry
->NonPagedBytes
!= 0 || TableEntry
->PagedBytes
!= 0)
545 // If there's a tag, attempt to do a pretty print
546 // only if it matches the caller's tag, or if
547 // any tag is allowed
548 // For checking whether it matches caller's tag,
549 // use the mask to make sure not to mess with the wildcards
551 if (TableEntry
->Key
!= 0 && TableEntry
->Key
!= TAG_NONE
&&
552 (Tag
== 0 || (TableEntry
->Key
& Mask
) == (Tag
& Mask
)))
557 // Extract each 'component' and check whether they are printable
559 Tag
[0] = TableEntry
->Key
& 0xFF;
560 Tag
[1] = TableEntry
->Key
>> 8 & 0xFF;
561 Tag
[2] = TableEntry
->Key
>> 16 & 0xFF;
562 Tag
[3] = TableEntry
->Key
>> 24 & 0xFF;
564 if (ExpTagAllowPrint(Tag
[0]) && ExpTagAllowPrint(Tag
[1]) && ExpTagAllowPrint(Tag
[2]) && ExpTagAllowPrint(Tag
[3]))
567 // Print in direct order to make !poolused TAG usage easier
571 MiDumperPrint(CalledFromDbg
, "'%c%c%c%c'\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", Tag
[0], Tag
[1], Tag
[2], Tag
[3],
572 TableEntry
->NonPagedAllocs
, TableEntry
->NonPagedFrees
,
573 (TableEntry
->NonPagedAllocs
- TableEntry
->NonPagedFrees
), TableEntry
->NonPagedBytes
,
574 TableEntry
->PagedAllocs
, TableEntry
->PagedFrees
,
575 (TableEntry
->PagedAllocs
- TableEntry
->PagedFrees
), TableEntry
->PagedBytes
);
579 MiDumperPrint(CalledFromDbg
, "'%c%c%c%c'\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", Tag
[0], Tag
[1], Tag
[2], Tag
[3],
580 TableEntry
->NonPagedAllocs
, TableEntry
->NonPagedBytes
,
581 TableEntry
->PagedAllocs
, TableEntry
->PagedBytes
);
588 MiDumperPrint(CalledFromDbg
, "%x\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TableEntry
->Key
,
589 TableEntry
->NonPagedAllocs
, TableEntry
->NonPagedFrees
,
590 (TableEntry
->NonPagedAllocs
- TableEntry
->NonPagedFrees
), TableEntry
->NonPagedBytes
,
591 TableEntry
->PagedAllocs
, TableEntry
->PagedFrees
,
592 (TableEntry
->PagedAllocs
- TableEntry
->PagedFrees
), TableEntry
->PagedBytes
);
596 MiDumperPrint(CalledFromDbg
, "%x\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TableEntry
->Key
,
597 TableEntry
->NonPagedAllocs
, TableEntry
->NonPagedBytes
,
598 TableEntry
->PagedAllocs
, TableEntry
->PagedBytes
);
602 else if (Tag
== 0 || (Tag
& Mask
) == (TAG_NONE
& Mask
))
606 MiDumperPrint(CalledFromDbg
, "Anon\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n",
607 TableEntry
->NonPagedAllocs
, TableEntry
->NonPagedFrees
,
608 (TableEntry
->NonPagedAllocs
- TableEntry
->NonPagedFrees
), TableEntry
->NonPagedBytes
,
609 TableEntry
->PagedAllocs
, TableEntry
->PagedFrees
,
610 (TableEntry
->PagedAllocs
- TableEntry
->PagedFrees
), TableEntry
->PagedBytes
);
614 MiDumperPrint(CalledFromDbg
, "Anon\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n",
615 TableEntry
->NonPagedAllocs
, TableEntry
->NonPagedBytes
,
616 TableEntry
->PagedAllocs
, TableEntry
->PagedBytes
);
624 DPRINT1("---------------------\n");
629 /* PRIVATE FUNCTIONS **********************************************************/
636 ULONG i
, Key
, Hash
, Index
;
637 PPOOL_TRACKER_TABLE TrackTable
= PoolTrackTable
;
707 // Loop all 64 hot tags
709 ASSERT((sizeof(TagList
) / sizeof(ULONG
)) == 64);
710 for (i
= 0; i
< sizeof(TagList
) / sizeof(ULONG
); i
++)
713 // Get the current tag, and compute its hash in the tracker table
716 Hash
= ExpComputeHashForTag(Key
, PoolTrackTableMask
);
719 // Loop all the hashes in this index/bucket
725 // Find an empty entry, and make sure this isn't the last hash that
728 // On checked builds, also make sure this is the first time we are
731 ASSERT(TrackTable
[Hash
].Key
!= Key
);
732 if (!(TrackTable
[Hash
].Key
) && (Hash
!= PoolTrackTableSize
- 1))
735 // It has been seeded, move on to the next tag
737 TrackTable
[Hash
].Key
= Key
;
742 // This entry was already taken, compute the next possible hash while
743 // making sure we're not back at our initial index.
745 ASSERT(TrackTable
[Hash
].Key
!= Key
);
746 Hash
= (Hash
+ 1) & PoolTrackTableMask
;
747 if (Hash
== Index
) break;
754 ExpRemovePoolTracker(IN ULONG Key
,
755 IN SIZE_T NumberOfBytes
,
756 IN POOL_TYPE PoolType
)
759 PPOOL_TRACKER_TABLE Table
, TableEntry
;
760 SIZE_T TableMask
, TableSize
;
763 // Remove the PROTECTED_POOL flag which is not part of the tag
765 Key
&= ~PROTECTED_POOL
;
768 // With WinDBG you can set a tag you want to break on when an allocation is
771 if (Key
== PoolHitTag
) DbgBreakPoint();
774 // Why the double indirection? Because normally this function is also used
775 // when doing session pool allocations, which has another set of tables,
776 // sizes, and masks that live in session pool. Now we don't support session
777 // pool so we only ever use the regular tables, but I'm keeping the code this
778 // way so that the day we DO support session pool, it won't require that
781 Table
= PoolTrackTable
;
782 TableMask
= PoolTrackTableMask
;
783 TableSize
= PoolTrackTableSize
;
784 DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize
);
787 // Compute the hash for this key, and loop all the possible buckets
789 Hash
= ExpComputeHashForTag(Key
, TableMask
);
794 // Have we found the entry for this tag?
796 TableEntry
= &Table
[Hash
];
797 if (TableEntry
->Key
== Key
)
800 // Decrement the counters depending on if this was paged or nonpaged
803 if ((PoolType
& BASE_POOL_TYPE_MASK
) == NonPagedPool
)
805 InterlockedIncrement(&TableEntry
->NonPagedFrees
);
806 InterlockedExchangeAddSizeT(&TableEntry
->NonPagedBytes
,
807 -(SSIZE_T
)NumberOfBytes
);
810 InterlockedIncrement(&TableEntry
->PagedFrees
);
811 InterlockedExchangeAddSizeT(&TableEntry
->PagedBytes
,
812 -(SSIZE_T
)NumberOfBytes
);
817 // We should have only ended up with an empty entry if we've reached
820 if (!TableEntry
->Key
)
822 DPRINT1("Empty item reached in tracker table. Hash=0x%lx, TableMask=0x%lx, Tag=0x%08lx, NumberOfBytes=%lu, PoolType=%d\n",
823 Hash
, TableMask
, Key
, (ULONG
)NumberOfBytes
, PoolType
);
824 ASSERT(Hash
== TableMask
);
828 // This path is hit when we don't have an entry, and the current bucket
829 // is full, so we simply try the next one
831 Hash
= (Hash
+ 1) & TableMask
;
832 if (Hash
== Index
) break;
836 // And finally this path is hit when all the buckets are full, and we need
837 // some expansion. This path is not yet supported in ReactOS and so we'll
840 DPRINT1("Out of pool tag space, ignoring...\n");
845 ExpInsertPoolTracker(IN ULONG Key
,
846 IN SIZE_T NumberOfBytes
,
847 IN POOL_TYPE PoolType
)
851 PPOOL_TRACKER_TABLE Table
, TableEntry
;
852 SIZE_T TableMask
, TableSize
;
855 // Remove the PROTECTED_POOL flag which is not part of the tag
857 Key
&= ~PROTECTED_POOL
;
860 // With WinDBG you can set a tag you want to break on when an allocation is
863 if (Key
== PoolHitTag
) DbgBreakPoint();
866 // There is also an internal flag you can set to break on malformed tags
868 if (ExStopBadTags
) ASSERT(Key
& 0xFFFFFF00);
871 // ASSERT on ReactOS features not yet supported
873 ASSERT(!(PoolType
& SESSION_POOL_MASK
));
874 ASSERT(KeGetCurrentProcessorNumber() == 0);
877 // Why the double indirection? Because normally this function is also used
878 // when doing session pool allocations, which has another set of tables,
879 // sizes, and masks that live in session pool. Now we don't support session
880 // pool so we only ever use the regular tables, but I'm keeping the code this
881 // way so that the day we DO support session pool, it won't require that
884 Table
= PoolTrackTable
;
885 TableMask
= PoolTrackTableMask
;
886 TableSize
= PoolTrackTableSize
;
887 DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize
);
890 // Compute the hash for this key, and loop all the possible buckets
892 Hash
= ExpComputeHashForTag(Key
, TableMask
);
897 // Do we already have an entry for this tag?
899 TableEntry
= &Table
[Hash
];
900 if (TableEntry
->Key
== Key
)
903 // Increment the counters depending on if this was paged or nonpaged
906 if ((PoolType
& BASE_POOL_TYPE_MASK
) == NonPagedPool
)
908 InterlockedIncrement(&TableEntry
->NonPagedAllocs
);
909 InterlockedExchangeAddSizeT(&TableEntry
->NonPagedBytes
, NumberOfBytes
);
912 InterlockedIncrement(&TableEntry
->PagedAllocs
);
913 InterlockedExchangeAddSizeT(&TableEntry
->PagedBytes
, NumberOfBytes
);
918 // We don't have an entry yet, but we've found a free bucket for it
920 if (!(TableEntry
->Key
) && (Hash
!= PoolTrackTableSize
- 1))
923 // We need to hold the lock while creating a new entry, since other
924 // processors might be in this code path as well
926 ExAcquireSpinLock(&ExpTaggedPoolLock
, &OldIrql
);
927 if (!PoolTrackTable
[Hash
].Key
)
930 // We've won the race, so now create this entry in the bucket
932 ASSERT(Table
[Hash
].Key
== 0);
933 PoolTrackTable
[Hash
].Key
= Key
;
934 TableEntry
->Key
= Key
;
936 ExReleaseSpinLock(&ExpTaggedPoolLock
, OldIrql
);
939 // Now we force the loop to run again, and we should now end up in
940 // the code path above which does the interlocked increments...
946 // This path is hit when we don't have an entry, and the current bucket
947 // is full, so we simply try the next one
949 Hash
= (Hash
+ 1) & TableMask
;
950 if (Hash
== Index
) break;
954 // And finally this path is hit when all the buckets are full, and we need
955 // some expansion. This path is not yet supported in ReactOS and so we'll
958 DPRINT1("Out of pool tag space, ignoring...\n");
964 ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor
,
965 IN POOL_TYPE PoolType
,
970 PLIST_ENTRY NextEntry
, LastEntry
;
973 // Setup the descriptor based on the caller's request
975 PoolDescriptor
->PoolType
= PoolType
;
976 PoolDescriptor
->PoolIndex
= PoolIndex
;
977 PoolDescriptor
->Threshold
= Threshold
;
978 PoolDescriptor
->LockAddress
= PoolLock
;
981 // Initialize accounting data
983 PoolDescriptor
->RunningAllocs
= 0;
984 PoolDescriptor
->RunningDeAllocs
= 0;
985 PoolDescriptor
->TotalPages
= 0;
986 PoolDescriptor
->TotalBytes
= 0;
987 PoolDescriptor
->TotalBigPages
= 0;
990 // Nothing pending for now
992 PoolDescriptor
->PendingFrees
= NULL
;
993 PoolDescriptor
->PendingFreeDepth
= 0;
996 // Loop all the descriptor's allocation lists and initialize them
998 NextEntry
= PoolDescriptor
->ListHeads
;
999 LastEntry
= NextEntry
+ POOL_LISTS_PER_PAGE
;
1000 while (NextEntry
< LastEntry
)
1002 ExpInitializePoolListHead(NextEntry
);
1007 // Note that ReactOS does not support Session Pool Yet
1009 ASSERT(PoolType
!= PagedPoolSession
);
1015 InitializePool(IN POOL_TYPE PoolType
,
1018 PPOOL_DESCRIPTOR Descriptor
;
1023 // Check what kind of pool this is
1025 if (PoolType
== NonPagedPool
)
1028 // Compute the track table size and convert it from a power of two to an
1031 // NOTE: On checked builds, we'll assert if the registry table size was
1032 // invalid, while on retail builds we'll just break out of the loop at
1035 TableSize
= min(PoolTrackTableSize
, MmSizeOfNonPagedPoolInBytes
>> 8);
1036 for (i
= 0; i
< 32; i
++)
1040 ASSERT((TableSize
& ~1) == 0);
1041 if (!(TableSize
& ~1)) break;
1047 // If we hit bit 32, than no size was defined in the registry, so
1048 // we'll use the default size of 2048 entries.
1050 // Otherwise, use the size from the registry, as long as it's not
1051 // smaller than 64 entries.
1055 PoolTrackTableSize
= 2048;
1059 PoolTrackTableSize
= max(1 << i
, 64);
1063 // Loop trying with the biggest specified size first, and cut it down
1064 // by a power of two each iteration in case not enough memory exist
1069 // Do not allow overflow
1071 if ((PoolTrackTableSize
+ 1) > (MAXULONG_PTR
/ sizeof(POOL_TRACKER_TABLE
)))
1073 PoolTrackTableSize
>>= 1;
1078 // Allocate the tracker table and exit the loop if this worked
1080 PoolTrackTable
= MiAllocatePoolPages(NonPagedPool
,
1081 (PoolTrackTableSize
+ 1) *
1082 sizeof(POOL_TRACKER_TABLE
));
1083 if (PoolTrackTable
) break;
1086 // Otherwise, as long as we're not down to the last bit, keep
1089 if (PoolTrackTableSize
== 1)
1091 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY
,
1097 PoolTrackTableSize
>>= 1;
1101 // Add one entry, compute the hash, and zero the table
1103 PoolTrackTableSize
++;
1104 PoolTrackTableMask
= PoolTrackTableSize
- 2;
1106 RtlZeroMemory(PoolTrackTable
,
1107 PoolTrackTableSize
* sizeof(POOL_TRACKER_TABLE
));
1110 // Finally, add the most used tags to speed up those allocations
1115 // We now do the exact same thing with the tracker table for big pages
1117 TableSize
= min(PoolBigPageTableSize
, MmSizeOfNonPagedPoolInBytes
>> 8);
1118 for (i
= 0; i
< 32; i
++)
1122 ASSERT((TableSize
& ~1) == 0);
1123 if (!(TableSize
& ~1)) break;
1129 // For big pages, the default tracker table is 4096 entries, while the
1130 // minimum is still 64
1134 PoolBigPageTableSize
= 4096;
1138 PoolBigPageTableSize
= max(1 << i
, 64);
1142 // Again, run the exact same loop we ran earlier, but this time for the
1143 // big pool tracker instead
1147 if ((PoolBigPageTableSize
+ 1) > (MAXULONG_PTR
/ sizeof(POOL_TRACKER_BIG_PAGES
)))
1149 PoolBigPageTableSize
>>= 1;
1153 PoolBigPageTable
= MiAllocatePoolPages(NonPagedPool
,
1154 PoolBigPageTableSize
*
1155 sizeof(POOL_TRACKER_BIG_PAGES
));
1156 if (PoolBigPageTable
) break;
1158 if (PoolBigPageTableSize
== 1)
1160 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY
,
1167 PoolBigPageTableSize
>>= 1;
1171 // An extra entry is not needed for the big pool tracker, so just
1172 // compute the hash and zero it
1174 PoolBigPageTableHash
= PoolBigPageTableSize
- 1;
1175 RtlZeroMemory(PoolBigPageTable
,
1176 PoolBigPageTableSize
* sizeof(POOL_TRACKER_BIG_PAGES
));
1177 for (i
= 0; i
< PoolBigPageTableSize
; i
++) PoolBigPageTable
[i
].Va
= (PVOID
)1;
1180 // During development, print this out so we can see what's happening
1182 DPRINT("EXPOOL: Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
1183 PoolTrackTable
, PoolTrackTableSize
* sizeof(POOL_TRACKER_TABLE
));
1184 DPRINT("EXPOOL: Big Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
1185 PoolBigPageTable
, PoolBigPageTableSize
* sizeof(POOL_TRACKER_BIG_PAGES
));
1188 // Insert the generic tracker for all of big pool
1190 ExpInsertPoolTracker('looP',
1191 ROUND_TO_PAGES(PoolBigPageTableSize
*
1192 sizeof(POOL_TRACKER_BIG_PAGES
)),
1196 // No support for NUMA systems at this time
1198 ASSERT(KeNumberNodes
== 1);
1201 // Initialize the tag spinlock
1203 KeInitializeSpinLock(&ExpTaggedPoolLock
);
1206 // Initialize the nonpaged pool descriptor
1208 PoolVector
[NonPagedPool
] = &NonPagedPoolDescriptor
;
1209 ExInitializePoolDescriptor(PoolVector
[NonPagedPool
],
1218 // No support for NUMA systems at this time
1220 ASSERT(KeNumberNodes
== 1);
1223 // Allocate the pool descriptor
1225 Descriptor
= ExAllocatePoolWithTag(NonPagedPool
,
1226 sizeof(KGUARDED_MUTEX
) +
1227 sizeof(POOL_DESCRIPTOR
),
1232 // This is really bad...
1234 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY
,
1242 // Setup the vector and guarded mutex for paged pool
1244 PoolVector
[PagedPool
] = Descriptor
;
1245 ExpPagedPoolMutex
= (PKGUARDED_MUTEX
)(Descriptor
+ 1);
1246 ExpPagedPoolDescriptor
[0] = Descriptor
;
1247 KeInitializeGuardedMutex(ExpPagedPoolMutex
);
1248 ExInitializePoolDescriptor(Descriptor
,
1255 // Insert the generic tracker for all of nonpaged pool
1257 ExpInsertPoolTracker('looP',
1258 ROUND_TO_PAGES(PoolTrackTableSize
* sizeof(POOL_TRACKER_TABLE
)),
1265 ExLockPool(IN PPOOL_DESCRIPTOR Descriptor
)
1268 // Check if this is nonpaged pool
1270 if ((Descriptor
->PoolType
& BASE_POOL_TYPE_MASK
) == NonPagedPool
)
1273 // Use the queued spin lock
1275 return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock
);
1280 // Use the guarded mutex
1282 KeAcquireGuardedMutex(Descriptor
->LockAddress
);
1289 ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor
,
1293 // Check if this is nonpaged pool
1295 if ((Descriptor
->PoolType
& BASE_POOL_TYPE_MASK
) == NonPagedPool
)
1298 // Use the queued spin lock
1300 KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock
, OldIrql
);
1305 // Use the guarded mutex
1307 KeReleaseGuardedMutex(Descriptor
->LockAddress
);
1313 ExpGetPoolTagInfoTarget(IN PKDPC Dpc
,
1314 IN PVOID DeferredContext
,
1315 IN PVOID SystemArgument1
,
1316 IN PVOID SystemArgument2
)
1318 PPOOL_DPC_CONTEXT Context
= DeferredContext
;
1319 UNREFERENCED_PARAMETER(Dpc
);
1320 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL
);
1323 // Make sure we win the race, and if we did, copy the data atomically
1325 if (KeSignalCallDpcSynchronize(SystemArgument2
))
1327 RtlCopyMemory(Context
->PoolTrackTable
,
1329 Context
->PoolTrackTableSize
* sizeof(POOL_TRACKER_TABLE
));
1332 // This is here because ReactOS does not yet support expansion
1334 ASSERT(Context
->PoolTrackTableSizeExpansion
== 0);
1338 // Regardless of whether we won or not, we must now synchronize and then
1339 // decrement the barrier since this is one more processor that has completed
1342 KeSignalCallDpcSynchronize(SystemArgument2
);
1343 KeSignalCallDpcDone(SystemArgument1
);
1348 ExGetPoolTagInfo(IN PSYSTEM_POOLTAG_INFORMATION SystemInformation
,
1349 IN ULONG SystemInformationLength
,
1350 IN OUT PULONG ReturnLength OPTIONAL
)
1352 ULONG TableSize
, CurrentLength
;
1354 NTSTATUS Status
= STATUS_SUCCESS
;
1355 PSYSTEM_POOLTAG TagEntry
;
1356 PPOOL_TRACKER_TABLE Buffer
, TrackerEntry
;
1357 POOL_DPC_CONTEXT Context
;
1358 ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL
);
1361 // Keep track of how much data the caller's buffer must hold
1363 CurrentLength
= FIELD_OFFSET(SYSTEM_POOLTAG_INFORMATION
, TagInfo
);
1366 // Initialize the caller's buffer
1368 TagEntry
= &SystemInformation
->TagInfo
[0];
1369 SystemInformation
->Count
= 0;
1372 // Capture the number of entries, and the total size needed to make a copy
1375 EntryCount
= (ULONG
)PoolTrackTableSize
;
1376 TableSize
= EntryCount
* sizeof(POOL_TRACKER_TABLE
);
1379 // Allocate the "Generic DPC" temporary buffer
1381 Buffer
= ExAllocatePoolWithTag(NonPagedPool
, TableSize
, 'ofnI');
1382 if (!Buffer
) return STATUS_INSUFFICIENT_RESOURCES
;
1385 // Do a "Generic DPC" to atomically retrieve the tag and allocation data
1387 Context
.PoolTrackTable
= Buffer
;
1388 Context
.PoolTrackTableSize
= PoolTrackTableSize
;
1389 Context
.PoolTrackTableExpansion
= NULL
;
1390 Context
.PoolTrackTableSizeExpansion
= 0;
1391 KeGenericCallDpc(ExpGetPoolTagInfoTarget
, &Context
);
1394 // Now parse the results
1396 for (TrackerEntry
= Buffer
; TrackerEntry
< (Buffer
+ EntryCount
); TrackerEntry
++)
1399 // If the entry is empty, skip it
1401 if (!TrackerEntry
->Key
) continue;
1404 // Otherwise, add one more entry to the caller's buffer, and ensure that
1405 // enough space has been allocated in it
1407 SystemInformation
->Count
++;
1408 CurrentLength
+= sizeof(*TagEntry
);
1409 if (SystemInformationLength
< CurrentLength
)
1412 // The caller's buffer is too small, so set a failure code. The
1413 // caller will know the count, as well as how much space is needed.
1415 // We do NOT break out of the loop, because we want to keep incrementing
1416 // the Count as well as CurrentLength so that the caller can know the
1419 Status
= STATUS_INFO_LENGTH_MISMATCH
;
1424 // Small sanity check that our accounting is working correctly
1426 ASSERT(TrackerEntry
->PagedAllocs
>= TrackerEntry
->PagedFrees
);
1427 ASSERT(TrackerEntry
->NonPagedAllocs
>= TrackerEntry
->NonPagedFrees
);
1430 // Return the data into the caller's buffer
1432 TagEntry
->TagUlong
= TrackerEntry
->Key
;
1433 TagEntry
->PagedAllocs
= TrackerEntry
->PagedAllocs
;
1434 TagEntry
->PagedFrees
= TrackerEntry
->PagedFrees
;
1435 TagEntry
->PagedUsed
= TrackerEntry
->PagedBytes
;
1436 TagEntry
->NonPagedAllocs
= TrackerEntry
->NonPagedAllocs
;
1437 TagEntry
->NonPagedFrees
= TrackerEntry
->NonPagedFrees
;
1438 TagEntry
->NonPagedUsed
= TrackerEntry
->NonPagedBytes
;
1444 // Free the "Generic DPC" temporary buffer, return the buffer length and status
1446 ExFreePoolWithTag(Buffer
, 'ofnI');
1447 if (ReturnLength
) *ReturnLength
= CurrentLength
;
1453 ExpAddTagForBigPages(IN PVOID Va
,
1455 IN ULONG NumberOfPages
,
1456 IN POOL_TYPE PoolType
)
1462 PPOOL_TRACKER_BIG_PAGES Entry
, EntryEnd
, EntryStart
;
1463 ASSERT(((ULONG_PTR
)Va
& POOL_BIG_TABLE_ENTRY_FREE
) == 0);
1464 ASSERT(!(PoolType
& SESSION_POOL_MASK
));
1467 // As the table is expandable, these values must only be read after acquiring
1468 // the lock to avoid a teared access during an expansion
1470 Hash
= ExpComputePartialHashForAddress(Va
);
1471 KeAcquireSpinLock(&ExpLargePoolTableLock
, &OldIrql
);
1472 Hash
&= PoolBigPageTableHash
;
1473 TableSize
= PoolBigPageTableSize
;
1476 // We loop from the current hash bucket to the end of the table, and then
1477 // rollover to hash bucket 0 and keep going from there. If we return back
1478 // to the beginning, then we attempt expansion at the bottom of the loop
1480 EntryStart
= Entry
= &PoolBigPageTable
[Hash
];
1481 EntryEnd
= &PoolBigPageTable
[TableSize
];
1485 // Make sure that this is a free entry and attempt to atomically make the
1489 if (((ULONG_PTR
)OldVa
& POOL_BIG_TABLE_ENTRY_FREE
) &&
1490 (InterlockedCompareExchangePointer(&Entry
->Va
, Va
, OldVa
) == OldVa
))
1493 // We now own this entry, write down the size and the pool tag
1496 Entry
->NumberOfPages
= NumberOfPages
;
1499 // Add one more entry to the count, and see if we're getting within
1500 // 25% of the table size, at which point we'll do an expansion now
1501 // to avoid blocking too hard later on.
1503 // Note that we only do this if it's also been the 16th time that we
1504 // keep losing the race or that we are not finding a free entry anymore,
1505 // which implies a massive number of concurrent big pool allocations.
1507 InterlockedIncrementUL(&ExpPoolBigEntriesInUse
);
1508 if ((i
>= 16) && (ExpPoolBigEntriesInUse
> (TableSize
/ 4)))
1510 DPRINT("Should attempt expansion since we now have %lu entries\n",
1511 ExpPoolBigEntriesInUse
);
1515 // We have our entry, return
1517 KeReleaseSpinLock(&ExpLargePoolTableLock
, OldIrql
);
1522 // We don't have our entry yet, so keep trying, making the entry list
1523 // circular if we reach the last entry. We'll eventually break out of
1524 // the loop once we've rolled over and returned back to our original
1528 if (++Entry
>= EntryEnd
) Entry
= &PoolBigPageTable
[0];
1529 } while (Entry
!= EntryStart
);
1532 // This means there's no free hash buckets whatsoever, so we would now have
1533 // to attempt expanding the table
1535 DPRINT1("Big pool expansion needed, not implemented!\n");
1536 KeReleaseSpinLock(&ExpLargePoolTableLock
, OldIrql
);
1542 ExpFindAndRemoveTagBigPages(IN PVOID Va
,
1543 OUT PULONG_PTR BigPages
,
1544 IN POOL_TYPE PoolType
)
1546 BOOLEAN FirstTry
= TRUE
;
1549 ULONG PoolTag
, Hash
;
1550 PPOOL_TRACKER_BIG_PAGES Entry
;
1551 ASSERT(((ULONG_PTR
)Va
& POOL_BIG_TABLE_ENTRY_FREE
) == 0);
1552 ASSERT(!(PoolType
& SESSION_POOL_MASK
));
1555 // As the table is expandable, these values must only be read after acquiring
1556 // the lock to avoid a teared access during an expansion
1558 Hash
= ExpComputePartialHashForAddress(Va
);
1559 KeAcquireSpinLock(&ExpLargePoolTableLock
, &OldIrql
);
1560 Hash
&= PoolBigPageTableHash
;
1561 TableSize
= PoolBigPageTableSize
;
1564 // Loop while trying to find this big page allocation
1566 while (PoolBigPageTable
[Hash
].Va
!= Va
)
1569 // Increment the size until we go past the end of the table
1571 if (++Hash
>= TableSize
)
1574 // Is this the second time we've tried?
1579 // This means it was never inserted into the pool table and it
1580 // received the special "BIG" tag -- return that and return 0
1581 // so that the code can ask Mm for the page count instead
1583 KeReleaseSpinLock(&ExpLargePoolTableLock
, OldIrql
);
1589 // The first time this happens, reset the hash index and try again
1597 // Now capture all the information we need from the entry, since after we
1598 // release the lock, the data can change
1600 Entry
= &PoolBigPageTable
[Hash
];
1601 *BigPages
= Entry
->NumberOfPages
;
1602 PoolTag
= Entry
->Key
;
1605 // Set the free bit, and decrement the number of allocations. Finally, release
1606 // the lock and return the tag that was located
1608 InterlockedIncrement((PLONG
)&Entry
->Va
);
1609 InterlockedDecrementUL(&ExpPoolBigEntriesInUse
);
1610 KeReleaseSpinLock(&ExpLargePoolTableLock
, OldIrql
);
1616 ExQueryPoolUsage(OUT PULONG PagedPoolPages
,
1617 OUT PULONG NonPagedPoolPages
,
1618 OUT PULONG PagedPoolAllocs
,
1619 OUT PULONG PagedPoolFrees
,
1620 OUT PULONG PagedPoolLookasideHits
,
1621 OUT PULONG NonPagedPoolAllocs
,
1622 OUT PULONG NonPagedPoolFrees
,
1623 OUT PULONG NonPagedPoolLookasideHits
)
1626 PPOOL_DESCRIPTOR PoolDesc
;
1629 // Assume all failures
1631 *PagedPoolPages
= 0;
1632 *PagedPoolAllocs
= 0;
1633 *PagedPoolFrees
= 0;
1636 // Tally up the totals for all the apged pool
1638 for (i
= 0; i
< ExpNumberOfPagedPools
+ 1; i
++)
1640 PoolDesc
= ExpPagedPoolDescriptor
[i
];
1641 *PagedPoolPages
+= PoolDesc
->TotalPages
+ PoolDesc
->TotalBigPages
;
1642 *PagedPoolAllocs
+= PoolDesc
->RunningAllocs
;
1643 *PagedPoolFrees
+= PoolDesc
->RunningDeAllocs
;
1647 // The first non-paged pool has a hardcoded well-known descriptor name
1649 PoolDesc
= &NonPagedPoolDescriptor
;
1650 *NonPagedPoolPages
= PoolDesc
->TotalPages
+ PoolDesc
->TotalBigPages
;
1651 *NonPagedPoolAllocs
= PoolDesc
->RunningAllocs
;
1652 *NonPagedPoolFrees
= PoolDesc
->RunningDeAllocs
;
1655 // If the system has more than one non-paged pool, copy the other descriptor
1659 if (ExpNumberOfNonPagedPools
> 1)
1661 for (i
= 0; i
< ExpNumberOfNonPagedPools
; i
++)
1663 PoolDesc
= ExpNonPagedPoolDescriptor
[i
];
1664 *NonPagedPoolPages
+= PoolDesc
->TotalPages
+ PoolDesc
->TotalBigPages
;
1665 *NonPagedPoolAllocs
+= PoolDesc
->RunningAllocs
;
1666 *NonPagedPoolFrees
+= PoolDesc
->RunningDeAllocs
;
1672 // Get the amount of hits in the system lookaside lists
1674 if (!IsListEmpty(&ExPoolLookasideListHead
))
1676 PLIST_ENTRY ListEntry
;
1678 for (ListEntry
= ExPoolLookasideListHead
.Flink
;
1679 ListEntry
!= &ExPoolLookasideListHead
;
1680 ListEntry
= ListEntry
->Flink
)
1682 PGENERAL_LOOKASIDE Lookaside
;
1684 Lookaside
= CONTAINING_RECORD(ListEntry
, GENERAL_LOOKASIDE
, ListEntry
);
1686 if (Lookaside
->Type
== NonPagedPool
)
1688 *NonPagedPoolLookasideHits
+= Lookaside
->AllocateHits
;
1692 *PagedPoolLookasideHits
+= Lookaside
->AllocateHits
;
1700 ExReturnPoolQuota(IN PVOID P
)
1707 if ((ExpPoolFlags
& POOL_FLAG_SPECIAL_POOL
) &&
1708 (MmIsSpecialPoolAddress(P
)))
1715 ASSERT((ULONG_PTR
)Entry
% POOL_BLOCK_SIZE
== 0);
1717 PoolType
= Entry
->PoolType
- 1;
1718 BlockSize
= Entry
->BlockSize
;
1720 if (PoolType
& QUOTA_POOL_MASK
)
1722 Process
= ((PVOID
*)POOL_NEXT_BLOCK(Entry
))[-1];
1723 ASSERT(Process
!= NULL
);
1726 if (Process
->Pcb
.Header
.Type
!= ProcessObject
)
1728 DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
1729 Process
, Process
->Pcb
.Header
.Type
, Entry
->PoolType
, BlockSize
);
1730 KeBugCheckEx(BAD_POOL_CALLER
,
1734 (ULONG_PTR
)Process
);
1736 ((PVOID
*)POOL_NEXT_BLOCK(Entry
))[-1] = NULL
;
1737 PsReturnPoolQuota(Process
,
1738 PoolType
& BASE_POOL_TYPE_MASK
,
1739 BlockSize
* POOL_BLOCK_SIZE
);
1740 ObDereferenceObject(Process
);
1745 /* PUBLIC FUNCTIONS ***********************************************************/
1752 ExAllocatePoolWithTag(IN POOL_TYPE PoolType
,
1753 IN SIZE_T NumberOfBytes
,
1756 PPOOL_DESCRIPTOR PoolDesc
;
1757 PLIST_ENTRY ListHead
;
1758 PPOOL_HEADER Entry
, NextEntry
, FragmentEntry
;
1760 USHORT BlockSize
, i
;
1762 PKPRCB Prcb
= KeGetCurrentPrcb();
1763 PGENERAL_LOOKASIDE LookasideList
;
1766 // Some sanity checks
1769 ASSERT(Tag
!= ' GIB');
1770 ASSERT(NumberOfBytes
!= 0);
1771 ExpCheckPoolIrqlLevel(PoolType
, NumberOfBytes
, NULL
);
1774 // Not supported in ReactOS
1776 ASSERT(!(PoolType
& SESSION_POOL_MASK
));
1779 // Check if verifier or special pool is enabled
1781 if (ExpPoolFlags
& (POOL_FLAG_VERIFIER
| POOL_FLAG_SPECIAL_POOL
))
1784 // For verifier, we should call the verification routine
1786 if (ExpPoolFlags
& POOL_FLAG_VERIFIER
)
1788 DPRINT1("Driver Verifier is not yet supported\n");
1792 // For special pool, we check if this is a suitable allocation and do
1793 // the special allocation if needed
1795 if (ExpPoolFlags
& POOL_FLAG_SPECIAL_POOL
)
1798 // Check if this is a special pool allocation
1800 if (MmUseSpecialPool(NumberOfBytes
, Tag
))
1803 // Try to allocate using special pool
1805 Entry
= MmAllocateSpecialPool(NumberOfBytes
, Tag
, PoolType
, 2);
1806 if (Entry
) return Entry
;
1812 // Get the pool type and its corresponding vector for this request
1814 OriginalType
= PoolType
;
1815 PoolType
= PoolType
& BASE_POOL_TYPE_MASK
;
1816 PoolDesc
= PoolVector
[PoolType
];
1817 ASSERT(PoolDesc
!= NULL
);
1820 // Check if this is a big page allocation
1822 if (NumberOfBytes
> POOL_MAX_ALLOC
)
1825 // Allocate pages for it
1827 Entry
= MiAllocatePoolPages(OriginalType
, NumberOfBytes
);
1832 // Out of memory, display current consumption
1833 // Let's consider that if the caller wanted more
1834 // than a hundred pages, that's a bogus caller
1835 // and we are not out of memory
1837 if (NumberOfBytes
< 100 * PAGE_SIZE
)
1839 MiDumpPoolConsumers(FALSE
, 0, 0, 0);
1844 // Must succeed pool is deprecated, but still supported. These allocation
1845 // failures must cause an immediate bugcheck
1847 if (OriginalType
& MUST_SUCCEED_POOL_MASK
)
1849 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY
,
1851 NonPagedPoolDescriptor
.TotalPages
,
1852 NonPagedPoolDescriptor
.TotalBigPages
,
1857 // Internal debugging
1862 // This flag requests printing failures, and can also further specify
1863 // breaking on failures
1865 if (ExpPoolFlags
& POOL_FLAG_DBGPRINT_ON_FAILURE
)
1867 DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
1870 if (ExpPoolFlags
& POOL_FLAG_CRASH_ON_FAILURE
) DbgBreakPoint();
1874 // Finally, this flag requests an exception, which we are more than
1877 if (OriginalType
& POOL_RAISE_IF_ALLOCATION_FAILURE
)
1879 ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES
);
1886 // Increment required counters
1888 InterlockedExchangeAdd((PLONG
)&PoolDesc
->TotalBigPages
,
1889 (LONG
)BYTES_TO_PAGES(NumberOfBytes
));
1890 InterlockedExchangeAddSizeT(&PoolDesc
->TotalBytes
, NumberOfBytes
);
1891 InterlockedIncrement((PLONG
)&PoolDesc
->RunningAllocs
);
1894 // Add a tag for the big page allocation and switch to the generic "BIG"
1895 // tag if we failed to do so, then insert a tracker for this alloation.
1897 if (!ExpAddTagForBigPages(Entry
,
1899 (ULONG
)BYTES_TO_PAGES(NumberOfBytes
),
1904 ExpInsertPoolTracker(Tag
, ROUND_TO_PAGES(NumberOfBytes
), OriginalType
);
1909 // Should never request 0 bytes from the pool, but since so many drivers do
1910 // it, we'll just assume they want 1 byte, based on NT's similar behavior
1912 if (!NumberOfBytes
) NumberOfBytes
= 1;
1915 // A pool allocation is defined by its data, a linked list to connect it to
1916 // the free list (if necessary), and a pool header to store accounting info.
1917 // Calculate this size, then convert it into a block size (units of pool
1920 // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
1921 // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
1922 // the direct allocation of pages.
1924 i
= (USHORT
)((NumberOfBytes
+ sizeof(POOL_HEADER
) + (POOL_BLOCK_SIZE
- 1))
1926 ASSERT(i
< POOL_LISTS_PER_PAGE
);
1929 // Handle lookaside list optimization for both paged and nonpaged pool
1931 if (i
<= NUMBER_POOL_LOOKASIDE_LISTS
)
1934 // Try popping it from the per-CPU lookaside list
1936 LookasideList
= (PoolType
== PagedPool
) ?
1937 Prcb
->PPPagedLookasideList
[i
- 1].P
:
1938 Prcb
->PPNPagedLookasideList
[i
- 1].P
;
1939 LookasideList
->TotalAllocates
++;
1940 Entry
= (PPOOL_HEADER
)InterlockedPopEntrySList(&LookasideList
->ListHead
);
1944 // We failed, try popping it from the global list
1946 LookasideList
= (PoolType
== PagedPool
) ?
1947 Prcb
->PPPagedLookasideList
[i
- 1].L
:
1948 Prcb
->PPNPagedLookasideList
[i
- 1].L
;
1949 LookasideList
->TotalAllocates
++;
1950 Entry
= (PPOOL_HEADER
)InterlockedPopEntrySList(&LookasideList
->ListHead
);
1954 // If we were able to pop it, update the accounting and return the block
1958 LookasideList
->AllocateHits
++;
1961 // Get the real entry, write down its pool type, and track it
1964 Entry
->PoolType
= OriginalType
+ 1;
1965 ExpInsertPoolTracker(Tag
,
1966 Entry
->BlockSize
* POOL_BLOCK_SIZE
,
1970 // Return the pool allocation
1972 Entry
->PoolTag
= Tag
;
1973 (POOL_FREE_BLOCK(Entry
))->Flink
= NULL
;
1974 (POOL_FREE_BLOCK(Entry
))->Blink
= NULL
;
1975 return POOL_FREE_BLOCK(Entry
);
1980 // Loop in the free lists looking for a block if this size. Start with the
1981 // list optimized for this kind of size lookup
1983 ListHead
= &PoolDesc
->ListHeads
[i
];
1987 // Are there any free entries available on this list?
1989 if (!ExpIsPoolListEmpty(ListHead
))
1992 // Acquire the pool lock now
1994 OldIrql
= ExLockPool(PoolDesc
);
1997 // And make sure the list still has entries
1999 if (ExpIsPoolListEmpty(ListHead
))
2002 // Someone raced us (and won) before we had a chance to acquire
2007 ExUnlockPool(PoolDesc
, OldIrql
);
2012 // Remove a free entry from the list
2013 // Note that due to the way we insert free blocks into multiple lists
2014 // there is a guarantee that any block on this list will either be
2015 // of the correct size, or perhaps larger.
2017 ExpCheckPoolLinks(ListHead
);
2018 Entry
= POOL_ENTRY(ExpRemovePoolHeadList(ListHead
));
2019 ExpCheckPoolLinks(ListHead
);
2020 ExpCheckPoolBlocks(Entry
);
2021 ASSERT(Entry
->BlockSize
>= i
);
2022 ASSERT(Entry
->PoolType
== 0);
2025 // Check if this block is larger that what we need. The block could
2026 // not possibly be smaller, due to the reason explained above (and
2027 // we would've asserted on a checked build if this was the case).
2029 if (Entry
->BlockSize
!= i
)
2032 // Is there an entry before this one?
2034 if (Entry
->PreviousSize
== 0)
2037 // There isn't anyone before us, so take the next block and
2038 // turn it into a fragment that contains the leftover data
2039 // that we don't need to satisfy the caller's request
2041 FragmentEntry
= POOL_BLOCK(Entry
, i
);
2042 FragmentEntry
->BlockSize
= Entry
->BlockSize
- i
;
2045 // And make it point back to us
2047 FragmentEntry
->PreviousSize
= i
;
2050 // Now get the block that follows the new fragment and check
2051 // if it's still on the same page as us (and not at the end)
2053 NextEntry
= POOL_NEXT_BLOCK(FragmentEntry
);
2054 if (PAGE_ALIGN(NextEntry
) != NextEntry
)
2057 // Adjust this next block to point to our newly created
2060 NextEntry
->PreviousSize
= FragmentEntry
->BlockSize
;
2066 // There is a free entry before us, which we know is smaller
2067 // so we'll make this entry the fragment instead
2069 FragmentEntry
= Entry
;
2072 // And then we'll remove from it the actual size required.
2073 // Now the entry is a leftover free fragment
2075 Entry
->BlockSize
-= i
;
2078 // Now let's go to the next entry after the fragment (which
2079 // used to point to our original free entry) and make it
2080 // reference the new fragment entry instead.
2082 // This is the entry that will actually end up holding the
2085 Entry
= POOL_NEXT_BLOCK(Entry
);
2086 Entry
->PreviousSize
= FragmentEntry
->BlockSize
;
2089 // And now let's go to the entry after that one and check if
2090 // it's still on the same page, and not at the end
2092 NextEntry
= POOL_BLOCK(Entry
, i
);
2093 if (PAGE_ALIGN(NextEntry
) != NextEntry
)
2096 // Make it reference the allocation entry
2098 NextEntry
->PreviousSize
= i
;
2103 // Now our (allocation) entry is the right size
2105 Entry
->BlockSize
= i
;
2108 // And the next entry is now the free fragment which contains
2109 // the remaining difference between how big the original entry
2110 // was, and the actual size the caller needs/requested.
2112 FragmentEntry
->PoolType
= 0;
2113 BlockSize
= FragmentEntry
->BlockSize
;
2116 // Now check if enough free bytes remained for us to have a
2117 // "full" entry, which contains enough bytes for a linked list
2118 // and thus can be used for allocations (up to 8 bytes...)
2120 ExpCheckPoolLinks(&PoolDesc
->ListHeads
[BlockSize
- 1]);
2124 // Insert the free entry into the free list for this size
2126 ExpInsertPoolTailList(&PoolDesc
->ListHeads
[BlockSize
- 1],
2127 POOL_FREE_BLOCK(FragmentEntry
));
2128 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry
));
2133 // We have found an entry for this allocation, so set the pool type
2134 // and release the lock since we're done
2136 Entry
->PoolType
= OriginalType
+ 1;
2137 ExpCheckPoolBlocks(Entry
);
2138 ExUnlockPool(PoolDesc
, OldIrql
);
2141 // Increment required counters
2143 InterlockedExchangeAddSizeT(&PoolDesc
->TotalBytes
, Entry
->BlockSize
* POOL_BLOCK_SIZE
);
2144 InterlockedIncrement((PLONG
)&PoolDesc
->RunningAllocs
);
2147 // Track this allocation
2149 ExpInsertPoolTracker(Tag
,
2150 Entry
->BlockSize
* POOL_BLOCK_SIZE
,
2154 // Return the pool allocation
2156 Entry
->PoolTag
= Tag
;
2157 (POOL_FREE_BLOCK(Entry
))->Flink
= NULL
;
2158 (POOL_FREE_BLOCK(Entry
))->Blink
= NULL
;
2159 return POOL_FREE_BLOCK(Entry
);
2161 } while (++ListHead
!= &PoolDesc
->ListHeads
[POOL_LISTS_PER_PAGE
]);
2164 // There were no free entries left, so we have to allocate a new fresh page
2166 Entry
= MiAllocatePoolPages(OriginalType
, PAGE_SIZE
);
2171 // Out of memory, display current consumption
2172 // Let's consider that if the caller wanted more
2173 // than a hundred pages, that's a bogus caller
2174 // and we are not out of memory
2176 if (NumberOfBytes
< 100 * PAGE_SIZE
)
2178 MiDumpPoolConsumers(FALSE
, 0, 0, 0);
2183 // Must succeed pool is deprecated, but still supported. These allocation
2184 // failures must cause an immediate bugcheck
2186 if (OriginalType
& MUST_SUCCEED_POOL_MASK
)
2188 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY
,
2190 NonPagedPoolDescriptor
.TotalPages
,
2191 NonPagedPoolDescriptor
.TotalBigPages
,
2196 // Internal debugging
2201 // This flag requests printing failures, and can also further specify
2202 // breaking on failures
2204 if (ExpPoolFlags
& POOL_FLAG_DBGPRINT_ON_FAILURE
)
2206 DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
2209 if (ExpPoolFlags
& POOL_FLAG_CRASH_ON_FAILURE
) DbgBreakPoint();
2213 // Finally, this flag requests an exception, which we are more than
2216 if (OriginalType
& POOL_RAISE_IF_ALLOCATION_FAILURE
)
2218 ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES
);
2222 // Return NULL to the caller in all other cases
2228 // Setup the entry data
2231 Entry
->BlockSize
= i
;
2232 Entry
->PoolType
= OriginalType
+ 1;
2235 // This page will have two entries -- one for the allocation (which we just
2236 // created above), and one for the remaining free bytes, which we're about
2237 // to create now. The free bytes are the whole page minus what was allocated
2238 // and then converted into units of block headers.
2240 BlockSize
= (PAGE_SIZE
/ POOL_BLOCK_SIZE
) - i
;
2241 FragmentEntry
= POOL_BLOCK(Entry
, i
);
2242 FragmentEntry
->Ulong1
= 0;
2243 FragmentEntry
->BlockSize
= BlockSize
;
2244 FragmentEntry
->PreviousSize
= i
;
2247 // Increment required counters
2249 InterlockedIncrement((PLONG
)&PoolDesc
->TotalPages
);
2250 InterlockedExchangeAddSizeT(&PoolDesc
->TotalBytes
, Entry
->BlockSize
* POOL_BLOCK_SIZE
);
2253 // Now check if enough free bytes remained for us to have a "full" entry,
2254 // which contains enough bytes for a linked list and thus can be used for
2255 // allocations (up to 8 bytes...)
2257 if (FragmentEntry
->BlockSize
!= 1)
2260 // Excellent -- acquire the pool lock
2262 OldIrql
= ExLockPool(PoolDesc
);
2265 // And insert the free entry into the free list for this block size
2267 ExpCheckPoolLinks(&PoolDesc
->ListHeads
[BlockSize
- 1]);
2268 ExpInsertPoolTailList(&PoolDesc
->ListHeads
[BlockSize
- 1],
2269 POOL_FREE_BLOCK(FragmentEntry
));
2270 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry
));
2273 // Release the pool lock
2275 ExpCheckPoolBlocks(Entry
);
2276 ExUnlockPool(PoolDesc
, OldIrql
);
2281 // Simply do a sanity check
2283 ExpCheckPoolBlocks(Entry
);
2287 // Increment performance counters and track this allocation
2289 InterlockedIncrement((PLONG
)&PoolDesc
->RunningAllocs
);
2290 ExpInsertPoolTracker(Tag
,
2291 Entry
->BlockSize
* POOL_BLOCK_SIZE
,
2295 // And return the pool allocation
2297 ExpCheckPoolBlocks(Entry
);
2298 Entry
->PoolTag
= Tag
;
2299 return POOL_FREE_BLOCK(Entry
);
2307 ExAllocatePool(POOL_TYPE PoolType
,
2308 SIZE_T NumberOfBytes
)
2310 ULONG Tag
= TAG_NONE
;
2312 PLDR_DATA_TABLE_ENTRY LdrEntry
;
2314 /* Use the first four letters of the driver name, or "None" if unavailable */
2315 LdrEntry
= KeGetCurrentIrql() <= APC_LEVEL
2316 ? MiLookupDataTableEntry(_ReturnAddress())
2322 for (i
= 0; i
< min(4, LdrEntry
->BaseDllName
.Length
/ sizeof(WCHAR
)); i
++)
2323 Tag
= Tag
>> 8 | (LdrEntry
->BaseDllName
.Buffer
[i
] & 0xff) << 24;
2325 Tag
= Tag
>> 8 | ' ' << 24;
2328 return ExAllocatePoolWithTag(PoolType
, NumberOfBytes
, Tag
);
2336 ExFreePoolWithTag(IN PVOID P
,
2339 PPOOL_HEADER Entry
, NextEntry
;
2343 PPOOL_DESCRIPTOR PoolDesc
;
2345 BOOLEAN Combined
= FALSE
;
2346 PFN_NUMBER PageCount
, RealPageCount
;
2347 PKPRCB Prcb
= KeGetCurrentPrcb();
2348 PGENERAL_LOOKASIDE LookasideList
;
2352 // Check if any of the debug flags are enabled
2354 if (ExpPoolFlags
& (POOL_FLAG_CHECK_TIMERS
|
2355 POOL_FLAG_CHECK_WORKERS
|
2356 POOL_FLAG_CHECK_RESOURCES
|
2357 POOL_FLAG_VERIFIER
|
2358 POOL_FLAG_CHECK_DEADLOCK
|
2359 POOL_FLAG_SPECIAL_POOL
))
2362 // Check if special pool is enabled
2364 if (ExpPoolFlags
& POOL_FLAG_SPECIAL_POOL
)
2367 // Check if it was allocated from a special pool
2369 if (MmIsSpecialPoolAddress(P
))
2372 // Was deadlock verification also enabled? We can do some extra
2373 // checks at this point
2375 if (ExpPoolFlags
& POOL_FLAG_CHECK_DEADLOCK
)
2377 DPRINT1("Verifier not yet supported\n");
2381 // It is, so handle it via special pool free routine
2383 MmFreeSpecialPool(P
);
2389 // For non-big page allocations, we'll do a bunch of checks in here
2391 if (PAGE_ALIGN(P
) != P
)
2394 // Get the entry for this pool allocation
2395 // The pointer math here may look wrong or confusing, but it is quite right
2401 // Get the pool type
2403 PoolType
= (Entry
->PoolType
- 1) & BASE_POOL_TYPE_MASK
;
2406 // FIXME: Many other debugging checks go here
2408 ExpCheckPoolIrqlLevel(PoolType
, 0, P
);
2413 // Check if this is a big page allocation
2415 if (PAGE_ALIGN(P
) == P
)
2418 // We need to find the tag for it, so first we need to find out what
2419 // kind of allocation this was (paged or nonpaged), then we can go
2420 // ahead and try finding the tag for it. Remember to get rid of the
2421 // PROTECTED_POOL tag if it's found.
2423 // Note that if at insertion time, we failed to add the tag for a big
2424 // pool allocation, we used a special tag called 'BIG' to identify the
2425 // allocation, and we may get this tag back. In this scenario, we must
2426 // manually get the size of the allocation by actually counting through
2427 // the PFN database.
2429 PoolType
= MmDeterminePoolType(P
);
2430 ExpCheckPoolIrqlLevel(PoolType
, 0, P
);
2431 Tag
= ExpFindAndRemoveTagBigPages(P
, &PageCount
, PoolType
);
2434 DPRINT1("We do not know the size of this allocation. This is not yet supported\n");
2435 ASSERT(Tag
== ' GIB');
2436 PageCount
= 1; // We are going to lie! This might screw up accounting?
2438 else if (Tag
& PROTECTED_POOL
)
2440 Tag
&= ~PROTECTED_POOL
;
2446 if (TagToFree
&& TagToFree
!= Tag
)
2448 DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree
, (char*)&Tag
);
2449 KeBugCheckEx(BAD_POOL_CALLER
, 0x0A, (ULONG_PTR
)P
, Tag
, TagToFree
);
2453 // We have our tag and our page count, so we can go ahead and remove this
2456 ExpRemovePoolTracker(Tag
, PageCount
<< PAGE_SHIFT
, PoolType
);
2459 // Check if any of the debug flags are enabled
2461 if (ExpPoolFlags
& (POOL_FLAG_CHECK_TIMERS
|
2462 POOL_FLAG_CHECK_WORKERS
|
2463 POOL_FLAG_CHECK_RESOURCES
|
2464 POOL_FLAG_CHECK_DEADLOCK
))
2467 // Was deadlock verification also enabled? We can do some extra
2468 // checks at this point
2470 if (ExpPoolFlags
& POOL_FLAG_CHECK_DEADLOCK
)
2472 DPRINT1("Verifier not yet supported\n");
2476 // FIXME: Many debugging checks go here
2483 PoolDesc
= PoolVector
[PoolType
];
2484 InterlockedIncrement((PLONG
)&PoolDesc
->RunningDeAllocs
);
2485 InterlockedExchangeAddSizeT(&PoolDesc
->TotalBytes
,
2486 -(LONG_PTR
)(PageCount
<< PAGE_SHIFT
));
2489 // Do the real free now and update the last counter with the big page count
2491 RealPageCount
= MiFreePoolPages(P
);
2492 ASSERT(RealPageCount
== PageCount
);
2493 InterlockedExchangeAdd((PLONG
)&PoolDesc
->TotalBigPages
,
2494 -(LONG
)RealPageCount
);
2499 // Get the entry for this pool allocation
2500 // The pointer math here may look wrong or confusing, but it is quite right
2504 ASSERT((ULONG_PTR
)Entry
% POOL_BLOCK_SIZE
== 0);
2507 // Get the size of the entry, and it's pool type, then load the descriptor
2508 // for this pool type
2510 BlockSize
= Entry
->BlockSize
;
2511 PoolType
= (Entry
->PoolType
- 1) & BASE_POOL_TYPE_MASK
;
2512 PoolDesc
= PoolVector
[PoolType
];
2515 // Make sure that the IRQL makes sense
2517 ExpCheckPoolIrqlLevel(PoolType
, 0, P
);
2520 // Get the pool tag and get rid of the PROTECTED_POOL flag
2522 Tag
= Entry
->PoolTag
;
2523 if (Tag
& PROTECTED_POOL
) Tag
&= ~PROTECTED_POOL
;
2528 if (TagToFree
&& TagToFree
!= Tag
)
2530 DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree
, (char*)&Tag
);
2531 KeBugCheckEx(BAD_POOL_CALLER
, 0x0A, (ULONG_PTR
)P
, Tag
, TagToFree
);
2535 // Track the removal of this allocation
2537 ExpRemovePoolTracker(Tag
,
2538 BlockSize
* POOL_BLOCK_SIZE
,
2539 Entry
->PoolType
- 1);
2542 // Release pool quota, if any
2544 if ((Entry
->PoolType
- 1) & QUOTA_POOL_MASK
)
2546 Process
= ((PVOID
*)POOL_NEXT_BLOCK(Entry
))[-1];
2549 if (Process
->Pcb
.Header
.Type
!= ProcessObject
)
2551 DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
2552 Process
, Process
->Pcb
.Header
.Type
, Entry
->PoolType
, BlockSize
);
2553 KeBugCheckEx(BAD_POOL_CALLER
,
2557 (ULONG_PTR
)Process
);
2559 PsReturnPoolQuota(Process
, PoolType
, BlockSize
* POOL_BLOCK_SIZE
);
2560 ObDereferenceObject(Process
);
2565 // Is this allocation small enough to have come from a lookaside list?
2567 if (BlockSize
<= NUMBER_POOL_LOOKASIDE_LISTS
)
2570 // Try pushing it into the per-CPU lookaside list
2572 LookasideList
= (PoolType
== PagedPool
) ?
2573 Prcb
->PPPagedLookasideList
[BlockSize
- 1].P
:
2574 Prcb
->PPNPagedLookasideList
[BlockSize
- 1].P
;
2575 LookasideList
->TotalFrees
++;
2576 if (ExQueryDepthSList(&LookasideList
->ListHead
) < LookasideList
->Depth
)
2578 LookasideList
->FreeHits
++;
2579 InterlockedPushEntrySList(&LookasideList
->ListHead
, P
);
2584 // We failed, try to push it into the global lookaside list
2586 LookasideList
= (PoolType
== PagedPool
) ?
2587 Prcb
->PPPagedLookasideList
[BlockSize
- 1].L
:
2588 Prcb
->PPNPagedLookasideList
[BlockSize
- 1].L
;
2589 LookasideList
->TotalFrees
++;
2590 if (ExQueryDepthSList(&LookasideList
->ListHead
) < LookasideList
->Depth
)
2592 LookasideList
->FreeHits
++;
2593 InterlockedPushEntrySList(&LookasideList
->ListHead
, P
);
2599 // Get the pointer to the next entry
2601 NextEntry
= POOL_BLOCK(Entry
, BlockSize
);
2604 // Update performance counters
2606 InterlockedIncrement((PLONG
)&PoolDesc
->RunningDeAllocs
);
2607 InterlockedExchangeAddSizeT(&PoolDesc
->TotalBytes
, -BlockSize
* POOL_BLOCK_SIZE
);
2610 // Acquire the pool lock
2612 OldIrql
= ExLockPool(PoolDesc
);
2615 // Check if the next allocation is at the end of the page
2617 ExpCheckPoolBlocks(Entry
);
2618 if (PAGE_ALIGN(NextEntry
) != NextEntry
)
2621 // We may be able to combine the block if it's free
2623 if (NextEntry
->PoolType
== 0)
2626 // The next block is free, so we'll do a combine
2631 // Make sure there's actual data in the block -- anything smaller
2632 // than this means we only have the header, so there's no linked list
2635 if ((NextEntry
->BlockSize
!= 1))
2638 // The block is at least big enough to have a linked list, so go
2639 // ahead and remove it
2641 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry
));
2642 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry
));
2643 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry
))->Flink
));
2644 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry
))->Blink
));
2648 // Our entry is now combined with the next entry
2650 Entry
->BlockSize
= Entry
->BlockSize
+ NextEntry
->BlockSize
;
2655 // Now check if there was a previous entry on the same page as us
2657 if (Entry
->PreviousSize
)
2660 // Great, grab that entry and check if it's free
2662 NextEntry
= POOL_PREV_BLOCK(Entry
);
2663 if (NextEntry
->PoolType
== 0)
2666 // It is, so we can do a combine
2671 // Make sure there's actual data in the block -- anything smaller
2672 // than this means we only have the header so there's no linked list
2675 if ((NextEntry
->BlockSize
!= 1))
2678 // The block is at least big enough to have a linked list, so go
2679 // ahead and remove it
2681 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry
));
2682 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry
));
2683 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry
))->Flink
));
2684 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry
))->Blink
));
2688 // Combine our original block (which might've already been combined
2689 // with the next block), into the previous block
2691 NextEntry
->BlockSize
= NextEntry
->BlockSize
+ Entry
->BlockSize
;
2694 // And now we'll work with the previous block instead
2701 // By now, it may have been possible for our combined blocks to actually
2702 // have made up a full page (if there were only 2-3 allocations on the
2703 // page, they could've all been combined).
2705 if ((PAGE_ALIGN(Entry
) == Entry
) &&
2706 (PAGE_ALIGN(POOL_NEXT_BLOCK(Entry
)) == POOL_NEXT_BLOCK(Entry
)))
2709 // In this case, release the pool lock, update the performance counter,
2710 // and free the page
2712 ExUnlockPool(PoolDesc
, OldIrql
);
2713 InterlockedExchangeAdd((PLONG
)&PoolDesc
->TotalPages
, -1);
2714 MiFreePoolPages(Entry
);
2719 // Otherwise, we now have a free block (or a combination of 2 or 3)
2721 Entry
->PoolType
= 0;
2722 BlockSize
= Entry
->BlockSize
;
2723 ASSERT(BlockSize
!= 1);
2726 // Check if we actually did combine it with anyone
2731 // Get the first combined block (either our original to begin with, or
2732 // the one after the original, depending if we combined with the previous)
2734 NextEntry
= POOL_NEXT_BLOCK(Entry
);
2737 // As long as the next block isn't on a page boundary, have it point
2740 if (PAGE_ALIGN(NextEntry
) != NextEntry
) NextEntry
->PreviousSize
= BlockSize
;
2744 // Insert this new free block, and release the pool lock
2746 ExpInsertPoolHeadList(&PoolDesc
->ListHeads
[BlockSize
- 1], POOL_FREE_BLOCK(Entry
));
2747 ExpCheckPoolLinks(POOL_FREE_BLOCK(Entry
));
2748 ExUnlockPool(PoolDesc
, OldIrql
);
2759 // Just free without checking for the tag
2761 ExFreePoolWithTag(P
, 0);
/* NOTE(review): only the signature of ExQueryPoolBlockSize survives in this
 * extract -- its entire body lies in elided lines, so it cannot be safely
 * reconstructed here. Presumably unimplemented upstream; confirm against the
 * full file before editing. */
2769 ExQueryPoolBlockSize(IN PVOID PoolBlock
,
2770 OUT PBOOLEAN QuotaCharged
)
2785 ExAllocatePoolWithQuota(IN POOL_TYPE PoolType
,
2786 IN SIZE_T NumberOfBytes
)
2789 // Allocate the pool
2791 return ExAllocatePoolWithQuotaTag(PoolType
, NumberOfBytes
, TAG_NONE
);
/*
 * ExAllocatePoolWithTagPriority — tagged pool allocation taking a caller
 * priority hint.
 * NOTE(review): extraction-mangled fragment; original source line numbers
 * are fused into the text, and the Tag parameter line, locals, braces and
 * return statement are missing from this view. The visible body forwards
 * to ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag); whether and how
 * Priority is honored is not visible here — TODO confirm against the full
 * source before relying on it.
 */
2799 ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType
,
2800 IN SIZE_T NumberOfBytes
,
2802 IN EX_POOL_PRIORITY Priority
)
2807 // Allocate the pool
2809 Buffer
= ExAllocatePoolWithTag(PoolType
, NumberOfBytes
, Tag
);
2823 ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType
,
2824 IN SIZE_T NumberOfBytes
,
2827 BOOLEAN Raise
= TRUE
;
2831 PEPROCESS Process
= PsGetCurrentProcess();
2834 // Check if we should fail instead of raising an exception
2836 if (PoolType
& POOL_QUOTA_FAIL_INSTEAD_OF_RAISE
)
2839 PoolType
&= ~POOL_QUOTA_FAIL_INSTEAD_OF_RAISE
;
2843 // Inject the pool quota mask
2845 PoolType
+= QUOTA_POOL_MASK
;
2848 // Check if we have enough space to add the quota owner process, as long as
2849 // this isn't the system process, which never gets charged quota
2851 ASSERT(NumberOfBytes
!= 0);
2852 if ((NumberOfBytes
<= (PAGE_SIZE
- POOL_BLOCK_SIZE
- sizeof(PVOID
))) &&
2853 (Process
!= PsInitialSystemProcess
))
2856 // Add space for our EPROCESS pointer
2858 NumberOfBytes
+= sizeof(PEPROCESS
);
2863 // We won't be able to store the pointer, so don't use quota for this
2865 PoolType
-= QUOTA_POOL_MASK
;
2869 // Allocate the pool buffer now
2871 Buffer
= ExAllocatePoolWithTag(PoolType
, NumberOfBytes
, Tag
);
2874 // If the buffer is page-aligned, this is a large page allocation and we
2877 if (PAGE_ALIGN(Buffer
) != Buffer
)
2880 // Also if special pool is enabled, and this was allocated from there,
2881 // we won't touch it either
2883 if ((ExpPoolFlags
& POOL_FLAG_SPECIAL_POOL
) &&
2884 (MmIsSpecialPoolAddress(Buffer
)))
2890 // If it wasn't actually allocated with quota charges, ignore it too
2892 if (!(PoolType
& QUOTA_POOL_MASK
)) return Buffer
;
2895 // If this is the system process, we don't charge quota, so ignore
2897 if (Process
== PsInitialSystemProcess
) return Buffer
;
2900 // Actually go and charge quota for the process now
2902 Entry
= POOL_ENTRY(Buffer
);
2903 Status
= PsChargeProcessPoolQuota(Process
,
2904 PoolType
& BASE_POOL_TYPE_MASK
,
2905 Entry
->BlockSize
* POOL_BLOCK_SIZE
);
2906 if (!NT_SUCCESS(Status
))
2909 // Quota failed, back out the allocation, clear the owner, and fail
2911 ((PVOID
*)POOL_NEXT_BLOCK(Entry
))[-1] = NULL
;
2912 ExFreePoolWithTag(Buffer
, Tag
);
2913 if (Raise
) RtlRaiseStatus(Status
);
2918 // Quota worked, write the owner and then reference it before returning
2920 ((PVOID
*)POOL_NEXT_BLOCK(Entry
))[-1] = Process
;
2921 ObReferenceObject(Process
);
2923 else if (!(Buffer
) && (Raise
))
2926 // The allocation failed, raise an error if we are in raise mode
2928 RtlRaiseStatus(STATUS_INSUFFICIENT_RESOURCES
);
2932 // Return the allocated buffer
2937 #if DBG && defined(KDBG)
/*
 * ExpKdbgExtPool — KDBG "!pool"-style debugger command body: parses an
 * address (and optional flags) from Argv, identifies which pool region the
 * address belongs to, then walks and prints every POOL_HEADER on that page.
 * NOTE(review): extraction-mangled fragment; original source line numbers
 * are fused into the text and the signature, local declarations, braces
 * and argc checks are missing from this view. Kept byte-identical apart
 * from the added comments below.
 */
2944 ULONG_PTR Address
= 0, Flags
= 0;
/* Parse the address argument from Argv[1] */
2953 if (!KdbpGetHexNumber(Argv
[1], &Address
))
2955 KdbpPrint("Invalid parameter: %s\n", Argv
[0]);
/* Parse the optional flags argument (same slot in this fragment) */
2963 if (!KdbpGetHexNumber(Argv
[1], &Flags
))
2965 KdbpPrint("Invalid parameter: %s\n", Argv
[0]);
2970 /* Check if we got an address */
2973 /* Get the base page */
2974 PoolPage
= PAGE_ALIGN(Address
);
2978 KdbpPrint("Heap is unimplemented\n");
2982 /* No paging support! */
2983 if (!MmIsAddressValid(PoolPage
))
2985 KdbpPrint("Address not accessible!\n");
/* Classify the address: paged pool, non-paged pool, or neither */
2990 if ((Address
>= (ULONG_PTR
)MmPagedPoolStart
) && (Address
<= (ULONG_PTR
)MmPagedPoolEnd
))
2991 KdbpPrint("Allocation is from PagedPool region\n");
2992 else if ((Address
>= (ULONG_PTR
)MmNonPagedPoolStart
) && (Address
<= (ULONG_PTR
)MmNonPagedPoolEnd
))
2993 KdbpPrint("Allocation is from NonPagedPool region\n");
2996 KdbpPrint("Address 0x%p is not within any pool!\n", (PVOID
)Address
);
3000 /* Loop all entries of that page */
3004 /* Check if the address is within that entry */
3005 ThisOne
= ((Address
>= (ULONG_PTR
)Entry
) &&
3006 (Address
< (ULONG_PTR
)(Entry
+ Entry
->BlockSize
)));
/* With flag bit 0 set, only the entry containing the address is shown */
3008 if (!(Flags
& 1) || ThisOne
)
3010 /* Print the line */
3011 KdbpPrint("%c%p size: %4d previous size: %4d %s %.4s\n",
3012 ThisOne
? '*' : ' ', Entry
, Entry
->BlockSize
, Entry
->PreviousSize
,
3013 (Flags
& 0x80000000) ? "" : (Entry
->PoolType
? "(Allocated)" : "(Free) "),
3014 (Flags
& 0x80000000) ? "" : (PCHAR
)&Entry
->PoolTag
);
/* Hex-dump the first 32 bytes of the entry's payload */
3019 Data
= (PULONG
)(Entry
+ 1);
3020 KdbpPrint(" %p %08lx %08lx %08lx %08lx\n"
3021 " %p %08lx %08lx %08lx %08lx\n",
3022 &Data
[0], Data
[0], Data
[1], Data
[2], Data
[3],
3023 &Data
[4], Data
[4], Data
[5], Data
[6], Data
[7]);
3026 /* Go to next entry */
3027 Entry
= POOL_BLOCK(Entry
, Entry
->BlockSize
);
/* Stop at a zero-sized block or when leaving the page */
3029 while ((Entry
->BlockSize
!= 0) && ((ULONG_PTR
)Entry
< (ULONG_PTR
)PoolPage
+ PAGE_SIZE
));
/*
 * ExpKdbgExtPoolUsedGetTag — converts a user-typed tag string (Arg) into
 * a ULONG tag plus a byte-granular mask so wildcard characters can match
 * any byte.
 * NOTE(review): extraction-mangled fragment; original source line numbers
 * are fused into the text, and the lines computing Len and filling the
 * Tmp buffer (including any wildcard handling) are missing from this
 * view. Kept byte-identical apart from added comments.
 */
3036 ExpKdbgExtPoolUsedGetTag(PCHAR Arg
, PULONG Tag
, PULONG Mask
)
3049 /* Generate the mask to have wildcards support */
3050 for (i
= 0; i
< Len
; ++i
)
/* Enable matching of byte i of the tag */
3055 *Mask
|= (0xFF << i
* 8);
3059 /* Get the tag in the ulong form */
3060 *Tag
= *((PULONG
)Tmp
);
/*
 * ExpKdbgExtPoolUsed — KDBG "!poolused"-style command body: parses
 * optional flags and tag arguments, then hands off to MiDumpPoolConsumers
 * to print per-tag pool usage.
 * NOTE(review): extraction-mangled fragment; original source line numbers
 * are fused into the text and the signature, argc checks and braces are
 * missing from this view. Kept byte-identical apart from added comments.
 */
3074 /* If we have 2+ args, easy: flags then tag */
3077 ExpKdbgExtPoolUsedGetTag(Argv
[2], &Tag
, &Mask
);
3078 if (!KdbpGetHexNumber(Argv
[1], &Flags
))
3080 KdbpPrint("Invalid parameter: %s\n", Argv
[0]);
3085 /* Otherwise, try to find out whether that's flags */
/* A flags arg is a single char or a "0x"-prefixed 3-char string */
3086 if (strlen(Argv
[1]) == 1 ||
3087 (strlen(Argv
[1]) == 3 && Argv
[1][0] == '0' && Argv
[1][1] == 'x'))
3089 /* Fallback: if reading flags failed, assume it's a tag */
3090 if (!KdbpGetHexNumber(Argv
[1], &Flags
))
3092 ExpKdbgExtPoolUsedGetTag(Argv
[1], &Tag
, &Mask
);
/* Single non-flag argument: treat it as the tag */
3098 ExpKdbgExtPoolUsedGetTag(Argv
[1], &Tag
, &Mask
);
3103 /* Call the dumper */
3104 MiDumpPoolConsumers(TRUE
, Tag
, Mask
, Flags
);
3111 ExpKdbgExtValidatePoolHeader(
3114 POOL_TYPE BasePoolTye
)
3116 /* Block size cannot be NULL or negative and it must cover the page */
3117 if (Entry
->BlockSize
<= 0)
3121 if (Entry
->BlockSize
* 8 + (ULONG_PTR
)Entry
- (ULONG_PTR
)BaseVa
> PAGE_SIZE
)
3127 * PreviousSize cannot be 0 unless on page begin
3128 * And it cannot be bigger that our current
3131 if (Entry
->PreviousSize
== 0 && BaseVa
!= Entry
)
3135 if (Entry
->PreviousSize
* 8 > (ULONG_PTR
)Entry
- (ULONG_PTR
)BaseVa
)
3140 /* Must be paged pool */
3141 if (((Entry
->PoolType
- 1) & BASE_POOL_TYPE_MASK
) != BasePoolTye
)
3146 /* Match tag mask */
3147 if ((Entry
->PoolTag
& 0x00808080) != 0)
/*
 * ExpKdbgExtPoolFindPagedPool — scans the paged pool for entries whose
 * tag matches (Tag & Mask); for each hit, invokes FoundCallback (when
 * non-NULL) with CallbackContext, otherwise prints the entry.
 * NOTE(review): extraction-mangled fragment; original source line numbers
 * are fused into the text and the signature's first parameters (Tag,
 * Mask), locals, braces and loop continues are missing from this view.
 * Kept byte-identical apart from added comments.
 */
3157 ExpKdbgExtPoolFindPagedPool(
3160 VOID (NTAPI
* FoundCallback
)(PPOOL_HEADER
, PVOID
),
3161 PVOID CallbackContext
)
3169 KdbpPrint("Searching Paged pool (%p : %p) for Tag: %.4s\n", MmPagedPoolStart
, MmPagedPoolEnd
, (PCHAR
)&Tag
);
3172 * To speed up paged pool search, we will use the allocation bitmap.
3173 * This is possible because we live directly in the kernel :-)
/* Find the first allocated page in the paged pool bitmap */
3175 i
= RtlFindSetBits(MmPagedPoolInfo
.PagedPoolAllocationMap
, 1, 0);
3176 while (i
!= 0xFFFFFFFF)
3178 BaseVa
= (PVOID
)((ULONG_PTR
)MmPagedPoolStart
+ (i
<< PAGE_SHIFT
));
3181 /* Validate our address */
3182 if ((ULONG_PTR
)BaseVa
> (ULONG_PTR
)MmPagedPoolEnd
|| (ULONG_PTR
)BaseVa
+ PAGE_SIZE
> (ULONG_PTR
)MmPagedPoolEnd
)
3187 /* Check whether we are beyond expansion */
3188 PointerPde
= MiAddressToPde(BaseVa
);
3189 if (PointerPde
>= MmPagedPoolInfo
.NextPdeForPagedPoolExpansion
)
3194 /* Check if allocation is valid */
3195 PointerPte
= MiAddressToPte(BaseVa
);
3196 if ((ULONG_PTR
)PointerPte
> PTE_TOP
)
3201 if (PointerPte
->u
.Hard
.Valid
)
/* Walk the page in 8-byte steps looking for plausible pool headers */
3203 for (Entry
= BaseVa
;
3204 (ULONG_PTR
)Entry
+ sizeof(POOL_HEADER
) < (ULONG_PTR
)BaseVa
+ PAGE_SIZE
;
3205 Entry
= (PVOID
)((ULONG_PTR
)Entry
+ 8))
3207 /* Try to find whether we have a pool entry */
3208 if (!ExpKdbgExtValidatePoolHeader(BaseVa
, Entry
, PagedPool
))
3213 if ((Entry
->PoolTag
& Mask
) == (Tag
& Mask
))
3215 if (FoundCallback
!= NULL
)
3217 FoundCallback(Entry
, CallbackContext
);
3221 /* Print the line */
3222 KdbpPrint("%p size: %4d previous size: %4d %s %.4s\n",
3223 Entry
, Entry
->BlockSize
, Entry
->PreviousSize
,
3224 Entry
->PoolType
? "(Allocated)" : "(Free) ",
3225 (PCHAR
)&Entry
->PoolTag
);
/* Advance to the next allocated page in the bitmap */
3231 i
= RtlFindSetBits(MmPagedPoolInfo
.PagedPoolAllocationMap
, 1, i
+ 1);
3235 extern PVOID MmNonPagedPoolEnd0
;
/*
 * ExpKdbgExtPoolFindNonPagedPool — brute-force scans the non-paged pool
 * (up to MmNonPagedPoolEnd0) for entries whose tag matches (Tag & Mask);
 * for each hit, invokes FoundCallback (when non-NULL) with
 * CallbackContext, otherwise prints the entry.
 * NOTE(review): extraction-mangled fragment; original source line numbers
 * are fused into the text and the signature's first parameters (Tag,
 * Mask), locals, braces and loop continues/breaks are missing from this
 * view. Kept byte-identical apart from added comments.
 */
3238 ExpKdbgExtPoolFindNonPagedPool(
3241 VOID (NTAPI
* FoundCallback
)(PPOOL_HEADER
, PVOID
),
3242 PVOID CallbackContext
)
3248 KdbpPrint("Searching NonPaged pool (%p : %p) for Tag: %.4s\n", MmNonPagedPoolStart
, MmNonPagedPoolEnd0
, (PCHAR
)&Tag
);
3250 /* Brute force search: start browsing the whole non paged pool */
3251 for (BaseVa
= MmNonPagedPoolStart
;
3252 (ULONG_PTR
)BaseVa
+ PAGE_SIZE
<= (ULONG_PTR
)MmNonPagedPoolEnd0
;
3253 BaseVa
= (PVOID
)((ULONG_PTR
)BaseVa
+ PAGE_SIZE
))
3257 /* Check whether we are beyond expansion */
3258 if (BaseVa
>= MmNonPagedPoolExpansionStart
)
3263 /* Check if allocation is valid */
3264 PointerPte
= MiAddressToPte(BaseVa
);
3265 if ((ULONG_PTR
)PointerPte
> PTE_TOP
)
3270 if (PointerPte
->u
.Hard
.Valid
)
/* Walk the page in 8-byte steps looking for plausible pool headers */
3272 for (Entry
= BaseVa
;
3273 (ULONG_PTR
)Entry
+ sizeof(POOL_HEADER
) < (ULONG_PTR
)BaseVa
+ PAGE_SIZE
;
3274 Entry
= (PVOID
)((ULONG_PTR
)Entry
+ 8))
3276 /* Try to find whether we have a pool entry */
3277 if (!ExpKdbgExtValidatePoolHeader(BaseVa
, Entry
, NonPagedPool
))
3282 if ((Entry
->PoolTag
& Mask
) == (Tag
& Mask
))
3284 if (FoundCallback
!= NULL
)
3286 FoundCallback(Entry
, CallbackContext
);
3290 /* Print the line */
3291 KdbpPrint("%p size: %4d previous size: %4d %s %.4s\n",
3292 Entry
, Entry
->BlockSize
, Entry
->PreviousSize
,
3293 Entry
->PoolType
? "(Allocated)" : "(Free) ",
3294 (PCHAR
)&Entry
->PoolTag
);
/*
 * ExpKdbgExtPoolFind — KDBG "!poolfind"-style command body: parses a tag
 * (with wildcard support) and an optional pool type, then dispatches to
 * the paged or non-paged pool scanner.
 * NOTE(review): extraction-mangled fragment; original source line numbers
 * are fused into the text and the signature, Tag/Mask locals, argc checks
 * and braces are missing from this view. Kept byte-identical apart from
 * added comments.
 */
3309 ULONG PoolType
= NonPagedPool
;
3313 KdbpPrint("Specify a tag string\n");
3317 /* First arg is tag */
/* A lone "*" means "match everything"; otherwise build Tag/Mask */
3318 if (strlen(Argv
[1]) != 1 || Argv
[1][0] != '*')
3320 ExpKdbgExtPoolUsedGetTag(Argv
[1], &Tag
, &Mask
);
3323 /* Second arg might be pool to search */
3326 PoolType
= strtoul(Argv
[2], NULL
, 0);
3330 KdbpPrint("Only (non) paged pool are supported\n");
3335 /* FIXME: What about large pool? */
/* Dispatch to the appropriate scanner, printing matches (no callback) */
3337 if (PoolType
== NonPagedPool
)
3339 ExpKdbgExtPoolFindNonPagedPool(Tag
, Mask
, NULL
, NULL
);
3341 else if (PoolType
== PagedPool
)
3343 ExpKdbgExtPoolFindPagedPool(Tag
, Mask
, NULL
, NULL
);
3349 typedef struct _IRP_FIND_CTXT
3351 ULONG_PTR RestartAddress
;
3354 } IRP_FIND_CTXT
, *PIRP_FIND_CTXT
;
/*
 * ExpKdbgExtIrpFindPrint — per-entry callback used by the pool scanners
 * when hunting for IRPs: treats an allocated pool entry's payload as a
 * PIRP, filters by restart address and the caller's match criteria, and
 * prints matching IRPs with their owning thread and current-stack driver.
 * NOTE(review): extraction-mangled fragment; original source line numbers
 * are fused into the text and the signature (Entry/Context parameters),
 * braces, early returns and the DriverName else-branch are missing from
 * this view. Kept byte-identical apart from added comments.
 */
3358 ExpKdbgExtIrpFindPrint(
3363 PIRP_FIND_CTXT FindCtxt
= Context
;
3364 PIO_STACK_LOCATION IoStack
= NULL
;
3365 PUNICODE_STRING DriverName
;
3366 ULONG_PTR SData
= FindCtxt
->SData
;
3367 ULONG Criteria
= FindCtxt
->Criteria
;
3369 /* Free entry, ignore */
3370 if (Entry
->PoolType
== 0)
/* The IRP payload starts right after the pool header */
3376 Irp
= (PIRP
)POOL_FREE_BLOCK(Entry
);
3378 /* Bail out if not matching restart address */
3379 if ((ULONG_PTR
)Irp
< FindCtxt
->RestartAddress
)
3384 /* Avoid bogus IRP stack locations */
3385 if (Irp
->CurrentLocation
<= Irp
->StackCount
+ 1)
3387 IoStack
= IoGetCurrentIrpStackLocation(Irp
);
3389 /* Get associated driver */
3390 if (IoStack
->DeviceObject
&& IoStack
->DeviceObject
->DriverObject
)
3391 DriverName
= &IoStack
->DeviceObject
->DriverObject
->DriverName
;
3396 /* Display if: no data, no criteria or if criteria matches data */
3397 if (SData
== 0 || Criteria
== 0 ||
3398 (Criteria
& 0x1 && IoStack
&& SData
== (ULONG_PTR
)IoStack
->DeviceObject
) ||
3399 (Criteria
& 0x2 && SData
== (ULONG_PTR
)Irp
->Tail
.Overlay
.OriginalFileObject
) ||
3400 (Criteria
& 0x4 && Irp
->MdlAddress
&& SData
== (ULONG_PTR
)Irp
->MdlAddress
->Process
) ||
3401 (Criteria
& 0x8 && SData
== (ULONG_PTR
)Irp
->Tail
.Overlay
.Thread
) ||
3402 (Criteria
& 0x10 && SData
== (ULONG_PTR
)Irp
->UserEvent
))
3404 KdbpPrint("%p Thread %p current stack belongs to %wZ\n", Irp
, Irp
->Tail
.Overlay
.Thread
, DriverName
);
/*
 * ExpKdbgExtIrpFind — KDBG "!irpfind"-style command body: parses the pool
 * type, an optional restart address, and an optional criteria keyword +
 * match datum, then scans the chosen pool for IRP-tagged entries via
 * ExpKdbgExtIrpFindPrint.
 * NOTE(review): extraction-mangled fragment; original source line numbers
 * are fused into the text and the signature, argc checks, braces and
 * error branches are missing from this view. Kept byte-identical apart
 * from added comments.
 */
3413 ULONG PoolType
= NonPagedPool
;
3414 IRP_FIND_CTXT FindCtxt
;
/* First argument selects the pool to scan */
3419 PoolType
= strtoul(Argv
[1], NULL
, 0);
3423 KdbpPrint("Only (non) paged pool are supported\n");
3428 RtlZeroMemory(&FindCtxt
, sizeof(IRP_FIND_CTXT
));
3430 /* Restart address */
3433 if (!KdbpGetHexNumber(Argv
[2], &FindCtxt
.RestartAddress
))
3435 KdbpPrint("Invalid parameter: %s\n", Argv
[0]);
3436 FindCtxt
.RestartAddress
= 0;
/* Optional criteria keyword (Argv[3]) plus its match datum (Argv[4]) */
3442 if (!KdbpGetHexNumber(Argv
[4], &FindCtxt
.SData
))
3448 if (strcmp(Argv
[3], "device") == 0)
3450 FindCtxt
.Criteria
= 0x1;
3452 else if (strcmp(Argv
[3], "fileobject") == 0)
3454 FindCtxt
.Criteria
= 0x2;
3456 else if (strcmp(Argv
[3], "mdlprocess") == 0)
3458 FindCtxt
.Criteria
= 0x4;
3460 else if (strcmp(Argv
[3], "thread") == 0)
3462 FindCtxt
.Criteria
= 0x8;
3464 else if (strcmp(Argv
[3], "userevent") == 0)
3466 FindCtxt
.Criteria
= 0x10;
3468 else if (strcmp(Argv
[3], "arg") == 0)
3470 FindCtxt
.Criteria
= 0x1f;
/* Scan the requested pool for "Irp " tagged entries (exact-tag mask) */
3475 if (PoolType
== NonPagedPool
)
3477 ExpKdbgExtPoolFindNonPagedPool(TAG_IRP
, 0xFFFFFFFF, ExpKdbgExtIrpFindPrint
, &FindCtxt
);
3479 else if (PoolType
== PagedPool
)
3481 ExpKdbgExtPoolFindPagedPool(TAG_IRP
, 0xFFFFFFFF, ExpKdbgExtIrpFindPrint
, &FindCtxt
);
3487 #endif // DBG && KDBG