2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/expool.c
5 * PURPOSE: ARM Memory Manager Executive Pool Manager
6 * PROGRAMMERS: ReactOS Portable Systems Group
9 /* INCLUDES *******************************************************************/
15 #define MODULE_INVOLVED_IN_ARM3
16 #include <mm/ARM3/miarm.h>
18 #undef ExAllocatePoolWithQuota
19 #undef ExAllocatePoolWithQuotaTag
21 /* GLOBALS ********************************************************************/
23 #define POOL_BIG_TABLE_ENTRY_FREE 0x1
25 typedef struct _POOL_DPC_CONTEXT
27 PPOOL_TRACKER_TABLE PoolTrackTable
;
28 SIZE_T PoolTrackTableSize
;
29 PPOOL_TRACKER_TABLE PoolTrackTableExpansion
;
30 SIZE_T PoolTrackTableSizeExpansion
;
31 } POOL_DPC_CONTEXT
, *PPOOL_DPC_CONTEXT
;
/* Number of paged pools present (ReactOS sets up a single paged pool) */
ULONG ExpNumberOfPagedPools;
/* The one nonpaged pool descriptor; paged descriptors are heap-allocated */
POOL_DESCRIPTOR NonPagedPoolDescriptor;
/* Per-pool paged descriptors; index 0 is filled in during InitializePool */
PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16 + 1];
/* Indexed by pool type base: [NonPagedPool] and [PagedPool] */
PPOOL_DESCRIPTOR PoolVector[2];
/* Guarded mutex protecting the paged pool descriptor (stored after it in memory) */
PKGUARDED_MUTEX ExpPagedPoolMutex;
/* Tag tracker table: entry count and the hash mask derived from it */
SIZE_T PoolTrackTableSize, PoolTrackTableMask;
/* Big-page (large allocation) tracker table: entry count and hash mask */
SIZE_T PoolBigPageTableSize, PoolBigPageTableHash;
/* Count of failed attempts to grow the big-page table */
ULONG ExpBigTableExpansionFailed;
/* Per-tag allocation/free accounting table */
PPOOL_TRACKER_TABLE PoolTrackTable;
/* Tracks allocations that consumed whole pages (no embedded POOL_HEADER) */
PPOOL_TRACKER_BIG_PAGES PoolBigPageTable;
/* Serializes creation of new entries in PoolTrackTable */
KSPIN_LOCK ExpTaggedPoolLock;
/* Debug switch: when set, malformed (mostly-zero) tags trigger an ASSERT */
BOOLEAN ExStopBadTags;
/* Protects PoolBigPageTable lookups, inserts, and expansion */
KSPIN_LOCK ExpLargePoolTableLock;
/* Number of in-use entries in PoolBigPageTable */
ULONG ExpPoolBigEntriesInUse;
/* Timestamp of the last out-of-memory pool-consumer dump (rate limiting) */
ULONGLONG MiLastPoolDumpTime;
/* Pool block/header/list access macros */
/* Map a user pointer back to its POOL_HEADER (header immediately precedes the data) */
#define POOL_ENTRY(x) (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
/* The free-list LIST_ENTRY lives right after the header of a free block */
#define POOL_FREE_BLOCK(x) (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
/* Advance i pool block units (POOL_BLOCK_SIZE bytes each) from header x */
#define POOL_BLOCK(x, i) (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
/* Next/previous physically adjacent block, using the sizes stored in the header */
#define POOL_NEXT_BLOCK(x) POOL_BLOCK((x), (x)->BlockSize)
#define POOL_PREV_BLOCK(x) POOL_BLOCK((x), -((x)->PreviousSize))
60 * Pool list access debug macros, similar to Arthur's pfnlist.c work.
61 * Microsoft actually implements similar checks in the Windows Server 2003 SP1
62 * pool code, but only for checked builds.
64 * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
65 * that these checks are done even on retail builds, due to the increasing
66 * number of kernel-mode attacks which depend on dangling list pointers and other
67 * kinds of list-based attacks.
69 * For now, I will leave these checks on all the time, but later they are likely
70 * to be DBG-only, at least until there are enough kernel-mode security attacks
71 * against ReactOS to warrant the performance hit.
73 * For now, these are not made inline, so we can get good stack traces.
77 ExpDecodePoolLink(IN PLIST_ENTRY Link
)
79 return (PLIST_ENTRY
)((ULONG_PTR
)Link
& ~1);
84 ExpEncodePoolLink(IN PLIST_ENTRY Link
)
86 return (PLIST_ENTRY
)((ULONG_PTR
)Link
| 1);
91 ExpCheckPoolLinks(IN PLIST_ENTRY ListHead
)
93 if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead
->Flink
)->Blink
) != ListHead
) ||
94 (ExpDecodePoolLink(ExpDecodePoolLink(ListHead
->Blink
)->Flink
) != ListHead
))
96 KeBugCheckEx(BAD_POOL_HEADER
,
99 (ULONG_PTR
)ExpDecodePoolLink(ExpDecodePoolLink(ListHead
->Flink
)->Blink
),
100 (ULONG_PTR
)ExpDecodePoolLink(ExpDecodePoolLink(ListHead
->Blink
)->Flink
));
106 ExpInitializePoolListHead(IN PLIST_ENTRY ListHead
)
108 ListHead
->Flink
= ListHead
->Blink
= ExpEncodePoolLink(ListHead
);
113 ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead
)
115 return (ExpDecodePoolLink(ListHead
->Flink
) == ListHead
);
120 ExpRemovePoolEntryList(IN PLIST_ENTRY Entry
)
122 PLIST_ENTRY Blink
, Flink
;
123 Flink
= ExpDecodePoolLink(Entry
->Flink
);
124 Blink
= ExpDecodePoolLink(Entry
->Blink
);
125 Flink
->Blink
= ExpEncodePoolLink(Blink
);
126 Blink
->Flink
= ExpEncodePoolLink(Flink
);
131 ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead
)
133 PLIST_ENTRY Entry
, Flink
;
134 Entry
= ExpDecodePoolLink(ListHead
->Flink
);
135 Flink
= ExpDecodePoolLink(Entry
->Flink
);
136 ListHead
->Flink
= ExpEncodePoolLink(Flink
);
137 Flink
->Blink
= ExpEncodePoolLink(ListHead
);
143 ExpRemovePoolTailList(IN PLIST_ENTRY ListHead
)
145 PLIST_ENTRY Entry
, Blink
;
146 Entry
= ExpDecodePoolLink(ListHead
->Blink
);
147 Blink
= ExpDecodePoolLink(Entry
->Blink
);
148 ListHead
->Blink
= ExpEncodePoolLink(Blink
);
149 Blink
->Flink
= ExpEncodePoolLink(ListHead
);
155 ExpInsertPoolTailList(IN PLIST_ENTRY ListHead
,
156 IN PLIST_ENTRY Entry
)
159 ExpCheckPoolLinks(ListHead
);
160 Blink
= ExpDecodePoolLink(ListHead
->Blink
);
161 Entry
->Flink
= ExpEncodePoolLink(ListHead
);
162 Entry
->Blink
= ExpEncodePoolLink(Blink
);
163 Blink
->Flink
= ExpEncodePoolLink(Entry
);
164 ListHead
->Blink
= ExpEncodePoolLink(Entry
);
165 ExpCheckPoolLinks(ListHead
);
170 ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead
,
171 IN PLIST_ENTRY Entry
)
174 ExpCheckPoolLinks(ListHead
);
175 Flink
= ExpDecodePoolLink(ListHead
->Flink
);
176 Entry
->Flink
= ExpEncodePoolLink(Flink
);
177 Entry
->Blink
= ExpEncodePoolLink(ListHead
);
178 Flink
->Blink
= ExpEncodePoolLink(Entry
);
179 ListHead
->Flink
= ExpEncodePoolLink(Entry
);
180 ExpCheckPoolLinks(ListHead
);
185 ExpCheckPoolHeader(IN PPOOL_HEADER Entry
)
187 PPOOL_HEADER PreviousEntry
, NextEntry
;
189 /* Is there a block before this one? */
190 if (Entry
->PreviousSize
)
193 PreviousEntry
= POOL_PREV_BLOCK(Entry
);
195 /* The two blocks must be on the same page! */
196 if (PAGE_ALIGN(Entry
) != PAGE_ALIGN(PreviousEntry
))
198 /* Something is awry */
199 KeBugCheckEx(BAD_POOL_HEADER
,
201 (ULONG_PTR
)PreviousEntry
,
206 /* This block should also indicate that it's as large as we think it is */
207 if (PreviousEntry
->BlockSize
!= Entry
->PreviousSize
)
209 /* Otherwise, someone corrupted one of the sizes */
210 DPRINT1("PreviousEntry BlockSize %lu, tag %.4s. Entry PreviousSize %lu, tag %.4s\n",
211 PreviousEntry
->BlockSize
, (char *)&PreviousEntry
->PoolTag
,
212 Entry
->PreviousSize
, (char *)&Entry
->PoolTag
);
213 KeBugCheckEx(BAD_POOL_HEADER
,
215 (ULONG_PTR
)PreviousEntry
,
220 else if (PAGE_ALIGN(Entry
) != Entry
)
222 /* If there's no block before us, we are the first block, so we should be on a page boundary */
223 KeBugCheckEx(BAD_POOL_HEADER
,
230 /* This block must have a size */
231 if (!Entry
->BlockSize
)
233 /* Someone must've corrupted this field */
234 if (Entry
->PreviousSize
)
236 PreviousEntry
= POOL_PREV_BLOCK(Entry
);
237 DPRINT1("PreviousEntry tag %.4s. Entry tag %.4s\n",
238 (char *)&PreviousEntry
->PoolTag
,
239 (char *)&Entry
->PoolTag
);
243 DPRINT1("Entry tag %.4s\n",
244 (char *)&Entry
->PoolTag
);
246 KeBugCheckEx(BAD_POOL_HEADER
,
253 /* Okay, now get the next block */
254 NextEntry
= POOL_NEXT_BLOCK(Entry
);
256 /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
257 if (PAGE_ALIGN(NextEntry
) != NextEntry
)
259 /* The two blocks must be on the same page! */
260 if (PAGE_ALIGN(Entry
) != PAGE_ALIGN(NextEntry
))
262 /* Something is messed up */
263 KeBugCheckEx(BAD_POOL_HEADER
,
265 (ULONG_PTR
)NextEntry
,
270 /* And this block should think we are as large as we truly are */
271 if (NextEntry
->PreviousSize
!= Entry
->BlockSize
)
273 /* Otherwise, someone corrupted the field */
274 DPRINT1("Entry BlockSize %lu, tag %.4s. NextEntry PreviousSize %lu, tag %.4s\n",
275 Entry
->BlockSize
, (char *)&Entry
->PoolTag
,
276 NextEntry
->PreviousSize
, (char *)&NextEntry
->PoolTag
);
277 KeBugCheckEx(BAD_POOL_HEADER
,
279 (ULONG_PTR
)NextEntry
,
288 ExpCheckPoolAllocation(
296 POOL_TYPE RealPoolType
;
298 /* Get the pool header */
299 Entry
= ((PPOOL_HEADER
)P
) - 1;
301 /* Check if this is a large allocation */
302 if (PAGE_ALIGN(P
) == P
)
304 /* Lock the pool table */
305 KeAcquireSpinLock(&ExpLargePoolTableLock
, &OldIrql
);
307 /* Find the pool tag */
308 for (i
= 0; i
< PoolBigPageTableSize
; i
++)
310 /* Check if this is our allocation */
311 if (PoolBigPageTable
[i
].Va
== P
)
313 /* Make sure the tag is ok */
314 if (PoolBigPageTable
[i
].Key
!= Tag
)
316 KeBugCheckEx(BAD_POOL_CALLER
, 0x0A, (ULONG_PTR
)P
, PoolBigPageTable
[i
].Key
, Tag
);
323 /* Release the lock */
324 KeReleaseSpinLock(&ExpLargePoolTableLock
, OldIrql
);
326 if (i
== PoolBigPageTableSize
)
328 /* Did not find the allocation */
332 /* Get Pool type by address */
333 RealPoolType
= MmDeterminePoolType(P
);
338 if (Entry
->PoolTag
!= Tag
)
340 DPRINT1("Allocation has wrong pool tag! Expected '%.4s', got '%.4s' (0x%08lx)\n",
341 &Tag
, &Entry
->PoolTag
, Entry
->PoolTag
);
342 KeBugCheckEx(BAD_POOL_CALLER
, 0x0A, (ULONG_PTR
)P
, Entry
->PoolTag
, Tag
);
345 /* Check the rest of the header */
346 ExpCheckPoolHeader(Entry
);
348 /* Get Pool type from entry */
349 RealPoolType
= (Entry
->PoolType
- 1);
352 /* Should we check the pool type? */
355 /* Verify the pool type */
356 if (RealPoolType
!= PoolType
)
358 DPRINT1("Wrong pool type! Expected %s, got %s\n",
359 PoolType
& BASE_POOL_TYPE_MASK
? "PagedPool" : "NonPagedPool",
360 (Entry
->PoolType
- 1) & BASE_POOL_TYPE_MASK
? "PagedPool" : "NonPagedPool");
361 KeBugCheckEx(BAD_POOL_CALLER
, 0xCC, (ULONG_PTR
)P
, Entry
->PoolTag
, Tag
);
368 ExpCheckPoolBlocks(IN PVOID Block
)
370 BOOLEAN FoundBlock
= FALSE
;
374 /* Get the first entry for this page, make sure it really is the first */
375 Entry
= PAGE_ALIGN(Block
);
376 ASSERT(Entry
->PreviousSize
== 0);
378 /* Now scan each entry */
381 /* When we actually found our block, remember this */
382 if (Entry
== Block
) FoundBlock
= TRUE
;
384 /* Now validate this block header */
385 ExpCheckPoolHeader(Entry
);
387 /* And go to the next one, keeping track of our size */
388 Size
+= Entry
->BlockSize
;
389 Entry
= POOL_NEXT_BLOCK(Entry
);
391 /* If we hit the last block, stop */
392 if (Size
>= (PAGE_SIZE
/ POOL_BLOCK_SIZE
)) break;
394 /* If we hit the end of the page, stop */
395 if (PAGE_ALIGN(Entry
) == Entry
) break;
398 /* We must've found our block, and we must have hit the end of the page */
399 if ((PAGE_ALIGN(Entry
) != Entry
) || !(FoundBlock
))
401 /* Otherwise, the blocks are messed up */
402 KeBugCheckEx(BAD_POOL_HEADER
, 10, (ULONG_PTR
)Block
, __LINE__
, (ULONG_PTR
)Entry
);
408 ExpCheckPoolIrqlLevel(IN POOL_TYPE PoolType
,
409 IN SIZE_T NumberOfBytes
,
413 // Validate IRQL: It must be APC_LEVEL or lower for Paged Pool, and it must
414 // be DISPATCH_LEVEL or lower for Non Paged Pool
416 if (((PoolType
& BASE_POOL_TYPE_MASK
) == PagedPool
) ?
417 (KeGetCurrentIrql() > APC_LEVEL
) :
418 (KeGetCurrentIrql() > DISPATCH_LEVEL
))
421 // Take the system down
423 KeBugCheckEx(BAD_POOL_CALLER
,
424 !Entry
? POOL_ALLOC_IRQL_INVALID
: POOL_FREE_IRQL_INVALID
,
427 !Entry
? NumberOfBytes
: (ULONG_PTR
)Entry
);
433 ExpComputeHashForTag(IN ULONG Tag
,
434 IN SIZE_T BucketMask
)
437 // Compute the hash by multiplying with a large prime number and then XORing
438 // with the HIDWORD of the result.
440 // Finally, AND with the bucket mask to generate a valid index/bucket into
443 ULONGLONG Result
= (ULONGLONG
)40543 * Tag
;
444 return (ULONG
)BucketMask
& ((ULONG
)Result
^ (Result
>> 32));
449 ExpComputePartialHashForAddress(IN PVOID BaseAddress
)
453 // Compute the hash by converting the address into a page number, and then
454 // XORing each nibble with the next one.
456 // We do *NOT* AND with the bucket mask at this point because big table expansion
457 // might happen. Therefore, the final step of the hash must be performed
458 // while holding the expansion pushlock, and this is why we call this a
459 // "partial" hash only.
461 Result
= (ULONG
)((ULONG_PTR
)BaseAddress
>> PAGE_SHIFT
);
462 return (Result
>> 24) ^ (Result
>> 16) ^ (Result
>> 8) ^ Result
;
468 ExpTagAllowPrint(CHAR Tag
)
470 if ((Tag
>= 'a' && Tag
<= 'z') ||
471 (Tag
>= 'A' && Tag
<= 'Z') ||
472 (Tag
>= '0' && Tag
<= '9') ||
473 Tag
== ' ' || Tag
== '=' ||
474 Tag
== '?' || Tag
== '@')
483 #define MiDumperPrint(dbg, fmt, ...) \
484 if (dbg) KdbpPrint(fmt, ##__VA_ARGS__); \
485 else DPRINT1(fmt, ##__VA_ARGS__)
487 #define MiDumperPrint(dbg, fmt, ...) \
488 DPRINT1(fmt, ##__VA_ARGS__)
492 MiDumpPoolConsumers(BOOLEAN CalledFromDbg
, ULONG Tag
, ULONG Mask
, ULONG Flags
)
498 // Only print header if called from OOM situation
502 DPRINT1("---------------------\n");
503 DPRINT1("Out of memory dumper!\n");
508 KdbpPrint("Pool Used:\n");
513 // Remember whether we'll have to be verbose
514 // This is the only supported flag!
516 Verbose
= BooleanFlagOn(Flags
, 1);
519 // Print table header
523 MiDumperPrint(CalledFromDbg
, "\t\t\t\tNonPaged\t\t\t\t\t\t\tPaged\n");
524 MiDumperPrint(CalledFromDbg
, "Tag\t\tAllocs\t\tFrees\t\tDiff\t\tUsed\t\tAllocs\t\tFrees\t\tDiff\t\tUsed\n");
528 MiDumperPrint(CalledFromDbg
, "\t\tNonPaged\t\t\tPaged\n");
529 MiDumperPrint(CalledFromDbg
, "Tag\t\tAllocs\t\tUsed\t\tAllocs\t\tUsed\n");
533 // We'll extract allocations for all the tracked pools
535 for (i
= 0; i
< PoolTrackTableSize
; ++i
)
537 PPOOL_TRACKER_TABLE TableEntry
;
539 TableEntry
= &PoolTrackTable
[i
];
542 // We only care about tags which have allocated memory
544 if (TableEntry
->NonPagedBytes
!= 0 || TableEntry
->PagedBytes
!= 0)
547 // If there's a tag, attempt to do a pretty print
548 // only if it matches the caller's tag, or if
549 // any tag is allowed
550 // For checking whether it matches caller's tag,
551 // use the mask to make sure not to mess with the wildcards
553 if (TableEntry
->Key
!= 0 && TableEntry
->Key
!= TAG_NONE
&&
554 (Tag
== 0 || (TableEntry
->Key
& Mask
) == (Tag
& Mask
)))
559 // Extract each 'component' and check whether they are printable
561 Tag
[0] = TableEntry
->Key
& 0xFF;
562 Tag
[1] = TableEntry
->Key
>> 8 & 0xFF;
563 Tag
[2] = TableEntry
->Key
>> 16 & 0xFF;
564 Tag
[3] = TableEntry
->Key
>> 24 & 0xFF;
566 if (ExpTagAllowPrint(Tag
[0]) && ExpTagAllowPrint(Tag
[1]) && ExpTagAllowPrint(Tag
[2]) && ExpTagAllowPrint(Tag
[3]))
569 // Print in direct order to make !poolused TAG usage easier
573 MiDumperPrint(CalledFromDbg
, "'%c%c%c%c'\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", Tag
[0], Tag
[1], Tag
[2], Tag
[3],
574 TableEntry
->NonPagedAllocs
, TableEntry
->NonPagedFrees
,
575 (TableEntry
->NonPagedAllocs
- TableEntry
->NonPagedFrees
), TableEntry
->NonPagedBytes
,
576 TableEntry
->PagedAllocs
, TableEntry
->PagedFrees
,
577 (TableEntry
->PagedAllocs
- TableEntry
->PagedFrees
), TableEntry
->PagedBytes
);
581 MiDumperPrint(CalledFromDbg
, "'%c%c%c%c'\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", Tag
[0], Tag
[1], Tag
[2], Tag
[3],
582 TableEntry
->NonPagedAllocs
, TableEntry
->NonPagedBytes
,
583 TableEntry
->PagedAllocs
, TableEntry
->PagedBytes
);
590 MiDumperPrint(CalledFromDbg
, "%x\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TableEntry
->Key
,
591 TableEntry
->NonPagedAllocs
, TableEntry
->NonPagedFrees
,
592 (TableEntry
->NonPagedAllocs
- TableEntry
->NonPagedFrees
), TableEntry
->NonPagedBytes
,
593 TableEntry
->PagedAllocs
, TableEntry
->PagedFrees
,
594 (TableEntry
->PagedAllocs
- TableEntry
->PagedFrees
), TableEntry
->PagedBytes
);
598 MiDumperPrint(CalledFromDbg
, "%x\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TableEntry
->Key
,
599 TableEntry
->NonPagedAllocs
, TableEntry
->NonPagedBytes
,
600 TableEntry
->PagedAllocs
, TableEntry
->PagedBytes
);
604 else if (Tag
== 0 || (Tag
& Mask
) == (TAG_NONE
& Mask
))
608 MiDumperPrint(CalledFromDbg
, "Anon\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n",
609 TableEntry
->NonPagedAllocs
, TableEntry
->NonPagedFrees
,
610 (TableEntry
->NonPagedAllocs
- TableEntry
->NonPagedFrees
), TableEntry
->NonPagedBytes
,
611 TableEntry
->PagedAllocs
, TableEntry
->PagedFrees
,
612 (TableEntry
->PagedAllocs
- TableEntry
->PagedFrees
), TableEntry
->PagedBytes
);
616 MiDumperPrint(CalledFromDbg
, "Anon\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n",
617 TableEntry
->NonPagedAllocs
, TableEntry
->NonPagedBytes
,
618 TableEntry
->PagedAllocs
, TableEntry
->PagedBytes
);
626 DPRINT1("---------------------\n");
631 /* PRIVATE FUNCTIONS **********************************************************/
638 ULONG i
, Key
, Hash
, Index
;
639 PPOOL_TRACKER_TABLE TrackTable
= PoolTrackTable
;
709 // Loop all 64 hot tags
711 ASSERT((sizeof(TagList
) / sizeof(ULONG
)) == 64);
712 for (i
= 0; i
< sizeof(TagList
) / sizeof(ULONG
); i
++)
715 // Get the current tag, and compute its hash in the tracker table
718 Hash
= ExpComputeHashForTag(Key
, PoolTrackTableMask
);
721 // Loop all the hashes in this index/bucket
727 // Find an empty entry, and make sure this isn't the last hash that
730 // On checked builds, also make sure this is the first time we are
733 ASSERT(TrackTable
[Hash
].Key
!= Key
);
734 if (!(TrackTable
[Hash
].Key
) && (Hash
!= PoolTrackTableSize
- 1))
737 // It has been seeded, move on to the next tag
739 TrackTable
[Hash
].Key
= Key
;
744 // This entry was already taken, compute the next possible hash while
745 // making sure we're not back at our initial index.
747 ASSERT(TrackTable
[Hash
].Key
!= Key
);
748 Hash
= (Hash
+ 1) & PoolTrackTableMask
;
749 if (Hash
== Index
) break;
756 ExpRemovePoolTracker(IN ULONG Key
,
757 IN SIZE_T NumberOfBytes
,
758 IN POOL_TYPE PoolType
)
761 PPOOL_TRACKER_TABLE Table
, TableEntry
;
762 SIZE_T TableMask
, TableSize
;
765 // Remove the PROTECTED_POOL flag which is not part of the tag
767 Key
&= ~PROTECTED_POOL
;
770 // With WinDBG you can set a tag you want to break on when an allocation is
773 if (Key
== PoolHitTag
) DbgBreakPoint();
776 // Why the double indirection? Because normally this function is also used
777 // when doing session pool allocations, which has another set of tables,
778 // sizes, and masks that live in session pool. Now we don't support session
779 // pool so we only ever use the regular tables, but I'm keeping the code this
780 // way so that the day we DO support session pool, it won't require that
783 Table
= PoolTrackTable
;
784 TableMask
= PoolTrackTableMask
;
785 TableSize
= PoolTrackTableSize
;
786 DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize
);
789 // Compute the hash for this key, and loop all the possible buckets
791 Hash
= ExpComputeHashForTag(Key
, TableMask
);
796 // Have we found the entry for this tag? */
798 TableEntry
= &Table
[Hash
];
799 if (TableEntry
->Key
== Key
)
802 // Decrement the counters depending on if this was paged or nonpaged
805 if ((PoolType
& BASE_POOL_TYPE_MASK
) == NonPagedPool
)
807 InterlockedIncrement(&TableEntry
->NonPagedFrees
);
808 InterlockedExchangeAddSizeT(&TableEntry
->NonPagedBytes
,
809 -(SSIZE_T
)NumberOfBytes
);
812 InterlockedIncrement(&TableEntry
->PagedFrees
);
813 InterlockedExchangeAddSizeT(&TableEntry
->PagedBytes
,
814 -(SSIZE_T
)NumberOfBytes
);
819 // We should have only ended up with an empty entry if we've reached
822 if (!TableEntry
->Key
)
824 DPRINT1("Empty item reached in tracker table. Hash=0x%lx, TableMask=0x%lx, Tag=0x%08lx, NumberOfBytes=%lu, PoolType=%d\n",
825 Hash
, TableMask
, Key
, (ULONG
)NumberOfBytes
, PoolType
);
826 ASSERT(Hash
== TableMask
);
830 // This path is hit when we don't have an entry, and the current bucket
831 // is full, so we simply try the next one
833 Hash
= (Hash
+ 1) & TableMask
;
834 if (Hash
== Index
) break;
838 // And finally this path is hit when all the buckets are full, and we need
839 // some expansion. This path is not yet supported in ReactOS and so we'll
842 DPRINT1("Out of pool tag space, ignoring...\n");
847 ExpInsertPoolTracker(IN ULONG Key
,
848 IN SIZE_T NumberOfBytes
,
849 IN POOL_TYPE PoolType
)
853 PPOOL_TRACKER_TABLE Table
, TableEntry
;
854 SIZE_T TableMask
, TableSize
;
857 // Remove the PROTECTED_POOL flag which is not part of the tag
859 Key
&= ~PROTECTED_POOL
;
862 // With WinDBG you can set a tag you want to break on when an allocation is
865 if (Key
== PoolHitTag
) DbgBreakPoint();
868 // There is also an internal flag you can set to break on malformed tags
870 if (ExStopBadTags
) ASSERT(Key
& 0xFFFFFF00);
873 // ASSERT on ReactOS features not yet supported
875 ASSERT(!(PoolType
& SESSION_POOL_MASK
));
876 ASSERT(KeGetCurrentProcessorNumber() == 0);
879 // Why the double indirection? Because normally this function is also used
880 // when doing session pool allocations, which has another set of tables,
881 // sizes, and masks that live in session pool. Now we don't support session
882 // pool so we only ever use the regular tables, but I'm keeping the code this
883 // way so that the day we DO support session pool, it won't require that
886 Table
= PoolTrackTable
;
887 TableMask
= PoolTrackTableMask
;
888 TableSize
= PoolTrackTableSize
;
889 DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize
);
892 // Compute the hash for this key, and loop all the possible buckets
894 Hash
= ExpComputeHashForTag(Key
, TableMask
);
899 // Do we already have an entry for this tag? */
901 TableEntry
= &Table
[Hash
];
902 if (TableEntry
->Key
== Key
)
905 // Increment the counters depending on if this was paged or nonpaged
908 if ((PoolType
& BASE_POOL_TYPE_MASK
) == NonPagedPool
)
910 InterlockedIncrement(&TableEntry
->NonPagedAllocs
);
911 InterlockedExchangeAddSizeT(&TableEntry
->NonPagedBytes
, NumberOfBytes
);
914 InterlockedIncrement(&TableEntry
->PagedAllocs
);
915 InterlockedExchangeAddSizeT(&TableEntry
->PagedBytes
, NumberOfBytes
);
920 // We don't have an entry yet, but we've found a free bucket for it
922 if (!(TableEntry
->Key
) && (Hash
!= PoolTrackTableSize
- 1))
925 // We need to hold the lock while creating a new entry, since other
926 // processors might be in this code path as well
928 ExAcquireSpinLock(&ExpTaggedPoolLock
, &OldIrql
);
929 if (!PoolTrackTable
[Hash
].Key
)
932 // We've won the race, so now create this entry in the bucket
934 ASSERT(Table
[Hash
].Key
== 0);
935 PoolTrackTable
[Hash
].Key
= Key
;
936 TableEntry
->Key
= Key
;
938 ExReleaseSpinLock(&ExpTaggedPoolLock
, OldIrql
);
941 // Now we force the loop to run again, and we should now end up in
942 // the code path above which does the interlocked increments...
948 // This path is hit when we don't have an entry, and the current bucket
949 // is full, so we simply try the next one
951 Hash
= (Hash
+ 1) & TableMask
;
952 if (Hash
== Index
) break;
956 // And finally this path is hit when all the buckets are full, and we need
957 // some expansion. This path is not yet supported in ReactOS and so we'll
960 DPRINT1("Out of pool tag space, ignoring...\n");
966 ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor
,
967 IN POOL_TYPE PoolType
,
972 PLIST_ENTRY NextEntry
, LastEntry
;
975 // Setup the descriptor based on the caller's request
977 PoolDescriptor
->PoolType
= PoolType
;
978 PoolDescriptor
->PoolIndex
= PoolIndex
;
979 PoolDescriptor
->Threshold
= Threshold
;
980 PoolDescriptor
->LockAddress
= PoolLock
;
983 // Initialize accounting data
985 PoolDescriptor
->RunningAllocs
= 0;
986 PoolDescriptor
->RunningDeAllocs
= 0;
987 PoolDescriptor
->TotalPages
= 0;
988 PoolDescriptor
->TotalBytes
= 0;
989 PoolDescriptor
->TotalBigPages
= 0;
992 // Nothing pending for now
994 PoolDescriptor
->PendingFrees
= NULL
;
995 PoolDescriptor
->PendingFreeDepth
= 0;
998 // Loop all the descriptor's allocation lists and initialize them
1000 NextEntry
= PoolDescriptor
->ListHeads
;
1001 LastEntry
= NextEntry
+ POOL_LISTS_PER_PAGE
;
1002 while (NextEntry
< LastEntry
)
1004 ExpInitializePoolListHead(NextEntry
);
1009 // Note that ReactOS does not support Session Pool Yet
1011 ASSERT(PoolType
!= PagedPoolSession
);
1017 InitializePool(IN POOL_TYPE PoolType
,
1020 PPOOL_DESCRIPTOR Descriptor
;
1025 // Check what kind of pool this is
1027 if (PoolType
== NonPagedPool
)
1030 // Compute the track table size and convert it from a power of two to an
1033 // NOTE: On checked builds, we'll assert if the registry table size was
1034 // invalid, while on retail builds we'll just break out of the loop at
1037 TableSize
= min(PoolTrackTableSize
, MmSizeOfNonPagedPoolInBytes
>> 8);
1038 for (i
= 0; i
< 32; i
++)
1042 ASSERT((TableSize
& ~1) == 0);
1043 if (!(TableSize
& ~1)) break;
1049 // If we hit bit 32, than no size was defined in the registry, so
1050 // we'll use the default size of 2048 entries.
1052 // Otherwise, use the size from the registry, as long as it's not
1053 // smaller than 64 entries.
1057 PoolTrackTableSize
= 2048;
1061 PoolTrackTableSize
= max(1 << i
, 64);
1065 // Loop trying with the biggest specified size first, and cut it down
1066 // by a power of two each iteration in case not enough memory exist
1071 // Do not allow overflow
1073 if ((PoolTrackTableSize
+ 1) > (MAXULONG_PTR
/ sizeof(POOL_TRACKER_TABLE
)))
1075 PoolTrackTableSize
>>= 1;
1080 // Allocate the tracker table and exit the loop if this worked
1082 PoolTrackTable
= MiAllocatePoolPages(NonPagedPool
,
1083 (PoolTrackTableSize
+ 1) *
1084 sizeof(POOL_TRACKER_TABLE
));
1085 if (PoolTrackTable
) break;
1088 // Otherwise, as long as we're not down to the last bit, keep
1091 if (PoolTrackTableSize
== 1)
1093 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY
,
1099 PoolTrackTableSize
>>= 1;
1103 // Add one entry, compute the hash, and zero the table
1105 PoolTrackTableSize
++;
1106 PoolTrackTableMask
= PoolTrackTableSize
- 2;
1108 RtlZeroMemory(PoolTrackTable
,
1109 PoolTrackTableSize
* sizeof(POOL_TRACKER_TABLE
));
1112 // Finally, add the most used tags to speed up those allocations
1117 // We now do the exact same thing with the tracker table for big pages
1119 TableSize
= min(PoolBigPageTableSize
, MmSizeOfNonPagedPoolInBytes
>> 8);
1120 for (i
= 0; i
< 32; i
++)
1124 ASSERT((TableSize
& ~1) == 0);
1125 if (!(TableSize
& ~1)) break;
1131 // For big pages, the default tracker table is 4096 entries, while the
1132 // minimum is still 64
1136 PoolBigPageTableSize
= 4096;
1140 PoolBigPageTableSize
= max(1 << i
, 64);
1144 // Again, run the exact same loop we ran earlier, but this time for the
1145 // big pool tracker instead
1149 if ((PoolBigPageTableSize
+ 1) > (MAXULONG_PTR
/ sizeof(POOL_TRACKER_BIG_PAGES
)))
1151 PoolBigPageTableSize
>>= 1;
1155 PoolBigPageTable
= MiAllocatePoolPages(NonPagedPool
,
1156 PoolBigPageTableSize
*
1157 sizeof(POOL_TRACKER_BIG_PAGES
));
1158 if (PoolBigPageTable
) break;
1160 if (PoolBigPageTableSize
== 1)
1162 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY
,
1169 PoolBigPageTableSize
>>= 1;
1173 // An extra entry is not needed for for the big pool tracker, so just
1174 // compute the hash and zero it
1176 PoolBigPageTableHash
= PoolBigPageTableSize
- 1;
1177 RtlZeroMemory(PoolBigPageTable
,
1178 PoolBigPageTableSize
* sizeof(POOL_TRACKER_BIG_PAGES
));
1179 for (i
= 0; i
< PoolBigPageTableSize
; i
++)
1181 PoolBigPageTable
[i
].Va
= (PVOID
)POOL_BIG_TABLE_ENTRY_FREE
;
1185 // During development, print this out so we can see what's happening
1187 DPRINT("EXPOOL: Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
1188 PoolTrackTable
, PoolTrackTableSize
* sizeof(POOL_TRACKER_TABLE
));
1189 DPRINT("EXPOOL: Big Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
1190 PoolBigPageTable
, PoolBigPageTableSize
* sizeof(POOL_TRACKER_BIG_PAGES
));
1193 // Insert the generic tracker for all of big pool
1195 ExpInsertPoolTracker('looP',
1196 ROUND_TO_PAGES(PoolBigPageTableSize
*
1197 sizeof(POOL_TRACKER_BIG_PAGES
)),
1201 // No support for NUMA systems at this time
1203 ASSERT(KeNumberNodes
== 1);
1206 // Initialize the tag spinlock
1208 KeInitializeSpinLock(&ExpTaggedPoolLock
);
1211 // Initialize the nonpaged pool descriptor
1213 PoolVector
[NonPagedPool
] = &NonPagedPoolDescriptor
;
1214 ExInitializePoolDescriptor(PoolVector
[NonPagedPool
],
1223 // No support for NUMA systems at this time
1225 ASSERT(KeNumberNodes
== 1);
1228 // Allocate the pool descriptor
1230 Descriptor
= ExAllocatePoolWithTag(NonPagedPool
,
1231 sizeof(KGUARDED_MUTEX
) +
1232 sizeof(POOL_DESCRIPTOR
),
1237 // This is really bad...
1239 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY
,
1247 // Setup the vector and guarded mutex for paged pool
1249 PoolVector
[PagedPool
] = Descriptor
;
1250 ExpPagedPoolMutex
= (PKGUARDED_MUTEX
)(Descriptor
+ 1);
1251 ExpPagedPoolDescriptor
[0] = Descriptor
;
1252 KeInitializeGuardedMutex(ExpPagedPoolMutex
);
1253 ExInitializePoolDescriptor(Descriptor
,
1260 // Insert the generic tracker for all of nonpaged pool
1262 ExpInsertPoolTracker('looP',
1263 ROUND_TO_PAGES(PoolTrackTableSize
* sizeof(POOL_TRACKER_TABLE
)),
1270 ExLockPool(IN PPOOL_DESCRIPTOR Descriptor
)
1273 // Check if this is nonpaged pool
1275 if ((Descriptor
->PoolType
& BASE_POOL_TYPE_MASK
) == NonPagedPool
)
1278 // Use the queued spin lock
1280 return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock
);
1285 // Use the guarded mutex
1287 KeAcquireGuardedMutex(Descriptor
->LockAddress
);
1294 ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor
,
1298 // Check if this is nonpaged pool
1300 if ((Descriptor
->PoolType
& BASE_POOL_TYPE_MASK
) == NonPagedPool
)
1303 // Use the queued spin lock
1305 KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock
, OldIrql
);
1310 // Use the guarded mutex
1312 KeReleaseGuardedMutex(Descriptor
->LockAddress
);
1318 ExpGetPoolTagInfoTarget(IN PKDPC Dpc
,
1319 IN PVOID DeferredContext
,
1320 IN PVOID SystemArgument1
,
1321 IN PVOID SystemArgument2
)
1323 PPOOL_DPC_CONTEXT Context
= DeferredContext
;
1324 UNREFERENCED_PARAMETER(Dpc
);
1325 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL
);
1328 // Make sure we win the race, and if we did, copy the data atomically
1330 if (KeSignalCallDpcSynchronize(SystemArgument2
))
1332 RtlCopyMemory(Context
->PoolTrackTable
,
1334 Context
->PoolTrackTableSize
* sizeof(POOL_TRACKER_TABLE
));
1337 // This is here because ReactOS does not yet support expansion
1339 ASSERT(Context
->PoolTrackTableSizeExpansion
== 0);
1343 // Regardless of whether we won or not, we must now synchronize and then
1344 // decrement the barrier since this is one more processor that has completed
1347 KeSignalCallDpcSynchronize(SystemArgument2
);
1348 KeSignalCallDpcDone(SystemArgument1
);
1353 ExGetPoolTagInfo(IN PSYSTEM_POOLTAG_INFORMATION SystemInformation
,
1354 IN ULONG SystemInformationLength
,
1355 IN OUT PULONG ReturnLength OPTIONAL
)
1357 ULONG TableSize
, CurrentLength
;
1359 NTSTATUS Status
= STATUS_SUCCESS
;
1360 PSYSTEM_POOLTAG TagEntry
;
1361 PPOOL_TRACKER_TABLE Buffer
, TrackerEntry
;
1362 POOL_DPC_CONTEXT Context
;
1363 ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL
);
1366 // Keep track of how much data the caller's buffer must hold
1368 CurrentLength
= FIELD_OFFSET(SYSTEM_POOLTAG_INFORMATION
, TagInfo
);
1371 // Initialize the caller's buffer
1373 TagEntry
= &SystemInformation
->TagInfo
[0];
1374 SystemInformation
->Count
= 0;
1377 // Capture the number of entries, and the total size needed to make a copy
1380 EntryCount
= (ULONG
)PoolTrackTableSize
;
1381 TableSize
= EntryCount
* sizeof(POOL_TRACKER_TABLE
);
1384 // Allocate the "Generic DPC" temporary buffer
1386 Buffer
= ExAllocatePoolWithTag(NonPagedPool
, TableSize
, 'ofnI');
1387 if (!Buffer
) return STATUS_INSUFFICIENT_RESOURCES
;
1390 // Do a "Generic DPC" to atomically retrieve the tag and allocation data
1392 Context
.PoolTrackTable
= Buffer
;
1393 Context
.PoolTrackTableSize
= PoolTrackTableSize
;
1394 Context
.PoolTrackTableExpansion
= NULL
;
1395 Context
.PoolTrackTableSizeExpansion
= 0;
1396 KeGenericCallDpc(ExpGetPoolTagInfoTarget
, &Context
);
1399 // Now parse the results
1401 for (TrackerEntry
= Buffer
; TrackerEntry
< (Buffer
+ EntryCount
); TrackerEntry
++)
1404 // If the entry is empty, skip it
1406 if (!TrackerEntry
->Key
) continue;
1409 // Otherwise, add one more entry to the caller's buffer, and ensure that
1410 // enough space has been allocated in it
1412 SystemInformation
->Count
++;
1413 CurrentLength
+= sizeof(*TagEntry
);
1414 if (SystemInformationLength
< CurrentLength
)
1417 // The caller's buffer is too small, so set a failure code. The
1418 // caller will know the count, as well as how much space is needed.
1420 // We do NOT break out of the loop, because we want to keep incrementing
1421 // the Count as well as CurrentLength so that the caller can know the
1424 Status
= STATUS_INFO_LENGTH_MISMATCH
;
1429 // Small sanity check that our accounting is working correctly
1431 ASSERT(TrackerEntry
->PagedAllocs
>= TrackerEntry
->PagedFrees
);
1432 ASSERT(TrackerEntry
->NonPagedAllocs
>= TrackerEntry
->NonPagedFrees
);
1435 // Return the data into the caller's buffer
1437 TagEntry
->TagUlong
= TrackerEntry
->Key
;
1438 TagEntry
->PagedAllocs
= TrackerEntry
->PagedAllocs
;
1439 TagEntry
->PagedFrees
= TrackerEntry
->PagedFrees
;
1440 TagEntry
->PagedUsed
= TrackerEntry
->PagedBytes
;
1441 TagEntry
->NonPagedAllocs
= TrackerEntry
->NonPagedAllocs
;
1442 TagEntry
->NonPagedFrees
= TrackerEntry
->NonPagedFrees
;
1443 TagEntry
->NonPagedUsed
= TrackerEntry
->NonPagedBytes
;
1449 // Free the "Generic DPC" temporary buffer, return the buffer length and status
1451 ExFreePoolWithTag(Buffer
, 'ofnI');
1452 if (ReturnLength
) *ReturnLength
= CurrentLength
;
/*
 * Doubles the big-pool tracker hash table (PoolBigPageTable).
 *
 * Must be entered with ExpLargePoolTableLock held at DISPATCH_LEVEL; the
 * lock is released on EVERY exit path (success and failure), as declared
 * by the _IRQL_restores_ annotation on OldIrql.
 *
 * Returns TRUE if the table was replaced, FALSE on overflow or allocation
 * failure.
 */
_IRQL_requires_(DISPATCH_LEVEL)
BOOLEAN
NTAPI
ExpExpandBigPageTable(
    _In_ _IRQL_restores_ KIRQL OldIrql)
{
    ULONG OldSize = PoolBigPageTableSize;
    ULONG NewSize = 2 * OldSize;
    ULONG NewSizeInBytes;
    PPOOL_TRACKER_BIG_PAGES NewTable;
    PPOOL_TRACKER_BIG_PAGES OldTable;
    ULONG i;
    ULONG Hash;
    ULONG HashMask;
    PFN_NUMBER PagesFreed;

    /* Must be holding ExpLargePoolTableLock */
    ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

    /* Make sure we don't overflow */
    if (!NT_SUCCESS(RtlULongMult(2,
                                 OldSize * sizeof(POOL_TRACKER_BIG_PAGES),
                                 &NewSizeInBytes)))
    {
        DPRINT1("Overflow expanding big page table. Size=%lu\n", OldSize);
        KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
        return FALSE;
    }

    NewTable = MiAllocatePoolPages(NonPagedPool, NewSizeInBytes);
    if (NewTable == NULL)
    {
        DPRINT1("Could not allocate %lu bytes for new big page table\n", NewSizeInBytes);
        KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
        return FALSE;
    }

    DPRINT("Expanding big pool tracker table to %lu entries\n", NewSize);

    /* Initialize the new table: every slot starts out marked free via the
       low bit of the Va field (POOL_BIG_TABLE_ENTRY_FREE) */
    RtlZeroMemory(NewTable, NewSizeInBytes);
    for (i = 0; i < NewSize; i++)
    {
        NewTable[i].Va = (PVOID)POOL_BIG_TABLE_ENTRY_FREE;
    }

    /* Copy over all items */
    OldTable = PoolBigPageTable;
    HashMask = NewSize - 1;
    for (i = 0; i < OldSize; i++)
    {
        /* Skip over empty items */
        if ((ULONG_PTR)OldTable[i].Va & POOL_BIG_TABLE_ENTRY_FREE)
        {
            continue;
        }

        /* Recalculate the hash due to the new table size */
        Hash = ExpComputePartialHashForAddress(OldTable[i].Va) & HashMask;

        /* Find the location in the new table (linear probing) */
        while (!((ULONG_PTR)NewTable[Hash].Va & POOL_BIG_TABLE_ENTRY_FREE))
        {
            Hash = (Hash + 1) & HashMask;
        }

        /* We just enlarged the table, so we must have space */
        ASSERT((ULONG_PTR)NewTable[Hash].Va & POOL_BIG_TABLE_ENTRY_FREE);

        /* Finally, copy the item */
        NewTable[Hash] = OldTable[i];
    }

    /* Activate the new table */
    PoolBigPageTable = NewTable;
    PoolBigPageTableSize = NewSize;
    PoolBigPageTableHash = PoolBigPageTableSize - 1;

    /* Release the lock, we're done changing global state */
    KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);

    /* Free the old table and update our tracker ('looP' is the tag used to
       account for the table's own pages) */
    PagesFreed = MiFreePoolPages(OldTable);
    ExpRemovePoolTracker('looP', PagesFreed << PAGE_SHIFT, 0);
    ExpInsertPoolTracker('looP', ALIGN_UP_BY(NewSizeInBytes, PAGE_SIZE), 0);

    return TRUE;
}
/*
 * Records a big-page (whole-page) allocation in the PoolBigPageTable hash
 * table so its tag and page count can be retrieved at free time.
 *
 * Returns TRUE if an entry was claimed, FALSE if the table was full and
 * could not be expanded (the caller then falls back to the generic 'BIG'
 * tag).
 */
BOOLEAN
NTAPI
ExpAddTagForBigPages(IN PVOID Va,
                     IN ULONG Key,
                     IN ULONG NumberOfPages,
                     IN POOL_TYPE PoolType)
{
    ULONG Hash, i = 0;
    PVOID OldVa;
    KIRQL OldIrql;
    SIZE_T TableSize;
    PPOOL_TRACKER_BIG_PAGES Entry, EntryEnd, EntryStart;
    /* The low bit of Va is the free marker, so a real Va must be aligned */
    ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
    ASSERT(!(PoolType & SESSION_POOL_MASK));

    //
    // As the table is expandable, these values must only be read after acquiring
    // the lock to avoid a teared access during an expansion
    // NOTE: Windows uses a special reader/writer SpinLock to improve
    // performance in the common case (add/remove a tracker entry)
    //
Retry:
    Hash = ExpComputePartialHashForAddress(Va);
    KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
    Hash &= PoolBigPageTableHash;
    TableSize = PoolBigPageTableSize;

    //
    // We loop from the current hash bucket to the end of the table, and then
    // rollover to hash bucket 0 and keep going from there. If we return back
    // to the beginning, then we attempt expansion at the bottom of the loop
    //
    EntryStart = Entry = &PoolBigPageTable[Hash];
    EntryEnd = &PoolBigPageTable[TableSize];
    do
    {
        //
        // Make sure that this is a free entry and attempt to atomically make the
        // entry busy now
        // NOTE: the Interlocked operation cannot fail with an exclusive SpinLock
        //
        OldVa = Entry->Va;
        if (((ULONG_PTR)OldVa & POOL_BIG_TABLE_ENTRY_FREE) &&
            (NT_VERIFY(InterlockedCompareExchangePointer(&Entry->Va, Va, OldVa) == OldVa)))
        {
            //
            // We now own this entry, write down the size and the pool tag
            //
            Entry->Key = Key;
            Entry->NumberOfPages = NumberOfPages;

            //
            // Add one more entry to the count, and see if we're getting within
            // 25% of the table size, at which point we'll do an expansion now
            // to avoid blocking too hard later on.
            //
            // Note that we only do this if it's also been the 16th time that we
            // keep losing the race or that we are not finding a free entry anymore,
            // which implies a massive number of concurrent big pool allocations.
            //
            InterlockedIncrementUL(&ExpPoolBigEntriesInUse);
            if ((i >= 16) && (ExpPoolBigEntriesInUse > (TableSize / 4)))
            {
                DPRINT("Attempting expansion since we now have %lu entries\n",
                        ExpPoolBigEntriesInUse);
                ASSERT(TableSize == PoolBigPageTableSize);
                /* ExpExpandBigPageTable releases the lock on all paths */
                ExpExpandBigPageTable(OldIrql);
                return TRUE;
            }

            //
            // We have our entry, return
            //
            KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
            return TRUE;
        }

        //
        // We don't have our entry yet, so keep trying, making the entry list
        // circular if we reach the last entry. We'll eventually break out of
        // the loop once we've rolled over and returned back to our original
        // hash bucket
        //
        i++;
        if (++Entry >= EntryEnd) Entry = &PoolBigPageTable[0];
    } while (Entry != EntryStart);

    //
    // This means there's no free hash buckets whatsoever, so we now have
    // to attempt expanding the table
    //
    ASSERT(TableSize == PoolBigPageTableSize);
    if (ExpExpandBigPageTable(OldIrql))
    {
        /* Expansion succeeded (and dropped the lock); probe again */
        goto Retry;
    }

    ExpBigTableExpansionFailed++;
    DPRINT1("Big pool table expansion failed\n");
    return FALSE;
}
/*
 * Looks up a big-page allocation in PoolBigPageTable, removes it, and
 * returns its pool tag. On success *BigPages receives the page count.
 *
 * If the allocation is not found (it was inserted with the fallback 'BIG'
 * tag because the table was full at allocation time), returns ' GIB' and
 * sets *BigPages to 0 so the caller can query Mm for the size instead.
 */
ULONG
NTAPI
ExpFindAndRemoveTagBigPages(IN PVOID Va,
                            OUT PULONG_PTR BigPages,
                            IN POOL_TYPE PoolType)
{
    BOOLEAN FirstTry = TRUE;
    SIZE_T TableSize;
    KIRQL OldIrql;
    ULONG PoolTag, Hash;
    PPOOL_TRACKER_BIG_PAGES Entry;
    /* The low bit of Va is the free marker, so a real Va must be aligned */
    ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
    ASSERT(!(PoolType & SESSION_POOL_MASK));

    //
    // As the table is expandable, these values must only be read after acquiring
    // the lock to avoid a teared access during an expansion
    //
    Hash = ExpComputePartialHashForAddress(Va);
    KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
    Hash &= PoolBigPageTableHash;
    TableSize = PoolBigPageTableSize;

    //
    // Loop while trying to find this big page allocation
    //
    while (PoolBigPageTable[Hash].Va != Va)
    {
        //
        // Increment the size until we go past the end of the table
        //
        if (++Hash >= TableSize)
        {
            //
            // Is this the second time we've tried?
            //
            if (!FirstTry)
            {
                //
                // This means it was never inserted into the pool table and it
                // received the special "BIG" tag -- return that and return 0
                // so that the code can ask Mm for the page count instead
                //
                KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
                *BigPages = 0;
                return ' GIB';
            }

            //
            // The first time this happens, reset the hash index and try again
            //
            Hash = 0;
            FirstTry = FALSE;
        }
    }

    //
    // Now capture all the information we need from the entry, since after we
    // release the lock, the data can change
    //
    Entry = &PoolBigPageTable[Hash];
    *BigPages = Entry->NumberOfPages;
    PoolTag = Entry->Key;

    //
    // Set the free bit, and decrement the number of allocations. Finally, release
    // the lock and return the tag that was located
    //
    // (Incrementing Va sets POOL_BIG_TABLE_ENTRY_FREE, since a valid Va is
    // aligned and thus has a clear low bit)
    //
    InterlockedIncrement((PLONG)&Entry->Va);
    InterlockedDecrementUL(&ExpPoolBigEntriesInUse);
    KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
    return PoolTag;
}
/*
 * Tallies pool usage statistics (pages, allocs, frees, lookaside hits) for
 * the paged and non-paged pools into the caller-supplied counters.
 *
 * NOTE: the lookaside-hit counters are accumulated with += and are NOT
 * zeroed here, unlike the paged-pool counters -- callers are expected to
 * initialize them.
 */
VOID
NTAPI
ExQueryPoolUsage(OUT PULONG PagedPoolPages,
                 OUT PULONG NonPagedPoolPages,
                 OUT PULONG PagedPoolAllocs,
                 OUT PULONG PagedPoolFrees,
                 OUT PULONG PagedPoolLookasideHits,
                 OUT PULONG NonPagedPoolAllocs,
                 OUT PULONG NonPagedPoolFrees,
                 OUT PULONG NonPagedPoolLookasideHits)
{
    ULONG i;
    PPOOL_DESCRIPTOR PoolDesc;

    //
    // Assume all failures
    //
    *PagedPoolPages = 0;
    *PagedPoolAllocs = 0;
    *PagedPoolFrees = 0;

    //
    // Tally up the totals for all the paged pools
    //
    for (i = 0; i < ExpNumberOfPagedPools + 1; i++)
    {
        PoolDesc = ExpPagedPoolDescriptor[i];
        *PagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
        *PagedPoolAllocs += PoolDesc->RunningAllocs;
        *PagedPoolFrees += PoolDesc->RunningDeAllocs;
    }

    //
    // The first non-paged pool has a hardcoded well-known descriptor name
    //
    PoolDesc = &NonPagedPoolDescriptor;
    *NonPagedPoolPages = PoolDesc->TotalPages + PoolDesc->TotalBigPages;
    *NonPagedPoolAllocs = PoolDesc->RunningAllocs;
    *NonPagedPoolFrees = PoolDesc->RunningDeAllocs;

    //
    // If the system has more than one non-paged pool, copy the other descriptor
    // totals as well
    //
#if 0
    if (ExpNumberOfNonPagedPools > 1)
    {
        for (i = 0; i < ExpNumberOfNonPagedPools; i++)
        {
            PoolDesc = ExpNonPagedPoolDescriptor[i];
            *NonPagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
            *NonPagedPoolAllocs += PoolDesc->RunningAllocs;
            *NonPagedPoolFrees += PoolDesc->RunningDeAllocs;
        }
    }
#endif

    //
    // Get the amount of hits in the system lookaside lists
    //
    if (!IsListEmpty(&ExPoolLookasideListHead))
    {
        PLIST_ENTRY ListEntry;

        for (ListEntry = ExPoolLookasideListHead.Flink;
             ListEntry != &ExPoolLookasideListHead;
             ListEntry = ListEntry->Flink)
        {
            PGENERAL_LOOKASIDE Lookaside;

            Lookaside = CONTAINING_RECORD(ListEntry, GENERAL_LOOKASIDE, ListEntry);

            if (Lookaside->Type == NonPagedPool)
            {
                *NonPagedPoolLookasideHits += Lookaside->AllocateHits;
            }
            else
            {
                *PagedPoolLookasideHits += Lookaside->AllocateHits;
            }
        }
    }
}
/*
 * Returns the pool quota that was charged to a process when the allocation
 * at P was made with ExAllocatePoolWithQuota(Tag). The charged process
 * pointer is stashed in the last pointer-sized slot of the allocation
 * (see the POOL_NEXT_BLOCK(Entry))[-1] accesses below).
 *
 * Special-pool allocations carry no quota and are ignored.
 */
VOID
NTAPI
ExReturnPoolQuota(IN PVOID P)
{
    PPOOL_HEADER Entry;
    POOL_TYPE PoolType;
    USHORT BlockSize;
    PEPROCESS Process;

    /* Nothing to return for special pool allocations */
    if ((ExpPoolFlags & POOL_FLAG_SPECIAL_POOL) &&
        (MmIsSpecialPoolAddress(P)))
    {
        return;
    }

    /* The pool header sits immediately before the returned pointer */
    Entry = P;
    Entry--;
    ASSERT((ULONG_PTR)Entry % POOL_BLOCK_SIZE == 0);

    /* PoolType is stored biased by one in the header */
    PoolType = Entry->PoolType - 1;
    BlockSize = Entry->BlockSize;

    if (PoolType & QUOTA_POOL_MASK)
    {
        /* The charged process is stored at the very end of the block */
        Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
        ASSERT(Process != NULL);
        if (Process)
        {
            /* Sanity-check that the stashed pointer really is a process;
               a corrupted header here means a caller scribbled on the pool */
            if (Process->Pcb.Header.Type != ProcessObject)
            {
                DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
                        Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
                KeBugCheckEx(BAD_POOL_CALLER,
                             0x0D,
                             (ULONG_PTR)P,
                             Entry->PoolTag,
                             (ULONG_PTR)Process);
            }

            /* Clear the stashed pointer, return the quota and drop the
               reference that was taken when the quota was charged */
            ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
            PsReturnPoolQuota(Process,
                              PoolType & BASE_POOL_TYPE_MASK,
                              BlockSize * POOL_BLOCK_SIZE);
            ObDereferenceObject(Process);
        }
    }
}
1851 /* PUBLIC FUNCTIONS ***********************************************************/
/*
 * @implemented
 *
 * Main executive pool allocator. Allocation strategy, in order:
 *  1. Special pool / verifier hooks, if enabled.
 *  2. Requests larger than POOL_MAX_ALLOC go straight to whole pages
 *     (MiAllocatePoolPages) and are tracked in the big-page table.
 *  3. Small requests try the per-CPU, then global, lookaside lists.
 *  4. Otherwise the pool descriptor's free lists are scanned, splitting a
 *     larger free block into an allocation + leftover fragment if needed.
 *  5. As a last resort a fresh page is carved into the allocation plus a
 *     free fragment.
 */
PVOID
NTAPI
ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
                      IN SIZE_T NumberOfBytes,
                      IN ULONG Tag)
{
    PPOOL_DESCRIPTOR PoolDesc;
    PLIST_ENTRY ListHead;
    PPOOL_HEADER Entry, NextEntry, FragmentEntry;
    KIRQL OldIrql;
    USHORT BlockSize, i;
    ULONG OriginalType;
    PKPRCB Prcb = KeGetCurrentPrcb();
    PGENERAL_LOOKASIDE LookasideList;

    //
    // Some sanity checks
    //
    ASSERT(Tag != 0);
    ASSERT(Tag != ' GIB');
    ASSERT(NumberOfBytes != 0);
    ExpCheckPoolIrqlLevel(PoolType, NumberOfBytes, NULL);

    //
    // Not supported in ReactOS
    //
    ASSERT(!(PoolType & SESSION_POOL_MASK));

    //
    // Check if verifier or special pool is enabled
    //
    if (ExpPoolFlags & (POOL_FLAG_VERIFIER | POOL_FLAG_SPECIAL_POOL))
    {
        //
        // For verifier, we should call the verification routine
        //
        if (ExpPoolFlags & POOL_FLAG_VERIFIER)
        {
            DPRINT1("Driver Verifier is not yet supported\n");
        }

        //
        // For special pool, we check if this is a suitable allocation and do
        // the special allocation if needed
        //
        if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
        {
            //
            // Check if this is a special pool allocation
            //
            if (MmUseSpecialPool(NumberOfBytes, Tag))
            {
                //
                // Try to allocate using special pool
                //
                Entry = MmAllocateSpecialPool(NumberOfBytes, Tag, PoolType, 2);
                if (Entry) return Entry;
            }
        }
    }

    //
    // Get the pool type and its corresponding vector for this request
    //
    OriginalType = PoolType;
    PoolType = PoolType & BASE_POOL_TYPE_MASK;
    PoolDesc = PoolVector[PoolType];
    ASSERT(PoolDesc != NULL);

    //
    // Check if this is a big page allocation
    //
    if (NumberOfBytes > POOL_MAX_ALLOC)
    {
        //
        // Allocate pages for it
        //
        Entry = MiAllocatePoolPages(OriginalType, NumberOfBytes);
        if (!Entry)
        {
#if DBG
            //
            // Out of memory, display current consumption
            // Let's consider that if the caller wanted more
            // than a hundred pages, that's a bogus caller
            // and we are not out of memory. Dump at most
            // once a second to avoid spamming the log.
            //
            if (NumberOfBytes < 100 * PAGE_SIZE &&
                KeQueryInterruptTime() >= MiLastPoolDumpTime + 10000000)
            {
                MiDumpPoolConsumers(FALSE, 0, 0, 0);
                MiLastPoolDumpTime = KeQueryInterruptTime();
            }
#endif

            //
            // Must succeed pool is deprecated, but still supported. These allocation
            // failures must cause an immediate bugcheck
            //
            if (OriginalType & MUST_SUCCEED_POOL_MASK)
            {
                KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                             NumberOfBytes,
                             NonPagedPoolDescriptor.TotalPages,
                             NonPagedPoolDescriptor.TotalBigPages,
                             0);
            }

            //
            // Internal debugging
            //
            ExPoolFailures++;

            //
            // This flag requests printing failures, and can also further specify
            // breaking on failures
            //
            if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
            {
                DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
                        NumberOfBytes,
                        OriginalType);
                if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
            }

            //
            // Finally, this flag requests an exception, which we are more than
            // happy to raise!
            //
            if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
            {
                ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
            }

            return NULL;
        }

        //
        // Increment required counters
        //
        InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
                               (LONG)BYTES_TO_PAGES(NumberOfBytes));
        InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, NumberOfBytes);
        InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);

        //
        // Add a tag for the big page allocation and switch to the generic "BIG"
        // tag if we failed to do so, then insert a tracker for this allocation.
        //
        if (!ExpAddTagForBigPages(Entry,
                                  Tag,
                                  (ULONG)BYTES_TO_PAGES(NumberOfBytes),
                                  OriginalType))
        {
            Tag = ' GIB';
        }
        ExpInsertPoolTracker(Tag, ROUND_TO_PAGES(NumberOfBytes), OriginalType);
        return Entry;
    }

    //
    // Should never request 0 bytes from the pool, but since so many drivers do
    // it, we'll just assume they want 1 byte, based on NT's similar behavior
    //
    if (!NumberOfBytes) NumberOfBytes = 1;

    //
    // A pool allocation is defined by its data, a linked list to connect it to
    // the free list (if necessary), and a pool header to store accounting info.
    // Calculate this size, then convert it into a block size (units of pool
    // headers)
    //
    // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
    // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
    // the direct allocation of pages.
    //
    i = (USHORT)((NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1))
                 / POOL_BLOCK_SIZE);
    ASSERT(i < POOL_LISTS_PER_PAGE);

    //
    // Handle lookaside list optimization for both paged and nonpaged pool
    //
    if (i <= NUMBER_POOL_LOOKASIDE_LISTS)
    {
        //
        // Try popping it from the per-CPU lookaside list
        //
        LookasideList = (PoolType == PagedPool) ?
                        Prcb->PPPagedLookasideList[i - 1].P :
                        Prcb->PPNPagedLookasideList[i - 1].P;
        LookasideList->TotalAllocates++;
        Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
        if (!Entry)
        {
            //
            // We failed, try popping it from the global list
            //
            LookasideList = (PoolType == PagedPool) ?
                            Prcb->PPPagedLookasideList[i - 1].L :
                            Prcb->PPNPagedLookasideList[i - 1].L;
            LookasideList->TotalAllocates++;
            Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
        }

        //
        // If we were able to pop it, update the accounting and return the block
        //
        if (Entry)
        {
            LookasideList->AllocateHits++;

            //
            // Get the real entry, write down its pool type, and track it
            //
            // (The SLIST entry is the data area; the header precedes it)
            //
            Entry--;
            Entry->PoolType = OriginalType + 1;
            ExpInsertPoolTracker(Tag,
                                 Entry->BlockSize * POOL_BLOCK_SIZE,
                                 OriginalType);

            //
            // Return the pool allocation
            //
            Entry->PoolTag = Tag;
            (POOL_FREE_BLOCK(Entry))->Flink = NULL;
            (POOL_FREE_BLOCK(Entry))->Blink = NULL;
            return POOL_FREE_BLOCK(Entry);
        }
    }

    //
    // Loop in the free lists looking for a block of this size. Start with the
    // list optimized for this kind of size lookup
    //
    ListHead = &PoolDesc->ListHeads[i];
    do
    {
        //
        // Are there any free entries available on this list?
        //
        if (!ExpIsPoolListEmpty(ListHead))
        {
            //
            // Acquire the pool lock now
            //
            OldIrql = ExLockPool(PoolDesc);

            //
            // And make sure the list still has entries
            //
            if (ExpIsPoolListEmpty(ListHead))
            {
                //
                // Someone raced us (and won) before we had a chance to acquire
                // the lock
                //
                // Try again!
                //
                ExUnlockPool(PoolDesc, OldIrql);
                continue;
            }

            //
            // Remove a free entry from the list
            // Note that due to the way we insert free blocks into multiple lists
            // there is a guarantee that any block on this list will either be
            // of the correct size, or perhaps larger.
            //
            ExpCheckPoolLinks(ListHead);
            Entry = POOL_ENTRY(ExpRemovePoolHeadList(ListHead));
            ExpCheckPoolLinks(ListHead);
            ExpCheckPoolBlocks(Entry);
            ASSERT(Entry->BlockSize >= i);
            ASSERT(Entry->PoolType == 0);

            //
            // Check if this block is larger that what we need. The block could
            // not possibly be smaller, due to the reason explained above (and
            // we would've asserted on a checked build if this was the case).
            //
            if (Entry->BlockSize != i)
            {
                //
                // Is there an entry before this one?
                //
                if (Entry->PreviousSize == 0)
                {
                    //
                    // There isn't anyone before us, so take the next block and
                    // turn it into a fragment that contains the leftover data
                    // that we don't need to satisfy the caller's request
                    //
                    FragmentEntry = POOL_BLOCK(Entry, i);
                    FragmentEntry->BlockSize = Entry->BlockSize - i;

                    //
                    // And make it point back to us
                    //
                    FragmentEntry->PreviousSize = i;

                    //
                    // Now get the block that follows the new fragment and check
                    // if it's still on the same page as us (and not at the end)
                    //
                    NextEntry = POOL_NEXT_BLOCK(FragmentEntry);
                    if (PAGE_ALIGN(NextEntry) != NextEntry)
                    {
                        //
                        // Adjust this next block to point to our newly created
                        // fragment block
                        //
                        NextEntry->PreviousSize = FragmentEntry->BlockSize;
                    }
                }
                else
                {
                    //
                    // There is a free entry before us, which we know is smaller
                    // so we'll make this entry the fragment instead
                    //
                    FragmentEntry = Entry;

                    //
                    // And then we'll remove from it the actual size required.
                    // Now the entry is a leftover free fragment
                    //
                    Entry->BlockSize -= i;

                    //
                    // Now let's go to the next entry after the fragment (which
                    // used to point to our original free entry) and make it
                    // reference the new fragment entry instead.
                    //
                    // This is the entry that will actually end up holding the
                    // allocation!
                    //
                    Entry = POOL_NEXT_BLOCK(Entry);
                    Entry->PreviousSize = FragmentEntry->BlockSize;

                    //
                    // And now let's go to the entry after that one and check if
                    // it's still on the same page, and not at the end
                    //
                    NextEntry = POOL_BLOCK(Entry, i);
                    if (PAGE_ALIGN(NextEntry) != NextEntry)
                    {
                        //
                        // Make it reference the allocation entry
                        //
                        NextEntry->PreviousSize = i;
                    }
                }

                //
                // Now our (allocation) entry is the right size
                //
                Entry->BlockSize = i;

                //
                // And the next entry is now the free fragment which contains
                // the remaining difference between how big the original entry
                // was, and the actual size the caller needs/requested.
                //
                FragmentEntry->PoolType = 0;
                BlockSize = FragmentEntry->BlockSize;

                //
                // Now check if enough free bytes remained for us to have a
                // "full" entry, which contains enough bytes for a linked list
                // and thus can be used for allocations (up to 8 bytes...)
                //
                ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
                if (BlockSize != 1)
                {
                    //
                    // Insert the free entry into the free list for this size
                    //
                    ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
                                          POOL_FREE_BLOCK(FragmentEntry));
                    ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
                }
            }

            //
            // We have found an entry for this allocation, so set the pool type
            // and release the lock since we're done
            //
            Entry->PoolType = OriginalType + 1;
            ExpCheckPoolBlocks(Entry);
            ExUnlockPool(PoolDesc, OldIrql);

            //
            // Increment required counters
            //
            InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
            InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);

            //
            // Track this allocation
            //
            ExpInsertPoolTracker(Tag,
                                 Entry->BlockSize * POOL_BLOCK_SIZE,
                                 OriginalType);

            //
            // Return the pool allocation
            //
            Entry->PoolTag = Tag;
            (POOL_FREE_BLOCK(Entry))->Flink = NULL;
            (POOL_FREE_BLOCK(Entry))->Blink = NULL;
            return POOL_FREE_BLOCK(Entry);
        }
    } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);

    //
    // There were no free entries left, so we have to allocate a new fresh page
    //
    Entry = MiAllocatePoolPages(OriginalType, PAGE_SIZE);
    if (!Entry)
    {
#if DBG
        //
        // Out of memory, display current consumption
        // Let's consider that if the caller wanted more
        // than a hundred pages, that's a bogus caller
        // and we are not out of memory. Dump at most
        // once a second to avoid spamming the log.
        //
        if (NumberOfBytes < 100 * PAGE_SIZE &&
            KeQueryInterruptTime() >= MiLastPoolDumpTime + 10000000)
        {
            MiDumpPoolConsumers(FALSE, 0, 0, 0);
            MiLastPoolDumpTime = KeQueryInterruptTime();
        }
#endif

        //
        // Must succeed pool is deprecated, but still supported. These allocation
        // failures must cause an immediate bugcheck
        //
        if (OriginalType & MUST_SUCCEED_POOL_MASK)
        {
            KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                         PAGE_SIZE,
                         NonPagedPoolDescriptor.TotalPages,
                         NonPagedPoolDescriptor.TotalBigPages,
                         0);
        }

        //
        // Internal debugging
        //
        ExPoolFailures++;

        //
        // This flag requests printing failures, and can also further specify
        // breaking on failures
        //
        if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
        {
            DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
                    NumberOfBytes,
                    OriginalType);
            if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
        }

        //
        // Finally, this flag requests an exception, which we are more than
        // happy to raise!
        //
        if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
        {
            ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
        }

        //
        // Return NULL to the caller in all other cases
        //
        return NULL;
    }

    //
    // Setup the entry data
    //
    Entry->Ulong1 = 0;
    Entry->BlockSize = i;
    Entry->PoolType = OriginalType + 1;

    //
    // This page will have two entries -- one for the allocation (which we just
    // created above), and one for the remaining free bytes, which we're about
    // to create now. The free bytes are the whole page minus what was allocated
    // and then converted into units of block headers.
    //
    BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i;
    FragmentEntry = POOL_BLOCK(Entry, i);
    FragmentEntry->Ulong1 = 0;
    FragmentEntry->BlockSize = BlockSize;
    FragmentEntry->PreviousSize = i;

    //
    // Increment required counters
    //
    InterlockedIncrement((PLONG)&PoolDesc->TotalPages);
    InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);

    //
    // Now check if enough free bytes remained for us to have a "full" entry,
    // which contains enough bytes for a linked list and thus can be used for
    // allocations (up to 8 bytes...)
    //
    if (FragmentEntry->BlockSize != 1)
    {
        //
        // Excellent -- acquire the pool lock
        //
        OldIrql = ExLockPool(PoolDesc);

        //
        // And insert the free entry into the free list for this block size
        //
        ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
        ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
                              POOL_FREE_BLOCK(FragmentEntry));
        ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));

        //
        // Release the pool lock
        //
        ExpCheckPoolBlocks(Entry);
        ExUnlockPool(PoolDesc, OldIrql);
    }
    else
    {
        //
        // Simply do a sanity check
        //
        ExpCheckPoolBlocks(Entry);
    }

    //
    // Increment performance counters and track this allocation
    //
    InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
    ExpInsertPoolTracker(Tag,
                         Entry->BlockSize * POOL_BLOCK_SIZE,
                         OriginalType);

    //
    // And return the pool allocation
    //
    ExpCheckPoolBlocks(Entry);
    Entry->PoolTag = Tag;
    return POOL_FREE_BLOCK(Entry);
}
/*
 * @implemented
 *
 * Tagless pool allocation entry point. Forwards to ExAllocatePoolWithTag
 * with the default 'None' tag. A disabled debug path can instead derive a
 * tag from the calling driver's name.
 */
PVOID
NTAPI
ExAllocatePool(POOL_TYPE PoolType,
               SIZE_T NumberOfBytes)
{
    ULONG Tag = TAG_NONE;
#if 0 && DBG
    PLDR_DATA_TABLE_ENTRY LdrEntry;

    /* Use the first four letters of the driver name, or "None" if unavailable */
    LdrEntry = KeGetCurrentIrql() <= APC_LEVEL
                ? MiLookupDataTableEntry(_ReturnAddress())
                : NULL;
    if (LdrEntry)
    {
        ULONG i;
        Tag = 0;
        /* Pack up to the first four characters of the DLL name into the tag,
           low byte first */
        for (i = 0; i < min(4, LdrEntry->BaseDllName.Length / sizeof(WCHAR)); i++)
            Tag = Tag >> 8 | (LdrEntry->BaseDllName.Buffer[i] & 0xff) << 24;
        /* Pad short names with spaces */
        for (; i < 4; i++)
            Tag = Tag >> 8 | ' ' << 24;
    }
#endif
    return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
}
2448 ExFreePoolWithTag(IN PVOID P
,
2451 PPOOL_HEADER Entry
, NextEntry
;
2455 PPOOL_DESCRIPTOR PoolDesc
;
2457 BOOLEAN Combined
= FALSE
;
2458 PFN_NUMBER PageCount
, RealPageCount
;
2459 PKPRCB Prcb
= KeGetCurrentPrcb();
2460 PGENERAL_LOOKASIDE LookasideList
;
2464 // Check if any of the debug flags are enabled
2466 if (ExpPoolFlags
& (POOL_FLAG_CHECK_TIMERS
|
2467 POOL_FLAG_CHECK_WORKERS
|
2468 POOL_FLAG_CHECK_RESOURCES
|
2469 POOL_FLAG_VERIFIER
|
2470 POOL_FLAG_CHECK_DEADLOCK
|
2471 POOL_FLAG_SPECIAL_POOL
))
2474 // Check if special pool is enabled
2476 if (ExpPoolFlags
& POOL_FLAG_SPECIAL_POOL
)
2479 // Check if it was allocated from a special pool
2481 if (MmIsSpecialPoolAddress(P
))
2484 // Was deadlock verification also enabled? We can do some extra
2485 // checks at this point
2487 if (ExpPoolFlags
& POOL_FLAG_CHECK_DEADLOCK
)
2489 DPRINT1("Verifier not yet supported\n");
2493 // It is, so handle it via special pool free routine
2495 MmFreeSpecialPool(P
);
2501 // For non-big page allocations, we'll do a bunch of checks in here
2503 if (PAGE_ALIGN(P
) != P
)
2506 // Get the entry for this pool allocation
2507 // The pointer math here may look wrong or confusing, but it is quite right
2513 // Get the pool type
2515 PoolType
= (Entry
->PoolType
- 1) & BASE_POOL_TYPE_MASK
;
2518 // FIXME: Many other debugging checks go here
2520 ExpCheckPoolIrqlLevel(PoolType
, 0, P
);
2525 // Check if this is a big page allocation
2527 if (PAGE_ALIGN(P
) == P
)
2530 // We need to find the tag for it, so first we need to find out what
2531 // kind of allocation this was (paged or nonpaged), then we can go
2532 // ahead and try finding the tag for it. Remember to get rid of the
2533 // PROTECTED_POOL tag if it's found.
2535 // Note that if at insertion time, we failed to add the tag for a big
2536 // pool allocation, we used a special tag called 'BIG' to identify the
2537 // allocation, and we may get this tag back. In this scenario, we must
2538 // manually get the size of the allocation by actually counting through
2539 // the PFN database.
2541 PoolType
= MmDeterminePoolType(P
);
2542 ExpCheckPoolIrqlLevel(PoolType
, 0, P
);
2543 Tag
= ExpFindAndRemoveTagBigPages(P
, &PageCount
, PoolType
);
2546 DPRINT1("We do not know the size of this allocation. This is not yet supported\n");
2547 ASSERT(Tag
== ' GIB');
2548 PageCount
= 1; // We are going to lie! This might screw up accounting?
2550 else if (Tag
& PROTECTED_POOL
)
2552 Tag
&= ~PROTECTED_POOL
;
2558 if (TagToFree
&& TagToFree
!= Tag
)
2560 DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree
, (char*)&Tag
);
2561 KeBugCheckEx(BAD_POOL_CALLER
, 0x0A, (ULONG_PTR
)P
, Tag
, TagToFree
);
2565 // We have our tag and our page count, so we can go ahead and remove this
2568 ExpRemovePoolTracker(Tag
, PageCount
<< PAGE_SHIFT
, PoolType
);
2571 // Check if any of the debug flags are enabled
2573 if (ExpPoolFlags
& (POOL_FLAG_CHECK_TIMERS
|
2574 POOL_FLAG_CHECK_WORKERS
|
2575 POOL_FLAG_CHECK_RESOURCES
|
2576 POOL_FLAG_CHECK_DEADLOCK
))
2579 // Was deadlock verification also enabled? We can do some extra
2580 // checks at this point
2582 if (ExpPoolFlags
& POOL_FLAG_CHECK_DEADLOCK
)
2584 DPRINT1("Verifier not yet supported\n");
2588 // FIXME: Many debugging checks go here
2595 PoolDesc
= PoolVector
[PoolType
];
2596 InterlockedIncrement((PLONG
)&PoolDesc
->RunningDeAllocs
);
2597 InterlockedExchangeAddSizeT(&PoolDesc
->TotalBytes
,
2598 -(LONG_PTR
)(PageCount
<< PAGE_SHIFT
));
2601 // Do the real free now and update the last counter with the big page count
2603 RealPageCount
= MiFreePoolPages(P
);
2604 ASSERT(RealPageCount
== PageCount
);
2605 InterlockedExchangeAdd((PLONG
)&PoolDesc
->TotalBigPages
,
2606 -(LONG
)RealPageCount
);
2611 // Get the entry for this pool allocation
2612 // The pointer math here may look wrong or confusing, but it is quite right
2616 ASSERT((ULONG_PTR
)Entry
% POOL_BLOCK_SIZE
== 0);
2619 // Get the size of the entry, and it's pool type, then load the descriptor
2620 // for this pool type
2622 BlockSize
= Entry
->BlockSize
;
2623 PoolType
= (Entry
->PoolType
- 1) & BASE_POOL_TYPE_MASK
;
2624 PoolDesc
= PoolVector
[PoolType
];
2627 // Make sure that the IRQL makes sense
2629 ExpCheckPoolIrqlLevel(PoolType
, 0, P
);
2632 // Get the pool tag and get rid of the PROTECTED_POOL flag
2634 Tag
= Entry
->PoolTag
;
2635 if (Tag
& PROTECTED_POOL
) Tag
&= ~PROTECTED_POOL
;
2640 if (TagToFree
&& TagToFree
!= Tag
)
2642 DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree
, (char*)&Tag
);
2643 KeBugCheckEx(BAD_POOL_CALLER
, 0x0A, (ULONG_PTR
)P
, Tag
, TagToFree
);
2647 // Track the removal of this allocation
2649 ExpRemovePoolTracker(Tag
,
2650 BlockSize
* POOL_BLOCK_SIZE
,
2651 Entry
->PoolType
- 1);
2654 // Release pool quota, if any
2656 if ((Entry
->PoolType
- 1) & QUOTA_POOL_MASK
)
2658 Process
= ((PVOID
*)POOL_NEXT_BLOCK(Entry
))[-1];
2661 if (Process
->Pcb
.Header
.Type
!= ProcessObject
)
2663 DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
2664 Process
, Process
->Pcb
.Header
.Type
, Entry
->PoolType
, BlockSize
);
2665 KeBugCheckEx(BAD_POOL_CALLER
,
2669 (ULONG_PTR
)Process
);
2671 PsReturnPoolQuota(Process
, PoolType
, BlockSize
* POOL_BLOCK_SIZE
);
2672 ObDereferenceObject(Process
);
2677 // Is this allocation small enough to have come from a lookaside list?
2679 if (BlockSize
<= NUMBER_POOL_LOOKASIDE_LISTS
)
2682 // Try pushing it into the per-CPU lookaside list
2684 LookasideList
= (PoolType
== PagedPool
) ?
2685 Prcb
->PPPagedLookasideList
[BlockSize
- 1].P
:
2686 Prcb
->PPNPagedLookasideList
[BlockSize
- 1].P
;
2687 LookasideList
->TotalFrees
++;
2688 if (ExQueryDepthSList(&LookasideList
->ListHead
) < LookasideList
->Depth
)
2690 LookasideList
->FreeHits
++;
2691 InterlockedPushEntrySList(&LookasideList
->ListHead
, P
);
2696 // We failed, try to push it into the global lookaside list
2698 LookasideList
= (PoolType
== PagedPool
) ?
2699 Prcb
->PPPagedLookasideList
[BlockSize
- 1].L
:
2700 Prcb
->PPNPagedLookasideList
[BlockSize
- 1].L
;
2701 LookasideList
->TotalFrees
++;
2702 if (ExQueryDepthSList(&LookasideList
->ListHead
) < LookasideList
->Depth
)
2704 LookasideList
->FreeHits
++;
2705 InterlockedPushEntrySList(&LookasideList
->ListHead
, P
);
2711 // Get the pointer to the next entry
2713 NextEntry
= POOL_BLOCK(Entry
, BlockSize
);
2716 // Update performance counters
2718 InterlockedIncrement((PLONG
)&PoolDesc
->RunningDeAllocs
);
2719 InterlockedExchangeAddSizeT(&PoolDesc
->TotalBytes
, -BlockSize
* POOL_BLOCK_SIZE
);
2722 // Acquire the pool lock
2724 OldIrql
= ExLockPool(PoolDesc
);
2727 // Check if the next allocation is at the end of the page
2729 ExpCheckPoolBlocks(Entry
);
2730 if (PAGE_ALIGN(NextEntry
) != NextEntry
)
2733 // We may be able to combine the block if it's free
2735 if (NextEntry
->PoolType
== 0)
2738 // The next block is free, so we'll do a combine
2743 // Make sure there's actual data in the block -- anything smaller
2744 // than this means we only have the header, so there's no linked list
2747 if ((NextEntry
->BlockSize
!= 1))
2750 // The block is at least big enough to have a linked list, so go
2751 // ahead and remove it
2753 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry
));
2754 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry
));
2755 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry
))->Flink
));
2756 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry
))->Blink
));
2760 // Our entry is now combined with the next entry
2762 Entry
->BlockSize
= Entry
->BlockSize
+ NextEntry
->BlockSize
;
2767 // Now check if there was a previous entry on the same page as us
2769 if (Entry
->PreviousSize
)
2772 // Great, grab that entry and check if it's free
2774 NextEntry
= POOL_PREV_BLOCK(Entry
);
2775 if (NextEntry
->PoolType
== 0)
2778 // It is, so we can do a combine
2783 // Make sure there's actual data in the block -- anything smaller
2784 // than this means we only have the header so there's no linked list
2787 if ((NextEntry
->BlockSize
!= 1))
2790 // The block is at least big enough to have a linked list, so go
2791 // ahead and remove it
2793 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry
));
2794 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry
));
2795 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry
))->Flink
));
2796 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry
))->Blink
));
2800 // Combine our original block (which might've already been combined
2801 // with the next block), into the previous block
2803 NextEntry
->BlockSize
= NextEntry
->BlockSize
+ Entry
->BlockSize
;
2806 // And now we'll work with the previous block instead
2813 // By now, it may have been possible for our combined blocks to actually
2814 // have made up a full page (if there were only 2-3 allocations on the
2815 // page, they could've all been combined).
2817 if ((PAGE_ALIGN(Entry
) == Entry
) &&
2818 (PAGE_ALIGN(POOL_NEXT_BLOCK(Entry
)) == POOL_NEXT_BLOCK(Entry
)))
2821 // In this case, release the pool lock, update the performance counter,
2822 // and free the page
2824 ExUnlockPool(PoolDesc
, OldIrql
);
2825 InterlockedExchangeAdd((PLONG
)&PoolDesc
->TotalPages
, -1);
2826 MiFreePoolPages(Entry
);
2831 // Otherwise, we now have a free block (or a combination of 2 or 3)
2833 Entry
->PoolType
= 0;
2834 BlockSize
= Entry
->BlockSize
;
2835 ASSERT(BlockSize
!= 1);
2838 // Check if we actually did combine it with anyone
2843 // Get the first combined block (either our original to begin with, or
2844 // the one after the original, depending if we combined with the previous)
2846 NextEntry
= POOL_NEXT_BLOCK(Entry
);
2849 // As long as the next block isn't on a page boundary, have it point
2852 if (PAGE_ALIGN(NextEntry
) != NextEntry
) NextEntry
->PreviousSize
= BlockSize
;
2856 // Insert this new free block, and release the pool lock
2858 ExpInsertPoolHeadList(&PoolDesc
->ListHeads
[BlockSize
- 1], POOL_FREE_BLOCK(Entry
));
2859 ExpCheckPoolLinks(POOL_FREE_BLOCK(Entry
));
2860 ExUnlockPool(PoolDesc
, OldIrql
);
2871 // Just free without checking for the tag
2873 ExFreePoolWithTag(P
, 0);
2881 ExQueryPoolBlockSize(IN PVOID PoolBlock
,
2882 OUT PBOOLEAN QuotaCharged
)
2897 ExAllocatePoolWithQuota(IN POOL_TYPE PoolType
,
2898 IN SIZE_T NumberOfBytes
)
2901 // Allocate the pool
2903 return ExAllocatePoolWithQuotaTag(PoolType
, NumberOfBytes
, TAG_NONE
);
2911 ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType
,
2912 IN SIZE_T NumberOfBytes
,
2914 IN EX_POOL_PRIORITY Priority
)
2919 // Allocate the pool
2921 Buffer
= ExAllocatePoolWithTag(PoolType
, NumberOfBytes
, Tag
);
2935 ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType
,
2936 IN SIZE_T NumberOfBytes
,
2939 BOOLEAN Raise
= TRUE
;
2943 PEPROCESS Process
= PsGetCurrentProcess();
2946 // Check if we should fail instead of raising an exception
2948 if (PoolType
& POOL_QUOTA_FAIL_INSTEAD_OF_RAISE
)
2951 PoolType
&= ~POOL_QUOTA_FAIL_INSTEAD_OF_RAISE
;
2955 // Inject the pool quota mask
2957 PoolType
+= QUOTA_POOL_MASK
;
2960 // Check if we have enough space to add the quota owner process, as long as
2961 // this isn't the system process, which never gets charged quota
2963 ASSERT(NumberOfBytes
!= 0);
2964 if ((NumberOfBytes
<= (PAGE_SIZE
- POOL_BLOCK_SIZE
- sizeof(PVOID
))) &&
2965 (Process
!= PsInitialSystemProcess
))
2968 // Add space for our EPROCESS pointer
2970 NumberOfBytes
+= sizeof(PEPROCESS
);
2975 // We won't be able to store the pointer, so don't use quota for this
2977 PoolType
-= QUOTA_POOL_MASK
;
2981 // Allocate the pool buffer now
2983 Buffer
= ExAllocatePoolWithTag(PoolType
, NumberOfBytes
, Tag
);
2986 // If the buffer is page-aligned, this is a large page allocation and we
2989 if (PAGE_ALIGN(Buffer
) != Buffer
)
2992 // Also if special pool is enabled, and this was allocated from there,
2993 // we won't touch it either
2995 if ((ExpPoolFlags
& POOL_FLAG_SPECIAL_POOL
) &&
2996 (MmIsSpecialPoolAddress(Buffer
)))
3002 // If it wasn't actually allocated with quota charges, ignore it too
3004 if (!(PoolType
& QUOTA_POOL_MASK
)) return Buffer
;
3007 // If this is the system process, we don't charge quota, so ignore
3009 if (Process
== PsInitialSystemProcess
) return Buffer
;
3012 // Actually go and charge quota for the process now
3014 Entry
= POOL_ENTRY(Buffer
);
3015 Status
= PsChargeProcessPoolQuota(Process
,
3016 PoolType
& BASE_POOL_TYPE_MASK
,
3017 Entry
->BlockSize
* POOL_BLOCK_SIZE
);
3018 if (!NT_SUCCESS(Status
))
3021 // Quota failed, back out the allocation, clear the owner, and fail
3023 ((PVOID
*)POOL_NEXT_BLOCK(Entry
))[-1] = NULL
;
3024 ExFreePoolWithTag(Buffer
, Tag
);
3025 if (Raise
) RtlRaiseStatus(Status
);
3030 // Quota worked, write the owner and then reference it before returning
3032 ((PVOID
*)POOL_NEXT_BLOCK(Entry
))[-1] = Process
;
3033 ObReferenceObject(Process
);
3035 else if (!(Buffer
) && (Raise
))
3038 // The allocation failed, raise an error if we are in raise mode
3040 RtlRaiseStatus(STATUS_INSUFFICIENT_RESOURCES
);
3044 // Return the allocated buffer