[NTOSKRNL] Print tags which contain numerics in the memory dumper.
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/expool.c
5 * PURPOSE: ARM Memory Manager Executive Pool Manager
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 #define MODULE_INVOLVED_IN_ARM3
16 #include <mm/ARM3/miarm.h>
17
18 #undef ExAllocatePoolWithQuota
19 #undef ExAllocatePoolWithQuotaTag
20
21 /* GLOBALS ********************************************************************/
22
23 #define POOL_BIG_TABLE_ENTRY_FREE 0x1
24
25 typedef struct _POOL_DPC_CONTEXT
26 {
27 PPOOL_TRACKER_TABLE PoolTrackTable;
28 SIZE_T PoolTrackTableSize;
29 PPOOL_TRACKER_TABLE PoolTrackTableExpansion;
30 SIZE_T PoolTrackTableSizeExpansion;
31 } POOL_DPC_CONTEXT, *PPOOL_DPC_CONTEXT;
32
33 ULONG ExpNumberOfPagedPools;
34 POOL_DESCRIPTOR NonPagedPoolDescriptor;
35 PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16 + 1];
36 PPOOL_DESCRIPTOR PoolVector[2];
37 PKGUARDED_MUTEX ExpPagedPoolMutex;
38 SIZE_T PoolTrackTableSize, PoolTrackTableMask;
39 SIZE_T PoolBigPageTableSize, PoolBigPageTableHash;
40 PPOOL_TRACKER_TABLE PoolTrackTable;
41 PPOOL_TRACKER_BIG_PAGES PoolBigPageTable;
42 KSPIN_LOCK ExpTaggedPoolLock;
43 ULONG PoolHitTag;
44 BOOLEAN ExStopBadTags;
45 KSPIN_LOCK ExpLargePoolTableLock;
46 ULONG ExpPoolBigEntriesInUse;
47 ULONG ExpPoolFlags;
48 ULONG ExPoolFailures;
49
50 /* Pool block/header/list access macros */
51 #define POOL_ENTRY(x) (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
52 #define POOL_FREE_BLOCK(x) (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
53 #define POOL_BLOCK(x, i) (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
54 #define POOL_NEXT_BLOCK(x) POOL_BLOCK((x), (x)->BlockSize)
55 #define POOL_PREV_BLOCK(x) POOL_BLOCK((x), -((x)->PreviousSize))
56
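//
// Illustrative sketch (comment only, not built): the macros above let the
// validation code below walk every block in a pool page. "FirstBlock" is a
// hypothetical header somewhere inside a page:
//
//     PPOOL_HEADER Block = (PPOOL_HEADER)PAGE_ALIGN(FirstBlock);
//     while (PAGE_ALIGN(POOL_NEXT_BLOCK(Block)) != POOL_NEXT_BLOCK(Block))
//     {
//         Block = POOL_NEXT_BLOCK(Block); // advance by BlockSize units
//     }
//
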
57 /*
58 * Pool list access debug macros, similar to Arthur's pfnlist.c work.
59 * Microsoft actually implements similar checks in the Windows Server 2003 SP1
60 * pool code, but only for checked builds.
61 *
62 * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
63 * that these checks are done even on retail builds, due to the increasing
64 * number of kernel-mode attacks which depend on dangling list pointers and other
65 * kinds of list-based attacks.
66 *
67 * For now, I will leave these checks on all the time, but later they are likely
68 * to be DBG-only, at least until there are enough kernel-mode security attacks
69 * against ReactOS to warrant the performance hit.
70 *
71 * For now, these are not made inline, so we can get good stack traces.
72 */
73 PLIST_ENTRY
74 NTAPI
75 ExpDecodePoolLink(IN PLIST_ENTRY Link)
76 {
77 return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
78 }
79
80 PLIST_ENTRY
81 NTAPI
82 ExpEncodePoolLink(IN PLIST_ENTRY Link)
83 {
84 return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
85 }
86
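//
// Illustrative sketch: the encode routine sets bit 0 of a link pointer and
// the decode routine strips it back off, so for any properly aligned Entry
// the pair round-trips:
//
//     PLIST_ENTRY Encoded = ExpEncodePoolLink(Entry); // low bit now set
//     ASSERT(ExpDecodePoolLink(Encoded) == Entry);    // original recovered
//
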
87 VOID
88 NTAPI
89 ExpCheckPoolLinks(IN PLIST_ENTRY ListHead)
90 {
91 if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink) != ListHead) ||
92 (ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink) != ListHead))
93 {
94 KeBugCheckEx(BAD_POOL_HEADER,
95 3,
96 (ULONG_PTR)ListHead,
97 (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink),
98 (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink));
99 }
100 }
101
102 VOID
103 NTAPI
104 ExpInitializePoolListHead(IN PLIST_ENTRY ListHead)
105 {
106 ListHead->Flink = ListHead->Blink = ExpEncodePoolLink(ListHead);
107 }
108
109 BOOLEAN
110 NTAPI
111 ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)
112 {
113 return (ExpDecodePoolLink(ListHead->Flink) == ListHead);
114 }
115
116 VOID
117 NTAPI
118 ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
119 {
120 PLIST_ENTRY Blink, Flink;
121 Flink = ExpDecodePoolLink(Entry->Flink);
122 Blink = ExpDecodePoolLink(Entry->Blink);
123 Flink->Blink = ExpEncodePoolLink(Blink);
124 Blink->Flink = ExpEncodePoolLink(Flink);
125 }
126
127 PLIST_ENTRY
128 NTAPI
129 ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
130 {
131 PLIST_ENTRY Entry, Flink;
132 Entry = ExpDecodePoolLink(ListHead->Flink);
133 Flink = ExpDecodePoolLink(Entry->Flink);
134 ListHead->Flink = ExpEncodePoolLink(Flink);
135 Flink->Blink = ExpEncodePoolLink(ListHead);
136 return Entry;
137 }
138
139 PLIST_ENTRY
140 NTAPI
141 ExpRemovePoolTailList(IN PLIST_ENTRY ListHead)
142 {
143 PLIST_ENTRY Entry, Blink;
144 Entry = ExpDecodePoolLink(ListHead->Blink);
145 Blink = ExpDecodePoolLink(Entry->Blink);
146 ListHead->Blink = ExpEncodePoolLink(Blink);
147 Blink->Flink = ExpEncodePoolLink(ListHead);
148 return Entry;
149 }
150
151 VOID
152 NTAPI
153 ExpInsertPoolTailList(IN PLIST_ENTRY ListHead,
154 IN PLIST_ENTRY Entry)
155 {
156 PLIST_ENTRY Blink;
157 ExpCheckPoolLinks(ListHead);
158 Blink = ExpDecodePoolLink(ListHead->Blink);
159 Entry->Flink = ExpEncodePoolLink(ListHead);
160 Entry->Blink = ExpEncodePoolLink(Blink);
161 Blink->Flink = ExpEncodePoolLink(Entry);
162 ListHead->Blink = ExpEncodePoolLink(Entry);
163 ExpCheckPoolLinks(ListHead);
164 }
165
166 VOID
167 NTAPI
168 ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead,
169 IN PLIST_ENTRY Entry)
170 {
171 PLIST_ENTRY Flink;
172 ExpCheckPoolLinks(ListHead);
173 Flink = ExpDecodePoolLink(ListHead->Flink);
174 Entry->Flink = ExpEncodePoolLink(Flink);
175 Entry->Blink = ExpEncodePoolLink(ListHead);
176 Flink->Blink = ExpEncodePoolLink(Entry);
177 ListHead->Flink = ExpEncodePoolLink(Entry);
178 ExpCheckPoolLinks(ListHead);
179 }
180
181 VOID
182 NTAPI
183 ExpCheckPoolHeader(IN PPOOL_HEADER Entry)
184 {
185 PPOOL_HEADER PreviousEntry, NextEntry;
186
187 /* Is there a block before this one? */
188 if (Entry->PreviousSize)
189 {
190 /* Get it */
191 PreviousEntry = POOL_PREV_BLOCK(Entry);
192
193 /* The two blocks must be on the same page! */
194 if (PAGE_ALIGN(Entry) != PAGE_ALIGN(PreviousEntry))
195 {
196 /* Something is awry */
197 KeBugCheckEx(BAD_POOL_HEADER,
198 6,
199 (ULONG_PTR)PreviousEntry,
200 __LINE__,
201 (ULONG_PTR)Entry);
202 }
203
204 /* This block should also indicate that it's as large as we think it is */
205 if (PreviousEntry->BlockSize != Entry->PreviousSize)
206 {
207 /* Otherwise, someone corrupted one of the sizes */
208 DPRINT1("PreviousEntry BlockSize %lu, tag %.4s. Entry PreviousSize %lu, tag %.4s\n",
209 PreviousEntry->BlockSize, (char *)&PreviousEntry->PoolTag,
210 Entry->PreviousSize, (char *)&Entry->PoolTag);
211 KeBugCheckEx(BAD_POOL_HEADER,
212 5,
213 (ULONG_PTR)PreviousEntry,
214 __LINE__,
215 (ULONG_PTR)Entry);
216 }
217 }
218 else if (PAGE_ALIGN(Entry) != Entry)
219 {
220 /* If there's no block before us, we are the first block, so we should be on a page boundary */
221 KeBugCheckEx(BAD_POOL_HEADER,
222 7,
223 0,
224 __LINE__,
225 (ULONG_PTR)Entry);
226 }
227
228 /* This block must have a size */
229 if (!Entry->BlockSize)
230 {
231 /* Someone must've corrupted this field */
232 if (Entry->PreviousSize)
233 {
234 PreviousEntry = POOL_PREV_BLOCK(Entry);
235 DPRINT1("PreviousEntry tag %.4s. Entry tag %.4s\n",
236 (char *)&PreviousEntry->PoolTag,
237 (char *)&Entry->PoolTag);
238 }
239 else
240 {
241 DPRINT1("Entry tag %.4s\n",
242 (char *)&Entry->PoolTag);
243 }
244 KeBugCheckEx(BAD_POOL_HEADER,
245 8,
246 0,
247 __LINE__,
248 (ULONG_PTR)Entry);
249 }
250
251 /* Okay, now get the next block */
252 NextEntry = POOL_NEXT_BLOCK(Entry);
253
254 /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
255 if (PAGE_ALIGN(NextEntry) != NextEntry)
256 {
257 /* The two blocks must be on the same page! */
258 if (PAGE_ALIGN(Entry) != PAGE_ALIGN(NextEntry))
259 {
260 /* Something is messed up */
261 KeBugCheckEx(BAD_POOL_HEADER,
262 9,
263 (ULONG_PTR)NextEntry,
264 __LINE__,
265 (ULONG_PTR)Entry);
266 }
267
268 /* And this block should think we are as large as we truly are */
269 if (NextEntry->PreviousSize != Entry->BlockSize)
270 {
271 /* Otherwise, someone corrupted the field */
272 DPRINT1("Entry BlockSize %lu, tag %.4s. NextEntry PreviousSize %lu, tag %.4s\n",
273 Entry->BlockSize, (char *)&Entry->PoolTag,
274 NextEntry->PreviousSize, (char *)&NextEntry->PoolTag);
275 KeBugCheckEx(BAD_POOL_HEADER,
276 5,
277 (ULONG_PTR)NextEntry,
278 __LINE__,
279 (ULONG_PTR)Entry);
280 }
281 }
282 }
283
284 VOID
285 NTAPI
286 ExpCheckPoolAllocation(
287 PVOID P,
288 POOL_TYPE PoolType,
289 ULONG Tag)
290 {
291 PPOOL_HEADER Entry;
292 ULONG i;
293 KIRQL OldIrql;
294 POOL_TYPE RealPoolType;
295
296 /* Get the pool header */
297 Entry = ((PPOOL_HEADER)P) - 1;
298
299 /* Check if this is a large allocation */
300 if (PAGE_ALIGN(P) == P)
301 {
302 /* Lock the pool table */
303 KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
304
305 /* Find the pool tag */
306 for (i = 0; i < PoolBigPageTableSize; i++)
307 {
308 /* Check if this is our allocation */
309 if (PoolBigPageTable[i].Va == P)
310 {
311 /* Make sure the tag is ok */
312 if (PoolBigPageTable[i].Key != Tag)
313 {
314 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, PoolBigPageTable[i].Key, Tag);
315 }
316
317 break;
318 }
319 }
320
321 /* Release the lock */
322 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
323
324 if (i == PoolBigPageTableSize)
325 {
326 /* Did not find the allocation */
327 //ASSERT(FALSE);
328 }
329
330 /* Get Pool type by address */
331 RealPoolType = MmDeterminePoolType(P);
332 }
333 else
334 {
335 /* Verify the tag */
336 if (Entry->PoolTag != Tag)
337 {
338 DPRINT1("Allocation has wrong pool tag! Expected '%.4s', got '%.4s' (0x%08lx)\n",
339 &Tag, &Entry->PoolTag, Entry->PoolTag);
340 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Entry->PoolTag, Tag);
341 }
342
343 /* Check the rest of the header */
344 ExpCheckPoolHeader(Entry);
345
346 /* Get Pool type from entry */
347 RealPoolType = (Entry->PoolType - 1);
348 }
349
350 /* Should we check the pool type? */
351 if (PoolType != -1)
352 {
353 /* Verify the pool type */
354 if (RealPoolType != PoolType)
355 {
356 DPRINT1("Wrong pool type! Expected %s, got %s\n",
357 PoolType & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool",
358                     RealPoolType & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool");
359 KeBugCheckEx(BAD_POOL_CALLER, 0xCC, (ULONG_PTR)P, Entry->PoolTag, Tag);
360 }
361 }
362 }
363
364 VOID
365 NTAPI
366 ExpCheckPoolBlocks(IN PVOID Block)
367 {
368 BOOLEAN FoundBlock = FALSE;
369 SIZE_T Size = 0;
370 PPOOL_HEADER Entry;
371
372 /* Get the first entry for this page, make sure it really is the first */
373 Entry = PAGE_ALIGN(Block);
374 ASSERT(Entry->PreviousSize == 0);
375
376 /* Now scan each entry */
377 while (TRUE)
378 {
379 /* When we actually found our block, remember this */
380 if (Entry == Block) FoundBlock = TRUE;
381
382 /* Now validate this block header */
383 ExpCheckPoolHeader(Entry);
384
385 /* And go to the next one, keeping track of our size */
386 Size += Entry->BlockSize;
387 Entry = POOL_NEXT_BLOCK(Entry);
388
389 /* If we hit the last block, stop */
390 if (Size >= (PAGE_SIZE / POOL_BLOCK_SIZE)) break;
391
392 /* If we hit the end of the page, stop */
393 if (PAGE_ALIGN(Entry) == Entry) break;
394 }
395
396 /* We must've found our block, and we must have hit the end of the page */
397 if ((PAGE_ALIGN(Entry) != Entry) || !(FoundBlock))
398 {
399 /* Otherwise, the blocks are messed up */
400 KeBugCheckEx(BAD_POOL_HEADER, 10, (ULONG_PTR)Block, __LINE__, (ULONG_PTR)Entry);
401 }
402 }
403
404 FORCEINLINE
405 VOID
406 ExpCheckPoolIrqlLevel(IN POOL_TYPE PoolType,
407 IN SIZE_T NumberOfBytes,
408 IN PVOID Entry)
409 {
410 //
411 // Validate IRQL: It must be APC_LEVEL or lower for Paged Pool, and it must
412 // be DISPATCH_LEVEL or lower for Non Paged Pool
413 //
414 if (((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) ?
415 (KeGetCurrentIrql() > APC_LEVEL) :
416 (KeGetCurrentIrql() > DISPATCH_LEVEL))
417 {
418 //
419 // Take the system down
420 //
421 KeBugCheckEx(BAD_POOL_CALLER,
422 !Entry ? POOL_ALLOC_IRQL_INVALID : POOL_FREE_IRQL_INVALID,
423 KeGetCurrentIrql(),
424 PoolType,
425 !Entry ? NumberOfBytes : (ULONG_PTR)Entry);
426 }
427 }
428
429 FORCEINLINE
430 ULONG
431 ExpComputeHashForTag(IN ULONG Tag,
432 IN SIZE_T BucketMask)
433 {
434 //
435 // Compute the hash by multiplying with a large prime number and then XORing
436 // with the HIDWORD of the result.
437 //
438 // Finally, AND with the bucket mask to generate a valid index/bucket into
439 // the table
440 //
441 ULONGLONG Result = (ULONGLONG)40543 * Tag;
442 return (ULONG)BucketMask & ((ULONG)Result ^ (Result >> 32));
443 }
444
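//
// Usage sketch (illustrative): because the mask is one less than a power of
// two, the result is always a valid bucket index into the tracker table:
//
//     ULONG Bucket = ExpComputeHashForTag('looP', PoolTrackTableMask);
//     ASSERT(Bucket <= PoolTrackTableMask);
//
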
445 FORCEINLINE
446 ULONG
447 ExpComputePartialHashForAddress(IN PVOID BaseAddress)
448 {
449 ULONG Result;
450 //
451 // Compute the hash by converting the address into a page number, and then
452 // XORing each nibble with the next one.
453 //
454 // We do *NOT* AND with the bucket mask at this point because big table expansion
455 // might happen. Therefore, the final step of the hash must be performed
456 // while holding the expansion pushlock, and this is why we call this a
457 // "partial" hash only.
458 //
459 Result = (ULONG)((ULONG_PTR)BaseAddress >> PAGE_SHIFT);
460 return (Result >> 24) ^ (Result >> 16) ^ (Result >> 8) ^ Result;
461 }
462
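//
// Usage sketch (illustrative), mirroring ExpAddTagForBigPages below -- the
// masking step is deliberately deferred until the table lock is held:
//
//     Hash = ExpComputePartialHashForAddress(Va);
//     KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
//     Hash &= PoolBigPageTableHash; // final step, safe against expansion
//
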
463 #if DBG
464 FORCEINLINE
465 BOOLEAN
466 ExpTagAllowPrint(CHAR Tag)
467 {
468 if ((Tag >= 'a' && Tag <= 'z') ||
469 (Tag >= 'A' && Tag <= 'Z') ||
470 (Tag >= '0' && Tag <= '9') ||
471 Tag == ' ')
472 {
473 return TRUE;
474 }
475
476 return FALSE;
477 }
478
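//
// With digits now accepted, a tag such as '0ftN' (the bytes "Ntf0" in
// memory) is pretty-printed by the dumper below, while a tag containing any
// other non-printable byte still falls back to the raw hexadecimal path.
//
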
479 #ifdef KDBG
480 #define MiDumperPrint(dbg, fmt, ...) \
481 if (dbg) KdbpPrint(fmt, ##__VA_ARGS__); \
482 else DPRINT1(fmt, ##__VA_ARGS__)
483 #else
484 #define MiDumperPrint(dbg, fmt, ...) \
485 DPRINT1(fmt, ##__VA_ARGS__)
486 #endif
487
488 VOID
489 MiDumpPoolConsumers(BOOLEAN CalledFromDbg, ULONG Tag, ULONG Mask, ULONG Flags)
490 {
491 SIZE_T i;
492 BOOLEAN Verbose;
493
494 //
495 // Only print header if called from OOM situation
496 //
497 if (!CalledFromDbg)
498 {
499 DPRINT1("---------------------\n");
500 DPRINT1("Out of memory dumper!\n");
501 }
502 #ifdef KDBG
503 else
504 {
505 KdbpPrint("Pool Used:\n");
506 }
507 #endif
508
509 //
510 // Remember whether we'll have to be verbose
511 // This is the only supported flag!
512 //
513 Verbose = BooleanFlagOn(Flags, 1);
514
515 //
516 // Print table header
517 //
518 if (Verbose)
519 {
520 MiDumperPrint(CalledFromDbg, "\t\t\t\tNonPaged\t\t\t\t\t\t\tPaged\n");
521 MiDumperPrint(CalledFromDbg, "Tag\t\tAllocs\t\tFrees\t\tDiff\t\tUsed\t\tAllocs\t\tFrees\t\tDiff\t\tUsed\n");
522 }
523 else
524 {
525 MiDumperPrint(CalledFromDbg, "\t\tNonPaged\t\t\tPaged\n");
526 MiDumperPrint(CalledFromDbg, "Tag\t\tAllocs\t\tUsed\t\tAllocs\t\tUsed\n");
527 }
528
529 //
530 // We'll extract allocations for all the tracked pools
531 //
532 for (i = 0; i < PoolTrackTableSize; ++i)
533 {
534 PPOOL_TRACKER_TABLE TableEntry;
535
536 TableEntry = &PoolTrackTable[i];
537
538 //
539 // We only care about tags which have allocated memory
540 //
541 if (TableEntry->NonPagedBytes != 0 || TableEntry->PagedBytes != 0)
542 {
543 //
544 // If there's a tag, attempt to do a pretty print
545 // only if it matches the caller's tag, or if
546 // any tag is allowed
547 // For checking whether it matches caller's tag,
548 // use the mask to make sure not to mess with the wildcards
549 //
550 if (TableEntry->Key != 0 && TableEntry->Key != TAG_NONE &&
551 (Tag == 0 || (TableEntry->Key & Mask) == (Tag & Mask)))
552 {
553                 CHAR TagChars[4];
554
555 //
556 // Extract each 'component' and check whether they are printable
557 //
558                 TagChars[0] = TableEntry->Key & 0xFF;
559                 TagChars[1] = TableEntry->Key >> 8 & 0xFF;
560                 TagChars[2] = TableEntry->Key >> 16 & 0xFF;
561                 TagChars[3] = TableEntry->Key >> 24 & 0xFF;
562
563                 if (ExpTagAllowPrint(TagChars[0]) && ExpTagAllowPrint(TagChars[1]) && ExpTagAllowPrint(TagChars[2]) && ExpTagAllowPrint(TagChars[3]))
564 {
565 //
566 // Print in direct order to make !poolused TAG usage easier
567 //
568 if (Verbose)
569 {
570                         MiDumperPrint(CalledFromDbg, "'%c%c%c%c'\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TagChars[0], TagChars[1], TagChars[2], TagChars[3],
571 TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
572 (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
573 TableEntry->PagedAllocs, TableEntry->PagedFrees,
574 (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
575 }
576 else
577 {
578                         MiDumperPrint(CalledFromDbg, "'%c%c%c%c'\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TagChars[0], TagChars[1], TagChars[2], TagChars[3],
579 TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
580 TableEntry->PagedAllocs, TableEntry->PagedBytes);
581 }
582 }
583 else
584 {
585 if (Verbose)
586 {
587 MiDumperPrint(CalledFromDbg, "%x\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TableEntry->Key,
588 TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
589 (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
590 TableEntry->PagedAllocs, TableEntry->PagedFrees,
591 (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
592 }
593 else
594 {
595 MiDumperPrint(CalledFromDbg, "%x\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TableEntry->Key,
596 TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
597 TableEntry->PagedAllocs, TableEntry->PagedBytes);
598 }
599 }
600 }
601 else if (Tag == 0 || (Tag & Mask) == (TAG_NONE & Mask))
602 {
603 if (Verbose)
604 {
605 MiDumperPrint(CalledFromDbg, "Anon\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n",
606 TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
607 (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
608 TableEntry->PagedAllocs, TableEntry->PagedFrees,
609 (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
610 }
611 else
612 {
613 MiDumperPrint(CalledFromDbg, "Anon\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n",
614 TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
615 TableEntry->PagedAllocs, TableEntry->PagedBytes);
616 }
617 }
618 }
619 }
620
621 if (!CalledFromDbg)
622 {
623 DPRINT1("---------------------\n");
624 }
625 }
626 #endif
627
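//
// Illustrative invocations of the dumper (DBG builds only; passing 1 in
// Flags requests the verbose table, and the mask selects which tag bytes
// must match):
//
//     MiDumpPoolConsumers(TRUE, 0, 0, 1);              // every tag, verbose
//     MiDumpPoolConsumers(TRUE, 'eliF', 0xFFFFFFFF, 0); // only the "File" tag
//
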
628 /* PRIVATE FUNCTIONS **********************************************************/
629
630 VOID
631 NTAPI
632 INIT_SECTION
633 ExpSeedHotTags(VOID)
634 {
635 ULONG i, Key, Hash, Index;
636 PPOOL_TRACKER_TABLE TrackTable = PoolTrackTable;
637 ULONG TagList[] =
638 {
639 '  oI',
640 ' laH',
641 'PldM',
642 'LooP',
643 'tSbO',
644 ' prI',
645 'bdDN',
646 'LprI',
647 'pOoI',
648 ' ldM',
649 'eliF',
650 'aVMC',
651 'dSeS',
652 'CFtN',
653 'looP',
654 'rPCT',
655 'bNMC',
656 'dTeS',
657 'sFtN',
658 'TPCT',
659 'CPCT',
660 ' yeK',
661 'qSbO',
662 'mNoI',
663 'aEoI',
664 'cPCT',
665 'aFtN',
666 '0ftN',
667 'tceS',
668 'SprI',
669 'ekoT',
670 '  eS',
671 'lCbO',
672 'cScC',
673 'lFtN',
674 'cAeS',
675 'mfSF',
676 'kWcC',
677 'miSF',
678 'CdfA',
679 'EdfA',
680 'orSF',
681 'nftN',
682 'PRIU',
683 'rFpN',
684 'RFpN',
685 'aPeS',
686 'sUeS',
687 'FpcA',
688 'MpcA',
689 'cSeS',
690 'mNbO',
691 'sFpN',
692 'uLeS',
693 'DPcS',
694 'nevE',
695 'vrqR',
696 'ldaV',
697 '  pP',
698 'SdaV',
699 ' daV',
700 'LdaV',
701 'FdaV',
702 ' GIB',
703 };
704
705 //
706 // Loop all 64 hot tags
707 //
708 ASSERT((sizeof(TagList) / sizeof(ULONG)) == 64);
709 for (i = 0; i < sizeof(TagList) / sizeof(ULONG); i++)
710 {
711 //
712 // Get the current tag, and compute its hash in the tracker table
713 //
714 Key = TagList[i];
715 Hash = ExpComputeHashForTag(Key, PoolTrackTableMask);
716
717 //
718 // Loop all the hashes in this index/bucket
719 //
720 Index = Hash;
721 while (TRUE)
722 {
723 //
724 // Find an empty entry, and make sure this isn't the last hash that
725 // can fit.
726 //
727 // On checked builds, also make sure this is the first time we are
728 // seeding this tag.
729 //
730 ASSERT(TrackTable[Hash].Key != Key);
731 if (!(TrackTable[Hash].Key) && (Hash != PoolTrackTableSize - 1))
732 {
733 //
734 // It has been seeded, move on to the next tag
735 //
736 TrackTable[Hash].Key = Key;
737 break;
738 }
739
740 //
741 // This entry was already taken, compute the next possible hash while
742 // making sure we're not back at our initial index.
743 //
744 ASSERT(TrackTable[Hash].Key != Key);
745 Hash = (Hash + 1) & PoolTrackTableMask;
746 if (Hash == Index) break;
747 }
748 }
749 }
750
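//
// Note on the constants above: a pool tag is the ULONG whose bytes spell
// the tag in memory, so on a little-endian build 'looP' is the "Pool" tag,
// 'eliF' is "File", and ' GIB' is the generic "BIG " tag used further below.
//
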
751 VOID
752 NTAPI
753 ExpRemovePoolTracker(IN ULONG Key,
754 IN SIZE_T NumberOfBytes,
755 IN POOL_TYPE PoolType)
756 {
757 ULONG Hash, Index;
758 PPOOL_TRACKER_TABLE Table, TableEntry;
759 SIZE_T TableMask, TableSize;
760
761 //
762 // Remove the PROTECTED_POOL flag which is not part of the tag
763 //
764 Key &= ~PROTECTED_POOL;
765
766 //
767 // With WinDBG you can set a tag you want to break on when an allocation is
768 // attempted
769 //
770 if (Key == PoolHitTag) DbgBreakPoint();
771
772 //
773 // Why the double indirection? Because normally this function is also used
774 // when doing session pool allocations, which use another set of tables,
775 // sizes, and masks that live in session pool. We don't support session
776 // pool yet, so we only ever use the regular tables, but I'm keeping the code this
777 // way so that the day we DO support session pool, it won't require that
778 // many changes
779 //
780 Table = PoolTrackTable;
781 TableMask = PoolTrackTableMask;
782 TableSize = PoolTrackTableSize;
783 DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize);
784
785 //
786 // Compute the hash for this key, and loop all the possible buckets
787 //
788 Hash = ExpComputeHashForTag(Key, TableMask);
789 Index = Hash;
790 while (TRUE)
791 {
792 //
793 // Have we found the entry for this tag?
794 //
795 TableEntry = &Table[Hash];
796 if (TableEntry->Key == Key)
797 {
798 //
799 // Decrement the counters depending on if this was paged or nonpaged
800 // pool
801 //
802 if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
803 {
804 InterlockedIncrement(&TableEntry->NonPagedFrees);
805 InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes,
806 -(SSIZE_T)NumberOfBytes);
807 return;
808 }
809 InterlockedIncrement(&TableEntry->PagedFrees);
810 InterlockedExchangeAddSizeT(&TableEntry->PagedBytes,
811 -(SSIZE_T)NumberOfBytes);
812 return;
813 }
814
815 //
816 // We should have only ended up with an empty entry if we've reached
817 // the last bucket
818 //
819 if (!TableEntry->Key)
820 {
821 DPRINT1("Empty item reached in tracker table. Hash=0x%lx, TableMask=0x%lx, Tag=0x%08lx, NumberOfBytes=%lu, PoolType=%d\n",
822 Hash, TableMask, Key, (ULONG)NumberOfBytes, PoolType);
823 ASSERT(Hash == TableMask);
824 }
825
826 //
827 // This path is hit when we don't have an entry, and the current bucket
828 // is full, so we simply try the next one
829 //
830 Hash = (Hash + 1) & TableMask;
831 if (Hash == Index) break;
832 }
833
834 //
835 // And finally this path is hit when all the buckets are full, and we need
836 // some expansion. This path is not yet supported in ReactOS and so we'll
837 // ignore the tag
838 //
839 DPRINT1("Out of pool tag space, ignoring...\n");
840 }
841
842 VOID
843 NTAPI
844 ExpInsertPoolTracker(IN ULONG Key,
845 IN SIZE_T NumberOfBytes,
846 IN POOL_TYPE PoolType)
847 {
848 ULONG Hash, Index;
849 KIRQL OldIrql;
850 PPOOL_TRACKER_TABLE Table, TableEntry;
851 SIZE_T TableMask, TableSize;
852
853 //
854 // Remove the PROTECTED_POOL flag which is not part of the tag
855 //
856 Key &= ~PROTECTED_POOL;
857
858 //
859 // With WinDBG you can set a tag you want to break on when an allocation is
860 // attempted
861 //
862 if (Key == PoolHitTag) DbgBreakPoint();
863
864 //
865 // There is also an internal flag you can set to break on malformed tags
866 //
867 if (ExStopBadTags) ASSERT(Key & 0xFFFFFF00);
868
869 //
870 // ASSERT on ReactOS features not yet supported
871 //
872 ASSERT(!(PoolType & SESSION_POOL_MASK));
873 ASSERT(KeGetCurrentProcessorNumber() == 0);
874
875 //
876 // Why the double indirection? Because normally this function is also used
877 // when doing session pool allocations, which use another set of tables,
878 // sizes, and masks that live in session pool. We don't support session
879 // pool yet, so we only ever use the regular tables, but I'm keeping the code this
880 // way so that the day we DO support session pool, it won't require that
881 // many changes
882 //
883 Table = PoolTrackTable;
884 TableMask = PoolTrackTableMask;
885 TableSize = PoolTrackTableSize;
886 DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize);
887
888 //
889 // Compute the hash for this key, and loop all the possible buckets
890 //
891 Hash = ExpComputeHashForTag(Key, TableMask);
892 Index = Hash;
893 while (TRUE)
894 {
895 //
896 // Do we already have an entry for this tag?
897 //
898 TableEntry = &Table[Hash];
899 if (TableEntry->Key == Key)
900 {
901 //
902 // Increment the counters depending on if this was paged or nonpaged
903 // pool
904 //
905 if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
906 {
907 InterlockedIncrement(&TableEntry->NonPagedAllocs);
908 InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes, NumberOfBytes);
909 return;
910 }
911 InterlockedIncrement(&TableEntry->PagedAllocs);
912 InterlockedExchangeAddSizeT(&TableEntry->PagedBytes, NumberOfBytes);
913 return;
914 }
915
916 //
917 // We don't have an entry yet, but we've found a free bucket for it
918 //
919 if (!(TableEntry->Key) && (Hash != PoolTrackTableSize - 1))
920 {
921 //
922 // We need to hold the lock while creating a new entry, since other
923 // processors might be in this code path as well
924 //
925 ExAcquireSpinLock(&ExpTaggedPoolLock, &OldIrql);
926 if (!PoolTrackTable[Hash].Key)
927 {
928 //
929 // We've won the race, so now create this entry in the bucket
930 //
931 ASSERT(Table[Hash].Key == 0);
932 PoolTrackTable[Hash].Key = Key;
933 TableEntry->Key = Key;
934 }
935 ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);
936
937 //
938 // Now we force the loop to run again, and we should now end up in
939 // the code path above which does the interlocked increments...
940 //
941 continue;
942 }
943
944 //
945 // This path is hit when we don't have an entry, and the current bucket
946 // is full, so we simply try the next one
947 //
948 Hash = (Hash + 1) & TableMask;
949 if (Hash == Index) break;
950 }
951
952 //
953 // And finally this path is hit when all the buckets are full, and we need
954 // some expansion. This path is not yet supported in ReactOS and so we'll
955 // ignore the tag
956 //
957 DPRINT1("Out of pool tag space, ignoring...\n");
958 }
959
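//
// Illustrative pairing (sketch, with a hypothetical "Test" tag): every
// insertion must eventually be matched by a removal with the same tag, size
// and pool type, or the per-tag byte counters drift:
//
//     ExpInsertPoolTracker('tseT', 64, NonPagedPool); // allocation side
//     ExpRemovePoolTracker('tseT', 64, NonPagedPool); // free side, nets out
//
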
960 VOID
961 NTAPI
962 INIT_SECTION
963 ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor,
964 IN POOL_TYPE PoolType,
965 IN ULONG PoolIndex,
966 IN ULONG Threshold,
967 IN PVOID PoolLock)
968 {
969 PLIST_ENTRY NextEntry, LastEntry;
970
971 //
972 // Setup the descriptor based on the caller's request
973 //
974 PoolDescriptor->PoolType = PoolType;
975 PoolDescriptor->PoolIndex = PoolIndex;
976 PoolDescriptor->Threshold = Threshold;
977 PoolDescriptor->LockAddress = PoolLock;
978
979 //
980 // Initialize accounting data
981 //
982 PoolDescriptor->RunningAllocs = 0;
983 PoolDescriptor->RunningDeAllocs = 0;
984 PoolDescriptor->TotalPages = 0;
985 PoolDescriptor->TotalBytes = 0;
986 PoolDescriptor->TotalBigPages = 0;
987
988 //
989 // Nothing pending for now
990 //
991 PoolDescriptor->PendingFrees = NULL;
992 PoolDescriptor->PendingFreeDepth = 0;
993
994 //
995 // Loop all the descriptor's allocation lists and initialize them
996 //
997 NextEntry = PoolDescriptor->ListHeads;
998 LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
999 while (NextEntry < LastEntry)
1000 {
1001 ExpInitializePoolListHead(NextEntry);
1002 NextEntry++;
1003 }
1004
1005 //
1006 // Note that ReactOS does not support Session Pool Yet
1007 //
1008 ASSERT(PoolType != PagedPoolSession);
1009 }
1010
1011 VOID
1012 NTAPI
1013 INIT_SECTION
1014 InitializePool(IN POOL_TYPE PoolType,
1015 IN ULONG Threshold)
1016 {
1017 PPOOL_DESCRIPTOR Descriptor;
1018 SIZE_T TableSize;
1019 ULONG i;
1020
1021 //
1022 // Check what kind of pool this is
1023 //
1024 if (PoolType == NonPagedPool)
1025 {
1026 //
1027 // Compute the track table size and convert it from a power of two to an
1028 // actual byte size
1029 //
1030 // NOTE: On checked builds, we'll assert if the registry table size was
1031 // invalid, while on retail builds we'll just break out of the loop at
1032 // that point.
1033 //
1034 TableSize = min(PoolTrackTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
1035 for (i = 0; i < 32; i++)
1036 {
1037 if (TableSize & 1)
1038 {
1039 ASSERT((TableSize & ~1) == 0);
1040 if (!(TableSize & ~1)) break;
1041 }
1042 TableSize >>= 1;
1043 }
1044
1045 //
1046 // If we hit bit 32, then no size was defined in the registry, so
1047 // we'll use the default size of 2048 entries.
1048 //
1049 // Otherwise, use the size from the registry, as long as it's not
1050 // smaller than 64 entries.
1051 //
1052 if (i == 32)
1053 {
1054 PoolTrackTableSize = 2048;
1055 }
1056 else
1057 {
1058 PoolTrackTableSize = max(1 << i, 64);
1059 }
1060
1061 //
1062 // Loop trying with the biggest specified size first, halving it on
1063 // each iteration in case not enough memory exists
1064 //
1065 while (TRUE)
1066 {
1067 //
1068 // Do not allow overflow
1069 //
1070 if ((PoolTrackTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_TABLE)))
1071 {
1072 PoolTrackTableSize >>= 1;
1073 continue;
1074 }
1075
1076 //
1077 // Allocate the tracker table and exit the loop if this worked
1078 //
1079 PoolTrackTable = MiAllocatePoolPages(NonPagedPool,
1080 (PoolTrackTableSize + 1) *
1081 sizeof(POOL_TRACKER_TABLE));
1082 if (PoolTrackTable) break;
1083
1084 //
1085 // Otherwise, as long as we're not down to the last bit, keep
1086 // iterating
1087 //
1088 if (PoolTrackTableSize == 1)
1089 {
1090 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1091 TableSize,
1092 0xFFFFFFFF,
1093 0xFFFFFFFF,
1094 0xFFFFFFFF);
1095 }
1096 PoolTrackTableSize >>= 1;
1097 }
1098
1099 //
1100 // Add one entry, compute the hash, and zero the table
1101 //
1102 PoolTrackTableSize++;
1103 PoolTrackTableMask = PoolTrackTableSize - 2;
1104
1105 RtlZeroMemory(PoolTrackTable,
1106 PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
1107
1108 //
1109 // Finally, add the most used tags to speed up those allocations
1110 //
1111 ExpSeedHotTags();
1112
1113 //
1114 // We now do the exact same thing with the tracker table for big pages
1115 //
1116 TableSize = min(PoolBigPageTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
1117 for (i = 0; i < 32; i++)
1118 {
1119 if (TableSize & 1)
1120 {
1121 ASSERT((TableSize & ~1) == 0);
1122 if (!(TableSize & ~1)) break;
1123 }
1124 TableSize >>= 1;
1125 }
1126
1127 //
1128 // For big pages, the default tracker table is 4096 entries, while the
1129 // minimum is still 64
1130 //
1131 if (i == 32)
1132 {
1133 PoolBigPageTableSize = 4096;
1134 }
1135 else
1136 {
1137 PoolBigPageTableSize = max(1 << i, 64);
1138 }
1139
1140 //
1141 // Again, run the exact same loop we ran earlier, but this time for the
1142 // big pool tracker instead
1143 //
1144 while (TRUE)
1145 {
1146 if ((PoolBigPageTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_BIG_PAGES)))
1147 {
1148 PoolBigPageTableSize >>= 1;
1149 continue;
1150 }
1151
1152 PoolBigPageTable = MiAllocatePoolPages(NonPagedPool,
1153 PoolBigPageTableSize *
1154 sizeof(POOL_TRACKER_BIG_PAGES));
1155 if (PoolBigPageTable) break;
1156
1157 if (PoolBigPageTableSize == 1)
1158 {
1159 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1160 TableSize,
1161 0xFFFFFFFF,
1162 0xFFFFFFFF,
1163 0xFFFFFFFF);
1164 }
1165
1166 PoolBigPageTableSize >>= 1;
1167 }
1168
1169 //
1170 // An extra entry is not needed for the big pool tracker, so just
1171 // compute the hash and zero it
1172 //
1173 PoolBigPageTableHash = PoolBigPageTableSize - 1;
1174 RtlZeroMemory(PoolBigPageTable,
1175 PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
1176 for (i = 0; i < PoolBigPageTableSize; i++) PoolBigPageTable[i].Va = (PVOID)1;
1177
1178 //
1179 // During development, print this out so we can see what's happening
1180 //
1181 DPRINT("EXPOOL: Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
1182 PoolTrackTable, PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
1183 DPRINT("EXPOOL: Big Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
1184 PoolBigPageTable, PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
1185
1186 //
1187 // Insert the generic tracker for all of big pool
1188 //
1189 ExpInsertPoolTracker('looP',
1190 ROUND_TO_PAGES(PoolBigPageTableSize *
1191 sizeof(POOL_TRACKER_BIG_PAGES)),
1192 NonPagedPool);
1193
1194 //
1195 // No support for NUMA systems at this time
1196 //
1197 ASSERT(KeNumberNodes == 1);
1198
1199 //
1200 // Initialize the tag spinlock
1201 //
1202 KeInitializeSpinLock(&ExpTaggedPoolLock);
1203
1204 //
1205 // Initialize the nonpaged pool descriptor
1206 //
1207 PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
1208 ExInitializePoolDescriptor(PoolVector[NonPagedPool],
1209 NonPagedPool,
1210 0,
1211 Threshold,
1212 NULL);
1213 }
1214 else
1215 {
1216 //
1217 // No support for NUMA systems at this time
1218 //
1219 ASSERT(KeNumberNodes == 1);
1220
1221 //
1222 // Allocate the pool descriptor
1223 //
1224 Descriptor = ExAllocatePoolWithTag(NonPagedPool,
1225 sizeof(KGUARDED_MUTEX) +
1226 sizeof(POOL_DESCRIPTOR),
1227 'looP');
1228 if (!Descriptor)
1229 {
1230 //
1231 // This is really bad...
1232 //
1233 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1234 0,
1235 -1,
1236 -1,
1237 -1);
1238 }
1239
1240 //
1241 // Setup the vector and guarded mutex for paged pool
1242 //
1243 PoolVector[PagedPool] = Descriptor;
1244 ExpPagedPoolMutex = (PKGUARDED_MUTEX)(Descriptor + 1);
1245 ExpPagedPoolDescriptor[0] = Descriptor;
1246 KeInitializeGuardedMutex(ExpPagedPoolMutex);
1247 ExInitializePoolDescriptor(Descriptor,
1248 PagedPool,
1249 0,
1250 Threshold,
1251 ExpPagedPoolMutex);
1252
1253 //
1254 // Insert the generic tracker for all of nonpaged pool
1255 //
1256 ExpInsertPoolTracker('looP',
1257 ROUND_TO_PAGES(PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE)),
1258 NonPagedPool);
1259 }
1260 }
1261
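//
// Worked sizing example (sketch): with the 2048-entry default, the code
// above allocates 2048 + 1 tracker entries, then sets PoolTrackTableSize to
// 2049 and PoolTrackTableMask to 2047 (0x7FF). The hash therefore never
// selects the final entry, which ExpSeedHotTags also avoids explicitly.
//
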
1262 FORCEINLINE
1263 KIRQL
1264 ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
1265 {
1266 //
1267 // Check if this is nonpaged pool
1268 //
1269 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1270 {
1271 //
1272 // Use the queued spin lock
1273 //
1274 return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
1275 }
1276 else
1277 {
1278 //
1279 // Use the guarded mutex
1280 //
1281 KeAcquireGuardedMutex(Descriptor->LockAddress);
1282 return APC_LEVEL;
1283 }
1284 }
1285
1286 FORCEINLINE
1287 VOID
1288 ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor,
1289 IN KIRQL OldIrql)
1290 {
1291 //
1292 // Check if this is nonpaged pool
1293 //
1294 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1295 {
1296 //
1297 // Use the queued spin lock
1298 //
1299 KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
1300 }
1301 else
1302 {
1303 //
1304 // Use the guarded mutex
1305 //
1306 KeReleaseGuardedMutex(Descriptor->LockAddress);
1307 }
1308 }
1309
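//
// Typical usage of the pair above (sketch): the returned IRQL must be
// passed back on release, whichever lock type was actually taken:
//
//     OldIrql = ExLockPool(PoolDesc);
//     /* ... manipulate PoolDesc->ListHeads ... */
//     ExUnlockPool(PoolDesc, OldIrql);
//
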
1310 VOID
1311 NTAPI
1312 ExpGetPoolTagInfoTarget(IN PKDPC Dpc,
1313 IN PVOID DeferredContext,
1314 IN PVOID SystemArgument1,
1315 IN PVOID SystemArgument2)
1316 {
1317 PPOOL_DPC_CONTEXT Context = DeferredContext;
1318 UNREFERENCED_PARAMETER(Dpc);
1319 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
1320
1321 //
1322 // Make sure we win the race, and if we did, copy the data atomically
1323 //
1324 if (KeSignalCallDpcSynchronize(SystemArgument2))
1325 {
1326 RtlCopyMemory(Context->PoolTrackTable,
1327 PoolTrackTable,
1328 Context->PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
1329
1330 //
1331 // This is here because ReactOS does not yet support expansion
1332 //
1333 ASSERT(Context->PoolTrackTableSizeExpansion == 0);
1334 }
1335
1336 //
1337 // Regardless of whether we won or not, we must now synchronize and then
1338 // decrement the barrier since this is one more processor that has completed
1339 // the callback.
1340 //
1341 KeSignalCallDpcSynchronize(SystemArgument2);
1342 KeSignalCallDpcDone(SystemArgument1);
1343 }
1344
1345 NTSTATUS
1346 NTAPI
1347 ExGetPoolTagInfo(IN PSYSTEM_POOLTAG_INFORMATION SystemInformation,
1348 IN ULONG SystemInformationLength,
1349 IN OUT PULONG ReturnLength OPTIONAL)
1350 {
1351 ULONG TableSize, CurrentLength;
1352 ULONG EntryCount;
1353 NTSTATUS Status = STATUS_SUCCESS;
1354 PSYSTEM_POOLTAG TagEntry;
1355 PPOOL_TRACKER_TABLE Buffer, TrackerEntry;
1356 POOL_DPC_CONTEXT Context;
1357 ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
1358
1359 //
1360 // Keep track of how much data the caller's buffer must hold
1361 //
1362 CurrentLength = FIELD_OFFSET(SYSTEM_POOLTAG_INFORMATION, TagInfo);
1363
1364 //
1365 // Initialize the caller's buffer
1366 //
1367 TagEntry = &SystemInformation->TagInfo[0];
1368 SystemInformation->Count = 0;
1369
1370 //
1371 // Capture the number of entries, and the total size needed to make a copy
1372 // of the table
1373 //
1374 EntryCount = (ULONG)PoolTrackTableSize;
1375 TableSize = EntryCount * sizeof(POOL_TRACKER_TABLE);
1376
1377 //
1378 // Allocate the "Generic DPC" temporary buffer
1379 //
1380 Buffer = ExAllocatePoolWithTag(NonPagedPool, TableSize, 'ofnI');
1381 if (!Buffer) return STATUS_INSUFFICIENT_RESOURCES;
1382
1383 //
1384 // Do a "Generic DPC" to atomically retrieve the tag and allocation data
1385 //
1386 Context.PoolTrackTable = Buffer;
1387 Context.PoolTrackTableSize = PoolTrackTableSize;
1388 Context.PoolTrackTableExpansion = NULL;
1389 Context.PoolTrackTableSizeExpansion = 0;
1390 KeGenericCallDpc(ExpGetPoolTagInfoTarget, &Context);
1391
1392 //
1393 // Now parse the results
1394 //
1395 for (TrackerEntry = Buffer; TrackerEntry < (Buffer + EntryCount); TrackerEntry++)
1396 {
1397 //
1398 // If the entry is empty, skip it
1399 //
1400 if (!TrackerEntry->Key) continue;
1401
1402 //
1403 // Otherwise, add one more entry to the caller's buffer, and ensure that
1404 // enough space has been allocated in it
1405 //
1406 SystemInformation->Count++;
1407 CurrentLength += sizeof(*TagEntry);
1408 if (SystemInformationLength < CurrentLength)
1409 {
1410 //
1411 // The caller's buffer is too small, so set a failure code. The
1412 // caller will know the count, as well as how much space is needed.
1413 //
1414 // We do NOT break out of the loop, because we want to keep incrementing
1415 // the Count as well as CurrentLength so that the caller can know the
1416 // final numbers
1417 //
1418 Status = STATUS_INFO_LENGTH_MISMATCH;
1419 }
1420 else
1421 {
1422 //
1423 // Small sanity check that our accounting is working correctly
1424 //
1425 ASSERT(TrackerEntry->PagedAllocs >= TrackerEntry->PagedFrees);
1426 ASSERT(TrackerEntry->NonPagedAllocs >= TrackerEntry->NonPagedFrees);
1427
1428 //
1429 // Return the data into the caller's buffer
1430 //
1431 TagEntry->TagUlong = TrackerEntry->Key;
1432 TagEntry->PagedAllocs = TrackerEntry->PagedAllocs;
1433 TagEntry->PagedFrees = TrackerEntry->PagedFrees;
1434 TagEntry->PagedUsed = TrackerEntry->PagedBytes;
1435 TagEntry->NonPagedAllocs = TrackerEntry->NonPagedAllocs;
1436 TagEntry->NonPagedFrees = TrackerEntry->NonPagedFrees;
1437 TagEntry->NonPagedUsed = TrackerEntry->NonPagedBytes;
1438 TagEntry++;
1439 }
1440 }
1441
1442 //
1443 // Free the "Generic DPC" temporary buffer, return the buffer length and status
1444 //
1445 ExFreePoolWithTag(Buffer, 'ofnI');
1446 if (ReturnLength) *ReturnLength = CurrentLength;
1447 return Status;
1448 }
1449
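//
// Caller-side sketch (hypothetical kernel-mode caller with an "Info"
// buffer): on STATUS_INFO_LENGTH_MISMATCH the returned length already
// covers all SystemInformation->Count entries, so a retry loop looks like:
//
//     Status = ExGetPoolTagInfo(Info, InfoLength, &InfoLength);
//     if (Status == STATUS_INFO_LENGTH_MISMATCH)
//     {
//         /* reallocate Info with at least InfoLength bytes, call again */
//     }
//
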
1450 BOOLEAN
1451 NTAPI
1452 ExpAddTagForBigPages(IN PVOID Va,
1453 IN ULONG Key,
1454 IN ULONG NumberOfPages,
1455 IN POOL_TYPE PoolType)
1456 {
1457 ULONG Hash, i = 0;
1458 PVOID OldVa;
1459 KIRQL OldIrql;
1460 SIZE_T TableSize;
1461 PPOOL_TRACKER_BIG_PAGES Entry, EntryEnd, EntryStart;
1462 ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
1463 ASSERT(!(PoolType & SESSION_POOL_MASK));
1464
1465 //
1466 // As the table is expandable, these values must only be read after acquiring
1467 // the lock to avoid a torn access during an expansion
1468 //
1469 Hash = ExpComputePartialHashForAddress(Va);
1470 KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
1471 Hash &= PoolBigPageTableHash;
1472 TableSize = PoolBigPageTableSize;
1473
1474 //
1475 // We loop from the current hash bucket to the end of the table, and then
1476 // rollover to hash bucket 0 and keep going from there. If we return back
1477 // to the beginning, then we attempt expansion at the bottom of the loop
1478 //
1479 EntryStart = Entry = &PoolBigPageTable[Hash];
1480 EntryEnd = &PoolBigPageTable[TableSize];
1481 do
1482 {
1483 //
1484 // Make sure that this is a free entry and attempt to atomically make the
1485 // entry busy now
1486 //
1487 OldVa = Entry->Va;
1488 if (((ULONG_PTR)OldVa & POOL_BIG_TABLE_ENTRY_FREE) &&
1489 (InterlockedCompareExchangePointer(&Entry->Va, Va, OldVa) == OldVa))
1490 {
1491 //
1492 // We now own this entry, write down the size and the pool tag
1493 //
1494 Entry->Key = Key;
1495 Entry->NumberOfPages = NumberOfPages;
1496
1497 //
1498 // Add one more entry to the count, and see if we have now used more
1499 // than 25% of the table size, at which point we'll do an expansion now
1500 // to avoid blocking too hard later on.
1501 //
1502 // Note that we only do this if it's also been the 16th time that we
1503 // keep losing the race or that we are not finding a free entry anymore,
1504 // which implies a massive number of concurrent big pool allocations.
1505 //
1506 InterlockedIncrementUL(&ExpPoolBigEntriesInUse);
1507 if ((i >= 16) && (ExpPoolBigEntriesInUse > (TableSize / 4)))
1508 {
1509 DPRINT("Should attempt expansion since we now have %lu entries\n",
1510 ExpPoolBigEntriesInUse);
1511 }
1512
1513 //
1514 // We have our entry, return
1515 //
1516 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1517 return TRUE;
1518 }
1519
1520 //
1521 // We don't have our entry yet, so keep trying, making the entry list
1522 // circular if we reach the last entry. We'll eventually break out of
1523 // the loop once we've rolled over and returned back to our original
1524 // hash bucket
1525 //
1526 i++;
1527 if (++Entry >= EntryEnd) Entry = &PoolBigPageTable[0];
1528 } while (Entry != EntryStart);
1529
1530 //
1531 // This means there's no free hash buckets whatsoever, so we would now have
1532 // to attempt expanding the table
1533 //
1534 DPRINT1("Big pool expansion needed, not implemented!\n");
1535 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1536 return FALSE;
1537 }
1538
1539 ULONG
1540 NTAPI
1541 ExpFindAndRemoveTagBigPages(IN PVOID Va,
1542 OUT PULONG_PTR BigPages,
1543 IN POOL_TYPE PoolType)
1544 {
1545 BOOLEAN FirstTry = TRUE;
1546 SIZE_T TableSize;
1547 KIRQL OldIrql;
1548 ULONG PoolTag, Hash;
1549 PPOOL_TRACKER_BIG_PAGES Entry;
1550 ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
1551 ASSERT(!(PoolType & SESSION_POOL_MASK));
1552
1553 //
1554 // As the table is expandable, these values must only be read after acquiring
1555 // the lock to avoid a torn access during an expansion
1556 //
1557 Hash = ExpComputePartialHashForAddress(Va);
1558 KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
1559 Hash &= PoolBigPageTableHash;
1560 TableSize = PoolBigPageTableSize;
1561
1562 //
1563 // Loop while trying to find this big page allocation
1564 //
1565 while (PoolBigPageTable[Hash].Va != Va)
1566 {
1567 //
1568 // Increment the size until we go past the end of the table
1569 //
1570 if (++Hash >= TableSize)
1571 {
1572 //
1573 // Is this the second time we've tried?
1574 //
1575 if (!FirstTry)
1576 {
1577 //
1578 // This means it was never inserted into the pool table and it
1579 // received the special "BIG" tag -- return that and return 0
1580 // so that the code can ask Mm for the page count instead
1581 //
1582 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1583 *BigPages = 0;
1584 return ' GIB';
1585 }
1586
1587 //
1588 // The first time this happens, reset the hash index and try again
1589 //
1590 Hash = 0;
1591 FirstTry = FALSE;
1592 }
1593 }
1594
1595 //
1596 // Now capture all the information we need from the entry, since after we
1597 // release the lock, the data can change
1598 //
1599 Entry = &PoolBigPageTable[Hash];
1600 *BigPages = Entry->NumberOfPages;
1601 PoolTag = Entry->Key;
1602
1603 //
1604 // Set the free bit (Va is page-aligned, so the increment sets its low bit),
1605 // decrement the allocation count, then release the lock and return the tag
1606 //
1607 InterlockedIncrement((PLONG)&Entry->Va);
1608 InterlockedDecrementUL(&ExpPoolBigEntriesInUse);
1609 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1610 return PoolTag;
1611 }
1612
1613 VOID
1614 NTAPI
1615 ExQueryPoolUsage(OUT PULONG PagedPoolPages,
1616 OUT PULONG NonPagedPoolPages,
1617 OUT PULONG PagedPoolAllocs,
1618 OUT PULONG PagedPoolFrees,
1619 OUT PULONG PagedPoolLookasideHits,
1620 OUT PULONG NonPagedPoolAllocs,
1621 OUT PULONG NonPagedPoolFrees,
1622 OUT PULONG NonPagedPoolLookasideHits)
1623 {
1624 ULONG i;
1625 PPOOL_DESCRIPTOR PoolDesc;
1626
1627 //
1628 // Assume all failures
1629 //
1630 *PagedPoolPages = 0;
1631 *PagedPoolAllocs = 0;
1632 *PagedPoolFrees = 0;
1633
1634 //
1635 // Tally up the totals for all the paged pools
1636 //
1637 for (i = 0; i < ExpNumberOfPagedPools + 1; i++)
1638 {
1639 PoolDesc = ExpPagedPoolDescriptor[i];
1640 *PagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1641 *PagedPoolAllocs += PoolDesc->RunningAllocs;
1642 *PagedPoolFrees += PoolDesc->RunningDeAllocs;
1643 }
1644
1645 //
1646 // The first non-paged pool has a hardcoded well-known descriptor name
1647 //
1648 PoolDesc = &NonPagedPoolDescriptor;
1649 *NonPagedPoolPages = PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1650 *NonPagedPoolAllocs = PoolDesc->RunningAllocs;
1651 *NonPagedPoolFrees = PoolDesc->RunningDeAllocs;
1652
1653 //
1654 // If the system has more than one non-paged pool, copy the other descriptor
1655 // totals as well
1656 //
1657 #if 0
1658 if (ExpNumberOfNonPagedPools > 1)
1659 {
1660 for (i = 0; i < ExpNumberOfNonPagedPools; i++)
1661 {
1662 PoolDesc = ExpNonPagedPoolDescriptor[i];
1663 *NonPagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1664 *NonPagedPoolAllocs += PoolDesc->RunningAllocs;
1665 *NonPagedPoolFrees += PoolDesc->RunningDeAllocs;
1666 }
1667 }
1668 #endif
1669
1670 //
1671 // FIXME: Not yet supported
1672 //
1673 *NonPagedPoolLookasideHits += 0;
1674 *PagedPoolLookasideHits += 0;
1675 }
1676
1677 VOID
1678 NTAPI
1679 ExReturnPoolQuota(IN PVOID P)
1680 {
1681 PPOOL_HEADER Entry;
1682 POOL_TYPE PoolType;
1683 USHORT BlockSize;
1684 PEPROCESS Process;
1685
1686 if ((ExpPoolFlags & POOL_FLAG_SPECIAL_POOL) &&
1687 (MmIsSpecialPoolAddress(P)))
1688 {
1689 return;
1690 }
1691
1692 Entry = P;
1693 Entry--;
1694 ASSERT((ULONG_PTR)Entry % POOL_BLOCK_SIZE == 0);
1695
1696 PoolType = Entry->PoolType - 1;
1697 BlockSize = Entry->BlockSize;
1698
1699 if (PoolType & QUOTA_POOL_MASK)
1700 {
1701 Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
1702 ASSERT(Process != NULL);
1703 if (Process)
1704 {
1705 if (Process->Pcb.Header.Type != ProcessObject)
1706 {
1707 DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
1708 Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
1709 KeBugCheckEx(BAD_POOL_CALLER,
1710 0x0D,
1711 (ULONG_PTR)P,
1712 Entry->PoolTag,
1713 (ULONG_PTR)Process);
1714 }
1715 ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
1716 PsReturnPoolQuota(Process,
1717 PoolType & BASE_POOL_TYPE_MASK,
1718 BlockSize * POOL_BLOCK_SIZE);
1719 ObDereferenceObject(Process);
1720 }
1721 }
1722 }
1723
1724 /* PUBLIC FUNCTIONS ***********************************************************/
1725
1726 /*
1727 * @implemented
1728 */
1729 PVOID
1730 NTAPI
1731 ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
1732 IN SIZE_T NumberOfBytes,
1733 IN ULONG Tag)
1734 {
1735 PPOOL_DESCRIPTOR PoolDesc;
1736 PLIST_ENTRY ListHead;
1737 PPOOL_HEADER Entry, NextEntry, FragmentEntry;
1738 KIRQL OldIrql;
1739 USHORT BlockSize, i;
1740 ULONG OriginalType;
1741 PKPRCB Prcb = KeGetCurrentPrcb();
1742 PGENERAL_LOOKASIDE LookasideList;
1743
1744 //
1745 // Some sanity checks
1746 //
1747 ASSERT(Tag != 0);
1748 ASSERT(Tag != ' GIB');
1749 ASSERT(NumberOfBytes != 0);
1750 ExpCheckPoolIrqlLevel(PoolType, NumberOfBytes, NULL);
1751
1752 //
1753 // Not supported in ReactOS
1754 //
1755 ASSERT(!(PoolType & SESSION_POOL_MASK));
1756
1757 //
1758 // Check if verifier or special pool is enabled
1759 //
1760 if (ExpPoolFlags & (POOL_FLAG_VERIFIER | POOL_FLAG_SPECIAL_POOL))
1761 {
1762 //
1763 // For verifier, we should call the verification routine
1764 //
1765 if (ExpPoolFlags & POOL_FLAG_VERIFIER)
1766 {
1767 DPRINT1("Driver Verifier is not yet supported\n");
1768 }
1769
1770 //
1771 // For special pool, we check if this is a suitable allocation and do
1772 // the special allocation if needed
1773 //
1774 if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
1775 {
1776 //
1777 // Check if this is a special pool allocation
1778 //
1779 if (MmUseSpecialPool(NumberOfBytes, Tag))
1780 {
1781 //
1782 // Try to allocate using special pool
1783 //
1784 Entry = MmAllocateSpecialPool(NumberOfBytes, Tag, PoolType, 2);
1785 if (Entry) return Entry;
1786 }
1787 }
1788 }
1789
1790 //
1791 // Get the pool type and its corresponding vector for this request
1792 //
1793 OriginalType = PoolType;
1794 PoolType = PoolType & BASE_POOL_TYPE_MASK;
1795 PoolDesc = PoolVector[PoolType];
1796 ASSERT(PoolDesc != NULL);
1797
1798 //
1799 // Check if this is a big page allocation
1800 //
1801 if (NumberOfBytes > POOL_MAX_ALLOC)
1802 {
1803 //
1804 // Allocate pages for it
1805 //
1806 Entry = MiAllocatePoolPages(OriginalType, NumberOfBytes);
1807 if (!Entry)
1808 {
1809 #if DBG
1810 //
1811 // Out of memory, display current consumption
1812 //
1813 MiDumpPoolConsumers(FALSE, 0, 0, 0);
1814 #endif
1815
1816 //
1817 // Must succeed pool is deprecated, but still supported. These allocation
1818 // failures must cause an immediate bugcheck
1819 //
1820 if (OriginalType & MUST_SUCCEED_POOL_MASK)
1821 {
1822 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1823 NumberOfBytes,
1824 NonPagedPoolDescriptor.TotalPages,
1825 NonPagedPoolDescriptor.TotalBigPages,
1826 0);
1827 }
1828
1829 //
1830 // Internal debugging
1831 //
1832 ExPoolFailures++;
1833
1834 //
1835 // This flag requests printing failures, and can also further specify
1836 // breaking on failures
1837 //
1838 if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
1839 {
1840 DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
1841 NumberOfBytes,
1842 OriginalType);
1843 if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
1844 }
1845
1846 //
1847 // Finally, this flag requests an exception, which we are more than
1848 // happy to raise!
1849 //
1850 if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
1851 {
1852 ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
1853 }
1854
1855 return NULL;
1856 }
1857
1858 //
1859 // Increment required counters
1860 //
1861 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
1862 (LONG)BYTES_TO_PAGES(NumberOfBytes));
1863 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, NumberOfBytes);
1864 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
1865
1866 //
1867 // Add a tag for the big page allocation and switch to the generic "BIG"
1868 // tag if we failed to do so, then insert a tracker for this allocation.
1869 //
1870 if (!ExpAddTagForBigPages(Entry,
1871 Tag,
1872 (ULONG)BYTES_TO_PAGES(NumberOfBytes),
1873 OriginalType))
1874 {
1875 Tag = ' GIB';
1876 }
1877 ExpInsertPoolTracker(Tag, ROUND_TO_PAGES(NumberOfBytes), OriginalType);
1878 return Entry;
1879 }
1880
1881 //
1882 // Should never request 0 bytes from the pool, but since so many drivers do
1883 // it, we'll just assume they want 1 byte, based on NT's similar behavior
1884 //
1885 if (!NumberOfBytes) NumberOfBytes = 1;
1886
1887 //
1888 // A pool allocation is defined by its data, a linked list to connect it to
1889 // the free list (if necessary), and a pool header to store accounting info.
1890 // Calculate this size, then convert it into a block size (units of pool
1891 // headers)
1892 //
1893 // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
1894 // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
1895 // the direct allocation of pages.
1896 //
1897 i = (USHORT)((NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1))
1898 / POOL_BLOCK_SIZE);
1899 ASSERT(i < POOL_LISTS_PER_PAGE);
1900
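//
// Worked example (sketch for x86, where POOL_BLOCK_SIZE and
// sizeof(POOL_HEADER) are both 8): a 24-byte request gives
// i = (24 + 8 + 7) / 8 = 4 blocks, i.e. a 32-byte entry holding the header
// plus the caller's data.
//
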
1901 //
1902 // Handle lookaside list optimization for both paged and nonpaged pool
1903 //
1904 if (i <= NUMBER_POOL_LOOKASIDE_LISTS)
1905 {
1906 //
1907 // Try popping it from the per-CPU lookaside list
1908 //
1909 LookasideList = (PoolType == PagedPool) ?
1910 Prcb->PPPagedLookasideList[i - 1].P :
1911 Prcb->PPNPagedLookasideList[i - 1].P;
1912 LookasideList->TotalAllocates++;
1913 Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
1914 if (!Entry)
1915 {
1916 //
1917 // We failed, try popping it from the global list
1918 //
1919 LookasideList = (PoolType == PagedPool) ?
1920 Prcb->PPPagedLookasideList[i - 1].L :
1921 Prcb->PPNPagedLookasideList[i - 1].L;
1922 LookasideList->TotalAllocates++;
1923 Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
1924 }
1925
1926 //
1927 // If we were able to pop it, update the accounting and return the block
1928 //
1929 if (Entry)
1930 {
1931 LookasideList->AllocateHits++;
1932
1933 //
1934 // Get the real entry, write down its pool type, and track it
1935 //
1936 Entry--;
1937 Entry->PoolType = OriginalType + 1;
1938 ExpInsertPoolTracker(Tag,
1939 Entry->BlockSize * POOL_BLOCK_SIZE,
1940 OriginalType);
1941
1942 //
1943 // Return the pool allocation
1944 //
1945 Entry->PoolTag = Tag;
1946 (POOL_FREE_BLOCK(Entry))->Flink = NULL;
1947 (POOL_FREE_BLOCK(Entry))->Blink = NULL;
1948 return POOL_FREE_BLOCK(Entry);
1949 }
1950 }
1951
1952 //
1953 // Loop in the free lists looking for a block of this size. Start with the
1954 // list optimized for this kind of size lookup
1955 //
1956 ListHead = &PoolDesc->ListHeads[i];
1957 do
1958 {
1959 //
1960 // Are there any free entries available on this list?
1961 //
1962 if (!ExpIsPoolListEmpty(ListHead))
1963 {
1964 //
1965 // Acquire the pool lock now
1966 //
1967 OldIrql = ExLockPool(PoolDesc);
1968
1969 //
1970 // And make sure the list still has entries
1971 //
1972 if (ExpIsPoolListEmpty(ListHead))
1973 {
1974 //
1975 // Someone raced us (and won) before we had a chance to acquire
1976 // the lock.
1977 //
1978 // Try again!
1979 //
1980 ExUnlockPool(PoolDesc, OldIrql);
1981 continue;
1982 }
1983
1984 //
1985 // Remove a free entry from the list
1986 // Note that due to the way we insert free blocks into multiple lists
1987 // there is a guarantee that any block on this list will either be
1988 // of the correct size, or perhaps larger.
1989 //
1990 ExpCheckPoolLinks(ListHead);
1991 Entry = POOL_ENTRY(ExpRemovePoolHeadList(ListHead));
1992 ExpCheckPoolLinks(ListHead);
1993 ExpCheckPoolBlocks(Entry);
1994 ASSERT(Entry->BlockSize >= i);
1995 ASSERT(Entry->PoolType == 0);
1996
1997 //
1998 // Check if this block is larger than what we need. The block could
1999 // not possibly be smaller, due to the reason explained above (and
2000 // we would've asserted on a checked build if this was the case).
2001 //
2002 if (Entry->BlockSize != i)
2003 {
2004 //
2005 // Is there an entry before this one?
2006 //
2007 if (Entry->PreviousSize == 0)
2008 {
2009 //
2010 // There isn't anyone before us, so take the next block and
2011 // turn it into a fragment that contains the leftover data
2012 // that we don't need to satisfy the caller's request
2013 //
2014 FragmentEntry = POOL_BLOCK(Entry, i);
2015 FragmentEntry->BlockSize = Entry->BlockSize - i;
2016
2017 //
2018 // And make it point back to us
2019 //
2020 FragmentEntry->PreviousSize = i;
2021
2022 //
2023 // Now get the block that follows the new fragment and check
2024 // if it's still on the same page as us (and not at the end)
2025 //
2026 NextEntry = POOL_NEXT_BLOCK(FragmentEntry);
2027 if (PAGE_ALIGN(NextEntry) != NextEntry)
2028 {
2029 //
2030 // Adjust this next block to point to our newly created
2031 // fragment block
2032 //
2033 NextEntry->PreviousSize = FragmentEntry->BlockSize;
2034 }
2035 }
2036 else
2037 {
2038 //
2039 // There is a free entry before us, which we know is smaller
2040 // so we'll make this entry the fragment instead
2041 //
2042 FragmentEntry = Entry;
2043
2044 //
2045 // And then we'll remove from it the actual size required.
2046 // Now the entry is a leftover free fragment
2047 //
2048 Entry->BlockSize -= i;
2049
2050 //
2051 // Now let's go to the next entry after the fragment (which
2052 // used to point to our original free entry) and make it
2053 // reference the new fragment entry instead.
2054 //
2055 // This is the entry that will actually end up holding the
2056 // allocation!
2057 //
2058 Entry = POOL_NEXT_BLOCK(Entry);
2059 Entry->PreviousSize = FragmentEntry->BlockSize;
2060
2061 //
2062 // And now let's go to the entry after that one and check if
2063 // it's still on the same page, and not at the end
2064 //
2065 NextEntry = POOL_BLOCK(Entry, i);
2066 if (PAGE_ALIGN(NextEntry) != NextEntry)
2067 {
2068 //
2069 // Make it reference the allocation entry
2070 //
2071 NextEntry->PreviousSize = i;
2072 }
2073 }
2074
2075 //
2076 // Now our (allocation) entry is the right size
2077 //
2078 Entry->BlockSize = i;
2079
2080 //
2081 // And the next entry is now the free fragment which contains
2082 // the remaining difference between how big the original entry
2083 // was, and the actual size the caller needs/requested.
2084 //
2085 FragmentEntry->PoolType = 0;
2086 BlockSize = FragmentEntry->BlockSize;
2087
2088 //
2089 // Now check if enough free bytes remained for us to have a
2090 // "full" entry, which contains enough bytes for a linked list
2091 // and thus can be used for allocations (up to 8 bytes...)
2092 //
2093 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
2094 if (BlockSize != 1)
2095 {
2096 //
2097 // Insert the free entry into the free list for this size
2098 //
2099 ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
2100 POOL_FREE_BLOCK(FragmentEntry));
2101 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
2102 }
2103 }
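//
// Worked example of the split above (illustrative; assumes the common value
// POOL_BLOCK_SIZE == 8): a request for i == 4 units served from a 32-unit
// free block with no predecessor (the first branch) ends up as
//
// Entry: BlockSize = 4 (set just above)
// FragmentEntry: BlockSize = 28, PreviousSize = 4, at Entry + 4 units
//
// and the 28-unit fragment was queued on ListHeads[27].
//
#if 0
ASSERT(FragmentEntry == POOL_BLOCK(Entry, Entry->BlockSize));
ASSERT(FragmentEntry->PreviousSize == Entry->BlockSize);
#endif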
2104
2105 //
2106 // We have found an entry for this allocation, so set the pool type
2107 // and release the lock since we're done
2108 //
2109 Entry->PoolType = OriginalType + 1;
2110 ExpCheckPoolBlocks(Entry);
2111 ExUnlockPool(PoolDesc, OldIrql);
2112
2113 //
2114 // Increment required counters
2115 //
2116 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
2117 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
2118
2119 //
2120 // Track this allocation
2121 //
2122 ExpInsertPoolTracker(Tag,
2123 Entry->BlockSize * POOL_BLOCK_SIZE,
2124 OriginalType);
2125
2126 //
2127 // Return the pool allocation
2128 //
2129 Entry->PoolTag = Tag;
2130 (POOL_FREE_BLOCK(Entry))->Flink = NULL;
2131 (POOL_FREE_BLOCK(Entry))->Blink = NULL;
2132 return POOL_FREE_BLOCK(Entry);
2133 }
2134 } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);
2135
2136 //
2137 // There were no free entries left, so we have to allocate a new fresh page
2138 //
2139 Entry = MiAllocatePoolPages(OriginalType, PAGE_SIZE);
2140 if (!Entry)
2141 {
2142 #if DBG
2143 //
2144 // Out of memory, display current consumption
2145 //
2146 MiDumpPoolConsumers(FALSE, 0, 0, 0);
2147 #endif
2148
2149 //
2150 // Must-succeed pool is deprecated, but still supported. These allocation
2151 // failures must cause an immediate bugcheck
2152 //
2153 if (OriginalType & MUST_SUCCEED_POOL_MASK)
2154 {
2155 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
2156 PAGE_SIZE,
2157 NonPagedPoolDescriptor.TotalPages,
2158 NonPagedPoolDescriptor.TotalBigPages,
2159 0);
2160 }
2161
2162 //
2163 // Internal debugging
2164 //
2165 ExPoolFailures++;
2166
2167 //
2168 // This flag requests printing failures, and can also further specify
2169 // breaking on failures
2170 //
2171 if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
2172 {
2173 DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
2174 NumberOfBytes,
2175 OriginalType);
2176 if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
2177 }
2178
2179 //
2180 // Finally, this flag requests an exception, which we are more than
2181 // happy to raise!
2182 //
2183 if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
2184 {
2185 ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
2186 }
2187
2188 //
2189 // Return NULL to the caller in all other cases
2190 //
2191 return NULL;
2192 }
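//
// Caller-side sketch (illustrative only; 'lpxE' is a hypothetical tag):
// how a driver opts into the exception path above instead of checking
// the return value for NULL.
//
#if 0
PVOID Buffer;
__try
{
Buffer = ExAllocatePoolWithTag(NonPagedPool | POOL_RAISE_IF_ALLOCATION_FAILURE,
0x100,
'lpxE');
}
__except (EXCEPTION_EXECUTE_HANDLER)
{
/* The allocator raised STATUS_INSUFFICIENT_RESOURCES */
Buffer = NULL;
}
#endif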
2193
2194 //
2195 // Setup the entry data
2196 //
2197 Entry->Ulong1 = 0;
2198 Entry->BlockSize = i;
2199 Entry->PoolType = OriginalType + 1;
2200
2201 //
2202 // This page will have two entries -- one for the allocation (which we just
2203 // created above), and one for the remaining free bytes, which we're about
2204 // to create now. The free bytes are the whole page minus what was allocated
2205 // and then converted into units of block headers.
2206 //
2207 BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i;
2208 FragmentEntry = POOL_BLOCK(Entry, i);
2209 FragmentEntry->Ulong1 = 0;
2210 FragmentEntry->BlockSize = BlockSize;
2211 FragmentEntry->PreviousSize = i;
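//
// Worked example of the carve above (illustrative; assumes the common x86
// values PAGE_SIZE == 4096 and POOL_BLOCK_SIZE == 8, i.e. 512 block units
// per page): a request of i == 4 units leaves
//
// BlockSize = (4096 / 8) - 4 = 508
//
// free units, so the page holds the 4-unit allocation immediately followed
// by a 508-unit free fragment.
//
#if 0
ASSERT(BlockSize == (PAGE_SIZE / POOL_BLOCK_SIZE) - i);
ASSERT(FragmentEntry == POOL_BLOCK(Entry, i));
#endif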
2212
2213 //
2214 // Increment required counters
2215 //
2216 InterlockedIncrement((PLONG)&PoolDesc->TotalPages);
2217 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
2218
2219 //
2220 // Now check if enough free bytes remained for us to have a "full" entry,
2221 // which contains enough bytes for a linked list and thus can be used for
2222 // allocations (up to 8 bytes...)
2223 //
2224 if (FragmentEntry->BlockSize != 1)
2225 {
2226 //
2227 // Excellent -- acquire the pool lock
2228 //
2229 OldIrql = ExLockPool(PoolDesc);
2230
2231 //
2232 // And insert the free entry into the free list for this block size
2233 //
2234 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
2235 ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
2236 POOL_FREE_BLOCK(FragmentEntry));
2237 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
2238
2239 //
2240 // Release the pool lock
2241 //
2242 ExpCheckPoolBlocks(Entry);
2243 ExUnlockPool(PoolDesc, OldIrql);
2244 }
2245 else
2246 {
2247 //
2248 // Simply do a sanity check
2249 //
2250 ExpCheckPoolBlocks(Entry);
2251 }
2252
2253 //
2254 // Increment performance counters and track this allocation
2255 //
2256 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
2257 ExpInsertPoolTracker(Tag,
2258 Entry->BlockSize * POOL_BLOCK_SIZE,
2259 OriginalType);
2260
2261 //
2262 // And return the pool allocation
2263 //
2264 ExpCheckPoolBlocks(Entry);
2265 Entry->PoolTag = Tag;
2266 return POOL_FREE_BLOCK(Entry);
2267 }
2268
2269 /*
2270 * @implemented
2271 */
2272 PVOID
2273 NTAPI
2274 ExAllocatePool(POOL_TYPE PoolType,
2275 SIZE_T NumberOfBytes)
2276 {
2277 ULONG Tag = TAG_NONE;
2278 #if 0 && DBG
2279 PLDR_DATA_TABLE_ENTRY LdrEntry;
2280
2281 /* Use the first four letters of the driver name, or "None" if unavailable */
2282 LdrEntry = KeGetCurrentIrql() <= APC_LEVEL
2283 ? MiLookupDataTableEntry(_ReturnAddress())
2284 : NULL;
2285 if (LdrEntry)
2286 {
2287 ULONG i;
2288 Tag = 0;
2289 for (i = 0; i < min(4, LdrEntry->BaseDllName.Length / sizeof(WCHAR)); i++)
2290 Tag = Tag >> 8 | (LdrEntry->BaseDllName.Buffer[i] & 0xff) << 24;
2291 for (; i < 4; i++)
2292 Tag = Tag >> 8 | ' ' << 24;
2293 }
2294 #endif
2295 return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2296 }
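//
// Illustrative sketch (not compiled; ExamplePackTag is a hypothetical
// helper): the byte-shuffling loop above packs the first four characters
// of the driver name into a tag, first character in the lowest byte,
// padding short names with spaces -- which is why TAG_NONE ('enoN')
// displays as "None".
//
#if 0
static ULONG ExamplePackTag(const char *Name)
{
ULONG Tag = 0;
BOOLEAN End = FALSE;
ULONG i;

for (i = 0; i < 4; i++)
{
UCHAR c;
if (!End && Name[i] == '\0') End = TRUE;
c = End ? ' ' : (UCHAR)Name[i];
Tag = Tag >> 8 | (ULONG)c << 24; /* same shift pattern as above */
}
return Tag; /* ExamplePackTag("None") == TAG_NONE */
}
#endif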
2297
2298 /*
2299 * @implemented
2300 */
2301 VOID
2302 NTAPI
2303 ExFreePoolWithTag(IN PVOID P,
2304 IN ULONG TagToFree)
2305 {
2306 PPOOL_HEADER Entry, NextEntry;
2307 USHORT BlockSize;
2308 KIRQL OldIrql;
2309 POOL_TYPE PoolType;
2310 PPOOL_DESCRIPTOR PoolDesc;
2311 ULONG Tag;
2312 BOOLEAN Combined = FALSE;
2313 PFN_NUMBER PageCount, RealPageCount;
2314 PKPRCB Prcb = KeGetCurrentPrcb();
2315 PGENERAL_LOOKASIDE LookasideList;
2316 PEPROCESS Process;
2317
2318 //
2319 // Check if any of the debug flags are enabled
2320 //
2321 if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
2322 POOL_FLAG_CHECK_WORKERS |
2323 POOL_FLAG_CHECK_RESOURCES |
2324 POOL_FLAG_VERIFIER |
2325 POOL_FLAG_CHECK_DEADLOCK |
2326 POOL_FLAG_SPECIAL_POOL))
2327 {
2328 //
2329 // Check if special pool is enabled
2330 //
2331 if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
2332 {
2333 //
2334 // Check if it was allocated from a special pool
2335 //
2336 if (MmIsSpecialPoolAddress(P))
2337 {
2338 //
2339 // Was deadlock verification also enabled? We can do some extra
2340 // checks at this point
2341 //
2342 if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
2343 {
2344 DPRINT1("Verifier not yet supported\n");
2345 }
2346
2347 //
2348 // It is, so handle it via special pool free routine
2349 //
2350 MmFreeSpecialPool(P);
2351 return;
2352 }
2353 }
2354
2355 //
2356 // For non-big page allocations, we'll do a bunch of checks in here
2357 //
2358 if (PAGE_ALIGN(P) != P)
2359 {
2360 //
2361 // Get the entry for this pool allocation
2362 // The pointer math here may look wrong or confusing, but it is quite right
2363 //
2364 Entry = P;
2365 Entry--;
2366
2367 //
2368 // Get the pool type
2369 //
2370 PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2371
2372 //
2373 // FIXME: Many other debugging checks go here
2374 //
2375 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2376 }
2377 }
2378
2379 //
2380 // Check if this is a big page allocation
2381 //
2382 if (PAGE_ALIGN(P) == P)
2383 {
2384 //
2385 // We need to find the tag for it, so first we need to find out what
2386 // kind of allocation this was (paged or nonpaged), then we can go
2387 // ahead and try finding the tag for it. Remember to get rid of the
2388 // PROTECTED_POOL tag if it's found.
2389 //
2390 // Note that if, at insertion time, we failed to add the tag for a big
2391 // pool allocation, we used a special tag called 'BIG' to identify the
2392 // allocation, and we may get this tag back. In this scenario, we must
2393 // manually get the size of the allocation by actually counting through
2394 // the PFN database.
2395 //
2396 PoolType = MmDeterminePoolType(P);
2397 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2398 Tag = ExpFindAndRemoveTagBigPages(P, &PageCount, PoolType);
2399 if (!Tag)
2400 {
2401 DPRINT1("We do not know the size of this allocation. This is not yet supported\n");
2402 ASSERT(Tag == ' GIB');
2403 PageCount = 1; // We are going to lie! This might screw up accounting?
2404 }
2405 else if (Tag & PROTECTED_POOL)
2406 {
2407 Tag &= ~PROTECTED_POOL;
2408 }
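//
// Illustrative sketch (not compiled; StoredTag is a hypothetical name):
// PROTECTED_POOL is the high bit of the stored tag, so masking it off --
// as done just above -- recovers the tag that the caller passed and that
// the check below compares against.
//
#if 0
BOOLEAN WasProtected = (StoredTag & PROTECTED_POOL) != 0;
ULONG VisibleTag = StoredTag & ~PROTECTED_POOL;
#endif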
2409
2410 //
2411 // Check block tag
2412 //
2413 if (TagToFree && TagToFree != Tag)
2414 {
2415 DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
2416 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
2417 }
2418
2419 //
2420 // We have our tag and our page count, so we can go ahead and remove this
2421 // tracker now
2422 //
2423 ExpRemovePoolTracker(Tag, PageCount << PAGE_SHIFT, PoolType);
2424
2425 //
2426 // Check if any of the debug flags are enabled
2427 //
2428 if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
2429 POOL_FLAG_CHECK_WORKERS |
2430 POOL_FLAG_CHECK_RESOURCES |
2431 POOL_FLAG_CHECK_DEADLOCK))
2432 {
2433 //
2434 // Was deadlock verification also enabled? We can do some extra
2435 // checks at this point
2436 //
2437 if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
2438 {
2439 DPRINT1("Verifier not yet supported\n");
2440 }
2441
2442 //
2443 // FIXME: Many debugging checks go here
2444 //
2445 }
2446
2447 //
2448 // Update counters
2449 //
2450 PoolDesc = PoolVector[PoolType];
2451 InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
2452 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes,
2453 -(LONG_PTR)(PageCount << PAGE_SHIFT));
2454
2455 //
2456 // Do the real free now and update the last counter with the big page count
2457 //
2458 RealPageCount = MiFreePoolPages(P);
2459 ASSERT(RealPageCount == PageCount);
2460 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
2461 -(LONG)RealPageCount);
2462 return;
2463 }
2464
2465 //
2466 // Get the entry for this pool allocation
2467 // The pointer math here may look wrong or confusing, but it is quite right
2468 //
2469 Entry = P;
2470 Entry--;
2471 ASSERT((ULONG_PTR)Entry % POOL_BLOCK_SIZE == 0);
2472
2473 //
2474 // Get the size of the entry and its pool type, then load the descriptor
2475 // for this pool type
2476 //
2477 BlockSize = Entry->BlockSize;
2478 PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2479 PoolDesc = PoolVector[PoolType];
2480
2481 //
2482 // Make sure that the IRQL makes sense
2483 //
2484 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2485
2486 //
2487 // Get the pool tag and get rid of the PROTECTED_POOL flag
2488 //
2489 Tag = Entry->PoolTag;
2490 if (Tag & PROTECTED_POOL) Tag &= ~PROTECTED_POOL;
2491
2492 //
2493 // Check block tag
2494 //
2495 if (TagToFree && TagToFree != Tag)
2496 {
2497 DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
2498 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
2499 }
2500
2501 //
2502 // Track the removal of this allocation
2503 //
2504 ExpRemovePoolTracker(Tag,
2505 BlockSize * POOL_BLOCK_SIZE,
2506 Entry->PoolType - 1);
2507
2508 //
2509 // Release pool quota, if any
2510 //
2511 if ((Entry->PoolType - 1) & QUOTA_POOL_MASK)
2512 {
2513 Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
2514 if (Process)
2515 {
2516 if (Process->Pcb.Header.Type != ProcessObject)
2517 {
2518 DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
2519 Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
2520 KeBugCheckEx(BAD_POOL_CALLER,
2521 0x0D,
2522 (ULONG_PTR)P,
2523 Tag,
2524 (ULONG_PTR)Process);
2525 }
2526 PsReturnPoolQuota(Process, PoolType, BlockSize * POOL_BLOCK_SIZE);
2527 ObDereferenceObject(Process);
2528 }
2529 }
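//
// Illustrative layout sketch (not compiled): quota-charged blocks keep the
// owning EPROCESS pointer in the last pointer-sized slot of the block,
// just before the next header -- which is what the [-1] indexing above
// reads back:
//
// [POOL_HEADER][ caller data ... ][EPROCESS*][next POOL_HEADER]
//
#if 0
PEPROCESS Owner = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
#endif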
2530
2531 //
2532 // Is this allocation small enough to have come from a lookaside list?
2533 //
2534 if (BlockSize <= NUMBER_POOL_LOOKASIDE_LISTS)
2535 {
2536 //
2537 // Try pushing it into the per-CPU lookaside list
2538 //
2539 LookasideList = (PoolType == PagedPool) ?
2540 Prcb->PPPagedLookasideList[BlockSize - 1].P :
2541 Prcb->PPNPagedLookasideList[BlockSize - 1].P;
2542 LookasideList->TotalFrees++;
2543 if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2544 {
2545 LookasideList->FreeHits++;
2546 InterlockedPushEntrySList(&LookasideList->ListHead, P);
2547 return;
2548 }
2549
2550 //
2551 // We failed, try to push it into the global lookaside list
2552 //
2553 LookasideList = (PoolType == PagedPool) ?
2554 Prcb->PPPagedLookasideList[BlockSize - 1].L :
2555 Prcb->PPNPagedLookasideList[BlockSize - 1].L;
2556 LookasideList->TotalFrees++;
2557 if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2558 {
2559 LookasideList->FreeHits++;
2560 InterlockedPushEntrySList(&LookasideList->ListHead, P);
2561 return;
2562 }
2563 }
2564
2565 //
2566 // Get the pointer to the next entry
2567 //
2568 NextEntry = POOL_BLOCK(Entry, BlockSize);
2569
2570 //
2571 // Update performance counters
2572 //
2573 InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
2574 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, -BlockSize * POOL_BLOCK_SIZE);
2575
2576 //
2577 // Acquire the pool lock
2578 //
2579 OldIrql = ExLockPool(PoolDesc);
2580
2581 //
2582 // Check if the next allocation is at the end of the page
2583 //
2584 ExpCheckPoolBlocks(Entry);
2585 if (PAGE_ALIGN(NextEntry) != NextEntry)
2586 {
2587 //
2588 // We may be able to combine the block if it's free
2589 //
2590 if (NextEntry->PoolType == 0)
2591 {
2592 //
2593 // The next block is free, so we'll do a combine
2594 //
2595 Combined = TRUE;
2596
2597 //
2598 // Make sure there's actual data in the block -- anything smaller
2599 // than this means we only have the header, so there's no linked list
2600 // for us to remove
2601 //
2602 if (NextEntry->BlockSize != 1)
2603 {
2604 //
2605 // The block is at least big enough to have a linked list, so go
2606 // ahead and remove it
2607 //
2608 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
2609 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
2610 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
2611 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
2612 }
2613
2614 //
2615 // Our entry is now combined with the next entry
2616 //
2617 Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
2618 }
2619 }
2620
2621 //
2622 // Now check if there was a previous entry on the same page as us
2623 //
2624 if (Entry->PreviousSize)
2625 {
2626 //
2627 // Great, grab that entry and check if it's free
2628 //
2629 NextEntry = POOL_PREV_BLOCK(Entry);
2630 if (NextEntry->PoolType == 0)
2631 {
2632 //
2633 // It is, so we can do a combine
2634 //
2635 Combined = TRUE;
2636
2637 //
2638 // Make sure there's actual data in the block -- anything smaller
2639 // than this means we only have the header so there's no linked list
2640 // for us to remove
2641 //
2642 if (NextEntry->BlockSize != 1)
2643 {
2644 //
2645 // The block is at least big enough to have a linked list, so go
2646 // ahead and remove it
2647 //
2648 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
2649 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
2650 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
2651 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
2652 }
2653
2654 //
2655 // Combine our original block (which might've already been combined
2656 // with the next block), into the previous block
2657 //
2658 NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;
2659
2660 //
2661 // And now we'll work with the previous block instead
2662 //
2663 Entry = NextEntry;
2664 }
2665 }
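//
// Worked example of the coalescing above (illustrative): freeing the middle
// block of the layout
//
// [A: 6 units, free][B: 4 units, being freed][C: 8 units, free]
//
// first absorbs C into B (BlockSize 4 -> 12), then absorbs B into A
// (BlockSize 6 -> 18), and Entry now designates A; the successor's
// PreviousSize is fixed up to 18 further below.
//
#if 0
ASSERT(Combined);
ASSERT(Entry->BlockSize == 6 + 4 + 8); /* in this example only */
#endif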
2666
2667 //
2668 // By now, it may have been possible for our combined blocks to actually
2669 // have made up a full page (if there were only 2-3 allocations on the
2670 // page, they could've all been combined).
2671 //
2672 if ((PAGE_ALIGN(Entry) == Entry) &&
2673 (PAGE_ALIGN(POOL_NEXT_BLOCK(Entry)) == POOL_NEXT_BLOCK(Entry)))
2674 {
2675 //
2676 // In this case, release the pool lock, update the performance counter,
2677 // and free the page
2678 //
2679 ExUnlockPool(PoolDesc, OldIrql);
2680 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalPages, -1);
2681 MiFreePoolPages(Entry);
2682 return;
2683 }
2684
2685 //
2686 // Otherwise, we now have a free block (or a combination of 2 or 3)
2687 //
2688 Entry->PoolType = 0;
2689 BlockSize = Entry->BlockSize;
2690 ASSERT(BlockSize != 1);
2691
2692 //
2693 // Check if we actually did combine it with anyone
2694 //
2695 if (Combined)
2696 {
2697 //
2698 // Get the first combined block (either our original to begin with, or
2699 // the one after the original, depending if we combined with the previous)
2700 //
2701 NextEntry = POOL_NEXT_BLOCK(Entry);
2702
2703 //
2704 // As long as the next block isn't on a page boundary, have it point
2705 // back to us
2706 //
2707 if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
2708 }
2709
2710 //
2711 // Insert this new free block, and release the pool lock
2712 //
2713 ExpInsertPoolHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
2714 ExpCheckPoolLinks(POOL_FREE_BLOCK(Entry));
2715 ExUnlockPool(PoolDesc, OldIrql);
2716 }
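//
// Usage sketch (illustrative only; ExampleAllocateAndFree and the 'lpxE'
// tag are hypothetical): the standard allocate/free pairing. Freeing with
// a different, non-zero tag trips the BAD_POOL_CALLER (0x0A) bugcheck
// seen above; a tag of 0 (what ExFreePool passes below) skips the check.
//
#if 0
static VOID ExampleAllocateAndFree(VOID)
{
PVOID Buffer = ExAllocatePoolWithTag(NonPagedPool, 64, 'lpxE');
if (Buffer)
{
/* ... use the 64 bytes ... */
ExFreePoolWithTag(Buffer, 'lpxE');
}
}
#endif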
2717
2718 /*
2719 * @implemented
2720 */
2721 VOID
2722 NTAPI
2723 ExFreePool(PVOID P)
2724 {
2725 //
2726 // Just free without checking for the tag
2727 //
2728 ExFreePoolWithTag(P, 0);
2729 }
2730
2731 /*
2732 * @unimplemented
2733 */
2734 SIZE_T
2735 NTAPI
2736 ExQueryPoolBlockSize(IN PVOID PoolBlock,
2737 OUT PBOOLEAN QuotaCharged)
2738 {
2739 //
2740 // Not implemented
2741 //
2742 UNIMPLEMENTED;
2743 return 0;
2744 }
2745
2746 /*
2747 * @implemented
2748 */
2750 PVOID
2751 NTAPI
2752 ExAllocatePoolWithQuota(IN POOL_TYPE PoolType,
2753 IN SIZE_T NumberOfBytes)
2754 {
2755 //
2756 // Allocate the pool
2757 //
2758 return ExAllocatePoolWithQuotaTag(PoolType, NumberOfBytes, TAG_NONE);
2759 }
2760
2761 /*
2762 * @implemented
2763 */
2764 PVOID
2765 NTAPI
2766 ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType,
2767 IN SIZE_T NumberOfBytes,
2768 IN ULONG Tag,
2769 IN EX_POOL_PRIORITY Priority)
2770 {
2771 PVOID Buffer;
2772
2773 //
2774 // Allocate the pool
2775 //
2776 Buffer = ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2777 if (Buffer == NULL)
2778 {
2779 UNIMPLEMENTED;
2780 }
2781
2782 return Buffer;
2783 }
2784
2785 /*
2786 * @implemented
2787 */
2788 PVOID
2789 NTAPI
2790 ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType,
2791 IN SIZE_T NumberOfBytes,
2792 IN ULONG Tag)
2793 {
2794 BOOLEAN Raise = TRUE;
2795 PVOID Buffer;
2796 PPOOL_HEADER Entry;
2797 NTSTATUS Status;
2798 PEPROCESS Process = PsGetCurrentProcess();
2799
2800 //
2801 // Check if we should fail instead of raising an exception
2802 //
2803 if (PoolType & POOL_QUOTA_FAIL_INSTEAD_OF_RAISE)
2804 {
2805 Raise = FALSE;
2806 PoolType &= ~POOL_QUOTA_FAIL_INSTEAD_OF_RAISE;
2807 }
2808
2809 //
2810 // Inject the pool quota mask
2811 //
2812 PoolType += QUOTA_POOL_MASK;
2813
2814 //
2815 // Check if we have enough space to add the quota owner process, as long as
2816 // this isn't the system process, which never gets charged quota
2817 //
2818 ASSERT(NumberOfBytes != 0);
2819 if ((NumberOfBytes <= (PAGE_SIZE - POOL_BLOCK_SIZE - sizeof(PVOID))) &&
2820 (Process != PsInitialSystemProcess))
2821 {
2822 //
2823 // Add space for our EPROCESS pointer
2824 //
2825 NumberOfBytes += sizeof(PEPROCESS);
2826 }
2827 else
2828 {
2829 //
2830 // We won't be able to store the pointer, so don't use quota for this
2831 //
2832 PoolType -= QUOTA_POOL_MASK;
2833 }
2834
2835 //
2836 // Allocate the pool buffer now
2837 //
2838 Buffer = ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2839
2840 //
2841 // If the buffer is page-aligned, this is a large page allocation and we
2842 // won't touch it
2843 //
2844 if (PAGE_ALIGN(Buffer) != Buffer)
2845 {
2846 //
2847 // Also if special pool is enabled, and this was allocated from there,
2848 // we won't touch it either
2849 //
2850 if ((ExpPoolFlags & POOL_FLAG_SPECIAL_POOL) &&
2851 (MmIsSpecialPoolAddress(Buffer)))
2852 {
2853 return Buffer;
2854 }
2855
2856 //
2857 // If it wasn't actually allocated with quota charges, ignore it too
2858 //
2859 if (!(PoolType & QUOTA_POOL_MASK)) return Buffer;
2860
2861 //
2862 // If this is the system process, we don't charge quota, so ignore
2863 //
2864 if (Process == PsInitialSystemProcess) return Buffer;
2865
2866 //
2867 // Actually go and charge quota for the process now
2868 //
2869 Entry = POOL_ENTRY(Buffer);
2870 Status = PsChargeProcessPoolQuota(Process,
2871 PoolType & BASE_POOL_TYPE_MASK,
2872 Entry->BlockSize * POOL_BLOCK_SIZE);
2873 if (!NT_SUCCESS(Status))
2874 {
2875 //
2876 // Quota failed, back out the allocation, clear the owner, and fail
2877 //
2878 ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
2879 ExFreePoolWithTag(Buffer, Tag);
2880 if (Raise) RtlRaiseStatus(Status);
2881 return NULL;
2882 }
2883
2884 //
2885 // Quota worked, write the owner and then reference it before returning
2886 //
2887 ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = Process;
2888 ObReferenceObject(Process);
2889 }
2890 else if (!(Buffer) && (Raise))
2891 {
2892 //
2893 // The allocation failed, raise an error if we are in raise mode
2894 //
2895 RtlRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
2896 }
2897
2898 //
2899 // Return the allocated buffer
2900 //
2901 return Buffer;
2902 }
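//
// Usage sketch (illustrative only; ExampleQuotaAllocate and 'lpxE' are
// hypothetical): opting out of the exception behavior with
// POOL_QUOTA_FAIL_INSTEAD_OF_RAISE, which the function above strips from
// the pool type and converts into a NULL return on failure.
//
#if 0
static VOID ExampleQuotaAllocate(VOID)
{
PVOID Buffer = ExAllocatePoolWithQuotaTag(PagedPool | POOL_QUOTA_FAIL_INSTEAD_OF_RAISE,
128,
'lpxE');
if (!Buffer)
{
/* Quota charge or allocation failed; no exception was raised */
}
}
#endif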
2903
2904 #if DBG && defined(KDBG)
2905
2906 BOOLEAN
2907 ExpKdbgExtPool(
2908 ULONG Argc,
2909 PCHAR Argv[])
2910 {
2911 ULONG_PTR Address = 0, Flags = 0;
2912 PVOID PoolPage;
2913 PPOOL_HEADER Entry;
2914 BOOLEAN ThisOne;
2915 PULONG Data;
2916
2917 if (Argc > 1)
2918 {
2919 /* Get address */
2920 if (!KdbpGetHexNumber(Argv[1], &Address))
2921 {
2922 KdbpPrint("Invalid parameter: %s\n", Argv[1]);
2923 return TRUE;
2924 }
2925 }
2926
2927 if (Argc > 2)
2928 {
2929 /* Get flags */
2930 if (!KdbpGetHexNumber(Argv[2], &Flags))
2931 {
2932 KdbpPrint("Invalid parameter: %s\n", Argv[2]);
2933 return TRUE;
2934 }
2935 }
2936
2937 /* Check if we got an address */
2938 if (Address != 0)
2939 {
2940 /* Get the base page */
2941 PoolPage = PAGE_ALIGN(Address);
2942 }
2943 else
2944 {
2945 KdbpPrint("Dumping the whole pool without an address is not implemented\n");
2946 return TRUE;
2947 }
2948
2949 /* No paging support! */
2950 if (!MmIsAddressValid(PoolPage))
2951 {
2952 KdbpPrint("Address not accessible!\n");
2953 return TRUE;
2954 }
2955
2956 /* Get pool type */
2957 if ((Address >= (ULONG_PTR)MmPagedPoolStart) && (Address <= (ULONG_PTR)MmPagedPoolEnd))
2958 KdbpPrint("Allocation is from PagedPool region\n");
2959 else if ((Address >= (ULONG_PTR)MmNonPagedPoolStart) && (Address <= (ULONG_PTR)MmNonPagedPoolEnd))
2960 KdbpPrint("Allocation is from NonPagedPool region\n");
2961 else
2962 {
2963 KdbpPrint("Address 0x%p is not within any pool!\n", (PVOID)Address);
2964 return TRUE;
2965 }
2966
2967 /* Loop all entries of that page */
2968 Entry = PoolPage;
2969 do
2970 {
2971 /* Check if the address is within that entry */
2972 ThisOne = ((Address >= (ULONG_PTR)Entry) &&
2973 (Address < (ULONG_PTR)(Entry + Entry->BlockSize)));
2974
2975 if (!(Flags & 1) || ThisOne)
2976 {
2977 /* Print the line */
2978 KdbpPrint("%c%p size: %4d previous size: %4d %s %.4s\n",
2979 ThisOne ? '*' : ' ', Entry, Entry->BlockSize, Entry->PreviousSize,
2980 (Flags & 0x80000000) ? "" : (Entry->PoolType ? "(Allocated)" : "(Free) "),
2981 (Flags & 0x80000000) ? "" : (PCHAR)&Entry->PoolTag);
2982 }
2983
2984 if (Flags & 1)
2985 {
2986 Data = (PULONG)(Entry + 1);
2987 KdbpPrint(" %p %08lx %08lx %08lx %08lx\n"
2988 " %p %08lx %08lx %08lx %08lx\n",
2989 &Data[0], Data[0], Data[1], Data[2], Data[3],
2990 &Data[4], Data[4], Data[5], Data[6], Data[7]);
2991 }
2992
2993 /* Go to next entry */
2994 Entry = POOL_BLOCK(Entry, Entry->BlockSize);
2995 }
2996 while ((Entry->BlockSize != 0) && ((ULONG_PTR)Entry < (ULONG_PTR)PoolPage + PAGE_SIZE));
2997
2998 return TRUE;
2999 }
3000
3001 static
3002 VOID
3003 ExpKdbgExtPoolUsedGetTag(PCHAR Arg, PULONG Tag, PULONG Mask)
3004 {
3005 CHAR Tmp[4] = { 0 }; /* Zero-init so tags shorter than 4 chars don't pick up garbage */
3006 SIZE_T Len;
3007 USHORT i;
3008
3009 /* Get the tag */
3010 Len = strlen(Arg);
3011 if (Len > 4)
3012 {
3013 Len = 4;
3014 }
3015
3016 /* Generate the mask to have wildcards support */
3017 for (i = 0; i < Len; ++i)
3018 {
3019 Tmp[i] = Arg[i];
3020 if (Tmp[i] != '?')
3021 {
3022 *Mask |= (0xFF << i * 8);
3023 }
3024 }
3025
3026 /* Get the tag in the ulong form */
3027 *Tag = *((PULONG)Tmp);
3028 }
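//
// Worked example (illustrative only; ExampleTagMask is hypothetical): for
// the argument "Ntf?" the loop above copies the three literal characters
// and leaves byte 3 unmasked because of the '?':
//
// Tag = 'N' | 't' << 8 | 'f' << 16 | '?' << 24
// Mask = 0x00FFFFFF
//
// so the dumper matches any tag whose first three characters are "Ntf".
//
#if 0
static VOID ExampleTagMask(VOID)
{
ULONG Tag = 0, Mask = 0;
ExpKdbgExtPoolUsedGetTag("Ntf?", &Tag, &Mask);
ASSERT(Mask == 0x00FFFFFF);
ASSERT((Tag & Mask) == ((ULONG)'N' | 't' << 8 | 'f' << 16));
}
#endif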
3029
3030 BOOLEAN
3031 ExpKdbgExtPoolUsed(
3032 ULONG Argc,
3033 PCHAR Argv[])
3034 {
3035 ULONG Tag = 0;
3036 ULONG Mask = 0;
3037 ULONG Flags = 0;
3038
3039 if (Argc > 1)
3040 {
3041 /* If we have 2+ args, easy: flags then tag */
3042 if (Argc > 2)
3043 {
3044 ExpKdbgExtPoolUsedGetTag(Argv[2], &Tag, &Mask);
3045 if (!KdbpGetHexNumber(Argv[1], &Flags))
3046 {
3047 KdbpPrint("Invalid parameter: %s\n", Argv[1]);
3048 }
3049 }
3050 else
3051 {
3052 /* Otherwise, try to find out whether that's flags */
3053 if (strlen(Argv[1]) == 1 ||
3054 (strlen(Argv[1]) == 3 && Argv[1][0] == '0' && Argv[1][1] == 'x'))
3055 {
3056 /* Fallback: if reading flags failed, assume it's a tag */
3057 if (!KdbpGetHexNumber(Argv[1], &Flags))
3058 {
3059 ExpKdbgExtPoolUsedGetTag(Argv[1], &Tag, &Mask);
3060 }
3061 }
3062 /* Or tag */
3063 else
3064 {
3065 ExpKdbgExtPoolUsedGetTag(Argv[1], &Tag, &Mask);
3066 }
3067 }
3068 }
3069
3070 /* Call the dumper */
3071 MiDumpPoolConsumers(TRUE, Tag, Mask, Flags);
3072
3073 return TRUE;
3074 }
3075
3076 #endif // DBG && KDBG
3077
3078 /* EOF */