[NTOSKRNL] Reimplement !irpfind using !poolfind helpers
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/expool.c
5 * PURPOSE: ARM Memory Manager Executive Pool Manager
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 #define MODULE_INVOLVED_IN_ARM3
16 #include <mm/ARM3/miarm.h>
17
18 #undef ExAllocatePoolWithQuota
19 #undef ExAllocatePoolWithQuotaTag
20
21 /* GLOBALS ********************************************************************/
22
23 #define POOL_BIG_TABLE_ENTRY_FREE 0x1
24
25 typedef struct _POOL_DPC_CONTEXT
26 {
27 PPOOL_TRACKER_TABLE PoolTrackTable;
28 SIZE_T PoolTrackTableSize;
29 PPOOL_TRACKER_TABLE PoolTrackTableExpansion;
30 SIZE_T PoolTrackTableSizeExpansion;
31 } POOL_DPC_CONTEXT, *PPOOL_DPC_CONTEXT;
32
33 ULONG ExpNumberOfPagedPools;
34 POOL_DESCRIPTOR NonPagedPoolDescriptor;
35 PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16 + 1];
36 PPOOL_DESCRIPTOR PoolVector[2];
37 PKGUARDED_MUTEX ExpPagedPoolMutex;
38 SIZE_T PoolTrackTableSize, PoolTrackTableMask;
39 SIZE_T PoolBigPageTableSize, PoolBigPageTableHash;
40 PPOOL_TRACKER_TABLE PoolTrackTable;
41 PPOOL_TRACKER_BIG_PAGES PoolBigPageTable;
42 KSPIN_LOCK ExpTaggedPoolLock;
43 ULONG PoolHitTag;
44 BOOLEAN ExStopBadTags;
45 KSPIN_LOCK ExpLargePoolTableLock;
46 ULONG ExpPoolBigEntriesInUse;
47 ULONG ExpPoolFlags;
48 ULONG ExPoolFailures;
49
50 /* Pool block/header/list access macros */
51 #define POOL_ENTRY(x) (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
52 #define POOL_FREE_BLOCK(x) (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
53 #define POOL_BLOCK(x, i) (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
54 #define POOL_NEXT_BLOCK(x) POOL_BLOCK((x), (x)->BlockSize)
55 #define POOL_PREV_BLOCK(x) POOL_BLOCK((x), -((x)->PreviousSize))
56
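//
// Illustrative sketch (an assumption, not part of the original file): how the
// macros above walk a pool page. POOL_NEXT_BLOCK advances by BlockSize units
// of POOL_BLOCK_SIZE (8 bytes on x86, 16 on x64), so two neighboring headers
// must agree on each other's sizes:
//
#if 0
PPOOL_HEADER First = PAGE_ALIGN(SomePageAddress);  // hypothetical page start
PPOOL_HEADER Second = POOL_NEXT_BLOCK(First);      // skips First's whole block
ASSERT(Second->PreviousSize == First->BlockSize);  // what ExpCheckPoolHeader verifies
#endif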
57 /*
58 * Pool list access debug macros, similar to Arthur's pfnlist.c work.
59 * Microsoft actually implements similar checks in the Windows Server 2003 SP1
60 * pool code, but only for checked builds.
61 *
62 * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
63 * that these checks are done even on retail builds, due to the increasing
64 * number of kernel-mode attacks which depend on dangling list pointers and other
65 * kinds of list-based attacks.
66 *
67 * For now, I will leave these checks on all the time, but later they are likely
68 * to be DBG-only, at least until there are enough kernel-mode security attacks
69 * against ReactOS to warrant the performance hit.
70 *
71 * For now, these are not made inline, so we can get good stack traces.
72 */
73 PLIST_ENTRY
74 NTAPI
75 ExpDecodePoolLink(IN PLIST_ENTRY Link)
76 {
77 return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
78 }
79
80 PLIST_ENTRY
81 NTAPI
82 ExpEncodePoolLink(IN PLIST_ENTRY Link)
83 {
84 return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
85 }
86
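//
// Illustrative sketch (an assumption, not part of the original file): pool
// list entries are always at least 8-byte aligned, so bit 0 of a link pointer
// is free, and the encode/decode pair above uses it to tag these links,
// catching code that manipulates the lists with the raw LIST_ENTRY macros:
//
#if 0
LIST_ENTRY Dummy;
PLIST_ENTRY Encoded = ExpEncodePoolLink(&Dummy);   // sets bit 0 of the pointer
ASSERT(((ULONG_PTR)Encoded & 1) == 1);
ASSERT(ExpDecodePoolLink(Encoded) == &Dummy);      // masking bit 0 restores it
#endif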
87 VOID
88 NTAPI
89 ExpCheckPoolLinks(IN PLIST_ENTRY ListHead)
90 {
91 if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink) != ListHead) ||
92 (ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink) != ListHead))
93 {
94 KeBugCheckEx(BAD_POOL_HEADER,
95 3,
96 (ULONG_PTR)ListHead,
97 (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink),
98 (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink));
99 }
100 }
101
102 VOID
103 NTAPI
104 ExpInitializePoolListHead(IN PLIST_ENTRY ListHead)
105 {
106 ListHead->Flink = ListHead->Blink = ExpEncodePoolLink(ListHead);
107 }
108
109 BOOLEAN
110 NTAPI
111 ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)
112 {
113 return (ExpDecodePoolLink(ListHead->Flink) == ListHead);
114 }
115
116 VOID
117 NTAPI
118 ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
119 {
120 PLIST_ENTRY Blink, Flink;
121 Flink = ExpDecodePoolLink(Entry->Flink);
122 Blink = ExpDecodePoolLink(Entry->Blink);
123 Flink->Blink = ExpEncodePoolLink(Blink);
124 Blink->Flink = ExpEncodePoolLink(Flink);
125 }
126
127 PLIST_ENTRY
128 NTAPI
129 ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
130 {
131 PLIST_ENTRY Entry, Flink;
132 Entry = ExpDecodePoolLink(ListHead->Flink);
133 Flink = ExpDecodePoolLink(Entry->Flink);
134 ListHead->Flink = ExpEncodePoolLink(Flink);
135 Flink->Blink = ExpEncodePoolLink(ListHead);
136 return Entry;
137 }
138
139 PLIST_ENTRY
140 NTAPI
141 ExpRemovePoolTailList(IN PLIST_ENTRY ListHead)
142 {
143 PLIST_ENTRY Entry, Blink;
144 Entry = ExpDecodePoolLink(ListHead->Blink);
145 Blink = ExpDecodePoolLink(Entry->Blink);
146 ListHead->Blink = ExpEncodePoolLink(Blink);
147 Blink->Flink = ExpEncodePoolLink(ListHead);
148 return Entry;
149 }
150
151 VOID
152 NTAPI
153 ExpInsertPoolTailList(IN PLIST_ENTRY ListHead,
154 IN PLIST_ENTRY Entry)
155 {
156 PLIST_ENTRY Blink;
157 ExpCheckPoolLinks(ListHead);
158 Blink = ExpDecodePoolLink(ListHead->Blink);
159 Entry->Flink = ExpEncodePoolLink(ListHead);
160 Entry->Blink = ExpEncodePoolLink(Blink);
161 Blink->Flink = ExpEncodePoolLink(Entry);
162 ListHead->Blink = ExpEncodePoolLink(Entry);
163 ExpCheckPoolLinks(ListHead);
164 }
165
166 VOID
167 NTAPI
168 ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead,
169 IN PLIST_ENTRY Entry)
170 {
171 PLIST_ENTRY Flink;
172 ExpCheckPoolLinks(ListHead);
173 Flink = ExpDecodePoolLink(ListHead->Flink);
174 Entry->Flink = ExpEncodePoolLink(Flink);
175 Entry->Blink = ExpEncodePoolLink(ListHead);
176 Flink->Blink = ExpEncodePoolLink(Entry);
177 ListHead->Flink = ExpEncodePoolLink(Entry);
178 ExpCheckPoolLinks(ListHead);
179 }
180
181 VOID
182 NTAPI
183 ExpCheckPoolHeader(IN PPOOL_HEADER Entry)
184 {
185 PPOOL_HEADER PreviousEntry, NextEntry;
186
187 /* Is there a block before this one? */
188 if (Entry->PreviousSize)
189 {
190 /* Get it */
191 PreviousEntry = POOL_PREV_BLOCK(Entry);
192
193 /* The two blocks must be on the same page! */
194 if (PAGE_ALIGN(Entry) != PAGE_ALIGN(PreviousEntry))
195 {
196 /* Something is awry */
197 KeBugCheckEx(BAD_POOL_HEADER,
198 6,
199 (ULONG_PTR)PreviousEntry,
200 __LINE__,
201 (ULONG_PTR)Entry);
202 }
203
204 /* This block should also indicate that it's as large as we think it is */
205 if (PreviousEntry->BlockSize != Entry->PreviousSize)
206 {
207 /* Otherwise, someone corrupted one of the sizes */
208 DPRINT1("PreviousEntry BlockSize %lu, tag %.4s. Entry PreviousSize %lu, tag %.4s\n",
209 PreviousEntry->BlockSize, (char *)&PreviousEntry->PoolTag,
210 Entry->PreviousSize, (char *)&Entry->PoolTag);
211 KeBugCheckEx(BAD_POOL_HEADER,
212 5,
213 (ULONG_PTR)PreviousEntry,
214 __LINE__,
215 (ULONG_PTR)Entry);
216 }
217 }
218 else if (PAGE_ALIGN(Entry) != Entry)
219 {
220 /* If there's no block before us, we are the first block, so we should be on a page boundary */
221 KeBugCheckEx(BAD_POOL_HEADER,
222 7,
223 0,
224 __LINE__,
225 (ULONG_PTR)Entry);
226 }
227
228 /* This block must have a size */
229 if (!Entry->BlockSize)
230 {
231 /* Someone must've corrupted this field */
232 if (Entry->PreviousSize)
233 {
234 PreviousEntry = POOL_PREV_BLOCK(Entry);
235 DPRINT1("PreviousEntry tag %.4s. Entry tag %.4s\n",
236 (char *)&PreviousEntry->PoolTag,
237 (char *)&Entry->PoolTag);
238 }
239 else
240 {
241 DPRINT1("Entry tag %.4s\n",
242 (char *)&Entry->PoolTag);
243 }
244 KeBugCheckEx(BAD_POOL_HEADER,
245 8,
246 0,
247 __LINE__,
248 (ULONG_PTR)Entry);
249 }
250
251 /* Okay, now get the next block */
252 NextEntry = POOL_NEXT_BLOCK(Entry);
253
254 /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
255 if (PAGE_ALIGN(NextEntry) != NextEntry)
256 {
257 /* The two blocks must be on the same page! */
258 if (PAGE_ALIGN(Entry) != PAGE_ALIGN(NextEntry))
259 {
260 /* Something is messed up */
261 KeBugCheckEx(BAD_POOL_HEADER,
262 9,
263 (ULONG_PTR)NextEntry,
264 __LINE__,
265 (ULONG_PTR)Entry);
266 }
267
268 /* And this block should think we are as large as we truly are */
269 if (NextEntry->PreviousSize != Entry->BlockSize)
270 {
271 /* Otherwise, someone corrupted the field */
272 DPRINT1("Entry BlockSize %lu, tag %.4s. NextEntry PreviousSize %lu, tag %.4s\n",
273 Entry->BlockSize, (char *)&Entry->PoolTag,
274 NextEntry->PreviousSize, (char *)&NextEntry->PoolTag);
275 KeBugCheckEx(BAD_POOL_HEADER,
276 5,
277 (ULONG_PTR)NextEntry,
278 __LINE__,
279 (ULONG_PTR)Entry);
280 }
281 }
282 }
283
284 VOID
285 NTAPI
286 ExpCheckPoolAllocation(
287 PVOID P,
288 POOL_TYPE PoolType,
289 ULONG Tag)
290 {
291 PPOOL_HEADER Entry;
292 ULONG i;
293 KIRQL OldIrql;
294 POOL_TYPE RealPoolType;
295
296 /* Get the pool header */
297 Entry = ((PPOOL_HEADER)P) - 1;
298
299 /* Check if this is a large allocation */
300 if (PAGE_ALIGN(P) == P)
301 {
302 /* Lock the pool table */
303 KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
304
305 /* Find the pool tag */
306 for (i = 0; i < PoolBigPageTableSize; i++)
307 {
308 /* Check if this is our allocation */
309 if (PoolBigPageTable[i].Va == P)
310 {
311 /* Make sure the tag is ok */
312 if (PoolBigPageTable[i].Key != Tag)
313 {
314 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, PoolBigPageTable[i].Key, Tag);
315 }
316
317 break;
318 }
319 }
320
321 /* Release the lock */
322 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
323
324 if (i == PoolBigPageTableSize)
325 {
326 /* Did not find the allocation */
327 //ASSERT(FALSE);
328 }
329
330 /* Get Pool type by address */
331 RealPoolType = MmDeterminePoolType(P);
332 }
333 else
334 {
335 /* Verify the tag */
336 if (Entry->PoolTag != Tag)
337 {
338 DPRINT1("Allocation has wrong pool tag! Expected '%.4s', got '%.4s' (0x%08lx)\n",
339 &Tag, &Entry->PoolTag, Entry->PoolTag);
340 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Entry->PoolTag, Tag);
341 }
342
343 /* Check the rest of the header */
344 ExpCheckPoolHeader(Entry);
345
346 /* Get Pool type from entry */
347 RealPoolType = (Entry->PoolType - 1);
348 }
349
350 /* Should we check the pool type? */
351 if (PoolType != -1)
352 {
353 /* Verify the pool type */
354 if (RealPoolType != PoolType)
355 {
356 DPRINT1("Wrong pool type! Expected %s, got %s\n",
357 PoolType & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool",
358 (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool");
359 KeBugCheckEx(BAD_POOL_CALLER, 0xCC, (ULONG_PTR)P, Entry->PoolTag, Tag);
360 }
361 }
362 }
363
364 VOID
365 NTAPI
366 ExpCheckPoolBlocks(IN PVOID Block)
367 {
368 BOOLEAN FoundBlock = FALSE;
369 SIZE_T Size = 0;
370 PPOOL_HEADER Entry;
371
372 /* Get the first entry for this page, make sure it really is the first */
373 Entry = PAGE_ALIGN(Block);
374 ASSERT(Entry->PreviousSize == 0);
375
376 /* Now scan each entry */
377 while (TRUE)
378 {
379 /* When we actually found our block, remember this */
380 if (Entry == Block) FoundBlock = TRUE;
381
382 /* Now validate this block header */
383 ExpCheckPoolHeader(Entry);
384
385 /* And go to the next one, keeping track of our size */
386 Size += Entry->BlockSize;
387 Entry = POOL_NEXT_BLOCK(Entry);
388
389 /* If we hit the last block, stop */
390 if (Size >= (PAGE_SIZE / POOL_BLOCK_SIZE)) break;
391
392 /* If we hit the end of the page, stop */
393 if (PAGE_ALIGN(Entry) == Entry) break;
394 }
395
396 /* We must've found our block, and we must have hit the end of the page */
397 if ((PAGE_ALIGN(Entry) != Entry) || !(FoundBlock))
398 {
399 /* Otherwise, the blocks are messed up */
400 KeBugCheckEx(BAD_POOL_HEADER, 10, (ULONG_PTR)Block, __LINE__, (ULONG_PTR)Entry);
401 }
402 }
403
404 FORCEINLINE
405 VOID
406 ExpCheckPoolIrqlLevel(IN POOL_TYPE PoolType,
407 IN SIZE_T NumberOfBytes,
408 IN PVOID Entry)
409 {
410 //
411 // Validate IRQL: It must be APC_LEVEL or lower for Paged Pool, and it must
412 // be DISPATCH_LEVEL or lower for Non Paged Pool
413 //
414 if (((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) ?
415 (KeGetCurrentIrql() > APC_LEVEL) :
416 (KeGetCurrentIrql() > DISPATCH_LEVEL))
417 {
418 //
419 // Take the system down
420 //
421 KeBugCheckEx(BAD_POOL_CALLER,
422 !Entry ? POOL_ALLOC_IRQL_INVALID : POOL_FREE_IRQL_INVALID,
423 KeGetCurrentIrql(),
424 PoolType,
425 !Entry ? NumberOfBytes : (ULONG_PTR)Entry);
426 }
427 }
428
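//
// Illustrative consequence (an assumption, not part of the original file): a
// paged pool request made at DISPATCH_LEVEL fails this check and bugchecks
// with BAD_POOL_CALLER / POOL_ALLOC_IRQL_INVALID instead of corrupting pool:
//
#if 0
KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);         // hypothetical buggy caller
ExAllocatePoolWithTag(PagedPool, 32, ' gaT');  // would bugcheck right here
#endif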
429 FORCEINLINE
430 ULONG
431 ExpComputeHashForTag(IN ULONG Tag,
432 IN SIZE_T BucketMask)
433 {
434 //
435 // Compute the hash by multiplying with a large prime number and then XORing
436 // with the HIDWORD of the result.
437 //
438 // Finally, AND with the bucket mask to generate a valid index/bucket into
439 // the table
440 //
441 ULONGLONG Result = (ULONGLONG)40543 * Tag;
442 return (ULONG)BucketMask & ((ULONG)Result ^ (Result >> 32));
443 }
444
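//
// Worked usage (an assumption, not part of the original file): for the default
// 2049-entry tracker table the mask is 2047 (0x7FF), so any tag hashes to a
// bucket in [0, 2047]:
//
#if 0
ULONG Bucket = ExpComputeHashForTag('looP', 0x7FF);  // 'looP' is the tag "Pool"
ASSERT(Bucket <= 0x7FF);
#endif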
445 FORCEINLINE
446 ULONG
447 ExpComputePartialHashForAddress(IN PVOID BaseAddress)
448 {
449 ULONG Result;
450 //
451 // Compute the hash by converting the address into a page number, and then
452 // XORing each nibble with the next one.
453 //
454 // We do *NOT* AND with the bucket mask at this point because big table expansion
455 // might happen. Therefore, the final step of the hash must be performed
456 // while holding the expansion pushlock, and this is why we call this a
457 // "partial" hash only.
458 //
459 Result = (ULONG)((ULONG_PTR)BaseAddress >> PAGE_SHIFT);
460 return (Result >> 24) ^ (Result >> 16) ^ (Result >> 8) ^ Result;
461 }
462
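//
// Worked usage (an assumption, not part of the original file): with 4KB pages,
// the address is first reduced to its page frame number, whose shifted copies
// are then folded together; the caller applies PoolBigPageTableHash later,
// under the table lock:
//
#if 0
ULONG Partial = ExpComputePartialHashForAddress((PVOID)0xFFB0D000);  // PFN 0xFFB0D
ULONG Bucket = Partial & PoolBigPageTableHash;  // the "final step" described above
#endif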
463 #if DBG
464 FORCEINLINE
465 BOOLEAN
466 ExpTagAllowPrint(CHAR Tag)
467 {
468 if ((Tag >= 'a' && Tag <= 'z') ||
469 (Tag >= 'A' && Tag <= 'Z') ||
470 (Tag >= '0' && Tag <= '9') ||
471 Tag == ' ' || Tag == '=' ||
472 Tag == '?' || Tag == '@')
473 {
474 return TRUE;
475 }
476
477 return FALSE;
478 }
479
480 #ifdef KDBG
481 #define MiDumperPrint(dbg, fmt, ...) \
482 if (dbg) KdbpPrint(fmt, ##__VA_ARGS__); \
483 else DPRINT1(fmt, ##__VA_ARGS__)
484 #else
485 #define MiDumperPrint(dbg, fmt, ...) \
486 DPRINT1(fmt, ##__VA_ARGS__)
487 #endif
488
489 VOID
490 MiDumpPoolConsumers(BOOLEAN CalledFromDbg, ULONG Tag, ULONG Mask, ULONG Flags)
491 {
492 SIZE_T i;
493 BOOLEAN Verbose;
494
495 //
496 // Only print header if called from OOM situation
497 //
498 if (!CalledFromDbg)
499 {
500 DPRINT1("---------------------\n");
501 DPRINT1("Out of memory dumper!\n");
502 }
503 #ifdef KDBG
504 else
505 {
506 KdbpPrint("Pool Used:\n");
507 }
508 #endif
509
510 //
511 // Remember whether we'll have to be verbose
512 // This is the only supported flag!
513 //
514 Verbose = BooleanFlagOn(Flags, 1);
515
516 //
517 // Print table header
518 //
519 if (Verbose)
520 {
521 MiDumperPrint(CalledFromDbg, "\t\t\t\tNonPaged\t\t\t\t\t\t\tPaged\n");
522 MiDumperPrint(CalledFromDbg, "Tag\t\tAllocs\t\tFrees\t\tDiff\t\tUsed\t\tAllocs\t\tFrees\t\tDiff\t\tUsed\n");
523 }
524 else
525 {
526 MiDumperPrint(CalledFromDbg, "\t\tNonPaged\t\t\tPaged\n");
527 MiDumperPrint(CalledFromDbg, "Tag\t\tAllocs\t\tUsed\t\tAllocs\t\tUsed\n");
528 }
529
530 //
531 // We'll extract allocations for all the tracked pools
532 //
533 for (i = 0; i < PoolTrackTableSize; ++i)
534 {
535 PPOOL_TRACKER_TABLE TableEntry;
536
537 TableEntry = &PoolTrackTable[i];
538
539 //
540 // We only care about tags which have allocated memory
541 //
542 if (TableEntry->NonPagedBytes != 0 || TableEntry->PagedBytes != 0)
543 {
544 //
545 // If there's a tag, attempt to do a pretty print
546 // only if it matches the caller's tag, or if
547 // any tag is allowed
548 // For checking whether it matches caller's tag,
549 // use the mask to make sure not to mess with the wildcards
550 //
551 if (TableEntry->Key != 0 && TableEntry->Key != TAG_NONE &&
552 (Tag == 0 || (TableEntry->Key & Mask) == (Tag & Mask)))
553 {
554 CHAR Tag[4];
555
556 //
557 // Extract each 'component' and check whether they are printable
558 //
559 Tag[0] = TableEntry->Key & 0xFF;
560 Tag[1] = TableEntry->Key >> 8 & 0xFF;
561 Tag[2] = TableEntry->Key >> 16 & 0xFF;
562 Tag[3] = TableEntry->Key >> 24 & 0xFF;
563
564 if (ExpTagAllowPrint(Tag[0]) && ExpTagAllowPrint(Tag[1]) && ExpTagAllowPrint(Tag[2]) && ExpTagAllowPrint(Tag[3]))
565 {
566 //
567 // Print in direct order to make !poolused TAG usage easier
568 //
569 if (Verbose)
570 {
571 MiDumperPrint(CalledFromDbg, "'%c%c%c%c'\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", Tag[0], Tag[1], Tag[2], Tag[3],
572 TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
573 (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
574 TableEntry->PagedAllocs, TableEntry->PagedFrees,
575 (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
576 }
577 else
578 {
579 MiDumperPrint(CalledFromDbg, "'%c%c%c%c'\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", Tag[0], Tag[1], Tag[2], Tag[3],
580 TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
581 TableEntry->PagedAllocs, TableEntry->PagedBytes);
582 }
583 }
584 else
585 {
586 if (Verbose)
587 {
588 MiDumperPrint(CalledFromDbg, "%x\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TableEntry->Key,
589 TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
590 (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
591 TableEntry->PagedAllocs, TableEntry->PagedFrees,
592 (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
593 }
594 else
595 {
596 MiDumperPrint(CalledFromDbg, "%x\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TableEntry->Key,
597 TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
598 TableEntry->PagedAllocs, TableEntry->PagedBytes);
599 }
600 }
601 }
602 else if (Tag == 0 || (Tag & Mask) == (TAG_NONE & Mask))
603 {
604 if (Verbose)
605 {
606 MiDumperPrint(CalledFromDbg, "Anon\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n",
607 TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
608 (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
609 TableEntry->PagedAllocs, TableEntry->PagedFrees,
610 (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
611 }
612 else
613 {
614 MiDumperPrint(CalledFromDbg, "Anon\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n",
615 TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
616 TableEntry->PagedAllocs, TableEntry->PagedBytes);
617 }
618 }
619 }
620 }
621
622 if (!CalledFromDbg)
623 {
624 DPRINT1("---------------------\n");
625 }
626 }
627 #endif
628
629 /* PRIVATE FUNCTIONS **********************************************************/
630
631 INIT_FUNCTION
632 VOID
633 NTAPI
634 ExpSeedHotTags(VOID)
635 {
636 ULONG i, Key, Hash, Index;
637 PPOOL_TRACKER_TABLE TrackTable = PoolTrackTable;
638 ULONG TagList[] =
639 {
640 ' oI',
641 ' laH',
642 'PldM',
643 'LooP',
644 'tSbO',
645 ' prI',
646 'bdDN',
647 'LprI',
648 'pOoI',
649 ' ldM',
650 'eliF',
651 'aVMC',
652 'dSeS',
653 'CFtN',
654 'looP',
655 'rPCT',
656 'bNMC',
657 'dTeS',
658 'sFtN',
659 'TPCT',
660 'CPCT',
661 ' yeK',
662 'qSbO',
663 'mNoI',
664 'aEoI',
665 'cPCT',
666 'aFtN',
667 '0ftN',
668 'tceS',
669 'SprI',
670 'ekoT',
671 ' eS',
672 'lCbO',
673 'cScC',
674 'lFtN',
675 'cAeS',
676 'mfSF',
677 'kWcC',
678 'miSF',
679 'CdfA',
680 'EdfA',
681 'orSF',
682 'nftN',
683 'PRIU',
684 'rFpN',
685 'RFpN',
686 'aPeS',
687 'sUeS',
688 'FpcA',
689 'MpcA',
690 'cSeS',
691 'mNbO',
692 'sFpN',
693 'uLeS',
694 'DPcS',
695 'nevE',
696 'vrqR',
697 'ldaV',
698 ' pP',
699 'SdaV',
700 ' daV',
701 'LdaV',
702 'FdaV',
703 ' GIB',
704 };
705
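//
// Note (an assumption, not part of the original file): the tags above read
// backwards because a four-character constant is stored as a little-endian
// ULONG, e.g. ' prI' is the tag "Irp " and 'looP' is "Pool" in memory:
//
#if 0
ULONG Key = ' prI';
ASSERT(((PCHAR)&Key)[0] == 'I');  // the lowest byte lands first in memory
#endif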
706 //
707 // Loop all 64 hot tags
708 //
709 ASSERT((sizeof(TagList) / sizeof(ULONG)) == 64);
710 for (i = 0; i < sizeof(TagList) / sizeof(ULONG); i++)
711 {
712 //
713 // Get the current tag, and compute its hash in the tracker table
714 //
715 Key = TagList[i];
716 Hash = ExpComputeHashForTag(Key, PoolTrackTableMask);
717
718 //
719 // Loop all the hashes in this index/bucket
720 //
721 Index = Hash;
722 while (TRUE)
723 {
724 //
725 // Find an empty entry, and make sure this isn't the last hash that
726 // can fit.
727 //
728 // On checked builds, also make sure this is the first time we are
729 // seeding this tag.
730 //
731 ASSERT(TrackTable[Hash].Key != Key);
732 if (!(TrackTable[Hash].Key) && (Hash != PoolTrackTableSize - 1))
733 {
734 //
735 // It has been seeded, move on to the next tag
736 //
737 TrackTable[Hash].Key = Key;
738 break;
739 }
740
741 //
742 // This entry was already taken, compute the next possible hash while
743 // making sure we're not back at our initial index.
744 //
745 ASSERT(TrackTable[Hash].Key != Key);
746 Hash = (Hash + 1) & PoolTrackTableMask;
747 if (Hash == Index) break;
748 }
749 }
750 }
751
752 VOID
753 NTAPI
754 ExpRemovePoolTracker(IN ULONG Key,
755 IN SIZE_T NumberOfBytes,
756 IN POOL_TYPE PoolType)
757 {
758 ULONG Hash, Index;
759 PPOOL_TRACKER_TABLE Table, TableEntry;
760 SIZE_T TableMask, TableSize;
761
762 //
763 // Remove the PROTECTED_POOL flag which is not part of the tag
764 //
765 Key &= ~PROTECTED_POOL;
766
767 //
768 // With WinDBG you can set a tag you want to break on when an allocation is
769 // attempted
770 //
771 if (Key == PoolHitTag) DbgBreakPoint();
772
773 //
774 // Why the double indirection? Because normally this function is also used
775 // when doing session pool allocations, which has another set of tables,
776 // sizes, and masks that live in session pool. Now we don't support session
777 // pool so we only ever use the regular tables, but I'm keeping the code this
778 // way so that the day we DO support session pool, it won't require that
779 // many changes
780 //
781 Table = PoolTrackTable;
782 TableMask = PoolTrackTableMask;
783 TableSize = PoolTrackTableSize;
784 DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize);
785
786 //
787 // Compute the hash for this key, and loop all the possible buckets
788 //
789 Hash = ExpComputeHashForTag(Key, TableMask);
790 Index = Hash;
791 while (TRUE)
792 {
793 //
794 // Have we found the entry for this tag?
795 //
796 TableEntry = &Table[Hash];
797 if (TableEntry->Key == Key)
798 {
799 //
800 // Decrement the counters depending on if this was paged or nonpaged
801 // pool
802 //
803 if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
804 {
805 InterlockedIncrement(&TableEntry->NonPagedFrees);
806 InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes,
807 -(SSIZE_T)NumberOfBytes);
808 return;
809 }
810 InterlockedIncrement(&TableEntry->PagedFrees);
811 InterlockedExchangeAddSizeT(&TableEntry->PagedBytes,
812 -(SSIZE_T)NumberOfBytes);
813 return;
814 }
815
816 //
817 // We should have only ended up with an empty entry if we've reached
818 // the last bucket
819 //
820 if (!TableEntry->Key)
821 {
822 DPRINT1("Empty item reached in tracker table. Hash=0x%lx, TableMask=0x%lx, Tag=0x%08lx, NumberOfBytes=%lu, PoolType=%d\n",
823 Hash, TableMask, Key, (ULONG)NumberOfBytes, PoolType);
824 ASSERT(Hash == TableMask);
825 }
826
827 //
828 // This path is hit when we don't have an entry, and the current bucket
829 // is full, so we simply try the next one
830 //
831 Hash = (Hash + 1) & TableMask;
832 if (Hash == Index) break;
833 }
834
835 //
836 // And finally this path is hit when all the buckets are full, and we need
837 // some expansion. This path is not yet supported in ReactOS and so we'll
838 // ignore the tag
839 //
840 DPRINT1("Out of pool tag space, ignoring...\n");
841 }
842
843 VOID
844 NTAPI
845 ExpInsertPoolTracker(IN ULONG Key,
846 IN SIZE_T NumberOfBytes,
847 IN POOL_TYPE PoolType)
848 {
849 ULONG Hash, Index;
850 KIRQL OldIrql;
851 PPOOL_TRACKER_TABLE Table, TableEntry;
852 SIZE_T TableMask, TableSize;
853
854 //
855 // Remove the PROTECTED_POOL flag which is not part of the tag
856 //
857 Key &= ~PROTECTED_POOL;
858
859 //
860 // With WinDBG you can set a tag you want to break on when an allocation is
861 // attempted
862 //
863 if (Key == PoolHitTag) DbgBreakPoint();
864
865 //
866 // There is also an internal flag you can set to break on malformed tags
867 //
868 if (ExStopBadTags) ASSERT(Key & 0xFFFFFF00);
869
870 //
871 // ASSERT on ReactOS features not yet supported
872 //
873 ASSERT(!(PoolType & SESSION_POOL_MASK));
874 ASSERT(KeGetCurrentProcessorNumber() == 0);
875
876 //
877 // Why the double indirection? Because normally this function is also used
878 // when doing session pool allocations, which has another set of tables,
879 // sizes, and masks that live in session pool. Now we don't support session
880 // pool so we only ever use the regular tables, but I'm keeping the code this
881 // way so that the day we DO support session pool, it won't require that
882 // many changes
883 //
884 Table = PoolTrackTable;
885 TableMask = PoolTrackTableMask;
886 TableSize = PoolTrackTableSize;
887 DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize);
888
889 //
890 // Compute the hash for this key, and loop all the possible buckets
891 //
892 Hash = ExpComputeHashForTag(Key, TableMask);
893 Index = Hash;
894 while (TRUE)
895 {
896 //
897 // Do we already have an entry for this tag?
898 //
899 TableEntry = &Table[Hash];
900 if (TableEntry->Key == Key)
901 {
902 //
903 // Increment the counters depending on if this was paged or nonpaged
904 // pool
905 //
906 if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
907 {
908 InterlockedIncrement(&TableEntry->NonPagedAllocs);
909 InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes, NumberOfBytes);
910 return;
911 }
912 InterlockedIncrement(&TableEntry->PagedAllocs);
913 InterlockedExchangeAddSizeT(&TableEntry->PagedBytes, NumberOfBytes);
914 return;
915 }
916
917 //
918 // We don't have an entry yet, but we've found a free bucket for it
919 //
920 if (!(TableEntry->Key) && (Hash != PoolTrackTableSize - 1))
921 {
922 //
923 // We need to hold the lock while creating a new entry, since other
924 // processors might be in this code path as well
925 //
926 ExAcquireSpinLock(&ExpTaggedPoolLock, &OldIrql);
927 if (!PoolTrackTable[Hash].Key)
928 {
929 //
930 // We've won the race, so now create this entry in the bucket
931 //
932 ASSERT(Table[Hash].Key == 0);
933 PoolTrackTable[Hash].Key = Key;
934 TableEntry->Key = Key;
935 }
936 ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);
937
938 //
939 // Now we force the loop to run again, and we should now end up in
940 // the code path above which does the interlocked increments...
941 //
942 continue;
943 }
944
945 //
946 // This path is hit when we don't have an entry, and the current bucket
947 // is full, so we simply try the next one
948 //
949 Hash = (Hash + 1) & TableMask;
950 if (Hash == Index) break;
951 }
952
953 //
954 // And finally this path is hit when all the buckets are full, and we need
955 // some expansion. This path is not yet supported in ReactOS and so we'll
956 // ignore the tag
957 //
958 DPRINT1("Out of pool tag space, ignoring...\n");
959 }
960
961 INIT_FUNCTION
962 VOID
963 NTAPI
964 ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor,
965 IN POOL_TYPE PoolType,
966 IN ULONG PoolIndex,
967 IN ULONG Threshold,
968 IN PVOID PoolLock)
969 {
970 PLIST_ENTRY NextEntry, LastEntry;
971
972 //
973 // Setup the descriptor based on the caller's request
974 //
975 PoolDescriptor->PoolType = PoolType;
976 PoolDescriptor->PoolIndex = PoolIndex;
977 PoolDescriptor->Threshold = Threshold;
978 PoolDescriptor->LockAddress = PoolLock;
979
980 //
981 // Initialize accounting data
982 //
983 PoolDescriptor->RunningAllocs = 0;
984 PoolDescriptor->RunningDeAllocs = 0;
985 PoolDescriptor->TotalPages = 0;
986 PoolDescriptor->TotalBytes = 0;
987 PoolDescriptor->TotalBigPages = 0;
988
989 //
990 // Nothing pending for now
991 //
992 PoolDescriptor->PendingFrees = NULL;
993 PoolDescriptor->PendingFreeDepth = 0;
994
995 //
996 // Loop all the descriptor's allocation lists and initialize them
997 //
998 NextEntry = PoolDescriptor->ListHeads;
999 LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
1000 while (NextEntry < LastEntry)
1001 {
1002 ExpInitializePoolListHead(NextEntry);
1003 NextEntry++;
1004 }
1005
1006 //
1007 // Note that ReactOS does not support Session Pool Yet
1008 //
1009 ASSERT(PoolType != PagedPoolSession);
1010 }
1011
1012 INIT_FUNCTION
1013 VOID
1014 NTAPI
1015 InitializePool(IN POOL_TYPE PoolType,
1016 IN ULONG Threshold)
1017 {
1018 PPOOL_DESCRIPTOR Descriptor;
1019 SIZE_T TableSize;
1020 ULONG i;
1021
1022 //
1023 // Check what kind of pool this is
1024 //
1025 if (PoolType == NonPagedPool)
1026 {
1027 //
1028 // Compute the track table size and convert it from a power of two to an
1029 // actual byte size
1030 //
1031 // NOTE: On checked builds, we'll assert if the registry table size was
1032 // invalid, while on retail builds we'll just break out of the loop at
1033 // that point.
1034 //
1035 TableSize = min(PoolTrackTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
1036 for (i = 0; i < 32; i++)
1037 {
1038 if (TableSize & 1)
1039 {
1040 ASSERT((TableSize & ~1) == 0);
1041 if (!(TableSize & ~1)) break;
1042 }
1043 TableSize >>= 1;
1044 }
1045
1046 //
1047 // If we hit bit 32, then no size was defined in the registry, so
1048 // we'll use the default size of 2048 entries.
1049 //
1050 // Otherwise, use the size from the registry, as long as it's not
1051 // smaller than 64 entries.
1052 //
1053 if (i == 32)
1054 {
1055 PoolTrackTableSize = 2048;
1056 }
1057 else
1058 {
1059 PoolTrackTableSize = max(1 << i, 64);
1060 }
1061
1062 //
1063 // Loop trying with the biggest specified size first, and cut it down
1064 // by a power of two each iteration in case not enough memory exist
1065 //
1066 while (TRUE)
1067 {
1068 //
1069 // Do not allow overflow
1070 //
1071 if ((PoolTrackTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_TABLE)))
1072 {
1073 PoolTrackTableSize >>= 1;
1074 continue;
1075 }
1076
1077 //
1078 // Allocate the tracker table and exit the loop if this worked
1079 //
1080 PoolTrackTable = MiAllocatePoolPages(NonPagedPool,
1081 (PoolTrackTableSize + 1) *
1082 sizeof(POOL_TRACKER_TABLE));
1083 if (PoolTrackTable) break;
1084
1085 //
1086 // Otherwise, as long as we're not down to the last bit, keep
1087 // iterating
1088 //
1089 if (PoolTrackTableSize == 1)
1090 {
1091 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1092 TableSize,
1093 0xFFFFFFFF,
1094 0xFFFFFFFF,
1095 0xFFFFFFFF);
1096 }
1097 PoolTrackTableSize >>= 1;
1098 }
1099
1100 //
1101 // Add one entry, compute the hash, and zero the table
1102 //
1103 PoolTrackTableSize++;
1104 PoolTrackTableMask = PoolTrackTableSize - 2;
1105
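//
// Worked example (an assumption, not part of the original file): with the
// default of 2048 entries, PoolTrackTableSize becomes 2049 after the
// increment and PoolTrackTableMask becomes 2047 (0x7FF), so hashed lookups
// stay within the first 2048 entries while the extra, final entry is kept
// out of normal use (see the "last hash that can fit" checks above).
//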
1106 RtlZeroMemory(PoolTrackTable,
1107 PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
1108
1109 //
1110 // Finally, add the most used tags to speed up those allocations
1111 //
1112 ExpSeedHotTags();
1113
1114 //
1115 // We now do the exact same thing with the tracker table for big pages
1116 //
1117 TableSize = min(PoolBigPageTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
1118 for (i = 0; i < 32; i++)
1119 {
1120 if (TableSize & 1)
1121 {
1122 ASSERT((TableSize & ~1) == 0);
1123 if (!(TableSize & ~1)) break;
1124 }
1125 TableSize >>= 1;
1126 }
1127
1128 //
1129 // For big pages, the default tracker table is 4096 entries, while the
1130 // minimum is still 64
1131 //
1132 if (i == 32)
1133 {
1134 PoolBigPageTableSize = 4096;
1135 }
1136 else
1137 {
1138 PoolBigPageTableSize = max(1 << i, 64);
1139 }
1140
1141 //
1142 // Again, run the exact same loop we ran earlier, but this time for the
1143 // big pool tracker instead
1144 //
1145 while (TRUE)
1146 {
1147 if ((PoolBigPageTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_BIG_PAGES)))
1148 {
1149 PoolBigPageTableSize >>= 1;
1150 continue;
1151 }
1152
1153 PoolBigPageTable = MiAllocatePoolPages(NonPagedPool,
1154 PoolBigPageTableSize *
1155 sizeof(POOL_TRACKER_BIG_PAGES));
1156 if (PoolBigPageTable) break;
1157
1158 if (PoolBigPageTableSize == 1)
1159 {
1160 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1161 TableSize,
1162 0xFFFFFFFF,
1163 0xFFFFFFFF,
1164 0xFFFFFFFF);
1165 }
1166
1167 PoolBigPageTableSize >>= 1;
1168 }
1169
1170 //
1171 // An extra entry is not needed for the big pool tracker, so just
1172 // compute the hash and zero it
1173 //
1174 PoolBigPageTableHash = PoolBigPageTableSize - 1;
1175 RtlZeroMemory(PoolBigPageTable,
1176 PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
1177 for (i = 0; i < PoolBigPageTableSize; i++) PoolBigPageTable[i].Va = (PVOID)1;
1178
1179 //
1180 // During development, print this out so we can see what's happening
1181 //
1182 DPRINT("EXPOOL: Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
1183 PoolTrackTable, PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
1184 DPRINT("EXPOOL: Big Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
1185 PoolBigPageTable, PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
1186
1187 //
1188 // Insert the generic tracker for all of big pool
1189 //
1190 ExpInsertPoolTracker('looP',
1191 ROUND_TO_PAGES(PoolBigPageTableSize *
1192 sizeof(POOL_TRACKER_BIG_PAGES)),
1193 NonPagedPool);
1194
1195 //
1196 // No support for NUMA systems at this time
1197 //
1198 ASSERT(KeNumberNodes == 1);
1199
1200 //
1201 // Initialize the tag spinlock
1202 //
1203 KeInitializeSpinLock(&ExpTaggedPoolLock);
1204
1205 //
1206 // Initialize the nonpaged pool descriptor
1207 //
1208 PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
1209 ExInitializePoolDescriptor(PoolVector[NonPagedPool],
1210 NonPagedPool,
1211 0,
1212 Threshold,
1213 NULL);
1214 }
1215 else
1216 {
1217 //
1218 // No support for NUMA systems at this time
1219 //
1220 ASSERT(KeNumberNodes == 1);
1221
1222 //
1223 // Allocate the pool descriptor
1224 //
1225 Descriptor = ExAllocatePoolWithTag(NonPagedPool,
1226 sizeof(KGUARDED_MUTEX) +
1227 sizeof(POOL_DESCRIPTOR),
1228 'looP');
1229 if (!Descriptor)
1230 {
1231 //
1232 // This is really bad...
1233 //
1234 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1235 0,
1236 -1,
1237 -1,
1238 -1);
1239 }
1240
1241 //
1242 // Setup the vector and guarded mutex for paged pool
1243 //
1244 PoolVector[PagedPool] = Descriptor;
1245 ExpPagedPoolMutex = (PKGUARDED_MUTEX)(Descriptor + 1);
1246 ExpPagedPoolDescriptor[0] = Descriptor;
1247 KeInitializeGuardedMutex(ExpPagedPoolMutex);
1248 ExInitializePoolDescriptor(Descriptor,
1249 PagedPool,
1250 0,
1251 Threshold,
1252 ExpPagedPoolMutex);
1253
1254 //
1255 // Insert the generic tracker for all of nonpaged pool
1256 //
1257 ExpInsertPoolTracker('looP',
1258 ROUND_TO_PAGES(PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE)),
1259 NonPagedPool);
1260 }
1261 }
1262
1263 FORCEINLINE
1264 KIRQL
1265 ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
1266 {
1267 //
1268 // Check if this is nonpaged pool
1269 //
1270 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1271 {
1272 //
1273 // Use the queued spin lock
1274 //
1275 return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
1276 }
1277 else
1278 {
1279 //
1280 // Use the guarded mutex
1281 //
1282 KeAcquireGuardedMutex(Descriptor->LockAddress);
1283 return APC_LEVEL;
1284 }
1285 }
1286
1287 FORCEINLINE
1288 VOID
1289 ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor,
1290 IN KIRQL OldIrql)
1291 {
1292 //
1293 // Check if this is nonpaged pool
1294 //
1295 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1296 {
1297 //
1298 // Use the queued spin lock
1299 //
1300 KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
1301 }
1302 else
1303 {
1304 //
1305 // Use the guarded mutex
1306 //
1307 KeReleaseGuardedMutex(Descriptor->LockAddress);
1308 }
1309 }
1310
1311 VOID
1312 NTAPI
1313 ExpGetPoolTagInfoTarget(IN PKDPC Dpc,
1314 IN PVOID DeferredContext,
1315 IN PVOID SystemArgument1,
1316 IN PVOID SystemArgument2)
1317 {
1318 PPOOL_DPC_CONTEXT Context = DeferredContext;
1319 UNREFERENCED_PARAMETER(Dpc);
1320 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
1321
1322 //
1323 // Make sure we win the race, and if we did, copy the data atomically
1324 //
1325 if (KeSignalCallDpcSynchronize(SystemArgument2))
1326 {
1327 RtlCopyMemory(Context->PoolTrackTable,
1328 PoolTrackTable,
1329 Context->PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
1330
1331 //
1332 // This is here because ReactOS does not yet support expansion
1333 //
1334 ASSERT(Context->PoolTrackTableSizeExpansion == 0);
1335 }
1336
1337 //
1338 // Regardless of whether we won or not, we must now synchronize and then
1339 // decrement the barrier since this is one more processor that has completed
1340 // the callback.
1341 //
1342 KeSignalCallDpcSynchronize(SystemArgument2);
1343 KeSignalCallDpcDone(SystemArgument1);
1344 }
1345
1346 NTSTATUS
1347 NTAPI
1348 ExGetPoolTagInfo(IN PSYSTEM_POOLTAG_INFORMATION SystemInformation,
1349 IN ULONG SystemInformationLength,
1350 IN OUT PULONG ReturnLength OPTIONAL)
1351 {
1352 ULONG TableSize, CurrentLength;
1353 ULONG EntryCount;
1354 NTSTATUS Status = STATUS_SUCCESS;
1355 PSYSTEM_POOLTAG TagEntry;
1356 PPOOL_TRACKER_TABLE Buffer, TrackerEntry;
1357 POOL_DPC_CONTEXT Context;
1358 ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
1359
1360 //
1361 // Keep track of how much data the caller's buffer must hold
1362 //
1363 CurrentLength = FIELD_OFFSET(SYSTEM_POOLTAG_INFORMATION, TagInfo);
1364
1365 //
1366 // Initialize the caller's buffer
1367 //
1368 TagEntry = &SystemInformation->TagInfo[0];
1369 SystemInformation->Count = 0;
1370
1371 //
1372 // Capture the number of entries, and the total size needed to make a copy
1373 // of the table
1374 //
1375 EntryCount = (ULONG)PoolTrackTableSize;
1376 TableSize = EntryCount * sizeof(POOL_TRACKER_TABLE);
1377
1378 //
1379 // Allocate the "Generic DPC" temporary buffer
1380 //
1381 Buffer = ExAllocatePoolWithTag(NonPagedPool, TableSize, 'ofnI');
1382 if (!Buffer) return STATUS_INSUFFICIENT_RESOURCES;
1383
1384 //
1385 // Do a "Generic DPC" to atomically retrieve the tag and allocation data
1386 //
1387 Context.PoolTrackTable = Buffer;
1388 Context.PoolTrackTableSize = PoolTrackTableSize;
1389 Context.PoolTrackTableExpansion = NULL;
1390 Context.PoolTrackTableSizeExpansion = 0;
1391 KeGenericCallDpc(ExpGetPoolTagInfoTarget, &Context);
1392
1393 //
1394 // Now parse the results
1395 //
1396 for (TrackerEntry = Buffer; TrackerEntry < (Buffer + EntryCount); TrackerEntry++)
1397 {
1398 //
1399 // If the entry is empty, skip it
1400 //
1401 if (!TrackerEntry->Key) continue;
1402
1403 //
1404 // Otherwise, add one more entry to the caller's buffer, and ensure that
1405 // enough space has been allocated in it
1406 //
1407 SystemInformation->Count++;
1408 CurrentLength += sizeof(*TagEntry);
1409 if (SystemInformationLength < CurrentLength)
1410 {
1411 //
1412 // The caller's buffer is too small, so set a failure code. The
1413 // caller will know the count, as well as how much space is needed.
1414 //
1415 // We do NOT break out of the loop, because we want to keep incrementing
1416 // the Count as well as CurrentLength so that the caller can know the
1417 // final numbers
1418 //
1419 Status = STATUS_INFO_LENGTH_MISMATCH;
1420 }
1421 else
1422 {
1423 //
1424 // Small sanity check that our accounting is working correctly
1425 //
1426 ASSERT(TrackerEntry->PagedAllocs >= TrackerEntry->PagedFrees);
1427 ASSERT(TrackerEntry->NonPagedAllocs >= TrackerEntry->NonPagedFrees);
1428
1429 //
1430 // Return the data into the caller's buffer
1431 //
1432 TagEntry->TagUlong = TrackerEntry->Key;
1433 TagEntry->PagedAllocs = TrackerEntry->PagedAllocs;
1434 TagEntry->PagedFrees = TrackerEntry->PagedFrees;
1435 TagEntry->PagedUsed = TrackerEntry->PagedBytes;
1436 TagEntry->NonPagedAllocs = TrackerEntry->NonPagedAllocs;
1437 TagEntry->NonPagedFrees = TrackerEntry->NonPagedFrees;
1438 TagEntry->NonPagedUsed = TrackerEntry->NonPagedBytes;
1439 TagEntry++;
1440 }
1441 }
1442
1443 //
1444 // Free the "Generic DPC" temporary buffer, return the buffer length and status
1445 //
1446 ExFreePoolWithTag(Buffer, 'ofnI');
1447 if (ReturnLength) *ReturnLength = CurrentLength;
1448 return Status;
1449 }
1450
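//
// Illustrative consumer (an assumption, not part of the original file): user
// mode reaches this routine through the SystemPoolTagInformation class, first
// probing for the required length as the STATUS_INFO_LENGTH_MISMATCH logic
// above expects:
//
#if 0
ULONG Needed = 0;
NTSTATUS Status = NtQuerySystemInformation(SystemPoolTagInformation,
                                           PoolTagBuffer,      // hypothetical buffer
                                           PoolTagBufferSize,  // hypothetical size
                                           &Needed);           // receives CurrentLength
#endif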
1451 BOOLEAN
1452 NTAPI
1453 ExpAddTagForBigPages(IN PVOID Va,
1454 IN ULONG Key,
1455 IN ULONG NumberOfPages,
1456 IN POOL_TYPE PoolType)
1457 {
1458 ULONG Hash, i = 0;
1459 PVOID OldVa;
1460 KIRQL OldIrql;
1461 SIZE_T TableSize;
1462 PPOOL_TRACKER_BIG_PAGES Entry, EntryEnd, EntryStart;
1463 ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
1464 ASSERT(!(PoolType & SESSION_POOL_MASK));
1465
1466 //
1467 // As the table is expandable, these values must only be read after acquiring
1468 // the lock to avoid a torn access during an expansion
1469 //
1470 Hash = ExpComputePartialHashForAddress(Va);
1471 KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
1472 Hash &= PoolBigPageTableHash;
1473 TableSize = PoolBigPageTableSize;
1474
1475 //
1476 // We loop from the current hash bucket to the end of the table, and then
1477 // rollover to hash bucket 0 and keep going from there. If we return back
1478 // to the beginning, then we attempt expansion at the bottom of the loop
1479 //
1480 EntryStart = Entry = &PoolBigPageTable[Hash];
1481 EntryEnd = &PoolBigPageTable[TableSize];
1482 do
1483 {
1484 //
1485 // Make sure that this is a free entry and attempt to atomically make the
1486 // entry busy now
1487 //
1488 OldVa = Entry->Va;
1489 if (((ULONG_PTR)OldVa & POOL_BIG_TABLE_ENTRY_FREE) &&
1490 (InterlockedCompareExchangePointer(&Entry->Va, Va, OldVa) == OldVa))
1491 {
1492 //
1493 // We now own this entry, write down the size and the pool tag
1494 //
1495 Entry->Key = Key;
1496 Entry->NumberOfPages = NumberOfPages;
1497
1498 //
1499 // Add one more entry to the count, and see if we're getting within
1500 // 25% of the table size, at which point we'll do an expansion now
1501 // to avoid blocking too hard later on.
1502 //
1503 // Note that we only do this once we have also lost the race, or failed
1504 // to find a free entry, at least 16 times, which implies a massive
1505 // number of concurrent big pool allocations.
1506 //
1507 InterlockedIncrementUL(&ExpPoolBigEntriesInUse);
1508 if ((i >= 16) && (ExpPoolBigEntriesInUse > (TableSize / 4)))
1509 {
1510 DPRINT("Should attempt expansion since we now have %lu entries\n",
1511 ExpPoolBigEntriesInUse);
1512 }
1513
1514 //
1515 // We have our entry, return
1516 //
1517 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1518 return TRUE;
1519 }
1520
1521 //
1522 // We don't have our entry yet, so keep trying, making the entry list
1523 // circular if we reach the last entry. We'll eventually break out of
1524 // the loop once we've rolled over and returned back to our original
1525 // hash bucket
1526 //
1527 i++;
1528 if (++Entry >= EntryEnd) Entry = &PoolBigPageTable[0];
1529 } while (Entry != EntryStart);
1530
1531 //
1532 // This means there are no free hash buckets whatsoever, so we would now have
1533 // to attempt expanding the table
1534 //
1535 DPRINT1("Big pool expansion needed, not implemented!\n");
1536 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1537 return FALSE;
1538 }
1539
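//
// Note (an assumption, not part of the original file): big pool VAs are page
// aligned, so bit 0 of Entry->Va is never set while an entry is live. That
// lets POOL_BIG_TABLE_ENTRY_FREE (0x1) double as the "slot is free" marker
// probed above, and lets the free path mark a slot free with a simple
// InterlockedIncrement on Va (see ExpFindAndRemoveTagBigPages below).
//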
1540 ULONG
1541 NTAPI
1542 ExpFindAndRemoveTagBigPages(IN PVOID Va,
1543 OUT PULONG_PTR BigPages,
1544 IN POOL_TYPE PoolType)
1545 {
1546 BOOLEAN FirstTry = TRUE;
1547 SIZE_T TableSize;
1548 KIRQL OldIrql;
1549 ULONG PoolTag, Hash;
1550 PPOOL_TRACKER_BIG_PAGES Entry;
1551 ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
1552 ASSERT(!(PoolType & SESSION_POOL_MASK));
1553
1554 //
1555 // As the table is expandable, these values must only be read after acquiring
1556 // the lock to avoid a torn access during an expansion
1557 //
1558 Hash = ExpComputePartialHashForAddress(Va);
1559 KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
1560 Hash &= PoolBigPageTableHash;
1561 TableSize = PoolBigPageTableSize;
1562
1563 //
1564 // Loop while trying to find this big page allocation
1565 //
1566 while (PoolBigPageTable[Hash].Va != Va)
1567 {
1568 //
1569 // Increment the size until we go past the end of the table
1570 //
1571 if (++Hash >= TableSize)
1572 {
1573 //
1574 // Is this the second time we've tried?
1575 //
1576 if (!FirstTry)
1577 {
1578 //
1579 // This means it was never inserted into the pool table and it
1580 // received the special "BIG" tag -- return that and return 0
1581 // so that the code can ask Mm for the page count instead
1582 //
1583 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1584 *BigPages = 0;
1585 return ' GIB';
1586 }
1587
1588 //
1589 // The first time this happens, reset the hash index and try again
1590 //
1591 Hash = 0;
1592 FirstTry = FALSE;
1593 }
1594 }
1595
1596 //
1597 // Now capture all the information we need from the entry, since after we
1598 // release the lock, the data can change
1599 //
1600 Entry = &PoolBigPageTable[Hash];
1601 *BigPages = Entry->NumberOfPages;
1602 PoolTag = Entry->Key;
1603
1604 //
1605 // Set the free bit, and decrement the number of allocations. Finally, release
1606 // the lock and return the tag that was located
1607 //
1608 InterlockedIncrement((PLONG)&Entry->Va);
1609 InterlockedDecrementUL(&ExpPoolBigEntriesInUse);
1610 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1611 return PoolTag;
1612 }
1613
1614 VOID
1615 NTAPI
1616 ExQueryPoolUsage(OUT PULONG PagedPoolPages,
1617 OUT PULONG NonPagedPoolPages,
1618 OUT PULONG PagedPoolAllocs,
1619 OUT PULONG PagedPoolFrees,
1620 OUT PULONG PagedPoolLookasideHits,
1621 OUT PULONG NonPagedPoolAllocs,
1622 OUT PULONG NonPagedPoolFrees,
1623 OUT PULONG NonPagedPoolLookasideHits)
1624 {
1625 ULONG i;
1626 PPOOL_DESCRIPTOR PoolDesc;
1627
1628 //
1629 // Assume all failures
1630 //
1631 *PagedPoolPages = 0;
1632 *PagedPoolAllocs = 0;
1633 *PagedPoolFrees = 0;
1634
1635 //
1636 // Tally up the totals for all the paged pools
1637 //
1638 for (i = 0; i < ExpNumberOfPagedPools + 1; i++)
1639 {
1640 PoolDesc = ExpPagedPoolDescriptor[i];
1641 *PagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1642 *PagedPoolAllocs += PoolDesc->RunningAllocs;
1643 *PagedPoolFrees += PoolDesc->RunningDeAllocs;
1644 }
1645
1646 //
1647 // The first non-paged pool has a hardcoded well-known descriptor name
1648 //
1649 PoolDesc = &NonPagedPoolDescriptor;
1650 *NonPagedPoolPages = PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1651 *NonPagedPoolAllocs = PoolDesc->RunningAllocs;
1652 *NonPagedPoolFrees = PoolDesc->RunningDeAllocs;
1653
1654 //
1655 // If the system has more than one non-paged pool, copy the other descriptor
1656 // totals as well
1657 //
1658 #if 0
1659 if (ExpNumberOfNonPagedPools > 1)
1660 {
1661 for (i = 0; i < ExpNumberOfNonPagedPools; i++)
1662 {
1663 PoolDesc = ExpNonPagedPoolDescriptor[i];
1664 *NonPagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1665 *NonPagedPoolAllocs += PoolDesc->RunningAllocs;
1666 *NonPagedPoolFrees += PoolDesc->RunningDeAllocs;
1667 }
1668 }
1669 #endif
1670
1671 //
1672 // Get the amount of hits in the system lookaside lists
1673 //
1674 if (!IsListEmpty(&ExPoolLookasideListHead))
1675 {
1676 PLIST_ENTRY ListEntry;
1677
1678 for (ListEntry = ExPoolLookasideListHead.Flink;
1679 ListEntry != &ExPoolLookasideListHead;
1680 ListEntry = ListEntry->Flink)
1681 {
1682 PGENERAL_LOOKASIDE Lookaside;
1683
1684 Lookaside = CONTAINING_RECORD(ListEntry, GENERAL_LOOKASIDE, ListEntry);
1685
1686 if (Lookaside->Type == NonPagedPool)
1687 {
1688 *NonPagedPoolLookasideHits += Lookaside->AllocateHits;
1689 }
1690 else
1691 {
1692 *PagedPoolLookasideHits += Lookaside->AllocateHits;
1693 }
1694 }
1695 }
1696 }
1697
1698 VOID
1699 NTAPI
1700 ExReturnPoolQuota(IN PVOID P)
1701 {
1702 PPOOL_HEADER Entry;
1703 POOL_TYPE PoolType;
1704 USHORT BlockSize;
1705 PEPROCESS Process;
1706
1707 if ((ExpPoolFlags & POOL_FLAG_SPECIAL_POOL) &&
1708 (MmIsSpecialPoolAddress(P)))
1709 {
1710 return;
1711 }
1712
1713 Entry = P;
1714 Entry--;
1715 ASSERT((ULONG_PTR)Entry % POOL_BLOCK_SIZE == 0);
1716
1717 PoolType = Entry->PoolType - 1;
1718 BlockSize = Entry->BlockSize;
1719
1720 if (PoolType & QUOTA_POOL_MASK)
1721 {
1722 Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
1723 ASSERT(Process != NULL);
1724 if (Process)
1725 {
1726 if (Process->Pcb.Header.Type != ProcessObject)
1727 {
1728 DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
1729 Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
1730 KeBugCheckEx(BAD_POOL_CALLER,
1731 0x0D,
1732 (ULONG_PTR)P,
1733 Entry->PoolTag,
1734 (ULONG_PTR)Process);
1735 }
1736 ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
1737 PsReturnPoolQuota(Process,
1738 PoolType & BASE_POOL_TYPE_MASK,
1739 BlockSize * POOL_BLOCK_SIZE);
1740 ObDereferenceObject(Process);
1741 }
1742 }
1743 }
1744
1745 /* PUBLIC FUNCTIONS ***********************************************************/
1746
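//
// Illustrative driver-side usage (an assumption, not part of this file): a
// well-behaved caller pairs the allocation with a free under the same tag,
// which is exactly what feeds the tracker tables maintained above:
//
#if 0
PVOID Buffer = ExAllocatePoolWithTag(NonPagedPool, 64, 'tseT');  // tag "Test"
if (Buffer != NULL)
{
    RtlZeroMemory(Buffer, 64);
    ExFreePoolWithTag(Buffer, 'tseT');  // must match the allocation tag
}
#endif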
1747 /*
1748 * @implemented
1749 */
1750 PVOID
1751 NTAPI
1752 ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
1753 IN SIZE_T NumberOfBytes,
1754 IN ULONG Tag)
1755 {
1756 PPOOL_DESCRIPTOR PoolDesc;
1757 PLIST_ENTRY ListHead;
1758 PPOOL_HEADER Entry, NextEntry, FragmentEntry;
1759 KIRQL OldIrql;
1760 USHORT BlockSize, i;
1761 ULONG OriginalType;
1762 PKPRCB Prcb = KeGetCurrentPrcb();
1763 PGENERAL_LOOKASIDE LookasideList;
1764
1765 //
1766 // Some sanity checks
1767 //
1768 ASSERT(Tag != 0);
1769 ASSERT(Tag != ' GIB');
1770 ASSERT(NumberOfBytes != 0);
1771 ExpCheckPoolIrqlLevel(PoolType, NumberOfBytes, NULL);
1772
1773 //
1774 // Not supported in ReactOS
1775 //
1776 ASSERT(!(PoolType & SESSION_POOL_MASK));
1777
1778 //
1779 // Check if verifier or special pool is enabled
1780 //
1781 if (ExpPoolFlags & (POOL_FLAG_VERIFIER | POOL_FLAG_SPECIAL_POOL))
1782 {
1783 //
1784 // For verifier, we should call the verification routine
1785 //
1786 if (ExpPoolFlags & POOL_FLAG_VERIFIER)
1787 {
1788 DPRINT1("Driver Verifier is not yet supported\n");
1789 }
1790
1791 //
1792 // For special pool, we check if this is a suitable allocation and do
1793 // the special allocation if needed
1794 //
1795 if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
1796 {
1797 //
1798 // Check if this is a special pool allocation
1799 //
1800 if (MmUseSpecialPool(NumberOfBytes, Tag))
1801 {
1802 //
1803 // Try to allocate using special pool
1804 //
1805 Entry = MmAllocateSpecialPool(NumberOfBytes, Tag, PoolType, 2);
1806 if (Entry) return Entry;
1807 }
1808 }
1809 }
1810
1811 //
1812 // Get the pool type and its corresponding vector for this request
1813 //
1814 OriginalType = PoolType;
1815 PoolType = PoolType & BASE_POOL_TYPE_MASK;
1816 PoolDesc = PoolVector[PoolType];
1817 ASSERT(PoolDesc != NULL);
1818
1819 //
1820 // Check if this is a big page allocation
1821 //
1822 if (NumberOfBytes > POOL_MAX_ALLOC)
1823 {
1824 //
1825 // Allocate pages for it
1826 //
1827 Entry = MiAllocatePoolPages(OriginalType, NumberOfBytes);
1828 if (!Entry)
1829 {
1830 #if DBG
1831 //
1832 // Out of memory, so display the current pool consumption.
1833 // Assume that any caller who wanted more than a hundred
1834 // pages is simply bogus, rather than a sign that the
1835 // system is truly out of memory
1836 //
1837 if (NumberOfBytes < 100 * PAGE_SIZE)
1838 {
1839 MiDumpPoolConsumers(FALSE, 0, 0, 0);
1840 }
1841 #endif
1842
1843 //
1844 // Must succeed pool is deprecated, but still supported. These allocation
1845 // failures must cause an immediate bugcheck
1846 //
1847 if (OriginalType & MUST_SUCCEED_POOL_MASK)
1848 {
1849 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1850 NumberOfBytes,
1851 NonPagedPoolDescriptor.TotalPages,
1852 NonPagedPoolDescriptor.TotalBigPages,
1853 0);
1854 }
1855
1856 //
1857 // Internal debugging
1858 //
1859 ExPoolFailures++;
1860
1861 //
1862 // This flag requests printing failures, and can also further specify
1863 // breaking on failures
1864 //
1865 if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
1866 {
1867 DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
1868 NumberOfBytes,
1869 OriginalType);
1870 if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
1871 }
1872
1873 //
1874 // Finally, this flag requests an exception, which we are more than
1875 // happy to raise!
1876 //
1877 if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
1878 {
1879 ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
1880 }
1881
1882 return NULL;
1883 }
1884
1885 //
1886 // Increment required counters
1887 //
1888 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
1889 (LONG)BYTES_TO_PAGES(NumberOfBytes));
1890 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, NumberOfBytes);
1891 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
1892
1893 //
1894 // Add a tag for the big page allocation and switch to the generic "BIG"
1895 // tag if we failed to do so, then insert a tracker for this allocation.
1896 //
1897 if (!ExpAddTagForBigPages(Entry,
1898 Tag,
1899 (ULONG)BYTES_TO_PAGES(NumberOfBytes),
1900 OriginalType))
1901 {
1902 Tag = ' GIB';
1903 }
1904 ExpInsertPoolTracker(Tag, ROUND_TO_PAGES(NumberOfBytes), OriginalType);
1905 return Entry;
1906 }
1907
1908 //
1909 // Should never request 0 bytes from the pool, but since so many drivers do
1910 // it, we'll just assume they want 1 byte, based on NT's similar behavior
1911 //
1912 if (!NumberOfBytes) NumberOfBytes = 1;
1913
1914 //
1915 // A pool allocation is defined by its data, a linked list to connect it to
1916 // the free list (if necessary), and a pool header to store accounting info.
1917 // Calculate this size, then convert it into a block size (units of pool
1918 // headers)
1919 //
1920 // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
1921 // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
1922 // the direct allocation of pages.
1923 //
1924 i = (USHORT)((NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1))
1925 / POOL_BLOCK_SIZE);
1926 ASSERT(i < POOL_LISTS_PER_PAGE);
1927
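//
// Worked example (an assumption, not part of the original file): on x86,
// where POOL_BLOCK_SIZE and sizeof(POOL_HEADER) are both 8, a 24-byte request
// gives i = (24 + 8 + 7) / 8 = 4 blocks, i.e. one 32-byte chunk including its
// header.
//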
1928 //
1929 // Handle lookaside list optimization for both paged and nonpaged pool
1930 //
1931 if (i <= NUMBER_POOL_LOOKASIDE_LISTS)
1932 {
1933 //
1934 // Try popping it from the per-CPU lookaside list
1935 //
1936 LookasideList = (PoolType == PagedPool) ?
1937 Prcb->PPPagedLookasideList[i - 1].P :
1938 Prcb->PPNPagedLookasideList[i - 1].P;
1939 LookasideList->TotalAllocates++;
1940 Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
1941 if (!Entry)
1942 {
1943 //
1944 // We failed, try popping it from the global list
1945 //
1946 LookasideList = (PoolType == PagedPool) ?
1947 Prcb->PPPagedLookasideList[i - 1].L :
1948 Prcb->PPNPagedLookasideList[i - 1].L;
1949 LookasideList->TotalAllocates++;
1950 Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
1951 }
1952
1953 //
1954 // If we were able to pop it, update the accounting and return the block
1955 //
1956 if (Entry)
1957 {
1958 LookasideList->AllocateHits++;
1959
1960 //
1961 // Get the real entry, write down its pool type, and track it
1962 //
1963 Entry--;
1964 Entry->PoolType = OriginalType + 1;
1965 ExpInsertPoolTracker(Tag,
1966 Entry->BlockSize * POOL_BLOCK_SIZE,
1967 OriginalType);
1968
1969 //
1970 // Return the pool allocation
1971 //
1972 Entry->PoolTag = Tag;
1973 (POOL_FREE_BLOCK(Entry))->Flink = NULL;
1974 (POOL_FREE_BLOCK(Entry))->Blink = NULL;
1975 return POOL_FREE_BLOCK(Entry);
1976 }
1977 }
1978
1979 //
1980 // Loop in the free lists looking for a block of this size. Start with the
1981 // list optimized for this kind of size lookup
1982 //
1983 ListHead = &PoolDesc->ListHeads[i];
1984 do
1985 {
1986 //
1987 // Are there any free entries available on this list?
1988 //
1989 if (!ExpIsPoolListEmpty(ListHead))
1990 {
1991 //
1992 // Acquire the pool lock now
1993 //
1994 OldIrql = ExLockPool(PoolDesc);
1995
1996 //
1997 // And make sure the list still has entries
1998 //
1999 if (ExpIsPoolListEmpty(ListHead))
2000 {
2001 //
2002 // Someone raced us (and won) before we had a chance to acquire
2003 // the lock.
2004 //
2005 // Try again!
2006 //
2007 ExUnlockPool(PoolDesc, OldIrql);
2008 continue;
2009 }
2010
2011 //
2012 // Remove a free entry from the list
2013 // Note that due to the way we insert free blocks into multiple lists
2014 // there is a guarantee that any block on this list will either be
2015 // of the correct size, or perhaps larger.
2016 //
2017 ExpCheckPoolLinks(ListHead);
2018 Entry = POOL_ENTRY(ExpRemovePoolHeadList(ListHead));
2019 ExpCheckPoolLinks(ListHead);
2020 ExpCheckPoolBlocks(Entry);
2021 ASSERT(Entry->BlockSize >= i);
2022 ASSERT(Entry->PoolType == 0);
2023
2024 //
2025 // Check if this block is larger than what we need. The block could
2026 // not possibly be smaller, due to the reason explained above (and
2027 // we would've asserted on a checked build if this was the case).
2028 //
2029 if (Entry->BlockSize != i)
2030 {
2031 //
2032 // Is there an entry before this one?
2033 //
2034 if (Entry->PreviousSize == 0)
2035 {
2036 //
2037 // There isn't anyone before us, so take the next block and
2038 // turn it into a fragment that contains the leftover data
2039 // that we don't need to satisfy the caller's request
2040 //
2041 FragmentEntry = POOL_BLOCK(Entry, i);
2042 FragmentEntry->BlockSize = Entry->BlockSize - i;
2043
2044 //
2045 // And make it point back to us
2046 //
2047 FragmentEntry->PreviousSize = i;
2048
2049 //
2050 // Now get the block that follows the new fragment and check
2051 // if it's still on the same page as us (and not at the end)
2052 //
2053 NextEntry = POOL_NEXT_BLOCK(FragmentEntry);
2054 if (PAGE_ALIGN(NextEntry) != NextEntry)
2055 {
2056 //
2057 // Adjust this next block to point to our newly created
2058 // fragment block
2059 //
2060 NextEntry->PreviousSize = FragmentEntry->BlockSize;
2061 }
2062 }
2063 else
2064 {
2065 //
2066 // There is a free entry before us, which we know is smaller
2067 // so we'll make this entry the fragment instead
2068 //
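// A sketch of this split, with i blocks requested from an N-block entry:
//
//   Before: [Entry: N blocks ..........................]
//   After:  [FragmentEntry: N - i blocks][Entry: i blocks]
//
// Keeping the fragment at the front presumably lets it later coalesce
// with the smaller free entry that precedes it.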
2069 FragmentEntry = Entry;
2070
2071 //
2072 // And then we'll remove from it the actual size required.
2073 // Now the entry is a leftover free fragment
2074 //
2075 Entry->BlockSize -= i;
2076
2077 //
2078 // Now let's go to the next entry after the fragment (which
2079 // used to point to our original free entry) and make it
2080 // reference the new fragment entry instead.
2081 //
2082 // This is the entry that will actually end up holding the
2083 // allocation!
2084 //
2085 Entry = POOL_NEXT_BLOCK(Entry);
2086 Entry->PreviousSize = FragmentEntry->BlockSize;
2087
2088 //
2089 // And now let's go to the entry after that one and check if
2090 // it's still on the same page, and not at the end
2091 //
2092 NextEntry = POOL_BLOCK(Entry, i);
2093 if (PAGE_ALIGN(NextEntry) != NextEntry)
2094 {
2095 //
2096 // Make it reference the allocation entry
2097 //
2098 NextEntry->PreviousSize = i;
2099 }
2100 }
2101
2102 //
2103 // Now our (allocation) entry is the right size
2104 //
2105 Entry->BlockSize = i;
2106
2107 //
2108 // And the next entry is now the free fragment which contains
2109 // the remaining difference between how big the original entry
2110 // was, and the actual size the caller needs/requested.
2111 //
2112 FragmentEntry->PoolType = 0;
2113 BlockSize = FragmentEntry->BlockSize;
2114
2115 //
2116 // Now check if enough free bytes remained for us to have a
2117 // "full" entry, which contains enough bytes for a linked list
2118 // and thus can be used for allocations (up to 8 bytes...)
2119 //
2120 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
2121 if (BlockSize != 1)
2122 {
2123 //
2124 // Insert the free entry into the free list for this size
2125 //
2126 ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
2127 POOL_FREE_BLOCK(FragmentEntry));
2128 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
2129 }
2130 }
2131
2132 //
2133 // We have found an entry for this allocation, so set the pool type
2134 // and release the lock since we're done
2135 //
2136 Entry->PoolType = OriginalType + 1;
2137 ExpCheckPoolBlocks(Entry);
2138 ExUnlockPool(PoolDesc, OldIrql);
2139
2140 //
2141 // Increment required counters
2142 //
2143 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
2144 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
2145
2146 //
2147 // Track this allocation
2148 //
2149 ExpInsertPoolTracker(Tag,
2150 Entry->BlockSize * POOL_BLOCK_SIZE,
2151 OriginalType);
2152
2153 //
2154 // Return the pool allocation
2155 //
2156 Entry->PoolTag = Tag;
2157 (POOL_FREE_BLOCK(Entry))->Flink = NULL;
2158 (POOL_FREE_BLOCK(Entry))->Blink = NULL;
2159 return POOL_FREE_BLOCK(Entry);
2160 }
2161 } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);
2162
2163 //
2164 // There were no free entries left, so we have to allocate a new fresh page
2165 //
2166 Entry = MiAllocatePoolPages(OriginalType, PAGE_SIZE);
2167 if (!Entry)
2168 {
2169 #if DBG
2170 //
2171 // Out of memory, display current consumption
2172 // Let's consider that if the caller wanted more
2173 // than a hundred pages, that's a bogus caller
2174 // and we are not out of memory
2175 //
2176 if (NumberOfBytes < 100 * PAGE_SIZE)
2177 {
2178 MiDumpPoolConsumers(FALSE, 0, 0, 0);
2179 }
2180 #endif
2181
2182 //
2183 // Must succeed pool is deprecated, but still supported. These allocation
2184 // failures must cause an immediate bugcheck
2185 //
2186 if (OriginalType & MUST_SUCCEED_POOL_MASK)
2187 {
2188 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
2189 PAGE_SIZE,
2190 NonPagedPoolDescriptor.TotalPages,
2191 NonPagedPoolDescriptor.TotalBigPages,
2192 0);
2193 }
2194
2195 //
2196 // Internal debugging
2197 //
2198 ExPoolFailures++;
2199
2200 //
2201 // This flag requests printing failures, and can also further specify
2202 // breaking on failures
2203 //
2204 if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
2205 {
2206 DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
2207 NumberOfBytes,
2208 OriginalType);
2209 if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
2210 }
2211
2212 //
2213 // Finally, this flag requests an exception, which we are more than
2214 // happy to raise!
2215 //
2216 if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
2217 {
2218 ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
2219 }
2220
2221 //
2222 // Return NULL to the caller in all other cases
2223 //
2224 return NULL;
2225 }
2226
2227 //
2228 // Setup the entry data
2229 //
2230 Entry->Ulong1 = 0;
2231 Entry->BlockSize = i;
2232 Entry->PoolType = OriginalType + 1;
2233
2234 //
2235 // This page will have two entries -- one for the allocation (which we just
2236 // created above), and one for the remaining free bytes, which we're about
2237 // to create now. The free bytes are the whole page minus what was allocated
2238 // and then converted into units of block headers.
2239 //
2240 BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i;
2241 FragmentEntry = POOL_BLOCK(Entry, i);
2242 FragmentEntry->Ulong1 = 0;
2243 FragmentEntry->BlockSize = BlockSize;
2244 FragmentEntry->PreviousSize = i;
2245
2246 //
2247 // Increment required counters
2248 //
2249 InterlockedIncrement((PLONG)&PoolDesc->TotalPages);
2250 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
2251
2252 //
2253 // Now check if enough free bytes remained for us to have a "full" entry,
2254 // which contains enough bytes for a linked list and thus can be used for
2255 // allocations (up to 8 bytes...)
2256 //
2257 if (FragmentEntry->BlockSize != 1)
2258 {
2259 //
2260 // Excellent -- acquire the pool lock
2261 //
2262 OldIrql = ExLockPool(PoolDesc);
2263
2264 //
2265 // And insert the free entry into the free list for this block size
2266 //
2267 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
2268 ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
2269 POOL_FREE_BLOCK(FragmentEntry));
2270 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
2271
2272 //
2273 // Release the pool lock
2274 //
2275 ExpCheckPoolBlocks(Entry);
2276 ExUnlockPool(PoolDesc, OldIrql);
2277 }
2278 else
2279 {
2280 //
2281 // Simply do a sanity check
2282 //
2283 ExpCheckPoolBlocks(Entry);
2284 }
2285
2286 //
2287 // Increment performance counters and track this allocation
2288 //
2289 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
2290 ExpInsertPoolTracker(Tag,
2291 Entry->BlockSize * POOL_BLOCK_SIZE,
2292 OriginalType);
2293
2294 //
2295 // And return the pool allocation
2296 //
2297 ExpCheckPoolBlocks(Entry);
2298 Entry->PoolTag = Tag;
2299 return POOL_FREE_BLOCK(Entry);
2300 }
2301
2302 /*
2303 * @implemented
2304 */
2305 PVOID
2306 NTAPI
2307 ExAllocatePool(POOL_TYPE PoolType,
2308 SIZE_T NumberOfBytes)
2309 {
2310 ULONG Tag = TAG_NONE;
2311 #if 0 && DBG
2312 PLDR_DATA_TABLE_ENTRY LdrEntry;
2313
2314 /* Use the first four letters of the driver name, or "None" if unavailable */
2315 LdrEntry = KeGetCurrentIrql() <= APC_LEVEL
2316 ? MiLookupDataTableEntry(_ReturnAddress())
2317 : NULL;
2318 if (LdrEntry)
2319 {
2320 ULONG i;
2321 Tag = 0;
2322 for (i = 0; i < min(4, LdrEntry->BaseDllName.Length / sizeof(WCHAR)); i++)
2323 Tag = Tag >> 8 | (LdrEntry->BaseDllName.Buffer[i] & 0xff) << 24;
2324 for (; i < 4; i++)
2325 Tag = Tag >> 8 | ' ' << 24;
2326 }
2327 #endif
2328 return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2329 }
2330
2331 /*
2332 * @implemented
2333 */
2334 VOID
2335 NTAPI
2336 ExFreePoolWithTag(IN PVOID P,
2337 IN ULONG TagToFree)
2338 {
2339 PPOOL_HEADER Entry, NextEntry;
2340 USHORT BlockSize;
2341 KIRQL OldIrql;
2342 POOL_TYPE PoolType;
2343 PPOOL_DESCRIPTOR PoolDesc;
2344 ULONG Tag;
2345 BOOLEAN Combined = FALSE;
2346 PFN_NUMBER PageCount, RealPageCount;
2347 PKPRCB Prcb = KeGetCurrentPrcb();
2348 PGENERAL_LOOKASIDE LookasideList;
2349 PEPROCESS Process;
2350
2351 //
2352 // Check if any of the debug flags are enabled
2353 //
2354 if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
2355 POOL_FLAG_CHECK_WORKERS |
2356 POOL_FLAG_CHECK_RESOURCES |
2357 POOL_FLAG_VERIFIER |
2358 POOL_FLAG_CHECK_DEADLOCK |
2359 POOL_FLAG_SPECIAL_POOL))
2360 {
2361 //
2362 // Check if special pool is enabled
2363 //
2364 if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
2365 {
2366 //
2367 // Check if it was allocated from a special pool
2368 //
2369 if (MmIsSpecialPoolAddress(P))
2370 {
2371 //
2372 // Was deadlock verification also enabled? We can do some extra
2373 // checks at this point
2374 //
2375 if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
2376 {
2377 DPRINT1("Verifier not yet supported\n");
2378 }
2379
2380 //
2381 // It is, so handle it via special pool free routine
2382 //
2383 MmFreeSpecialPool(P);
2384 return;
2385 }
2386 }
2387
2388 //
2389 // For non-big page allocations, we'll do a bunch of checks in here
2390 //
2391 if (PAGE_ALIGN(P) != P)
2392 {
2393 //
2394 // Get the entry for this pool allocation
2395 // The pointer math here may look wrong or confusing, but it is quite right
2396 //
2397 Entry = P;
2398 Entry--;
2399
2400 //
2401 // Get the pool type
2402 //
2403 PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2404
2405 //
2406 // FIXME: Many other debugging checks go here
2407 //
2408 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2409 }
2410 }
2411
2412 //
2413 // Check if this is a big page allocation
2414 //
2415 if (PAGE_ALIGN(P) == P)
2416 {
2417 //
2418 // We need to find the tag for it, so first we need to find out what
2419 // kind of allocation this was (paged or nonpaged), then we can go
2420 // ahead and try finding the tag for it. Remember to get rid of the
2421 // PROTECTED_POOL tag if it's found.
2422 //
2423 // Note that if at insertion time, we failed to add the tag for a big
2424 // pool allocation, we used a special tag called 'BIG' to identify the
2425 // allocation, and we may get this tag back. In this scenario, we must
2426 // manually get the size of the allocation by actually counting through
2427 // the PFN database.
2428 //
2429 PoolType = MmDeterminePoolType(P);
2430 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2431 Tag = ExpFindAndRemoveTagBigPages(P, &PageCount, PoolType);
2432 if (!Tag)
2433 {
2434 DPRINT1("We do not know the size of this allocation. This is not yet supported\n");
2435 ASSERT(Tag == ' GIB');
2436 PageCount = 1; // We are going to lie! This might screw up accounting?
2437 }
2438 else if (Tag & PROTECTED_POOL)
2439 {
2440 Tag &= ~PROTECTED_POOL;
2441 }
2442
2443 //
2444 // Check block tag
2445 //
2446 if (TagToFree && TagToFree != Tag)
2447 {
2448 DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
2449 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
2450 }
2451
2452 //
2453 // We have our tag and our page count, so we can go ahead and remove this
2454 // tracker now
2455 //
2456 ExpRemovePoolTracker(Tag, PageCount << PAGE_SHIFT, PoolType);
2457
2458 //
2459 // Check if any of the debug flags are enabled
2460 //
2461 if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
2462 POOL_FLAG_CHECK_WORKERS |
2463 POOL_FLAG_CHECK_RESOURCES |
2464 POOL_FLAG_CHECK_DEADLOCK))
2465 {
2466 //
2467 // Was deadlock verification also enabled? We can do some extra
2468 // checks at this point
2469 //
2470 if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
2471 {
2472 DPRINT1("Verifier not yet supported\n");
2473 }
2474
2475 //
2476 // FIXME: Many debugging checks go here
2477 //
2478 }
2479
2480 //
2481 // Update counters
2482 //
2483 PoolDesc = PoolVector[PoolType];
2484 InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
2485 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes,
2486 -(LONG_PTR)(PageCount << PAGE_SHIFT));
2487
2488 //
2489 // Do the real free now and update the last counter with the big page count
2490 //
2491 RealPageCount = MiFreePoolPages(P);
2492 ASSERT(RealPageCount == PageCount);
2493 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
2494 -(LONG)RealPageCount);
2495 return;
2496 }
2497
2498 //
2499 // Get the entry for this pool allocation
2500 // The pointer math here may look wrong or confusing, but it is quite right
2501 //
2502 Entry = P;
2503 Entry--;
2504 ASSERT((ULONG_PTR)Entry % POOL_BLOCK_SIZE == 0);
2505
2506 //
2507 // Get the size of the entry and its pool type, then load the descriptor
2508 // for this pool type
2509 //
2510 BlockSize = Entry->BlockSize;
2511 PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2512 PoolDesc = PoolVector[PoolType];
2513
2514 //
2515 // Make sure that the IRQL makes sense
2516 //
2517 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2518
2519 //
2520 // Get the pool tag and get rid of the PROTECTED_POOL flag
2521 //
2522 Tag = Entry->PoolTag;
2523 if (Tag & PROTECTED_POOL) Tag &= ~PROTECTED_POOL;
2524
2525 //
2526 // Check block tag
2527 //
2528 if (TagToFree && TagToFree != Tag)
2529 {
2530 DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
2531 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
2532 }
2533
2534 //
2535 // Track the removal of this allocation
2536 //
2537 ExpRemovePoolTracker(Tag,
2538 BlockSize * POOL_BLOCK_SIZE,
2539 Entry->PoolType - 1);
2540
2541 //
2542 // Release pool quota, if any
2543 //
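// (the owning EPROCESS pointer is stored in the last PVOID-sized slot of
// the block, where ExAllocatePoolWithQuotaTag wrote it)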
2544 if ((Entry->PoolType - 1) & QUOTA_POOL_MASK)
2545 {
2546 Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
2547 if (Process)
2548 {
2549 if (Process->Pcb.Header.Type != ProcessObject)
2550 {
2551 DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
2552 Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
2553 KeBugCheckEx(BAD_POOL_CALLER,
2554 0x0D,
2555 (ULONG_PTR)P,
2556 Tag,
2557 (ULONG_PTR)Process);
2558 }
2559 PsReturnPoolQuota(Process, PoolType, BlockSize * POOL_BLOCK_SIZE);
2560 ObDereferenceObject(Process);
2561 }
2562 }
2563
2564 //
2565 // Is this allocation small enough to have come from a lookaside list?
2566 //
2567 if (BlockSize <= NUMBER_POOL_LOOKASIDE_LISTS)
2568 {
2569 //
2570 // Try pushing it into the per-CPU lookaside list
2571 //
2572 LookasideList = (PoolType == PagedPool) ?
2573 Prcb->PPPagedLookasideList[BlockSize - 1].P :
2574 Prcb->PPNPagedLookasideList[BlockSize - 1].P;
2575 LookasideList->TotalFrees++;
2576 if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2577 {
2578 LookasideList->FreeHits++;
2579 InterlockedPushEntrySList(&LookasideList->ListHead, P);
2580 return;
2581 }
2582
2583 //
2584 // We failed, try to push it into the global lookaside list
2585 //
2586 LookasideList = (PoolType == PagedPool) ?
2587 Prcb->PPPagedLookasideList[BlockSize - 1].L :
2588 Prcb->PPNPagedLookasideList[BlockSize - 1].L;
2589 LookasideList->TotalFrees++;
2590 if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2591 {
2592 LookasideList->FreeHits++;
2593 InterlockedPushEntrySList(&LookasideList->ListHead, P);
2594 return;
2595 }
2596 }
2597
2598 //
2599 // Get the pointer to the next entry
2600 //
2601 NextEntry = POOL_BLOCK(Entry, BlockSize);
2602
2603 //
2604 // Update performance counters
2605 //
2606 InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
2607 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, -BlockSize * POOL_BLOCK_SIZE);
2608
2609 //
2610 // Acquire the pool lock
2611 //
2612 OldIrql = ExLockPool(PoolDesc);
2613
2614 //
2615 // Check if the next allocation is at the end of the page
2616 //
2617 ExpCheckPoolBlocks(Entry);
2618 if (PAGE_ALIGN(NextEntry) != NextEntry)
2619 {
2620 //
2621 // We may be able to combine the block if it's free
2622 //
2623 if (NextEntry->PoolType == 0)
2624 {
2625 //
2626 // The next block is free, so we'll do a combine
2627 //
2628 Combined = TRUE;
2629
2630 //
2631 // Make sure there's actual data in the block -- anything smaller
2632 // than this means we only have the header, so there's no linked list
2633 // for us to remove
2634 //
2635 if ((NextEntry->BlockSize != 1))
2636 {
2637 //
2638 // The block is at least big enough to have a linked list, so go
2639 // ahead and remove it
2640 //
2641 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
2642 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
2643 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
2644 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
2645 }
2646
2647 //
2648 // Our entry is now combined with the next entry
2649 //
2650 Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
2651 }
2652 }
2653
2654 //
2655 // Now check if there was a previous entry on the same page as us
2656 //
2657 if (Entry->PreviousSize)
2658 {
2659 //
2660 // Great, grab that entry and check if it's free
2661 //
2662 NextEntry = POOL_PREV_BLOCK(Entry);
2663 if (NextEntry->PoolType == 0)
2664 {
2665 //
2666 // It is, so we can do a combine
2667 //
2668 Combined = TRUE;
2669
2670 //
2671 // Make sure there's actual data in the block -- anything smaller
2672 // than this means we only have the header so there's no linked list
2673 // for us to remove
2674 //
2675 if ((NextEntry->BlockSize != 1))
2676 {
2677 //
2678 // The block is at least big enough to have a linked list, so go
2679 // ahead and remove it
2680 //
2681 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
2682 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
2683 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
2684 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
2685 }
2686
2687 //
2688 // Combine our original block (which might've already been combined
2689 // with the next block), into the previous block
2690 //
2691 NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;
2692
2693 //
2694 // And now we'll work with the previous block instead
2695 //
2696 Entry = NextEntry;
2697 }
2698 }
2699
2700 //
2701 // By now, it may have been possible for our combined blocks to actually
2702 // have made up a full page (if there were only 2-3 allocations on the
2703 // page, they could've all been combined).
2704 //
2705 if ((PAGE_ALIGN(Entry) == Entry) &&
2706 (PAGE_ALIGN(POOL_NEXT_BLOCK(Entry)) == POOL_NEXT_BLOCK(Entry)))
2707 {
2708 //
2709 // In this case, release the pool lock, update the performance counter,
2710 // and free the page
2711 //
2712 ExUnlockPool(PoolDesc, OldIrql);
2713 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalPages, -1);
2714 MiFreePoolPages(Entry);
2715 return;
2716 }
2717
2718 //
2719 // Otherwise, we now have a free block (or a combination of 2 or 3)
2720 //
2721 Entry->PoolType = 0;
2722 BlockSize = Entry->BlockSize;
2723 ASSERT(BlockSize != 1);
2724
2725 //
2726 // Check if we actually did combine it with anyone
2727 //
2728 if (Combined)
2729 {
2730 //
2731 // Get the block that follows our now-combined free block (our original
2732 // entry may itself have moved back if we merged with the previous one)
2733 //
2734 NextEntry = POOL_NEXT_BLOCK(Entry);
2735
2736 //
2737 // As long as the next block isn't on a page boundary, have it point
2738 // back to us
2739 //
2740 if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
2741 }
2742
2743 //
2744 // Insert this new free block, and release the pool lock
2745 //
2746 ExpInsertPoolHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
2747 ExpCheckPoolLinks(POOL_FREE_BLOCK(Entry));
2748 ExUnlockPool(PoolDesc, OldIrql);
2749 }
2750
2751 /*
2752 * @implemented
2753 */
2754 VOID
2755 NTAPI
2756 ExFreePool(PVOID P)
2757 {
2758 //
2759 // Just free without checking for the tag
2760 //
2761 ExFreePoolWithTag(P, 0);
2762 }
2763
2764 /*
2765 * @unimplemented
2766 */
2767 SIZE_T
2768 NTAPI
2769 ExQueryPoolBlockSize(IN PVOID PoolBlock,
2770 OUT PBOOLEAN QuotaCharged)
2771 {
2772 //
2773 // Not implemented
2774 //
2775 UNIMPLEMENTED;
2776 return FALSE;
2777 }
2778
2779 /*
2780 * @implemented
2781 */
2782
2783 PVOID
2784 NTAPI
2785 ExAllocatePoolWithQuota(IN POOL_TYPE PoolType,
2786 IN SIZE_T NumberOfBytes)
2787 {
2788 //
2789 // Allocate the pool
2790 //
2791 return ExAllocatePoolWithQuotaTag(PoolType, NumberOfBytes, TAG_NONE);
2792 }
2793
2794 /*
2795 * @implemented
2796 */
2797 PVOID
2798 NTAPI
2799 ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType,
2800 IN SIZE_T NumberOfBytes,
2801 IN ULONG Tag,
2802 IN EX_POOL_PRIORITY Priority)
2803 {
2804 PVOID Buffer;
2805
2806 //
2807 // Allocate the pool
2808 //
2809 Buffer = ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2810 if (Buffer == NULL)
2811 {
2812 UNIMPLEMENTED;
2813 }
2814
2815 return Buffer;
2816 }
2817
2818 /*
2819 * @implemented
2820 */
2821 PVOID
2822 NTAPI
2823 ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType,
2824 IN SIZE_T NumberOfBytes,
2825 IN ULONG Tag)
2826 {
2827 BOOLEAN Raise = TRUE;
2828 PVOID Buffer;
2829 PPOOL_HEADER Entry;
2830 NTSTATUS Status;
2831 PEPROCESS Process = PsGetCurrentProcess();
2832
2833 //
2834 // Check if we should fail instead of raising an exception
2835 //
2836 if (PoolType & POOL_QUOTA_FAIL_INSTEAD_OF_RAISE)
2837 {
2838 Raise = FALSE;
2839 PoolType &= ~POOL_QUOTA_FAIL_INSTEAD_OF_RAISE;
2840 }
2841
2842 //
2843 // Inject the pool quota mask
2844 //
2845 PoolType += QUOTA_POOL_MASK;
2846
2847 //
2848 // Check if we have enough space to add the quota owner process, as long as
2849 // this isn't the system process, which never gets charged quota
2850 //
2851 ASSERT(NumberOfBytes != 0);
2852 if ((NumberOfBytes <= (PAGE_SIZE - POOL_BLOCK_SIZE - sizeof(PVOID))) &&
2853 (Process != PsInitialSystemProcess))
2854 {
2855 //
2856 // Add space for our EPROCESS pointer
2857 //
2858 NumberOfBytes += sizeof(PEPROCESS);
2859 }
2860 else
2861 {
2862 //
2863 // We won't be able to store the pointer, so don't use quota for this
2864 //
2865 PoolType -= QUOTA_POOL_MASK;
2866 }
2867
2868 //
2869 // Allocate the pool buffer now
2870 //
2871 Buffer = ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2872
2873 //
2874 // If the buffer is page-aligned, this is a large page allocation and we
2875 // won't touch it
2876 //
2877 if (PAGE_ALIGN(Buffer) != Buffer)
2878 {
2879 //
2880 // Also if special pool is enabled, and this was allocated from there,
2881 // we won't touch it either
2882 //
2883 if ((ExpPoolFlags & POOL_FLAG_SPECIAL_POOL) &&
2884 (MmIsSpecialPoolAddress(Buffer)))
2885 {
2886 return Buffer;
2887 }
2888
2889 //
2890 // If it wasn't actually allocated with quota charges, ignore it too
2891 //
2892 if (!(PoolType & QUOTA_POOL_MASK)) return Buffer;
2893
2894 //
2895 // If this is the system process, we don't charge quota, so ignore
2896 //
2897 if (Process == PsInitialSystemProcess) return Buffer;
2898
2899 //
2900 // Actually go and charge quota for the process now
2901 //
2902 Entry = POOL_ENTRY(Buffer);
2903 Status = PsChargeProcessPoolQuota(Process,
2904 PoolType & BASE_POOL_TYPE_MASK,
2905 Entry->BlockSize * POOL_BLOCK_SIZE);
2906 if (!NT_SUCCESS(Status))
2907 {
2908 //
2909 // Quota failed, back out the allocation, clear the owner, and fail
2910 //
2911 ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
2912 ExFreePoolWithTag(Buffer, Tag);
2913 if (Raise) RtlRaiseStatus(Status);
2914 return NULL;
2915 }
2916
2917 //
2918 // Quota worked, write the owner and then reference it before returning
2919 //
2920 ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = Process;
2921 ObReferenceObject(Process);
2922 }
2923 else if (!(Buffer) && (Raise))
2924 {
2925 //
2926 // The allocation failed, raise an error if we are in raise mode
2927 //
2928 RtlRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
2929 }
2930
2931 //
2932 // Return the allocated buffer
2933 //
2934 return Buffer;
2935 }
2936
2937 #if DBG && defined(KDBG)
2938
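/*
 * KDBG "!pool" handler (usage inferred from the argument parsing below):
 *   !pool <address> [flags]
 * Dumps the headers of the pool page containing <address>. With bit 0 of
 * flags set, only the entry owning the address is listed and each block is
 * followed by a 32-byte hex dump; bit 31 hides the allocation state and tag.
 */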
2939 BOOLEAN
2940 ExpKdbgExtPool(
2941 ULONG Argc,
2942 PCHAR Argv[])
2943 {
2944 ULONG_PTR Address = 0, Flags = 0;
2945 PVOID PoolPage;
2946 PPOOL_HEADER Entry;
2947 BOOLEAN ThisOne;
2948 PULONG Data;
2949
2950 if (Argc > 1)
2951 {
2952 /* Get address */
2953 if (!KdbpGetHexNumber(Argv[1], &Address))
2954 {
2955 KdbpPrint("Invalid parameter: %s\n", Argv[0]);
2956 return TRUE;
2957 }
2958 }
2959
2960 if (Argc > 2)
2961 {
2962 /* Get flags */
2963 if (!KdbpGetHexNumber(Argv[2], &Flags))
2964 {
2965 KdbpPrint("Invalid parameter: %s\n", Argv[2]);
2966 return TRUE;
2967 }
2968 }
2969
2970 /* Check if we got an address */
2971 if (Address != 0)
2972 {
2973 /* Get the base page */
2974 PoolPage = PAGE_ALIGN(Address);
2975 }
2976 else
2977 {
2978 KdbpPrint("Heap is unimplemented\n");
2979 return TRUE;
2980 }
2981
2982 /* No paging support! */
2983 if (!MmIsAddressValid(PoolPage))
2984 {
2985 KdbpPrint("Address not accessible!\n");
2986 return TRUE;
2987 }
2988
2989 /* Get pool type */
2990 if ((Address >= (ULONG_PTR)MmPagedPoolStart) && (Address <= (ULONG_PTR)MmPagedPoolEnd))
2991 KdbpPrint("Allocation is from PagedPool region\n");
2992 else if ((Address >= (ULONG_PTR)MmNonPagedPoolStart) && (Address <= (ULONG_PTR)MmNonPagedPoolEnd))
2993 KdbpPrint("Allocation is from NonPagedPool region\n");
2994 else
2995 {
2996 KdbpPrint("Address 0x%p is not within any pool!\n", (PVOID)Address);
2997 return TRUE;
2998 }
2999
3000 /* Loop all entries of that page */
3001 Entry = PoolPage;
3002 do
3003 {
3004 /* Check if the address is within that entry */
3005 ThisOne = ((Address >= (ULONG_PTR)Entry) &&
3006 (Address < (ULONG_PTR)(Entry + Entry->BlockSize)));
3007
3008 if (!(Flags & 1) || ThisOne)
3009 {
3010 /* Print the line */
3011 KdbpPrint("%c%p size: %4d previous size: %4d %s %.4s\n",
3012 ThisOne ? '*' : ' ', Entry, Entry->BlockSize, Entry->PreviousSize,
3013 (Flags & 0x80000000) ? "" : (Entry->PoolType ? "(Allocated)" : "(Free) "),
3014 (Flags & 0x80000000) ? "" : (PCHAR)&Entry->PoolTag);
3015 }
3016
3017 if (Flags & 1)
3018 {
3019 Data = (PULONG)(Entry + 1);
3020 KdbpPrint(" %p %08lx %08lx %08lx %08lx\n"
3021 " %p %08lx %08lx %08lx %08lx\n",
3022 &Data[0], Data[0], Data[1], Data[2], Data[3],
3023 &Data[4], Data[4], Data[5], Data[6], Data[7]);
3024 }
3025
3026 /* Go to next entry */
3027 Entry = POOL_BLOCK(Entry, Entry->BlockSize);
3028 }
3029 while ((Entry->BlockSize != 0) && ((ULONG_PTR)Entry < (ULONG_PTR)PoolPage + PAGE_SIZE));
3030
3031 return TRUE;
3032 }
3033
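/*
 * Build a (Tag, Mask) pair from a tag argument, supporting '?' wildcards:
 * every non-'?' character sets 0xFF in the corresponding mask byte, so a
 * later (PoolTag & Mask) == (Tag & Mask) test only compares the characters
 * that were given explicitly. On a little-endian build, "Nt??" yields
 * Mask == 0x0000FFFF and therefore matches any tag starting with "Nt".
 */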
3034 static
3035 VOID
3036 ExpKdbgExtPoolUsedGetTag(PCHAR Arg, PULONG Tag, PULONG Mask)
3037 {
3038 CHAR Tmp[4] = { 0 };
3039 ULONG Len;
3040 USHORT i;
3041
3042 /* Get the tag */
3043 Len = strlen(Arg);
3044 if (Len > 4)
3045 {
3046 Len = 4;
3047 }
3048
3049 /* Generate the mask to have wildcards support */
3050 for (i = 0; i < Len; ++i)
3051 {
3052 Tmp[i] = Arg[i];
3053 if (Tmp[i] != '?')
3054 {
3055 *Mask |= (0xFF << i * 8);
3056 }
3057 }
3058
3059 /* Get the tag in the ulong form */
3060 *Tag = *((PULONG)Tmp);
3061 }
3062
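/*
 * KDBG "!poolused" handler (usage inferred from the parsing below):
 *   !poolused [flags] [tag]
 * With a single argument, a one-character or 0x-prefixed single-digit token
 * is first tried as flags; anything else (or a failed parse) is treated as
 * a tag filter, with '?' acting as a wildcard. The heavy lifting is done by
 * MiDumpPoolConsumers.
 */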
3063 BOOLEAN
3064 ExpKdbgExtPoolUsed(
3065 ULONG Argc,
3066 PCHAR Argv[])
3067 {
3068 ULONG Tag = 0;
3069 ULONG Mask = 0;
3070 ULONG Flags = 0;
3071
3072 if (Argc > 1)
3073 {
3074 /* If we have 2+ args, easy: flags then tag */
3075 if (Argc > 2)
3076 {
3077 ExpKdbgExtPoolUsedGetTag(Argv[2], &Tag, &Mask);
3078 if (!KdbpGetHexNumber(Argv[1], &Flags))
3079 {
3080 KdbpPrint("Invalid parameter: %s\n", Argv[0]);
3081 }
3082 }
3083 else
3084 {
3085 /* Otherwise, try to find out whether that's flags */
3086 if (strlen(Argv[1]) == 1 ||
3087 (strlen(Argv[1]) == 3 && Argv[1][0] == '0' && Argv[1][1] == 'x'))
3088 {
3089 /* Fallback: if reading flags failed, assume it's a tag */
3090 if (!KdbpGetHexNumber(Argv[1], &Flags))
3091 {
3092 ExpKdbgExtPoolUsedGetTag(Argv[1], &Tag, &Mask);
3093 }
3094 }
3095 /* Or tag */
3096 else
3097 {
3098 ExpKdbgExtPoolUsedGetTag(Argv[1], &Tag, &Mask);
3099 }
3100 }
3101 }
3102
3103 /* Call the dumper */
3104 MiDumpPoolConsumers(TRUE, Tag, Mask, Flags);
3105
3106 return TRUE;
3107 }
3108
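/*
 * Heuristically decide whether Entry looks like a live pool header within
 * the page starting at BaseVa: the block must lie entirely inside the page,
 * its back-link must not reach before the page, its encoded base pool type
 * must match the pool being scanned, and its tag must look like ASCII text.
 * False positives remain possible; this only filters out obvious garbage.
 */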
3109 static
3110 BOOLEAN
3111 ExpKdbgExtValidatePoolHeader(
3112 PVOID BaseVa,
3113 PPOOL_HEADER Entry,
3114 POOL_TYPE BasePoolType)
3115 {
3116 /* Block size cannot be zero and the block must not extend past the end of the page */
3117 if (Entry->BlockSize == 0)
3118 {
3119 return FALSE;
3120 }
3121 if (Entry->BlockSize * POOL_BLOCK_SIZE + (ULONG_PTR)Entry - (ULONG_PTR)BaseVa > PAGE_SIZE)
3122 {
3123 return FALSE;
3124 }
3125
3126 /*
3127 * PreviousSize cannot be 0 unless the entry sits at the very start
3128 * of the page, and it cannot reach farther back than our current
3129 * position in the page
3130 */
3131 if (Entry->PreviousSize == 0 && BaseVa != Entry)
3132 {
3133 return FALSE;
3134 }
3135 if (Entry->PreviousSize * POOL_BLOCK_SIZE > (ULONG_PTR)Entry - (ULONG_PTR)BaseVa)
3136 {
3137 return FALSE;
3138 }
3139
3140 /* The encoded base pool type must match the pool we are scanning */
3141 if (((Entry->PoolType - 1) & BASE_POOL_TYPE_MASK) != BasePoolType)
3142 {
3143 return FALSE;
3144 }
3145
3146 /* The low three tag bytes must be 7-bit ASCII (the top byte may carry the PROTECTED_POOL bit) */
3147 if ((Entry->PoolTag & 0x00808080) != 0)
3148 {
3149 return FALSE;
3150 }
3151
3152 return TRUE;
3153 }
3154
3155 static
3156 VOID
3157 ExpKdbgExtPoolFindPagedPool(
3158 ULONG Tag,
3159 ULONG Mask,
3160 VOID (NTAPI* FoundCallback)(PPOOL_HEADER, PVOID),
3161 PVOID CallbackContext)
3162 {
3163 ULONG i = 0;
3164 PPOOL_HEADER Entry;
3165 PVOID BaseVa;
3166 PMMPTE PointerPte;
3167 PMMPDE PointerPde;
3168
3169 KdbpPrint("Searching Paged pool (%p : %p) for Tag: %.4s\n", MmPagedPoolStart, MmPagedPoolEnd, (PCHAR)&Tag);
3170
3171 /*
3172 * To speed up the paged pool search, we will use the allocation bitmap.
3173 * This is possible because we live directly in the kernel :-)
3174 */
3175 i = RtlFindSetBits(MmPagedPoolInfo.PagedPoolAllocationMap, 1, 0);
3176 while (i != 0xFFFFFFFF)
3177 {
3178 BaseVa = (PVOID)((ULONG_PTR)MmPagedPoolStart + (i << PAGE_SHIFT));
3179 Entry = BaseVa;
3180
3181 /* Validate our address */
3182 if ((ULONG_PTR)BaseVa > (ULONG_PTR)MmPagedPoolEnd || (ULONG_PTR)BaseVa + PAGE_SIZE > (ULONG_PTR)MmPagedPoolEnd)
3183 {
3184 break;
3185 }
3186
3187 /* Check whether we are beyond expansion */
3188 PointerPde = MiAddressToPde(BaseVa);
3189 if (PointerPde >= MmPagedPoolInfo.NextPdeForPagedPoolExpansion)
3190 {
3191 break;
3192 }
3193
3194 /* Check if allocation is valid */
3195 PointerPte = MiAddressToPte(BaseVa);
3196 if ((ULONG_PTR)PointerPte > PTE_TOP)
3197 {
3198 break;
3199 }
3200
3201 if (PointerPte->u.Hard.Valid)
3202 {
3203 for (Entry = BaseVa;
3204 (ULONG_PTR)Entry + sizeof(POOL_HEADER) < (ULONG_PTR)BaseVa + PAGE_SIZE;
3205 Entry = (PVOID)((ULONG_PTR)Entry + POOL_BLOCK_SIZE))
3206 {
3207 /* Try to find whether we have a pool entry */
3208 if (!ExpKdbgExtValidatePoolHeader(BaseVa, Entry, PagedPool))
3209 {
3210 continue;
3211 }
3212
3213 if ((Entry->PoolTag & Mask) == (Tag & Mask))
3214 {
3215 if (FoundCallback != NULL)
3216 {
3217 FoundCallback(Entry, CallbackContext);
3218 }
3219 else
3220 {
3221 /* Print the line */
3222 KdbpPrint("%p size: %4d previous size: %4d %s %.4s\n",
3223 Entry, Entry->BlockSize, Entry->PreviousSize,
3224 Entry->PoolType ? "(Allocated)" : "(Free) ",
3225 (PCHAR)&Entry->PoolTag);
3226 }
3227 }
3228 }
3229 }
3230
3231 i = RtlFindSetBits(MmPagedPoolInfo.PagedPoolAllocationMap, 1, i + 1);
3232 }
3233 }
3234
3235 extern PVOID MmNonPagedPoolEnd0;
3236 static
3237 VOID
3238 ExpKdbgExtPoolFindNonPagedPool(
3239 ULONG Tag,
3240 ULONG Mask,
3241 VOID (NTAPI* FoundCallback)(PPOOL_HEADER, PVOID),
3242 PVOID CallbackContext)
3243 {
3244 PPOOL_HEADER Entry;
3245 PVOID BaseVa;
3246 PMMPTE PointerPte;
3247
3248 KdbpPrint("Searching NonPaged pool (%p : %p) for Tag: %.4s\n", MmNonPagedPoolStart, MmNonPagedPoolEnd0, (PCHAR)&Tag);
3249
3250 /* Brute force search: start browsing the whole non paged pool */
3251 for (BaseVa = MmNonPagedPoolStart;
3252 (ULONG_PTR)BaseVa + PAGE_SIZE <= (ULONG_PTR)MmNonPagedPoolEnd0;
3253 BaseVa = (PVOID)((ULONG_PTR)BaseVa + PAGE_SIZE))
3254 {
3255 Entry = BaseVa;
3256
3257 /* Check whether we are beyond expansion */
3258 if (BaseVa >= MmNonPagedPoolExpansionStart)
3259 {
3260 break;
3261 }
3262
3263 /* Check if allocation is valid */
3264 PointerPte = MiAddressToPte(BaseVa);
3265 if ((ULONG_PTR)PointerPte > PTE_TOP)
3266 {
3267 break;
3268 }
3269
3270 if (PointerPte->u.Hard.Valid)
3271 {
3272 for (Entry = BaseVa;
3273 (ULONG_PTR)Entry + sizeof(POOL_HEADER) < (ULONG_PTR)BaseVa + PAGE_SIZE;
3274 Entry = (PVOID)((ULONG_PTR)Entry + POOL_BLOCK_SIZE))
3275 {
3276 /* Try to find whether we have a pool entry */
3277 if (!ExpKdbgExtValidatePoolHeader(BaseVa, Entry, NonPagedPool))
3278 {
3279 continue;
3280 }
3281
3282 if ((Entry->PoolTag & Mask) == (Tag & Mask))
3283 {
3284 if (FoundCallback != NULL)
3285 {
3286 FoundCallback(Entry, CallbackContext);
3287 }
3288 else
3289 {
3290 /* Print the line */
3291 KdbpPrint("%p size: %4d previous size: %4d %s %.4s\n",
3292 Entry, Entry->BlockSize, Entry->PreviousSize,
3293 Entry->PoolType ? "(Allocated)" : "(Free) ",
3294 (PCHAR)&Entry->PoolTag);
3295 }
3296 }
3297 }
3298 }
3299 }
3300 }
3301
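/*
 * KDBG "!poolfind" handler (usage inferred from the parsing below):
 *   !poolfind <tag> [pooltype]
 * <tag> may contain '?' wildcards, or be '*' to match everything;
 * [pooltype] is 0 for nonpaged (the default) or 1 for paged pool.
 * Example: "!poolfind Irp? 1" scans the paged pool for matching tags.
 */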
3302 BOOLEAN
3303 ExpKdbgExtPoolFind(
3304 ULONG Argc,
3305 PCHAR Argv[])
3306 {
3307 ULONG Tag = 0;
3308 ULONG Mask = 0;
3309 ULONG PoolType = NonPagedPool;
3310
3311 if (Argc == 1)
3312 {
3313 KdbpPrint("Specify a tag string\n");
3314 return TRUE;
3315 }
3316
3317 /* First arg is tag */
3318 if (strlen(Argv[1]) != 1 || Argv[1][0] != '*')
3319 {
3320 ExpKdbgExtPoolUsedGetTag(Argv[1], &Tag, &Mask);
3321 }
3322
3323 /* Second arg might be pool to search */
3324 if (Argc > 2)
3325 {
3326 PoolType = strtoul(Argv[2], NULL, 0);
3327
3328 if (PoolType > 1)
3329 {
3330 KdbpPrint("Only (non) paged pool are supported\n");
3331 return TRUE;
3332 }
3333 }
3334
3335 /* FIXME: What about large pool? */
3336
3337 if (PoolType == NonPagedPool)
3338 {
3339 ExpKdbgExtPoolFindNonPagedPool(Tag, Mask, NULL, NULL);
3340 }
3341 else if (PoolType == PagedPool)
3342 {
3343 ExpKdbgExtPoolFindPagedPool(Tag, Mask, NULL, NULL);
3344 }
3345
3346 return TRUE;
3347 }
3348
3349 typedef struct _IRP_FIND_CTXT
3350 {
3351 ULONG_PTR RestartAddress;
3352 ULONG_PTR SData;
3353 ULONG Criteria;
3354 } IRP_FIND_CTXT, *PIRP_FIND_CTXT;
3355
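/*
 * Callback invoked by the !poolfind helpers for every pool header whose tag
 * matched TAG_IRP: treat the block body as an IRP and print it if it
 * satisfies the restart address and the search criteria in the context.
 */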
3356 VOID
3357 NTAPI
3358 ExpKdbgExtIrpFindPrint(
3359 PPOOL_HEADER Entry,
3360 PVOID Context)
3361 {
3362 PIRP Irp;
3363 PIRP_FIND_CTXT FindCtxt = Context;
3364 PIO_STACK_LOCATION IoStack = NULL;
3365 PUNICODE_STRING DriverName = NULL; /* may legitimately stay NULL when the stack location is bogus */
3366 ULONG_PTR SData = FindCtxt->SData;
3367 ULONG Criteria = FindCtxt->Criteria;
3368
3369 /* Free entry, ignore */
3370 if (Entry->PoolType == 0)
3371 {
3372 return;
3373 }
3374
3375 /* Get the IRP */
3376 Irp = (PIRP)POOL_FREE_BLOCK(Entry);
3377
3378 /* Bail out if not matching restart address */
3379 if ((ULONG_PTR)Irp < FindCtxt->RestartAddress)
3380 {
3381 return;
3382 }
3383
3384 /* Avoid bogus IRP stack locations */
3385 if (Irp->CurrentLocation <= Irp->StackCount + 1)
3386 {
3387 IoStack = IoGetCurrentIrpStackLocation(Irp);
3388
3389 /* Get associated driver */
3390 if (IoStack->DeviceObject && IoStack->DeviceObject->DriverObject)
3391 DriverName = &IoStack->DeviceObject->DriverObject->DriverName;
3392 else
3393 DriverName = NULL;
3394 }
3395
3396 /* Display if: no data, no criteria or if criteria matches data */
3397 if (SData == 0 || Criteria == 0 ||
3398 (Criteria & 0x1 && IoStack && SData == (ULONG_PTR)IoStack->DeviceObject) ||
3399 (Criteria & 0x2 && SData == (ULONG_PTR)Irp->Tail.Overlay.OriginalFileObject) ||
3400 (Criteria & 0x4 && Irp->MdlAddress && SData == (ULONG_PTR)Irp->MdlAddress->Process) ||
3401 (Criteria & 0x8 && SData == (ULONG_PTR)Irp->Tail.Overlay.Thread) ||
3402 (Criteria & 0x10 && SData == (ULONG_PTR)Irp->UserEvent))
3403 {
3404 KdbpPrint("%p Thread %p current stack belongs to %wZ\n", Irp, Irp->Tail.Overlay.Thread, DriverName);
3405 }
3406 }
3407
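/*
 * KDBG "!irpfind" handler (usage inferred from the parsing below):
 *   !irpfind [pooltype] [restart-address] [criteria] [data]
 * where [criteria] is one of: device, fileobject, mdlprocess, thread,
 * userevent, or arg (any of the above), and [data] is the value the chosen
 * IRP field must match. Example (with a hypothetical thread address):
 * "!irpfind 0 0 thread 0x81234560" lists nonpaged IRPs queued by that thread.
 */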
3408 BOOLEAN
3409 ExpKdbgExtIrpFind(
3410 ULONG Argc,
3411 PCHAR Argv[])
3412 {
3413 ULONG PoolType = NonPagedPool;
3414 IRP_FIND_CTXT FindCtxt;
3415
3416 /* Pool type */
3417 if (Argc > 1)
3418 {
3419 PoolType = strtoul(Argv[1], NULL, 0);
3420
3421 if (PoolType > 1)
3422 {
3423 KdbpPrint("Only (non) paged pool are supported\n");
3424 return TRUE;
3425 }
3426 }
3427
3428 RtlZeroMemory(&FindCtxt, sizeof(IRP_FIND_CTXT));
3429
3430 /* Restart address */
3431 if (Argc > 2)
3432 {
3433 if (!KdbpGetHexNumber(Argv[2], &FindCtxt.RestartAddress))
3434 {
3435 KdbpPrint("Invalid parameter: %s\n", Argv[0]);
3436 FindCtxt.RestartAddress = 0;
3437 }
3438 }
3439
3440 if (Argc > 4)
3441 {
3442 if (!KdbpGetHexNumber(Argv[4], &FindCtxt.SData))
3443 {
3444 FindCtxt.SData = 0;
3445 }
3446 else
3447 {
3448 if (strcmp(Argv[3], "device") == 0)
3449 {
3450 FindCtxt.Criteria = 0x1;
3451 }
3452 else if (strcmp(Argv[3], "fileobject") == 0)
3453 {
3454 FindCtxt.Criteria = 0x2;
3455 }
3456 else if (strcmp(Argv[3], "mdlprocess") == 0)
3457 {
3458 FindCtxt.Criteria = 0x4;
3459 }
3460 else if (strcmp(Argv[3], "thread") == 0)
3461 {
3462 FindCtxt.Criteria = 0x8;
3463 }
3464 else if (strcmp(Argv[3], "userevent") == 0)
3465 {
3466 FindCtxt.Criteria = 0x10;
3467 }
3468 else if (strcmp(Argv[3], "arg") == 0)
3469 {
3470 FindCtxt.Criteria = 0x1f;
3471 }
3472 }
3473 }
3474
3475 if (PoolType == NonPagedPool)
3476 {
3477 ExpKdbgExtPoolFindNonPagedPool(TAG_IRP, 0xFFFFFFFF, ExpKdbgExtIrpFindPrint, &FindCtxt);
3478 }
3479 else if (PoolType == PagedPool)
3480 {
3481 ExpKdbgExtPoolFindPagedPool(TAG_IRP, 0xFFFFFFFF, ExpKdbgExtIrpFindPrint, &FindCtxt);
3482 }
3483
3484 return TRUE;
3485 }
3486
3487 #endif // DBG && KDBG
3488
3489 /* EOF */