[NTOSKRNL] Add a non-paged pool memory dumper for low-memory situations.
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/expool.c
5 * PURPOSE: ARM Memory Manager Executive Pool Manager
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 #define MODULE_INVOLVED_IN_ARM3
16 #include <mm/ARM3/miarm.h>
17
18 #undef ExAllocatePoolWithQuota
19 #undef ExAllocatePoolWithQuotaTag
20
21 /* GLOBALS ********************************************************************/
22
23 #define POOL_BIG_TABLE_ENTRY_FREE 0x1
24
25 typedef struct _POOL_DPC_CONTEXT
26 {
27 PPOOL_TRACKER_TABLE PoolTrackTable;
28 SIZE_T PoolTrackTableSize;
29 PPOOL_TRACKER_TABLE PoolTrackTableExpansion;
30 SIZE_T PoolTrackTableSizeExpansion;
31 } POOL_DPC_CONTEXT, *PPOOL_DPC_CONTEXT;
32
33 ULONG ExpNumberOfPagedPools;
34 POOL_DESCRIPTOR NonPagedPoolDescriptor;
35 PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16 + 1];
36 PPOOL_DESCRIPTOR PoolVector[2];
37 PKGUARDED_MUTEX ExpPagedPoolMutex;
38 SIZE_T PoolTrackTableSize, PoolTrackTableMask;
39 SIZE_T PoolBigPageTableSize, PoolBigPageTableHash;
40 PPOOL_TRACKER_TABLE PoolTrackTable;
41 PPOOL_TRACKER_BIG_PAGES PoolBigPageTable;
42 KSPIN_LOCK ExpTaggedPoolLock;
43 ULONG PoolHitTag;
44 BOOLEAN ExStopBadTags;
45 KSPIN_LOCK ExpLargePoolTableLock;
46 ULONG ExpPoolBigEntriesInUse;
47 ULONG ExpPoolFlags;
48 ULONG ExPoolFailures;
49
50 /* Pool block/header/list access macros */
51 #define POOL_ENTRY(x) (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
52 #define POOL_FREE_BLOCK(x) (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
53 #define POOL_BLOCK(x, i) (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
54 #define POOL_NEXT_BLOCK(x) POOL_BLOCK((x), (x)->BlockSize)
55 #define POOL_PREV_BLOCK(x) POOL_BLOCK((x), -((x)->PreviousSize))
56
57 /*
58 * Pool list access debug macros, similar to Arthur's pfnlist.c work.
59 * Microsoft actually implements similar checks in the Windows Server 2003 SP1
60 * pool code, but only for checked builds.
61 *
62 * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
63 * that these checks are done even on retail builds, due to the increasing
64 * number of kernel-mode attacks which depend on dangling list pointers and other
65 * kinds of list-based attacks.
66 *
67 * For now, I will leave these checks on all the time, but later they are likely
68 * to be DBG-only, at least until there are enough kernel-mode security attacks
69 * against ReactOS to warrant the performance hit.
70 *
71 * For now, these are not made inline, so we can get good stack traces.
72 */
73 PLIST_ENTRY
74 NTAPI
75 ExpDecodePoolLink(IN PLIST_ENTRY Link)
76 {
77 return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
78 }
79
80 PLIST_ENTRY
81 NTAPI
82 ExpEncodePoolLink(IN PLIST_ENTRY Link)
83 {
84 return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
85 }
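//
// Note: borrowing bit 0 as the "encoded" marker is safe because pool blocks
// (and thus the LIST_ENTRYs embedded in their free space) are always at
// least POOL_BLOCK_SIZE-aligned (8 or 16 bytes depending on architecture),
// so a valid pointer never has it set. Roughly:
//
//     Encoded = Link | 1;          // 0xA0B0C0D0 -> 0xA0B0C0D1
//     Decoded = Encoded & ~1;      // 0xA0B0C0D1 -> 0xA0B0C0D0
//
// A raw (unencoded) pointer scribbled into a list entry therefore fails the
// consistency checks in ExpCheckPoolLinks below.
//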
86
87 VOID
88 NTAPI
89 ExpCheckPoolLinks(IN PLIST_ENTRY ListHead)
90 {
91 if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink) != ListHead) ||
92 (ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink) != ListHead))
93 {
94 KeBugCheckEx(BAD_POOL_HEADER,
95 3,
96 (ULONG_PTR)ListHead,
97 (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink),
98 (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink));
99 }
100 }
101
102 VOID
103 NTAPI
104 ExpInitializePoolListHead(IN PLIST_ENTRY ListHead)
105 {
106 ListHead->Flink = ListHead->Blink = ExpEncodePoolLink(ListHead);
107 }
108
109 BOOLEAN
110 NTAPI
111 ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)
112 {
113 return (ExpDecodePoolLink(ListHead->Flink) == ListHead);
114 }
115
116 VOID
117 NTAPI
118 ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
119 {
120 PLIST_ENTRY Blink, Flink;
121 Flink = ExpDecodePoolLink(Entry->Flink);
122 Blink = ExpDecodePoolLink(Entry->Blink);
123 Flink->Blink = ExpEncodePoolLink(Blink);
124 Blink->Flink = ExpEncodePoolLink(Flink);
125 }
126
127 PLIST_ENTRY
128 NTAPI
129 ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
130 {
131 PLIST_ENTRY Entry, Flink;
132 Entry = ExpDecodePoolLink(ListHead->Flink);
133 Flink = ExpDecodePoolLink(Entry->Flink);
134 ListHead->Flink = ExpEncodePoolLink(Flink);
135 Flink->Blink = ExpEncodePoolLink(ListHead);
136 return Entry;
137 }
138
139 PLIST_ENTRY
140 NTAPI
141 ExpRemovePoolTailList(IN PLIST_ENTRY ListHead)
142 {
143 PLIST_ENTRY Entry, Blink;
144 Entry = ExpDecodePoolLink(ListHead->Blink);
145 Blink = ExpDecodePoolLink(Entry->Blink);
146 ListHead->Blink = ExpEncodePoolLink(Blink);
147 Blink->Flink = ExpEncodePoolLink(ListHead);
148 return Entry;
149 }
150
151 VOID
152 NTAPI
153 ExpInsertPoolTailList(IN PLIST_ENTRY ListHead,
154 IN PLIST_ENTRY Entry)
155 {
156 PLIST_ENTRY Blink;
157 ExpCheckPoolLinks(ListHead);
158 Blink = ExpDecodePoolLink(ListHead->Blink);
159 Entry->Flink = ExpEncodePoolLink(ListHead);
160 Entry->Blink = ExpEncodePoolLink(Blink);
161 Blink->Flink = ExpEncodePoolLink(Entry);
162 ListHead->Blink = ExpEncodePoolLink(Entry);
163 ExpCheckPoolLinks(ListHead);
164 }
165
166 VOID
167 NTAPI
168 ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead,
169 IN PLIST_ENTRY Entry)
170 {
171 PLIST_ENTRY Flink;
172 ExpCheckPoolLinks(ListHead);
173 Flink = ExpDecodePoolLink(ListHead->Flink);
174 Entry->Flink = ExpEncodePoolLink(Flink);
175 Entry->Blink = ExpEncodePoolLink(ListHead);
176 Flink->Blink = ExpEncodePoolLink(Entry);
177 ListHead->Flink = ExpEncodePoolLink(Entry);
178 ExpCheckPoolLinks(ListHead);
179 }
180
181 VOID
182 NTAPI
183 ExpCheckPoolHeader(IN PPOOL_HEADER Entry)
184 {
185 PPOOL_HEADER PreviousEntry, NextEntry;
186
187 /* Is there a block before this one? */
188 if (Entry->PreviousSize)
189 {
190 /* Get it */
191 PreviousEntry = POOL_PREV_BLOCK(Entry);
192
193 /* The two blocks must be on the same page! */
194 if (PAGE_ALIGN(Entry) != PAGE_ALIGN(PreviousEntry))
195 {
196 /* Something is awry */
197 KeBugCheckEx(BAD_POOL_HEADER,
198 6,
199 (ULONG_PTR)PreviousEntry,
200 __LINE__,
201 (ULONG_PTR)Entry);
202 }
203
204 /* This block should also indicate that it's as large as we think it is */
205 if (PreviousEntry->BlockSize != Entry->PreviousSize)
206 {
207 /* Otherwise, someone corrupted one of the sizes */
208 DPRINT1("PreviousEntry BlockSize %lu, tag %.4s. Entry PreviousSize %lu, tag %.4s\n",
209 PreviousEntry->BlockSize, (char *)&PreviousEntry->PoolTag,
210 Entry->PreviousSize, (char *)&Entry->PoolTag);
211 KeBugCheckEx(BAD_POOL_HEADER,
212 5,
213 (ULONG_PTR)PreviousEntry,
214 __LINE__,
215 (ULONG_PTR)Entry);
216 }
217 }
218 else if (PAGE_ALIGN(Entry) != Entry)
219 {
220 /* If there's no block before us, we are the first block, so we should be on a page boundary */
221 KeBugCheckEx(BAD_POOL_HEADER,
222 7,
223 0,
224 __LINE__,
225 (ULONG_PTR)Entry);
226 }
227
228 /* This block must have a size */
229 if (!Entry->BlockSize)
230 {
231 /* Someone must've corrupted this field */
232 if (Entry->PreviousSize)
233 {
234 PreviousEntry = POOL_PREV_BLOCK(Entry);
235 DPRINT1("PreviousEntry tag %.4s. Entry tag %.4s\n",
236 (char *)&PreviousEntry->PoolTag,
237 (char *)&Entry->PoolTag);
238 }
239 else
240 {
241 DPRINT1("Entry tag %.4s\n",
242 (char *)&Entry->PoolTag);
243 }
244 KeBugCheckEx(BAD_POOL_HEADER,
245 8,
246 0,
247 __LINE__,
248 (ULONG_PTR)Entry);
249 }
250
251 /* Okay, now get the next block */
252 NextEntry = POOL_NEXT_BLOCK(Entry);
253
254 /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
255 if (PAGE_ALIGN(NextEntry) != NextEntry)
256 {
257 /* The two blocks must be on the same page! */
258 if (PAGE_ALIGN(Entry) != PAGE_ALIGN(NextEntry))
259 {
260 /* Something is messed up */
261 KeBugCheckEx(BAD_POOL_HEADER,
262 9,
263 (ULONG_PTR)NextEntry,
264 __LINE__,
265 (ULONG_PTR)Entry);
266 }
267
268 /* And this block should think we are as large as we truly are */
269 if (NextEntry->PreviousSize != Entry->BlockSize)
270 {
271 /* Otherwise, someone corrupted the field */
272 DPRINT1("Entry BlockSize %lu, tag %.4s. NextEntry PreviousSize %lu, tag %.4s\n",
273 Entry->BlockSize, (char *)&Entry->PoolTag,
274 NextEntry->PreviousSize, (char *)&NextEntry->PoolTag);
275 KeBugCheckEx(BAD_POOL_HEADER,
276 5,
277 (ULONG_PTR)NextEntry,
278 __LINE__,
279 (ULONG_PTR)Entry);
280 }
281 }
282 }
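//
// To make these invariants concrete: a pool page carved into two blocks
// might look like this (sizes in POOL_BLOCK_SIZE units, layout is a sketch):
//
//     page start                                            page end
//     +-------+----------+-------+--------------------------+
//     | HDR A | A's data | HDR B | B's data                 |
//     +-------+----------+-------+--------------------------+
//     A: PreviousSize = 0 (first block, page-aligned), BlockSize = 3
//     B: PreviousSize = 3 (must equal A's BlockSize),  BlockSize = rest
//
// Every check above verifies one of these relationships: neighbors share a
// page, PreviousSize mirrors the previous block's BlockSize, and no block
// has a zero size.
//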
283
284 VOID
285 NTAPI
286 ExpCheckPoolAllocation(
287 PVOID P,
288 POOL_TYPE PoolType,
289 ULONG Tag)
290 {
291 PPOOL_HEADER Entry;
292 ULONG i;
293 KIRQL OldIrql;
294 POOL_TYPE RealPoolType;
295
296 /* Get the pool header */
297 Entry = ((PPOOL_HEADER)P) - 1;
298
299 /* Check if this is a large allocation */
300 if (PAGE_ALIGN(P) == P)
301 {
302 /* Lock the pool table */
303 KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
304
305 /* Find the pool tag */
306 for (i = 0; i < PoolBigPageTableSize; i++)
307 {
308 /* Check if this is our allocation */
309 if (PoolBigPageTable[i].Va == P)
310 {
311 /* Make sure the tag is ok */
312 if (PoolBigPageTable[i].Key != Tag)
313 {
314 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, PoolBigPageTable[i].Key, Tag);
315 }
316
317 break;
318 }
319 }
320
321 /* Release the lock */
322 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
323
324 if (i == PoolBigPageTableSize)
325 {
326 /* Did not find the allocation */
327 //ASSERT(FALSE);
328 }
329
330 /* Get Pool type by address */
331 RealPoolType = MmDeterminePoolType(P);
332 }
333 else
334 {
335 /* Verify the tag */
336 if (Entry->PoolTag != Tag)
337 {
338 DPRINT1("Allocation has wrong pool tag! Expected '%.4s', got '%.4s' (0x%08lx)\n",
339 &Tag, &Entry->PoolTag, Entry->PoolTag);
340 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Entry->PoolTag, Tag);
341 }
342
343 /* Check the rest of the header */
344 ExpCheckPoolHeader(Entry);
345
346 /* Get Pool type from entry */
347 RealPoolType = (Entry->PoolType - 1);
348 }
349
350 /* Should we check the pool type? */
351 if (PoolType != -1)
352 {
353 /* Verify the pool type */
354 if (RealPoolType != PoolType)
355 {
356 DPRINT1("Wrong pool type! Expected %s, got %s\n",
357 PoolType & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool",
358 (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool");
359 KeBugCheckEx(BAD_POOL_CALLER, 0xCC, (ULONG_PTR)P, Entry->PoolTag, Tag);
360 }
361 }
362 }
363
364 VOID
365 NTAPI
366 ExpCheckPoolBlocks(IN PVOID Block)
367 {
368 BOOLEAN FoundBlock = FALSE;
369 SIZE_T Size = 0;
370 PPOOL_HEADER Entry;
371
372 /* Get the first entry for this page, make sure it really is the first */
373 Entry = PAGE_ALIGN(Block);
374 ASSERT(Entry->PreviousSize == 0);
375
376 /* Now scan each entry */
377 while (TRUE)
378 {
379 /* When we actually found our block, remember this */
380 if (Entry == Block) FoundBlock = TRUE;
381
382 /* Now validate this block header */
383 ExpCheckPoolHeader(Entry);
384
385 /* And go to the next one, keeping track of our size */
386 Size += Entry->BlockSize;
387 Entry = POOL_NEXT_BLOCK(Entry);
388
389 /* If we hit the last block, stop */
390 if (Size >= (PAGE_SIZE / POOL_BLOCK_SIZE)) break;
391
392 /* If we hit the end of the page, stop */
393 if (PAGE_ALIGN(Entry) == Entry) break;
394 }
395
396 /* We must've found our block, and we must have hit the end of the page */
397 if ((PAGE_ALIGN(Entry) != Entry) || !(FoundBlock))
398 {
399 /* Otherwise, the blocks are messed up */
400 KeBugCheckEx(BAD_POOL_HEADER, 10, (ULONG_PTR)Block, __LINE__, (ULONG_PTR)Entry);
401 }
402 }
403
404 FORCEINLINE
405 VOID
406 ExpCheckPoolIrqlLevel(IN POOL_TYPE PoolType,
407 IN SIZE_T NumberOfBytes,
408 IN PVOID Entry)
409 {
410 //
411 // Validate IRQL: It must be APC_LEVEL or lower for Paged Pool, and it must
412 // be DISPATCH_LEVEL or lower for Non Paged Pool
413 //
414 if (((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) ?
415 (KeGetCurrentIrql() > APC_LEVEL) :
416 (KeGetCurrentIrql() > DISPATCH_LEVEL))
417 {
418 //
419 // Take the system down
420 //
421 KeBugCheckEx(BAD_POOL_CALLER,
422 !Entry ? POOL_ALLOC_IRQL_INVALID : POOL_FREE_IRQL_INVALID,
423 KeGetCurrentIrql(),
424 PoolType,
425 !Entry ? NumberOfBytes : (ULONG_PTR)Entry);
426 }
427 }
428
429 FORCEINLINE
430 ULONG
431 ExpComputeHashForTag(IN ULONG Tag,
432 IN SIZE_T BucketMask)
433 {
434 //
435 // Compute the hash by multiplying with a large prime number and then XORing
436 // with the HIDWORD of the result.
437 //
438 // Finally, AND with the bucket mask to generate a valid index/bucket into
439 // the table
440 //
441 ULONGLONG Result = (ULONGLONG)40543 * Tag;
442 return (ULONG)BucketMask & ((ULONG)Result ^ (Result >> 32));
443 }
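//
// With the default tracker table (2048 entries, see InitializePool below)
// the mask works out to 0x7FF, so e.g. the 'looP' tag lands in the bucket:
//
//     ULONGLONG Result = 40543ULL * 'looP';
//     ULONG Bucket = 0x7FF & ((ULONG)Result ^ (ULONG)(Result >> 32));
//
// Collisions are handled by the callers with simple linear probing; see
// ExpInsertPoolTracker and ExpRemovePoolTracker below.
//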
444
445 FORCEINLINE
446 ULONG
447 ExpComputePartialHashForAddress(IN PVOID BaseAddress)
448 {
449 ULONG Result;
450 //
451     // Compute the hash by converting the address into a page number, and then
452     // XOR-folding each of its bytes into the low byte.
453 //
454 // We do *NOT* AND with the bucket mask at this point because big table expansion
455 // might happen. Therefore, the final step of the hash must be performed
456 // while holding the expansion pushlock, and this is why we call this a
457 // "partial" hash only.
458 //
459 Result = (ULONG)((ULONG_PTR)BaseAddress >> PAGE_SHIFT);
460 return (Result >> 24) ^ (Result >> 16) ^ (Result >> 8) ^ Result;
461 }
462
463 FORCEINLINE
464 BOOLEAN
465 ExpTagAllowPrint(CHAR Tag)
466 {
467 if ((Tag >= 'a' && Tag <= 'z') ||
468 (Tag >= 'A' && Tag <= 'Z') ||
469 Tag == ' ')
470 {
471 return TRUE;
472 }
473
474 return FALSE;
475 }
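//
// Note that digits do not pass this filter, so a tag containing one (such
// as the hot tag '0ftN' seeded later in this file) falls back to the raw
// hex path in MiDumpNonPagedPoolConsumers below.
//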
476
477 VOID
478 MiDumpNonPagedPoolConsumers(VOID)
479 {
480     SIZE_T i;
481
482 DPRINT1("---------------------\n");
483 DPRINT1("Out of memory dumper!\n");
484
485 //
486 // We'll extract allocations for all the tracked pools
487 //
488 for (i = 0; i < PoolTrackTableSize; ++i)
489 {
490 PPOOL_TRACKER_TABLE TableEntry;
491
492 TableEntry = &PoolTrackTable[i];
493
494 //
495 // We only care about non paged
496 //
497 if (TableEntry->NonPagedBytes != 0)
498 {
499 //
500 // If there's a tag, attempt to do a pretty print
501 //
502 if (TableEntry->Key != 0 && TableEntry->Key != TAG_NONE)
503 {
504 CHAR Tag[4];
505
506 //
507 // Extract each 'component' and check whether they are printable
508 //
509 Tag[0] = TableEntry->Key & 0xFF;
510 Tag[1] = TableEntry->Key >> 8 & 0xFF;
511 Tag[2] = TableEntry->Key >> 16 & 0xFF;
512 Tag[3] = TableEntry->Key >> 24 & 0xFF;
513
514 if (ExpTagAllowPrint(Tag[0]) && ExpTagAllowPrint(Tag[1]) && ExpTagAllowPrint(Tag[2]) && ExpTagAllowPrint(Tag[3]))
515 {
516 //
517 // Print in reversed order to match what is in source code
518 //
519 DPRINT1("Tag: '%c%c%c%c', Size: %ld\n", Tag[3], Tag[2], Tag[1], Tag[0], TableEntry->NonPagedBytes);
520 }
521 else
522 {
523 DPRINT1("Tag: %x, Size: %ld\n", TableEntry->Key, TableEntry->NonPagedBytes);
524 }
525 }
526 else
527 {
528 DPRINT1("Anon, Size: %ld\n", TableEntry->NonPagedBytes);
529 }
530 }
531 }
532
533 DPRINT1("---------------------\n");
534 }
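//
// A dump from this routine might look something like this (sizes here are
// made up for illustration):
//
//     ---------------------
//     Out of memory dumper!
//     Tag: 'Io  ', Size: 1232896
//     Tag: 'Mdl ', Size: 65536
//     Tag: 30667453, Size: 8192
//     Anon, Size: 4096
//     ---------------------
//
// Tags are printed reversed so they read the way they were written at the
// allocation site.
//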
535
536 /* PRIVATE FUNCTIONS **********************************************************/
537
538 VOID
539 NTAPI
540 INIT_SECTION
541 ExpSeedHotTags(VOID)
542 {
543 ULONG i, Key, Hash, Index;
544 PPOOL_TRACKER_TABLE TrackTable = PoolTrackTable;
545 ULONG TagList[] =
546 {
547         '  oI',
548 ' laH',
549 'PldM',
550 'LooP',
551 'tSbO',
552 ' prI',
553 'bdDN',
554 'LprI',
555 'pOoI',
556 ' ldM',
557 'eliF',
558 'aVMC',
559 'dSeS',
560 'CFtN',
561 'looP',
562 'rPCT',
563 'bNMC',
564 'dTeS',
565 'sFtN',
566 'TPCT',
567 'CPCT',
568 ' yeK',
569 'qSbO',
570 'mNoI',
571 'aEoI',
572 'cPCT',
573 'aFtN',
574 '0ftN',
575 'tceS',
576 'SprI',
577 'ekoT',
578         '  eS',
579 'lCbO',
580 'cScC',
581 'lFtN',
582 'cAeS',
583 'mfSF',
584 'kWcC',
585 'miSF',
586 'CdfA',
587 'EdfA',
588 'orSF',
589 'nftN',
590 'PRIU',
591 'rFpN',
592 'RFpN',
593 'aPeS',
594 'sUeS',
595 'FpcA',
596 'MpcA',
597 'cSeS',
598 'mNbO',
599 'sFpN',
600 'uLeS',
601 'DPcS',
602 'nevE',
603 'vrqR',
604 'ldaV',
605         '  pP',
606 'SdaV',
607 ' daV',
608 'LdaV',
609 'FdaV',
610 ' GIB',
611 };
612
613 //
614 // Loop all 64 hot tags
615 //
616 ASSERT((sizeof(TagList) / sizeof(ULONG)) == 64);
617 for (i = 0; i < sizeof(TagList) / sizeof(ULONG); i++)
618 {
619 //
620 // Get the current tag, and compute its hash in the tracker table
621 //
622 Key = TagList[i];
623 Hash = ExpComputeHashForTag(Key, PoolTrackTableMask);
624
625 //
626 // Loop all the hashes in this index/bucket
627 //
628 Index = Hash;
629 while (TRUE)
630 {
631 //
632 // Find an empty entry, and make sure this isn't the last hash that
633 // can fit.
634 //
635 // On checked builds, also make sure this is the first time we are
636 // seeding this tag.
637 //
638 ASSERT(TrackTable[Hash].Key != Key);
639 if (!(TrackTable[Hash].Key) && (Hash != PoolTrackTableSize - 1))
640 {
641 //
642 // It has been seeded, move on to the next tag
643 //
644 TrackTable[Hash].Key = Key;
645 break;
646 }
647
648 //
649 // This entry was already taken, compute the next possible hash while
650 // making sure we're not back at our initial index.
651 //
652 ASSERT(TrackTable[Hash].Key != Key);
653 Hash = (Hash + 1) & PoolTrackTableMask;
654 if (Hash == Index) break;
655 }
656 }
657 }
658
659 VOID
660 NTAPI
661 ExpRemovePoolTracker(IN ULONG Key,
662 IN SIZE_T NumberOfBytes,
663 IN POOL_TYPE PoolType)
664 {
665 ULONG Hash, Index;
666 PPOOL_TRACKER_TABLE Table, TableEntry;
667 SIZE_T TableMask, TableSize;
668
669 //
670 // Remove the PROTECTED_POOL flag which is not part of the tag
671 //
672 Key &= ~PROTECTED_POOL;
673
674 //
675 // With WinDBG you can set a tag you want to break on when an allocation is
676 // attempted
677 //
678 if (Key == PoolHitTag) DbgBreakPoint();
679
680 //
681 // Why the double indirection? Because normally this function is also used
682 // when doing session pool allocations, which has another set of tables,
683 // sizes, and masks that live in session pool. Now we don't support session
684 // pool so we only ever use the regular tables, but I'm keeping the code this
685 // way so that the day we DO support session pool, it won't require that
686 // many changes
687 //
688 Table = PoolTrackTable;
689 TableMask = PoolTrackTableMask;
690 TableSize = PoolTrackTableSize;
691 DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize);
692
693 //
694 // Compute the hash for this key, and loop all the possible buckets
695 //
696 Hash = ExpComputeHashForTag(Key, TableMask);
697 Index = Hash;
698 while (TRUE)
699 {
700 //
701         // Have we found the entry for this tag?
702 //
703 TableEntry = &Table[Hash];
704 if (TableEntry->Key == Key)
705 {
706 //
707 // Decrement the counters depending on if this was paged or nonpaged
708 // pool
709 //
710 if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
711 {
712 InterlockedIncrement(&TableEntry->NonPagedFrees);
713 InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes,
714 -(SSIZE_T)NumberOfBytes);
715 return;
716 }
717 InterlockedIncrement(&TableEntry->PagedFrees);
718 InterlockedExchangeAddSizeT(&TableEntry->PagedBytes,
719 -(SSIZE_T)NumberOfBytes);
720 return;
721 }
722
723 //
724 // We should have only ended up with an empty entry if we've reached
725 // the last bucket
726 //
727 if (!TableEntry->Key)
728 {
729 DPRINT1("Empty item reached in tracker table. Hash=0x%lx, TableMask=0x%lx, Tag=0x%08lx, NumberOfBytes=%lu, PoolType=%d\n",
730 Hash, TableMask, Key, (ULONG)NumberOfBytes, PoolType);
731 ASSERT(Hash == TableMask);
732 }
733
734 //
735 // This path is hit when we don't have an entry, and the current bucket
736 // is full, so we simply try the next one
737 //
738 Hash = (Hash + 1) & TableMask;
739 if (Hash == Index) break;
740 }
741
742 //
743 // And finally this path is hit when all the buckets are full, and we need
744 // some expansion. This path is not yet supported in ReactOS and so we'll
745 // ignore the tag
746 //
747 DPRINT1("Out of pool tag space, ignoring...\n");
748 }
749
750 VOID
751 NTAPI
752 ExpInsertPoolTracker(IN ULONG Key,
753 IN SIZE_T NumberOfBytes,
754 IN POOL_TYPE PoolType)
755 {
756 ULONG Hash, Index;
757 KIRQL OldIrql;
758 PPOOL_TRACKER_TABLE Table, TableEntry;
759 SIZE_T TableMask, TableSize;
760
761 //
762 // Remove the PROTECTED_POOL flag which is not part of the tag
763 //
764 Key &= ~PROTECTED_POOL;
765
766 //
767 // With WinDBG you can set a tag you want to break on when an allocation is
768 // attempted
769 //
770 if (Key == PoolHitTag) DbgBreakPoint();
771
772 //
773 // There is also an internal flag you can set to break on malformed tags
774 //
775 if (ExStopBadTags) ASSERT(Key & 0xFFFFFF00);
776
777 //
778 // ASSERT on ReactOS features not yet supported
779 //
780 ASSERT(!(PoolType & SESSION_POOL_MASK));
781 ASSERT(KeGetCurrentProcessorNumber() == 0);
782
783 //
784 // Why the double indirection? Because normally this function is also used
785 // when doing session pool allocations, which has another set of tables,
786 // sizes, and masks that live in session pool. Now we don't support session
787 // pool so we only ever use the regular tables, but I'm keeping the code this
788 // way so that the day we DO support session pool, it won't require that
789 // many changes
790 //
791 Table = PoolTrackTable;
792 TableMask = PoolTrackTableMask;
793 TableSize = PoolTrackTableSize;
794 DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize);
795
796 //
797 // Compute the hash for this key, and loop all the possible buckets
798 //
799 Hash = ExpComputeHashForTag(Key, TableMask);
800 Index = Hash;
801 while (TRUE)
802 {
803 //
804         // Do we already have an entry for this tag?
805 //
806 TableEntry = &Table[Hash];
807 if (TableEntry->Key == Key)
808 {
809 //
810 // Increment the counters depending on if this was paged or nonpaged
811 // pool
812 //
813 if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
814 {
815 InterlockedIncrement(&TableEntry->NonPagedAllocs);
816 InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes, NumberOfBytes);
817 return;
818 }
819 InterlockedIncrement(&TableEntry->PagedAllocs);
820 InterlockedExchangeAddSizeT(&TableEntry->PagedBytes, NumberOfBytes);
821 return;
822 }
823
824 //
825 // We don't have an entry yet, but we've found a free bucket for it
826 //
827 if (!(TableEntry->Key) && (Hash != PoolTrackTableSize - 1))
828 {
829 //
830 // We need to hold the lock while creating a new entry, since other
831 // processors might be in this code path as well
832 //
833 ExAcquireSpinLock(&ExpTaggedPoolLock, &OldIrql);
834 if (!PoolTrackTable[Hash].Key)
835 {
836 //
837 // We've won the race, so now create this entry in the bucket
838 //
839 ASSERT(Table[Hash].Key == 0);
840 PoolTrackTable[Hash].Key = Key;
841 TableEntry->Key = Key;
842 }
843 ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);
844
845 //
846 // Now we force the loop to run again, and we should now end up in
847 // the code path above which does the interlocked increments...
848 //
849 continue;
850 }
851
852 //
853 // This path is hit when we don't have an entry, and the current bucket
854 // is full, so we simply try the next one
855 //
856 Hash = (Hash + 1) & TableMask;
857 if (Hash == Index) break;
858 }
859
860 //
861 // And finally this path is hit when all the buckets are full, and we need
862 // some expansion. This path is not yet supported in ReactOS and so we'll
863 // ignore the tag
864 //
865 DPRINT1("Out of pool tag space, ignoring...\n");
866 }
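//
// These two routines are always used as a pair: ExpInsertPoolTracker at
// allocation time, ExpRemovePoolTracker with the same tag and byte count at
// free time. That pairing is what lets ExGetPoolTagInfo ASSERT that the
// per-tag allocation counters never fall below the free counters.
//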
867
868 VOID
869 NTAPI
870 INIT_SECTION
871 ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor,
872 IN POOL_TYPE PoolType,
873 IN ULONG PoolIndex,
874 IN ULONG Threshold,
875 IN PVOID PoolLock)
876 {
877 PLIST_ENTRY NextEntry, LastEntry;
878
879 //
880 // Setup the descriptor based on the caller's request
881 //
882 PoolDescriptor->PoolType = PoolType;
883 PoolDescriptor->PoolIndex = PoolIndex;
884 PoolDescriptor->Threshold = Threshold;
885 PoolDescriptor->LockAddress = PoolLock;
886
887 //
888 // Initialize accounting data
889 //
890 PoolDescriptor->RunningAllocs = 0;
891 PoolDescriptor->RunningDeAllocs = 0;
892 PoolDescriptor->TotalPages = 0;
893 PoolDescriptor->TotalBytes = 0;
894 PoolDescriptor->TotalBigPages = 0;
895
896 //
897 // Nothing pending for now
898 //
899 PoolDescriptor->PendingFrees = NULL;
900 PoolDescriptor->PendingFreeDepth = 0;
901
902 //
903 // Loop all the descriptor's allocation lists and initialize them
904 //
905 NextEntry = PoolDescriptor->ListHeads;
906 LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
907 while (NextEntry < LastEntry)
908 {
909 ExpInitializePoolListHead(NextEntry);
910 NextEntry++;
911 }
912
913 //
914 // Note that ReactOS does not support Session Pool Yet
915 //
916 ASSERT(PoolType != PagedPoolSession);
917 }
918
919 VOID
920 NTAPI
921 INIT_SECTION
922 InitializePool(IN POOL_TYPE PoolType,
923 IN ULONG Threshold)
924 {
925 PPOOL_DESCRIPTOR Descriptor;
926 SIZE_T TableSize;
927 ULONG i;
928
929 //
930 // Check what kind of pool this is
931 //
932 if (PoolType == NonPagedPool)
933 {
934 //
935 // Compute the track table size and convert it from a power of two to an
936 // actual byte size
937 //
938 // NOTE: On checked builds, we'll assert if the registry table size was
939 // invalid, while on retail builds we'll just break out of the loop at
940 // that point.
941 //
942 TableSize = min(PoolTrackTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
943 for (i = 0; i < 32; i++)
944 {
945 if (TableSize & 1)
946 {
947 ASSERT((TableSize & ~1) == 0);
948 if (!(TableSize & ~1)) break;
949 }
950 TableSize >>= 1;
951 }
952
953 //
954         // If we hit bit 32, then no size was defined in the registry, so
955 // we'll use the default size of 2048 entries.
956 //
957 // Otherwise, use the size from the registry, as long as it's not
958 // smaller than 64 entries.
959 //
960 if (i == 32)
961 {
962 PoolTrackTableSize = 2048;
963 }
964 else
965 {
966 PoolTrackTableSize = max(1 << i, 64);
967 }
968
969 //
970 // Loop trying with the biggest specified size first, and cut it down
971         // by a power of two each iteration in case not enough memory exists
972 //
973 while (TRUE)
974 {
975 //
976 // Do not allow overflow
977 //
978 if ((PoolTrackTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_TABLE)))
979 {
980 PoolTrackTableSize >>= 1;
981 continue;
982 }
983
984 //
985 // Allocate the tracker table and exit the loop if this worked
986 //
987 PoolTrackTable = MiAllocatePoolPages(NonPagedPool,
988 (PoolTrackTableSize + 1) *
989 sizeof(POOL_TRACKER_TABLE));
990 if (PoolTrackTable) break;
991
992 //
993 // Otherwise, as long as we're not down to the last bit, keep
994 // iterating
995 //
996 if (PoolTrackTableSize == 1)
997 {
998 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
999 TableSize,
1000 0xFFFFFFFF,
1001 0xFFFFFFFF,
1002 0xFFFFFFFF);
1003 }
1004 PoolTrackTableSize >>= 1;
1005 }
1006
1007 //
1008 // Add one entry, compute the hash, and zero the table
1009 //
1010 PoolTrackTableSize++;
1011 PoolTrackTableMask = PoolTrackTableSize - 2;
1012
1013 RtlZeroMemory(PoolTrackTable,
1014 PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
1015
1016 //
1017 // Finally, add the most used tags to speed up those allocations
1018 //
1019 ExpSeedHotTags();
1020
1021 //
1022 // We now do the exact same thing with the tracker table for big pages
1023 //
1024 TableSize = min(PoolBigPageTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
1025 for (i = 0; i < 32; i++)
1026 {
1027 if (TableSize & 1)
1028 {
1029 ASSERT((TableSize & ~1) == 0);
1030 if (!(TableSize & ~1)) break;
1031 }
1032 TableSize >>= 1;
1033 }
1034
1035 //
1036 // For big pages, the default tracker table is 4096 entries, while the
1037 // minimum is still 64
1038 //
1039 if (i == 32)
1040 {
1041 PoolBigPageTableSize = 4096;
1042 }
1043 else
1044 {
1045 PoolBigPageTableSize = max(1 << i, 64);
1046 }
1047
1048 //
1049 // Again, run the exact same loop we ran earlier, but this time for the
1050 // big pool tracker instead
1051 //
1052 while (TRUE)
1053 {
1054 if ((PoolBigPageTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_BIG_PAGES)))
1055 {
1056 PoolBigPageTableSize >>= 1;
1057 continue;
1058 }
1059
1060 PoolBigPageTable = MiAllocatePoolPages(NonPagedPool,
1061 PoolBigPageTableSize *
1062 sizeof(POOL_TRACKER_BIG_PAGES));
1063 if (PoolBigPageTable) break;
1064
1065 if (PoolBigPageTableSize == 1)
1066 {
1067 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1068 TableSize,
1069 0xFFFFFFFF,
1070 0xFFFFFFFF,
1071 0xFFFFFFFF);
1072 }
1073
1074 PoolBigPageTableSize >>= 1;
1075 }
1076
1077 //
1078         // An extra entry is not needed for the big pool tracker, so just
1079 // compute the hash and zero it
1080 //
1081 PoolBigPageTableHash = PoolBigPageTableSize - 1;
1082 RtlZeroMemory(PoolBigPageTable,
1083 PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
1084 for (i = 0; i < PoolBigPageTableSize; i++) PoolBigPageTable[i].Va = (PVOID)1;
1085
1086 //
1087 // During development, print this out so we can see what's happening
1088 //
1089 DPRINT("EXPOOL: Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
1090 PoolTrackTable, PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
1091 DPRINT("EXPOOL: Big Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
1092 PoolBigPageTable, PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
1093
1094 //
1095 // Insert the generic tracker for all of big pool
1096 //
1097 ExpInsertPoolTracker('looP',
1098 ROUND_TO_PAGES(PoolBigPageTableSize *
1099 sizeof(POOL_TRACKER_BIG_PAGES)),
1100 NonPagedPool);
1101
1102 //
1103 // No support for NUMA systems at this time
1104 //
1105 ASSERT(KeNumberNodes == 1);
1106
1107 //
1108 // Initialize the tag spinlock
1109 //
1110 KeInitializeSpinLock(&ExpTaggedPoolLock);
1111
1112 //
1113 // Initialize the nonpaged pool descriptor
1114 //
1115 PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
1116 ExInitializePoolDescriptor(PoolVector[NonPagedPool],
1117 NonPagedPool,
1118 0,
1119 Threshold,
1120 NULL);
1121 }
1122 else
1123 {
1124 //
1125 // No support for NUMA systems at this time
1126 //
1127 ASSERT(KeNumberNodes == 1);
1128
1129 //
1130 // Allocate the pool descriptor
1131 //
1132 Descriptor = ExAllocatePoolWithTag(NonPagedPool,
1133 sizeof(KGUARDED_MUTEX) +
1134 sizeof(POOL_DESCRIPTOR),
1135 'looP');
1136 if (!Descriptor)
1137 {
1138 //
1139 // This is really bad...
1140 //
1141 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1142 0,
1143 -1,
1144 -1,
1145 -1);
1146 }
1147
1148 //
1149 // Setup the vector and guarded mutex for paged pool
1150 //
1151 PoolVector[PagedPool] = Descriptor;
1152 ExpPagedPoolMutex = (PKGUARDED_MUTEX)(Descriptor + 1);
1153 ExpPagedPoolDescriptor[0] = Descriptor;
1154 KeInitializeGuardedMutex(ExpPagedPoolMutex);
1155 ExInitializePoolDescriptor(Descriptor,
1156 PagedPool,
1157 0,
1158 Threshold,
1159 ExpPagedPoolMutex);
1160
1161 //
1162 // Insert the generic tracker for all of nonpaged pool
1163 //
1164 ExpInsertPoolTracker('looP',
1165 ROUND_TO_PAGES(PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE)),
1166 NonPagedPool);
1167 }
1168 }
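//
// Summarizing the sizing logic above: a boot with no registry override ends
// up with
//
//     PoolTrackTableSize   = 2048 + 1 entries, PoolTrackTableMask   = 0x7FF
//     PoolBigPageTableSize = 4096 entries,     PoolBigPageTableHash = 0xFFF
//
// and with every big-page entry pre-marked free through its Va low bit.
//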
1169
1170 FORCEINLINE
1171 KIRQL
1172 ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
1173 {
1174 //
1175 // Check if this is nonpaged pool
1176 //
1177 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1178 {
1179 //
1180 // Use the queued spin lock
1181 //
1182 return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
1183 }
1184 else
1185 {
1186 //
1187 // Use the guarded mutex
1188 //
1189 KeAcquireGuardedMutex(Descriptor->LockAddress);
1190 return APC_LEVEL;
1191 }
1192 }
1193
1194 FORCEINLINE
1195 VOID
1196 ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor,
1197 IN KIRQL OldIrql)
1198 {
1199 //
1200 // Check if this is nonpaged pool
1201 //
1202 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1203 {
1204 //
1205 // Use the queued spin lock
1206 //
1207 KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
1208 }
1209 else
1210 {
1211 //
1212 // Use the guarded mutex
1213 //
1214 KeReleaseGuardedMutex(Descriptor->LockAddress);
1215 }
1216 }
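//
// The expected usage pattern for the pair above is (sketch):
//
//     OldIrql = ExLockPool(PoolDesc);
//     ... manipulate PoolDesc->ListHeads under the lock ...
//     ExUnlockPool(PoolDesc, OldIrql);
//
// For nonpaged pool this acquires the queued spinlock (raising to
// DISPATCH_LEVEL); for paged pool the guarded mutex is taken instead and
// APC_LEVEL is simply returned so the caller has something to pass back in.
//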
1217
1218 VOID
1219 NTAPI
1220 ExpGetPoolTagInfoTarget(IN PKDPC Dpc,
1221 IN PVOID DeferredContext,
1222 IN PVOID SystemArgument1,
1223 IN PVOID SystemArgument2)
1224 {
1225 PPOOL_DPC_CONTEXT Context = DeferredContext;
1226 UNREFERENCED_PARAMETER(Dpc);
1227 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
1228
1229 //
1230 // Make sure we win the race, and if we did, copy the data atomically
1231 //
1232 if (KeSignalCallDpcSynchronize(SystemArgument2))
1233 {
1234 RtlCopyMemory(Context->PoolTrackTable,
1235 PoolTrackTable,
1236 Context->PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
1237
1238 //
1239 // This is here because ReactOS does not yet support expansion
1240 //
1241 ASSERT(Context->PoolTrackTableSizeExpansion == 0);
1242 }
1243
1244 //
1245 // Regardless of whether we won or not, we must now synchronize and then
1246 // decrement the barrier since this is one more processor that has completed
1247 // the callback.
1248 //
1249 KeSignalCallDpcSynchronize(SystemArgument2);
1250 KeSignalCallDpcDone(SystemArgument1);
1251 }
1252
1253 NTSTATUS
1254 NTAPI
1255 ExGetPoolTagInfo(IN PSYSTEM_POOLTAG_INFORMATION SystemInformation,
1256 IN ULONG SystemInformationLength,
1257 IN OUT PULONG ReturnLength OPTIONAL)
1258 {
1259 ULONG TableSize, CurrentLength;
1260 ULONG EntryCount;
1261 NTSTATUS Status = STATUS_SUCCESS;
1262 PSYSTEM_POOLTAG TagEntry;
1263 PPOOL_TRACKER_TABLE Buffer, TrackerEntry;
1264 POOL_DPC_CONTEXT Context;
1265 ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
1266
1267 //
1268 // Keep track of how much data the caller's buffer must hold
1269 //
1270 CurrentLength = FIELD_OFFSET(SYSTEM_POOLTAG_INFORMATION, TagInfo);
1271
1272 //
1273 // Initialize the caller's buffer
1274 //
1275 TagEntry = &SystemInformation->TagInfo[0];
1276 SystemInformation->Count = 0;
1277
1278 //
1279 // Capture the number of entries, and the total size needed to make a copy
1280 // of the table
1281 //
1282 EntryCount = (ULONG)PoolTrackTableSize;
1283 TableSize = EntryCount * sizeof(POOL_TRACKER_TABLE);
1284
1285 //
1286 // Allocate the "Generic DPC" temporary buffer
1287 //
1288 Buffer = ExAllocatePoolWithTag(NonPagedPool, TableSize, 'ofnI');
1289 if (!Buffer) return STATUS_INSUFFICIENT_RESOURCES;
1290
1291 //
1292 // Do a "Generic DPC" to atomically retrieve the tag and allocation data
1293 //
1294 Context.PoolTrackTable = Buffer;
1295 Context.PoolTrackTableSize = PoolTrackTableSize;
1296 Context.PoolTrackTableExpansion = NULL;
1297 Context.PoolTrackTableSizeExpansion = 0;
1298 KeGenericCallDpc(ExpGetPoolTagInfoTarget, &Context);
1299
1300 //
1301 // Now parse the results
1302 //
1303 for (TrackerEntry = Buffer; TrackerEntry < (Buffer + EntryCount); TrackerEntry++)
1304 {
1305 //
1306 // If the entry is empty, skip it
1307 //
1308 if (!TrackerEntry->Key) continue;
1309
1310 //
1311 // Otherwise, add one more entry to the caller's buffer, and ensure that
1312 // enough space has been allocated in it
1313 //
1314 SystemInformation->Count++;
1315 CurrentLength += sizeof(*TagEntry);
1316 if (SystemInformationLength < CurrentLength)
1317 {
1318 //
1319 // The caller's buffer is too small, so set a failure code. The
1320 // caller will know the count, as well as how much space is needed.
1321 //
1322 // We do NOT break out of the loop, because we want to keep incrementing
1323 // the Count as well as CurrentLength so that the caller can know the
1324 // final numbers
1325 //
1326 Status = STATUS_INFO_LENGTH_MISMATCH;
1327 }
1328 else
1329 {
1330 //
1331 // Small sanity check that our accounting is working correctly
1332 //
1333 ASSERT(TrackerEntry->PagedAllocs >= TrackerEntry->PagedFrees);
1334 ASSERT(TrackerEntry->NonPagedAllocs >= TrackerEntry->NonPagedFrees);
1335
1336 //
1337 // Return the data into the caller's buffer
1338 //
1339 TagEntry->TagUlong = TrackerEntry->Key;
1340 TagEntry->PagedAllocs = TrackerEntry->PagedAllocs;
1341 TagEntry->PagedFrees = TrackerEntry->PagedFrees;
1342 TagEntry->PagedUsed = TrackerEntry->PagedBytes;
1343 TagEntry->NonPagedAllocs = TrackerEntry->NonPagedAllocs;
1344 TagEntry->NonPagedFrees = TrackerEntry->NonPagedFrees;
1345 TagEntry->NonPagedUsed = TrackerEntry->NonPagedBytes;
1346 TagEntry++;
1347 }
1348 }
1349
1350 //
1351 // Free the "Generic DPC" temporary buffer, return the buffer length and status
1352 //
1353 ExFreePoolWithTag(Buffer, 'ofnI');
1354 if (ReturnLength) *ReturnLength = CurrentLength;
1355 return Status;
1356 }
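//
// This is the path that feeds tools like poolmon: they query
// NtQuerySystemInformation(SystemPoolTagInformation, ...), growing the
// buffer and retrying while STATUS_INFO_LENGTH_MISMATCH comes back, which
// is exactly the Count/CurrentLength contract implemented above.
//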
1357
1358 BOOLEAN
1359 NTAPI
1360 ExpAddTagForBigPages(IN PVOID Va,
1361 IN ULONG Key,
1362 IN ULONG NumberOfPages,
1363 IN POOL_TYPE PoolType)
1364 {
1365 ULONG Hash, i = 0;
1366 PVOID OldVa;
1367 KIRQL OldIrql;
1368 SIZE_T TableSize;
1369 PPOOL_TRACKER_BIG_PAGES Entry, EntryEnd, EntryStart;
1370 ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
1371 ASSERT(!(PoolType & SESSION_POOL_MASK));
1372
1373 //
1374 // As the table is expandable, these values must only be read after acquiring
1375     // the lock to avoid a torn access during an expansion
1376 //
1377 Hash = ExpComputePartialHashForAddress(Va);
1378 KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
1379 Hash &= PoolBigPageTableHash;
1380 TableSize = PoolBigPageTableSize;
1381
1382 //
1383 // We loop from the current hash bucket to the end of the table, and then
1384 // rollover to hash bucket 0 and keep going from there. If we return back
1385 // to the beginning, then we attempt expansion at the bottom of the loop
1386 //
1387 EntryStart = Entry = &PoolBigPageTable[Hash];
1388 EntryEnd = &PoolBigPageTable[TableSize];
1389 do
1390 {
1391 //
1392 // Make sure that this is a free entry and attempt to atomically make the
1393 // entry busy now
1394 //
1395 OldVa = Entry->Va;
1396 if (((ULONG_PTR)OldVa & POOL_BIG_TABLE_ENTRY_FREE) &&
1397 (InterlockedCompareExchangePointer(&Entry->Va, Va, OldVa) == OldVa))
1398 {
1399 //
1400 // We now own this entry, write down the size and the pool tag
1401 //
1402 Entry->Key = Key;
1403 Entry->NumberOfPages = NumberOfPages;
1404
1405 //
1406 // Add one more entry to the count, and see if we're getting within
1407 // 25% of the table size, at which point we'll do an expansion now
1408 // to avoid blocking too hard later on.
1409 //
1410 // Note that we only do this if it's also been the 16th time that we
1411 // keep losing the race or that we are not finding a free entry anymore,
1412 // which implies a massive number of concurrent big pool allocations.
1413 //
1414 InterlockedIncrementUL(&ExpPoolBigEntriesInUse);
1415 if ((i >= 16) && (ExpPoolBigEntriesInUse > (TableSize / 4)))
1416 {
1417 DPRINT("Should attempt expansion since we now have %lu entries\n",
1418 ExpPoolBigEntriesInUse);
1419 }
1420
1421 //
1422 // We have our entry, return
1423 //
1424 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1425 return TRUE;
1426 }
1427
1428 //
1429 // We don't have our entry yet, so keep trying, making the entry list
1430 // circular if we reach the last entry. We'll eventually break out of
1431 // the loop once we've rolled over and returned back to our original
1432 // hash bucket
1433 //
1434 i++;
1435 if (++Entry >= EntryEnd) Entry = &PoolBigPageTable[0];
1436 } while (Entry != EntryStart);
1437
1438 //
1439 // This means there's no free hash buckets whatsoever, so we would now have
1440 // to attempt expanding the table
1441 //
1442 DPRINT1("Big pool expansion needed, not implemented!\n");
1443 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1444 return FALSE;
1445 }
1446
1447 ULONG
1448 NTAPI
1449 ExpFindAndRemoveTagBigPages(IN PVOID Va,
1450 OUT PULONG_PTR BigPages,
1451 IN POOL_TYPE PoolType)
1452 {
1453 BOOLEAN FirstTry = TRUE;
1454 SIZE_T TableSize;
1455 KIRQL OldIrql;
1456 ULONG PoolTag, Hash;
1457 PPOOL_TRACKER_BIG_PAGES Entry;
1458 ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
1459 ASSERT(!(PoolType & SESSION_POOL_MASK));
1460
1461 //
1462 // As the table is expandable, these values must only be read after acquiring
1463     // the lock to avoid a torn access during an expansion
1464 //
1465 Hash = ExpComputePartialHashForAddress(Va);
1466 KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
1467 Hash &= PoolBigPageTableHash;
1468 TableSize = PoolBigPageTableSize;
1469
1470 //
1471 // Loop while trying to find this big page allocation
1472 //
1473 while (PoolBigPageTable[Hash].Va != Va)
1474 {
1475 //
1476 // Increment the size until we go past the end of the table
1477 //
1478 if (++Hash >= TableSize)
1479 {
1480 //
1481 // Is this the second time we've tried?
1482 //
1483 if (!FirstTry)
1484 {
1485 //
1486 // This means it was never inserted into the pool table and it
1487 // received the special "BIG" tag -- return that and return 0
1488 // so that the code can ask Mm for the page count instead
1489 //
1490 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1491 *BigPages = 0;
1492 return ' GIB';
1493 }
1494
1495 //
1496 // The first time this happens, reset the hash index and try again
1497 //
1498 Hash = 0;
1499 FirstTry = FALSE;
1500 }
1501 }
1502
1503 //
1504 // Now capture all the information we need from the entry, since after we
1505 // release the lock, the data can change
1506 //
1507 Entry = &PoolBigPageTable[Hash];
1508 *BigPages = Entry->NumberOfPages;
1509 PoolTag = Entry->Key;
1510
1511 //
1512 // Set the free bit, and decrement the number of allocations. Finally, release
1513 // the lock and return the tag that was located
1514 //
1515 InterlockedIncrement((PLONG)&Entry->Va);
1516 InterlockedDecrementUL(&ExpPoolBigEntriesInUse);
1517 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1518 return PoolTag;
1519 }
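//
// Note the trick above: Va is page-aligned, so its low bit doubles as the
// POOL_BIG_TABLE_ENTRY_FREE flag. The InterlockedIncrement atomically sets
// that bit to mark the entry free while leaving the old address visible
// for debugging.
//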
1520
1521 VOID
1522 NTAPI
1523 ExQueryPoolUsage(OUT PULONG PagedPoolPages,
1524 OUT PULONG NonPagedPoolPages,
1525 OUT PULONG PagedPoolAllocs,
1526 OUT PULONG PagedPoolFrees,
1527 OUT PULONG PagedPoolLookasideHits,
1528 OUT PULONG NonPagedPoolAllocs,
1529 OUT PULONG NonPagedPoolFrees,
1530 OUT PULONG NonPagedPoolLookasideHits)
1531 {
1532 ULONG i;
1533 PPOOL_DESCRIPTOR PoolDesc;
1534
1535 //
1536 // Assume all failures
1537 //
1538 *PagedPoolPages = 0;
1539 *PagedPoolAllocs = 0;
1540 *PagedPoolFrees = 0;
1541
1542 //
1543     // Tally up the totals for all the paged pools
1544 //
1545 for (i = 0; i < ExpNumberOfPagedPools + 1; i++)
1546 {
1547 PoolDesc = ExpPagedPoolDescriptor[i];
1548 *PagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1549 *PagedPoolAllocs += PoolDesc->RunningAllocs;
1550 *PagedPoolFrees += PoolDesc->RunningDeAllocs;
1551 }
1552
1553 //
1554 // The first non-paged pool has a hardcoded well-known descriptor name
1555 //
1556 PoolDesc = &NonPagedPoolDescriptor;
1557 *NonPagedPoolPages = PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1558 *NonPagedPoolAllocs = PoolDesc->RunningAllocs;
1559 *NonPagedPoolFrees = PoolDesc->RunningDeAllocs;
1560
1561 //
1562 // If the system has more than one non-paged pool, copy the other descriptor
1563 // totals as well
1564 //
1565 #if 0
1566 if (ExpNumberOfNonPagedPools > 1)
1567 {
1568 for (i = 0; i < ExpNumberOfNonPagedPools; i++)
1569 {
1570 PoolDesc = ExpNonPagedPoolDescriptor[i];
1571 *NonPagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1572 *NonPagedPoolAllocs += PoolDesc->RunningAllocs;
1573 *NonPagedPoolFrees += PoolDesc->RunningDeAllocs;
1574 }
1575 }
1576 #endif
1577
1578 //
1579 // FIXME: Not yet supported
1580 //
1581 *NonPagedPoolLookasideHits += 0;
1582 *PagedPoolLookasideHits += 0;
1583 }
1584
1585 VOID
1586 NTAPI
1587 ExReturnPoolQuota(IN PVOID P)
1588 {
1589 PPOOL_HEADER Entry;
1590 POOL_TYPE PoolType;
1591 USHORT BlockSize;
1592 PEPROCESS Process;
1593
1594 if ((ExpPoolFlags & POOL_FLAG_SPECIAL_POOL) &&
1595 (MmIsSpecialPoolAddress(P)))
1596 {
1597 return;
1598 }
1599
1600 Entry = P;
1601 Entry--;
1602 ASSERT((ULONG_PTR)Entry % POOL_BLOCK_SIZE == 0);
1603
1604 PoolType = Entry->PoolType - 1;
1605 BlockSize = Entry->BlockSize;
1606
1607 if (PoolType & QUOTA_POOL_MASK)
1608 {
1609 Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
1610 ASSERT(Process != NULL);
1611 if (Process)
1612 {
1613 if (Process->Pcb.Header.Type != ProcessObject)
1614 {
1615 DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
1616 Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
1617 KeBugCheckEx(BAD_POOL_CALLER,
1618 0x0D,
1619 (ULONG_PTR)P,
1620 Entry->PoolTag,
1621 (ULONG_PTR)Process);
1622 }
1623 ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
1624 PsReturnPoolQuota(Process,
1625 PoolType & BASE_POOL_TYPE_MASK,
1626 BlockSize * POOL_BLOCK_SIZE);
1627 ObDereferenceObject(Process);
1628 }
1629 }
1630 }
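//
// For quota'd allocations, the owning process pointer is stashed in the
// last pointer-sized slot of the block, which is what the [-1] indexing
// above retrieves:
//
//     +--------+---------------------------+-----------+
//     | HEADER | caller's data             | PEPROCESS |
//     +--------+---------------------------+-----------+
//     POOL_NEXT_BLOCK(Entry) points just past the PEPROCESS slot
//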
1631
1632 /* PUBLIC FUNCTIONS ***********************************************************/
1633
1634 /*
1635 * @implemented
1636 */
1637 PVOID
1638 NTAPI
1639 ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
1640 IN SIZE_T NumberOfBytes,
1641 IN ULONG Tag)
1642 {
1643 PPOOL_DESCRIPTOR PoolDesc;
1644 PLIST_ENTRY ListHead;
1645 PPOOL_HEADER Entry, NextEntry, FragmentEntry;
1646 KIRQL OldIrql;
1647 USHORT BlockSize, i;
1648 ULONG OriginalType;
1649 PKPRCB Prcb = KeGetCurrentPrcb();
1650 PGENERAL_LOOKASIDE LookasideList;
1651
1652 //
1653 // Some sanity checks
1654 //
1655 ASSERT(Tag != 0);
1656 ASSERT(Tag != ' GIB');
1657 ASSERT(NumberOfBytes != 0);
1658 ExpCheckPoolIrqlLevel(PoolType, NumberOfBytes, NULL);
1659
1660 //
1661 // Not supported in ReactOS
1662 //
1663 ASSERT(!(PoolType & SESSION_POOL_MASK));
1664
1665 //
1666 // Check if verifier or special pool is enabled
1667 //
1668 if (ExpPoolFlags & (POOL_FLAG_VERIFIER | POOL_FLAG_SPECIAL_POOL))
1669 {
1670 //
1671 // For verifier, we should call the verification routine
1672 //
1673 if (ExpPoolFlags & POOL_FLAG_VERIFIER)
1674 {
1675 DPRINT1("Driver Verifier is not yet supported\n");
1676 }
1677
1678 //
1679 // For special pool, we check if this is a suitable allocation and do
1680 // the special allocation if needed
1681 //
1682 if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
1683 {
1684 //
1685 // Check if this is a special pool allocation
1686 //
1687 if (MmUseSpecialPool(NumberOfBytes, Tag))
1688 {
1689 //
1690 // Try to allocate using special pool
1691 //
1692 Entry = MmAllocateSpecialPool(NumberOfBytes, Tag, PoolType, 2);
1693 if (Entry) return Entry;
1694 }
1695 }
1696 }
1697
1698 //
1699 // Get the pool type and its corresponding vector for this request
1700 //
1701 OriginalType = PoolType;
1702 PoolType = PoolType & BASE_POOL_TYPE_MASK;
1703 PoolDesc = PoolVector[PoolType];
1704 ASSERT(PoolDesc != NULL);
1705
1706 //
1707 // Check if this is a big page allocation
1708 //
1709 if (NumberOfBytes > POOL_MAX_ALLOC)
1710 {
1711 //
1712 // Allocate pages for it
1713 //
1714 Entry = MiAllocatePoolPages(OriginalType, NumberOfBytes);
1715 if (!Entry)
1716 {
1717 //
1718 // If non paged backed, display current consumption
1719 //
1720 if ((OriginalType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1721 {
1722 MiDumpNonPagedPoolConsumers();
1723 }
1724
1725 //
1726 // Must succeed pool is deprecated, but still supported. These allocation
1727 // failures must cause an immediate bugcheck
1728 //
1729 if (OriginalType & MUST_SUCCEED_POOL_MASK)
1730 {
1731 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1732 NumberOfBytes,
1733 NonPagedPoolDescriptor.TotalPages,
1734 NonPagedPoolDescriptor.TotalBigPages,
1735 0);
1736 }
1737
1738 //
1739 // Internal debugging
1740 //
1741 ExPoolFailures++;
1742
1743 //
1744 // This flag requests printing failures, and can also further specify
1745 // breaking on failures
1746 //
1747 if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
1748 {
1749 DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
1750 NumberOfBytes,
1751 OriginalType);
1752 if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
1753 }
1754
1755 //
1756 // Finally, this flag requests an exception, which we are more than
1757 // happy to raise!
1758 //
1759 if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
1760 {
1761 ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
1762 }
1763
1764 return NULL;
1765 }
1766
1767 //
1768 // Increment required counters
1769 //
1770 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
1771 (LONG)BYTES_TO_PAGES(NumberOfBytes));
1772 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, NumberOfBytes);
1773 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
1774
1775 //
1776 // Add a tag for the big page allocation and switch to the generic "BIG"
1777         // tag if we failed to do so, then insert a tracker for this allocation.
1778 //
1779 if (!ExpAddTagForBigPages(Entry,
1780 Tag,
1781 (ULONG)BYTES_TO_PAGES(NumberOfBytes),
1782 OriginalType))
1783 {
1784 Tag = ' GIB';
1785 }
1786 ExpInsertPoolTracker(Tag, ROUND_TO_PAGES(NumberOfBytes), OriginalType);
1787 return Entry;
1788 }
1789
1790 //
1791 // Should never request 0 bytes from the pool, but since so many drivers do
1792 // it, we'll just assume they want 1 byte, based on NT's similar behavior
1793 //
1794 if (!NumberOfBytes) NumberOfBytes = 1;
1795
1796 //
1797 // A pool allocation is defined by its data, a linked list to connect it to
1798 // the free list (if necessary), and a pool header to store accounting info.
1799 // Calculate this size, then convert it into a block size (units of pool
1800 // headers)
1801 //
1802 // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
1803 // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
1804 // the direct allocation of pages.
1805 //
1806 i = (USHORT)((NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1))
1807 / POOL_BLOCK_SIZE);
1808 ASSERT(i < POOL_LISTS_PER_PAGE);
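    //
    // For example, on x86 (8-byte POOL_HEADER, POOL_BLOCK_SIZE of 8) a
    // 24-byte request gives i = (24 + 8 + 7) / 8 = 4 blocks, i.e. a
    // 32-byte chunk.
    //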
1809
1810 //
1811 // Handle lookaside list optimization for both paged and nonpaged pool
1812 //
1813 if (i <= NUMBER_POOL_LOOKASIDE_LISTS)
1814 {
1815 //
1816 // Try popping it from the per-CPU lookaside list
1817 //
1818 LookasideList = (PoolType == PagedPool) ?
1819 Prcb->PPPagedLookasideList[i - 1].P :
1820 Prcb->PPNPagedLookasideList[i - 1].P;
1821 LookasideList->TotalAllocates++;
1822 Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
1823 if (!Entry)
1824 {
1825 //
1826 // We failed, try popping it from the global list
1827 //
1828 LookasideList = (PoolType == PagedPool) ?
1829 Prcb->PPPagedLookasideList[i - 1].L :
1830 Prcb->PPNPagedLookasideList[i - 1].L;
1831 LookasideList->TotalAllocates++;
1832 Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
1833 }
1834
1835 //
1836 // If we were able to pop it, update the accounting and return the block
1837 //
1838 if (Entry)
1839 {
1840 LookasideList->AllocateHits++;
1841
1842 //
1843 // Get the real entry, write down its pool type, and track it
1844 //
1845 Entry--;
1846 Entry->PoolType = OriginalType + 1;
1847 ExpInsertPoolTracker(Tag,
1848 Entry->BlockSize * POOL_BLOCK_SIZE,
1849 OriginalType);
1850
1851 //
1852 // Return the pool allocation
1853 //
1854 Entry->PoolTag = Tag;
1855 (POOL_FREE_BLOCK(Entry))->Flink = NULL;
1856 (POOL_FREE_BLOCK(Entry))->Blink = NULL;
1857 return POOL_FREE_BLOCK(Entry);
1858 }
1859 }
1860
1861 //
1862     // Loop in the free lists looking for a block of this size. Start with the
1863 // list optimized for this kind of size lookup
1864 //
1865 ListHead = &PoolDesc->ListHeads[i];
1866 do
1867 {
1868 //
1869 // Are there any free entries available on this list?
1870 //
1871 if (!ExpIsPoolListEmpty(ListHead))
1872 {
1873 //
1874 // Acquire the pool lock now
1875 //
1876 OldIrql = ExLockPool(PoolDesc);
1877
1878 //
1879 // And make sure the list still has entries
1880 //
1881 if (ExpIsPoolListEmpty(ListHead))
1882 {
1883 //
1884 // Someone raced us (and won) before we had a chance to acquire
1885 // the lock.
1886 //
1887 // Try again!
1888 //
1889 ExUnlockPool(PoolDesc, OldIrql);
1890 continue;
1891 }
1892
1893 //
1894 // Remove a free entry from the list
1895 // Note that due to the way we insert free blocks into multiple lists
1896 // there is a guarantee that any block on this list will either be
1897 // of the correct size, or perhaps larger.
1898 //
1899 ExpCheckPoolLinks(ListHead);
1900 Entry = POOL_ENTRY(ExpRemovePoolHeadList(ListHead));
1901 ExpCheckPoolLinks(ListHead);
1902 ExpCheckPoolBlocks(Entry);
1903 ASSERT(Entry->BlockSize >= i);
1904 ASSERT(Entry->PoolType == 0);
1905
1906 //
1907             // Check if this block is larger than what we need. The block could
1908 // not possibly be smaller, due to the reason explained above (and
1909 // we would've asserted on a checked build if this was the case).
1910 //
1911 if (Entry->BlockSize != i)
1912 {
1913 //
1914 // Is there an entry before this one?
1915 //
1916 if (Entry->PreviousSize == 0)
1917 {
1918 //
1919 // There isn't anyone before us, so take the next block and
1920 // turn it into a fragment that contains the leftover data
1921 // that we don't need to satisfy the caller's request
1922 //
1923 FragmentEntry = POOL_BLOCK(Entry, i);
1924 FragmentEntry->BlockSize = Entry->BlockSize - i;
1925
1926 //
1927 // And make it point back to us
1928 //
1929 FragmentEntry->PreviousSize = i;
1930
1931 //
1932 // Now get the block that follows the new fragment and check
1933 // if it's still on the same page as us (and not at the end)
1934 //
1935 NextEntry = POOL_NEXT_BLOCK(FragmentEntry);
1936 if (PAGE_ALIGN(NextEntry) != NextEntry)
1937 {
1938 //
1939 // Adjust this next block to point to our newly created
1940 // fragment block
1941 //
1942 NextEntry->PreviousSize = FragmentEntry->BlockSize;
1943 }
1944 }
1945 else
1946 {
1947 //
1948 // There is a free entry before us, which we know is smaller
1949 // so we'll make this entry the fragment instead
1950 //
1951 FragmentEntry = Entry;
1952
1953 //
1954 // And then we'll remove from it the actual size required.
1955 // Now the entry is a leftover free fragment
1956 //
1957 Entry->BlockSize -= i;
1958
1959 //
1960 // Now let's go to the next entry after the fragment (which
1961 // used to point to our original free entry) and make it
1962 // reference the new fragment entry instead.
1963 //
1964 // This is the entry that will actually end up holding the
1965 // allocation!
1966 //
1967 Entry = POOL_NEXT_BLOCK(Entry);
1968 Entry->PreviousSize = FragmentEntry->BlockSize;
1969
1970 //
1971 // And now let's go to the entry after that one and check if
1972 // it's still on the same page, and not at the end
1973 //
1974 NextEntry = POOL_BLOCK(Entry, i);
1975 if (PAGE_ALIGN(NextEntry) != NextEntry)
1976 {
1977 //
1978 // Make it reference the allocation entry
1979 //
1980 NextEntry->PreviousSize = i;
1981 }
1982 }
1983
1984 //
1985 // Now our (allocation) entry is the right size
1986 //
1987 Entry->BlockSize = i;
1988
1989 //
1990 // And the next entry is now the free fragment which contains
1991 // the remaining difference between how big the original entry
1992 // was, and the actual size the caller needs/requested.
1993 //
1994 FragmentEntry->PoolType = 0;
1995 BlockSize = FragmentEntry->BlockSize;
1996
1997 //
1998 // Now check if enough free bytes remained for us to have a
1999 // "full" entry, which contains enough bytes for a linked list
2000 // and thus can be used for allocations (up to 8 bytes...)
2001 //
2002 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
2003 if (BlockSize != 1)
2004 {
2005 //
2006 // Insert the free entry into the free list for this size
2007 //
2008 ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
2009 POOL_FREE_BLOCK(FragmentEntry));
2010 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
2011 }
2012 }
2013
2014 //
2015 // We have found an entry for this allocation, so set the pool type
2016 // and release the lock since we're done
2017 //
2018 Entry->PoolType = OriginalType + 1;
2019 ExpCheckPoolBlocks(Entry);
2020 ExUnlockPool(PoolDesc, OldIrql);
2021
2022 //
2023 // Increment required counters
2024 //
2025 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
2026 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
2027
2028 //
2029 // Track this allocation
2030 //
2031 ExpInsertPoolTracker(Tag,
2032 Entry->BlockSize * POOL_BLOCK_SIZE,
2033 OriginalType);
2034
2035 //
2036 // Return the pool allocation
2037 //
2038 Entry->PoolTag = Tag;
2039 (POOL_FREE_BLOCK(Entry))->Flink = NULL;
2040 (POOL_FREE_BLOCK(Entry))->Blink = NULL;
2041 return POOL_FREE_BLOCK(Entry);
2042 }
2043 } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);
2044
2045 //
2046 // There were no free entries left, so we have to allocate a new fresh page
2047 //
2048 Entry = MiAllocatePoolPages(OriginalType, PAGE_SIZE);
2049 if (!Entry)
2050 {
2051 //
2052 // If non paged backed, display current consumption
2053 //
2054 if ((OriginalType & BASE_POOL_TYPE_MASK) == NonPagedPool)
2055 {
2056 MiDumpNonPagedPoolConsumers();
2057 }
2058
2059 //
2060 // Must succeed pool is deprecated, but still supported. These allocation
2061 // failures must cause an immediate bugcheck
2062 //
2063 if (OriginalType & MUST_SUCCEED_POOL_MASK)
2064 {
2065 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
2066 PAGE_SIZE,
2067 NonPagedPoolDescriptor.TotalPages,
2068 NonPagedPoolDescriptor.TotalBigPages,
2069 0);
2070 }
2071
2072 //
2073 // Internal debugging
2074 //
2075 ExPoolFailures++;
2076
2077 //
2078 // This flag requests printing failures, and can also further specify
2079 // breaking on failures
2080 //
2081 if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
2082 {
2083 DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
2084 NumberOfBytes,
2085 OriginalType);
2086 if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
2087 }
2088
2089 //
2090 // Finally, this flag requests an exception, which we are more than
2091 // happy to raise!
2092 //
2093 if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
2094 {
2095 ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
2096 }
2097
2098 //
2099 // Return NULL to the caller in all other cases
2100 //
2101 return NULL;
2102 }
2103
2104 //
2105 // Setup the entry data
2106 //
2107 Entry->Ulong1 = 0;
2108 Entry->BlockSize = i;
2109 Entry->PoolType = OriginalType + 1;
2110
2111 //
2112 // This page will have two entries -- one for the allocation (which we just
2113 // created above), and one for the remaining free bytes, which we're about
2114 // to create now. The free bytes are the whole page minus what was allocated
2115 // and then converted into units of block headers.
2116 //
2117 BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i;
2118 FragmentEntry = POOL_BLOCK(Entry, i);
2119 FragmentEntry->Ulong1 = 0;
2120 FragmentEntry->BlockSize = BlockSize;
2121 FragmentEntry->PreviousSize = i;
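
//
// For example, with 4096-byte pages and 8-byte pool blocks (x86), a
// 24-byte request takes i = 4 blocks including its header, leaving a
// 512 - 4 = 508 block fragment
//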
2122
2123 //
2124 // Increment required counters
2125 //
2126 InterlockedIncrement((PLONG)&PoolDesc->TotalPages);
2127 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
2128
2129 //
2130 // Now check if enough free blocks remained for a "full" entry: one
2131 // whose body can hold a LIST_ENTRY (a single-block entry is header
2132 // only) and can therefore go on a free list
2133 //
2134 if (FragmentEntry->BlockSize != 1)
2135 {
2136 //
2137 // Excellent -- acquire the pool lock
2138 //
2139 OldIrql = ExLockPool(PoolDesc);
2140
2141 //
2142 // And insert the free entry into the free list for this block size
2143 //
2144 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
2145 ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
2146 POOL_FREE_BLOCK(FragmentEntry));
2147 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
2148
2149 //
2150 // Release the pool lock
2151 //
2152 ExpCheckPoolBlocks(Entry);
2153 ExUnlockPool(PoolDesc, OldIrql);
2154 }
2155 else
2156 {
2157 //
2158 // Simply do a sanity check
2159 //
2160 ExpCheckPoolBlocks(Entry);
2161 }
2162
2163 //
2164 // Increment performance counters and track this allocation
2165 //
2166 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
2167 ExpInsertPoolTracker(Tag,
2168 Entry->BlockSize * POOL_BLOCK_SIZE,
2169 OriginalType);
2170
2171 //
2172 // And return the pool allocation
2173 //
2174 ExpCheckPoolBlocks(Entry);
2175 Entry->PoolTag = Tag;
2176 return POOL_FREE_BLOCK(Entry);
2177 }
2178
2179 /*
2180 * @implemented
2181 */
2182 PVOID
2183 NTAPI
2184 ExAllocatePool(POOL_TYPE PoolType,
2185 SIZE_T NumberOfBytes)
2186 {
2187 ULONG Tag = TAG_NONE;
2188 #if 0 && DBG
2189 PLDR_DATA_TABLE_ENTRY LdrEntry;
2190
2191 /* Use the first four letters of the driver name, or "None" if unavailable */
2192 LdrEntry = KeGetCurrentIrql() <= APC_LEVEL
2193 ? MiLookupDataTableEntry(_ReturnAddress())
2194 : NULL;
2195 if (LdrEntry)
2196 {
2197 ULONG i;
2198 Tag = 0;
2199 for (i = 0; i < min(4, LdrEntry->BaseDllName.Length / sizeof(WCHAR)); i++)
2200 Tag = Tag >> 8 | (LdrEntry->BaseDllName.Buffer[i] & 0xff) << 24;
2201 for (; i < 4; i++)
2202 Tag = Tag >> 8 | ' ' << 24;
2203 }
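/* (Each shift inserts the next character at the top byte, so the first
   character lands in the lowest byte -- the order pool tags are stored
   and displayed in) */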
2204 #endif
2205 return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2206 }
2207
2208 /*
2209 * @implemented
2210 */
2211 VOID
2212 NTAPI
2213 ExFreePoolWithTag(IN PVOID P,
2214 IN ULONG TagToFree)
2215 {
2216 PPOOL_HEADER Entry, NextEntry;
2217 USHORT BlockSize;
2218 KIRQL OldIrql;
2219 POOL_TYPE PoolType;
2220 PPOOL_DESCRIPTOR PoolDesc;
2221 ULONG Tag;
2222 BOOLEAN Combined = FALSE;
2223 PFN_NUMBER PageCount, RealPageCount;
2224 PKPRCB Prcb = KeGetCurrentPrcb();
2225 PGENERAL_LOOKASIDE LookasideList;
2226 PEPROCESS Process;
2227
2228 //
2229 // Check if any of the debug flags are enabled
2230 //
2231 if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
2232 POOL_FLAG_CHECK_WORKERS |
2233 POOL_FLAG_CHECK_RESOURCES |
2234 POOL_FLAG_VERIFIER |
2235 POOL_FLAG_CHECK_DEADLOCK |
2236 POOL_FLAG_SPECIAL_POOL))
2237 {
2238 //
2239 // Check if special pool is enabled
2240 //
2241 if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
2242 {
2243 //
2244 // Check if it was allocated from a special pool
2245 //
2246 if (MmIsSpecialPoolAddress(P))
2247 {
2248 //
2249 // Was deadlock verification also enabled? We can do some extra
2250 // checks at this point
2251 //
2252 if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
2253 {
2254 DPRINT1("Verifier not yet supported\n");
2255 }
2256
2257 //
2258 // It is, so handle it via special pool free routine
2259 //
2260 MmFreeSpecialPool(P);
2261 return;
2262 }
2263 }
2264
2265 //
2266 // For non-big page allocations, we'll do a bunch of checks in here
2267 //
2268 if (PAGE_ALIGN(P) != P)
2269 {
2270 //
2271 // Get the entry for this pool allocation
2272 // The pointer math here may look wrong or confusing, but it is quite right
2273 //
2274 Entry = P;
2275 Entry--;
2276
2277 //
2278 // Get the pool type
2279 //
2280 PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2281
2282 //
2283 // FIXME: Many other debugging checks go here
2284 //
2285 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2286 }
2287 }
2288
2289 //
2290 // Check if this is a big page allocation
2291 //
2292 if (PAGE_ALIGN(P) == P)
2293 {
2294 //
2295 // We need to find the tag for it, so first we need to find out what
2296 // kind of allocation this was (paged or nonpaged), then we can go
2297 // ahead and try finding the tag for it. Remember to get rid of the
2298 // PROTECTED_POOL tag if it's found.
2299 //
2300 // Note that if at insertion time, we failed to add the tag for a big
2301 // pool allocation, we used a special tag called 'BIG' to identify the
2302 // allocation, and we may get this tag back. In this scenario, we must
2303 // manually get the size of the allocation by actually counting through
2304 // the PFN database.
2305 //
2306 PoolType = MmDeterminePoolType(P);
2307 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2308 Tag = ExpFindAndRemoveTagBigPages(P, &PageCount, PoolType);
2309 if (!Tag)
2310 {
2311 DPRINT1("We do not know the size of this allocation. This is not yet supported\n");
2312 ASSERT(Tag == ' GIB');
2313 PageCount = 1; // We are going to lie! This might screw up accounting?
2314 }
2315 else if (Tag & PROTECTED_POOL)
2316 {
2317 Tag &= ~PROTECTED_POOL;
2318 }
2319
2320 //
2321 // Check block tag
2322 //
2323 if (TagToFree && TagToFree != Tag)
2324 {
2325 DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
2326 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
2327 }
2328
2329 //
2330 // We have our tag and our page count, so we can go ahead and remove this
2331 // tracker now
2332 //
2333 ExpRemovePoolTracker(Tag, PageCount << PAGE_SHIFT, PoolType);
2334
2335 //
2336 // Check if any of the debug flags are enabled
2337 //
2338 if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
2339 POOL_FLAG_CHECK_WORKERS |
2340 POOL_FLAG_CHECK_RESOURCES |
2341 POOL_FLAG_CHECK_DEADLOCK))
2342 {
2343 //
2344 // Was deadlock verification also enabled? We can do some extra
2345 // checks at this point
2346 //
2347 if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
2348 {
2349 DPRINT1("Verifier not yet supported\n");
2350 }
2351
2352 //
2353 // FIXME: Many debugging checks go here
2354 //
2355 }
2356
2357 //
2358 // Update counters
2359 //
2360 PoolDesc = PoolVector[PoolType];
2361 InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
2362 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes,
2363 -(LONG_PTR)(PageCount << PAGE_SHIFT));
2364
2365 //
2366 // Do the real free now and update the last counter with the big page count
2367 //
2368 RealPageCount = MiFreePoolPages(P);
2369 ASSERT(RealPageCount == PageCount);
2370 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
2371 -(LONG)RealPageCount);
2372 return;
2373 }
2374
2375 //
2376 // Get the entry for this pool allocation
2377 // The pointer math here may look wrong or confusing, but it is quite right
2378 //
2379 Entry = P;
2380 Entry--;
2381 ASSERT((ULONG_PTR)Entry % POOL_BLOCK_SIZE == 0);
2382
2383 //
2384 // Get the size of the entry and its pool type, then load the descriptor
2385 // for this pool type
2386 //
2387 BlockSize = Entry->BlockSize;
2388 PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2389 PoolDesc = PoolVector[PoolType];
2390
2391 //
2392 // Make sure that the IRQL makes sense
2393 //
2394 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2395
2396 //
2397 // Get the pool tag and get rid of the PROTECTED_POOL flag
2398 //
2399 Tag = Entry->PoolTag;
2400 if (Tag & PROTECTED_POOL) Tag &= ~PROTECTED_POOL;
2401
2402 //
2403 // Check block tag
2404 //
2405 if (TagToFree && TagToFree != Tag)
2406 {
2407 DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
2408 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
2409 }
2410
2411 //
2412 // Track the removal of this allocation
2413 //
2414 ExpRemovePoolTracker(Tag,
2415 BlockSize * POOL_BLOCK_SIZE,
2416 Entry->PoolType - 1);
2417
2418 //
2419 // Release pool quota, if any
2420 //
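// (Quota allocations stash the owning EPROCESS pointer in the last
// pointer-sized slot of the block; ExAllocatePoolWithQuotaTag below
// writes it)
//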
2421 if ((Entry->PoolType - 1) & QUOTA_POOL_MASK)
2422 {
2423 Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
2424 if (Process)
2425 {
2426 if (Process->Pcb.Header.Type != ProcessObject)
2427 {
2428 DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
2429 Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
2430 KeBugCheckEx(BAD_POOL_CALLER,
2431 0x0D,
2432 (ULONG_PTR)P,
2433 Tag,
2434 (ULONG_PTR)Process);
2435 }
2436 PsReturnPoolQuota(Process, PoolType, BlockSize * POOL_BLOCK_SIZE);
2437 ObDereferenceObject(Process);
2438 }
2439 }
2440
2441 //
2442 // Is this allocation small enough to have come from a lookaside list?
2443 //
2444 if (BlockSize <= NUMBER_POOL_LOOKASIDE_LISTS)
2445 {
2446 //
2447 // Try pushing it into the per-CPU lookaside list
2448 //
2449 LookasideList = (PoolType == PagedPool) ?
2450 Prcb->PPPagedLookasideList[BlockSize - 1].P :
2451 Prcb->PPNPagedLookasideList[BlockSize - 1].P;
2452 LookasideList->TotalFrees++;
2453 if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2454 {
2455 LookasideList->FreeHits++;
2456 InterlockedPushEntrySList(&LookasideList->ListHead, P);
2457 return;
2458 }
2459
2460 //
2461 // We failed, try to push it into the global lookaside list
2462 //
2463 LookasideList = (PoolType == PagedPool) ?
2464 Prcb->PPPagedLookasideList[BlockSize - 1].L :
2465 Prcb->PPNPagedLookasideList[BlockSize - 1].L;
2466 LookasideList->TotalFrees++;
2467 if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2468 {
2469 LookasideList->FreeHits++;
2470 InterlockedPushEntrySList(&LookasideList->ListHead, P);
2471 return;
2472 }
2473 }
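
//
// Neither lookaside list had room (or the block was too large for
// one), so fall through and return the block to the descriptor's
// free lists, coalescing with any free neighbors
//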
2474
2475 //
2476 // Get the pointer to the next entry
2477 //
2478 NextEntry = POOL_BLOCK(Entry, BlockSize);
2479
2480 //
2481 // Update performance counters
2482 //
2483 InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
2484 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, -BlockSize * POOL_BLOCK_SIZE);
2485
2486 //
2487 // Acquire the pool lock
2488 //
2489 OldIrql = ExLockPool(PoolDesc);
2490
2491 //
2492 // Check if the next allocation is at the end of the page
2493 //
2494 ExpCheckPoolBlocks(Entry);
2495 if (PAGE_ALIGN(NextEntry) != NextEntry)
2496 {
2497 //
2498 // We may be able to combine the block if it's free
2499 //
2500 if (NextEntry->PoolType == 0)
2501 {
2502 //
2503 // The next block is free, so we'll do a combine
2504 //
2505 Combined = TRUE;
2506
2507 //
2508 // Make sure there's actual data in the block -- anything smaller
2509 // than this means we only have the header, so there's no linked list
2510 // for us to remove
2511 //
2512 if (NextEntry->BlockSize != 1)
2513 {
2514 //
2515 // The block is at least big enough to have a linked list, so go
2516 // ahead and remove it
2517 //
2518 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
2519 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
2520 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
2521 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
2522 }
2523
2524 //
2525 // Our entry is now combined with the next entry
2526 //
2527 Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
2528 }
2529 }
2530
2531 //
2532 // Now check if there was a previous entry on the same page as us
2533 //
2534 if (Entry->PreviousSize)
2535 {
2536 //
2537 // Great, grab that entry and check if it's free
2538 //
2539 NextEntry = POOL_PREV_BLOCK(Entry);
2540 if (NextEntry->PoolType == 0)
2541 {
2542 //
2543 // It is, so we can do a combine
2544 //
2545 Combined = TRUE;
2546
2547 //
2548 // Make sure there's actual data in the block -- anything smaller
2549 // than this means we only have the header so there's no linked list
2550 // for us to remove
2551 //
2552 if (NextEntry->BlockSize != 1)
2553 {
2554 //
2555 // The block is at least big enough to have a linked list, so go
2556 // ahead and remove it
2557 //
2558 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
2559 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
2560 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
2561 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
2562 }
2563
2564 //
2565 // Combine our original block (which might've already been combined
2566 // with the next block), into the previous block
2567 //
2568 NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;
2569
2570 //
2571 // And now we'll work with the previous block instead
2572 //
2573 Entry = NextEntry;
2574 }
2575 }
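
//
// For example, freeing B in a page laid out [A free][B][C free] first
// absorbs C into B above, then absorbs the result into A here, leaving
// a single free block spanning all three
//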
2576
2577 //
2578 // By now, it may have been possible for our combined blocks to actually
2579 // have made up a full page (if there were only 2-3 allocations on the
2580 // page, they could've all been combined).
2581 //
2582 if ((PAGE_ALIGN(Entry) == Entry) &&
2583 (PAGE_ALIGN(POOL_NEXT_BLOCK(Entry)) == POOL_NEXT_BLOCK(Entry)))
2584 {
2585 //
2586 // In this case, release the pool lock, update the performance counter,
2587 // and free the page
2588 //
2589 ExUnlockPool(PoolDesc, OldIrql);
2590 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalPages, -1);
2591 MiFreePoolPages(Entry);
2592 return;
2593 }
2594
2595 //
2596 // Otherwise, we now have a free block (or a combination of 2 or 3)
2597 //
2598 Entry->PoolType = 0;
2599 BlockSize = Entry->BlockSize;
2600 ASSERT(BlockSize != 1);
2601
2602 //
2603 // Check if we actually combined the block with a neighbor
2604 //
2605 if (Combined)
2606 {
2607 //
2608 // Get the block that now follows our (possibly combined) free block,
2609 // so that its back-link can be fixed up to point at us
2610 //
2611 NextEntry = POOL_NEXT_BLOCK(Entry);
2612
2613 //
2614 // As long as the next block isn't on a page boundary, have it point
2615 // back to us
2616 //
2617 if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
2618 }
2619
2620 //
2621 // Insert this new free block, and release the pool lock
2622 //
2623 ExpInsertPoolHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
2624 ExpCheckPoolLinks(POOL_FREE_BLOCK(Entry));
2625 ExUnlockPool(PoolDesc, OldIrql);
2626 }
2627
2628 /*
2629 * @implemented
2630 */
2631 VOID
2632 NTAPI
2633 ExFreePool(PVOID P)
2634 {
2635 //
2636 // Just free without checking for the tag
2637 //
2638 ExFreePoolWithTag(P, 0);
2639 }
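
//
// Hypothetical driver-side usage sketch (tag bytes are given in reverse
// so pool dumps display them as "Xmpl"):
//
//     PVOID Buffer = ExAllocatePoolWithTag(NonPagedPool, 64, 'lpmX');
//     if (Buffer) ExFreePoolWithTag(Buffer, 'lpmX');
//
// Passing the same tag to ExFreePoolWithTag lets the tag check above
// bugcheck immediately on a mismatched free.
//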
2640
2641 /*
2642 * @unimplemented
2643 */
2644 SIZE_T
2645 NTAPI
2646 ExQueryPoolBlockSize(IN PVOID PoolBlock,
2647 OUT PBOOLEAN QuotaCharged)
2648 {
2649 //
2650 // Not implemented
2651 //
2652 UNIMPLEMENTED;
2653 *QuotaCharged = FALSE;
2654 return 0;
2654 }
2655
2656 /*
2657 * @implemented
2658 */
2660 PVOID
2661 NTAPI
2662 ExAllocatePoolWithQuota(IN POOL_TYPE PoolType,
2663 IN SIZE_T NumberOfBytes)
2664 {
2665 //
2666 // Allocate the pool
2667 //
2668 return ExAllocatePoolWithQuotaTag(PoolType, NumberOfBytes, TAG_NONE);
2669 }
2670
2671 /*
2672 * @implemented
2673 */
2674 PVOID
2675 NTAPI
2676 ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType,
2677 IN SIZE_T NumberOfBytes,
2678 IN ULONG Tag,
2679 IN EX_POOL_PRIORITY Priority)
2680 {
2681 PVOID Buffer;
2682
2683 //
2684 // Allocate the pool
2685 //
2686 Buffer = ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2687 if (Buffer == NULL)
2688 {
2689 UNIMPLEMENTED;
2690 }
2691
2692 return Buffer;
2693 }
2694
2695 /*
2696 * @implemented
2697 */
2698 PVOID
2699 NTAPI
2700 ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType,
2701 IN SIZE_T NumberOfBytes,
2702 IN ULONG Tag)
2703 {
2704 BOOLEAN Raise = TRUE;
2705 PVOID Buffer;
2706 PPOOL_HEADER Entry;
2707 NTSTATUS Status;
2708 PEPROCESS Process = PsGetCurrentProcess();
2709
2710 //
2711 // Check if we should fail instead of raising an exception
2712 //
2713 if (PoolType & POOL_QUOTA_FAIL_INSTEAD_OF_RAISE)
2714 {
2715 Raise = FALSE;
2716 PoolType &= ~POOL_QUOTA_FAIL_INSTEAD_OF_RAISE;
2717 }
2718
2719 //
2720 // Inject the pool quota mask
2721 //
2722 PoolType += QUOTA_POOL_MASK;
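
//
// (The mask survives in the block's stored pool type; that is how
// ExFreePoolWithTag knows to return quota and dereference the owner)
//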
2723
2724 //
2725 // Check if we have enough space to add the quota owner process, as long as
2726 // this isn't the system process, which never gets charged quota
2727 //
2728 ASSERT(NumberOfBytes != 0);
2729 if ((NumberOfBytes <= (PAGE_SIZE - POOL_BLOCK_SIZE - sizeof(PVOID))) &&
2730 (Process != PsInitialSystemProcess))
2731 {
2732 //
2733 // Add space for our EPROCESS pointer
2734 //
2735 NumberOfBytes += sizeof(PEPROCESS);
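// (this extra slot at the end of the block will receive the owning
// EPROCESS pointer once quota is charged below)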
2736 }
2737 else
2738 {
2739 //
2740 // We won't be able to store the pointer, so don't use quota for this
2741 //
2742 PoolType -= QUOTA_POOL_MASK;
2743 }
2744
2745 //
2746 // Allocate the pool buffer now
2747 //
2748 Buffer = ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2749
2750 //
2751 // If the buffer is page-aligned, this is a large page allocation and we
2752 // won't touch it
2753 //
2754 if (PAGE_ALIGN(Buffer) != Buffer)
2755 {
2756 //
2757 // Also if special pool is enabled, and this was allocated from there,
2758 // we won't touch it either
2759 //
2760 if ((ExpPoolFlags & POOL_FLAG_SPECIAL_POOL) &&
2761 (MmIsSpecialPoolAddress(Buffer)))
2762 {
2763 return Buffer;
2764 }
2765
2766 //
2767 // If it wasn't actually allocated with quota charges, ignore it too
2768 //
2769 if (!(PoolType & QUOTA_POOL_MASK)) return Buffer;
2770
2771 //
2772 // If this is the system process, we don't charge quota, so ignore
2773 //
2774 if (Process == PsInitialSystemProcess) return Buffer;
2775
2776 //
2777 // Actually go and charge quota for the process now
2778 //
2779 Entry = POOL_ENTRY(Buffer);
2780 Status = PsChargeProcessPoolQuota(Process,
2781 PoolType & BASE_POOL_TYPE_MASK,
2782 Entry->BlockSize * POOL_BLOCK_SIZE);
2783 if (!NT_SUCCESS(Status))
2784 {
2785 //
2786 // Quota failed, back out the allocation, clear the owner, and fail
2787 //
2788 ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
2789 ExFreePoolWithTag(Buffer, Tag);
2790 if (Raise) RtlRaiseStatus(Status);
2791 return NULL;
2792 }
2793
2794 //
2795 // Quota worked, write the owner and then reference it before returning
2796 //
2797 ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = Process;
2798 ObReferenceObject(Process);
2799 }
2800 else if (!(Buffer) && (Raise))
2801 {
2802 //
2803 // The allocation failed, raise an error if we are in raise mode
2804 //
2805 RtlRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
2806 }
2807
2808 //
2809 // Return the allocated buffer
2810 //
2811 return Buffer;
2812 }
2813
2814 #if DBG && defined(KDBG)
2815
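/*
 * KDBG pool dumper: walks the pool page containing a given address and
 * prints each block header. Bit 0 of the optional flags restricts the
 * header listing to the block containing the address and hexdumps raw
 * block contents; bit 31 (0x80000000) suppresses the state/tag columns.
 */
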
2816 BOOLEAN
2817 ExpKdbgExtPool(
2818 ULONG Argc,
2819 PCHAR Argv[])
2820 {
2821 ULONG_PTR Address = 0, Flags = 0;
2822 PVOID PoolPage;
2823 PPOOL_HEADER Entry;
2824 BOOLEAN ThisOne;
2825 PULONG Data;
2826
2827 if (Argc > 1)
2828 {
2829 /* Get address */
2830 if (!KdbpGetHexNumber(Argv[1], &Address))
2831 {
2832 KdbpPrint("Invalid parameter: %s\n", Argv[0]);
2833 return TRUE;
2834 }
2835 }
2836
2837 if (Argc > 2)
2838 {
2839 /* Get flags */
2840 if (!KdbpGetHexNumber(Argv[2], &Flags))
2841 {
2842 KdbpPrint("Invalid parameter: %s\n", Argv[2]);
2843 return TRUE;
2844 }
2845 }
2846
2847 /* Check if we got an address */
2848 if (Address != 0)
2849 {
2850 /* Get the base page */
2851 PoolPage = PAGE_ALIGN(Address);
2852 }
2853 else
2854 {
2855 KdbpPrint("Heap is unimplemented\n");
2856 return TRUE;
2857 }
2858
2859 /* No paging support! */
2860 if (!MmIsAddressValid(PoolPage))
2861 {
2862 KdbpPrint("Address not accessible!\n");
2863 return TRUE;
2864 }
2865
2866 /* Get pool type */
2867 if ((Address >= (ULONG_PTR)MmPagedPoolStart) && (Address <= (ULONG_PTR)MmPagedPoolEnd))
2868 KdbpPrint("Allocation is from PagedPool region\n");
2869 else if ((Address >= (ULONG_PTR)MmNonPagedPoolStart) && (Address <= (ULONG_PTR)MmNonPagedPoolEnd))
2870 KdbpPrint("Allocation is from NonPagedPool region\n");
2871 else
2872 {
2873 KdbpPrint("Address 0x%p is not within any pool!\n", (PVOID)Address);
2874 return TRUE;
2875 }
2876
2877 /* Loop all entries of that page */
2878 Entry = PoolPage;
2879 do
2880 {
2881 /* Check if the address is within that entry */
2882 ThisOne = ((Address >= (ULONG_PTR)Entry) &&
2883 (Address < (ULONG_PTR)(Entry + Entry->BlockSize)));
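/* (Entry + n advances by n whole POOL_HEADERs, which works because
   sizeof(POOL_HEADER) matches POOL_BLOCK_SIZE) */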
2884
2885 if (!(Flags & 1) || ThisOne)
2886 {
2887 /* Print the line */
2888 KdbpPrint("%c%p size: %4d previous size: %4d %s %.4s\n",
2889 ThisOne ? '*' : ' ', Entry, Entry->BlockSize, Entry->PreviousSize,
2890 (Flags & 0x80000000) ? "" : (Entry->PoolType ? "(Allocated)" : "(Free) "),
2891 (Flags & 0x80000000) ? "" : (PCHAR)&Entry->PoolTag);
2892 }
2893
2894 if (Flags & 1)
2895 {
2896 Data = (PULONG)(Entry + 1);
2897 KdbpPrint(" %p %08lx %08lx %08lx %08lx\n"
2898 " %p %08lx %08lx %08lx %08lx\n",
2899 &Data[0], Data[0], Data[1], Data[2], Data[3],
2900 &Data[4], Data[4], Data[5], Data[6], Data[7]);
2901 }
2902
2903 /* Go to next entry */
2904 Entry = POOL_BLOCK(Entry, Entry->BlockSize);
2905 }
2906 while ((Entry->BlockSize != 0) && ((ULONG_PTR)Entry < (ULONG_PTR)PoolPage + PAGE_SIZE));
2907
2908 return TRUE;
2909 }
2910
2911 #endif // DBG && KDBG
2912
2913 /* EOF */