[NTOSKRNL] Now that the memory dumper handles paged pool, make use of it in any situation
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/expool.c
5 * PURPOSE: ARM Memory Manager Executive Pool Manager
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 #define MODULE_INVOLVED_IN_ARM3
16 #include <mm/ARM3/miarm.h>
17
18 #undef ExAllocatePoolWithQuota
19 #undef ExAllocatePoolWithQuotaTag
20
21 /* GLOBALS ********************************************************************/
22
23 #define POOL_BIG_TABLE_ENTRY_FREE 0x1
24
25 typedef struct _POOL_DPC_CONTEXT
26 {
27 PPOOL_TRACKER_TABLE PoolTrackTable;
28 SIZE_T PoolTrackTableSize;
29 PPOOL_TRACKER_TABLE PoolTrackTableExpansion;
30 SIZE_T PoolTrackTableSizeExpansion;
31 } POOL_DPC_CONTEXT, *PPOOL_DPC_CONTEXT;
32
33 ULONG ExpNumberOfPagedPools;
34 POOL_DESCRIPTOR NonPagedPoolDescriptor;
35 PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16 + 1];
36 PPOOL_DESCRIPTOR PoolVector[2];
37 PKGUARDED_MUTEX ExpPagedPoolMutex;
38 SIZE_T PoolTrackTableSize, PoolTrackTableMask;
39 SIZE_T PoolBigPageTableSize, PoolBigPageTableHash;
40 PPOOL_TRACKER_TABLE PoolTrackTable;
41 PPOOL_TRACKER_BIG_PAGES PoolBigPageTable;
42 KSPIN_LOCK ExpTaggedPoolLock;
43 ULONG PoolHitTag;
44 BOOLEAN ExStopBadTags;
45 KSPIN_LOCK ExpLargePoolTableLock;
46 ULONG ExpPoolBigEntriesInUse;
47 ULONG ExpPoolFlags;
48 ULONG ExPoolFailures;
49
50 /* Pool block/header/list access macros */
51 #define POOL_ENTRY(x) (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
52 #define POOL_FREE_BLOCK(x) (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
53 #define POOL_BLOCK(x, i) (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
54 #define POOL_NEXT_BLOCK(x) POOL_BLOCK((x), (x)->BlockSize)
55 #define POOL_PREV_BLOCK(x) POOL_BLOCK((x), -((x)->PreviousSize))
56
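/*
 * Worked example (added note): all sizes in a pool header are expressed in
 * units of POOL_BLOCK_SIZE (8 bytes on 32-bit builds, 16 on 64-bit ones,
 * matching sizeof(POOL_HEADER)). Assuming the 32-bit layout, for a header
 * x at 0x80001000 with x->BlockSize == 4:
 *
 *   POOL_NEXT_BLOCK(x)  == POOL_BLOCK(x, 4) == (PPOOL_HEADER)0x80001020
 *   POOL_FREE_BLOCK(x)  == (PLIST_ENTRY)0x80001008  (the usable data)
 *   POOL_ENTRY(data)    == data - 8                 (back to the header)
 */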
57 /*
58 * Pool list access debug macros, similar to Arthur's pfnlist.c work.
59 * Microsoft actually implements similar checks in the Windows Server 2003 SP1
60 * pool code, but only for checked builds.
61 *
62 * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
63 * that these checks are done even on retail builds, due to the increasing
64 * number of kernel-mode attacks which depend on dangling list pointers and other
65 * kinds of list-based attacks.
66 *
67 * For now, I will leave these checks on all the time, but later they are likely
68 * to be DBG-only, at least until there are enough kernel-mode security attacks
69 * against ReactOS to warrant the performance hit.
70 *
71 * For now, these are not made inline, so we can get good stack traces.
72 */
73 PLIST_ENTRY
74 NTAPI
75 ExpDecodePoolLink(IN PLIST_ENTRY Link)
76 {
77 return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
78 }
79
80 PLIST_ENTRY
81 NTAPI
82 ExpEncodePoolLink(IN PLIST_ENTRY Link)
83 {
84 return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
85 }
86
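/*
 * Note (added): these helpers work because every list entry in pool is at
 * least 8-byte aligned, so bit 0 of a valid pointer is always clear. Links
 * are stored with bit 0 set:
 *
 *   PLIST_ENTRY Stored = ExpEncodePoolLink(Entry);   // Entry | 1
 *   PLIST_ENTRY Real   = ExpDecodePoolLink(Stored);  // Stored & ~1
 *
 * Code that dereferences a stored link without decoding it first faults on
 * the misaligned address, turning silent list corruption into a visible
 * crash.
 */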
87 VOID
88 NTAPI
89 ExpCheckPoolLinks(IN PLIST_ENTRY ListHead)
90 {
91 if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink) != ListHead) ||
92 (ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink) != ListHead))
93 {
94 KeBugCheckEx(BAD_POOL_HEADER,
95 3,
96 (ULONG_PTR)ListHead,
97 (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink),
98 (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink));
99 }
100 }
101
102 VOID
103 NTAPI
104 ExpInitializePoolListHead(IN PLIST_ENTRY ListHead)
105 {
106 ListHead->Flink = ListHead->Blink = ExpEncodePoolLink(ListHead);
107 }
108
109 BOOLEAN
110 NTAPI
111 ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)
112 {
113 return (ExpDecodePoolLink(ListHead->Flink) == ListHead);
114 }
115
116 VOID
117 NTAPI
118 ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
119 {
120 PLIST_ENTRY Blink, Flink;
121 Flink = ExpDecodePoolLink(Entry->Flink);
122 Blink = ExpDecodePoolLink(Entry->Blink);
123 Flink->Blink = ExpEncodePoolLink(Blink);
124 Blink->Flink = ExpEncodePoolLink(Flink);
125 }
126
127 PLIST_ENTRY
128 NTAPI
129 ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
130 {
131 PLIST_ENTRY Entry, Flink;
132 Entry = ExpDecodePoolLink(ListHead->Flink);
133 Flink = ExpDecodePoolLink(Entry->Flink);
134 ListHead->Flink = ExpEncodePoolLink(Flink);
135 Flink->Blink = ExpEncodePoolLink(ListHead);
136 return Entry;
137 }
138
139 PLIST_ENTRY
140 NTAPI
141 ExpRemovePoolTailList(IN PLIST_ENTRY ListHead)
142 {
143 PLIST_ENTRY Entry, Blink;
144 Entry = ExpDecodePoolLink(ListHead->Blink);
145 Blink = ExpDecodePoolLink(Entry->Blink);
146 ListHead->Blink = ExpEncodePoolLink(Blink);
147 Blink->Flink = ExpEncodePoolLink(ListHead);
148 return Entry;
149 }
150
151 VOID
152 NTAPI
153 ExpInsertPoolTailList(IN PLIST_ENTRY ListHead,
154 IN PLIST_ENTRY Entry)
155 {
156 PLIST_ENTRY Blink;
157 ExpCheckPoolLinks(ListHead);
158 Blink = ExpDecodePoolLink(ListHead->Blink);
159 Entry->Flink = ExpEncodePoolLink(ListHead);
160 Entry->Blink = ExpEncodePoolLink(Blink);
161 Blink->Flink = ExpEncodePoolLink(Entry);
162 ListHead->Blink = ExpEncodePoolLink(Entry);
163 ExpCheckPoolLinks(ListHead);
164 }
165
166 VOID
167 NTAPI
168 ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead,
169 IN PLIST_ENTRY Entry)
170 {
171 PLIST_ENTRY Flink;
172 ExpCheckPoolLinks(ListHead);
173 Flink = ExpDecodePoolLink(ListHead->Flink);
174 Entry->Flink = ExpEncodePoolLink(Flink);
175 Entry->Blink = ExpEncodePoolLink(ListHead);
176 Flink->Blink = ExpEncodePoolLink(Entry);
177 ListHead->Flink = ExpEncodePoolLink(Entry);
178 ExpCheckPoolLinks(ListHead);
179 }
180
181 VOID
182 NTAPI
183 ExpCheckPoolHeader(IN PPOOL_HEADER Entry)
184 {
185 PPOOL_HEADER PreviousEntry, NextEntry;
186
187 /* Is there a block before this one? */
188 if (Entry->PreviousSize)
189 {
190 /* Get it */
191 PreviousEntry = POOL_PREV_BLOCK(Entry);
192
193 /* The two blocks must be on the same page! */
194 if (PAGE_ALIGN(Entry) != PAGE_ALIGN(PreviousEntry))
195 {
196 /* Something is awry */
197 KeBugCheckEx(BAD_POOL_HEADER,
198 6,
199 (ULONG_PTR)PreviousEntry,
200 __LINE__,
201 (ULONG_PTR)Entry);
202 }
203
204 /* This block should also indicate that it's as large as we think it is */
205 if (PreviousEntry->BlockSize != Entry->PreviousSize)
206 {
207 /* Otherwise, someone corrupted one of the sizes */
208 DPRINT1("PreviousEntry BlockSize %lu, tag %.4s. Entry PreviousSize %lu, tag %.4s\n",
209 PreviousEntry->BlockSize, (char *)&PreviousEntry->PoolTag,
210 Entry->PreviousSize, (char *)&Entry->PoolTag);
211 KeBugCheckEx(BAD_POOL_HEADER,
212 5,
213 (ULONG_PTR)PreviousEntry,
214 __LINE__,
215 (ULONG_PTR)Entry);
216 }
217 }
218 else if (PAGE_ALIGN(Entry) != Entry)
219 {
220 /* If there's no block before us, we are the first block, so we should be on a page boundary */
221 KeBugCheckEx(BAD_POOL_HEADER,
222 7,
223 0,
224 __LINE__,
225 (ULONG_PTR)Entry);
226 }
227
228 /* This block must have a size */
229 if (!Entry->BlockSize)
230 {
231 /* Someone must've corrupted this field */
232 if (Entry->PreviousSize)
233 {
234 PreviousEntry = POOL_PREV_BLOCK(Entry);
235 DPRINT1("PreviousEntry tag %.4s. Entry tag %.4s\n",
236 (char *)&PreviousEntry->PoolTag,
237 (char *)&Entry->PoolTag);
238 }
239 else
240 {
241 DPRINT1("Entry tag %.4s\n",
242 (char *)&Entry->PoolTag);
243 }
244 KeBugCheckEx(BAD_POOL_HEADER,
245 8,
246 0,
247 __LINE__,
248 (ULONG_PTR)Entry);
249 }
250
251 /* Okay, now get the next block */
252 NextEntry = POOL_NEXT_BLOCK(Entry);
253
254 /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
255 if (PAGE_ALIGN(NextEntry) != NextEntry)
256 {
257 /* The two blocks must be on the same page! */
258 if (PAGE_ALIGN(Entry) != PAGE_ALIGN(NextEntry))
259 {
260 /* Something is messed up */
261 KeBugCheckEx(BAD_POOL_HEADER,
262 9,
263 (ULONG_PTR)NextEntry,
264 __LINE__,
265 (ULONG_PTR)Entry);
266 }
267
268 /* And this block should think we are as large as we truly are */
269 if (NextEntry->PreviousSize != Entry->BlockSize)
270 {
271 /* Otherwise, someone corrupted the field */
272 DPRINT1("Entry BlockSize %lu, tag %.4s. NextEntry PreviousSize %lu, tag %.4s\n",
273 Entry->BlockSize, (char *)&Entry->PoolTag,
274 NextEntry->PreviousSize, (char *)&NextEntry->PoolTag);
275 KeBugCheckEx(BAD_POOL_HEADER,
276 5,
277 (ULONG_PTR)NextEntry,
278 __LINE__,
279 (ULONG_PTR)Entry);
280 }
281 }
282 }
283
284 VOID
285 NTAPI
286 ExpCheckPoolAllocation(
287 PVOID P,
288 POOL_TYPE PoolType,
289 ULONG Tag)
290 {
291 PPOOL_HEADER Entry;
292 ULONG i;
293 KIRQL OldIrql;
294 POOL_TYPE RealPoolType;
295
296 /* Get the pool header */
297 Entry = ((PPOOL_HEADER)P) - 1;
298
299 /* Check if this is a large allocation */
300 if (PAGE_ALIGN(P) == P)
301 {
302 /* Lock the pool table */
303 KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
304
305 /* Find the pool tag */
306 for (i = 0; i < PoolBigPageTableSize; i++)
307 {
308 /* Check if this is our allocation */
309 if (PoolBigPageTable[i].Va == P)
310 {
311 /* Make sure the tag is ok */
312 if (PoolBigPageTable[i].Key != Tag)
313 {
314 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, PoolBigPageTable[i].Key, Tag);
315 }
316
317 break;
318 }
319 }
320
321 /* Release the lock */
322 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
323
324 if (i == PoolBigPageTableSize)
325 {
326 /* Did not find the allocation */
327 //ASSERT(FALSE);
328 }
329
330 /* Get Pool type by address */
331 RealPoolType = MmDeterminePoolType(P);
332 }
333 else
334 {
335 /* Verify the tag */
336 if (Entry->PoolTag != Tag)
337 {
338 DPRINT1("Allocation has wrong pool tag! Expected '%.4s', got '%.4s' (0x%08lx)\n",
339 &Tag, &Entry->PoolTag, Entry->PoolTag);
340 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Entry->PoolTag, Tag);
341 }
342
343 /* Check the rest of the header */
344 ExpCheckPoolHeader(Entry);
345
346 /* Get Pool type from entry */
347 RealPoolType = (Entry->PoolType - 1);
348 }
349
350 /* Should we check the pool type? */
351 if (PoolType != -1)
352 {
353 /* Verify the pool type */
354 if (RealPoolType != PoolType)
355 {
356 DPRINT1("Wrong pool type! Expected %s, got %s\n",
357 PoolType & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool",
358 (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool");
359 KeBugCheckEx(BAD_POOL_CALLER, 0xCC, (ULONG_PTR)P, Entry->PoolTag, Tag);
360 }
361 }
362 }
363
364 VOID
365 NTAPI
366 ExpCheckPoolBlocks(IN PVOID Block)
367 {
368 BOOLEAN FoundBlock = FALSE;
369 SIZE_T Size = 0;
370 PPOOL_HEADER Entry;
371
372 /* Get the first entry for this page, make sure it really is the first */
373 Entry = PAGE_ALIGN(Block);
374 ASSERT(Entry->PreviousSize == 0);
375
376 /* Now scan each entry */
377 while (TRUE)
378 {
379 /* When we actually found our block, remember this */
380 if (Entry == Block) FoundBlock = TRUE;
381
382 /* Now validate this block header */
383 ExpCheckPoolHeader(Entry);
384
385 /* And go to the next one, keeping track of our size */
386 Size += Entry->BlockSize;
387 Entry = POOL_NEXT_BLOCK(Entry);
388
389 /* If we hit the last block, stop */
390 if (Size >= (PAGE_SIZE / POOL_BLOCK_SIZE)) break;
391
392 /* If we hit the end of the page, stop */
393 if (PAGE_ALIGN(Entry) == Entry) break;
394 }
395
396 /* We must've found our block, and we must have hit the end of the page */
397 if ((PAGE_ALIGN(Entry) != Entry) || !(FoundBlock))
398 {
399 /* Otherwise, the blocks are messed up */
400 KeBugCheckEx(BAD_POOL_HEADER, 10, (ULONG_PTR)Block, __LINE__, (ULONG_PTR)Entry);
401 }
402 }
403
404 FORCEINLINE
405 VOID
406 ExpCheckPoolIrqlLevel(IN POOL_TYPE PoolType,
407 IN SIZE_T NumberOfBytes,
408 IN PVOID Entry)
409 {
410 //
411 // Validate IRQL: It must be APC_LEVEL or lower for Paged Pool, and it must
412 // be DISPATCH_LEVEL or lower for Non Paged Pool
413 //
414 if (((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) ?
415 (KeGetCurrentIrql() > APC_LEVEL) :
416 (KeGetCurrentIrql() > DISPATCH_LEVEL))
417 {
418 //
419 // Take the system down
420 //
421 KeBugCheckEx(BAD_POOL_CALLER,
422 !Entry ? POOL_ALLOC_IRQL_INVALID : POOL_FREE_IRQL_INVALID,
423 KeGetCurrentIrql(),
424 PoolType,
425 !Entry ? NumberOfBytes : (ULONG_PTR)Entry);
426 }
427 }
428
429 FORCEINLINE
430 ULONG
431 ExpComputeHashForTag(IN ULONG Tag,
432 IN SIZE_T BucketMask)
433 {
434 //
435 // Compute the hash by multiplying with a large prime number and then XORing
436 // with the HIDWORD of the result.
437 //
438 // Finally, AND with the bucket mask to generate a valid index/bucket into
439 // the table
440 //
441 ULONGLONG Result = (ULONGLONG)40543 * Tag;
442 return (ULONG)BucketMask & ((ULONG)Result ^ (Result >> 32));
443 }
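/*
 * Worked example (added): for the tag 'looP' ("Pool"), the bucket would be
 * computed as
 *
 *   ULONGLONG Product = 40543ULL * 'looP';
 *   ULONG Bucket = (ULONG)PoolTrackTableMask &
 *                  ((ULONG)Product ^ (ULONG)(Product >> 32));
 *
 * Since PoolTrackTableMask is one less than a power of two, the final AND
 * keeps only the low bits, yielding a valid PoolTrackTable index.
 */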
444
445 FORCEINLINE
446 ULONG
447 ExpComputePartialHashForAddress(IN PVOID BaseAddress)
448 {
449 ULONG Result;
450 //
451 // Compute the hash by converting the address into a page number, and then
452 // XORing each nibble with the next one.
453 //
454 // We do *NOT* AND with the bucket mask at this point because big table expansion
455 // might happen. Therefore, the final step of the hash must be performed
456 // while holding the expansion pushlock, and this is why we call this a
457 // "partial" hash only.
458 //
459 Result = (ULONG)((ULONG_PTR)BaseAddress >> PAGE_SHIFT);
460 return (Result >> 24) ^ (Result >> 16) ^ (Result >> 8) ^ Result;
461 }
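/*
 * Worked example (added, assuming 4KB pages): for Va = 0x81234000 the page
 * number is 0x81234, and folding per the shifts above gives
 *
 *   0x0 ^ 0x8 ^ 0x812 ^ 0x81234 = 0x81A2E
 *
 * The caller later ANDs this with PoolBigPageTableHash, under the table
 * lock, to obtain the final bucket.
 */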
462
463 #if DBG
464 FORCEINLINE
465 BOOLEAN
466 ExpTagAllowPrint(CHAR Tag)
467 {
468 if ((Tag >= 'a' && Tag <= 'z') ||
469 (Tag >= 'A' && Tag <= 'Z') ||
470 Tag == ' ')
471 {
472 return TRUE;
473 }
474
475 return FALSE;
476 }
477
478 #define MiDumperPrint(dbg, fmt, ...) \
479 if (dbg) KdbpPrint(fmt, ##__VA_ARGS__); \
480 else DPRINT1(fmt, ##__VA_ARGS__)
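/*
 * Note (added): an if/else macro like this is exposed to the classic
 * dangling-else problem if used inside another unbraced if. The usual safe
 * formulation wraps the body:
 *
 *   #define MiDumperPrint(dbg, fmt, ...)                \
 *       do                                              \
 *       {                                               \
 *           if (dbg) KdbpPrint(fmt, ##__VA_ARGS__);     \
 *           else DPRINT1(fmt, ##__VA_ARGS__);           \
 *       } while (0)
 *
 * Every call site in this file uses the macro as a standalone statement,
 * so the original form happens to be safe here.
 */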
481
482 VOID
483 MiDumpPoolConsumers(BOOLEAN CalledFromDbg)
484 {
485 SIZE_T i;
486
487 //
488 // Print the out-of-memory banner only when not invoked from the debugger
489 //
490 if (!CalledFromDbg)
491 {
492 DPRINT1("---------------------\n");
493 DPRINT1("Out of memory dumper!\n");
494 }
495 else
496 {
497 KdbpPrint("Pool Used:\n");
498 }
499
500 //
501 // Print table header
502 //
503 MiDumperPrint(CalledFromDbg, "\t\tNonPaged\t\t\tPaged\n");
504 MiDumperPrint(CalledFromDbg, "Tag\t\tAllocs\t\tUsed\t\tAllocs\t\tUsed\n");
505
506 //
507 // We'll extract allocations for all the tracked pools
508 //
509 for (i = 0; i < PoolTrackTableSize; ++i)
510 {
511 PPOOL_TRACKER_TABLE TableEntry;
512
513 TableEntry = &PoolTrackTable[i];
514
515 //
516 // We only care about tags which have allocated memory
517 //
518 if (TableEntry->NonPagedBytes != 0 || TableEntry->PagedBytes != 0)
519 {
520 //
521 // If there's a tag, attempt to do a pretty print
522 //
523 if (TableEntry->Key != 0 && TableEntry->Key != TAG_NONE)
524 {
525 CHAR Tag[4];
526
527 //
528 // Extract each 'component' and check whether they are printable
529 //
530 Tag[0] = TableEntry->Key & 0xFF;
531 Tag[1] = TableEntry->Key >> 8 & 0xFF;
532 Tag[2] = TableEntry->Key >> 16 & 0xFF;
533 Tag[3] = TableEntry->Key >> 24 & 0xFF;
534
535 if (ExpTagAllowPrint(Tag[0]) && ExpTagAllowPrint(Tag[1]) && ExpTagAllowPrint(Tag[2]) && ExpTagAllowPrint(Tag[3]))
536 {
537 //
538 // Print in reversed order to match what is in source code
539 //
540 MiDumperPrint(CalledFromDbg, "'%c%c%c%c'\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", Tag[3], Tag[2], Tag[1], Tag[0],
541 TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
542 TableEntry->PagedAllocs, TableEntry->PagedBytes);
543 }
544 else
545 {
546 MiDumperPrint(CalledFromDbg, "%x\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TableEntry->Key,
547 TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
548 TableEntry->PagedAllocs, TableEntry->PagedBytes);
549 }
550 }
551 else
552 {
553 MiDumperPrint(CalledFromDbg, "Anon\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n",
554 TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
555 TableEntry->PagedAllocs, TableEntry->PagedBytes);
556 }
557 }
558 }
559
560 if (!CalledFromDbg)
561 {
562 DPRINT1("---------------------\n");
563 }
564 }
565 #endif
566
567 /* PRIVATE FUNCTIONS **********************************************************/
568
569 VOID
570 NTAPI
571 INIT_SECTION
572 ExpSeedHotTags(VOID)
573 {
574 ULONG i, Key, Hash, Index;
575 PPOOL_TRACKER_TABLE TrackTable = PoolTrackTable;
576 ULONG TagList[] =
577 {
578 '  oI',
579 ' laH',
580 'PldM',
581 'LooP',
582 'tSbO',
583 ' prI',
584 'bdDN',
585 'LprI',
586 'pOoI',
587 ' ldM',
588 'eliF',
589 'aVMC',
590 'dSeS',
591 'CFtN',
592 'looP',
593 'rPCT',
594 'bNMC',
595 'dTeS',
596 'sFtN',
597 'TPCT',
598 'CPCT',
599 ' yeK',
600 'qSbO',
601 'mNoI',
602 'aEoI',
603 'cPCT',
604 'aFtN',
605 '0ftN',
606 'tceS',
607 'SprI',
608 'ekoT',
609 '  eS',
610 'lCbO',
611 'cScC',
612 'lFtN',
613 'cAeS',
614 'mfSF',
615 'kWcC',
616 'miSF',
617 'CdfA',
618 'EdfA',
619 'orSF',
620 'nftN',
621 'PRIU',
622 'rFpN',
623 'RFpN',
624 'aPeS',
625 'sUeS',
626 'FpcA',
627 'MpcA',
628 'cSeS',
629 'mNbO',
630 'sFpN',
631 'uLeS',
632 'DPcS',
633 'nevE',
634 'vrqR',
635 'ldaV',
636 '  pP',
637 'SdaV',
638 ' daV',
639 'LdaV',
640 'FdaV',
641 ' GIB',
642 };
643
644 //
645 // Loop all 64 hot tags
646 //
647 ASSERT((sizeof(TagList) / sizeof(ULONG)) == 64);
648 for (i = 0; i < sizeof(TagList) / sizeof(ULONG); i++)
649 {
650 //
651 // Get the current tag, and compute its hash in the tracker table
652 //
653 Key = TagList[i];
654 Hash = ExpComputeHashForTag(Key, PoolTrackTableMask);
655
656 //
657 // Loop all the hashes in this index/bucket
658 //
659 Index = Hash;
660 while (TRUE)
661 {
662 //
663 // Find an empty entry, and make sure this isn't the last hash that
664 // can fit.
665 //
666 // On checked builds, also make sure this is the first time we are
667 // seeding this tag.
668 //
669 ASSERT(TrackTable[Hash].Key != Key);
670 if (!(TrackTable[Hash].Key) && (Hash != PoolTrackTableSize - 1))
671 {
672 //
673 // It has been seeded, move on to the next tag
674 //
675 TrackTable[Hash].Key = Key;
676 break;
677 }
678
679 //
680 // This entry was already taken, compute the next possible hash while
681 // making sure we're not back at our initial index.
682 //
683 ASSERT(TrackTable[Hash].Key != Key);
684 Hash = (Hash + 1) & PoolTrackTableMask;
685 if (Hash == Index) break;
686 }
687 }
688 }
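/*
 * Note (added): the tags above read reversed because a four-character tag
 * is stored as a little-endian ULONG: the literal 'looP' places the bytes
 * 'P','o','o','l' in memory, i.e. the familiar "Pool" tag. Seeding these
 * 64 frequent tags up front keeps their hash probes short on hot paths.
 */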
689
690 VOID
691 NTAPI
692 ExpRemovePoolTracker(IN ULONG Key,
693 IN SIZE_T NumberOfBytes,
694 IN POOL_TYPE PoolType)
695 {
696 ULONG Hash, Index;
697 PPOOL_TRACKER_TABLE Table, TableEntry;
698 SIZE_T TableMask, TableSize;
699
700 //
701 // Remove the PROTECTED_POOL flag which is not part of the tag
702 //
703 Key &= ~PROTECTED_POOL;
704
705 //
706 // With WinDBG you can set a tag you want to break on when an allocation is
707 // attempted
708 //
709 if (Key == PoolHitTag) DbgBreakPoint();
710
711 //
712 // Why the double indirection? Because normally this function is also used
713 // when doing session pool allocations, which use another set of tables,
714 // sizes, and masks that live in session pool. Now we don't support session
715 // pool so we only ever use the regular tables, but I'm keeping the code this
716 // way so that the day we DO support session pool, it won't require that
717 // many changes
718 //
719 Table = PoolTrackTable;
720 TableMask = PoolTrackTableMask;
721 TableSize = PoolTrackTableSize;
722 DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize);
723
724 //
725 // Compute the hash for this key, and loop all the possible buckets
726 //
727 Hash = ExpComputeHashForTag(Key, TableMask);
728 Index = Hash;
729 while (TRUE)
730 {
731 //
732 // Have we found the entry for this tag?
733 //
734 TableEntry = &Table[Hash];
735 if (TableEntry->Key == Key)
736 {
737 //
738 // Decrement the counters depending on if this was paged or nonpaged
739 // pool
740 //
741 if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
742 {
743 InterlockedIncrement(&TableEntry->NonPagedFrees);
744 InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes,
745 -(SSIZE_T)NumberOfBytes);
746 return;
747 }
748 InterlockedIncrement(&TableEntry->PagedFrees);
749 InterlockedExchangeAddSizeT(&TableEntry->PagedBytes,
750 -(SSIZE_T)NumberOfBytes);
751 return;
752 }
753
754 //
755 // We should have only ended up with an empty entry if we've reached
756 // the last bucket
757 //
758 if (!TableEntry->Key)
759 {
760 DPRINT1("Empty item reached in tracker table. Hash=0x%lx, TableMask=0x%lx, Tag=0x%08lx, NumberOfBytes=%lu, PoolType=%d\n",
761 Hash, TableMask, Key, (ULONG)NumberOfBytes, PoolType);
762 ASSERT(Hash == TableMask);
763 }
764
765 //
766 // This path is hit when we don't have an entry, and the current bucket
767 // is full, so we simply try the next one
768 //
769 Hash = (Hash + 1) & TableMask;
770 if (Hash == Index) break;
771 }
772
773 //
774 // And finally this path is hit when all the buckets are full, and we need
775 // some expansion. This path is not yet supported in ReactOS and so we'll
776 // ignore the tag
777 //
778 DPRINT1("Out of pool tag space, ignoring...\n");
779 }
780
781 VOID
782 NTAPI
783 ExpInsertPoolTracker(IN ULONG Key,
784 IN SIZE_T NumberOfBytes,
785 IN POOL_TYPE PoolType)
786 {
787 ULONG Hash, Index;
788 KIRQL OldIrql;
789 PPOOL_TRACKER_TABLE Table, TableEntry;
790 SIZE_T TableMask, TableSize;
791
792 //
793 // Remove the PROTECTED_POOL flag which is not part of the tag
794 //
795 Key &= ~PROTECTED_POOL;
796
797 //
798 // With WinDBG you can set a tag you want to break on when an allocation is
799 // attempted
800 //
801 if (Key == PoolHitTag) DbgBreakPoint();
802
803 //
804 // There is also an internal flag you can set to break on malformed tags
805 //
806 if (ExStopBadTags) ASSERT(Key & 0xFFFFFF00);
807
808 //
809 // ASSERT on ReactOS features not yet supported
810 //
811 ASSERT(!(PoolType & SESSION_POOL_MASK));
812 ASSERT(KeGetCurrentProcessorNumber() == 0);
813
814 //
815 // Why the double indirection? Because normally this function is also used
816 // when doing session pool allocations, which use another set of tables,
817 // sizes, and masks that live in session pool. Now we don't support session
818 // pool so we only ever use the regular tables, but I'm keeping the code this
819 // way so that the day we DO support session pool, it won't require that
820 // many changes
821 //
822 Table = PoolTrackTable;
823 TableMask = PoolTrackTableMask;
824 TableSize = PoolTrackTableSize;
825 DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize);
826
827 //
828 // Compute the hash for this key, and loop all the possible buckets
829 //
830 Hash = ExpComputeHashForTag(Key, TableMask);
831 Index = Hash;
832 while (TRUE)
833 {
834 //
835 // Do we already have an entry for this tag?
836 //
837 TableEntry = &Table[Hash];
838 if (TableEntry->Key == Key)
839 {
840 //
841 // Increment the counters depending on if this was paged or nonpaged
842 // pool
843 //
844 if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
845 {
846 InterlockedIncrement(&TableEntry->NonPagedAllocs);
847 InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes, NumberOfBytes);
848 return;
849 }
850 InterlockedIncrement(&TableEntry->PagedAllocs);
851 InterlockedExchangeAddSizeT(&TableEntry->PagedBytes, NumberOfBytes);
852 return;
853 }
854
855 //
856 // We don't have an entry yet, but we've found a free bucket for it
857 //
858 if (!(TableEntry->Key) && (Hash != PoolTrackTableSize - 1))
859 {
860 //
861 // We need to hold the lock while creating a new entry, since other
862 // processors might be in this code path as well
863 //
864 ExAcquireSpinLock(&ExpTaggedPoolLock, &OldIrql);
865 if (!PoolTrackTable[Hash].Key)
866 {
867 //
868 // We've won the race, so now create this entry in the bucket
869 //
870 ASSERT(Table[Hash].Key == 0);
871 PoolTrackTable[Hash].Key = Key;
872 TableEntry->Key = Key;
873 }
874 ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);
875
876 //
877 // Now we force the loop to run again, and we should now end up in
878 // the code path above which does the interlocked increments...
879 //
880 continue;
881 }
882
883 //
884 // This path is hit when we don't have an entry, and the current bucket
885 // is full, so we simply try the next one
886 //
887 Hash = (Hash + 1) & TableMask;
888 if (Hash == Index) break;
889 }
890
891 //
892 // And finally this path is hit when all the buckets are full, and we need
893 // some expansion. This path is not yet supported in ReactOS and so we'll
894 // ignore the tag
895 //
896 DPRINT1("Out of pool tag space, ignoring...\n");
897 }
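/*
 * Illustrative pairing (added; 'tseT' is a made-up tag): every insertion at
 * allocation time is mirrored by a removal at free time, keeping the
 * per-tag byte and allocation counts balanced:
 *
 *   ExpInsertPoolTracker('tseT', Size, NonPagedPool);  // on allocate
 *   ExpRemovePoolTracker('tseT', Size, NonPagedPool);  // on free
 */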
898
899 VOID
900 NTAPI
901 INIT_SECTION
902 ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor,
903 IN POOL_TYPE PoolType,
904 IN ULONG PoolIndex,
905 IN ULONG Threshold,
906 IN PVOID PoolLock)
907 {
908 PLIST_ENTRY NextEntry, LastEntry;
909
910 //
911 // Setup the descriptor based on the caller's request
912 //
913 PoolDescriptor->PoolType = PoolType;
914 PoolDescriptor->PoolIndex = PoolIndex;
915 PoolDescriptor->Threshold = Threshold;
916 PoolDescriptor->LockAddress = PoolLock;
917
918 //
919 // Initialize accounting data
920 //
921 PoolDescriptor->RunningAllocs = 0;
922 PoolDescriptor->RunningDeAllocs = 0;
923 PoolDescriptor->TotalPages = 0;
924 PoolDescriptor->TotalBytes = 0;
925 PoolDescriptor->TotalBigPages = 0;
926
927 //
928 // Nothing pending for now
929 //
930 PoolDescriptor->PendingFrees = NULL;
931 PoolDescriptor->PendingFreeDepth = 0;
932
933 //
934 // Loop all the descriptor's allocation lists and initialize them
935 //
936 NextEntry = PoolDescriptor->ListHeads;
937 LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
938 while (NextEntry < LastEntry)
939 {
940 ExpInitializePoolListHead(NextEntry);
941 NextEntry++;
942 }
943
944 //
945 // Note that ReactOS does not support Session Pool Yet
946 //
947 ASSERT(PoolType != PagedPoolSession);
948 }
949
950 VOID
951 NTAPI
952 INIT_SECTION
953 InitializePool(IN POOL_TYPE PoolType,
954 IN ULONG Threshold)
955 {
956 PPOOL_DESCRIPTOR Descriptor;
957 SIZE_T TableSize;
958 ULONG i;
959
960 //
961 // Check what kind of pool this is
962 //
963 if (PoolType == NonPagedPool)
964 {
965 //
966 // Compute the track table size and convert it from a power of two to an
967 // actual byte size
968 //
969 // NOTE: On checked builds, we'll assert if the registry table size was
970 // invalid, while on retail builds we'll just break out of the loop at
971 // that point.
972 //
973 TableSize = min(PoolTrackTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
974 for (i = 0; i < 32; i++)
975 {
976 if (TableSize & 1)
977 {
978 ASSERT((TableSize & ~1) == 0);
979 if (!(TableSize & ~1)) break;
980 }
981 TableSize >>= 1;
982 }
983
984 //
985 // If we hit bit 32, then no size was defined in the registry, so
986 // we'll use the default size of 2048 entries.
987 //
988 // Otherwise, use the size from the registry, as long as it's not
989 // smaller than 64 entries.
990 //
991 if (i == 32)
992 {
993 PoolTrackTableSize = 2048;
994 }
995 else
996 {
997 PoolTrackTableSize = max(1 << i, 64);
998 }
999
1000 //
1001 // Loop trying with the biggest specified size first, and cut it down
1002 // by a power of two each iteration in case not enough memory exists
1003 //
1004 while (TRUE)
1005 {
1006 //
1007 // Do not allow overflow
1008 //
1009 if ((PoolTrackTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_TABLE)))
1010 {
1011 PoolTrackTableSize >>= 1;
1012 continue;
1013 }
1014
1015 //
1016 // Allocate the tracker table and exit the loop if this worked
1017 //
1018 PoolTrackTable = MiAllocatePoolPages(NonPagedPool,
1019 (PoolTrackTableSize + 1) *
1020 sizeof(POOL_TRACKER_TABLE));
1021 if (PoolTrackTable) break;
1022
1023 //
1024 // Otherwise, as long as we're not down to the last bit, keep
1025 // iterating
1026 //
1027 if (PoolTrackTableSize == 1)
1028 {
1029 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1030 TableSize,
1031 0xFFFFFFFF,
1032 0xFFFFFFFF,
1033 0xFFFFFFFF);
1034 }
1035 PoolTrackTableSize >>= 1;
1036 }
1037
1038 //
1039 // Add one entry, compute the hash, and zero the table
1040 //
1041 PoolTrackTableSize++;
1042 PoolTrackTableMask = PoolTrackTableSize - 2;
1043
1044 RtlZeroMemory(PoolTrackTable,
1045 PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
1046
1047 //
1048 // Finally, add the most used tags to speed up those allocations
1049 //
1050 ExpSeedHotTags();
1051
1052 //
1053 // We now do the exact same thing with the tracker table for big pages
1054 //
1055 TableSize = min(PoolBigPageTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
1056 for (i = 0; i < 32; i++)
1057 {
1058 if (TableSize & 1)
1059 {
1060 ASSERT((TableSize & ~1) == 0);
1061 if (!(TableSize & ~1)) break;
1062 }
1063 TableSize >>= 1;
1064 }
1065
1066 //
1067 // For big pages, the default tracker table is 4096 entries, while the
1068 // minimum is still 64
1069 //
1070 if (i == 32)
1071 {
1072 PoolBigPageTableSize = 4096;
1073 }
1074 else
1075 {
1076 PoolBigPageTableSize = max(1 << i, 64);
1077 }
1078
1079 //
1080 // Again, run the exact same loop we ran earlier, but this time for the
1081 // big pool tracker instead
1082 //
1083 while (TRUE)
1084 {
1085 if ((PoolBigPageTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_BIG_PAGES)))
1086 {
1087 PoolBigPageTableSize >>= 1;
1088 continue;
1089 }
1090
1091 PoolBigPageTable = MiAllocatePoolPages(NonPagedPool,
1092 PoolBigPageTableSize *
1093 sizeof(POOL_TRACKER_BIG_PAGES));
1094 if (PoolBigPageTable) break;
1095
1096 if (PoolBigPageTableSize == 1)
1097 {
1098 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1099 TableSize,
1100 0xFFFFFFFF,
1101 0xFFFFFFFF,
1102 0xFFFFFFFF);
1103 }
1104
1105 PoolBigPageTableSize >>= 1;
1106 }
1107
1108 //
1109 // An extra entry is not needed for the big pool tracker, so just
1110 // compute the hash and zero it
1111 //
1112 PoolBigPageTableHash = PoolBigPageTableSize - 1;
1113 RtlZeroMemory(PoolBigPageTable,
1114 PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
1115 for (i = 0; i < PoolBigPageTableSize; i++) PoolBigPageTable[i].Va = (PVOID)1;
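//
// Note (added): (PVOID)1 is POOL_BIG_TABLE_ENTRY_FREE with a zero address,
// so every slot starts out marked as free for ExpAddTagForBigPages
//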
1116
1117 //
1118 // During development, print this out so we can see what's happening
1119 //
1120 DPRINT("EXPOOL: Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
1121 PoolTrackTable, PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
1122 DPRINT("EXPOOL: Big Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
1123 PoolBigPageTable, PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
1124
1125 //
1126 // Insert the generic tracker for all of big pool
1127 //
1128 ExpInsertPoolTracker('looP',
1129 ROUND_TO_PAGES(PoolBigPageTableSize *
1130 sizeof(POOL_TRACKER_BIG_PAGES)),
1131 NonPagedPool);
1132
1133 //
1134 // No support for NUMA systems at this time
1135 //
1136 ASSERT(KeNumberNodes == 1);
1137
1138 //
1139 // Initialize the tag spinlock
1140 //
1141 KeInitializeSpinLock(&ExpTaggedPoolLock);
1142
1143 //
1144 // Initialize the nonpaged pool descriptor
1145 //
1146 PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
1147 ExInitializePoolDescriptor(PoolVector[NonPagedPool],
1148 NonPagedPool,
1149 0,
1150 Threshold,
1151 NULL);
1152 }
1153 else
1154 {
1155 //
1156 // No support for NUMA systems at this time
1157 //
1158 ASSERT(KeNumberNodes == 1);
1159
1160 //
1161 // Allocate the pool descriptor
1162 //
1163 Descriptor = ExAllocatePoolWithTag(NonPagedPool,
1164 sizeof(KGUARDED_MUTEX) +
1165 sizeof(POOL_DESCRIPTOR),
1166 'looP');
1167 if (!Descriptor)
1168 {
1169 //
1170 // This is really bad...
1171 //
1172 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1173 0,
1174 -1,
1175 -1,
1176 -1);
1177 }
1178
1179 //
1180 // Setup the vector and guarded mutex for paged pool
1181 //
1182 PoolVector[PagedPool] = Descriptor;
1183 ExpPagedPoolMutex = (PKGUARDED_MUTEX)(Descriptor + 1);
1184 ExpPagedPoolDescriptor[0] = Descriptor;
1185 KeInitializeGuardedMutex(ExpPagedPoolMutex);
1186 ExInitializePoolDescriptor(Descriptor,
1187 PagedPool,
1188 0,
1189 Threshold,
1190 ExpPagedPoolMutex);
1191
1192 //
1193 // Insert the generic tracker for all of nonpaged pool
1194 //
1195 ExpInsertPoolTracker('looP',
1196 ROUND_TO_PAGES(PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE)),
1197 NonPagedPool);
1198 }
1199 }
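/*
 * Worked example (added): with the default of 2048 entries the code above
 * allocates 2048 + 1 = 2049 POOL_TRACKER_TABLE entries, then sets
 * PoolTrackTableSize to 2049 and PoolTrackTableMask to 2047 (0x7FF).
 * Probes always AND with the mask, so regular tags live in buckets 0-2047
 * and the extra final entry remains reserved as an overflow slot.
 */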
1200
1201 FORCEINLINE
1202 KIRQL
1203 ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
1204 {
1205 //
1206 // Check if this is nonpaged pool
1207 //
1208 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1209 {
1210 //
1211 // Use the queued spin lock
1212 //
1213 return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
1214 }
1215 else
1216 {
1217 //
1218 // Use the guarded mutex
1219 //
1220 KeAcquireGuardedMutex(Descriptor->LockAddress);
1221 return APC_LEVEL;
1222 }
1223 }
1224
1225 FORCEINLINE
1226 VOID
1227 ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor,
1228 IN KIRQL OldIrql)
1229 {
1230 //
1231 // Check if this is nonpaged pool
1232 //
1233 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1234 {
1235 //
1236 // Use the queued spin lock
1237 //
1238 KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
1239 }
1240 else
1241 {
1242 //
1243 // Use the guarded mutex
1244 //
1245 KeReleaseGuardedMutex(Descriptor->LockAddress);
1246 }
1247 }
1248
1249 VOID
1250 NTAPI
1251 ExpGetPoolTagInfoTarget(IN PKDPC Dpc,
1252 IN PVOID DeferredContext,
1253 IN PVOID SystemArgument1,
1254 IN PVOID SystemArgument2)
1255 {
1256 PPOOL_DPC_CONTEXT Context = DeferredContext;
1257 UNREFERENCED_PARAMETER(Dpc);
1258 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
1259
1260 //
1261 // Make sure we win the race, and if we did, copy the data atomically
1262 //
1263 if (KeSignalCallDpcSynchronize(SystemArgument2))
1264 {
1265 RtlCopyMemory(Context->PoolTrackTable,
1266 PoolTrackTable,
1267 Context->PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
1268
1269 //
1270 // This is here because ReactOS does not yet support expansion
1271 //
1272 ASSERT(Context->PoolTrackTableSizeExpansion == 0);
1273 }
1274
1275 //
1276 // Regardless of whether we won or not, we must now synchronize and then
1277 // decrement the barrier since this is one more processor that has completed
1278 // the callback.
1279 //
1280 KeSignalCallDpcSynchronize(SystemArgument2);
1281 KeSignalCallDpcDone(SystemArgument1);
1282 }
1283
1284 NTSTATUS
1285 NTAPI
1286 ExGetPoolTagInfo(IN PSYSTEM_POOLTAG_INFORMATION SystemInformation,
1287 IN ULONG SystemInformationLength,
1288 IN OUT PULONG ReturnLength OPTIONAL)
1289 {
1290 ULONG TableSize, CurrentLength;
1291 ULONG EntryCount;
1292 NTSTATUS Status = STATUS_SUCCESS;
1293 PSYSTEM_POOLTAG TagEntry;
1294 PPOOL_TRACKER_TABLE Buffer, TrackerEntry;
1295 POOL_DPC_CONTEXT Context;
1296 ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
1297
1298 //
1299 // Keep track of how much data the caller's buffer must hold
1300 //
1301 CurrentLength = FIELD_OFFSET(SYSTEM_POOLTAG_INFORMATION, TagInfo);
1302
1303 //
1304 // Initialize the caller's buffer
1305 //
1306 TagEntry = &SystemInformation->TagInfo[0];
1307 SystemInformation->Count = 0;
1308
1309 //
1310 // Capture the number of entries, and the total size needed to make a copy
1311 // of the table
1312 //
1313 EntryCount = (ULONG)PoolTrackTableSize;
1314 TableSize = EntryCount * sizeof(POOL_TRACKER_TABLE);
1315
1316 //
1317 // Allocate the "Generic DPC" temporary buffer
1318 //
1319 Buffer = ExAllocatePoolWithTag(NonPagedPool, TableSize, 'ofnI');
1320 if (!Buffer) return STATUS_INSUFFICIENT_RESOURCES;
1321
1322 //
1323 // Do a "Generic DPC" to atomically retrieve the tag and allocation data
1324 //
1325 Context.PoolTrackTable = Buffer;
1326 Context.PoolTrackTableSize = PoolTrackTableSize;
1327 Context.PoolTrackTableExpansion = NULL;
1328 Context.PoolTrackTableSizeExpansion = 0;
1329 KeGenericCallDpc(ExpGetPoolTagInfoTarget, &Context);
1330
1331 //
1332 // Now parse the results
1333 //
1334 for (TrackerEntry = Buffer; TrackerEntry < (Buffer + EntryCount); TrackerEntry++)
1335 {
1336 //
1337 // If the entry is empty, skip it
1338 //
1339 if (!TrackerEntry->Key) continue;
1340
1341 //
1342 // Otherwise, add one more entry to the caller's buffer, and ensure that
1343 // enough space has been allocated in it
1344 //
1345 SystemInformation->Count++;
1346 CurrentLength += sizeof(*TagEntry);
1347 if (SystemInformationLength < CurrentLength)
1348 {
1349 //
1350 // The caller's buffer is too small, so set a failure code. The
1351 // caller will know the count, as well as how much space is needed.
1352 //
1353 // We do NOT break out of the loop, because we want to keep incrementing
1354 // the Count as well as CurrentLength so that the caller can know the
1355 // final numbers
1356 //
1357 Status = STATUS_INFO_LENGTH_MISMATCH;
1358 }
1359 else
1360 {
1361 //
1362 // Small sanity check that our accounting is working correctly
1363 //
1364 ASSERT(TrackerEntry->PagedAllocs >= TrackerEntry->PagedFrees);
1365 ASSERT(TrackerEntry->NonPagedAllocs >= TrackerEntry->NonPagedFrees);
1366
1367 //
1368 // Return the data into the caller's buffer
1369 //
1370 TagEntry->TagUlong = TrackerEntry->Key;
1371 TagEntry->PagedAllocs = TrackerEntry->PagedAllocs;
1372 TagEntry->PagedFrees = TrackerEntry->PagedFrees;
1373 TagEntry->PagedUsed = TrackerEntry->PagedBytes;
1374 TagEntry->NonPagedAllocs = TrackerEntry->NonPagedAllocs;
1375 TagEntry->NonPagedFrees = TrackerEntry->NonPagedFrees;
1376 TagEntry->NonPagedUsed = TrackerEntry->NonPagedBytes;
1377 TagEntry++;
1378 }
1379 }
1380
1381 //
1382 // Free the "Generic DPC" temporary buffer, return the buffer length and status
1383 //
1384 ExFreePoolWithTag(Buffer, 'ofnI');
1385 if (ReturnLength) *ReturnLength = CurrentLength;
1386 return Status;
1387 }
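/*
 * Usage sketch (added): this routine backs the SystemPoolTagInformation
 * class of NtQuerySystemInformation. Callers typically probe for the size
 * first, roughly:
 *
 *   ULONG Len = 0;
 *   Status = NtQuerySystemInformation(SystemPoolTagInformation,
 *                                     Buffer, BufferLength, &Len);
 *   // on STATUS_INFO_LENGTH_MISMATCH, retry with a Len-byte buffer
 *
 * which is why the loop above keeps updating Count and CurrentLength
 * instead of bailing out early.
 */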
1388
1389 BOOLEAN
1390 NTAPI
1391 ExpAddTagForBigPages(IN PVOID Va,
1392 IN ULONG Key,
1393 IN ULONG NumberOfPages,
1394 IN POOL_TYPE PoolType)
1395 {
1396 ULONG Hash, i = 0;
1397 PVOID OldVa;
1398 KIRQL OldIrql;
1399 SIZE_T TableSize;
1400 PPOOL_TRACKER_BIG_PAGES Entry, EntryEnd, EntryStart;
1401 ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
1402 ASSERT(!(PoolType & SESSION_POOL_MASK));
1403
1404 //
1405 // As the table is expandable, these values must only be read after acquiring
1406 // the lock to avoid a torn access during an expansion
1407 //
1408 Hash = ExpComputePartialHashForAddress(Va);
1409 KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
1410 Hash &= PoolBigPageTableHash;
1411 TableSize = PoolBigPageTableSize;
1412
1413 //
1414 // We loop from the current hash bucket to the end of the table, and then
1415 // rollover to hash bucket 0 and keep going from there. If we return back
1416 // to the beginning, then we attempt expansion at the bottom of the loop
1417 //
1418 EntryStart = Entry = &PoolBigPageTable[Hash];
1419 EntryEnd = &PoolBigPageTable[TableSize];
1420 do
1421 {
1422 //
1423 // Make sure that this is a free entry and attempt to atomically make the
1424 // entry busy now
1425 //
1426 OldVa = Entry->Va;
1427 if (((ULONG_PTR)OldVa & POOL_BIG_TABLE_ENTRY_FREE) &&
1428 (InterlockedCompareExchangePointer(&Entry->Va, Va, OldVa) == OldVa))
1429 {
1430 //
1431 // We now own this entry, write down the size and the pool tag
1432 //
1433 Entry->Key = Key;
1434 Entry->NumberOfPages = NumberOfPages;
1435
1436 //
1437 // Add one more entry to the count, and see if we're getting within
1438 // 25% of the table size, at which point we'll do an expansion now
1439 // to avoid blocking too hard later on.
1440 //
1441 // Note that we only do this once we have lost the race, or failed to
1442 // find a free entry, at least 16 times in a row, which implies a
1443 // massive number of concurrent big pool allocations.
1444 //
1445 InterlockedIncrementUL(&ExpPoolBigEntriesInUse);
1446 if ((i >= 16) && (ExpPoolBigEntriesInUse > (TableSize / 4)))
1447 {
1448 DPRINT("Should attempt expansion since we now have %lu entries\n",
1449 ExpPoolBigEntriesInUse);
1450 }
1451
1452 //
1453 // We have our entry, return
1454 //
1455 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1456 return TRUE;
1457 }
1458
1459 //
1460 // We don't have our entry yet, so keep trying, making the entry list
1461 // circular if we reach the last entry. We'll eventually break out of
1462 // the loop once we've rolled over and returned back to our original
1463 // hash bucket
1464 //
1465 i++;
1466 if (++Entry >= EntryEnd) Entry = &PoolBigPageTable[0];
1467 } while (Entry != EntryStart);
1468
1469 //
1470 // This means there's no free hash buckets whatsoever, so we would now have
1471 // to attempt expanding the table
1472 //
1473 DPRINT1("Big pool expansion needed, not implemented!\n");
1474 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1475 return FALSE;
1476 }
1477
1478 ULONG
1479 NTAPI
1480 ExpFindAndRemoveTagBigPages(IN PVOID Va,
1481 OUT PULONG_PTR BigPages,
1482 IN POOL_TYPE PoolType)
1483 {
1484 BOOLEAN FirstTry = TRUE;
1485 SIZE_T TableSize;
1486 KIRQL OldIrql;
1487 ULONG PoolTag, Hash;
1488 PPOOL_TRACKER_BIG_PAGES Entry;
1489 ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
1490 ASSERT(!(PoolType & SESSION_POOL_MASK));
1491
1492 //
1493 // As the table is expandable, these values must only be read after acquiring
1494 // the lock to avoid a torn access during an expansion
1495 //
1496 Hash = ExpComputePartialHashForAddress(Va);
1497 KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
1498 Hash &= PoolBigPageTableHash;
1499 TableSize = PoolBigPageTableSize;
1500
1501 //
1502 // Loop while trying to find this big page allocation
1503 //
1504 while (PoolBigPageTable[Hash].Va != Va)
1505 {
1506 //
1507 // Increment the size until we go past the end of the table
1508 //
1509 if (++Hash >= TableSize)
1510 {
1511 //
1512 // Is this the second time we've tried?
1513 //
1514 if (!FirstTry)
1515 {
1516 //
1517 // This means it was never inserted into the pool table and it
1518 // received the special "BIG" tag -- return that and return 0
1519 // so that the code can ask Mm for the page count instead
1520 //
1521 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1522 *BigPages = 0;
1523 return ' GIB';
1524 }
1525
1526 //
1527 // The first time this happens, reset the hash index and try again
1528 //
1529 Hash = 0;
1530 FirstTry = FALSE;
1531 }
1532 }
1533
1534 //
1535 // Now capture all the information we need from the entry, since after we
1536 // release the lock, the data can change
1537 //
1538 Entry = &PoolBigPageTable[Hash];
1539 *BigPages = Entry->NumberOfPages;
1540 PoolTag = Entry->Key;
1541
1542 //
1543 // Set the free bit, and decrement the number of allocations. Finally, release
1544 // the lock and return the tag that was located
1545 //
1546 InterlockedIncrement((PLONG)&Entry->Va);
1547 InterlockedDecrementUL(&ExpPoolBigEntriesInUse);
1548 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1549 return PoolTag;
1550 }
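/*
 * Note (added): the InterlockedIncrement on Entry->Va above is the "set the
 * free bit" operation. Big pool addresses are page-aligned, so bit 0
 * (POOL_BIG_TABLE_ENTRY_FREE) is clear beforehand, and the increment turns,
 * say, 0xF0000000 into 0xF0000001, which ExpAddTagForBigPages recognizes as
 * a reusable slot.
 */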
1551
1552 VOID
1553 NTAPI
1554 ExQueryPoolUsage(OUT PULONG PagedPoolPages,
1555 OUT PULONG NonPagedPoolPages,
1556 OUT PULONG PagedPoolAllocs,
1557 OUT PULONG PagedPoolFrees,
1558 OUT PULONG PagedPoolLookasideHits,
1559 OUT PULONG NonPagedPoolAllocs,
1560 OUT PULONG NonPagedPoolFrees,
1561 OUT PULONG NonPagedPoolLookasideHits)
1562 {
1563 ULONG i;
1564 PPOOL_DESCRIPTOR PoolDesc;
1565
1566 //
1567 // Assume all failures
1568 //
1569 *PagedPoolPages = 0;
1570 *PagedPoolAllocs = 0;
1571 *PagedPoolFrees = 0;
1572
1573 //
1574 // Tally up the totals for all the paged pools
1575 //
1576 for (i = 0; i < ExpNumberOfPagedPools + 1; i++)
1577 {
1578 PoolDesc = ExpPagedPoolDescriptor[i];
1579 *PagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1580 *PagedPoolAllocs += PoolDesc->RunningAllocs;
1581 *PagedPoolFrees += PoolDesc->RunningDeAllocs;
1582 }
1583
1584 //
1585 // The first non-paged pool has a hardcoded well-known descriptor name
1586 //
1587 PoolDesc = &NonPagedPoolDescriptor;
1588 *NonPagedPoolPages = PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1589 *NonPagedPoolAllocs = PoolDesc->RunningAllocs;
1590 *NonPagedPoolFrees = PoolDesc->RunningDeAllocs;
1591
1592 //
1593 // If the system has more than one non-paged pool, copy the other descriptor
1594 // totals as well
1595 //
1596 #if 0
1597 if (ExpNumberOfNonPagedPools > 1)
1598 {
1599 for (i = 0; i < ExpNumberOfNonPagedPools; i++)
1600 {
1601 PoolDesc = ExpNonPagedPoolDescriptor[i];
1602 *NonPagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1603 *NonPagedPoolAllocs += PoolDesc->RunningAllocs;
1604 *NonPagedPoolFrees += PoolDesc->RunningDeAllocs;
1605 }
1606 }
1607 #endif
1608
1609 //
1610 // FIXME: Not yet supported
1611 //
1612 *NonPagedPoolLookasideHits += 0;
1613 *PagedPoolLookasideHits += 0;
1614 }
1615
1616 VOID
1617 NTAPI
1618 ExReturnPoolQuota(IN PVOID P)
1619 {
1620 PPOOL_HEADER Entry;
1621 POOL_TYPE PoolType;
1622 USHORT BlockSize;
1623 PEPROCESS Process;
1624
1625 if ((ExpPoolFlags & POOL_FLAG_SPECIAL_POOL) &&
1626 (MmIsSpecialPoolAddress(P)))
1627 {
1628 return;
1629 }
1630
1631 Entry = P;
1632 Entry--;
1633 ASSERT((ULONG_PTR)Entry % POOL_BLOCK_SIZE == 0);
1634
1635 PoolType = Entry->PoolType - 1;
1636 BlockSize = Entry->BlockSize;
1637
1638 if (PoolType & QUOTA_POOL_MASK)
1639 {
1640 Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
1641 ASSERT(Process != NULL);
1642 if (Process)
1643 {
1644 if (Process->Pcb.Header.Type != ProcessObject)
1645 {
1646 DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
1647 Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
1648 KeBugCheckEx(BAD_POOL_CALLER,
1649 0x0D,
1650 (ULONG_PTR)P,
1651 Entry->PoolTag,
1652 (ULONG_PTR)Process);
1653 }
1654 ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
1655 PsReturnPoolQuota(Process,
1656 PoolType & BASE_POOL_TYPE_MASK,
1657 BlockSize * POOL_BLOCK_SIZE);
1658 ObDereferenceObject(Process);
1659 }
1660 }
1661 }
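/*
 * Note (added): for quota-charged allocations, the owning EPROCESS pointer
 * is stashed in the last pointer-sized slot of the block, which is what the
 * ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] expression above reads back and
 * clears before returning the quota.
 */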
1662
1663 /* PUBLIC FUNCTIONS ***********************************************************/
1664
1665 /*
1666 * @implemented
1667 */
1668 PVOID
1669 NTAPI
1670 ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
1671 IN SIZE_T NumberOfBytes,
1672 IN ULONG Tag)
1673 {
1674 PPOOL_DESCRIPTOR PoolDesc;
1675 PLIST_ENTRY ListHead;
1676 PPOOL_HEADER Entry, NextEntry, FragmentEntry;
1677 KIRQL OldIrql;
1678 USHORT BlockSize, i;
1679 ULONG OriginalType;
1680 PKPRCB Prcb = KeGetCurrentPrcb();
1681 PGENERAL_LOOKASIDE LookasideList;
1682
1683 //
1684 // Some sanity checks
1685 //
1686 ASSERT(Tag != 0);
1687 ASSERT(Tag != ' GIB');
1688 ASSERT(NumberOfBytes != 0);
1689 ExpCheckPoolIrqlLevel(PoolType, NumberOfBytes, NULL);
1690
1691 //
1692 // Not supported in ReactOS
1693 //
1694 ASSERT(!(PoolType & SESSION_POOL_MASK));
1695
1696 //
1697 // Check if verifier or special pool is enabled
1698 //
1699 if (ExpPoolFlags & (POOL_FLAG_VERIFIER | POOL_FLAG_SPECIAL_POOL))
1700 {
1701 //
1702 // For verifier, we should call the verification routine
1703 //
1704 if (ExpPoolFlags & POOL_FLAG_VERIFIER)
1705 {
1706 DPRINT1("Driver Verifier is not yet supported\n");
1707 }
1708
1709 //
1710 // For special pool, we check if this is a suitable allocation and do
1711 // the special allocation if needed
1712 //
1713 if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
1714 {
1715 //
1716 // Check if this is a special pool allocation
1717 //
1718 if (MmUseSpecialPool(NumberOfBytes, Tag))
1719 {
1720 //
1721 // Try to allocate using special pool
1722 //
1723 Entry = MmAllocateSpecialPool(NumberOfBytes, Tag, PoolType, 2);
1724 if (Entry) return Entry;
1725 }
1726 }
1727 }
1728
1729 //
1730 // Get the pool type and its corresponding vector for this request
1731 //
1732 OriginalType = PoolType;
1733 PoolType = PoolType & BASE_POOL_TYPE_MASK;
1734 PoolDesc = PoolVector[PoolType];
1735 ASSERT(PoolDesc != NULL);
1736
1737 //
1738 // Check if this is a big page allocation
1739 //
1740 if (NumberOfBytes > POOL_MAX_ALLOC)
1741 {
1742 //
1743 // Allocate pages for it
1744 //
1745 Entry = MiAllocatePoolPages(OriginalType, NumberOfBytes);
1746 if (!Entry)
1747 {
1748 #if DBG
1749 //
1750 // Out of memory, display current consumption
1751 //
1752 MiDumpPoolConsumers(FALSE);
1753 #endif
1754
1755 //
1756 // Must succeed pool is deprecated, but still supported. These allocation
1757 // failures must cause an immediate bugcheck
1758 //
1759 if (OriginalType & MUST_SUCCEED_POOL_MASK)
1760 {
1761 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1762 NumberOfBytes,
1763 NonPagedPoolDescriptor.TotalPages,
1764 NonPagedPoolDescriptor.TotalBigPages,
1765 0);
1766 }
1767
1768 //
1769 // Internal debugging
1770 //
1771 ExPoolFailures++;
1772
1773 //
1774 // This flag requests printing failures, and can also further specify
1775 // breaking on failures
1776 //
1777 if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
1778 {
1779 DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
1780 NumberOfBytes,
1781 OriginalType);
1782 if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
1783 }
1784
1785 //
1786 // Finally, this flag requests an exception, which we are more than
1787 // happy to raise!
1788 //
1789 if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
1790 {
1791 ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
1792 }
1793
1794 return NULL;
1795 }
1796
1797 //
1798 // Increment required counters
1799 //
1800 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
1801 (LONG)BYTES_TO_PAGES(NumberOfBytes));
1802 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, NumberOfBytes);
1803 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
1804
1805 //
1806 // Add a tag for the big page allocation and switch to the generic "BIG"
1807 // tag if we failed to do so, then insert a tracker for this allocation.
1808 //
1809 if (!ExpAddTagForBigPages(Entry,
1810 Tag,
1811 (ULONG)BYTES_TO_PAGES(NumberOfBytes),
1812 OriginalType))
1813 {
1814 Tag = ' GIB';
1815 }
1816 ExpInsertPoolTracker(Tag, ROUND_TO_PAGES(NumberOfBytes), OriginalType);
1817 return Entry;
1818 }
1819
1820 //
1821 // Should never request 0 bytes from the pool, but since so many drivers do
1822 // it, we'll just assume they want 1 byte, based on NT's similar behavior
1823 //
1824 if (!NumberOfBytes) NumberOfBytes = 1;
1825
1826 //
1827 // A pool allocation is defined by its data, a linked list to connect it to
1828 // the free list (if necessary), and a pool header to store accounting info.
1829 // Calculate this size, then convert it into a block size (units of pool
1830 // headers)
1831 //
1832 // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
1833 // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
1834 // the direct allocation of pages.
1835 //
1836 i = (USHORT)((NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1))
1837 / POOL_BLOCK_SIZE);
1838 ASSERT(i < POOL_LISTS_PER_PAGE);
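//
// Worked example (added): on a 32-bit build where POOL_BLOCK_SIZE and
// sizeof(POOL_HEADER) are both 8, a 100-byte request yields
// i = (100 + 8 + 7) / 8 = 14 blocks, i.e. 112 bytes covering the header
// plus the caller's data, rounded up to the block granularity
//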
1839
1840 //
1841 // Handle lookaside list optimization for both paged and nonpaged pool
1842 //
1843 if (i <= NUMBER_POOL_LOOKASIDE_LISTS)
1844 {
1845 //
1846 // Try popping it from the per-CPU lookaside list
1847 //
1848 LookasideList = (PoolType == PagedPool) ?
1849 Prcb->PPPagedLookasideList[i - 1].P :
1850 Prcb->PPNPagedLookasideList[i - 1].P;
1851 LookasideList->TotalAllocates++;
1852 Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
1853 if (!Entry)
1854 {
1855 //
1856 // We failed, try popping it from the global list
1857 //
1858 LookasideList = (PoolType == PagedPool) ?
1859 Prcb->PPPagedLookasideList[i - 1].L :
1860 Prcb->PPNPagedLookasideList[i - 1].L;
1861 LookasideList->TotalAllocates++;
1862 Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
1863 }
1864
1865 //
1866 // If we were able to pop it, update the accounting and return the block
1867 //
1868 if (Entry)
1869 {
1870 LookasideList->AllocateHits++;
1871
1872 //
1873 // Get the real entry, write down its pool type, and track it
1874 //
1875 Entry--;
1876 Entry->PoolType = OriginalType + 1;
1877 ExpInsertPoolTracker(Tag,
1878 Entry->BlockSize * POOL_BLOCK_SIZE,
1879 OriginalType);
1880
1881 //
1882 // Return the pool allocation
1883 //
1884 Entry->PoolTag = Tag;
1885 (POOL_FREE_BLOCK(Entry))->Flink = NULL;
1886 (POOL_FREE_BLOCK(Entry))->Blink = NULL;
1887 return POOL_FREE_BLOCK(Entry);
1888 }
1889 }
1890
1891 //
1892 // Loop in the free lists looking for a block of this size. Start with the
1893 // list optimized for this kind of size lookup
1894 //
1895 ListHead = &PoolDesc->ListHeads[i];
1896 do
1897 {
1898 //
1899 // Are there any free entries available on this list?
1900 //
1901 if (!ExpIsPoolListEmpty(ListHead))
1902 {
1903 //
1904 // Acquire the pool lock now
1905 //
1906 OldIrql = ExLockPool(PoolDesc);
1907
1908 //
1909 // And make sure the list still has entries
1910 //
1911 if (ExpIsPoolListEmpty(ListHead))
1912 {
1913 //
1914 // Someone raced us (and won) before we had a chance to acquire
1915 // the lock.
1916 //
1917 // Try again!
1918 //
1919 ExUnlockPool(PoolDesc, OldIrql);
1920 continue;
1921 }
1922
1923 //
1924 // Remove a free entry from the list
1925 // Note that due to the way we insert free blocks into multiple lists
1926 // there is a guarantee that any block on this list will either be
1927 // of the correct size, or perhaps larger.
1928 //
1929 ExpCheckPoolLinks(ListHead);
1930 Entry = POOL_ENTRY(ExpRemovePoolHeadList(ListHead));
1931 ExpCheckPoolLinks(ListHead);
1932 ExpCheckPoolBlocks(Entry);
1933 ASSERT(Entry->BlockSize >= i);
1934 ASSERT(Entry->PoolType == 0);
1935
1936 //
1937 // Check if this block is larger than what we need. The block could
1938 // not possibly be smaller, due to the reason explained above (and
1939 // we would've asserted on a checked build if this was the case).
1940 //
1941 if (Entry->BlockSize != i)
1942 {
1943 //
1944 // Is there an entry before this one?
1945 //
1946 if (Entry->PreviousSize == 0)
1947 {
1948 //
1949 // There isn't anyone before us, so take the next block and
1950 // turn it into a fragment that contains the leftover data
1951 // that we don't need to satisfy the caller's request
1952 //
1953 FragmentEntry = POOL_BLOCK(Entry, i);
1954 FragmentEntry->BlockSize = Entry->BlockSize - i;
1955
1956 //
1957 // And make it point back to us
1958 //
1959 FragmentEntry->PreviousSize = i;
1960
1961 //
1962 // Now get the block that follows the new fragment and check
1963 // if it's still on the same page as us (and not at the end)
1964 //
1965 NextEntry = POOL_NEXT_BLOCK(FragmentEntry);
1966 if (PAGE_ALIGN(NextEntry) != NextEntry)
1967 {
1968 //
1969 // Adjust this next block to point to our newly created
1970 // fragment block
1971 //
1972 NextEntry->PreviousSize = FragmentEntry->BlockSize;
1973 }
1974 }
1975 else
1976 {
1977 //
1978 // There is a free entry before us, which we know is smaller
1979 // so we'll make this entry the fragment instead
1980 //
1981 FragmentEntry = Entry;
1982
1983 //
1984 // And then we'll remove from it the actual size required.
1985 // Now the entry is a leftover free fragment
1986 //
1987 Entry->BlockSize -= i;
1988
1989 //
1990 // Now let's go to the next entry after the fragment (which
1991 // used to point to our original free entry) and make it
1992 // reference the new fragment entry instead.
1993 //
1994 // This is the entry that will actually end up holding the
1995 // allocation!
1996 //
1997 Entry = POOL_NEXT_BLOCK(Entry);
1998 Entry->PreviousSize = FragmentEntry->BlockSize;
1999
2000 //
2001 // And now let's go to the entry after that one and check if
2002 // it's still on the same page, and not at the end
2003 //
2004 NextEntry = POOL_BLOCK(Entry, i);
2005 if (PAGE_ALIGN(NextEntry) != NextEntry)
2006 {
2007 //
2008 // Make it reference the allocation entry
2009 //
2010 NextEntry->PreviousSize = i;
2011 }
2012 }
2013
2014 //
2015 // Now our (allocation) entry is the right size
2016 //
2017 Entry->BlockSize = i;
2018
2019 //
2020 // And the next entry is now the free fragment which contains
2021 // the remaining difference between how big the original entry
2022 // was, and the actual size the caller needs/requested.
2023 //
2024 FragmentEntry->PoolType = 0;
2025 BlockSize = FragmentEntry->BlockSize;
2026
2027 //
2028 // Now check if enough free bytes remained for us to have a
2029 // "full" entry, which contains enough bytes for a linked list
2030 // and thus can be used for allocations (up to 8 bytes...)
2031 //
2032 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
2033 if (BlockSize != 1)
2034 {
2035 //
2036 // Insert the free entry into the free list for this size
2037 //
2038 ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
2039 POOL_FREE_BLOCK(FragmentEntry));
2040 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
2041 }
2042 }
2043
2044 //
2045 // We have found an entry for this allocation, so set the pool type
2046 // and release the lock since we're done
2047 //
2048 Entry->PoolType = OriginalType + 1;
2049 ExpCheckPoolBlocks(Entry);
2050 ExUnlockPool(PoolDesc, OldIrql);
2051
2052 //
2053 // Increment required counters
2054 //
2055 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
2056 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
2057
2058 //
2059 // Track this allocation
2060 //
2061 ExpInsertPoolTracker(Tag,
2062 Entry->BlockSize * POOL_BLOCK_SIZE,
2063 OriginalType);
2064
2065 //
2066 // Return the pool allocation
2067 //
2068 Entry->PoolTag = Tag;
2069 (POOL_FREE_BLOCK(Entry))->Flink = NULL;
2070 (POOL_FREE_BLOCK(Entry))->Blink = NULL;
2071 return POOL_FREE_BLOCK(Entry);
2072 }
2073 } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);
2074
2075 //
2076 // There were no free entries left, so we have to allocate a new fresh page
2077 //
2078 Entry = MiAllocatePoolPages(OriginalType, PAGE_SIZE);
2079 if (!Entry)
2080 {
2081 #if DBG
2082 //
2083 // Out of memory, display current consumption
2084 //
2085 MiDumpPoolConsumers(FALSE);
2086 #endif
2087
2088 //
2089 // Must-succeed pool is deprecated, but still supported. These allocation
2090 // failures must cause an immediate bugcheck
2091 //
2092 if (OriginalType & MUST_SUCCEED_POOL_MASK)
2093 {
2094 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
2095 PAGE_SIZE,
2096 NonPagedPoolDescriptor.TotalPages,
2097 NonPagedPoolDescriptor.TotalBigPages,
2098 0);
2099 }
2100
2101 //
2102 // Internal debugging
2103 //
2104 ExPoolFailures++;
2105
2106 //
2107 // This flag requests printing failures, and can also further specify
2108 // breaking on failures
2109 //
2110 if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
2111 {
2112 DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
2113 NumberOfBytes,
2114 OriginalType);
2115 if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
2116 }
2117
2118 //
2119 // Finally, this flag requests an exception, which we are more than
2120 // happy to raise!
2121 //
2122 if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
2123 {
2124 ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
2125 }
2126
2127 //
2128 // Return NULL to the caller in all other cases
2129 //
2130 return NULL;
2131 }
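
//
// Caller-side sketch (tag and size are illustrative only): with
// POOL_RAISE_IF_ALLOCATION_FAILURE set, a failed allocation raises
// STATUS_INSUFFICIENT_RESOURCES instead of returning NULL, so such a
// call belongs inside SEH:
//
//     _SEH2_TRY
//     {
//         Buffer = ExAllocatePoolWithTag(NonPagedPool |
//                                        POOL_RAISE_IF_ALLOCATION_FAILURE,
//                                        0x20,
//                                        'tseT');
//     }
//     _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
//     {
//         Buffer = NULL;
//     }
//     _SEH2_END;
//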
2132
2133 //
2134 // Setup the entry data
2135 //
2136 Entry->Ulong1 = 0;
2137 Entry->BlockSize = i;
2138 Entry->PoolType = OriginalType + 1;
2139
2140 //
2141 // This page will have two entries -- one for the allocation (which we just
2142 // created above), and one for the remaining free bytes, which we're about
2143 // to create now. The free bytes are the whole page minus what was allocated
2144 // and then converted into units of block headers.
2145 //
2146 BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i;
2147 FragmentEntry = POOL_BLOCK(Entry, i);
2148 FragmentEntry->Ulong1 = 0;
2149 FragmentEntry->BlockSize = BlockSize;
2150 FragmentEntry->PreviousSize = i;
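
//
// Concretely (hypothetical x86 values: PAGE_SIZE 4096, POOL_BLOCK_SIZE 8):
// a fresh page holds 512 units, so a 5-unit allocation leaves a fragment
// with BlockSize 507 and PreviousSize 5, starting 40 bytes into the page.
//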
2151
2152 //
2153 // Increment required counters
2154 //
2155 InterlockedIncrement((PLONG)&PoolDesc->TotalPages);
2156 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
2157
2158 //
2159 // Now check if enough free bytes remained for us to have a "full" entry,
2160 // which contains enough bytes for a linked list and thus can be used for
2161 // allocations (up to 8 bytes...)
2162 //
2163 if (FragmentEntry->BlockSize != 1)
2164 {
2165 //
2166 // Excellent -- acquire the pool lock
2167 //
2168 OldIrql = ExLockPool(PoolDesc);
2169
2170 //
2171 // And insert the free entry into the free list for this block size
2172 //
2173 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
2174 ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
2175 POOL_FREE_BLOCK(FragmentEntry));
2176 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
2177
2178 //
2179 // Release the pool lock
2180 //
2181 ExpCheckPoolBlocks(Entry);
2182 ExUnlockPool(PoolDesc, OldIrql);
2183 }
2184 else
2185 {
2186 //
2187 // Simply do a sanity check
2188 //
2189 ExpCheckPoolBlocks(Entry);
2190 }
2191
2192 //
2193 // Increment performance counters and track this allocation
2194 //
2195 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
2196 ExpInsertPoolTracker(Tag,
2197 Entry->BlockSize * POOL_BLOCK_SIZE,
2198 OriginalType);
2199
2200 //
2201 // And return the pool allocation
2202 //
2203 ExpCheckPoolBlocks(Entry);
2204 Entry->PoolTag = Tag;
2205 return POOL_FREE_BLOCK(Entry);
2206 }
2207
2208 /*
2209 * @implemented
2210 */
2211 PVOID
2212 NTAPI
2213 ExAllocatePool(POOL_TYPE PoolType,
2214 SIZE_T NumberOfBytes)
2215 {
2216 ULONG Tag = TAG_NONE;
2217 #if 0 && DBG
2218 PLDR_DATA_TABLE_ENTRY LdrEntry;
2219
2220 /* Use the first four letters of the driver name, or "None" if unavailable */
2221 LdrEntry = KeGetCurrentIrql() <= APC_LEVEL
2222 ? MiLookupDataTableEntry(_ReturnAddress())
2223 : NULL;
2224 if (LdrEntry)
2225 {
2226 ULONG i;
2227 Tag = 0;
2228 for (i = 0; i < min(4, LdrEntry->BaseDllName.Length / sizeof(WCHAR)); i++)
2229 Tag = Tag >> 8 | (LdrEntry->BaseDllName.Buffer[i] & 0xff) << 24;
2230 for (; i < 4; i++)
2231 Tag = Tag >> 8 | ' ' << 24;
2232 }
2233 #endif
2234 return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2235 }
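
//
// Minimal caller sketch (the 64-byte size is arbitrary): untagged
// allocations are all accounted under TAG_NONE, which makes the pool
// tracker far less useful than a proper ExAllocatePoolWithTag call.
//
//     PVOID Buffer = ExAllocatePool(NonPagedPool, 64);
//     if (Buffer) ExFreePool(Buffer);
//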
2236
2237 /*
2238 * @implemented
2239 */
2240 VOID
2241 NTAPI
2242 ExFreePoolWithTag(IN PVOID P,
2243 IN ULONG TagToFree)
2244 {
2245 PPOOL_HEADER Entry, NextEntry;
2246 USHORT BlockSize;
2247 KIRQL OldIrql;
2248 POOL_TYPE PoolType;
2249 PPOOL_DESCRIPTOR PoolDesc;
2250 ULONG Tag;
2251 BOOLEAN Combined = FALSE;
2252 PFN_NUMBER PageCount, RealPageCount;
2253 PKPRCB Prcb = KeGetCurrentPrcb();
2254 PGENERAL_LOOKASIDE LookasideList;
2255 PEPROCESS Process;
2256
2257 //
2258 // Check if any of the debug flags are enabled
2259 //
2260 if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
2261 POOL_FLAG_CHECK_WORKERS |
2262 POOL_FLAG_CHECK_RESOURCES |
2263 POOL_FLAG_VERIFIER |
2264 POOL_FLAG_CHECK_DEADLOCK |
2265 POOL_FLAG_SPECIAL_POOL))
2266 {
2267 //
2268 // Check if special pool is enabled
2269 //
2270 if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
2271 {
2272 //
2273 // Check if it was allocated from a special pool
2274 //
2275 if (MmIsSpecialPoolAddress(P))
2276 {
2277 //
2278 // Was deadlock verification also enabled? We can do some extra
2279 // checks at this point
2280 //
2281 if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
2282 {
2283 DPRINT1("Verifier not yet supported\n");
2284 }
2285
2286 //
2287 // It is, so handle it via special pool free routine
2288 //
2289 MmFreeSpecialPool(P);
2290 return;
2291 }
2292 }
2293
2294 //
2295 // For non-big page allocations, we'll do a bunch of checks in here
2296 //
2297 if (PAGE_ALIGN(P) != P)
2298 {
2299 //
2300 // Get the entry for this pool allocation
2301 // The pointer math here may look wrong or confusing, but it is quite right
2302 //
2303 Entry = P;
2304 Entry--;
2305
2306 //
2307 // Get the pool type
2308 //
2309 PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2310
2311 //
2312 // FIXME: Many other debugging checks go here
2313 //
2314 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2315 }
2316 }
2317
2318 //
2319 // Check if this is a big page allocation
2320 //
2321 if (PAGE_ALIGN(P) == P)
2322 {
2323 //
2324 // We need to find the tag for it, so first we need to find out what
2325 // kind of allocation this was (paged or nonpaged), then we can go
2326 // ahead and try finding the tag for it. Remember to get rid of the
2327 // PROTECTED_POOL tag if it's found.
2328 //
2329 // Note that if at insertion time, we failed to add the tag for a big
2330 // pool allocation, we used a special tag called 'BIG' to identify the
2331 // allocation, and we may get this tag back. In this scenario, we must
2332 // manually get the size of the allocation by actually counting through
2333 // the PFN database.
2334 //
2335 PoolType = MmDeterminePoolType(P);
2336 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2337 Tag = ExpFindAndRemoveTagBigPages(P, &PageCount, PoolType);
2338 if (!Tag)
2339 {
2340 DPRINT1("We do not know the size of this allocation. This is not yet supported\n");
2341 ASSERT(Tag == ' GIB');
2342 PageCount = 1; // We are going to lie! This might screw up accounting?
2343 }
2344 else if (Tag & PROTECTED_POOL)
2345 {
2346 Tag &= ~PROTECTED_POOL;
2347 }
2348
2349 //
2350 // Check block tag
2351 //
2352 if (TagToFree && TagToFree != Tag)
2353 {
2354 DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
2355 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
2356 }
2357
2358 //
2359 // We have our tag and our page count, so we can go ahead and remove this
2360 // tracker now
2361 //
2362 ExpRemovePoolTracker(Tag, PageCount << PAGE_SHIFT, PoolType);
2363
2364 //
2365 // Check if any of the debug flags are enabled
2366 //
2367 if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
2368 POOL_FLAG_CHECK_WORKERS |
2369 POOL_FLAG_CHECK_RESOURCES |
2370 POOL_FLAG_CHECK_DEADLOCK))
2371 {
2372 //
2373 // Was deadlock verification also enabled? We can do some extra
2374 // checks at this point
2375 //
2376 if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
2377 {
2378 DPRINT1("Verifier not yet supported\n");
2379 }
2380
2381 //
2382 // FIXME: Many debugging checks go here
2383 //
2384 }
2385
2386 //
2387 // Update counters
2388 //
2389 PoolDesc = PoolVector[PoolType];
2390 InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
2391 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes,
2392 -(LONG_PTR)(PageCount << PAGE_SHIFT));
2393
2394 //
2395 // Do the real free now and update the last counter with the big page count
2396 //
2397 RealPageCount = MiFreePoolPages(P);
2398 ASSERT(RealPageCount == PageCount);
2399 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
2400 -(LONG)RealPageCount);
2401 return;
2402 }
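
//
// (Numbers for illustration: freeing a tracked 3-page big-page allocation
// drops TotalBytes by 3 << PAGE_SHIFT, i.e. 0x3000 with 4KB pages, and
// TotalBigPages by 3; the ASSERT above flags any disagreement between the
// tracker's page count and what the PFN database actually released.)
//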
2403
2404 //
2405 // Get the entry for this pool allocation
2406 // The pointer math here may look wrong or confusing, but it is quite right
2407 //
2408 Entry = P;
2409 Entry--;
2410 ASSERT((ULONG_PTR)Entry % POOL_BLOCK_SIZE == 0);
2411
2412 //
2413 // Get the size of the entry and its pool type, then load the descriptor
2414 // for this pool type
2415 //
2416 BlockSize = Entry->BlockSize;
2417 PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2418 PoolDesc = PoolVector[PoolType];
2419
2420 //
2421 // Make sure that the IRQL makes sense
2422 //
2423 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2424
2425 //
2426 // Get the pool tag and get rid of the PROTECTED_POOL flag
2427 //
2428 Tag = Entry->PoolTag;
2429 if (Tag & PROTECTED_POOL) Tag &= ~PROTECTED_POOL;
2430
2431 //
2432 // Check block tag
2433 //
2434 if (TagToFree && TagToFree != Tag)
2435 {
2436 DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
2437 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
2438 }
2439
2440 //
2441 // Track the removal of this allocation
2442 //
2443 ExpRemovePoolTracker(Tag,
2444 BlockSize * POOL_BLOCK_SIZE,
2445 Entry->PoolType - 1);
2446
2447 //
2448 // Release pool quota, if any
2449 //
2450 if ((Entry->PoolType - 1) & QUOTA_POOL_MASK)
2451 {
2452 Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
2453 if (Process)
2454 {
2455 if (Process->Pcb.Header.Type != ProcessObject)
2456 {
2457 DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
2458 Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
2459 KeBugCheckEx(BAD_POOL_CALLER,
2460 0x0D,
2461 (ULONG_PTR)P,
2462 Tag,
2463 (ULONG_PTR)Process);
2464 }
2465 PsReturnPoolQuota(Process, PoolType, BlockSize * POOL_BLOCK_SIZE);
2466 ObDereferenceObject(Process);
2467 }
2468 }
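
//
// (The owner is stashed in the last PVOID-sized slot of the block, which
// is why ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] recovers it here; the
// quota allocation path below reserves that slot by growing the request
// by sizeof(PEPROCESS).)
//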
2469
2470 //
2471 // Is this allocation small enough to have come from a lookaside list?
2472 //
2473 if (BlockSize <= NUMBER_POOL_LOOKASIDE_LISTS)
2474 {
2475 //
2476 // Try pushing it into the per-CPU lookaside list
2477 //
2478 LookasideList = (PoolType == PagedPool) ?
2479 Prcb->PPPagedLookasideList[BlockSize - 1].P :
2480 Prcb->PPNPagedLookasideList[BlockSize - 1].P;
2481 LookasideList->TotalFrees++;
2482 if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2483 {
2484 LookasideList->FreeHits++;
2485 InterlockedPushEntrySList(&LookasideList->ListHead, P);
2486 return;
2487 }
2488
2489 //
2490 // We failed, try to push it into the global lookaside list
2491 //
2492 LookasideList = (PoolType == PagedPool) ?
2493 Prcb->PPPagedLookasideList[BlockSize - 1].L :
2494 Prcb->PPNPagedLookasideList[BlockSize - 1].L;
2495 LookasideList->TotalFrees++;
2496 if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2497 {
2498 LookasideList->FreeHits++;
2499 InterlockedPushEntrySList(&LookasideList->ListHead, P);
2500 return;
2501 }
2502 }
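
//
// Design note: frees prefer the per-processor lookaside list and only
// then fall back to the global one, mirroring the allocation path. A
// block parked on a lookaside list keeps its header intact, so a later
// pop can hand it straight back without taking the pool descriptor lock.
//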
2503
2504 //
2505 // Get the pointer to the next entry
2506 //
2507 NextEntry = POOL_BLOCK(Entry, BlockSize);
2508
2509 //
2510 // Update performance counters
2511 //
2512 InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
2513 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, -(LONG_PTR)(BlockSize * POOL_BLOCK_SIZE));
2514
2515 //
2516 // Acquire the pool lock
2517 //
2518 OldIrql = ExLockPool(PoolDesc);
2519
2520 //
2521 // Check if the next allocation is at the end of the page
2522 //
2523 ExpCheckPoolBlocks(Entry);
2524 if (PAGE_ALIGN(NextEntry) != NextEntry)
2525 {
2526 //
2527 // We may be able to combine the block if it's free
2528 //
2529 if (NextEntry->PoolType == 0)
2530 {
2531 //
2532 // The next block is free, so we'll do a combine
2533 //
2534 Combined = TRUE;
2535
2536 //
2537 // Make sure there's actual data in the block -- anything smaller
2538 // than this means we only have the header, so there's no linked list
2539 // for us to remove
2540 //
2541 if (NextEntry->BlockSize != 1)
2542 {
2543 //
2544 // The block is at least big enough to have a linked list, so go
2545 // ahead and remove it
2546 //
2547 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
2548 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
2549 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
2550 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
2551 }
2552
2553 //
2554 // Our entry is now combined with the next entry
2555 //
2556 Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
2557 }
2558 }
2559
2560 //
2561 // Now check if there was a previous entry on the same page as us
2562 //
2563 if (Entry->PreviousSize)
2564 {
2565 //
2566 // Great, grab that entry and check if it's free
2567 //
2568 NextEntry = POOL_PREV_BLOCK(Entry);
2569 if (NextEntry->PoolType == 0)
2570 {
2571 //
2572 // It is, so we can do a combine
2573 //
2574 Combined = TRUE;
2575
2576 //
2577 // Make sure there's actual data in the block -- anything smaller
2578 // than this means we only have the header, so there's no linked list
2579 // for us to remove
2580 //
2581 if (NextEntry->BlockSize != 1)
2582 {
2583 //
2584 // The block is at least big enough to have a linked list, so go
2585 // ahead and remove it
2586 //
2587 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
2588 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
2589 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
2590 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
2591 }
2592
2593 //
2594 // Combine our original block (which might've already been combined
2595 // with the next block), into the previous block
2596 //
2597 NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;
2598
2599 //
2600 // And now we'll work with the previous block instead
2601 //
2602 Entry = NextEntry;
2603 }
2604 }
2605
2606 //
2607 // By now, it may have been possible for our combined blocks to actually
2608 // have made up a full page (if there were only 2-3 allocations on the
2609 // page, they could've all been combined).
2610 //
2611 if ((PAGE_ALIGN(Entry) == Entry) &&
2612 (PAGE_ALIGN(POOL_NEXT_BLOCK(Entry)) == POOL_NEXT_BLOCK(Entry)))
2613 {
2614 //
2615 // In this case, release the pool lock, update the performance counter,
2616 // and free the page
2617 //
2618 ExUnlockPool(PoolDesc, OldIrql);
2619 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalPages, -1);
2620 MiFreePoolPages(Entry);
2621 return;
2622 }
2623
2624 //
2625 // Otherwise, we now have a free block (or a combination of 2 or 3)
2626 //
2627 Entry->PoolType = 0;
2628 BlockSize = Entry->BlockSize;
2629 ASSERT(BlockSize != 1);
2630
2631 //
2632 // Check if we actually did combine it with anyone
2633 //
2634 if (Combined)
2635 {
2636 //
2637 // Get the first combined block (either our original to begin with, or
2638 // the one after the original, depending if we combined with the previous)
2639 //
2640 NextEntry = POOL_NEXT_BLOCK(Entry);
2641
2642 //
2643 // As long as the next block isn't on a page boundary, have it point
2644 // back to us
2645 //
2646 if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
2647 }
2648
2649 //
2650 // Insert this new free block, and release the pool lock
2651 //
2652 ExpInsertPoolHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
2653 ExpCheckPoolLinks(POOL_FREE_BLOCK(Entry));
2654 ExUnlockPool(PoolDesc, OldIrql);
2655 }
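
//
// Worked example of the coalescing above (unit counts hypothetical):
// freeing a 4-unit block whose 3-unit successor and 2-unit predecessor
// are both free yields a single 9-unit free block headed at the
// predecessor; as that is still short of a full page of units, it goes
// onto ListHeads[9 - 1] instead of back to MiFreePoolPages.
//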
2656
2657 /*
2658 * @implemented
2659 */
2660 VOID
2661 NTAPI
2662 ExFreePool(PVOID P)
2663 {
2664 //
2665 // Just free without checking for the tag
2666 //
2667 ExFreePoolWithTag(P, 0);
2668 }
2669
2670 /*
2671 * @unimplemented
2672 */
2673 SIZE_T
2674 NTAPI
2675 ExQueryPoolBlockSize(IN PVOID PoolBlock,
2676 OUT PBOOLEAN QuotaCharged)
2677 {
2678 //
2679 // Not implemented
2680 //
2681 UNIMPLEMENTED;
2682 return 0;
2683 }
2684
2685 /*
2686 * @implemented
2687 */
2689 PVOID
2690 NTAPI
2691 ExAllocatePoolWithQuota(IN POOL_TYPE PoolType,
2692 IN SIZE_T NumberOfBytes)
2693 {
2694 //
2695 // Allocate the pool
2696 //
2697 return ExAllocatePoolWithQuotaTag(PoolType, NumberOfBytes, TAG_NONE);
2698 }
2699
2700 /*
2701 * @implemented
2702 */
2703 PVOID
2704 NTAPI
2705 ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType,
2706 IN SIZE_T NumberOfBytes,
2707 IN ULONG Tag,
2708 IN EX_POOL_PRIORITY Priority)
2709 {
2710 PVOID Buffer;
2711
2712 //
2713 // Allocate the pool
2714 //
2715 Buffer = ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2716 if (Buffer == NULL)
2717 {
2718 UNIMPLEMENTED;
2719 }
2720
2721 return Buffer;
2722 }
2723
2724 /*
2725 * @implemented
2726 */
2727 PVOID
2728 NTAPI
2729 ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType,
2730 IN SIZE_T NumberOfBytes,
2731 IN ULONG Tag)
2732 {
2733 BOOLEAN Raise = TRUE;
2734 PVOID Buffer;
2735 PPOOL_HEADER Entry;
2736 NTSTATUS Status;
2737 PEPROCESS Process = PsGetCurrentProcess();
2738
2739 //
2740 // Check if we should fail instead of raising an exception
2741 //
2742 if (PoolType & POOL_QUOTA_FAIL_INSTEAD_OF_RAISE)
2743 {
2744 Raise = FALSE;
2745 PoolType &= ~POOL_QUOTA_FAIL_INSTEAD_OF_RAISE;
2746 }
2747
2748 //
2749 // Inject the pool quota mask
2750 //
2751 PoolType += QUOTA_POOL_MASK;
2752
2753 //
2754 // Check if we have enough space to add the quota owner process, as long as
2755 // this isn't the system process, which never gets charged quota
2756 //
2757 ASSERT(NumberOfBytes != 0);
2758 if ((NumberOfBytes <= (PAGE_SIZE - POOL_BLOCK_SIZE - sizeof(PVOID))) &&
2759 (Process != PsInitialSystemProcess))
2760 {
2761 //
2762 // Add space for our EPROCESS pointer
2763 //
2764 NumberOfBytes += sizeof(PEPROCESS);
2765 }
2766 else
2767 {
2768 //
2769 // We won't be able to store the pointer, so don't use quota for this
2770 //
2771 PoolType -= QUOTA_POOL_MASK;
2772 }
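
//
// (With hypothetical x86 values -- PAGE_SIZE 4096, POOL_BLOCK_SIZE 8,
// sizeof(PVOID) 4 -- the cutoff is 4084 bytes: anything larger would
// become a page-aligned big-page allocation once the header and owner
// slot are added, and big pages have no in-block slot to hold the
// EPROCESS pointer.)
//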
2773
2774 //
2775 // Allocate the pool buffer now
2776 //
2777 Buffer = ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2778
2779 //
2780 // If the buffer is page-aligned, this is a large page allocation and we
2781 // won't touch it
2782 //
2783 if (PAGE_ALIGN(Buffer) != Buffer)
2784 {
2785 //
2786 // Also if special pool is enabled, and this was allocated from there,
2787 // we won't touch it either
2788 //
2789 if ((ExpPoolFlags & POOL_FLAG_SPECIAL_POOL) &&
2790 (MmIsSpecialPoolAddress(Buffer)))
2791 {
2792 return Buffer;
2793 }
2794
2795 //
2796 // If it wasn't actually allocated with quota charges, ignore it too
2797 //
2798 if (!(PoolType & QUOTA_POOL_MASK)) return Buffer;
2799
2800 //
2801 // If this is the system process, we don't charge quota, so ignore
2802 //
2803 if (Process == PsInitialSystemProcess) return Buffer;
2804
2805 //
2806 // Actually go and charge quota for the process now
2807 //
2808 Entry = POOL_ENTRY(Buffer);
2809 Status = PsChargeProcessPoolQuota(Process,
2810 PoolType & BASE_POOL_TYPE_MASK,
2811 Entry->BlockSize * POOL_BLOCK_SIZE);
2812 if (!NT_SUCCESS(Status))
2813 {
2814 //
2815 // Quota failed, back out the allocation, clear the owner, and fail
2816 //
2817 ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
2818 ExFreePoolWithTag(Buffer, Tag);
2819 if (Raise) RtlRaiseStatus(Status);
2820 return NULL;
2821 }
2822
2823 //
2824 // Quota worked, write the owner and then reference it before returning
2825 //
2826 ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = Process;
2827 ObReferenceObject(Process);
2828 }
2829 else if (!(Buffer) && (Raise))
2830 {
2831 //
2832 // The allocation failed, raise an error if we are in raise mode
2833 //
2834 RtlRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
2835 }
2836
2837 //
2838 // Return the allocated buffer
2839 //
2840 return Buffer;
2841 }
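
//
// Caller sketch (tag and size are illustrative only): combined with
// POOL_QUOTA_FAIL_INSTEAD_OF_RAISE, a quota or allocation failure comes
// back as NULL rather than an exception:
//
//     Buffer = ExAllocatePoolWithQuotaTag(PagedPool |
//                                         POOL_QUOTA_FAIL_INSTEAD_OF_RAISE,
//                                         0x100,
//                                         'mgrF');
//     if (!Buffer) return STATUS_INSUFFICIENT_RESOURCES;
//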
2842
2843 #if DBG && defined(KDBG)
2844
2845 BOOLEAN
2846 ExpKdbgExtPool(
2847 ULONG Argc,
2848 PCHAR Argv[])
2849 {
2850 ULONG_PTR Address = 0, Flags = 0;
2851 PVOID PoolPage;
2852 PPOOL_HEADER Entry;
2853 BOOLEAN ThisOne;
2854 PULONG Data;
2855
2856 if (Argc > 1)
2857 {
2858 /* Get address */
2859 if (!KdbpGetHexNumber(Argv[1], &Address))
2860 {
2861 KdbpPrint("Invalid parameter: %s\n", Argv[1]);
2862 return TRUE;
2863 }
2864 }
2865
2866 if (Argc > 2)
2867 {
2868 /* Get flags */
2869 if (!KdbpGetHexNumber(Argv[2], &Flags))
2870 {
2871 KdbpPrint("Invalid parameter: %s\n", Argv[2]);
2872 return TRUE;
2873 }
2874 }
2875
2876 /* Check if we got an address */
2877 if (Address != 0)
2878 {
2879 /* Get the base page */
2880 PoolPage = PAGE_ALIGN(Address);
2881 }
2882 else
2883 {
2884 KdbpPrint("Heap is unimplemented\n");
2885 return TRUE;
2886 }
2887
2888 /* No paging support! */
2889 if (!MmIsAddressValid(PoolPage))
2890 {
2891 KdbpPrint("Address not accessible!\n");
2892 return TRUE;
2893 }
2894
2895 /* Get pool type */
2896 if ((Address >= (ULONG_PTR)MmPagedPoolStart) && (Address <= (ULONG_PTR)MmPagedPoolEnd))
2897 KdbpPrint("Allocation is from PagedPool region\n");
2898 else if ((Address >= (ULONG_PTR)MmNonPagedPoolStart) && (Address <= (ULONG_PTR)MmNonPagedPoolEnd))
2899 KdbpPrint("Allocation is from NonPagedPool region\n");
2900 else
2901 {
2902 KdbpPrint("Address 0x%p is not within any pool!\n", (PVOID)Address);
2903 return TRUE;
2904 }
2905
2906 /* Loop all entries of that page */
2907 Entry = PoolPage;
2908 do
2909 {
2910 /* Check if the address is within that entry */
2911 ThisOne = ((Address >= (ULONG_PTR)Entry) &&
2912 (Address < (ULONG_PTR)(Entry + Entry->BlockSize)));
2913
2914 if (!(Flags & 1) || ThisOne)
2915 {
2916 /* Print the line */
2917 KdbpPrint("%c%p size: %4d previous size: %4d %s %.4s\n",
2918 ThisOne ? '*' : ' ', Entry, Entry->BlockSize, Entry->PreviousSize,
2919 (Flags & 0x80000000) ? "" : (Entry->PoolType ? "(Allocated)" : "(Free) "),
2920 (Flags & 0x80000000) ? "" : (PCHAR)&Entry->PoolTag);
2921 }
2922
2923 if (Flags & 1)
2924 {
2925 Data = (PULONG)(Entry + 1);
2926 KdbpPrint(" %p %08lx %08lx %08lx %08lx\n"
2927 " %p %08lx %08lx %08lx %08lx\n",
2928 &Data[0], Data[0], Data[1], Data[2], Data[3],
2929 &Data[4], Data[4], Data[5], Data[6], Data[7]);
2930 }
2931
2932 /* Go to next entry */
2933 Entry = POOL_BLOCK(Entry, Entry->BlockSize);
2934 }
2935 while (((ULONG_PTR)Entry < (ULONG_PTR)PoolPage + PAGE_SIZE) && (Entry->BlockSize != 0));
2936
2937 return TRUE;
2938 }
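
/*
 * Example usage from the KDBG prompt (address illustrative), assuming the
 * command is registered as "!pool": "!pool deadbeef" walks every
 * POOL_HEADER on that page, marking the entry containing the address with
 * '*'; with flag bit 0 set ("!pool deadbeef 1") the header line is limited
 * to the matching entry and the first 32 bytes after each header are
 * dumped as well.
 */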
2939
2940 BOOLEAN
2941 ExpKdbgExtPoolUsed(
2942 ULONG Argc,
2943 PCHAR Argv[])
2944 {
2945 MiDumpPoolConsumers(TRUE);
2946
2947 return TRUE;
2948 }
2949
2950 #endif // DBG && KDBG
2951
2952 /* EOF */