1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/expool.c
5 * PURPOSE: ARM Memory Manager Executive Pool Manager
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 #define MODULE_INVOLVED_IN_ARM3
16 #include <mm/ARM3/miarm.h>
17
18 #undef ExAllocatePoolWithQuota
19 #undef ExAllocatePoolWithQuotaTag
20
21 /* GLOBALS ********************************************************************/
22
23 #define POOL_BIG_TABLE_ENTRY_FREE 0x1
24
25 typedef struct _POOL_DPC_CONTEXT
26 {
27 PPOOL_TRACKER_TABLE PoolTrackTable;
28 SIZE_T PoolTrackTableSize;
29 PPOOL_TRACKER_TABLE PoolTrackTableExpansion;
30 SIZE_T PoolTrackTableSizeExpansion;
31 } POOL_DPC_CONTEXT, *PPOOL_DPC_CONTEXT;
32
33 ULONG ExpNumberOfPagedPools;
34 POOL_DESCRIPTOR NonPagedPoolDescriptor;
35 PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16 + 1];
36 PPOOL_DESCRIPTOR PoolVector[2];
37 PKGUARDED_MUTEX ExpPagedPoolMutex;
38 SIZE_T PoolTrackTableSize, PoolTrackTableMask;
39 SIZE_T PoolBigPageTableSize, PoolBigPageTableHash;
40 PPOOL_TRACKER_TABLE PoolTrackTable;
41 PPOOL_TRACKER_BIG_PAGES PoolBigPageTable;
42 KSPIN_LOCK ExpTaggedPoolLock;
43 ULONG PoolHitTag;
44 BOOLEAN ExStopBadTags;
45 KSPIN_LOCK ExpLargePoolTableLock;
46 ULONG ExpPoolBigEntriesInUse;
47 ULONG ExpPoolFlags;
48 ULONG ExPoolFailures;
49
50 /* Pool block/header/list access macros */
51 #define POOL_ENTRY(x) (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
52 #define POOL_FREE_BLOCK(x) (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
53 #define POOL_BLOCK(x, i) (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
54 #define POOL_NEXT_BLOCK(x) POOL_BLOCK((x), (x)->BlockSize)
55 #define POOL_PREV_BLOCK(x) POOL_BLOCK((x), -((x)->PreviousSize))
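
//
// Illustrative sketch (hypothetical header "Hdr"): the macros above convert
// between a header, the caller-visible data behind it, and adjacent blocks,
// with BlockSize/PreviousSize expressed in POOL_BLOCK_SIZE units:
//
//   PVOID Data = POOL_FREE_BLOCK(Hdr);        // Hdr + sizeof(POOL_HEADER)
//   ASSERT(POOL_ENTRY(Data) == Hdr);          // the inverse mapping
//   PPOOL_HEADER Next = POOL_NEXT_BLOCK(Hdr); // Hdr + Hdr->BlockSize blocks
//   ASSERT(POOL_PREV_BLOCK(Next) == Hdr);     // holds when Next->PreviousSize
//                                             // equals Hdr->BlockSize
//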
56
57 /*
58 * Pool list access debug macros, similar to Arthur's pfnlist.c work.
59 * Microsoft actually implements similar checks in the Windows Server 2003 SP1
60 * pool code, but only for checked builds.
61 *
62 * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
63 * that these checks are done even on retail builds, due to the increasing
64 * number of kernel-mode attacks which depend on dangling list pointers and other
65 * kinds of list-based attacks.
66 *
67 * For now, I will leave these checks on all the time, but later they are likely
68 * to be DBG-only, at least until there are enough kernel-mode security attacks
69 * against ReactOS to warrant the performance hit.
70 *
71 * For now, these are not made inline, so we can get good stack traces.
72 */
73 PLIST_ENTRY
74 NTAPI
75 ExpDecodePoolLink(IN PLIST_ENTRY Link)
76 {
77 return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
78 }
79
80 PLIST_ENTRY
81 NTAPI
82 ExpEncodePoolLink(IN PLIST_ENTRY Link)
83 {
84 return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
85 }
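
//
// A minimal sketch of the encoding contract (hypothetical pointer "p"): list
// entries live inside pool blocks that are at least 8-byte aligned, so bit 0
// of any valid link is normally clear and can carry the "encoded" mark:
//
//   ASSERT(((ULONG_PTR)p & 1) == 0);
//   ASSERT(ExpDecodePoolLink(ExpEncodePoolLink(p)) == p);
//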
86
87 VOID
88 NTAPI
89 ExpCheckPoolLinks(IN PLIST_ENTRY ListHead)
90 {
91 if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink) != ListHead) ||
92 (ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink) != ListHead))
93 {
94 KeBugCheckEx(BAD_POOL_HEADER,
95 3,
96 (ULONG_PTR)ListHead,
97 (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink),
98 (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink));
99 }
100 }
101
102 VOID
103 NTAPI
104 ExpInitializePoolListHead(IN PLIST_ENTRY ListHead)
105 {
106 ListHead->Flink = ListHead->Blink = ExpEncodePoolLink(ListHead);
107 }
108
109 BOOLEAN
110 NTAPI
111 ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)
112 {
113 return (ExpDecodePoolLink(ListHead->Flink) == ListHead);
114 }
115
116 VOID
117 NTAPI
118 ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
119 {
120 PLIST_ENTRY Blink, Flink;
121 Flink = ExpDecodePoolLink(Entry->Flink);
122 Blink = ExpDecodePoolLink(Entry->Blink);
123 Flink->Blink = ExpEncodePoolLink(Blink);
124 Blink->Flink = ExpEncodePoolLink(Flink);
125 }
126
127 PLIST_ENTRY
128 NTAPI
129 ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
130 {
131 PLIST_ENTRY Entry, Flink;
132 Entry = ExpDecodePoolLink(ListHead->Flink);
133 Flink = ExpDecodePoolLink(Entry->Flink);
134 ListHead->Flink = ExpEncodePoolLink(Flink);
135 Flink->Blink = ExpEncodePoolLink(ListHead);
136 return Entry;
137 }
138
139 PLIST_ENTRY
140 NTAPI
141 ExpRemovePoolTailList(IN PLIST_ENTRY ListHead)
142 {
143 PLIST_ENTRY Entry, Blink;
144 Entry = ExpDecodePoolLink(ListHead->Blink);
145 Blink = ExpDecodePoolLink(Entry->Blink);
146 ListHead->Blink = ExpEncodePoolLink(Blink);
147 Blink->Flink = ExpEncodePoolLink(ListHead);
148 return Entry;
149 }
150
151 VOID
152 NTAPI
153 ExpInsertPoolTailList(IN PLIST_ENTRY ListHead,
154 IN PLIST_ENTRY Entry)
155 {
156 PLIST_ENTRY Blink;
157 ExpCheckPoolLinks(ListHead);
158 Blink = ExpDecodePoolLink(ListHead->Blink);
159 Entry->Flink = ExpEncodePoolLink(ListHead);
160 Entry->Blink = ExpEncodePoolLink(Blink);
161 Blink->Flink = ExpEncodePoolLink(Entry);
162 ListHead->Blink = ExpEncodePoolLink(Entry);
163 ExpCheckPoolLinks(ListHead);
164 }
165
166 VOID
167 NTAPI
168 ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead,
169 IN PLIST_ENTRY Entry)
170 {
171 PLIST_ENTRY Flink;
172 ExpCheckPoolLinks(ListHead);
173 Flink = ExpDecodePoolLink(ListHead->Flink);
174 Entry->Flink = ExpEncodePoolLink(Flink);
175 Entry->Blink = ExpEncodePoolLink(ListHead);
176 Flink->Blink = ExpEncodePoolLink(Entry);
177 ListHead->Flink = ExpEncodePoolLink(Entry);
178 ExpCheckPoolLinks(ListHead);
179 }
180
181 VOID
182 NTAPI
183 ExpCheckPoolHeader(IN PPOOL_HEADER Entry)
184 {
185 PPOOL_HEADER PreviousEntry, NextEntry;
186
187 /* Is there a block before this one? */
188 if (Entry->PreviousSize)
189 {
190 /* Get it */
191 PreviousEntry = POOL_PREV_BLOCK(Entry);
192
193 /* The two blocks must be on the same page! */
194 if (PAGE_ALIGN(Entry) != PAGE_ALIGN(PreviousEntry))
195 {
196 /* Something is awry */
197 KeBugCheckEx(BAD_POOL_HEADER,
198 6,
199 (ULONG_PTR)PreviousEntry,
200 __LINE__,
201 (ULONG_PTR)Entry);
202 }
203
204 /* This block should also indicate that it's as large as we think it is */
205 if (PreviousEntry->BlockSize != Entry->PreviousSize)
206 {
207 /* Otherwise, someone corrupted one of the sizes */
208 DPRINT1("PreviousEntry BlockSize %lu, tag %.4s. Entry PreviousSize %lu, tag %.4s\n",
209 PreviousEntry->BlockSize, (char *)&PreviousEntry->PoolTag,
210 Entry->PreviousSize, (char *)&Entry->PoolTag);
211 KeBugCheckEx(BAD_POOL_HEADER,
212 5,
213 (ULONG_PTR)PreviousEntry,
214 __LINE__,
215 (ULONG_PTR)Entry);
216 }
217 }
218 else if (PAGE_ALIGN(Entry) != Entry)
219 {
220 /* If there's no block before us, we are the first block, so we should be on a page boundary */
221 KeBugCheckEx(BAD_POOL_HEADER,
222 7,
223 0,
224 __LINE__,
225 (ULONG_PTR)Entry);
226 }
227
228 /* This block must have a size */
229 if (!Entry->BlockSize)
230 {
231 /* Someone must've corrupted this field */
232 if (Entry->PreviousSize)
233 {
234 PreviousEntry = POOL_PREV_BLOCK(Entry);
235 DPRINT1("PreviousEntry tag %.4s. Entry tag %.4s\n",
236 (char *)&PreviousEntry->PoolTag,
237 (char *)&Entry->PoolTag);
238 }
239 else
240 {
241 DPRINT1("Entry tag %.4s\n",
242 (char *)&Entry->PoolTag);
243 }
244 KeBugCheckEx(BAD_POOL_HEADER,
245 8,
246 0,
247 __LINE__,
248 (ULONG_PTR)Entry);
249 }
250
251 /* Okay, now get the next block */
252 NextEntry = POOL_NEXT_BLOCK(Entry);
253
254 /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
255 if (PAGE_ALIGN(NextEntry) != NextEntry)
256 {
257 /* The two blocks must be on the same page! */
258 if (PAGE_ALIGN(Entry) != PAGE_ALIGN(NextEntry))
259 {
260 /* Something is messed up */
261 KeBugCheckEx(BAD_POOL_HEADER,
262 9,
263 (ULONG_PTR)NextEntry,
264 __LINE__,
265 (ULONG_PTR)Entry);
266 }
267
268 /* And this block should think we are as large as we truly are */
269 if (NextEntry->PreviousSize != Entry->BlockSize)
270 {
271 /* Otherwise, someone corrupted the field */
272 DPRINT1("Entry BlockSize %lu, tag %.4s. NextEntry PreviousSize %lu, tag %.4s\n",
273 Entry->BlockSize, (char *)&Entry->PoolTag,
274 NextEntry->PreviousSize, (char *)&NextEntry->PoolTag);
275 KeBugCheckEx(BAD_POOL_HEADER,
276 5,
277 (ULONG_PTR)NextEntry,
278 __LINE__,
279 (ULONG_PTR)Entry);
280 }
281 }
282 }
283
284 VOID
285 NTAPI
286 ExpCheckPoolAllocation(
287 PVOID P,
288 POOL_TYPE PoolType,
289 ULONG Tag)
290 {
291 PPOOL_HEADER Entry;
292 ULONG i;
293 KIRQL OldIrql;
294 POOL_TYPE RealPoolType;
295
296 /* Get the pool header */
297 Entry = ((PPOOL_HEADER)P) - 1;
298
299 /* Check if this is a large allocation */
300 if (PAGE_ALIGN(P) == P)
301 {
302 /* Lock the pool table */
303 KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
304
305 /* Find the pool tag */
306 for (i = 0; i < PoolBigPageTableSize; i++)
307 {
308 /* Check if this is our allocation */
309 if (PoolBigPageTable[i].Va == P)
310 {
311 /* Make sure the tag is ok */
312 if (PoolBigPageTable[i].Key != Tag)
313 {
314 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, PoolBigPageTable[i].Key, Tag);
315 }
316
317 break;
318 }
319 }
320
321 /* Release the lock */
322 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
323
324 if (i == PoolBigPageTableSize)
325 {
326 /* Did not find the allocation */
327 //ASSERT(FALSE);
328 }
329
330 /* Get Pool type by address */
331 RealPoolType = MmDeterminePoolType(P);
332 }
333 else
334 {
335 /* Verify the tag */
336 if (Entry->PoolTag != Tag)
337 {
338 DPRINT1("Allocation has wrong pool tag! Expected '%.4s', got '%.4s' (0x%08lx)\n",
339 &Tag, &Entry->PoolTag, Entry->PoolTag);
340 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Entry->PoolTag, Tag);
341 }
342
343 /* Check the rest of the header */
344 ExpCheckPoolHeader(Entry);
345
346 /* Get Pool type from entry */
347 RealPoolType = (Entry->PoolType - 1);
348 }
349
350 /* Should we check the pool type? */
351 if (PoolType != -1)
352 {
353 /* Verify the pool type */
354 if (RealPoolType != PoolType)
355 {
356 DPRINT1("Wrong pool type! Expected %s, got %s\n",
357 PoolType & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool",
358 (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool");
359 KeBugCheckEx(BAD_POOL_CALLER, 0xCC, (ULONG_PTR)P, Entry->PoolTag, Tag);
360 }
361 }
362 }
363
364 VOID
365 NTAPI
366 ExpCheckPoolBlocks(IN PVOID Block)
367 {
368 BOOLEAN FoundBlock = FALSE;
369 SIZE_T Size = 0;
370 PPOOL_HEADER Entry;
371
372 /* Get the first entry for this page, make sure it really is the first */
373 Entry = PAGE_ALIGN(Block);
374 ASSERT(Entry->PreviousSize == 0);
375
376 /* Now scan each entry */
377 while (TRUE)
378 {
379 /* When we actually found our block, remember this */
380 if (Entry == Block) FoundBlock = TRUE;
381
382 /* Now validate this block header */
383 ExpCheckPoolHeader(Entry);
384
385 /* And go to the next one, keeping track of our size */
386 Size += Entry->BlockSize;
387 Entry = POOL_NEXT_BLOCK(Entry);
388
389 /* If we hit the last block, stop */
390 if (Size >= (PAGE_SIZE / POOL_BLOCK_SIZE)) break;
391
392 /* If we hit the end of the page, stop */
393 if (PAGE_ALIGN(Entry) == Entry) break;
394 }
395
396 /* We must've found our block, and we must have hit the end of the page */
397 if ((PAGE_ALIGN(Entry) != Entry) || !(FoundBlock))
398 {
399 /* Otherwise, the blocks are messed up */
400 KeBugCheckEx(BAD_POOL_HEADER, 10, (ULONG_PTR)Block, __LINE__, (ULONG_PTR)Entry);
401 }
402 }
403
404 FORCEINLINE
405 VOID
406 ExpCheckPoolIrqlLevel(IN POOL_TYPE PoolType,
407 IN SIZE_T NumberOfBytes,
408 IN PVOID Entry)
409 {
410 //
411 // Validate IRQL: It must be APC_LEVEL or lower for Paged Pool, and it must
412 // be DISPATCH_LEVEL or lower for Non Paged Pool
413 //
414 if (((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) ?
415 (KeGetCurrentIrql() > APC_LEVEL) :
416 (KeGetCurrentIrql() > DISPATCH_LEVEL))
417 {
418 //
419 // Take the system down
420 //
421 KeBugCheckEx(BAD_POOL_CALLER,
422 !Entry ? POOL_ALLOC_IRQL_INVALID : POOL_FREE_IRQL_INVALID,
423 KeGetCurrentIrql(),
424 PoolType,
425 !Entry ? NumberOfBytes : (ULONG_PTR)Entry);
426 }
427 }
428
429 FORCEINLINE
430 ULONG
431 ExpComputeHashForTag(IN ULONG Tag,
432 IN SIZE_T BucketMask)
433 {
434 //
435 // Compute the hash by multiplying with a large prime number and then XORing
436 // with the HIDWORD of the result.
437 //
438 // Finally, AND with the bucket mask to generate a valid index/bucket into
439 // the table
440 //
441 ULONGLONG Result = (ULONGLONG)40543 * Tag;
442 return (ULONG)BucketMask & ((ULONG)Result ^ (Result >> 32));
443 }
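
//
// Usage sketch (hypothetical tag 'tseT'): the hash only selects the starting
// bucket; callers then probe linearly with the same mask, as the tracker
// routines below do (termination checks omitted here):
//
//   Hash = ExpComputeHashForTag('tseT', PoolTrackTableMask);
//   while (PoolTrackTable[Hash].Key != 'tseT')
//       Hash = (Hash + 1) & PoolTrackTableMask;
//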
444
445 FORCEINLINE
446 ULONG
447 ExpComputePartialHashForAddress(IN PVOID BaseAddress)
448 {
449 ULONG Result;
450 //
451 // Compute the hash by converting the address into a page number, and then
452 // XORing each byte with the next one.
453 //
454 // We do *NOT* AND with the bucket mask at this point because big table expansion
455 // might happen. Therefore, the final step of the hash must be performed
456 // while holding the expansion pushlock, and this is why we call this a
457 // "partial" hash only.
458 //
459 Result = (ULONG)((ULONG_PTR)BaseAddress >> PAGE_SHIFT);
460 return (Result >> 24) ^ (Result >> 16) ^ (Result >> 8) ^ Result;
461 }
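
//
// Worked example (hypothetical address, 4KB pages): for Va = 0x12345000 the
// page number is 0x12345, and
//
//   (0x12345 >> 24) ^ (0x12345 >> 16) ^ (0x12345 >> 8) ^ 0x12345
//     = 0x0 ^ 0x1 ^ 0x123 ^ 0x12345 = 0x12267
//
// which later gets masked with PoolBigPageTableHash under the table lock.
//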
462
463 /* PRIVATE FUNCTIONS **********************************************************/
464
465 VOID
466 NTAPI
467 INIT_SECTION
468 ExpSeedHotTags(VOID)
469 {
470 ULONG i, Key, Hash, Index;
471 PPOOL_TRACKER_TABLE TrackTable = PoolTrackTable;
472 ULONG TagList[] =
473 {
474 ' oI',
475 ' laH',
476 'PldM',
477 'LooP',
478 'tSbO',
479 ' prI',
480 'bdDN',
481 'LprI',
482 'pOoI',
483 ' ldM',
484 'eliF',
485 'aVMC',
486 'dSeS',
487 'CFtN',
488 'looP',
489 'rPCT',
490 'bNMC',
491 'dTeS',
492 'sFtN',
493 'TPCT',
494 'CPCT',
495 ' yeK',
496 'qSbO',
497 'mNoI',
498 'aEoI',
499 'cPCT',
500 'aFtN',
501 '0ftN',
502 'tceS',
503 'SprI',
504 'ekoT',
505 ' eS',
506 'lCbO',
507 'cScC',
508 'lFtN',
509 'cAeS',
510 'mfSF',
511 'kWcC',
512 'miSF',
513 'CdfA',
514 'EdfA',
515 'orSF',
516 'nftN',
517 'PRIU',
518 'rFpN',
519 'RFpN',
520 'aPeS',
521 'sUeS',
522 'FpcA',
523 'MpcA',
524 'cSeS',
525 'mNbO',
526 'sFpN',
527 'uLeS',
528 'DPcS',
529 'nevE',
530 'vrqR',
531 'ldaV',
532 ' pP',
533 'SdaV',
534 ' daV',
535 'LdaV',
536 'FdaV',
537 ' GIB',
538 };
539
540 //
541 // Loop all 64 hot tags
542 //
543 ASSERT((sizeof(TagList) / sizeof(ULONG)) == 64);
544 for (i = 0; i < sizeof(TagList) / sizeof(ULONG); i++)
545 {
546 //
547 // Get the current tag, and compute its hash in the tracker table
548 //
549 Key = TagList[i];
550 Hash = ExpComputeHashForTag(Key, PoolTrackTableMask);
551
552 //
553 // Loop all the hashes in this index/bucket
554 //
555 Index = Hash;
556 while (TRUE)
557 {
558 //
559 // Find an empty entry, and make sure this isn't the last hash that
560 // can fit.
561 //
562 // On checked builds, also make sure this is the first time we are
563 // seeding this tag.
564 //
565 ASSERT(TrackTable[Hash].Key != Key);
566 if (!(TrackTable[Hash].Key) && (Hash != PoolTrackTableSize - 1))
567 {
568 //
569 // It has been seeded, move on to the next tag
570 //
571 TrackTable[Hash].Key = Key;
572 break;
573 }
574
575 //
576 // This entry was already taken, compute the next possible hash while
577 // making sure we're not back at our initial index.
578 //
579 ASSERT(TrackTable[Hash].Key != Key);
580 Hash = (Hash + 1) & PoolTrackTableMask;
581 if (Hash == Index) break;
582 }
583 }
584 }
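
//
// Note (illustrative): tags are multi-character constants stored little-endian,
// so the constant 'looP' in the list above is the tag that tools display as
// "Pool". A hypothetical sketch:
//
//   ULONG Tag = 'looP';              // bytes in memory: 'P', 'o', 'o', 'l'
//   DPRINT("%.4s\n", (char *)&Tag);  // prints "Pool" on little-endian CPUs
//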
585
586 VOID
587 NTAPI
588 ExpRemovePoolTracker(IN ULONG Key,
589 IN SIZE_T NumberOfBytes,
590 IN POOL_TYPE PoolType)
591 {
592 ULONG Hash, Index;
593 PPOOL_TRACKER_TABLE Table, TableEntry;
594 SIZE_T TableMask, TableSize;
595
596 //
597 // Remove the PROTECTED_POOL flag which is not part of the tag
598 //
599 Key &= ~PROTECTED_POOL;
600
601 //
602 // With WinDBG you can set a tag you want to break on when an allocation is
603 // attempted
604 //
605 if (Key == PoolHitTag) DbgBreakPoint();
606
607 //
608 // Why the double indirection? Because normally this function is also used
609 // when doing session pool allocations, which has another set of tables,
610 // sizes, and masks that live in session pool. Now we don't support session
611 // pool so we only ever use the regular tables, but I'm keeping the code this
612 // way so that the day we DO support session pool, it won't require that
613 // many changes
614 //
615 Table = PoolTrackTable;
616 TableMask = PoolTrackTableMask;
617 TableSize = PoolTrackTableSize;
618 DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize);
619
620 //
621 // Compute the hash for this key, and loop all the possible buckets
622 //
623 Hash = ExpComputeHashForTag(Key, TableMask);
624 Index = Hash;
625 while (TRUE)
626 {
627 //
628 // Have we found the entry for this tag?
629 //
630 TableEntry = &Table[Hash];
631 if (TableEntry->Key == Key)
632 {
633 //
634 // Decrement the counters depending on if this was paged or nonpaged
635 // pool
636 //
637 if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
638 {
639 InterlockedIncrement(&TableEntry->NonPagedFrees);
640 InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes,
641 -(SSIZE_T)NumberOfBytes);
642 return;
643 }
644 InterlockedIncrement(&TableEntry->PagedFrees);
645 InterlockedExchangeAddSizeT(&TableEntry->PagedBytes,
646 -(SSIZE_T)NumberOfBytes);
647 return;
648 }
649
650 //
651 // We should have only ended up with an empty entry if we've reached
652 // the last bucket
653 //
654 if (!TableEntry->Key)
655 {
656 DPRINT1("Empty item reached in tracker table. Hash=0x%lx, TableMask=0x%lx, Tag=0x%08lx, NumberOfBytes=%lu, PoolType=%d\n",
657 Hash, TableMask, Key, (ULONG)NumberOfBytes, PoolType);
658 ASSERT(Hash == TableMask);
659 }
660
661 //
662 // This path is hit when we don't have an entry, and the current bucket
663 // is full, so we simply try the next one
664 //
665 Hash = (Hash + 1) & TableMask;
666 if (Hash == Index) break;
667 }
668
669 //
670 // And finally this path is hit when all the buckets are full, and we need
671 // some expansion. This path is not yet supported in ReactOS and so we'll
672 // ignore the tag
673 //
674 DPRINT1("Out of pool tag space, ignoring...\n");
675 }
676
677 VOID
678 NTAPI
679 ExpInsertPoolTracker(IN ULONG Key,
680 IN SIZE_T NumberOfBytes,
681 IN POOL_TYPE PoolType)
682 {
683 ULONG Hash, Index;
684 KIRQL OldIrql;
685 PPOOL_TRACKER_TABLE Table, TableEntry;
686 SIZE_T TableMask, TableSize;
687
688 //
689 // Remove the PROTECTED_POOL flag which is not part of the tag
690 //
691 Key &= ~PROTECTED_POOL;
692
693 //
694 // With WinDBG you can set a tag you want to break on when an allocation is
695 // attempted
696 //
697 if (Key == PoolHitTag) DbgBreakPoint();
698
699 //
700 // There is also an internal flag you can set to break on malformed tags
701 //
702 if (ExStopBadTags) ASSERT(Key & 0xFFFFFF00);
703
704 //
705 // ASSERT on ReactOS features not yet supported
706 //
707 ASSERT(!(PoolType & SESSION_POOL_MASK));
708 ASSERT(KeGetCurrentProcessorNumber() == 0);
709
710 //
711 // Why the double indirection? Because normally this function is also used
712 // when doing session pool allocations, which has another set of tables,
713 // sizes, and masks that live in session pool. Now we don't support session
714 // pool so we only ever use the regular tables, but I'm keeping the code this
715 // way so that the day we DO support session pool, it won't require that
716 // many changes
717 //
718 Table = PoolTrackTable;
719 TableMask = PoolTrackTableMask;
720 TableSize = PoolTrackTableSize;
721 DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize);
722
723 //
724 // Compute the hash for this key, and loop all the possible buckets
725 //
726 Hash = ExpComputeHashForTag(Key, TableMask);
727 Index = Hash;
728 while (TRUE)
729 {
730 //
731 // Do we already have an entry for this tag?
732 //
733 TableEntry = &Table[Hash];
734 if (TableEntry->Key == Key)
735 {
736 //
737 // Increment the counters depending on if this was paged or nonpaged
738 // pool
739 //
740 if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
741 {
742 InterlockedIncrement(&TableEntry->NonPagedAllocs);
743 InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes, NumberOfBytes);
744 return;
745 }
746 InterlockedIncrement(&TableEntry->PagedAllocs);
747 InterlockedExchangeAddSizeT(&TableEntry->PagedBytes, NumberOfBytes);
748 return;
749 }
750
751 //
752 // We don't have an entry yet, but we've found a free bucket for it
753 //
754 if (!(TableEntry->Key) && (Hash != PoolTrackTableSize - 1))
755 {
756 //
757 // We need to hold the lock while creating a new entry, since other
758 // processors might be in this code path as well
759 //
760 ExAcquireSpinLock(&ExpTaggedPoolLock, &OldIrql);
761 if (!PoolTrackTable[Hash].Key)
762 {
763 //
764 // We've won the race, so now create this entry in the bucket
765 //
766 ASSERT(Table[Hash].Key == 0);
767 PoolTrackTable[Hash].Key = Key;
768 TableEntry->Key = Key;
769 }
770 ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);
771
772 //
773 // Now we force the loop to run again, and we should now end up in
774 // the code path above which does the interlocked increments...
775 //
776 continue;
777 }
778
779 //
780 // This path is hit when we don't have an entry, and the current bucket
781 // is full, so we simply try the next one
782 //
783 Hash = (Hash + 1) & TableMask;
784 if (Hash == Index) break;
785 }
786
787 //
788 // And finally this path is hit when all the buckets are full, and we need
789 // some expansion. This path is not yet supported in ReactOS and so we'll
790 // ignore the tag
791 //
792 DPRINT1("Out of pool tag space, ignoring...\n");
793 }
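
//
// Minimal usage sketch (hypothetical tag and size): each successful
// allocation is mirrored by a tracker insert and each free by a matching
// remove, keeping the per-tag byte and count totals balanced:
//
//   ExpInsertPoolTracker('tseT', 64, NonPagedPool);  // at allocation time
//   ExpRemovePoolTracker('tseT', 64, NonPagedPool);  // at free time
//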
794
795 VOID
796 NTAPI
797 INIT_SECTION
798 ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor,
799 IN POOL_TYPE PoolType,
800 IN ULONG PoolIndex,
801 IN ULONG Threshold,
802 IN PVOID PoolLock)
803 {
804 PLIST_ENTRY NextEntry, LastEntry;
805
806 //
807 // Setup the descriptor based on the caller's request
808 //
809 PoolDescriptor->PoolType = PoolType;
810 PoolDescriptor->PoolIndex = PoolIndex;
811 PoolDescriptor->Threshold = Threshold;
812 PoolDescriptor->LockAddress = PoolLock;
813
814 //
815 // Initialize accounting data
816 //
817 PoolDescriptor->RunningAllocs = 0;
818 PoolDescriptor->RunningDeAllocs = 0;
819 PoolDescriptor->TotalPages = 0;
820 PoolDescriptor->TotalBytes = 0;
821 PoolDescriptor->TotalBigPages = 0;
822
823 //
824 // Nothing pending for now
825 //
826 PoolDescriptor->PendingFrees = NULL;
827 PoolDescriptor->PendingFreeDepth = 0;
828
829 //
830 // Loop all the descriptor's allocation lists and initialize them
831 //
832 NextEntry = PoolDescriptor->ListHeads;
833 LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
834 while (NextEntry < LastEntry)
835 {
836 ExpInitializePoolListHead(NextEntry);
837 NextEntry++;
838 }
839
840 //
841 // Note that ReactOS does not support session pool yet
842 //
843 ASSERT(PoolType != PagedPoolSession);
844 }
845
846 VOID
847 NTAPI
848 INIT_SECTION
849 InitializePool(IN POOL_TYPE PoolType,
850 IN ULONG Threshold)
851 {
852 PPOOL_DESCRIPTOR Descriptor;
853 SIZE_T TableSize;
854 ULONG i;
855
856 //
857 // Check what kind of pool this is
858 //
859 if (PoolType == NonPagedPool)
860 {
861 //
862 // Compute the track table size and convert it from a power of two to an
863 // actual byte size
864 //
865 // NOTE: On checked builds, we'll assert if the registry table size was
866 // invalid, while on retail builds we'll just break out of the loop at
867 // that point.
868 //
869 TableSize = min(PoolTrackTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
870 for (i = 0; i < 32; i++)
871 {
872 if (TableSize & 1)
873 {
874 ASSERT((TableSize & ~1) == 0);
875 if (!(TableSize & ~1)) break;
876 }
877 TableSize >>= 1;
878 }
879
880 //
881 // If we hit bit 32, then no size was defined in the registry, so
882 // we'll use the default size of 2048 entries.
883 //
884 // Otherwise, use the size from the registry, as long as it's not
885 // smaller than 64 entries.
886 //
887 if (i == 32)
888 {
889 PoolTrackTableSize = 2048;
890 }
891 else
892 {
893 PoolTrackTableSize = max(1 << i, 64);
894 }
895
896 //
897 // Loop trying with the biggest specified size first, and cut it down
898 // by a power of two each iteration in case not enough memory exists
899 //
900 while (TRUE)
901 {
902 //
903 // Do not allow overflow
904 //
905 if ((PoolTrackTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_TABLE)))
906 {
907 PoolTrackTableSize >>= 1;
908 continue;
909 }
910
911 //
912 // Allocate the tracker table and exit the loop if this worked
913 //
914 PoolTrackTable = MiAllocatePoolPages(NonPagedPool,
915 (PoolTrackTableSize + 1) *
916 sizeof(POOL_TRACKER_TABLE));
917 if (PoolTrackTable) break;
918
919 //
920 // Otherwise, as long as we're not down to the last bit, keep
921 // iterating
922 //
923 if (PoolTrackTableSize == 1)
924 {
925 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
926 TableSize,
927 0xFFFFFFFF,
928 0xFFFFFFFF,
929 0xFFFFFFFF);
930 }
931 PoolTrackTableSize >>= 1;
932 }
933
934 //
935 // Add one entry, compute the mask, and zero the table
936 //
937 PoolTrackTableSize++;
938 PoolTrackTableMask = PoolTrackTableSize - 2;
939
940 RtlZeroMemory(PoolTrackTable,
941 PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
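
//
// Worked example (default configuration): a requested size of 2048 becomes
// 2049 once the extra entry is added, and the mask becomes 2047 (0x7FF), so
// ExpComputeHashForTag() yields indices 0-2047 while the final slot stays
// reserved.
//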
942
943 //
944 // Finally, add the most used tags to speed up those allocations
945 //
946 ExpSeedHotTags();
947
948 //
949 // We now do the exact same thing with the tracker table for big pages
950 //
951 TableSize = min(PoolBigPageTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
952 for (i = 0; i < 32; i++)
953 {
954 if (TableSize & 1)
955 {
956 ASSERT((TableSize & ~1) == 0);
957 if (!(TableSize & ~1)) break;
958 }
959 TableSize >>= 1;
960 }
961
962 //
963 // For big pages, the default tracker table is 4096 entries, while the
964 // minimum is still 64
965 //
966 if (i == 32)
967 {
968 PoolBigPageTableSize = 4096;
969 }
970 else
971 {
972 PoolBigPageTableSize = max(1 << i, 64);
973 }
974
975 //
976 // Again, run the exact same loop we ran earlier, but this time for the
977 // big pool tracker instead
978 //
979 while (TRUE)
980 {
981 if ((PoolBigPageTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_BIG_PAGES)))
982 {
983 PoolBigPageTableSize >>= 1;
984 continue;
985 }
986
987 PoolBigPageTable = MiAllocatePoolPages(NonPagedPool,
988 PoolBigPageTableSize *
989 sizeof(POOL_TRACKER_BIG_PAGES));
990 if (PoolBigPageTable) break;
991
992 if (PoolBigPageTableSize == 1)
993 {
994 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
995 TableSize,
996 0xFFFFFFFF,
997 0xFFFFFFFF,
998 0xFFFFFFFF);
999 }
1000
1001 PoolBigPageTableSize >>= 1;
1002 }
1003
1004 //
1005 // An extra entry is not needed for the big pool tracker, so just
1006 // compute the hash and zero it
1007 //
1008 PoolBigPageTableHash = PoolBigPageTableSize - 1;
1009 RtlZeroMemory(PoolBigPageTable,
1010 PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
1011 for (i = 0; i < PoolBigPageTableSize; i++) PoolBigPageTable[i].Va = (PVOID)1;
1012
1013 //
1014 // During development, print this out so we can see what's happening
1015 //
1016 DPRINT("EXPOOL: Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
1017 PoolTrackTable, PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
1018 DPRINT("EXPOOL: Big Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
1019 PoolBigPageTable, PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
1020
1021 //
1022 // Insert the generic tracker for all of big pool
1023 //
1024 ExpInsertPoolTracker('looP',
1025 ROUND_TO_PAGES(PoolBigPageTableSize *
1026 sizeof(POOL_TRACKER_BIG_PAGES)),
1027 NonPagedPool);
1028
1029 //
1030 // No support for NUMA systems at this time
1031 //
1032 ASSERT(KeNumberNodes == 1);
1033
1034 //
1035 // Initialize the tag spinlock
1036 //
1037 KeInitializeSpinLock(&ExpTaggedPoolLock);
1038
1039 //
1040 // Initialize the nonpaged pool descriptor
1041 //
1042 PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
1043 ExInitializePoolDescriptor(PoolVector[NonPagedPool],
1044 NonPagedPool,
1045 0,
1046 Threshold,
1047 NULL);
1048 }
1049 else
1050 {
1051 //
1052 // No support for NUMA systems at this time
1053 //
1054 ASSERT(KeNumberNodes == 1);
1055
1056 //
1057 // Allocate the pool descriptor
1058 //
1059 Descriptor = ExAllocatePoolWithTag(NonPagedPool,
1060 sizeof(KGUARDED_MUTEX) +
1061 sizeof(POOL_DESCRIPTOR),
1062 'looP');
1063 if (!Descriptor)
1064 {
1065 //
1066 // This is really bad...
1067 //
1068 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1069 0,
1070 -1,
1071 -1,
1072 -1);
1073 }
1074
1075 //
1076 // Setup the vector and guarded mutex for paged pool
1077 //
1078 PoolVector[PagedPool] = Descriptor;
1079 ExpPagedPoolMutex = (PKGUARDED_MUTEX)(Descriptor + 1);
1080 ExpPagedPoolDescriptor[0] = Descriptor;
1081 KeInitializeGuardedMutex(ExpPagedPoolMutex);
1082 ExInitializePoolDescriptor(Descriptor,
1083 PagedPool,
1084 0,
1085 Threshold,
1086 ExpPagedPoolMutex);
1087
1088 //
1089 // Insert the generic tracker for all of nonpaged pool
1090 //
1091 ExpInsertPoolTracker('looP',
1092 ROUND_TO_PAGES(PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE)),
1093 NonPagedPool);
1094 }
1095 }
1096
1097 FORCEINLINE
1098 KIRQL
1099 ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
1100 {
1101 //
1102 // Check if this is nonpaged pool
1103 //
1104 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1105 {
1106 //
1107 // Use the queued spin lock
1108 //
1109 return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
1110 }
1111 else
1112 {
1113 //
1114 // Use the guarded mutex
1115 //
1116 KeAcquireGuardedMutex(Descriptor->LockAddress);
1117 return APC_LEVEL;
1118 }
1119 }
1120
1121 FORCEINLINE
1122 VOID
1123 ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor,
1124 IN KIRQL OldIrql)
1125 {
1126 //
1127 // Check if this is nonpaged pool
1128 //
1129 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1130 {
1131 //
1132 // Use the queued spin lock
1133 //
1134 KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
1135 }
1136 else
1137 {
1138 //
1139 // Use the guarded mutex
1140 //
1141 KeReleaseGuardedMutex(Descriptor->LockAddress);
1142 }
1143 }
1144
1145 VOID
1146 NTAPI
1147 ExpGetPoolTagInfoTarget(IN PKDPC Dpc,
1148 IN PVOID DeferredContext,
1149 IN PVOID SystemArgument1,
1150 IN PVOID SystemArgument2)
1151 {
1152 PPOOL_DPC_CONTEXT Context = DeferredContext;
1153 UNREFERENCED_PARAMETER(Dpc);
1154 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
1155
1156 //
1157 // Make sure we win the race, and if we did, copy the data atomically
1158 //
1159 if (KeSignalCallDpcSynchronize(SystemArgument2))
1160 {
1161 RtlCopyMemory(Context->PoolTrackTable,
1162 PoolTrackTable,
1163 Context->PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
1164
1165 //
1166 // This is here because ReactOS does not yet support expansion
1167 //
1168 ASSERT(Context->PoolTrackTableSizeExpansion == 0);
1169 }
1170
1171 //
1172 // Regardless of whether we won or not, we must now synchronize and then
1173 // decrement the barrier since this is one more processor that has completed
1174 // the callback.
1175 //
1176 KeSignalCallDpcSynchronize(SystemArgument2);
1177 KeSignalCallDpcDone(SystemArgument1);
1178 }
1179
1180 NTSTATUS
1181 NTAPI
1182 ExGetPoolTagInfo(IN PSYSTEM_POOLTAG_INFORMATION SystemInformation,
1183 IN ULONG SystemInformationLength,
1184 IN OUT PULONG ReturnLength OPTIONAL)
1185 {
1186 ULONG TableSize, CurrentLength;
1187 ULONG EntryCount;
1188 NTSTATUS Status = STATUS_SUCCESS;
1189 PSYSTEM_POOLTAG TagEntry;
1190 PPOOL_TRACKER_TABLE Buffer, TrackerEntry;
1191 POOL_DPC_CONTEXT Context;
1192 ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
1193
1194 //
1195 // Keep track of how much data the caller's buffer must hold
1196 //
1197 CurrentLength = FIELD_OFFSET(SYSTEM_POOLTAG_INFORMATION, TagInfo);
1198
1199 //
1200 // Initialize the caller's buffer
1201 //
1202 TagEntry = &SystemInformation->TagInfo[0];
1203 SystemInformation->Count = 0;
1204
1205 //
1206 // Capture the number of entries, and the total size needed to make a copy
1207 // of the table
1208 //
1209 EntryCount = (ULONG)PoolTrackTableSize;
1210 TableSize = EntryCount * sizeof(POOL_TRACKER_TABLE);
1211
1212 //
1213 // Allocate the "Generic DPC" temporary buffer
1214 //
1215 Buffer = ExAllocatePoolWithTag(NonPagedPool, TableSize, 'ofnI');
1216 if (!Buffer) return STATUS_INSUFFICIENT_RESOURCES;
1217
1218 //
1219 // Do a "Generic DPC" to atomically retrieve the tag and allocation data
1220 //
1221 Context.PoolTrackTable = Buffer;
1222 Context.PoolTrackTableSize = PoolTrackTableSize;
1223 Context.PoolTrackTableExpansion = NULL;
1224 Context.PoolTrackTableSizeExpansion = 0;
1225 KeGenericCallDpc(ExpGetPoolTagInfoTarget, &Context);
1226
1227 //
1228 // Now parse the results
1229 //
1230 for (TrackerEntry = Buffer; TrackerEntry < (Buffer + EntryCount); TrackerEntry++)
1231 {
1232 //
1233 // If the entry is empty, skip it
1234 //
1235 if (!TrackerEntry->Key) continue;
1236
1237 //
1238 // Otherwise, add one more entry to the caller's buffer, and ensure that
1239 // enough space has been allocated in it
1240 //
1241 SystemInformation->Count++;
1242 CurrentLength += sizeof(*TagEntry);
1243 if (SystemInformationLength < CurrentLength)
1244 {
1245 //
1246 // The caller's buffer is too small, so set a failure code. The
1247 // caller will know the count, as well as how much space is needed.
1248 //
1249 // We do NOT break out of the loop, because we want to keep incrementing
1250 // the Count as well as CurrentLength so that the caller can know the
1251 // final numbers
1252 //
1253 Status = STATUS_INFO_LENGTH_MISMATCH;
1254 }
1255 else
1256 {
1257 //
1258 // Small sanity check that our accounting is working correctly
1259 //
1260 ASSERT(TrackerEntry->PagedAllocs >= TrackerEntry->PagedFrees);
1261 ASSERT(TrackerEntry->NonPagedAllocs >= TrackerEntry->NonPagedFrees);
1262
1263 //
1264 // Return the data into the caller's buffer
1265 //
1266 TagEntry->TagUlong = TrackerEntry->Key;
1267 TagEntry->PagedAllocs = TrackerEntry->PagedAllocs;
1268 TagEntry->PagedFrees = TrackerEntry->PagedFrees;
1269 TagEntry->PagedUsed = TrackerEntry->PagedBytes;
1270 TagEntry->NonPagedAllocs = TrackerEntry->NonPagedAllocs;
1271 TagEntry->NonPagedFrees = TrackerEntry->NonPagedFrees;
1272 TagEntry->NonPagedUsed = TrackerEntry->NonPagedBytes;
1273 TagEntry++;
1274 }
1275 }
1276
1277 //
1278 // Free the "Generic DPC" temporary buffer, return the buffer length and status
1279 //
1280 ExFreePoolWithTag(Buffer, 'ofnI');
1281 if (ReturnLength) *ReturnLength = CurrentLength;
1282 return Status;
1283 }
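
//
// Caller-side sketch (assumed kernel-mode caller, hypothetical buffer): the
// usual pattern is to probe once, read back the required length, and retry
// with a larger buffer:
//
//   Status = ExGetPoolTagInfo(Info, InfoLength, &ReturnedLength);
//   if (Status == STATUS_INFO_LENGTH_MISMATCH)
//   {
//       // reallocate ReturnedLength bytes and call again
//   }
//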
1284
1285 BOOLEAN
1286 NTAPI
1287 ExpAddTagForBigPages(IN PVOID Va,
1288 IN ULONG Key,
1289 IN ULONG NumberOfPages,
1290 IN POOL_TYPE PoolType)
1291 {
1292 ULONG Hash, i = 0;
1293 PVOID OldVa;
1294 KIRQL OldIrql;
1295 SIZE_T TableSize;
1296 PPOOL_TRACKER_BIG_PAGES Entry, EntryEnd, EntryStart;
1297 ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
1298 ASSERT(!(PoolType & SESSION_POOL_MASK));
1299
1300 //
1301 // As the table is expandable, these values must only be read after acquiring
1302 // the lock to avoid a torn access during an expansion
1303 //
1304 Hash = ExpComputePartialHashForAddress(Va);
1305 KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
1306 Hash &= PoolBigPageTableHash;
1307 TableSize = PoolBigPageTableSize;
1308
1309 //
1310 // We loop from the current hash bucket to the end of the table, and then
1311 // roll over to hash bucket 0 and keep going from there. If we come back
1312 // to the beginning, then we attempt expansion at the bottom of the loop
1313 //
1314 EntryStart = Entry = &PoolBigPageTable[Hash];
1315 EntryEnd = &PoolBigPageTable[TableSize];
1316 do
1317 {
1318 //
1319 // Make sure that this is a free entry and attempt to atomically make the
1320 // entry busy now
1321 //
1322 OldVa = Entry->Va;
1323 if (((ULONG_PTR)OldVa & POOL_BIG_TABLE_ENTRY_FREE) &&
1324 (InterlockedCompareExchangePointer(&Entry->Va, Va, OldVa) == OldVa))
1325 {
1326 //
1327 // We now own this entry, write down the size and the pool tag
1328 //
1329 Entry->Key = Key;
1330 Entry->NumberOfPages = NumberOfPages;
1331
1332 //
1333 // Add one more entry to the count, and check whether we are now using
1334 // more than 25% of the table, at which point we would want to expand it
1335 // now to avoid blocking too hard later on.
1336 //
1337 // Note that we only do this if this is at least the 16th time that we
1338 // have lost the race or failed to find a free entry, which implies a
1339 // massive number of concurrent big pool allocations.
1340 //
1341 InterlockedIncrementUL(&ExpPoolBigEntriesInUse);
1342 if ((i >= 16) && (ExpPoolBigEntriesInUse > (TableSize / 4)))
1343 {
1344 DPRINT("Should attempt expansion since we now have %lu entries\n",
1345 ExpPoolBigEntriesInUse);
1346 }
1347
1348 //
1349 // We have our entry, return
1350 //
1351 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1352 return TRUE;
1353 }
1354
1355 //
1356 // We don't have our entry yet, so keep trying, making the entry list
1357 // circular if we reach the last entry. We'll eventually break out of
1358 // the loop once we've rolled over and returned back to our original
1359 // hash bucket
1360 //
1361 i++;
1362 if (++Entry >= EntryEnd) Entry = &PoolBigPageTable[0];
1363 } while (Entry != EntryStart);
1364
1365 //
1366 // This means there's no free hash buckets whatsoever, so we would now have
1367 // to attempt expanding the table
1368 //
1369 DPRINT1("Big pool expansion needed, not implemented!\n");
1370 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1371 return FALSE;
1372 }
1373
1374 ULONG
1375 NTAPI
1376 ExpFindAndRemoveTagBigPages(IN PVOID Va,
1377 OUT PULONG_PTR BigPages,
1378 IN POOL_TYPE PoolType)
1379 {
1380 BOOLEAN FirstTry = TRUE;
1381 SIZE_T TableSize;
1382 KIRQL OldIrql;
1383 ULONG PoolTag, Hash;
1384 PPOOL_TRACKER_BIG_PAGES Entry;
1385 ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
1386 ASSERT(!(PoolType & SESSION_POOL_MASK));
1387
1388 //
1389 // As the table is expandable, these values must only be read after acquiring
1390 // the lock to avoid a torn access during an expansion
1391 //
1392 Hash = ExpComputePartialHashForAddress(Va);
1393 KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
1394 Hash &= PoolBigPageTableHash;
1395 TableSize = PoolBigPageTableSize;
1396
1397 //
1398 // Loop while trying to find this big page allocation
1399 //
1400 while (PoolBigPageTable[Hash].Va != Va)
1401 {
1402 //
1403 // Increment the index until we go past the end of the table
1404 //
1405 if (++Hash >= TableSize)
1406 {
1407 //
1408 // Is this the second time we've tried?
1409 //
1410 if (!FirstTry)
1411 {
1412 //
1413 // This means it was never inserted into the pool table and it
1414 // received the special "BIG" tag -- return that and return 0
1415 // so that the code can ask Mm for the page count instead
1416 //
1417 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1418 *BigPages = 0;
1419 return ' GIB';
1420 }
1421
1422 //
1423 // The first time this happens, reset the hash index and try again
1424 //
1425 Hash = 0;
1426 FirstTry = FALSE;
1427 }
1428 }
1429
1430 //
1431 // Now capture all the information we need from the entry, since after we
1432 // release the lock, the data can change
1433 //
1434 Entry = &PoolBigPageTable[Hash];
1435 *BigPages = Entry->NumberOfPages;
1436 PoolTag = Entry->Key;
1437
1438 //
1439 // Set the free bit, and decrement the number of allocations. Finally, release
1440 // the lock and return the tag that was located
1441 //
1442 InterlockedIncrement((PLONG)&Entry->Va);
1443 InterlockedDecrementUL(&ExpPoolBigEntriesInUse);
1444 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1445 return PoolTag;
1446 }
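
//
// Note (illustrative): the InterlockedIncrement on Entry->Va above is the
// idiomatic way of setting POOL_BIG_TABLE_ENTRY_FREE, because Va is page
// aligned and thus has bit 0 clear; e.g. a hypothetical Va of 0xB0000000
// becomes 0xB0000001, which the allocation path treats as a free entry.
//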
1447
1448 VOID
1449 NTAPI
1450 ExQueryPoolUsage(OUT PULONG PagedPoolPages,
1451 OUT PULONG NonPagedPoolPages,
1452 OUT PULONG PagedPoolAllocs,
1453 OUT PULONG PagedPoolFrees,
1454 OUT PULONG PagedPoolLookasideHits,
1455 OUT PULONG NonPagedPoolAllocs,
1456 OUT PULONG NonPagedPoolFrees,
1457 OUT PULONG NonPagedPoolLookasideHits)
1458 {
1459 ULONG i;
1460 PPOOL_DESCRIPTOR PoolDesc;
1461
1462 //
1463 // Assume all failures
1464 //
1465 *PagedPoolPages = 0;
1466 *PagedPoolAllocs = 0;
1467 *PagedPoolFrees = 0;
1468
1469 //
1470 // Tally up the totals for all the paged pools
1471 //
1472 for (i = 0; i < ExpNumberOfPagedPools + 1; i++)
1473 {
1474 PoolDesc = ExpPagedPoolDescriptor[i];
1475 *PagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1476 *PagedPoolAllocs += PoolDesc->RunningAllocs;
1477 *PagedPoolFrees += PoolDesc->RunningDeAllocs;
1478 }
1479
1480 //
1481 // The first non-paged pool has a hardcoded well-known descriptor name
1482 //
1483 PoolDesc = &NonPagedPoolDescriptor;
1484 *NonPagedPoolPages = PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1485 *NonPagedPoolAllocs = PoolDesc->RunningAllocs;
1486 *NonPagedPoolFrees = PoolDesc->RunningDeAllocs;
1487
1488 //
1489 // If the system has more than one non-paged pool, copy the other descriptor
1490 // totals as well
1491 //
1492 #if 0
1493 if (ExpNumberOfNonPagedPools > 1)
1494 {
1495 for (i = 0; i < ExpNumberOfNonPagedPools; i++)
1496 {
1497 PoolDesc = ExpNonPagedPoolDescriptor[i];
1498 *NonPagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1499 *NonPagedPoolAllocs += PoolDesc->RunningAllocs;
1500 *NonPagedPoolFrees += PoolDesc->RunningDeAllocs;
1501 }
1502 }
1503 #endif
1504
1505 //
1506 // FIXME: Not yet supported
1507 //
1508 *NonPagedPoolLookasideHits += 0;
1509 *PagedPoolLookasideHits += 0;
1510 }
1511
1512 VOID
1513 NTAPI
1514 ExReturnPoolQuota(IN PVOID P)
1515 {
1516 PPOOL_HEADER Entry;
1517 POOL_TYPE PoolType;
1518 USHORT BlockSize;
1519 PEPROCESS Process;
1520
1521 if ((ExpPoolFlags & POOL_FLAG_SPECIAL_POOL) &&
1522 (MmIsSpecialPoolAddress(P)))
1523 {
1524 return;
1525 }
1526
1527 Entry = P;
1528 Entry--;
1529 ASSERT((ULONG_PTR)Entry % POOL_BLOCK_SIZE == 0);
1530
1531 PoolType = Entry->PoolType - 1;
1532 BlockSize = Entry->BlockSize;
1533
1534 if (PoolType & QUOTA_POOL_MASK)
1535 {
1536 Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
1537 ASSERT(Process != NULL);
1538 if (Process)
1539 {
1540 if (Process->Pcb.Header.Type != ProcessObject)
1541 {
1542 DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
1543 Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
1544 KeBugCheckEx(BAD_POOL_CALLER,
1545 0x0D,
1546 (ULONG_PTR)P,
1547 Entry->PoolTag,
1548 (ULONG_PTR)Process);
1549 }
1550 ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
1551 PsReturnPoolQuota(Process,
1552 PoolType & BASE_POOL_TYPE_MASK,
1553 BlockSize * POOL_BLOCK_SIZE);
1554 ObDereferenceObject(Process);
1555 }
1556 }
1557 }
1558
1559 /* PUBLIC FUNCTIONS ***********************************************************/
1560
1561 /*
1562 * @implemented
1563 */
1564 PVOID
1565 NTAPI
1566 ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
1567 IN SIZE_T NumberOfBytes,
1568 IN ULONG Tag)
1569 {
1570 PPOOL_DESCRIPTOR PoolDesc;
1571 PLIST_ENTRY ListHead;
1572 PPOOL_HEADER Entry, NextEntry, FragmentEntry;
1573 KIRQL OldIrql;
1574 USHORT BlockSize, i;
1575 ULONG OriginalType;
1576 PKPRCB Prcb = KeGetCurrentPrcb();
1577 PGENERAL_LOOKASIDE LookasideList;
1578
1579 //
1580 // Some sanity checks
1581 //
1582 ASSERT(Tag != 0);
1583 ASSERT(Tag != ' GIB');
1584 ASSERT(NumberOfBytes != 0);
1585 ExpCheckPoolIrqlLevel(PoolType, NumberOfBytes, NULL);
1586
1587 //
1588 // Not supported in ReactOS
1589 //
1590 ASSERT(!(PoolType & SESSION_POOL_MASK));
1591
1592 //
1593 // Check if verifier or special pool is enabled
1594 //
1595 if (ExpPoolFlags & (POOL_FLAG_VERIFIER | POOL_FLAG_SPECIAL_POOL))
1596 {
1597 //
1598 // For verifier, we should call the verification routine
1599 //
1600 if (ExpPoolFlags & POOL_FLAG_VERIFIER)
1601 {
1602 DPRINT1("Driver Verifier is not yet supported\n");
1603 }
1604
1605 //
1606 // For special pool, we check if this is a suitable allocation and do
1607 // the special allocation if needed
1608 //
1609 if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
1610 {
1611 //
1612 // Check if this is a special pool allocation
1613 //
1614 if (MmUseSpecialPool(NumberOfBytes, Tag))
1615 {
1616 //
1617 // Try to allocate using special pool
1618 //
1619 Entry = MmAllocateSpecialPool(NumberOfBytes, Tag, PoolType, 2);
1620 if (Entry) return Entry;
1621 }
1622 }
1623 }
1624
1625 //
1626 // Get the pool type and its corresponding vector for this request
1627 //
1628 OriginalType = PoolType;
1629 PoolType = PoolType & BASE_POOL_TYPE_MASK;
1630 PoolDesc = PoolVector[PoolType];
1631 ASSERT(PoolDesc != NULL);
1632
1633 //
1634 // Check if this is a big page allocation
1635 //
1636 if (NumberOfBytes > POOL_MAX_ALLOC)
1637 {
1638 //
1639 // Allocate pages for it
1640 //
1641 Entry = MiAllocatePoolPages(OriginalType, NumberOfBytes);
1642 if (!Entry)
1643 {
1644 //
1645 // Must succeed pool is deprecated, but still supported. These allocation
1646 // failures must cause an immediate bugcheck
1647 //
1648 if (OriginalType & MUST_SUCCEED_POOL_MASK)
1649 {
1650 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1651 NumberOfBytes,
1652 NonPagedPoolDescriptor.TotalPages,
1653 NonPagedPoolDescriptor.TotalBigPages,
1654 0);
1655 }
1656
1657 //
1658 // Internal debugging
1659 //
1660 ExPoolFailures++;
1661
1662 //
1663 // This flag requests printing failures, and can also further specify
1664 // breaking on failures
1665 //
1666 if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
1667 {
1668 DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
1669 NumberOfBytes,
1670 OriginalType);
1671 if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
1672 }
1673
1674 //
1675 // Finally, this flag requests an exception, which we are more than
1676 // happy to raise!
1677 //
1678 if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
1679 {
1680 ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
1681 }
1682
1683 return NULL;
1684 }
1685
1686 //
1687 // Increment required counters
1688 //
1689 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
1690 (LONG)BYTES_TO_PAGES(NumberOfBytes));
1691 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, NumberOfBytes);
1692 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
1693
1694 //
1695 // Add a tag for the big page allocation and switch to the generic "BIG"
1696 // tag if we failed to do so, then insert a tracker for this allocation.
1697 //
1698 if (!ExpAddTagForBigPages(Entry,
1699 Tag,
1700 (ULONG)BYTES_TO_PAGES(NumberOfBytes),
1701 OriginalType))
1702 {
1703 Tag = ' GIB';
1704 }
1705 ExpInsertPoolTracker(Tag, ROUND_TO_PAGES(NumberOfBytes), OriginalType);
1706 return Entry;
1707 }
1708
1709 //
1710 // Should never request 0 bytes from the pool, but since so many drivers do
1711 // it, we'll just assume they want 1 byte, based on NT's similar behavior
1712 //
1713 if (!NumberOfBytes) NumberOfBytes = 1;
1714
1715 //
1716 // A pool allocation is defined by its data, a linked list to connect it to
1717 // the free list (if necessary), and a pool header to store accounting info.
1718 // Calculate this size, then convert it into a block size (units of pool
1719 // headers)
1720 //
1721 // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
1722 // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
1723 // the direct allocation of pages.
1724 //
1725 i = (USHORT)((NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1))
1726 / POOL_BLOCK_SIZE);
1727 ASSERT(i < POOL_LISTS_PER_PAGE);
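
//
// Worked example (assuming 32-bit builds, where POOL_BLOCK_SIZE and
// sizeof(POOL_HEADER) are both 8): a 24-byte request yields
//   i = (24 + 8 + 7) / 8 = 4 blocks,
// i.e. a 32-byte block including its header.
//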
1728
1729 //
1730 // Handle lookaside list optimization for both paged and nonpaged pool
1731 //
1732 if (i <= NUMBER_POOL_LOOKASIDE_LISTS)
1733 {
1734 //
1735 // Try popping it from the per-CPU lookaside list
1736 //
1737 LookasideList = (PoolType == PagedPool) ?
1738 Prcb->PPPagedLookasideList[i - 1].P :
1739 Prcb->PPNPagedLookasideList[i - 1].P;
1740 LookasideList->TotalAllocates++;
1741 Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
1742 if (!Entry)
1743 {
1744 //
1745 // We failed, try popping it from the global list
1746 //
1747 LookasideList = (PoolType == PagedPool) ?
1748 Prcb->PPPagedLookasideList[i - 1].L :
1749 Prcb->PPNPagedLookasideList[i - 1].L;
1750 LookasideList->TotalAllocates++;
1751 Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
1752 }
1753
1754 //
1755 // If we were able to pop it, update the accounting and return the block
1756 //
1757 if (Entry)
1758 {
1759 LookasideList->AllocateHits++;
1760
1761 //
1762 // Get the real entry, write down its pool type, and track it
1763 //
1764 Entry--;
1765 Entry->PoolType = OriginalType + 1;
1766 ExpInsertPoolTracker(Tag,
1767 Entry->BlockSize * POOL_BLOCK_SIZE,
1768 OriginalType);
1769
1770 //
1771 // Return the pool allocation
1772 //
1773 Entry->PoolTag = Tag;
1774 (POOL_FREE_BLOCK(Entry))->Flink = NULL;
1775 (POOL_FREE_BLOCK(Entry))->Blink = NULL;
1776 return POOL_FREE_BLOCK(Entry);
1777 }
1778 }
1779
1780 //
1781 // Loop in the free lists looking for a block of this size. Start with the
1782 // list optimized for this kind of size lookup
1783 //
1784 ListHead = &PoolDesc->ListHeads[i];
1785 do
1786 {
1787 //
1788 // Are there any free entries available on this list?
1789 //
1790 if (!ExpIsPoolListEmpty(ListHead))
1791 {
1792 //
1793 // Acquire the pool lock now
1794 //
1795 OldIrql = ExLockPool(PoolDesc);
1796
1797 //
1798 // And make sure the list still has entries
1799 //
1800 if (ExpIsPoolListEmpty(ListHead))
1801 {
1802 //
1803 // Someone raced us (and won) before we had a chance to acquire
1804 // the lock.
1805 //
1806 // Try again!
1807 //
1808 ExUnlockPool(PoolDesc, OldIrql);
1809 continue;
1810 }
1811
1812 //
1813 // Remove a free entry from the list
1814 // Note that due to the way we insert free blocks into multiple lists
1815 // there is a guarantee that any block on this list will either be
1816 // of the correct size, or perhaps larger.
1817 //
1818 ExpCheckPoolLinks(ListHead);
1819 Entry = POOL_ENTRY(ExpRemovePoolHeadList(ListHead));
1820 ExpCheckPoolLinks(ListHead);
1821 ExpCheckPoolBlocks(Entry);
1822 ASSERT(Entry->BlockSize >= i);
1823 ASSERT(Entry->PoolType == 0);
1824
1825 //
1826 // Check if this block is larger than what we need. The block could
1827 // not possibly be smaller, due to the reason explained above (and
1828 // we would've asserted on a checked build if this was the case).
1829 //
1830 if (Entry->BlockSize != i)
1831 {
1832 //
1833 // Is there an entry before this one?
1834 //
1835 if (Entry->PreviousSize == 0)
1836 {
1837 //
1838 // There isn't anyone before us, so take the next block and
1839 // turn it into a fragment that contains the leftover data
1840 // that we don't need to satisfy the caller's request
1841 //
1842 FragmentEntry = POOL_BLOCK(Entry, i);
1843 FragmentEntry->BlockSize = Entry->BlockSize - i;
1844
1845 //
1846 // And make it point back to us
1847 //
1848 FragmentEntry->PreviousSize = i;
1849
1850 //
1851 // Now get the block that follows the new fragment and check
1852 // if it's still on the same page as us (and not at the end)
1853 //
1854 NextEntry = POOL_NEXT_BLOCK(FragmentEntry);
1855 if (PAGE_ALIGN(NextEntry) != NextEntry)
1856 {
1857 //
1858 // Adjust this next block to point to our newly created
1859 // fragment block
1860 //
1861 NextEntry->PreviousSize = FragmentEntry->BlockSize;
1862 }
1863 }
1864 else
1865 {
1866 //
1867 // There is a free entry before us, which we know is smaller
1868 // so we'll make this entry the fragment instead
1869 //
1870 FragmentEntry = Entry;
1871
1872 //
1873 // And then we'll remove from it the actual size required.
1874 // Now the entry is a leftover free fragment
1875 //
1876 Entry->BlockSize -= i;
1877
1878 //
1879 // Now let's go to the next entry after the fragment (which
1880 // used to point to our original free entry) and make it
1881 // reference the new fragment entry instead.
1882 //
1883 // This is the entry that will actually end up holding the
1884 // allocation!
1885 //
1886 Entry = POOL_NEXT_BLOCK(Entry);
1887 Entry->PreviousSize = FragmentEntry->BlockSize;
1888
1889 //
1890 // And now let's go to the entry after that one and check if
1891 // it's still on the same page, and not at the end
1892 //
1893 NextEntry = POOL_BLOCK(Entry, i);
1894 if (PAGE_ALIGN(NextEntry) != NextEntry)
1895 {
1896 //
1897 // Make it reference the allocation entry
1898 //
1899 NextEntry->PreviousSize = i;
1900 }
1901 }
1902
1903 //
1904 // Now our (allocation) entry is the right size
1905 //
1906 Entry->BlockSize = i;
1907
1908 //
1909 // And the next entry is now the free fragment which contains
1910 // the remaining difference between how big the original entry
1911 // was, and the actual size the caller needs/requested.
1912 //
1913 FragmentEntry->PoolType = 0;
1914 BlockSize = FragmentEntry->BlockSize;
1915
1916 //
1917 // Now check if enough free bytes remained for us to have a
1918 // "full" entry, which contains enough bytes for a linked list
1919 // and thus can be used for allocations (up to 8 bytes...)
1920 //
1921 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
1922 if (BlockSize != 1)
1923 {
1924 //
1925 // Insert the free entry into the free list for this size
1926 //
1927 ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
1928 POOL_FREE_BLOCK(FragmentEntry));
1929 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
1930 }
1931 }
1932
1933 //
1934 // We have found an entry for this allocation, so set the pool type
1935 // and release the lock since we're done
1936 //
1937 Entry->PoolType = OriginalType + 1;
1938 ExpCheckPoolBlocks(Entry);
1939 ExUnlockPool(PoolDesc, OldIrql);
1940
1941 //
1942 // Increment required counters
1943 //
1944 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
1945 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
1946
1947 //
1948 // Track this allocation
1949 //
1950 ExpInsertPoolTracker(Tag,
1951 Entry->BlockSize * POOL_BLOCK_SIZE,
1952 OriginalType);
1953
1954 //
1955 // Return the pool allocation
1956 //
1957 Entry->PoolTag = Tag;
1958 (POOL_FREE_BLOCK(Entry))->Flink = NULL;
1959 (POOL_FREE_BLOCK(Entry))->Blink = NULL;
1960 return POOL_FREE_BLOCK(Entry);
1961 }
1962 } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);
1963
1964 //
1965 // There were no free entries left, so we have to allocate a new fresh page
1966 //
1967 Entry = MiAllocatePoolPages(OriginalType, PAGE_SIZE);
1968 if (!Entry)
1969 {
1970 //
1971 // Must succeed pool is deprecated, but still supported. These allocation
1972 // failures must cause an immediate bugcheck
1973 //
1974 if (OriginalType & MUST_SUCCEED_POOL_MASK)
1975 {
1976 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1977 PAGE_SIZE,
1978 NonPagedPoolDescriptor.TotalPages,
1979 NonPagedPoolDescriptor.TotalBigPages,
1980 0);
1981 }
1982
1983 //
1984 // Internal debugging
1985 //
1986 ExPoolFailures++;
1987
1988 //
1989 // This flag requests printing failures, and can also further specify
1990 // breaking on failures
1991 //
1992 if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
1993 {
1994 DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
1995 NumberOfBytes,
1996 OriginalType);
1997 if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
1998 }
1999
2000 //
2001 // Finally, this flag requests an exception, which we are more than
2002 // happy to raise!
2003 //
2004 if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
2005 {
2006 ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
2007 }
2008
2009 //
2010 // Return NULL to the caller in all other cases
2011 //
2012 return NULL;
2013 }
2014
2015 //
2016 // Setup the entry data
2017 //
2018 Entry->Ulong1 = 0;
2019 Entry->BlockSize = i;
2020 Entry->PoolType = OriginalType + 1;
2021
2022 //
2023 // This page will have two entries -- one for the allocation (which we just
2024 // created above), and one for the remaining free bytes, which we're about
2025 // to create now. The free bytes are the whole page minus what was allocated
2026 // and then converted into units of block headers.
2027 //
2028 BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i;
2029 FragmentEntry = POOL_BLOCK(Entry, i);
2030 FragmentEntry->Ulong1 = 0;
2031 FragmentEntry->BlockSize = BlockSize;
2032 FragmentEntry->PreviousSize = i;
2033
2034 //
2035 // Increment required counters
2036 //
2037 InterlockedIncrement((PLONG)&PoolDesc->TotalPages);
2038 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
2039
2040 //
2041 // Now check if enough free bytes remained for us to have a "full" entry,
2042 // which contains enough bytes for a linked list and thus can be used for
2043 // allocations (up to 8 bytes...)
2044 //
2045 if (FragmentEntry->BlockSize != 1)
2046 {
2047 //
2048 // Excellent -- acquire the pool lock
2049 //
2050 OldIrql = ExLockPool(PoolDesc);
2051
2052 //
2053 // And insert the free entry into the free list for this block size
2054 //
2055 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
2056 ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
2057 POOL_FREE_BLOCK(FragmentEntry));
2058 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
2059
2060 //
2061 // Release the pool lock
2062 //
2063 ExpCheckPoolBlocks(Entry);
2064 ExUnlockPool(PoolDesc, OldIrql);
2065 }
2066 else
2067 {
2068 //
2069 // Simply do a sanity check
2070 //
2071 ExpCheckPoolBlocks(Entry);
2072 }
2073
2074 //
2075 // Increment performance counters and track this allocation
2076 //
2077 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
2078 ExpInsertPoolTracker(Tag,
2079 Entry->BlockSize * POOL_BLOCK_SIZE,
2080 OriginalType);
2081
2082 //
2083 // And return the pool allocation
2084 //
2085 ExpCheckPoolBlocks(Entry);
2086 Entry->PoolTag = Tag;
2087 return POOL_FREE_BLOCK(Entry);
2088 }
2089
2090 /*
2091 * @implemented
2092 */
2093 PVOID
2094 NTAPI
2095 ExAllocatePool(POOL_TYPE PoolType,
2096 SIZE_T NumberOfBytes)
2097 {
2098 ULONG Tag = TAG_NONE;
2099 #if 0 && DBG
2100 PLDR_DATA_TABLE_ENTRY LdrEntry;
2101
2102 /* Use the first four letters of the driver name, or "None" if unavailable */
2103 LdrEntry = KeGetCurrentIrql() <= APC_LEVEL
2104 ? MiLookupDataTableEntry(_ReturnAddress())
2105 : NULL;
2106 if (LdrEntry)
2107 {
2108 ULONG i;
2109 Tag = 0;
2110 for (i = 0; i < min(4, LdrEntry->BaseDllName.Length / sizeof(WCHAR)); i++)
2111 Tag = Tag >> 8 | (LdrEntry->BaseDllName.Buffer[i] & 0xff) << 24;
2112 for (; i < 4; i++)
2113 Tag = Tag >> 8 | ' ' << 24;
2114 }
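/* Hypothetical example: a driver named "NTFS.SYS" would yield
   Tag = 0x5346544E, which reads "NTFS" in memory on a little-endian
   machine -- the conventional pool tag byte order */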
2115 #endif
2116 return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2117 }
2118
2119 /*
2120 * @implemented
2121 */
2122 VOID
2123 NTAPI
2124 ExFreePoolWithTag(IN PVOID P,
2125 IN ULONG TagToFree)
2126 {
2127 PPOOL_HEADER Entry, NextEntry;
2128 USHORT BlockSize;
2129 KIRQL OldIrql;
2130 POOL_TYPE PoolType;
2131 PPOOL_DESCRIPTOR PoolDesc;
2132 ULONG Tag;
2133 BOOLEAN Combined = FALSE;
2134 PFN_NUMBER PageCount, RealPageCount;
2135 PKPRCB Prcb = KeGetCurrentPrcb();
2136 PGENERAL_LOOKASIDE LookasideList;
2137 PEPROCESS Process;
2138
2139 //
2140 // Check if any of the debug flags are enabled
2141 //
2142 if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
2143 POOL_FLAG_CHECK_WORKERS |
2144 POOL_FLAG_CHECK_RESOURCES |
2145 POOL_FLAG_VERIFIER |
2146 POOL_FLAG_CHECK_DEADLOCK |
2147 POOL_FLAG_SPECIAL_POOL))
2148 {
2149 //
2150 // Check if special pool is enabled
2151 //
2152 if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
2153 {
2154 //
2155 // Check if it was allocated from a special pool
2156 //
2157 if (MmIsSpecialPoolAddress(P))
2158 {
2159 //
2160 // Was deadlock verification also enabled? We can do some extra
2161 // checks at this point
2162 //
2163 if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
2164 {
2165 DPRINT1("Verifier not yet supported\n");
2166 }
2167
2168 //
2169 // It is, so handle it via special pool free routine
2170 //
2171 MmFreeSpecialPool(P);
2172 return;
2173 }
2174 }
2175
2176 //
2177 // For non-big page allocations, we'll do a bunch of checks in here
2178 //
2179 if (PAGE_ALIGN(P) != P)
2180 {
2181 //
2182 // Get the entry for this pool allocation
2183 // The pointer math here may look wrong or confusing, but it is quite right
2184 //
2185 Entry = P;
2186 Entry--;
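//
// (Entry is typed PPOOL_HEADER, so the decrement steps back exactly
// sizeof(POOL_HEADER) bytes to the header preceding the caller's data)
//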
2187
2188 //
2189 // Get the pool type
2190 //
2191 PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2192
2193 //
2194 // FIXME: Many other debugging checks go here
2195 //
2196 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2197 }
2198 }
2199
2200 //
2201 // Check if this is a big page allocation
2202 //
2203 if (PAGE_ALIGN(P) == P)
2204 {
2205 //
2206 // We need to find the tag for it, so first we need to find out what
2207 // kind of allocation this was (paged or nonpaged), then we can go
2208 // ahead and try finding the tag for it. Remember to get rid of the
2209 // PROTECTED_POOL tag if it's found.
2210 //
2211 // Note that if at insertion time, we failed to add the tag for a big
2212 // pool allocation, we used a special tag called 'BIG' to identify the
2213 // allocation, and we may get this tag back. In this scenario, we must
2214 // manually get the size of the allocation by actually counting through
2215 // the PFN database.
2216 //
2217 PoolType = MmDeterminePoolType(P);
2218 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2219 Tag = ExpFindAndRemoveTagBigPages(P, &PageCount, PoolType);
2220 if (!Tag)
2221 {
2222 DPRINT1("We do not know the size of this allocation. This is not yet supported\n");
2223 ASSERT(Tag == ' GIB');
2224 PageCount = 1; // We are going to lie! This might screw up accounting?
2225 }
2226 else if (Tag & PROTECTED_POOL)
2227 {
2228 Tag &= ~PROTECTED_POOL;
2229 }
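//
// Note on the ' GIB' literal above: multi-character constants put the
// first character in the high byte, so on a little-endian machine the
// value reads "BIG " in memory -- the fallback tag described earlier
//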
2230
2231 //
2232 // Check block tag
2233 //
2234 if (TagToFree && TagToFree != Tag)
2235 {
2236 DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
2237 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
2238 }
2239
2240 //
2241 // We have our tag and our page count, so we can go ahead and remove this
2242 // tracker now
2243 //
2244 ExpRemovePoolTracker(Tag, PageCount << PAGE_SHIFT, PoolType);
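//
// (PageCount << PAGE_SHIFT converts pages to bytes; with 4KB pages,
// PAGE_SHIFT is 12, so e.g. 3 pages -> 3 << 12 = 12288 bytes)
//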
2245
2246 //
2247 // Check if any of the debug flags are enabled
2248 //
2249 if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
2250 POOL_FLAG_CHECK_WORKERS |
2251 POOL_FLAG_CHECK_RESOURCES |
2252 POOL_FLAG_CHECK_DEADLOCK))
2253 {
2254 //
2255 // Was deadlock verification also enabled? We can do some extra
2256 // checks at this point
2257 //
2258 if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
2259 {
2260 DPRINT1("Verifier not yet supported\n");
2261 }
2262
2263 //
2264 // FIXME: Many debugging checks go here
2265 //
2266 }
2267
2268 //
2269 // Update counters
2270 //
2271 PoolDesc = PoolVector[PoolType];
2272 InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
2273 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes,
2274 -(LONG_PTR)(PageCount << PAGE_SHIFT));
2275
2276 //
2277 // Do the real free now and update the last counter with the big page count
2278 //
2279 RealPageCount = MiFreePoolPages(P);
2280 ASSERT(RealPageCount == PageCount);
2281 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
2282 -(LONG)RealPageCount);
2283 return;
2284 }
2285
2286 //
2287 // Get the entry for this pool allocation
2288 // The pointer math here may look wrong or confusing, but it is quite right
2289 //
2290 Entry = P;
2291 Entry--;
2292 ASSERT((ULONG_PTR)Entry % POOL_BLOCK_SIZE == 0);
2293
2294 //
2295 // Get the size of the entry, and its pool type, then load the descriptor
2296 // for this pool type
2297 //
2298 BlockSize = Entry->BlockSize;
2299 PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2300 PoolDesc = PoolVector[PoolType];
2301
2302 //
2303 // Make sure that the IRQL makes sense
2304 //
2305 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2306
2307 //
2308 // Get the pool tag and get rid of the PROTECTED_POOL flag
2309 //
2310 Tag = Entry->PoolTag;
2311 if (Tag & PROTECTED_POOL) Tag &= ~PROTECTED_POOL;
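//
// (PROTECTED_POOL is the high tag bit; it only marks the block as
// protected and is not part of the four-character tag itself, so it
// must be stripped before any comparison)
//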
2312
2313 //
2314 // Check block tag
2315 //
2316 if (TagToFree && TagToFree != Tag)
2317 {
2318 DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
2319 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
2320 }
2321
2322 //
2323 // Track the removal of this allocation
2324 //
2325 ExpRemovePoolTracker(Tag,
2326 BlockSize * POOL_BLOCK_SIZE,
2327 Entry->PoolType - 1);
2328
2329 //
2330 // Release pool quota, if any
2331 //
2332 if ((Entry->PoolType - 1) & QUOTA_POOL_MASK)
2333 {
2334 Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
2335 if (Process)
2336 {
2337 if (Process->Pcb.Header.Type != ProcessObject)
2338 {
2339 DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
2340 Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
2341 KeBugCheckEx(BAD_POOL_CALLER,
2342 0x0D,
2343 (ULONG_PTR)P,
2344 Tag,
2345 (ULONG_PTR)Process);
2346 }
2347 PsReturnPoolQuota(Process, PoolType, BlockSize * POOL_BLOCK_SIZE);
2348 ObDereferenceObject(Process);
2349 }
2350 }
2351
2352 //
2353 // Is this allocation small enough to have come from a lookaside list?
2354 //
2355 if (BlockSize <= NUMBER_POOL_LOOKASIDE_LISTS)
2356 {
2357 //
2358 // Try pushing it into the per-CPU lookaside list
2359 //
2360 LookasideList = (PoolType == PagedPool) ?
2361 Prcb->PPPagedLookasideList[BlockSize - 1].P :
2362 Prcb->PPNPagedLookasideList[BlockSize - 1].P;
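//
// (each PP_LOOKASIDE_LIST pairs two lists: P is the processor-private
// list tried first, L is the shared system-wide fallback used below)
//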
2363 LookasideList->TotalFrees++;
2364 if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2365 {
2366 LookasideList->FreeHits++;
2367 InterlockedPushEntrySList(&LookasideList->ListHead, P);
2368 return;
2369 }
2370
2371 //
2372 // We failed, try to push it into the global lookaside list
2373 //
2374 LookasideList = (PoolType == PagedPool) ?
2375 Prcb->PPPagedLookasideList[BlockSize - 1].L :
2376 Prcb->PPNPagedLookasideList[BlockSize - 1].L;
2377 LookasideList->TotalFrees++;
2378 if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2379 {
2380 LookasideList->FreeHits++;
2381 InterlockedPushEntrySList(&LookasideList->ListHead, P);
2382 return;
2383 }
2384 }
2385
2386 //
2387 // Get the pointer to the next entry
2388 //
2389 NextEntry = POOL_BLOCK(Entry, BlockSize);
2390
2391 //
2392 // Update performance counters
2393 //
2394 InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
2395 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, -BlockSize * POOL_BLOCK_SIZE);
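//
// (there is no interlocked subtract for SIZE_T, so the byte count is
// negated and added, relying on two's complement wraparound)
//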
2396
2397 //
2398 // Acquire the pool lock
2399 //
2400 OldIrql = ExLockPool(PoolDesc);
2401
2402 //
2403 // Check if the next allocation is at the end of the page
2404 //
2405 ExpCheckPoolBlocks(Entry);
2406 if (PAGE_ALIGN(NextEntry) != NextEntry)
2407 {
2408 //
2409 // We may be able to combine the block if it's free
2410 //
2411 if (NextEntry->PoolType == 0)
2412 {
2413 //
2414 // The next block is free, so we'll do a combine
2415 //
2416 Combined = TRUE;
2417
2418 //
2419 // Make sure there's actual data in the block -- anything smaller
2420 // than this means we only have the header, so there's no linked list
2421 // for us to remove
2422 //
2423 if (NextEntry->BlockSize != 1)
2424 {
2425 //
2426 // The block is at least big enough to have a linked list, so go
2427 // ahead and remove it
2428 //
2429 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
2430 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
2431 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
2432 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
2433 }
2434
2435 //
2436 // Our entry is now combined with the next entry
2437 //
2438 Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
2439 }
2440 }
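//
// Worked example: if we freed a 4-unit block and the 6-unit block
// following it on the page was already free, the merge above leaves a
// single 10-unit region headed by our entry; the check below may then
// fold that region into a free predecessor as well
//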
2441
2442 //
2443 // Now check if there was a previous entry on the same page as us
2444 //
2445 if (Entry->PreviousSize)
2446 {
2447 //
2448 // Great, grab that entry and check if it's free
2449 //
2450 NextEntry = POOL_PREV_BLOCK(Entry);
2451 if (NextEntry->PoolType == 0)
2452 {
2453 //
2454 // It is, so we can do a combine
2455 //
2456 Combined = TRUE;
2457
2458 //
2459 // Make sure there's actual data in the block -- anything smaller
2460 // than this means we only have the header so there's no linked list
2461 // for us to remove
2462 //
2463 if (NextEntry->BlockSize != 1)
2464 {
2465 //
2466 // The block is at least big enough to have a linked list, so go
2467 // ahead and remove it
2468 //
2469 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
2470 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
2471 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
2472 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
2473 }
2474
2475 //
2476 // Combine our original block (which might've already been combined
2477 // with the next block), into the previous block
2478 //
2479 NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;
2480
2481 //
2482 // And now we'll work with the previous block instead
2483 //
2484 Entry = NextEntry;
2485 }
2486 }
2487
2488 //
2489 // By now, it may have been possible for our combined blocks to actually
2490 // have made up a full page (if there were only 2-3 allocations on the
2491 // page, they could've all been combined).
2492 //
2493 if ((PAGE_ALIGN(Entry) == Entry) &&
2494 (PAGE_ALIGN(POOL_NEXT_BLOCK(Entry)) == POOL_NEXT_BLOCK(Entry)))
2495 {
2496 //
2497 // In this case, release the pool lock, update the performance counter,
2498 // and free the page
2499 //
2500 ExUnlockPool(PoolDesc, OldIrql);
2501 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalPages, -1);
2502 MiFreePoolPages(Entry);
2503 return;
2504 }
2505
2506 //
2507 // Otherwise, we now have a free block (or a combination of 2 or 3)
2508 //
2509 Entry->PoolType = 0;
2510 BlockSize = Entry->BlockSize;
2511 ASSERT(BlockSize != 1);
2512
2513 //
2514 // Check if we actually did combine it with anyone
2515 //
2516 if (Combined)
2517 {
2518 //
2519 // Get the first combined block (either our original to begin with, or
2520 // the one after the original, depending if we combined with the previous)
2521 //
2522 NextEntry = POOL_NEXT_BLOCK(Entry);
2523
2524 //
2525 // As long as the next block isn't on a page boundary, have it point
2526 // back to us
2527 //
2528 if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
2529 }
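//
// (keeping PreviousSize in sync is what allows POOL_PREV_BLOCK to walk
// backwards from any header, so every merge must fix up the follower's
// back-link exactly as done above)
//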
2530
2531 //
2532 // Insert this new free block, and release the pool lock
2533 //
2534 ExpInsertPoolHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
2535 ExpCheckPoolLinks(POOL_FREE_BLOCK(Entry));
2536 ExUnlockPool(PoolDesc, OldIrql);
2537 }
2538
2539 /*
2540 * @implemented
2541 */
2542 VOID
2543 NTAPI
2544 ExFreePool(PVOID P)
2545 {
2546 //
2547 // Just free without checking for the tag
2548 //
2549 ExFreePoolWithTag(P, 0);
2550 }
2551
2552 /*
2553 * @unimplemented
2554 */
2555 SIZE_T
2556 NTAPI
2557 ExQueryPoolBlockSize(IN PVOID PoolBlock,
2558 OUT PBOOLEAN QuotaCharged)
2559 {
2560 //
2561 // Not implemented
2562 //
2563 UNIMPLEMENTED;
2564 return 0;
2565 }
2566
2567 /*
2568 * @implemented
2569 */
2571 PVOID
2572 NTAPI
2573 ExAllocatePoolWithQuota(IN POOL_TYPE PoolType,
2574 IN SIZE_T NumberOfBytes)
2575 {
2576 //
2577 // Allocate the pool
2578 //
2579 return ExAllocatePoolWithQuotaTag(PoolType, NumberOfBytes, TAG_NONE);
2580 }
2581
2582 /*
2583 * @implemented
2584 */
2585 PVOID
2586 NTAPI
2587 ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType,
2588 IN SIZE_T NumberOfBytes,
2589 IN ULONG Tag,
2590 IN EX_POOL_PRIORITY Priority)
2591 {
2592 PVOID Buffer;
2593
2594 //
2595 // Allocate the pool
2596 //
2597 Buffer = ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2598 if (Buffer == NULL)
2599 {
2600 UNIMPLEMENTED;
2601 }
2602
2603 return Buffer;
2604 }
2605
2606 /*
2607 * @implemented
2608 */
2609 PVOID
2610 NTAPI
2611 ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType,
2612 IN SIZE_T NumberOfBytes,
2613 IN ULONG Tag)
2614 {
2615 BOOLEAN Raise = TRUE;
2616 PVOID Buffer;
2617 PPOOL_HEADER Entry;
2618 NTSTATUS Status;
2619 PEPROCESS Process = PsGetCurrentProcess();
2620
2621 //
2622 // Check if we should fail instead of raising an exception
2623 //
2624 if (PoolType & POOL_QUOTA_FAIL_INSTEAD_OF_RAISE)
2625 {
2626 Raise = FALSE;
2627 PoolType &= ~POOL_QUOTA_FAIL_INSTEAD_OF_RAISE;
2628 }
2629
2630 //
2631 // Inject the pool quota mask
2632 //
2633 PoolType += QUOTA_POOL_MASK;
2634
2635 //
2636 // Check if we have enough space to add the quota owner process, as long as
2637 // this isn't the system process, which never gets charged quota
2638 //
2639 ASSERT(NumberOfBytes != 0);
2640 if ((NumberOfBytes <= (PAGE_SIZE - POOL_BLOCK_SIZE - sizeof(PVOID))) &&
2641 (Process != PsInitialSystemProcess))
2642 {
2643 //
2644 // Add space for our EPROCESS pointer
2645 //
2646 NumberOfBytes += sizeof(PEPROCESS);
2647 }
2648 else
2649 {
2650 //
2651 // We won't be able to store the pointer, so don't use quota for this
2652 //
2653 PoolType -= QUOTA_POOL_MASK;
2654 }
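//
// Layout note: the extra PVOID reserved above sits at the very end of
// the block, which is why the owner process is written below, and read
// back in ExFreePoolWithTag's quota-return path, as
// ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1]
//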
2655
2656 //
2657 // Allocate the pool buffer now
2658 //
2659 Buffer = ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2660
2661 //
2662 // If the buffer is page-aligned, this is a large page allocation and we
2663 // won't touch it
2664 //
2665 if (PAGE_ALIGN(Buffer) != Buffer)
2666 {
2667 //
2668 // Also if special pool is enabled, and this was allocated from there,
2669 // we won't touch it either
2670 //
2671 if ((ExpPoolFlags & POOL_FLAG_SPECIAL_POOL) &&
2672 (MmIsSpecialPoolAddress(Buffer)))
2673 {
2674 return Buffer;
2675 }
2676
2677 //
2678 // If it wasn't actually allocated with quota charges, ignore it too
2679 //
2680 if (!(PoolType & QUOTA_POOL_MASK)) return Buffer;
2681
2682 //
2683 // If this is the system process, we don't charge quota, so ignore
2684 //
2685 if (Process == PsInitialSystemProcess) return Buffer;
2686
2687 //
2688 // Actually go and charge quota for the process now
2689 //
2690 Entry = POOL_ENTRY(Buffer);
2691 Status = PsChargeProcessPoolQuota(Process,
2692 PoolType & BASE_POOL_TYPE_MASK,
2693 Entry->BlockSize * POOL_BLOCK_SIZE);
2694 if (!NT_SUCCESS(Status))
2695 {
2696 //
2697 // Quota failed, back out the allocation, clear the owner, and fail
2698 //
2699 ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
2700 ExFreePoolWithTag(Buffer, Tag);
2701 if (Raise) RtlRaiseStatus(Status);
2702 return NULL;
2703 }
2704
2705 //
2706 // Quota worked, write the owner and then reference it before returning
2707 //
2708 ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = Process;
2709 ObReferenceObject(Process);
2710 }
2711 else if (!(Buffer) && (Raise))
2712 {
2713 //
2714 // The allocation failed, raise an error if we are in raise mode
2715 //
2716 RtlRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
2717 }
2718
2719 //
2720 // Return the allocated buffer
2721 //
2722 return Buffer;
2723 }
2724
2725 #if DBG && defined(KDBG)
2726
2727 BOOLEAN
2728 ExpKdbgExtPool(
2729 ULONG Argc,
2730 PCHAR Argv[])
2731 {
2732 ULONG_PTR Address = 0, Flags = 0;
2733 PVOID PoolPage;
2734 PPOOL_HEADER Entry;
2735 BOOLEAN ThisOne;
2736 PULONG Data;
2737
2738 if (Argc > 1)
2739 {
2740 /* Get address */
2741 if (!KdbpGetHexNumber(Argv[1], &Address))
2742 {
2743 KdbpPrint("Invalid parameter: %s\n", Argv[1]);
2744 return TRUE;
2745 }
2746 }
2747
2748 if (Argc > 2)
2749 {
2750 /* Get flags */
2751 if (!KdbpGetHexNumber(Argv[2], &Flags))
2752 {
2753 KdbpPrint("Invalid parameter: %s\n", Argv[2]);
2754 return TRUE;
2755 }
2756 }
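/* Hypothetical usage (the exact command name depends on how KDBG
   registers this extension): "!pool deadbeef 1" dumps the pool page
   containing 0xdeadbeef, with flag bit 0 selecting the verbose,
   hex-dumping output below */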
2757
2758 /* Check if we got an address */
2759 if (Address != 0)
2760 {
2761 /* Get the base page */
2762 PoolPage = PAGE_ALIGN(Address);
2763 }
2764 else
2765 {
2766 KdbpPrint("Heap is unimplemented\n");
2767 return TRUE;
2768 }
2769
2770 /* No paging support! */
2771 if (!MmIsAddressValid(PoolPage))
2772 {
2773 KdbpPrint("Address not accessible!\n");
2774 return TRUE;
2775 }
2776
2777 /* Get pool type */
2778 if ((Address >= (ULONG_PTR)MmPagedPoolStart) && (Address <= (ULONG_PTR)MmPagedPoolEnd))
2779 KdbpPrint("Allocation is from PagedPool region\n");
2780 else if ((Address >= (ULONG_PTR)MmNonPagedPoolStart) && (Address <= (ULONG_PTR)MmNonPagedPoolEnd))
2781 KdbpPrint("Allocation is from NonPagedPool region\n");
2782 else
2783 {
2784 KdbpPrint("Address 0x%p is not within any pool!\n", (PVOID)Address);
2785 return TRUE;
2786 }
2787
2788 /* Loop all entries of that page */
2789 Entry = PoolPage;
2790 do
2791 {
2792 /* Check if the address is within that entry */
2793 ThisOne = ((Address >= (ULONG_PTR)Entry) &&
2794 (Address < (ULONG_PTR)(Entry + Entry->BlockSize)));
2795
2796 if (!(Flags & 1) || ThisOne)
2797 {
2798 /* Print the line */
2799 KdbpPrint("%c%p size: %4d previous size: %4d %s %.4s\n",
2800 ThisOne ? '*' : ' ', Entry, Entry->BlockSize, Entry->PreviousSize,
2801 (Flags & 0x80000000) ? "" : (Entry->PoolType ? "(Allocated)" : "(Free) "),
2802 (Flags & 0x80000000) ? "" : (PCHAR)&Entry->PoolTag);
2803 }
2804
2805 if (Flags & 1)
2806 {
2807 Data = (PULONG)(Entry + 1);
2808 KdbpPrint(" %p %08lx %08lx %08lx %08lx\n"
2809 " %p %08lx %08lx %08lx %08lx\n",
2810 &Data[0], Data[0], Data[1], Data[2], Data[3],
2811 &Data[4], Data[4], Data[5], Data[6], Data[7]);
2812 }
2813
2814 /* Go to next entry */
2815 Entry = POOL_BLOCK(Entry, Entry->BlockSize);
2816 }
2817 while (((ULONG_PTR)Entry < (ULONG_PTR)PoolPage + PAGE_SIZE) && (Entry->BlockSize != 0));
2818
2819 return TRUE;
2820 }
2821
2822 #endif // DBG && KDBG
2823
2824 /* EOF */