1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/expool.c
5 * PURPOSE: ARM Memory Manager Executive Pool Manager
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 #define MODULE_INVOLVED_IN_ARM3
16 #include "../ARM3/miarm.h"
17
18 #undef ExAllocatePoolWithQuota
19 #undef ExAllocatePoolWithQuotaTag
20
21 /* GLOBALS ********************************************************************/
22
23 #define POOL_BIG_TABLE_ENTRY_FREE 0x1
24
25 typedef struct _POOL_DPC_CONTEXT
26 {
27 PPOOL_TRACKER_TABLE PoolTrackTable;
28 SIZE_T PoolTrackTableSize;
29 PPOOL_TRACKER_TABLE PoolTrackTableExpansion;
30 SIZE_T PoolTrackTableSizeExpansion;
31 } POOL_DPC_CONTEXT, *PPOOL_DPC_CONTEXT;
32
33 ULONG ExpNumberOfPagedPools;
34 POOL_DESCRIPTOR NonPagedPoolDescriptor;
35 PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16 + 1];
36 PPOOL_DESCRIPTOR PoolVector[2];
37 PKGUARDED_MUTEX ExpPagedPoolMutex;
38 SIZE_T PoolTrackTableSize, PoolTrackTableMask;
39 SIZE_T PoolBigPageTableSize, PoolBigPageTableHash;
40 PPOOL_TRACKER_TABLE PoolTrackTable;
41 PPOOL_TRACKER_BIG_PAGES PoolBigPageTable;
42 KSPIN_LOCK ExpTaggedPoolLock;
43 ULONG PoolHitTag;
44 BOOLEAN ExStopBadTags;
45 KSPIN_LOCK ExpLargePoolTableLock;
46 ULONG ExpPoolBigEntriesInUse;
47 ULONG ExpPoolFlags;
48 ULONG ExPoolFailures;
49
50 /* Pool block/header/list access macros */
51 #define POOL_ENTRY(x) (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
52 #define POOL_FREE_BLOCK(x) (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
53 #define POOL_BLOCK(x, i) (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
54 #define POOL_NEXT_BLOCK(x) POOL_BLOCK((x), (x)->BlockSize)
55 #define POOL_PREV_BLOCK(x) POOL_BLOCK((x), -((x)->PreviousSize))
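//
// Illustrative note (not part of the original source): block sizes are in
// units of POOL_BLOCK_SIZE, which matches sizeof(POOL_HEADER) (8 bytes on
// x86, 16 on x64 -- sizes assumed here for illustration). For a header H
// with H->BlockSize == 3, POOL_NEXT_BLOCK(H) is 3 * POOL_BLOCK_SIZE = 24
// bytes past H on x86, and POOL_FREE_BLOCK(H) is the LIST_ENTRY that lives
// in the block's data area, right after the header.
//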
56
57 /*
58 * Pool list access debug macros, similar to Arthur's pfnlist.c work.
59 * Microsoft actually implements similar checks in the Windows Server 2003 SP1
60 * pool code, but only for checked builds.
61 *
62 * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
63 * that these checks are done even on retail builds, due to the increasing
64 * number of kernel-mode attacks which depend on dangling list pointers and other
65 * kinds of list-based attacks.
66 *
67 * For now, I will leave these checks on all the time, but later they are likely
68 * to be DBG-only, at least until there are enough kernel-mode security attacks
69 * against ReactOS to warrant the performance hit.
70 *
71 * For now, these are not made inline, so we can get good stack traces.
72 */
73 PLIST_ENTRY
74 NTAPI
75 ExpDecodePoolLink(IN PLIST_ENTRY Link)
76 {
77 return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
78 }
79
80 PLIST_ENTRY
81 NTAPI
82 ExpEncodePoolLink(IN PLIST_ENTRY Link)
83 {
84 return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
85 }
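//
// Illustrative note (not part of the original source): free-list links are
// "encoded" by setting bit 0 of the pointer. Since pool blocks and their
// embedded LIST_ENTRY structures are always at least 4-byte aligned, bit 0
// is never part of a real address, so the encoding is trivially reversible:
//
//     ASSERT(ExpDecodePoolLink(ExpEncodePoolLink(&ListHead)) == &ListHead);
//
// which is presumably what lets the checks below catch links that were
// overwritten with plain (non-encoded) pointers.
//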
86
87 VOID
88 NTAPI
89 ExpCheckPoolLinks(IN PLIST_ENTRY ListHead)
90 {
91 if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink) != ListHead) ||
92 (ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink) != ListHead))
93 {
94 KeBugCheckEx(BAD_POOL_HEADER,
95 3,
96 (ULONG_PTR)ListHead,
97 (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink),
98 (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink));
99 }
100 }
101
102 VOID
103 NTAPI
104 ExpInitializePoolListHead(IN PLIST_ENTRY ListHead)
105 {
106 ListHead->Flink = ListHead->Blink = ExpEncodePoolLink(ListHead);
107 }
108
109 BOOLEAN
110 NTAPI
111 ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)
112 {
113 return (ExpDecodePoolLink(ListHead->Flink) == ListHead);
114 }
115
116 VOID
117 NTAPI
118 ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
119 {
120 PLIST_ENTRY Blink, Flink;
121 Flink = ExpDecodePoolLink(Entry->Flink);
122 Blink = ExpDecodePoolLink(Entry->Blink);
123 Flink->Blink = ExpEncodePoolLink(Blink);
124 Blink->Flink = ExpEncodePoolLink(Flink);
125 }
126
127 PLIST_ENTRY
128 NTAPI
129 ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
130 {
131 PLIST_ENTRY Entry, Flink;
132 Entry = ExpDecodePoolLink(ListHead->Flink);
133 Flink = ExpDecodePoolLink(Entry->Flink);
134 ListHead->Flink = ExpEncodePoolLink(Flink);
135 Flink->Blink = ExpEncodePoolLink(ListHead);
136 return Entry;
137 }
138
139 PLIST_ENTRY
140 NTAPI
141 ExpRemovePoolTailList(IN PLIST_ENTRY ListHead)
142 {
143 PLIST_ENTRY Entry, Blink;
144 Entry = ExpDecodePoolLink(ListHead->Blink);
145 Blink = ExpDecodePoolLink(Entry->Blink);
146 ListHead->Blink = ExpEncodePoolLink(Blink);
147 Blink->Flink = ExpEncodePoolLink(ListHead);
148 return Entry;
149 }
150
151 VOID
152 NTAPI
153 ExpInsertPoolTailList(IN PLIST_ENTRY ListHead,
154 IN PLIST_ENTRY Entry)
155 {
156 PLIST_ENTRY Blink;
157 ExpCheckPoolLinks(ListHead);
158 Blink = ExpDecodePoolLink(ListHead->Blink);
159 Entry->Flink = ExpEncodePoolLink(ListHead);
160 Entry->Blink = ExpEncodePoolLink(Blink);
161 Blink->Flink = ExpEncodePoolLink(Entry);
162 ListHead->Blink = ExpEncodePoolLink(Entry);
163 ExpCheckPoolLinks(ListHead);
164 }
165
166 VOID
167 NTAPI
168 ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead,
169 IN PLIST_ENTRY Entry)
170 {
171 PLIST_ENTRY Flink;
172 ExpCheckPoolLinks(ListHead);
173 Flink = ExpDecodePoolLink(ListHead->Flink);
174 Entry->Flink = ExpEncodePoolLink(Flink);
175 Entry->Blink = ExpEncodePoolLink(ListHead);
176 Flink->Blink = ExpEncodePoolLink(Entry);
177 ListHead->Flink = ExpEncodePoolLink(Entry);
178 ExpCheckPoolLinks(ListHead);
179 }
180
181 VOID
182 NTAPI
183 ExpCheckPoolHeader(IN PPOOL_HEADER Entry)
184 {
185 PPOOL_HEADER PreviousEntry, NextEntry;
186
187 /* Is there a block before this one? */
188 if (Entry->PreviousSize)
189 {
190 /* Get it */
191 PreviousEntry = POOL_PREV_BLOCK(Entry);
192
193 /* The two blocks must be on the same page! */
194 if (PAGE_ALIGN(Entry) != PAGE_ALIGN(PreviousEntry))
195 {
196 /* Something is awry */
197 KeBugCheckEx(BAD_POOL_HEADER,
198 6,
199 (ULONG_PTR)PreviousEntry,
200 __LINE__,
201 (ULONG_PTR)Entry);
202 }
203
204 /* This block should also indicate that it's as large as we think it is */
205 if (PreviousEntry->BlockSize != Entry->PreviousSize)
206 {
207 /* Otherwise, someone corrupted one of the sizes */
208 DPRINT1("PreviousEntry BlockSize %lu, tag %.4s. Entry PreviousSize %lu, tag %.4s\n",
209 PreviousEntry->BlockSize, (char *)&PreviousEntry->PoolTag,
210 Entry->PreviousSize, (char *)&Entry->PoolTag);
211 KeBugCheckEx(BAD_POOL_HEADER,
212 5,
213 (ULONG_PTR)PreviousEntry,
214 __LINE__,
215 (ULONG_PTR)Entry);
216 }
217 }
218 else if (PAGE_ALIGN(Entry) != Entry)
219 {
220 /* If there's no block before us, we are the first block, so we should be on a page boundary */
221 KeBugCheckEx(BAD_POOL_HEADER,
222 7,
223 0,
224 __LINE__,
225 (ULONG_PTR)Entry);
226 }
227
228 /* This block must have a size */
229 if (!Entry->BlockSize)
230 {
231 /* Someone must've corrupted this field */
232 if (Entry->PreviousSize)
233 {
234 PreviousEntry = POOL_PREV_BLOCK(Entry);
235 DPRINT1("PreviousEntry tag %.4s. Entry tag %.4s\n",
236 (char *)&PreviousEntry->PoolTag,
237 (char *)&Entry->PoolTag);
238 }
239 else
240 {
241 DPRINT1("Entry tag %.4s\n",
242 (char *)&Entry->PoolTag);
243 }
244 KeBugCheckEx(BAD_POOL_HEADER,
245 8,
246 0,
247 __LINE__,
248 (ULONG_PTR)Entry);
249 }
250
251 /* Okay, now get the next block */
252 NextEntry = POOL_NEXT_BLOCK(Entry);
253
254 /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
255 if (PAGE_ALIGN(NextEntry) != NextEntry)
256 {
257 /* The two blocks must be on the same page! */
258 if (PAGE_ALIGN(Entry) != PAGE_ALIGN(NextEntry))
259 {
260 /* Something is messed up */
261 KeBugCheckEx(BAD_POOL_HEADER,
262 9,
263 (ULONG_PTR)NextEntry,
264 __LINE__,
265 (ULONG_PTR)Entry);
266 }
267
268 /* And this block should think we are as large as we truly are */
269 if (NextEntry->PreviousSize != Entry->BlockSize)
270 {
271 /* Otherwise, someone corrupted the field */
272 DPRINT1("Entry BlockSize %lu, tag %.4s. NextEntry PreviousSize %lu, tag %.4s\n",
273 Entry->BlockSize, (char *)&Entry->PoolTag,
274 NextEntry->PreviousSize, (char *)&NextEntry->PoolTag);
275 KeBugCheckEx(BAD_POOL_HEADER,
276 5,
277 (ULONG_PTR)NextEntry,
278 __LINE__,
279 (ULONG_PTR)Entry);
280 }
281 }
282 }
283
284 VOID
285 NTAPI
286 ExpCheckPoolAllocation(
287 PVOID P,
288 POOL_TYPE PoolType,
289 ULONG Tag)
290 {
291 PPOOL_HEADER Entry;
292 ULONG i;
293 KIRQL OldIrql;
294 POOL_TYPE RealPoolType;
295
296 /* Get the pool header */
297 Entry = ((PPOOL_HEADER)P) - 1;
298
299 /* Check if this is a large allocation */
300 if (PAGE_ALIGN(P) == P)
301 {
302 /* Lock the pool table */
303 KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
304
305 /* Find the pool tag */
306 for (i = 0; i < PoolBigPageTableSize; i++)
307 {
308 /* Check if this is our allocation */
309 if (PoolBigPageTable[i].Va == P)
310 {
311 /* Make sure the tag is ok */
312 if (PoolBigPageTable[i].Key != Tag)
313 {
314 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, PoolBigPageTable[i].Key, Tag);
315 }
316
317 break;
318 }
319 }
320
321 /* Release the lock */
322 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
323
324 if (i == PoolBigPageTableSize)
325 {
326 /* Did not find the allocation */
327 //ASSERT(FALSE);
328 }
329
330 /* Get Pool type by address */
331 RealPoolType = MmDeterminePoolType(P);
332 }
333 else
334 {
335 /* Verify the tag */
336 if (Entry->PoolTag != Tag)
337 {
338 DPRINT1("Allocation has wrong pool tag! Expected '%.4s', got '%.4s' (0x%08lx)\n",
339 &Tag, &Entry->PoolTag, Entry->PoolTag);
340 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Entry->PoolTag, Tag);
341 }
342
343 /* Check the rest of the header */
344 ExpCheckPoolHeader(Entry);
345
346 /* Get Pool type from entry */
347 RealPoolType = (Entry->PoolType - 1);
348 }
349
350 /* Should we check the pool type? */
351 if (PoolType != -1)
352 {
353 /* Verify the pool type */
354 if (RealPoolType != PoolType)
355 {
356 DPRINT1("Wrong pool type! Expected %s, got %s\n",
357 PoolType & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool",
358 (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool");
359 KeBugCheckEx(BAD_POOL_CALLER, 0xCC, (ULONG_PTR)P, Entry->PoolTag, Tag);
360 }
361 }
362 }
363
364 VOID
365 NTAPI
366 ExpCheckPoolBlocks(IN PVOID Block)
367 {
368 BOOLEAN FoundBlock = FALSE;
369 SIZE_T Size = 0;
370 PPOOL_HEADER Entry;
371
372 /* Get the first entry for this page, make sure it really is the first */
373 Entry = PAGE_ALIGN(Block);
374 ASSERT(Entry->PreviousSize == 0);
375
376 /* Now scan each entry */
377 while (TRUE)
378 {
379 /* When we actually found our block, remember this */
380 if (Entry == Block) FoundBlock = TRUE;
381
382 /* Now validate this block header */
383 ExpCheckPoolHeader(Entry);
384
385 /* And go to the next one, keeping track of our size */
386 Size += Entry->BlockSize;
387 Entry = POOL_NEXT_BLOCK(Entry);
388
389 /* If we hit the last block, stop */
390 if (Size >= (PAGE_SIZE / POOL_BLOCK_SIZE)) break;
391
392 /* If we hit the end of the page, stop */
393 if (PAGE_ALIGN(Entry) == Entry) break;
394 }
395
396 /* We must've found our block, and we must have hit the end of the page */
397 if ((PAGE_ALIGN(Entry) != Entry) || !(FoundBlock))
398 {
399 /* Otherwise, the blocks are messed up */
400 KeBugCheckEx(BAD_POOL_HEADER, 10, (ULONG_PTR)Block, __LINE__, (ULONG_PTR)Entry);
401 }
402 }
403
404 FORCEINLINE
405 VOID
406 ExpCheckPoolIrqlLevel(IN POOL_TYPE PoolType,
407 IN SIZE_T NumberOfBytes,
408 IN PVOID Entry)
409 {
410 //
411 // Validate IRQL: It must be APC_LEVEL or lower for Paged Pool, and it must
412 // be DISPATCH_LEVEL or lower for Non Paged Pool
413 //
414 if (((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) ?
415 (KeGetCurrentIrql() > APC_LEVEL) :
416 (KeGetCurrentIrql() > DISPATCH_LEVEL))
417 {
418 //
419 // Take the system down
420 //
421 KeBugCheckEx(BAD_POOL_CALLER,
422 !Entry ? POOL_ALLOC_IRQL_INVALID : POOL_FREE_IRQL_INVALID,
423 KeGetCurrentIrql(),
424 PoolType,
425 !Entry ? NumberOfBytes : (ULONG_PTR)Entry);
426 }
427 }
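//
// Example (illustration only): an ExAllocatePoolWithTag(PagedPool, ...) call
// made at DISPATCH_LEVEL fails this check and bugchecks with BAD_POOL_CALLER
// and code POOL_ALLOC_IRQL_INVALID, while a NonPagedPool request is permitted
// at any IRQL up to and including DISPATCH_LEVEL.
//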
428
429 FORCEINLINE
430 ULONG
431 ExpComputeHashForTag(IN ULONG Tag,
432 IN SIZE_T BucketMask)
433 {
434 //
435 // Compute the hash by multiplying with a large prime number and then XORing
436 // with the HIDWORD of the result.
437 //
438 // Finally, AND with the bucket mask to generate a valid index/bucket into
439 // the table
440 //
441 ULONGLONG Result = (ULONGLONG)40543 * Tag;
442 return (ULONG)BucketMask & ((ULONG)Result ^ (Result >> 32));
443 }
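//
// Worked example (illustration only, not part of the original source): the
// return value is equivalent to
//
//     (((ULONG)(40543ULL * Tag)) ^ (ULONG)((40543ULL * Tag) >> 32)) & BucketMask
//
// For a toy tag value of 1, Result is 40543 (0x9E5F), the high dword is 0,
// and with the default tracker mask of 2047 (0x7FF) the bucket is 0x65F, or
// 1631. The mask is always smaller than the table size, so the index is valid.
//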
444
445 FORCEINLINE
446 ULONG
447 ExpComputePartialHashForAddress(IN PVOID BaseAddress)
448 {
449 ULONG Result;
450 //
451 // Compute the hash by converting the address into a page number, and then
452 // XORing each byte with the next one.
453 //
454 // We do *NOT* AND with the bucket mask at this point because big table expansion
455 // might happen. Therefore, the final step of the hash must be performed
456 // while holding the expansion pushlock, and this is why we call this a
457 // "partial" hash only.
458 //
459 Result = (ULONG)((ULONG_PTR)BaseAddress >> PAGE_SHIFT);
460 return (Result >> 24) ^ (Result >> 16) ^ (Result >> 8) ^ Result;
461 }
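//
// Usage sketch (mirrors the callers below): the final masking step is done
// only once the big-pool table lock is held, so the mask cannot change under
// us during a table expansion:
//
//     Hash = ExpComputePartialHashForAddress(Va);
//     KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
//     Hash &= PoolBigPageTableHash;
//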
462
463 /* PRIVATE FUNCTIONS **********************************************************/
464
465 VOID
466 NTAPI
467 INIT_FUNCTION
468 ExpSeedHotTags(VOID)
469 {
470 ULONG i, Key, Hash, Index;
471 PPOOL_TRACKER_TABLE TrackTable = PoolTrackTable;
472 ULONG TagList[] =
473 {
474 '  oI',
475 ' laH',
476 'PldM',
477 'LooP',
478 'tSbO',
479 ' prI',
480 'bdDN',
481 'LprI',
482 'pOoI',
483 ' ldM',
484 'eliF',
485 'aVMC',
486 'dSeS',
487 'CFtN',
488 'looP',
489 'rPCT',
490 'bNMC',
491 'dTeS',
492 'sFtN',
493 'TPCT',
494 'CPCT',
495 ' yeK',
496 'qSbO',
497 'mNoI',
498 'aEoI',
499 'cPCT',
500 'aFtN',
501 '0ftN',
502 'tceS',
503 'SprI',
504 'ekoT',
505 '  eS',
506 'lCbO',
507 'cScC',
508 'lFtN',
509 'cAeS',
510 'mfSF',
511 'kWcC',
512 'miSF',
513 'CdfA',
514 'EdfA',
515 'orSF',
516 'nftN',
517 'PRIU',
518 'rFpN',
519 'RFpN',
520 'aPeS',
521 'sUeS',
522 'FpcA',
523 'MpcA',
524 'cSeS',
525 'mNbO',
526 'sFpN',
527 'uLeS',
528 'DPcS',
529 'nevE',
530 'vrqR',
531 'ldaV',
532 '  pP',
533 'SdaV',
534 ' daV',
535 'LdaV',
536 'FdaV',
537 ' GIB',
538 };
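//
// Note (illustration, little-endian layout assumed): the character constants
// above are byte-reversed so the tag reads naturally in memory and in pool
// dumps. For example 'looP' stores the bytes 'P','o','o','l' ("Pool"), and
// ' GIB' stores "BIG ", the fallback tag used for untracked big-page
// allocations.
//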
539
540 //
541 // Loop all 64 hot tags
542 //
543 ASSERT((sizeof(TagList) / sizeof(ULONG)) == 64);
544 for (i = 0; i < sizeof(TagList) / sizeof(ULONG); i++)
545 {
546 //
547 // Get the current tag, and compute its hash in the tracker table
548 //
549 Key = TagList[i];
550 Hash = ExpComputeHashForTag(Key, PoolTrackTableMask);
551
552 //
553 // Loop all the hashes in this index/bucket
554 //
555 Index = Hash;
556 while (TRUE)
557 {
558 //
559 // Find an empty entry, and make sure this isn't the last hash that
560 // can fit.
561 //
562 // On checked builds, also make sure this is the first time we are
563 // seeding this tag.
564 //
565 ASSERT(TrackTable[Hash].Key != Key);
566 if (!(TrackTable[Hash].Key) && (Hash != PoolTrackTableSize - 1))
567 {
568 //
569 // It has been seeded, move on to the next tag
570 //
571 TrackTable[Hash].Key = Key;
572 break;
573 }
574
575 //
576 // This entry was already taken, compute the next possible hash while
577 // making sure we're not back at our initial index.
578 //
579 ASSERT(TrackTable[Hash].Key != Key);
580 Hash = (Hash + 1) & PoolTrackTableMask;
581 if (Hash == Index) break;
582 }
583 }
584 }
585
586 VOID
587 NTAPI
588 ExpRemovePoolTracker(IN ULONG Key,
589 IN SIZE_T NumberOfBytes,
590 IN POOL_TYPE PoolType)
591 {
592 ULONG Hash, Index;
593 PPOOL_TRACKER_TABLE Table, TableEntry;
594 SIZE_T TableMask, TableSize;
595
596 //
597 // Remove the PROTECTED_POOL flag which is not part of the tag
598 //
599 Key &= ~PROTECTED_POOL;
600
601 //
602 // With WinDBG you can set a tag you want to break on when an allocation is
603 // attempted
604 //
605 if (Key == PoolHitTag) DbgBreakPoint();
606
607 //
608 // Why the double indirection? Because normally this function is also used
609 // when doing session pool allocations, which has another set of tables,
610 // sizes, and masks that live in session pool. Now we don't support session
611 // pool so we only ever use the regular tables, but I'm keeping the code this
612 // way so that the day we DO support session pool, it won't require that
613 // many changes
614 //
615 Table = PoolTrackTable;
616 TableMask = PoolTrackTableMask;
617 TableSize = PoolTrackTableSize;
618
619 //
620 // Compute the hash for this key, and loop all the possible buckets
621 //
622 Hash = ExpComputeHashForTag(Key, TableMask);
623 Index = Hash;
624 while (TRUE)
625 {
626 //
627 // Have we found the entry for this tag?
628 //
629 TableEntry = &Table[Hash];
630 if (TableEntry->Key == Key)
631 {
632 //
633 // Decrement the counters depending on if this was paged or nonpaged
634 // pool
635 //
636 if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
637 {
638 InterlockedIncrement(&TableEntry->NonPagedFrees);
639 InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes,
640 -(SSIZE_T)NumberOfBytes);
641 return;
642 }
643 InterlockedIncrement(&TableEntry->PagedFrees);
644 InterlockedExchangeAddSizeT(&TableEntry->PagedBytes,
645 -(SSIZE_T)NumberOfBytes);
646 return;
647 }
648
649 //
650 // We should have only ended up with an empty entry if we've reached
651 // the last bucket
652 //
653 if (!TableEntry->Key)
654 {
655 DPRINT1("Empty item reached in tracker table. Hash=0x%lx, TableMask=0x%lx, Tag=0x%08lx, NumberOfBytes=%lu, PoolType=%d\n",
656 Hash, TableMask, Key, (ULONG)NumberOfBytes, PoolType);
657 ASSERT(Hash == TableMask);
658 }
659
660 //
661 // This path is hit when we don't have an entry, and the current bucket
662 // is full, so we simply try the next one
663 //
664 Hash = (Hash + 1) & TableMask;
665 if (Hash == Index) break;
666 }
667
668 //
669 // And finally this path is hit when all the buckets are full, and we need
670 // some expansion. This path is not yet supported in ReactOS and so we'll
671 // ignore the tag
672 //
673 DPRINT1("Out of pool tag space, ignoring...\n");
674 }
675
676 VOID
677 NTAPI
678 ExpInsertPoolTracker(IN ULONG Key,
679 IN SIZE_T NumberOfBytes,
680 IN POOL_TYPE PoolType)
681 {
682 ULONG Hash, Index;
683 KIRQL OldIrql;
684 PPOOL_TRACKER_TABLE Table, TableEntry;
685 SIZE_T TableMask, TableSize;
686
687 //
688 // Remove the PROTECTED_POOL flag which is not part of the tag
689 //
690 Key &= ~PROTECTED_POOL;
691
692 //
693 // With WinDBG you can set a tag you want to break on when an allocation is
694 // attempted
695 //
696 if (Key == PoolHitTag) DbgBreakPoint();
697
698 //
699 // There is also an internal flag you can set to break on malformed tags
700 //
701 if (ExStopBadTags) ASSERT(Key & 0xFFFFFF00);
702
703 //
704 // ASSERT on ReactOS features not yet supported
705 //
706 ASSERT(!(PoolType & SESSION_POOL_MASK));
707 ASSERT(KeGetCurrentProcessorNumber() == 0);
708
709 //
710 // Why the double indirection? Because normally this function is also used
711 // when doing session pool allocations, which has another set of tables,
712 // sizes, and masks that live in session pool. Now we don't support session
713 // pool so we only ever use the regular tables, but I'm keeping the code this
714 // way so that the day we DO support session pool, it won't require that
715 // many changes
716 //
717 Table = PoolTrackTable;
718 TableMask = PoolTrackTableMask;
719 TableSize = PoolTrackTableSize;
720
721 //
722 // Compute the hash for this key, and loop all the possible buckets
723 //
724 Hash = ExpComputeHashForTag(Key, TableMask);
725 Index = Hash;
726 while (TRUE)
727 {
728 //
729 // Do we already have an entry for this tag?
730 //
731 TableEntry = &Table[Hash];
732 if (TableEntry->Key == Key)
733 {
734 //
735 // Increment the counters depending on if this was paged or nonpaged
736 // pool
737 //
738 if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
739 {
740 InterlockedIncrement(&TableEntry->NonPagedAllocs);
741 InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes, NumberOfBytes);
742 return;
743 }
744 InterlockedIncrement(&TableEntry->PagedAllocs);
745 InterlockedExchangeAddSizeT(&TableEntry->PagedBytes, NumberOfBytes);
746 return;
747 }
748
749 //
750 // We don't have an entry yet, but we've found a free bucket for it
751 //
752 if (!(TableEntry->Key) && (Hash != PoolTrackTableSize - 1))
753 {
754 //
755 // We need to hold the lock while creating a new entry, since other
756 // processors might be in this code path as well
757 //
758 ExAcquireSpinLock(&ExpTaggedPoolLock, &OldIrql);
759 if (!PoolTrackTable[Hash].Key)
760 {
761 //
762 // We've won the race, so now create this entry in the bucket
763 //
764 ASSERT(Table[Hash].Key == 0);
765 PoolTrackTable[Hash].Key = Key;
766 TableEntry->Key = Key;
767 }
768 ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);
769
770 //
771 // Now we force the loop to run again, and we should now end up in
772 // the code path above which does the interlocked increments...
773 //
774 continue;
775 }
776
777 //
778 // This path is hit when we don't have an entry, and the current bucket
779 // is full, so we simply try the next one
780 //
781 Hash = (Hash + 1) & TableMask;
782 if (Hash == Index) break;
783 }
784
785 //
786 // And finally this path is hit when all the buckets are full, and we need
787 // some expansion. This path is not yet supported in ReactOS and so we'll
788 // ignore the tag
789 //
790 DPRINT1("Out of pool tag space, ignoring...\n");
791 }
792
793 VOID
794 NTAPI
795 INIT_FUNCTION
796 ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor,
797 IN POOL_TYPE PoolType,
798 IN ULONG PoolIndex,
799 IN ULONG Threshold,
800 IN PVOID PoolLock)
801 {
802 PLIST_ENTRY NextEntry, LastEntry;
803
804 //
805 // Setup the descriptor based on the caller's request
806 //
807 PoolDescriptor->PoolType = PoolType;
808 PoolDescriptor->PoolIndex = PoolIndex;
809 PoolDescriptor->Threshold = Threshold;
810 PoolDescriptor->LockAddress = PoolLock;
811
812 //
813 // Initialize accounting data
814 //
815 PoolDescriptor->RunningAllocs = 0;
816 PoolDescriptor->RunningDeAllocs = 0;
817 PoolDescriptor->TotalPages = 0;
818 PoolDescriptor->TotalBytes = 0;
819 PoolDescriptor->TotalBigPages = 0;
820
821 //
822 // Nothing pending for now
823 //
824 PoolDescriptor->PendingFrees = NULL;
825 PoolDescriptor->PendingFreeDepth = 0;
826
827 //
828 // Loop all the descriptor's allocation lists and initialize them
829 //
830 NextEntry = PoolDescriptor->ListHeads;
831 LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
832 while (NextEntry < LastEntry)
833 {
834 ExpInitializePoolListHead(NextEntry);
835 NextEntry++;
836 }
837
838 //
839 // Note that ReactOS does not support Session Pool Yet
840 //
841 ASSERT(PoolType != PagedPoolSession);
842 }
843
844 VOID
845 NTAPI
846 INIT_FUNCTION
847 InitializePool(IN POOL_TYPE PoolType,
848 IN ULONG Threshold)
849 {
850 PPOOL_DESCRIPTOR Descriptor;
851 SIZE_T TableSize;
852 ULONG i;
853
854 //
855 // Check what kind of pool this is
856 //
857 if (PoolType == NonPagedPool)
858 {
859 //
860 // Compute the track table size and convert it from a power of two to an
861 // actual byte size
862 //
863 // NOTE: On checked builds, we'll assert if the registry table size was
864 // invalid, while on retail builds we'll just break out of the loop at
865 // that point.
866 //
867 TableSize = min(PoolTrackTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
868 for (i = 0; i < 32; i++)
869 {
870 if (TableSize & 1)
871 {
872 ASSERT((TableSize & ~1) == 0);
873 if (!(TableSize & ~1)) break;
874 }
875 TableSize >>= 1;
876 }
877
878 //
879 // If we hit bit 32, then no size was defined in the registry, so
880 // we'll use the default size of 2048 entries.
881 //
882 // Otherwise, use the size from the registry, as long as it's not
883 // smaller than 64 entries.
884 //
885 if (i == 32)
886 {
887 PoolTrackTableSize = 2048;
888 }
889 else
890 {
891 PoolTrackTableSize = max(1 << i, 64);
892 }
893
894 //
895 // Loop trying with the biggest specified size first, and cut it down
896 // by a power of two each iteration in case not enough memory exists
897 //
898 while (TRUE)
899 {
900 //
901 // Do not allow overflow
902 //
903 if ((PoolTrackTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_TABLE)))
904 {
905 PoolTrackTableSize >>= 1;
906 continue;
907 }
908
909 //
910 // Allocate the tracker table and exit the loop if this worked
911 //
912 PoolTrackTable = MiAllocatePoolPages(NonPagedPool,
913 (PoolTrackTableSize + 1) *
914 sizeof(POOL_TRACKER_TABLE));
915 if (PoolTrackTable) break;
916
917 //
918 // Otherwise, as long as we're not down to the last bit, keep
919 // iterating
920 //
921 if (PoolTrackTableSize == 1)
922 {
923 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
924 TableSize,
925 0xFFFFFFFF,
926 0xFFFFFFFF,
927 0xFFFFFFFF);
928 }
929 PoolTrackTableSize >>= 1;
930 }
931
932 //
933 // Finally, add one entry, compute the hash mask, and zero the table
934 //
935 PoolTrackTableSize++;
936 PoolTrackTableMask = PoolTrackTableSize - 2;
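//
// Worked example (illustration only): with the default of 2048 entries, the
// allocation above reserved 2049 POOL_TRACKER_TABLE slots, PoolTrackTableSize
// is now 2049, and the mask is 2047 (0x7FF), so hashed indices always land in
// the first 2048 slots and the extra, final slot is never hashed to directly.
//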
937
938 RtlZeroMemory(PoolTrackTable,
939 PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
940
941 //
942 // We now do the exact same thing with the tracker table for big pages
943 //
944 TableSize = min(PoolBigPageTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
945 for (i = 0; i < 32; i++)
946 {
947 if (TableSize & 1)
948 {
949 ASSERT((TableSize & ~1) == 0);
950 if (!(TableSize & ~1)) break;
951 }
952 TableSize >>= 1;
953 }
954
955 //
956 // For big pages, the default tracker table is 4096 entries, while the
957 // minimum is still 64
958 //
959 if (i == 32)
960 {
961 PoolBigPageTableSize = 4096;
962 }
963 else
964 {
965 PoolBigPageTableSize = max(1 << i, 64);
966 }
967
968 //
969 // Again, run the exact same loop we ran earlier, but this time for the
970 // big pool tracker instead
971 //
972 while (TRUE)
973 {
974 if ((PoolBigPageTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_BIG_PAGES)))
975 {
976 PoolBigPageTableSize >>= 1;
977 continue;
978 }
979
980 PoolBigPageTable = MiAllocatePoolPages(NonPagedPool,
981 PoolBigPageTableSize *
982 sizeof(POOL_TRACKER_BIG_PAGES));
983 if (PoolBigPageTable) break;
984
985 if (PoolBigPageTableSize == 1)
986 {
987 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
988 TableSize,
989 0xFFFFFFFF,
990 0xFFFFFFFF,
991 0xFFFFFFFF);
992 }
993
994 PoolBigPageTableSize >>= 1;
995 }
996
997 //
998 // An extra entry is not needed for the big pool tracker, so just
999 // compute the hash mask and zero it
1000 //
1001 PoolBigPageTableHash = PoolBigPageTableSize - 1;
1002 RtlZeroMemory(PoolBigPageTable,
1003 PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
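//
// Every Va is pre-set to (PVOID)1, i.e. POOL_BIG_TABLE_ENTRY_FREE, so each
// slot starts out marked free; ExpAddTagForBigPages tests that bit and swaps
// in the real (page-aligned) address when it claims a slot.
//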
1004 for (i = 0; i < PoolBigPageTableSize; i++) PoolBigPageTable[i].Va = (PVOID)1;
1005
1006 //
1007 // During development, print this out so we can see what's happening
1008 //
1009 DPRINT1("EXPOOL: Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
1010 PoolTrackTable, PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
1011 DPRINT1("EXPOOL: Big Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
1012 PoolBigPageTable, PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
1013
1014 //
1015 // Insert the generic tracker for all of big pool
1016 //
1017 ExpInsertPoolTracker('looP',
1018 ROUND_TO_PAGES(PoolBigPageTableSize *
1019 sizeof(POOL_TRACKER_BIG_PAGES)),
1020 NonPagedPool);
1021
1022 //
1023 // No support for NUMA systems at this time
1024 //
1025 ASSERT(KeNumberNodes == 1);
1026
1027 //
1028 // Initialize the tag spinlock
1029 //
1030 KeInitializeSpinLock(&ExpTaggedPoolLock);
1031
1032 //
1033 // Initialize the nonpaged pool descriptor
1034 //
1035 PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
1036 ExInitializePoolDescriptor(PoolVector[NonPagedPool],
1037 NonPagedPool,
1038 0,
1039 Threshold,
1040 NULL);
1041 }
1042 else
1043 {
1044 //
1045 // No support for NUMA systems at this time
1046 //
1047 ASSERT(KeNumberNodes == 1);
1048
1049 //
1050 // Allocate the pool descriptor
1051 //
1052 Descriptor = ExAllocatePoolWithTag(NonPagedPool,
1053 sizeof(KGUARDED_MUTEX) +
1054 sizeof(POOL_DESCRIPTOR),
1055 'looP');
1056 if (!Descriptor)
1057 {
1058 //
1059 // This is really bad...
1060 //
1061 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1062 0,
1063 -1,
1064 -1,
1065 -1);
1066 }
1067
1068 //
1069 // Setup the vector and guarded mutex for paged pool
1070 //
1071 PoolVector[PagedPool] = Descriptor;
1072 ExpPagedPoolMutex = (PKGUARDED_MUTEX)(Descriptor + 1);
1073 ExpPagedPoolDescriptor[0] = Descriptor;
1074 KeInitializeGuardedMutex(ExpPagedPoolMutex);
1075 ExInitializePoolDescriptor(Descriptor,
1076 PagedPool,
1077 0,
1078 Threshold,
1079 ExpPagedPoolMutex);
1080
1081 //
1082 // Insert the generic tracker for all of nonpaged pool
1083 //
1084 ExpInsertPoolTracker('looP',
1085 ROUND_TO_PAGES(PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE)),
1086 NonPagedPool);
1087 }
1088 }
1089
1090 FORCEINLINE
1091 KIRQL
1092 ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
1093 {
1094 //
1095 // Check if this is nonpaged pool
1096 //
1097 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1098 {
1099 //
1100 // Use the queued spin lock
1101 //
1102 return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
1103 }
1104 else
1105 {
1106 //
1107 // Use the guarded mutex
1108 //
1109 KeAcquireGuardedMutex(Descriptor->LockAddress);
1110 return APC_LEVEL;
1111 }
1112 }
1113
1114 FORCEINLINE
1115 VOID
1116 ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor,
1117 IN KIRQL OldIrql)
1118 {
1119 //
1120 // Check if this is nonpaged pool
1121 //
1122 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1123 {
1124 //
1125 // Use the queued spin lock
1126 //
1127 KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
1128 }
1129 else
1130 {
1131 //
1132 // Use the guarded mutex
1133 //
1134 KeReleaseGuardedMutex(Descriptor->LockAddress);
1135 }
1136 }
1137
1138 VOID
1139 NTAPI
1140 ExpGetPoolTagInfoTarget(IN PKDPC Dpc,
1141 IN PVOID DeferredContext,
1142 IN PVOID SystemArgument1,
1143 IN PVOID SystemArgument2)
1144 {
1145 PPOOL_DPC_CONTEXT Context = DeferredContext;
1146 UNREFERENCED_PARAMETER(Dpc);
1147 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
1148
1149 //
1150 // Make sure we win the race, and if we did, copy the data atomically
1151 //
1152 if (KeSignalCallDpcSynchronize(SystemArgument2))
1153 {
1154 RtlCopyMemory(Context->PoolTrackTable,
1155 PoolTrackTable,
1156 Context->PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
1157
1158 //
1159 // This is here because ReactOS does not yet support expansion
1160 //
1161 ASSERT(Context->PoolTrackTableSizeExpansion == 0);
1162 }
1163
1164 //
1165 // Regardless of whether we won or not, we must now synchronize and then
1166 // decrement the barrier since this is one more processor that has completed
1167 // the callback.
1168 //
1169 KeSignalCallDpcSynchronize(SystemArgument2);
1170 KeSignalCallDpcDone(SystemArgument1);
1171 }
1172
1173 NTSTATUS
1174 NTAPI
1175 ExGetPoolTagInfo(IN PSYSTEM_POOLTAG_INFORMATION SystemInformation,
1176 IN ULONG SystemInformationLength,
1177 IN OUT PULONG ReturnLength OPTIONAL)
1178 {
1179 ULONG TableSize, CurrentLength;
1180 ULONG EntryCount;
1181 NTSTATUS Status = STATUS_SUCCESS;
1182 PSYSTEM_POOLTAG TagEntry;
1183 PPOOL_TRACKER_TABLE Buffer, TrackerEntry;
1184 POOL_DPC_CONTEXT Context;
1185 ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
1186
1187 //
1188 // Keep track of how much data the caller's buffer must hold
1189 //
1190 CurrentLength = FIELD_OFFSET(SYSTEM_POOLTAG_INFORMATION, TagInfo);
1191
1192 //
1193 // Initialize the caller's buffer
1194 //
1195 TagEntry = &SystemInformation->TagInfo[0];
1196 SystemInformation->Count = 0;
1197
1198 //
1199 // Capture the number of entries, and the total size needed to make a copy
1200 // of the table
1201 //
1202 EntryCount = (ULONG)PoolTrackTableSize;
1203 TableSize = EntryCount * sizeof(POOL_TRACKER_TABLE);
1204
1205 //
1206 // Allocate the "Generic DPC" temporary buffer
1207 //
1208 Buffer = ExAllocatePoolWithTag(NonPagedPool, TableSize, 'ofnI');
1209 if (!Buffer) return STATUS_INSUFFICIENT_RESOURCES;
1210
1211 //
1212 // Do a "Generic DPC" to atomically retrieve the tag and allocation data
1213 //
1214 Context.PoolTrackTable = Buffer;
1215 Context.PoolTrackTableSize = PoolTrackTableSize;
1216 Context.PoolTrackTableExpansion = NULL;
1217 Context.PoolTrackTableSizeExpansion = 0;
1218 KeGenericCallDpc(ExpGetPoolTagInfoTarget, &Context);
1219
1220 //
1221 // Now parse the results
1222 //
1223 for (TrackerEntry = Buffer; TrackerEntry < (Buffer + EntryCount); TrackerEntry++)
1224 {
1225 //
1226 // If the entry is empty, skip it
1227 //
1228 if (!TrackerEntry->Key) continue;
1229
1230 //
1231 // Otherwise, add one more entry to the caller's buffer, and ensure that
1232 // enough space has been allocated in it
1233 //
1234 SystemInformation->Count++;
1235 CurrentLength += sizeof(*TagEntry);
1236 if (SystemInformationLength < CurrentLength)
1237 {
1238 //
1239 // The caller's buffer is too small, so set a failure code. The
1240 // caller will know the count, as well as how much space is needed.
1241 //
1242 // We do NOT break out of the loop, because we want to keep incrementing
1243 // the Count as well as CurrentLength so that the caller can know the
1244 // final numbers
1245 //
1246 Status = STATUS_INFO_LENGTH_MISMATCH;
1247 }
1248 else
1249 {
1250 //
1251 // Small sanity check that our accounting is working correctly
1252 //
1253 ASSERT(TrackerEntry->PagedAllocs >= TrackerEntry->PagedFrees);
1254 ASSERT(TrackerEntry->NonPagedAllocs >= TrackerEntry->NonPagedFrees);
1255
1256 //
1257 // Return the data into the caller's buffer
1258 //
1259 TagEntry->TagUlong = TrackerEntry->Key;
1260 TagEntry->PagedAllocs = TrackerEntry->PagedAllocs;
1261 TagEntry->PagedFrees = TrackerEntry->PagedFrees;
1262 TagEntry->PagedUsed = TrackerEntry->PagedBytes;
1263 TagEntry->NonPagedAllocs = TrackerEntry->NonPagedAllocs;
1264 TagEntry->NonPagedFrees = TrackerEntry->NonPagedFrees;
1265 TagEntry->NonPagedUsed = TrackerEntry->NonPagedBytes;
1266 TagEntry++;
1267 }
1268 }
1269
1270 //
1271 // Free the "Generic DPC" temporary buffer, return the buffer length and status
1272 //
1273 ExFreePoolWithTag(Buffer, 'ofnI');
1274 if (ReturnLength) *ReturnLength = CurrentLength;
1275 return Status;
1276 }
1277
1278 BOOLEAN
1279 NTAPI
1280 ExpAddTagForBigPages(IN PVOID Va,
1281 IN ULONG Key,
1282 IN ULONG NumberOfPages,
1283 IN POOL_TYPE PoolType)
1284 {
1285 ULONG Hash, i = 0;
1286 PVOID OldVa;
1287 KIRQL OldIrql;
1288 SIZE_T TableSize;
1289 PPOOL_TRACKER_BIG_PAGES Entry, EntryEnd, EntryStart;
1290 ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
1291 ASSERT(!(PoolType & SESSION_POOL_MASK));
1292
1293 //
1294 // As the table is expandable, these values must only be read after acquiring
1295 // the lock to avoid a torn access during an expansion
1296 //
1297 Hash = ExpComputePartialHashForAddress(Va);
1298 KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
1299 Hash &= PoolBigPageTableHash;
1300 TableSize = PoolBigPageTableSize;
1301
1302 //
1303 // We loop from the current hash bucket to the end of the table, and then
1304 // roll over to hash bucket 0 and keep going from there. If we come back
1305 // to the beginning, then we attempt expansion at the bottom of the loop
1306 //
1307 EntryStart = Entry = &PoolBigPageTable[Hash];
1308 EntryEnd = &PoolBigPageTable[TableSize];
1309 do
1310 {
1311 //
1312 // Make sure that this is a free entry and attempt to atomically make the
1313 // entry busy now
1314 //
1315 OldVa = Entry->Va;
1316 if (((ULONG_PTR)OldVa & POOL_BIG_TABLE_ENTRY_FREE) &&
1317 (InterlockedCompareExchangePointer(&Entry->Va, Va, OldVa) == OldVa))
1318 {
1319 //
1320 // We now own this entry, write down the size and the pool tag
1321 //
1322 Entry->Key = Key;
1323 Entry->NumberOfPages = NumberOfPages;
1324
1325 //
1326 // Add one more entry to the count, and see if we're getting within
1327 // 25% of the table size, at which point we'll do an expansion now
1328 // to avoid blocking too hard later on.
1329 //
1330 // Note that we only do this if this is at least the 16th time that we
1331 // have lost the race or failed to find a free entry, which implies a
1332 // massive number of concurrent big pool allocations.
1333 //
1334 InterlockedIncrementUL(&ExpPoolBigEntriesInUse);
1335 if ((i >= 16) && (ExpPoolBigEntriesInUse > (TableSize / 4)))
1336 {
1337 DPRINT1("Should attempt expansion since we now have %lu entries\n",
1338 ExpPoolBigEntriesInUse);
1339 }
1340
1341 //
1342 // We have our entry, return
1343 //
1344 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1345 return TRUE;
1346 }
1347
1348 //
1349 // We don't have our entry yet, so keep trying, making the entry list
1350 // circular if we reach the last entry. We'll eventually break out of
1351 // the loop once we've rolled over and returned back to our original
1352 // hash bucket
1353 //
1354 i++;
1355 if (++Entry >= EntryEnd) Entry = &PoolBigPageTable[0];
1356 } while (Entry != EntryStart);
1357
1358 //
1359 // This means there's no free hash buckets whatsoever, so we would now have
1360 // to attempt expanding the table
1361 //
1362 DPRINT1("Big pool expansion needed, not implemented!\n");
1363 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1364 return FALSE;
1365 }
1366
1367 ULONG
1368 NTAPI
1369 ExpFindAndRemoveTagBigPages(IN PVOID Va,
1370 OUT PULONG_PTR BigPages,
1371 IN POOL_TYPE PoolType)
1372 {
1373 BOOLEAN FirstTry = TRUE;
1374 SIZE_T TableSize;
1375 KIRQL OldIrql;
1376 ULONG PoolTag, Hash;
1377 PPOOL_TRACKER_BIG_PAGES Entry;
1378 ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
1379 ASSERT(!(PoolType & SESSION_POOL_MASK));
1380
1381 //
1382 // As the table is expandable, these values must only be read after acquiring
1383 // the lock to avoid a torn access during an expansion
1384 //
1385 Hash = ExpComputePartialHashForAddress(Va);
1386 KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
1387 Hash &= PoolBigPageTableHash;
1388 TableSize = PoolBigPageTableSize;
1389
1390 //
1391 // Loop while trying to find this big page allocation
1392 //
1393 while (PoolBigPageTable[Hash].Va != Va)
1394 {
1395 //
1396 // Increment the size until we go past the end of the table
1397 //
1398 if (++Hash >= TableSize)
1399 {
1400 //
1401 // Is this the second time we've tried?
1402 //
1403 if (!FirstTry)
1404 {
1405 //
1406 // This means it was never inserted into the pool table and it
1407 // received the special "BIG" tag -- return that and return 0
1408 // so that the code can ask Mm for the page count instead
1409 //
1410 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1411 *BigPages = 0;
1412 return ' GIB';
1413 }
1414
1415 //
1416 // The first time this happens, reset the hash index and try again
1417 //
1418 Hash = 0;
1419 FirstTry = FALSE;
1420 }
1421 }
1422
1423 //
1424 // Now capture all the information we need from the entry, since after we
1425 // release the lock, the data can change
1426 //
1427 Entry = &PoolBigPageTable[Hash];
1428 *BigPages = Entry->NumberOfPages;
1429 PoolTag = Entry->Key;
1430
1431 //
1432 // Set the free bit, and decrement the number of allocations. Finally, release
1433 // the lock and return the tag that was located
1434 //
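// Since Va is page aligned, the increment below merely sets bit 0, i.e.
// POOL_BIG_TABLE_ENTRY_FREE, marking the slot as free again.
//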
1435 InterlockedIncrement((PLONG)&Entry->Va);
1436 InterlockedDecrementUL(&ExpPoolBigEntriesInUse);
1437 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1438 return PoolTag;
1439 }
1440
1441 VOID
1442 NTAPI
1443 ExQueryPoolUsage(OUT PULONG PagedPoolPages,
1444 OUT PULONG NonPagedPoolPages,
1445 OUT PULONG PagedPoolAllocs,
1446 OUT PULONG PagedPoolFrees,
1447 OUT PULONG PagedPoolLookasideHits,
1448 OUT PULONG NonPagedPoolAllocs,
1449 OUT PULONG NonPagedPoolFrees,
1450 OUT PULONG NonPagedPoolLookasideHits)
1451 {
1452 ULONG i;
1453 PPOOL_DESCRIPTOR PoolDesc;
1454
1455 //
1456 // Assume all failures
1457 //
1458 *PagedPoolPages = 0;
1459 *PagedPoolAllocs = 0;
1460 *PagedPoolFrees = 0;
1461
1462 //
1463 // Tally up the totals for all the paged pools
1464 //
1465 for (i = 0; i < ExpNumberOfPagedPools + 1; i++)
1466 {
1467 PoolDesc = ExpPagedPoolDescriptor[i];
1468 *PagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1469 *PagedPoolAllocs += PoolDesc->RunningAllocs;
1470 *PagedPoolFrees += PoolDesc->RunningDeAllocs;
1471 }
1472
1473 //
1474 // The first non-paged pool has a hardcoded well-known descriptor name
1475 //
1476 PoolDesc = &NonPagedPoolDescriptor;
1477 *NonPagedPoolPages = PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1478 *NonPagedPoolAllocs = PoolDesc->RunningAllocs;
1479 *NonPagedPoolFrees = PoolDesc->RunningDeAllocs;
1480
1481 //
1482 // If the system has more than one non-paged pool, copy the other descriptor
1483 // totals as well
1484 //
1485 #if 0
1486 if (ExpNumberOfNonPagedPools > 1)
1487 {
1488 for (i = 0; i < ExpNumberOfNonPagedPools; i++)
1489 {
1490 PoolDesc = ExpNonPagedPoolDescriptor[i];
1491 *NonPagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1492 *NonPagedPoolAllocs += PoolDesc->RunningAllocs;
1493 *NonPagedPoolFrees += PoolDesc->RunningDeAllocs;
1494 }
1495 }
1496 #endif
1497
1498 //
1499 // FIXME: Not yet supported
1500 //
1501 *NonPagedPoolLookasideHits += 0;
1502 *PagedPoolLookasideHits += 0;
1503 }
1504
1505 /* PUBLIC FUNCTIONS ***********************************************************/
1506
1507 /*
1508 * @implemented
1509 */
1510 PVOID
1511 NTAPI
1512 ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
1513 IN SIZE_T NumberOfBytes,
1514 IN ULONG Tag)
1515 {
1516 PPOOL_DESCRIPTOR PoolDesc;
1517 PLIST_ENTRY ListHead;
1518 PPOOL_HEADER Entry, NextEntry, FragmentEntry;
1519 KIRQL OldIrql;
1520 USHORT BlockSize, i;
1521 ULONG OriginalType;
1522 PKPRCB Prcb = KeGetCurrentPrcb();
1523 PGENERAL_LOOKASIDE LookasideList;
1524
1525 //
1526 // Some sanity checks
1527 //
1528 ASSERT(Tag != 0);
1529 ASSERT(Tag != ' GIB');
1530 ASSERT(NumberOfBytes != 0);
1531 ExpCheckPoolIrqlLevel(PoolType, NumberOfBytes, NULL);
1532
1533 //
1534 // Not supported in ReactOS
1535 //
1536 ASSERT(!(PoolType & SESSION_POOL_MASK));
1537
1538 //
1539 // Check if verifier or special pool is enabled
1540 //
1541 if (ExpPoolFlags & (POOL_FLAG_VERIFIER | POOL_FLAG_SPECIAL_POOL))
1542 {
1543 //
1544 // For verifier, we should call the verification routine
1545 //
1546 if (ExpPoolFlags & POOL_FLAG_VERIFIER)
1547 {
1548 DPRINT1("Driver Verifier is not yet supported\n");
1549 }
1550
1551 //
1552 // For special pool, we check if this is a suitable allocation and do
1553 // the special allocation if needed
1554 //
1555 if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
1556 {
1557 //
1558 // Check if this is a special pool allocation
1559 //
1560 if (MmUseSpecialPool(NumberOfBytes, Tag))
1561 {
1562 //
1563 // Try to allocate using special pool
1564 //
1565 Entry = MmAllocateSpecialPool(NumberOfBytes, Tag, PoolType, 2);
1566 if (Entry) return Entry;
1567 }
1568 }
1569 }
1570
1571 //
1572 // Get the pool type and its corresponding vector for this request
1573 //
1574 OriginalType = PoolType;
1575 PoolType = PoolType & BASE_POOL_TYPE_MASK;
1576 PoolDesc = PoolVector[PoolType];
1577 ASSERT(PoolDesc != NULL);
1578
1579 //
1580 // Check if this is a big page allocation
1581 //
1582 if (NumberOfBytes > POOL_MAX_ALLOC)
1583 {
1584 //
1585 // Allocate pages for it
1586 //
1587 Entry = MiAllocatePoolPages(OriginalType, NumberOfBytes);
1588 if (!Entry)
1589 {
1590 //
1591 // Must succeed pool is deprecated, but still supported. These allocation
1592 // failures must cause an immediate bugcheck
1593 //
1594 if (OriginalType & MUST_SUCCEED_POOL_MASK)
1595 {
1596 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1597 NumberOfBytes,
1598 NonPagedPoolDescriptor.TotalPages,
1599 NonPagedPoolDescriptor.TotalBigPages,
1600 0);
1601 }
1602
1603 //
1604 // Internal debugging
1605 //
1606 ExPoolFailures++;
1607
1608 //
1609 // This flag requests printing failures, and can also further specify
1610 // breaking on failures
1611 //
1612 if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
1613 {
1614 DPRINT1("EX: ExAllocatePool (%p, 0x%x) returning NULL\n",
1615 NumberOfBytes,
1616 OriginalType);
1617 if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
1618 }
1619
1620 //
1621 // Finally, this flag requests an exception, which we are more than
1622 // happy to raise!
1623 //
1624 if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
1625 {
1626 ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
1627 }
1628 }
1629
1630 //
1631 // Increment required counters
1632 //
1633 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
1634 (LONG)BYTES_TO_PAGES(NumberOfBytes));
1635 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, NumberOfBytes);
1636 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
1637
1638 //
1639 // Add a tag for the big page allocation and switch to the generic "BIG"
1640 // tag if we failed to do so, then insert a tracker for this allocation.
1641 //
1642 if (!ExpAddTagForBigPages(Entry,
1643 Tag,
1644 (ULONG)BYTES_TO_PAGES(NumberOfBytes),
1645 OriginalType))
1646 {
1647 Tag = ' GIB';
1648 }
1649 ExpInsertPoolTracker(Tag, ROUND_TO_PAGES(NumberOfBytes), OriginalType);
1650 return Entry;
1651 }
1652
1653 //
1654 // Should never request 0 bytes from the pool, but since so many drivers do
1655 // it, we'll just assume they want 1 byte, based on NT's similar behavior
1656 //
1657 if (!NumberOfBytes) NumberOfBytes = 1;
1658
1659 //
1660 // A pool allocation is defined by its data, a linked list to connect it to
1661 // the free list (if necessary), and a pool header to store accounting info.
1662 // Calculate this size, then convert it into a block size (units of pool
1663 // headers)
1664 //
1665 // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
1666 // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
1667 // the direct allocation of pages.
1668 //
1669 i = (USHORT)((NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1))
1670 / POOL_BLOCK_SIZE);
1671 ASSERT(i < POOL_LISTS_PER_PAGE);
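//
// Worked example (illustration only, x86 sizes assumed): with an 8-byte
// POOL_HEADER and POOL_BLOCK_SIZE of 8, a 100-byte request gives
// i = (100 + 8 + 7) / 8 = 14 blocks, i.e. 112 bytes including the header.
//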
1672
1673 //
1674 // Handle lookaside list optimization for both paged and nonpaged pool
1675 //
1676 if (i <= MAXIMUM_PROCESSORS)
1677 {
1678 //
1679 // Try popping it from the per-CPU lookaside list
1680 //
1681 LookasideList = (PoolType == PagedPool) ?
1682 Prcb->PPPagedLookasideList[i - 1].P :
1683 Prcb->PPNPagedLookasideList[i - 1].P;
1684 LookasideList->TotalAllocates++;
1685 Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
1686 if (!Entry)
1687 {
1688 //
1689 // We failed, try popping it from the global list
1690 //
1691 LookasideList = (PoolType == PagedPool) ?
1692 Prcb->PPPagedLookasideList[i - 1].L :
1693 Prcb->PPNPagedLookasideList[i - 1].L;
1694 LookasideList->TotalAllocates++;
1695 Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
1696 }
1697
1698 //
1699 // If we were able to pop it, update the accounting and return the block
1700 //
1701 if (Entry)
1702 {
1703 LookasideList->AllocateHits++;
1704
1705 //
1706 // Get the real entry, write down its pool type, and track it
1707 //
1708 Entry--;
1709 Entry->PoolType = PoolType + 1;
1710 ExpInsertPoolTracker(Tag,
1711 Entry->BlockSize * POOL_BLOCK_SIZE,
1712 OriginalType);
1713
1714 //
1715 // Return the pool allocation
1716 //
1717 Entry->PoolTag = Tag;
1718 (POOL_FREE_BLOCK(Entry))->Flink = NULL;
1719 (POOL_FREE_BLOCK(Entry))->Blink = NULL;
1720 return POOL_FREE_BLOCK(Entry);
1721 }
1722 }
1723
1724 //
1725 // Loop in the free lists looking for a block of this size. Start with the
1726 // list optimized for this kind of size lookup
1727 //
1728 ListHead = &PoolDesc->ListHeads[i];
1729 do
1730 {
1731 //
1732 // Are there any free entries available on this list?
1733 //
1734 if (!ExpIsPoolListEmpty(ListHead))
1735 {
1736 //
1737 // Acquire the pool lock now
1738 //
1739 OldIrql = ExLockPool(PoolDesc);
1740
1741 //
1742 // And make sure the list still has entries
1743 //
1744 if (ExpIsPoolListEmpty(ListHead))
1745 {
1746 //
1747 // Someone raced us (and won) before we had a chance to acquire
1748 // the lock.
1749 //
1750 // Try again!
1751 //
1752 ExUnlockPool(PoolDesc, OldIrql);
1753 continue;
1754 }
1755
1756 //
1757 // Remove a free entry from the list
1758 // Note that due to the way we insert free blocks into multiple lists
1759 // there is a guarantee that any block on this list will either be
1760 // of the correct size, or perhaps larger.
1761 //
1762 ExpCheckPoolLinks(ListHead);
1763 Entry = POOL_ENTRY(ExpRemovePoolHeadList(ListHead));
1764 ExpCheckPoolLinks(ListHead);
1765 ExpCheckPoolBlocks(Entry);
1766 ASSERT(Entry->BlockSize >= i);
1767 ASSERT(Entry->PoolType == 0);
1768
1769 //
1770 // Check if this block is larger than what we need. The block could
1771 // not possibly be smaller, due to the reason explained above (and
1772 // we would've asserted on a checked build if this was the case).
1773 //
1774 if (Entry->BlockSize != i)
1775 {
1776 //
1777 // Is there an entry before this one?
1778 //
1779 if (Entry->PreviousSize == 0)
1780 {
1781 //
1782 // There isn't anyone before us, so take the next block and
1783 // turn it into a fragment that contains the leftover data
1784 // that we don't need to satisfy the caller's request
1785 //
1786 FragmentEntry = POOL_BLOCK(Entry, i);
1787 FragmentEntry->BlockSize = Entry->BlockSize - i;
1788
1789 //
1790 // And make it point back to us
1791 //
1792 FragmentEntry->PreviousSize = i;
1793
1794 //
1795 // Now get the block that follows the new fragment and check
1796 // if it's still on the same page as us (and not at the end)
1797 //
1798 NextEntry = POOL_NEXT_BLOCK(FragmentEntry);
1799 if (PAGE_ALIGN(NextEntry) != NextEntry)
1800 {
1801 //
1802 // Adjust this next block to point to our newly created
1803 // fragment block
1804 //
1805 NextEntry->PreviousSize = FragmentEntry->BlockSize;
1806 }
1807 }
1808 else
1809 {
1810 //
1811 // There is a free entry before us, which we know is smaller
1812 // so we'll make this entry the fragment instead
1813 //
1814 FragmentEntry = Entry;
1815
1816 //
1817 // And then we'll remove from it the actual size required.
1818 // Now the entry is a leftover free fragment
1819 //
1820 Entry->BlockSize -= i;
1821
1822 //
1823 // Now let's go to the next entry after the fragment (which
1824 // used to point to our original free entry) and make it
1825 // reference the new fragment entry instead.
1826 //
1827 // This is the entry that will actually end up holding the
1828 // allocation!
1829 //
1830 Entry = POOL_NEXT_BLOCK(Entry);
1831 Entry->PreviousSize = FragmentEntry->BlockSize;
1832
1833 //
1834 // And now let's go to the entry after that one and check if
1835 // it's still on the same page, and not at the end
1836 //
1837 NextEntry = POOL_BLOCK(Entry, i);
1838 if (PAGE_ALIGN(NextEntry) != NextEntry)
1839 {
1840 //
1841 // Make it reference the allocation entry
1842 //
1843 NextEntry->PreviousSize = i;
1844 }
1845 }
1846
1847 //
1848 // Now our (allocation) entry is the right size
1849 //
1850 Entry->BlockSize = i;
1851
1852 //
1853 // And the next entry is now the free fragment which contains
1854 // the remaining difference between how big the original entry
1855 // was, and the actual size the caller needs/requested.
1856 //
1857 FragmentEntry->PoolType = 0;
1858 BlockSize = FragmentEntry->BlockSize;
1859
1860 //
1861 // Now check if enough free bytes remained for us to have a
1862 // "full" entry, which contains enough bytes for a linked list
1863 // and thus can be used for allocations (up to 8 bytes...)
1864 //
1865 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
1866 if (BlockSize != 1)
1867 {
1868 //
1869 // Insert the free entry into the free list for this size
1870 //
1871 ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
1872 POOL_FREE_BLOCK(FragmentEntry));
1873 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
1874 }
1875 }
1876
1877 //
1878 // We have found an entry for this allocation, so set the pool type
1879 // and release the lock since we're done
1880 //
1881 Entry->PoolType = PoolType + 1;
1882 ExpCheckPoolBlocks(Entry);
1883 ExUnlockPool(PoolDesc, OldIrql);
1884
1885 //
1886 // Increment required counters
1887 //
1888 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
1889 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
1890
1891 //
1892 // Track this allocation
1893 //
1894 ExpInsertPoolTracker(Tag,
1895 Entry->BlockSize * POOL_BLOCK_SIZE,
1896 OriginalType);
1897
1898 //
1899 // Return the pool allocation
1900 //
1901 Entry->PoolTag = Tag;
1902 (POOL_FREE_BLOCK(Entry))->Flink = NULL;
1903 (POOL_FREE_BLOCK(Entry))->Blink = NULL;
1904 return POOL_FREE_BLOCK(Entry);
1905 }
1906 } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);
1907
1908 //
1909 // There were no free entries left, so we have to allocate a new fresh page
1910 //
1911 Entry = MiAllocatePoolPages(PoolType, PAGE_SIZE);
1912 if (!Entry)
1913 {
1914 //
1915 // Must succeed pool is deprecated, but still supported. These allocation
1916 // failures must cause an immediate bugcheck
1917 //
1918 if (OriginalType & MUST_SUCCEED_POOL_MASK)
1919 {
1920 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1921 PAGE_SIZE,
1922 NonPagedPoolDescriptor.TotalPages,
1923 NonPagedPoolDescriptor.TotalBigPages,
1924 0);
1925 }
1926
1927 //
1928 // Internal debugging
1929 //
1930 ExPoolFailures++;
1931
1932 //
1933 // This flag requests printing failures, and can also further specify
1934 // breaking on failures
1935 //
1936 if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
1937 {
1938 DPRINT1("EX: ExAllocatePool (%p, 0x%x) returning NULL\n",
1939 NumberOfBytes,
1940 OriginalType);
1941 if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
1942 }
1943
1944 //
1945 // Finally, this flag requests an exception, which we are more than
1946 // happy to raise!
1947 //
1948 if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
1949 {
1950 ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
1951 }
1952
1953 //
1954 // Return NULL to the caller in all other cases
1955 //
1956 return NULL;
1957 }
1958
1959 //
1960 // Setup the entry data
1961 //
1962 Entry->Ulong1 = 0;
1963 Entry->BlockSize = i;
1964 Entry->PoolType = PoolType + 1;
1965
1966 //
1967 // This page will have two entries -- one for the allocation (which we just
1968 // created above), and one for the remaining free bytes, which we're about
1969 // to create now. The free bytes are the whole page minus what was allocated
1970 // and then converted into units of block headers.
1971 //
1972 BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i;
1973 FragmentEntry = POOL_BLOCK(Entry, i);
1974 FragmentEntry->Ulong1 = 0;
1975 FragmentEntry->BlockSize = BlockSize;
1976 FragmentEntry->PreviousSize = i;
1977
1978 //
1979 // Increment required counters
1980 //
1981 InterlockedIncrement((PLONG)&PoolDesc->TotalPages);
1982 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
1983
1984 //
1985 // Now check if enough free bytes remained for us to have a "full" entry,
1986 // which contains enough bytes for a linked list and thus can be used for
1987 // allocations (up to 8 bytes...)
1988 //
1989 if (FragmentEntry->BlockSize != 1)
1990 {
1991 //
1992 // Excellent -- acquire the pool lock
1993 //
1994 OldIrql = ExLockPool(PoolDesc);
1995
1996 //
1997 // And insert the free entry into the free list for this block size
1998 //
1999 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
2000 ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
2001 POOL_FREE_BLOCK(FragmentEntry));
2002 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
2003
2004 //
2005 // Release the pool lock
2006 //
2007 ExpCheckPoolBlocks(Entry);
2008 ExUnlockPool(PoolDesc, OldIrql);
2009 }
2010 else
2011 {
2012 //
2013 // Simply do a sanity check
2014 //
2015 ExpCheckPoolBlocks(Entry);
2016 }
2017
2018 //
2019 // Increment performance counters and track this allocation
2020 //
2021 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
2022 ExpInsertPoolTracker(Tag,
2023 Entry->BlockSize * POOL_BLOCK_SIZE,
2024 PoolType);
2025
2026 //
2027 // And return the pool allocation
2028 //
2029 ExpCheckPoolBlocks(Entry);
2030 Entry->PoolTag = Tag;
2031 return POOL_FREE_BLOCK(Entry);
2032 }
2033
2034 /*
2035 * @implemented
2036 */
2037 PVOID
2038 NTAPI
2039 ExAllocatePool(POOL_TYPE PoolType,
2040 SIZE_T NumberOfBytes)
2041 {
2042 //
2043 // Use a default tag of "None"
2044 //
2045 return ExAllocatePoolWithTag(PoolType, NumberOfBytes, TAG_NONE);
2046 }
2047
2048 /*
2049 * @implemented
2050 */
2051 VOID
2052 NTAPI
2053 ExFreePoolWithTag(IN PVOID P,
2054 IN ULONG TagToFree)
2055 {
2056 PPOOL_HEADER Entry, NextEntry;
2057 USHORT BlockSize;
2058 KIRQL OldIrql;
2059 POOL_TYPE PoolType;
2060 PPOOL_DESCRIPTOR PoolDesc;
2061 ULONG Tag;
2062 BOOLEAN Combined = FALSE;
2063 PFN_NUMBER PageCount, RealPageCount;
2064 PKPRCB Prcb = KeGetCurrentPrcb();
2065 PGENERAL_LOOKASIDE LookasideList;
2066
2067 //
2068 // Check if any of the debug flags are enabled
2069 //
2070 if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
2071 POOL_FLAG_CHECK_WORKERS |
2072 POOL_FLAG_CHECK_RESOURCES |
2073 POOL_FLAG_VERIFIER |
2074 POOL_FLAG_CHECK_DEADLOCK |
2075 POOL_FLAG_SPECIAL_POOL))
2076 {
2077 //
2078 // Check if special pool is enabled
2079 //
2080 if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
2081 {
2082 //
2083 // Check if it was allocated from a special pool
2084 //
2085 if (MmIsSpecialPoolAddress(P))
2086 {
2087 //
2088 // Was deadlock verification also enabled? We can do some extra
2089 // checks at this point
2090 //
2091 if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
2092 {
2093 DPRINT1("Verifier not yet supported\n");
2094 }
2095
2096 //
2097 // It is, so handle it via special pool free routine
2098 //
2099 MmFreeSpecialPool(P);
2100 return;
2101 }
2102 }
2103
2104 //
2105 // For non-big page allocations, we'll do a bunch of checks in here
2106 //
2107 if (PAGE_ALIGN(P) != P)
2108 {
2109 //
2110 // Get the entry for this pool allocation
2111 // The pointer math here may look wrong or confusing, but it is quite right
2112 //
2113 Entry = P;
2114 Entry--;
2115
2116 //
2117 // Get the pool type
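// (the header stores the pool type biased by one so that a value of zero can
// mark a free block; subtracting one and masking recovers the base
// paged/nonpaged type)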
2118 //
2119 PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2120
2121 //
2122 // FIXME: Many other debugging checks go here
2123 //
2124 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2125 }
2126 }
2127
2128 //
2129 // Check if this is a big page allocation
2130 //
2131 if (PAGE_ALIGN(P) == P)
2132 {
2133 //
2134 // We need to find the tag for it, so first we need to find out what
2135 // kind of allocation this was (paged or nonpaged), then we can go
2136 // ahead and try finding the tag for it. Remember to get rid of the
2137 // PROTECTED_POOL tag if it's found.
2138 //
2139 // Note that if at insertion time, we failed to add the tag for a big
2140 // pool allocation, we used a special tag called 'BIG' to identify the
2141 // allocation, and we may get this tag back. In this scenario, we must
2142 // manually get the size of the allocation by actually counting through
2143 // the PFN database.
2144 //
2145 PoolType = MmDeterminePoolType(P);
2146 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2147 Tag = ExpFindAndRemoveTagBigPages(P, &PageCount, PoolType);
2148 if (!Tag)
2149 {
2150 DPRINT1("We do not know the size of this allocation. This is not yet supported\n");
2151 ASSERT(Tag == ' GIB');
2152 PageCount = 1; // We are going to lie! This might screw up accounting?
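// (A fuller implementation would recover the real size by walking the PFN
// database, as noted above. MiFreePoolPages below does return the true page
// count, which the ASSERT further down compares against this guess.)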
2153 }
2154 else if (Tag & PROTECTED_POOL)
2155 {
2156 Tag &= ~PROTECTED_POOL;
2157 }
2158
2159 //
2160 // Check block tag
2161 //
2162 if (TagToFree && TagToFree != Tag)
2163 {
2164 DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
2165 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
2166 }
2167
2168 //
2169 // We have our tag and our page count, so we can go ahead and remove this
2170 // tracker now
2171 //
2172 ExpRemovePoolTracker(Tag, PageCount << PAGE_SHIFT, PoolType);
2173
2174 //
2175 // Check if any of the debug flags are enabled
2176 //
2177 if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
2178 POOL_FLAG_CHECK_WORKERS |
2179 POOL_FLAG_CHECK_RESOURCES |
2180 POOL_FLAG_CHECK_DEADLOCK))
2181 {
2182 //
2183 // Was deadlock verification also enabled? We can do some extra
2184 // checks at this point
2185 //
2186 if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
2187 {
2188 DPRINT1("Verifier not yet supported\n");
2189 }
2190
2191 //
2192 // FIXME: Many debugging checks go here
2193 //
2194 }
2195
2196 //
2197 // Update counters
2198 //
2199 PoolDesc = PoolVector[PoolType];
2200 InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
2201 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes,
2202 -(LONG_PTR)(PageCount << PAGE_SHIFT));
2203
2204 //
2205 // Do the real free now and update the last counter with the big page count
2206 //
2207 RealPageCount = MiFreePoolPages(P);
2208 ASSERT(RealPageCount == PageCount);
2209 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
2210 -(LONG)RealPageCount);
2211 return;
2212 }
2213
2214 //
2215 // Get the entry for this pool allocation
2216 // The pointer math here may look wrong or confusing, but it is quite right
2217 //
2218 Entry = P;
2219 Entry--;
2220
2221 //
2222 // Get the size of the entry, and its pool type, then load the descriptor
2223 // for this pool type
2224 //
2225 BlockSize = Entry->BlockSize;
2226 PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2227 PoolDesc = PoolVector[PoolType];
2228
2229 //
2230 // Make sure that the IRQL makes sense
2231 //
2232 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2233
2234 //
2235 // Get the pool tag and get rid of the PROTECTED_POOL flag
2236 //
2237 Tag = Entry->PoolTag;
2238 if (Tag & PROTECTED_POOL) Tag &= ~PROTECTED_POOL;
2239
2240 //
2241 // Check block tag
2242 //
2243 if (TagToFree && TagToFree != Tag)
2244 {
2245 DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
2246 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
2247 }
2248
2249 //
2250 // Track the removal of this allocation
2251 //
2252 ExpRemovePoolTracker(Tag,
2253 BlockSize * POOL_BLOCK_SIZE,
2254 Entry->PoolType - 1);
2255
2256 //
2257 // Is this allocation small enough to have come from a lookaside list?
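// (the limit is MAXIMUM_PROCESSORS, which on x86 happens to match the 32
// per-processor small-pool lookaside lists, i.e. blocks of up to
// 32 * POOL_BLOCK_SIZE bytes; keep this coincidence in mind on other
// architectures)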
2258 //
2259 if (BlockSize <= MAXIMUM_PROCESSORS)
2260 {
2261 //
2262 // Try pushing it into the per-CPU lookaside list
2263 //
2264 LookasideList = (PoolType == PagedPool) ?
2265 Prcb->PPPagedLookasideList[BlockSize - 1].P :
2266 Prcb->PPNPagedLookasideList[BlockSize - 1].P;
2267 LookasideList->TotalFrees++;
2268 if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2269 {
2270 LookasideList->FreeHits++;
2271 InterlockedPushEntrySList(&LookasideList->ListHead, P);
2272 return;
2273 }
2274
2275 //
2276 // We failed, try to push it into the global lookaside list
2277 //
2278 LookasideList = (PoolType == PagedPool) ?
2279 Prcb->PPPagedLookasideList[BlockSize - 1].L :
2280 Prcb->PPNPagedLookasideList[BlockSize - 1].L;
2281 LookasideList->TotalFrees++;
2282 if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2283 {
2284 LookasideList->FreeHits++;
2285 InterlockedPushEntrySList(&LookasideList->ListHead, P);
2286 return;
2287 }
2288 }
2289
2290 //
2291 // Get the pointer to the next entry
2292 //
2293 NextEntry = POOL_BLOCK(Entry, BlockSize);
2294
2295 //
2296 // Update performance counters
2297 //
2298 InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
2299 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, -(LONG_PTR)(BlockSize * POOL_BLOCK_SIZE));
2300
2301 //
2302 // Acquire the pool lock
2303 //
2304 OldIrql = ExLockPool(PoolDesc);
2305
2306 //
2307 // Check if the next allocation is at the end of the page
2308 //
2309 ExpCheckPoolBlocks(Entry);
2310 if (PAGE_ALIGN(NextEntry) != NextEntry)
2311 {
2312 //
2313 // We may be able to combine the block if it's free
2314 //
2315 if (NextEntry->PoolType == 0)
2316 {
2317 //
2318 // The next block is free, so we'll do a combine
2319 //
2320 Combined = TRUE;
2321
2322 //
2323 // Make sure there's actual data in the block -- anything smaller
2324 // than this means we only have the header, so there's no linked list
2325 // for us to remove
2326 //
2327 if ((NextEntry->BlockSize != 1))
2328 {
2329 //
2330 // The block is at least big enough to have a linked list, so go
2331 // ahead and remove it
2332 //
2333 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
2334 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
2335 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
2336 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
2337 }
2338
2339 //
2340 // Our entry is now combined with the next entry
2341 //
2342 Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
2343 }
2344 }
2345
2346 //
2347 // Now check if there was a previous entry on the same page as us
2348 //
2349 if (Entry->PreviousSize)
2350 {
2351 //
2352 // Great, grab that entry and check if it's free
2353 //
2354 NextEntry = POOL_PREV_BLOCK(Entry);
2355 if (NextEntry->PoolType == 0)
2356 {
2357 //
2358 // It is, so we can do a combine
2359 //
2360 Combined = TRUE;
2361
2362 //
2363 // Make sure there's actual data in the block -- anything smaller
2364 // than this means we only have the header so there's no linked list
2365 // for us to remove
2366 //
2367 if ((NextEntry->BlockSize != 1))
2368 {
2369 //
2370 // The block is at least big enough to have a linked list, so go
2371 // ahead and remove it
2372 //
2373 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
2374 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
2375 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
2376 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
2377 }
2378
2379 //
2380 // Combine our original block (which might've already been combined
2381 // with the next block) into the previous block
2382 //
2383 NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;
2384
2385 //
2386 // And now we'll work with the previous block instead
2387 //
2388 Entry = NextEntry;
2389 }
2390 }
2391
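//
// NOTE: If we merged with a neighbor, the PreviousSize of whatever block now
// follows the enlarged region is stale at this point; it is corrected further
// below, under the Combined check, once the final BlockSize is known.
//
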
2392 //
2393 // By now, it may have been possible for our combined blocks to actually
2394 // have made up a full page (if there were only 2-3 allocations on the
2395 // page, they could've all been combined).
2396 //
2397 if ((PAGE_ALIGN(Entry) == Entry) &&
2398 (PAGE_ALIGN(POOL_NEXT_BLOCK(Entry)) == POOL_NEXT_BLOCK(Entry)))
2399 {
2400 //
2401 // In this case, release the pool lock, update the performance counter,
2402 // and free the page
2403 //
2404 ExUnlockPool(PoolDesc, OldIrql);
2405 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalPages, -1);
2406 MiFreePoolPages(Entry);
2407 return;
2408 }
2409
2410 //
2411 // Otherwise, we now have a free block (or a combination of 2 or 3)
2412 //
2413 Entry->PoolType = 0;
2414 BlockSize = Entry->BlockSize;
2415 ASSERT(BlockSize != 1);
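// (a freed block always carries at least one unit of caller data on top of
// its header, and single-unit fragments are kept off the free lists by the
// allocation path above, so a size of 1 here would indicate corruption)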
2416
2417 //
2418 // Check if we actually did combine it with anyone
2419 //
2420 if (Combined)
2421 {
2422 //
2423 // Get the first combined block (either our original to begin with, or
2424 // the one after the original, depending if we combined with the previous)
2425 //
2426 NextEntry = POOL_NEXT_BLOCK(Entry);
2427
2428 //
2429 // As long as the next block isn't on a page boundary, have it point
2430 // back to us
2431 //
2432 if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
2433 }
2434
2435 //
2436 // Insert this new free block, and release the pool lock
2437 //
2438 ExpInsertPoolHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
2439 ExpCheckPoolLinks(POOL_FREE_BLOCK(Entry));
2440 ExUnlockPool(PoolDesc, OldIrql);
2441 }
2442
2443 /*
2444 * @implemented
2445 */
2446 VOID
2447 NTAPI
2448 ExFreePool(PVOID P)
2449 {
2450 //
2451 // Just free without checking for the tag
2452 //
2453 ExFreePoolWithTag(P, 0);
2454 }
2455
2456 /*
2457 * @unimplemented
2458 */
2459 SIZE_T
2460 NTAPI
2461 ExQueryPoolBlockSize(IN PVOID PoolBlock,
2462 OUT PBOOLEAN QuotaCharged)
2463 {
2464 //
2465 // Not implemented -- no block size can be reported yet
2466 //
2467 UNIMPLEMENTED;
2468 return 0;
2469 }
2470
2471 /*
2472 * @implemented
2473 */
2474
2475 PVOID
2476 NTAPI
2477 ExAllocatePoolWithQuota(IN POOL_TYPE PoolType,
2478 IN SIZE_T NumberOfBytes)
2479 {
2480 //
2481 // Allocate the pool with the default 'None' tag
2482 //
2483 return ExAllocatePoolWithQuotaTag(PoolType, NumberOfBytes, TAG_NONE);
2484 }
2485
2486 /*
2487 * @implemented
2488 */
2489 PVOID
2490 NTAPI
2491 ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType,
2492 IN SIZE_T NumberOfBytes,
2493 IN ULONG Tag,
2494 IN EX_POOL_PRIORITY Priority)
2495 {
2496 //
2497 // The priority hint is not honored yet; fall back to a plain tagged allocation
2498 //
2499 UNIMPLEMENTED;
2500 return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2501 }
2502
2503 /*
2504 * @implemented
2505 */
2506 PVOID
2507 NTAPI
2508 ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType,
2509 IN SIZE_T NumberOfBytes,
2510 IN ULONG Tag)
2511 {
2512 //
2513 // Pool quota is not charged yet; fall back to a plain tagged allocation
2514 //
2515 UNIMPLEMENTED;
2516 return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2517 }
2518
2519 #if DBG && KDBG
2520
2521 BOOLEAN
2522 ExpKdbgExtPool(
2523 ULONG Argc,
2524 PCHAR Argv[])
2525 {
2526 ULONG_PTR Address = 0, Flags = 0;
2527 PVOID PoolPage;
2528 PPOOL_HEADER Entry;
2529 BOOLEAN ThisOne;
2530 PULONG Data;
2531
2532 if (Argc > 1)
2533 {
2534 /* Get address */
2535 if (!KdbpGetHexNumber(Argv[1], &Address))
2536 {
2537 KdbpPrint("Invalid parameter: %s\n", Argv[0]);
2538 return TRUE;
2539 }
2540 }
2541
2542 if (Argc > 2)
2543 {
2544 /* Get flags */
2545 if (!KdbpGetHexNumber(Argv[2], &Flags))
2546 {
2547 KdbpPrint("Invalid parameter: %s\n", Argv[2]);
2548 return TRUE;
2549 }
2550 }
2551
2552 /* Check if we got an address */
2553 if (Address != 0)
2554 {
2555 /* Get the base page */
2556 PoolPage = PAGE_ALIGN(Address);
2557 }
2558 else
2559 {
2560 KdbpPrint("Heap is unimplemented\n");
2561 return TRUE;
2562 }
2563
2564 /* No paging support! */
2565 if (!MmIsAddressValid(PoolPage))
2566 {
2567 KdbpPrint("Address not accessible!\n");
2568 return TRUE;
2569 }
2570
2571 /* Get pool type */
2572 if ((Address >= (ULONG_PTR)MmPagedPoolStart) && (Address <= (ULONG_PTR)MmPagedPoolEnd))
2573 KdbpPrint("Allocation is from PagedPool region\n");
2574 else if ((Address >= (ULONG_PTR)MmNonPagedPoolStart) && (Address <= (ULONG_PTR)MmNonPagedPoolEnd))
2575 KdbpPrint("Allocation is from NonPagedPool region\n");
2576 else
2577 {
2578 KdbpPrint("Address 0x%p is not within any pool!\n", (PVOID)Address);
2579 return TRUE;
2580 }
2581
2582 /* Loop all entries of that page */
2583 Entry = PoolPage;
2584 do
2585 {
2586 /* Check if the address is within that entry */
2587 ThisOne = ((Address >= (ULONG_PTR)Entry) &&
2588 (Address < (ULONG_PTR)(Entry + Entry->BlockSize)));
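/* (Entry is a PPOOL_HEADER, so "Entry + BlockSize" advances in whole pool
   blocks, since sizeof(POOL_HEADER) matches POOL_BLOCK_SIZE, which yields
   the first byte past this allocation) */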
2589
2590 if (!(Flags & 1) || ThisOne)
2591 {
2592 /* Print the line */
2593 KdbpPrint("%c%p size: %4d previous size: %4d %s %.4s\n",
2594 ThisOne ? '*' : ' ', Entry, Entry->BlockSize, Entry->PreviousSize,
2595 (Flags & 0x80000000) ? "" : (Entry->PoolType ? "(Allocated)" : "(Free) "),
2596 (Flags & 0x80000000) ? "" : (PCHAR)&Entry->PoolTag);
2597 }
2598
2599 if (Flags & 1)
2600 {
2601 Data = (PULONG)(Entry + 1);
2602 KdbpPrint(" %p %08lx %08lx %08lx %08lx\n"
2603 " %p %08lx %08lx %08lx %08lx\n",
2604 &Data[0], Data[0], Data[1], Data[2], Data[3],
2605 &Data[4], Data[4], Data[5], Data[6], Data[7]);
2606 }
2607
2608 /* Go to next entry */
2609 Entry = POOL_BLOCK(Entry, Entry->BlockSize);
2610 }
2611 while (((ULONG_PTR)Entry < (ULONG_PTR)PoolPage + PAGE_SIZE) && (Entry->BlockSize != 0));
2612
2613 return TRUE;
2614 }
2615
2616 #endif // DBG && KDBG
2617
2618 /* EOF */