/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/expool.c
 * PURPOSE:         ARM Memory Manager Executive Pool Manager
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"

#undef ExAllocatePoolWithQuota
#undef ExAllocatePoolWithQuotaTag

/* GLOBALS ********************************************************************/

#define POOL_BIG_TABLE_ENTRY_FREE 0x1

typedef struct _POOL_DPC_CONTEXT
{
    PPOOL_TRACKER_TABLE PoolTrackTable;
    SIZE_T PoolTrackTableSize;
    PPOOL_TRACKER_TABLE PoolTrackTableExpansion;
    SIZE_T PoolTrackTableSizeExpansion;
} POOL_DPC_CONTEXT, *PPOOL_DPC_CONTEXT;

ULONG ExpNumberOfPagedPools;
POOL_DESCRIPTOR NonPagedPoolDescriptor;
PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16 + 1];
PPOOL_DESCRIPTOR PoolVector[2];
PKGUARDED_MUTEX ExpPagedPoolMutex;
SIZE_T PoolTrackTableSize, PoolTrackTableMask;
SIZE_T PoolBigPageTableSize, PoolBigPageTableHash;
PPOOL_TRACKER_TABLE PoolTrackTable;
PPOOL_TRACKER_BIG_PAGES PoolBigPageTable;
KSPIN_LOCK ExpTaggedPoolLock;
ULONG PoolHitTag;
BOOLEAN ExStopBadTags;
KSPIN_LOCK ExpLargePoolTableLock;
ULONG ExpPoolBigEntriesInUse;
ULONG ExpPoolFlags;
ULONG ExPoolFailures;

/* Pool block/header/list access macros */
#define POOL_ENTRY(x)       (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
#define POOL_FREE_BLOCK(x)  (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
#define POOL_BLOCK(x, i)    (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
#define POOL_NEXT_BLOCK(x)  POOL_BLOCK((x), (x)->BlockSize)
#define POOL_PREV_BLOCK(x)  POOL_BLOCK((x), -((x)->PreviousSize))

/*
 * Pool list access debug macros, similar to Arthur's pfnlist.c work.
 * Microsoft actually implements similar checks in the Windows Server 2003 SP1
 * pool code, but only for checked builds.
 *
 * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
 * that these checks are done even on retail builds, due to the increasing
 * number of kernel-mode attacks which depend on dangling list pointers and other
 * kinds of list-based attacks.
 *
 * For now, I will leave these checks on all the time, but later they are likely
 * to be DBG-only, at least until there are enough kernel-mode security attacks
 * against ReactOS to warrant the performance hit.
 *
 * For now, these are not made inline, so we can get good stack traces.
 */
PLIST_ENTRY
NTAPI
ExpDecodePoolLink(IN PLIST_ENTRY Link)
{
    return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
}

PLIST_ENTRY
NTAPI
ExpEncodePoolLink(IN PLIST_ENTRY Link)
{
    return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
}
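
/*
 * A note on the encoding above: free-list links are always stored with bit 0
 * set, so any code path that forgets to pass a link through
 * ExpDecodePoolLink() ends up with an obviously bogus (odd) pointer rather
 * than a plausible one, and the consistency checks below can tell a properly
 * encoded link apart from a raw pointer that was written by mistake.
 */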

VOID
NTAPI
ExpCheckPoolLinks(IN PLIST_ENTRY ListHead)
{
    if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink) != ListHead) ||
        (ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink) != ListHead))
    {
        KeBugCheckEx(BAD_POOL_HEADER,
                     3,
                     (ULONG_PTR)ListHead,
                     (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink),
                     (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink));
    }
}

VOID
NTAPI
ExpInitializePoolListHead(IN PLIST_ENTRY ListHead)
{
    ListHead->Flink = ListHead->Blink = ExpEncodePoolLink(ListHead);
}

BOOLEAN
NTAPI
ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)
{
    return (ExpDecodePoolLink(ListHead->Flink) == ListHead);
}

VOID
NTAPI
ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Blink, Flink;
    Flink = ExpDecodePoolLink(Entry->Flink);
    Blink = ExpDecodePoolLink(Entry->Blink);
    Flink->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(Flink);
}

PLIST_ENTRY
NTAPI
ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
{
    PLIST_ENTRY Entry, Flink;
    Entry = ExpDecodePoolLink(ListHead->Flink);
    Flink = ExpDecodePoolLink(Entry->Flink);
    ListHead->Flink = ExpEncodePoolLink(Flink);
    Flink->Blink = ExpEncodePoolLink(ListHead);
    return Entry;
}

PLIST_ENTRY
NTAPI
ExpRemovePoolTailList(IN PLIST_ENTRY ListHead)
{
    PLIST_ENTRY Entry, Blink;
    Entry = ExpDecodePoolLink(ListHead->Blink);
    Blink = ExpDecodePoolLink(Entry->Blink);
    ListHead->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(ListHead);
    return Entry;
}

VOID
NTAPI
ExpInsertPoolTailList(IN PLIST_ENTRY ListHead,
                      IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Blink;
    ExpCheckPoolLinks(ListHead);
    Blink = ExpDecodePoolLink(ListHead->Blink);
    Entry->Flink = ExpEncodePoolLink(ListHead);
    Entry->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(Entry);
    ListHead->Blink = ExpEncodePoolLink(Entry);
    ExpCheckPoolLinks(ListHead);
}

VOID
NTAPI
ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead,
                      IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Flink;
    ExpCheckPoolLinks(ListHead);
    Flink = ExpDecodePoolLink(ListHead->Flink);
    Entry->Flink = ExpEncodePoolLink(Flink);
    Entry->Blink = ExpEncodePoolLink(ListHead);
    Flink->Blink = ExpEncodePoolLink(Entry);
    ListHead->Flink = ExpEncodePoolLink(Entry);
    ExpCheckPoolLinks(ListHead);
}

VOID
NTAPI
ExpCheckPoolHeader(IN PPOOL_HEADER Entry)
{
    PPOOL_HEADER PreviousEntry, NextEntry;

    /* Is there a block before this one? */
    if (Entry->PreviousSize)
    {
        /* Get it */
        PreviousEntry = POOL_PREV_BLOCK(Entry);

        /* The two blocks must be on the same page! */
        if (PAGE_ALIGN(Entry) != PAGE_ALIGN(PreviousEntry))
        {
            /* Something is awry */
            KeBugCheckEx(BAD_POOL_HEADER,
                         6,
                         (ULONG_PTR)PreviousEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }

        /* This block should also indicate that it's as large as we think it is */
        if (PreviousEntry->BlockSize != Entry->PreviousSize)
        {
            /* Otherwise, someone corrupted one of the sizes */
            DPRINT1("PreviousEntry BlockSize %lu, tag %.4s. Entry PreviousSize %lu, tag %.4s\n",
                    PreviousEntry->BlockSize, (char *)&PreviousEntry->PoolTag,
                    Entry->PreviousSize, (char *)&Entry->PoolTag);
            KeBugCheckEx(BAD_POOL_HEADER,
                         5,
                         (ULONG_PTR)PreviousEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }
    }
    else if (PAGE_ALIGN(Entry) != Entry)
    {
        /* If there's no block before us, we are the first block, so we should be on a page boundary */
        KeBugCheckEx(BAD_POOL_HEADER,
                     7,
                     0,
                     __LINE__,
                     (ULONG_PTR)Entry);
    }

    /* This block must have a size */
    if (!Entry->BlockSize)
    {
        /* Someone must've corrupted this field */
        if (Entry->PreviousSize)
        {
            PreviousEntry = POOL_PREV_BLOCK(Entry);
            DPRINT1("PreviousEntry tag %.4s. Entry tag %.4s\n",
                    (char *)&PreviousEntry->PoolTag,
                    (char *)&Entry->PoolTag);
        }
        else
        {
            DPRINT1("Entry tag %.4s\n",
                    (char *)&Entry->PoolTag);
        }
        KeBugCheckEx(BAD_POOL_HEADER,
                     8,
                     0,
                     __LINE__,
                     (ULONG_PTR)Entry);
    }

    /* Okay, now get the next block */
    NextEntry = POOL_NEXT_BLOCK(Entry);

    /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
    if (PAGE_ALIGN(NextEntry) != NextEntry)
    {
        /* The two blocks must be on the same page! */
        if (PAGE_ALIGN(Entry) != PAGE_ALIGN(NextEntry))
        {
            /* Something is messed up */
            KeBugCheckEx(BAD_POOL_HEADER,
                         9,
                         (ULONG_PTR)NextEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }

        /* And this block should think we are as large as we truly are */
        if (NextEntry->PreviousSize != Entry->BlockSize)
        {
            /* Otherwise, someone corrupted the field */
            DPRINT1("Entry BlockSize %lu, tag %.4s. NextEntry PreviousSize %lu, tag %.4s\n",
                    Entry->BlockSize, (char *)&Entry->PoolTag,
                    NextEntry->PreviousSize, (char *)&NextEntry->PoolTag);
            KeBugCheckEx(BAD_POOL_HEADER,
                         5,
                         (ULONG_PTR)NextEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }
    }
}

VOID
NTAPI
ExpCheckPoolAllocation(
    PVOID P,
    POOL_TYPE PoolType,
    ULONG Tag)
{
    PPOOL_HEADER Entry;
    ULONG i;
    KIRQL OldIrql;
    POOL_TYPE RealPoolType;

    /* Get the pool header */
    Entry = ((PPOOL_HEADER)P) - 1;

    /* Check if this is a large allocation */
    if (PAGE_ALIGN(P) == P)
    {
        /* Lock the pool table */
        KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);

        /* Find the pool tag */
        for (i = 0; i < PoolBigPageTableSize; i++)
        {
            /* Check if this is our allocation */
            if (PoolBigPageTable[i].Va == P)
            {
                /* Make sure the tag is ok */
                if (PoolBigPageTable[i].Key != Tag)
                {
                    KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, PoolBigPageTable[i].Key, Tag);
                }

                break;
            }
        }

        /* Release the lock */
        KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);

        if (i == PoolBigPageTableSize)
        {
            /* Did not find the allocation */
            //ASSERT(FALSE);
        }

        /* Get Pool type by address */
        RealPoolType = MmDeterminePoolType(P);
    }
    else
    {
        /* Verify the tag */
        if (Entry->PoolTag != Tag)
        {
            DPRINT1("Allocation has wrong pool tag! Expected '%.4s', got '%.4s' (0x%08lx)\n",
                    &Tag, &Entry->PoolTag, Entry->PoolTag);
            KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Entry->PoolTag, Tag);
        }

        /* Check the rest of the header */
        ExpCheckPoolHeader(Entry);

        /* Get Pool type from entry */
        RealPoolType = (Entry->PoolType - 1);
    }

    /* Should we check the pool type? */
    if (PoolType != -1)
    {
        /* Verify the pool type */
        if (RealPoolType != PoolType)
        {
            DPRINT1("Wrong pool type! Expected %s, got %s\n",
                    PoolType & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool",
                    (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool");
            KeBugCheckEx(BAD_POOL_CALLER, 0xCC, (ULONG_PTR)P, Entry->PoolTag, Tag);
        }
    }
}

VOID
NTAPI
ExpCheckPoolBlocks(IN PVOID Block)
{
    BOOLEAN FoundBlock = FALSE;
    SIZE_T Size = 0;
    PPOOL_HEADER Entry;

    /* Get the first entry for this page, make sure it really is the first */
    Entry = PAGE_ALIGN(Block);
    ASSERT(Entry->PreviousSize == 0);

    /* Now scan each entry */
    while (TRUE)
    {
        /* When we actually found our block, remember this */
        if (Entry == Block) FoundBlock = TRUE;

        /* Now validate this block header */
        ExpCheckPoolHeader(Entry);

        /* And go to the next one, keeping track of our size */
        Size += Entry->BlockSize;
        Entry = POOL_NEXT_BLOCK(Entry);

        /* If we hit the last block, stop */
        if (Size >= (PAGE_SIZE / POOL_BLOCK_SIZE)) break;

        /* If we hit the end of the page, stop */
        if (PAGE_ALIGN(Entry) == Entry) break;
    }

    /* We must've found our block, and we must have hit the end of the page */
    if ((PAGE_ALIGN(Entry) != Entry) || !(FoundBlock))
    {
        /* Otherwise, the blocks are messed up */
        KeBugCheckEx(BAD_POOL_HEADER, 10, (ULONG_PTR)Block, __LINE__, (ULONG_PTR)Entry);
    }
}

FORCEINLINE
VOID
ExpCheckPoolIrqlLevel(IN POOL_TYPE PoolType,
                      IN SIZE_T NumberOfBytes,
                      IN PVOID Entry)
{
    //
    // Validate IRQL: It must be APC_LEVEL or lower for Paged Pool, and it must
    // be DISPATCH_LEVEL or lower for Non Paged Pool
    //
    if (((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) ?
        (KeGetCurrentIrql() > APC_LEVEL) :
        (KeGetCurrentIrql() > DISPATCH_LEVEL))
    {
        //
        // Take the system down
        //
        KeBugCheckEx(BAD_POOL_CALLER,
                     !Entry ? POOL_ALLOC_IRQL_INVALID : POOL_FREE_IRQL_INVALID,
                     KeGetCurrentIrql(),
                     PoolType,
                     !Entry ? NumberOfBytes : (ULONG_PTR)Entry);
    }
}

FORCEINLINE
ULONG
ExpComputeHashForTag(IN ULONG Tag,
                     IN SIZE_T BucketMask)
{
    //
    // Compute the hash by multiplying with a large prime number and then XORing
    // with the HIDWORD of the result.
    //
    // Finally, AND with the bucket mask to generate a valid index/bucket into
    // the table
    //
    ULONGLONG Result = (ULONGLONG)40543 * Tag;
    return (ULONG)BucketMask & ((ULONG)Result ^ (Result >> 32));
}
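
/*
 * Worked example for the hash above, assuming a trivial Tag of 1 and a
 * hypothetical 1024-bucket table (BucketMask = 1023): Result = 40543 * 1 =
 * 40543 (0x9E5F), the high DWORD is 0, so the XOR yields 0x9E5F, and the
 * mask reduces it to bucket 0x25F. Multiplying by the prime 40543 spreads
 * the otherwise very similar ASCII tag values across the buckets.
 */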

FORCEINLINE
ULONG
ExpComputePartialHashForAddress(IN PVOID BaseAddress)
{
    ULONG Result;
    //
    // Compute the hash by converting the address into a page number, and then
    // XORing each byte with the next one.
    //
    // We do *NOT* AND with the bucket mask at this point because big table expansion
    // might happen. Therefore, the final step of the hash must be performed
    // while holding the expansion pushlock, and this is why we call this a
    // "partial" hash only.
    //
    Result = (ULONG)((ULONG_PTR)BaseAddress >> PAGE_SHIFT);
    return (Result >> 24) ^ (Result >> 16) ^ (Result >> 8) ^ Result;
}
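
/*
 * Example: with 4KB pages (PAGE_SHIFT == 12), the address 0x12345678 yields
 * page number 0x12345, whose four bytes are then folded together with XOR.
 * Every address within the same page therefore produces the same partial
 * hash, which is exactly what we want for page-granular big-pool entries.
 */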

/* PRIVATE FUNCTIONS **********************************************************/

VOID
NTAPI
INIT_FUNCTION
ExpSeedHotTags(VOID)
{
    ULONG i, Key, Hash, Index;
    PPOOL_TRACKER_TABLE TrackTable = PoolTrackTable;
    ULONG TagList[] =
    {
        ' oI',
        ' laH',
        'PldM',
        'LooP',
        'tSbO',
        ' prI',
        'bdDN',
        'LprI',
        'pOoI',
        ' ldM',
        'eliF',
        'aVMC',
        'dSeS',
        'CFtN',
        'looP',
        'rPCT',
        'bNMC',
        'dTeS',
        'sFtN',
        'TPCT',
        'CPCT',
        ' yeK',
        'qSbO',
        'mNoI',
        'aEoI',
        'cPCT',
        'aFtN',
        '0ftN',
        'tceS',
        'SprI',
        'ekoT',
        ' eS',
        'lCbO',
        'cScC',
        'lFtN',
        'cAeS',
        'mfSF',
        'kWcC',
        'miSF',
        'CdfA',
        'EdfA',
        'orSF',
        'nftN',
        'PRIU',
        'rFpN',
        'RFpN',
        'aPeS',
        'sUeS',
        'FpcA',
        'MpcA',
        'cSeS',
        'mNbO',
        'sFpN',
        'uLeS',
        'DPcS',
        'nevE',
        'vrqR',
        'ldaV',
        ' pP',
        'SdaV',
        ' daV',
        'LdaV',
        'FdaV',
        ' GIB',
    };
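
    //
    // Note that the tags above read reversed because multi-character
    // constants are stored little-endian: 'looP' is the bytes 'P','o','o','l'
    // in memory, i.e. the familiar "Pool" tag.
    //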

    //
    // Loop all 64 hot tags
    //
    ASSERT((sizeof(TagList) / sizeof(ULONG)) == 64);
    for (i = 0; i < sizeof(TagList) / sizeof(ULONG); i++)
    {
        //
        // Get the current tag, and compute its hash in the tracker table
        //
        Key = TagList[i];
        Hash = ExpComputeHashForTag(Key, PoolTrackTableMask);

        //
        // Loop all the hashes in this index/bucket
        //
        Index = Hash;
        while (TRUE)
        {
            //
            // Find an empty entry, and make sure this isn't the last hash that
            // can fit.
            //
            // On checked builds, also make sure this is the first time we are
            // seeding this tag.
            //
            ASSERT(TrackTable[Hash].Key != Key);
            if (!(TrackTable[Hash].Key) && (Hash != PoolTrackTableSize - 1))
            {
                //
                // It has been seeded, move on to the next tag
                //
                TrackTable[Hash].Key = Key;
                break;
            }

            //
            // This entry was already taken, compute the next possible hash while
            // making sure we're not back at our initial index.
            //
            ASSERT(TrackTable[Hash].Key != Key);
            Hash = (Hash + 1) & PoolTrackTableMask;
            if (Hash == Index) break;
        }
    }
}
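
/*
 * The tracker table is an open-addressed hash table with linear probing: a
 * tag that collides simply takes the next free bucket. Seeding the hot tags
 * above at boot means the most frequent tags land on their ideal buckets
 * before runtime collisions can displace them.
 */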

VOID
NTAPI
ExpRemovePoolTracker(IN ULONG Key,
                     IN SIZE_T NumberOfBytes,
                     IN POOL_TYPE PoolType)
{
    ULONG Hash, Index;
    PPOOL_TRACKER_TABLE Table, TableEntry;
    SIZE_T TableMask, TableSize;

    //
    // Remove the PROTECTED_POOL flag which is not part of the tag
    //
    Key &= ~PROTECTED_POOL;

    //
    // With WinDBG you can set a tag you want to break on when an allocation is
    // attempted
    //
    if (Key == PoolHitTag) DbgBreakPoint();

    //
    // Why the double indirection? Because normally this function is also used
    // when doing session pool allocations, which has another set of tables,
    // sizes, and masks that live in session pool. Now we don't support session
    // pool so we only ever use the regular tables, but I'm keeping the code this
    // way so that the day we DO support session pool, it won't require that
    // many changes
    //
    Table = PoolTrackTable;
    TableMask = PoolTrackTableMask;
    TableSize = PoolTrackTableSize;
    DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize);

    //
    // Compute the hash for this key, and loop all the possible buckets
    //
    Hash = ExpComputeHashForTag(Key, TableMask);
    Index = Hash;
    while (TRUE)
    {
        //
        // Have we found the entry for this tag?
        //
        TableEntry = &Table[Hash];
        if (TableEntry->Key == Key)
        {
            //
            // Decrement the counters depending on if this was paged or nonpaged
            // pool
            //
            if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
            {
                InterlockedIncrement(&TableEntry->NonPagedFrees);
                InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes,
                                            -(SSIZE_T)NumberOfBytes);
                return;
            }
            InterlockedIncrement(&TableEntry->PagedFrees);
            InterlockedExchangeAddSizeT(&TableEntry->PagedBytes,
                                        -(SSIZE_T)NumberOfBytes);
            return;
        }

        //
        // We should have only ended up with an empty entry if we've reached
        // the last bucket
        //
        if (!TableEntry->Key)
        {
            DPRINT1("Empty item reached in tracker table. Hash=0x%lx, TableMask=0x%lx, Tag=0x%08lx, NumberOfBytes=%lu, PoolType=%d\n",
                    Hash, TableMask, Key, (ULONG)NumberOfBytes, PoolType);
            ASSERT(Hash == TableMask);
        }

        //
        // This path is hit when we don't have an entry, and the current bucket
        // is full, so we simply try the next one
        //
        Hash = (Hash + 1) & TableMask;
        if (Hash == Index) break;
    }

    //
    // And finally this path is hit when all the buckets are full, and we need
    // some expansion. This path is not yet supported in ReactOS and so we'll
    // ignore the tag
    //
    DPRINT1("Out of pool tag space, ignoring...\n");
}

VOID
NTAPI
ExpInsertPoolTracker(IN ULONG Key,
                     IN SIZE_T NumberOfBytes,
                     IN POOL_TYPE PoolType)
{
    ULONG Hash, Index;
    KIRQL OldIrql;
    PPOOL_TRACKER_TABLE Table, TableEntry;
    SIZE_T TableMask, TableSize;

    //
    // Remove the PROTECTED_POOL flag which is not part of the tag
    //
    Key &= ~PROTECTED_POOL;

    //
    // With WinDBG you can set a tag you want to break on when an allocation is
    // attempted
    //
    if (Key == PoolHitTag) DbgBreakPoint();

    //
    // There is also an internal flag you can set to break on malformed tags
    //
    if (ExStopBadTags) ASSERT(Key & 0xFFFFFF00);

    //
    // ASSERT on ReactOS features not yet supported
    //
    ASSERT(!(PoolType & SESSION_POOL_MASK));
    ASSERT(KeGetCurrentProcessorNumber() == 0);

    //
    // Why the double indirection? Because normally this function is also used
    // when doing session pool allocations, which has another set of tables,
    // sizes, and masks that live in session pool. Now we don't support session
    // pool so we only ever use the regular tables, but I'm keeping the code this
    // way so that the day we DO support session pool, it won't require that
    // many changes
    //
    Table = PoolTrackTable;
    TableMask = PoolTrackTableMask;
    TableSize = PoolTrackTableSize;
    DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize);

    //
    // Compute the hash for this key, and loop all the possible buckets
    //
    Hash = ExpComputeHashForTag(Key, TableMask);
    Index = Hash;
    while (TRUE)
    {
        //
        // Do we already have an entry for this tag?
        //
        TableEntry = &Table[Hash];
        if (TableEntry->Key == Key)
        {
            //
            // Increment the counters depending on if this was paged or nonpaged
            // pool
            //
            if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
            {
                InterlockedIncrement(&TableEntry->NonPagedAllocs);
                InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes, NumberOfBytes);
                return;
            }
            InterlockedIncrement(&TableEntry->PagedAllocs);
            InterlockedExchangeAddSizeT(&TableEntry->PagedBytes, NumberOfBytes);
            return;
        }

        //
        // We don't have an entry yet, but we've found a free bucket for it
        //
        if (!(TableEntry->Key) && (Hash != PoolTrackTableSize - 1))
        {
            //
            // We need to hold the lock while creating a new entry, since other
            // processors might be in this code path as well
            //
            ExAcquireSpinLock(&ExpTaggedPoolLock, &OldIrql);
            if (!PoolTrackTable[Hash].Key)
            {
                //
                // We've won the race, so now create this entry in the bucket
                //
                ASSERT(Table[Hash].Key == 0);
                PoolTrackTable[Hash].Key = Key;
                TableEntry->Key = Key;
            }
            ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);

            //
            // Now we force the loop to run again, and we should now end up in
            // the code path above which does the interlocked increments...
            //
            continue;
        }

        //
        // This path is hit when we don't have an entry, and the current bucket
        // is full, so we simply try the next one
        //
        Hash = (Hash + 1) & TableMask;
        if (Hash == Index) break;
    }

    //
    // And finally this path is hit when all the buckets are full, and we need
    // some expansion. This path is not yet supported in ReactOS and so we'll
    // ignore the tag
    //
    DPRINT1("Out of pool tag space, ignoring...\n");
}
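
/*
 * ExpInsertPoolTracker and ExpRemovePoolTracker are always called in matched
 * pairs with the same tag and byte count: every successful allocation inserts
 * (incrementing the Allocs and Bytes counters) and the corresponding free
 * removes (incrementing Frees and subtracting the bytes), which is what keeps
 * the per-tag PagedBytes/NonPagedBytes totals exact.
 */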

VOID
NTAPI
INIT_FUNCTION
ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor,
                           IN POOL_TYPE PoolType,
                           IN ULONG PoolIndex,
                           IN ULONG Threshold,
                           IN PVOID PoolLock)
{
    PLIST_ENTRY NextEntry, LastEntry;

    //
    // Setup the descriptor based on the caller's request
    //
    PoolDescriptor->PoolType = PoolType;
    PoolDescriptor->PoolIndex = PoolIndex;
    PoolDescriptor->Threshold = Threshold;
    PoolDescriptor->LockAddress = PoolLock;

    //
    // Initialize accounting data
    //
    PoolDescriptor->RunningAllocs = 0;
    PoolDescriptor->RunningDeAllocs = 0;
    PoolDescriptor->TotalPages = 0;
    PoolDescriptor->TotalBytes = 0;
    PoolDescriptor->TotalBigPages = 0;

    //
    // Nothing pending for now
    //
    PoolDescriptor->PendingFrees = NULL;
    PoolDescriptor->PendingFreeDepth = 0;

    //
    // Loop all the descriptor's allocation lists and initialize them
    //
    NextEntry = PoolDescriptor->ListHeads;
    LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
    while (NextEntry < LastEntry)
    {
        ExpInitializePoolListHead(NextEntry);
        NextEntry++;
    }

    //
    // Note that ReactOS does not support Session Pool Yet
    //
    ASSERT(PoolType != PagedPoolSession);
}
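
/*
 * Each descriptor ends with POOL_LISTS_PER_PAGE free lists; a free block of
 * BlockSize units is kept on ListHeads[BlockSize - 1]. The allocator below
 * exploits this by starting its search at an index that guarantees any block
 * it finds is at least as large as the request, splitting off any excess as
 * a new free fragment.
 */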

VOID
NTAPI
INIT_FUNCTION
InitializePool(IN POOL_TYPE PoolType,
               IN ULONG Threshold)
{
    PPOOL_DESCRIPTOR Descriptor;
    SIZE_T TableSize;
    ULONG i;

    //
    // Check what kind of pool this is
    //
    if (PoolType == NonPagedPool)
    {
        //
        // Compute the track table size and convert it from a power of two to an
        // actual byte size
        //
        // NOTE: On checked builds, we'll assert if the registry table size was
        // invalid, while on retail builds we'll just break out of the loop at
        // that point.
        //
        TableSize = min(PoolTrackTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
        for (i = 0; i < 32; i++)
        {
            if (TableSize & 1)
            {
                ASSERT((TableSize & ~1) == 0);
                if (!(TableSize & ~1)) break;
            }
            TableSize >>= 1;
        }

        //
        // If we hit bit 32, then no size was defined in the registry, so
        // we'll use the default size of 2048 entries.
        //
        // Otherwise, use the size from the registry, as long as it's not
        // smaller than 64 entries.
        //
        if (i == 32)
        {
            PoolTrackTableSize = 2048;
        }
        else
        {
            PoolTrackTableSize = max(1 << i, 64);
        }

        //
        // Loop trying with the biggest specified size first, and cut it down
        // by a power of two each iteration in case not enough memory exists
        //
        while (TRUE)
        {
            //
            // Do not allow overflow
            //
            if ((PoolTrackTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_TABLE)))
            {
                PoolTrackTableSize >>= 1;
                continue;
            }

            //
            // Allocate the tracker table and exit the loop if this worked
            //
            PoolTrackTable = MiAllocatePoolPages(NonPagedPool,
                                                 (PoolTrackTableSize + 1) *
                                                 sizeof(POOL_TRACKER_TABLE));
            if (PoolTrackTable) break;

            //
            // Otherwise, as long as we're not down to the last bit, keep
            // iterating
            //
            if (PoolTrackTableSize == 1)
            {
                KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                             TableSize,
                             0xFFFFFFFF,
                             0xFFFFFFFF,
                             0xFFFFFFFF);
            }
            PoolTrackTableSize >>= 1;
        }

        //
        // Finally, add one entry, compute the hash, and zero the table
        //
        PoolTrackTableSize++;
        PoolTrackTableMask = PoolTrackTableSize - 2;

        RtlZeroMemory(PoolTrackTable,
                      PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));

        //
        // We now do the exact same thing with the tracker table for big pages
        //
        TableSize = min(PoolBigPageTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
        for (i = 0; i < 32; i++)
        {
            if (TableSize & 1)
            {
                ASSERT((TableSize & ~1) == 0);
                if (!(TableSize & ~1)) break;
            }
            TableSize >>= 1;
        }

        //
        // For big pages, the default tracker table is 4096 entries, while the
        // minimum is still 64
        //
        if (i == 32)
        {
            PoolBigPageTableSize = 4096;
        }
        else
        {
            PoolBigPageTableSize = max(1 << i, 64);
        }

        //
        // Again, run the exact same loop we ran earlier, but this time for the
        // big pool tracker instead
        //
        while (TRUE)
        {
            if ((PoolBigPageTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_BIG_PAGES)))
            {
                PoolBigPageTableSize >>= 1;
                continue;
            }

            PoolBigPageTable = MiAllocatePoolPages(NonPagedPool,
                                                   PoolBigPageTableSize *
                                                   sizeof(POOL_TRACKER_BIG_PAGES));
            if (PoolBigPageTable) break;

            if (PoolBigPageTableSize == 1)
            {
                KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                             TableSize,
                             0xFFFFFFFF,
                             0xFFFFFFFF,
                             0xFFFFFFFF);
            }

            PoolBigPageTableSize >>= 1;
        }

        //
        // An extra entry is not needed for the big pool tracker, so just
        // compute the hash and zero it
        //
        PoolBigPageTableHash = PoolBigPageTableSize - 1;
        RtlZeroMemory(PoolBigPageTable,
                      PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
        for (i = 0; i < PoolBigPageTableSize; i++) PoolBigPageTable[i].Va = (PVOID)1;
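        //
        // Setting Va to 1 works because real big-pool addresses are page-
        // aligned: bit 0 (POOL_BIG_TABLE_ENTRY_FREE) can never be set in a
        // valid address, so it doubles as the "this slot is free" marker that
        // ExpAddTagForBigPages tests below.
        //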

        //
        // During development, print this out so we can see what's happening
        //
        DPRINT1("EXPOOL: Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
                PoolTrackTable, PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
        DPRINT1("EXPOOL: Big Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
                PoolBigPageTable, PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));

        //
        // Insert the generic tracker for all of big pool
        //
        ExpInsertPoolTracker('looP',
                             ROUND_TO_PAGES(PoolBigPageTableSize *
                                            sizeof(POOL_TRACKER_BIG_PAGES)),
                             NonPagedPool);

        //
        // No support for NUMA systems at this time
        //
        ASSERT(KeNumberNodes == 1);

        //
        // Initialize the tag spinlock
        //
        KeInitializeSpinLock(&ExpTaggedPoolLock);

        //
        // Initialize the nonpaged pool descriptor
        //
        PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
        ExInitializePoolDescriptor(PoolVector[NonPagedPool],
                                   NonPagedPool,
                                   0,
                                   Threshold,
                                   NULL);
    }
    else
    {
        //
        // No support for NUMA systems at this time
        //
        ASSERT(KeNumberNodes == 1);

        //
        // Allocate the pool descriptor
        //
        Descriptor = ExAllocatePoolWithTag(NonPagedPool,
                                           sizeof(KGUARDED_MUTEX) +
                                           sizeof(POOL_DESCRIPTOR),
                                           'looP');
        if (!Descriptor)
        {
            //
            // This is really bad...
            //
            KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                         0,
                         -1,
                         -1,
                         -1);
        }

        //
        // Setup the vector and guarded mutex for paged pool
        //
        PoolVector[PagedPool] = Descriptor;
        ExpPagedPoolMutex = (PKGUARDED_MUTEX)(Descriptor + 1);
        ExpPagedPoolDescriptor[0] = Descriptor;
        KeInitializeGuardedMutex(ExpPagedPoolMutex);
        ExInitializePoolDescriptor(Descriptor,
                                   PagedPool,
                                   0,
                                   Threshold,
                                   ExpPagedPoolMutex);

        //
        // Insert the generic tracker for all of nonpaged pool
        //
        ExpInsertPoolTracker('looP',
                             ROUND_TO_PAGES(PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE)),
                             NonPagedPool);
    }
}

FORCEINLINE
KIRQL
ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
{
    //
    // Check if this is nonpaged pool
    //
    if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
    {
        //
        // Use the queued spin lock
        //
        return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
    }
    else
    {
        //
        // Use the guarded mutex
        //
        KeAcquireGuardedMutex(Descriptor->LockAddress);
        return APC_LEVEL;
    }
}

FORCEINLINE
VOID
ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor,
             IN KIRQL OldIrql)
{
    //
    // Check if this is nonpaged pool
    //
    if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
    {
        //
        // Use the queued spin lock
        //
        KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
    }
    else
    {
        //
        // Use the guarded mutex
        //
        KeReleaseGuardedMutex(Descriptor->LockAddress);
    }
}
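
/*
 * The lock choice above is also what dictates the IRQL rules enforced by
 * ExpCheckPoolIrqlLevel: the nonpaged pool's queued spinlock raises to
 * DISPATCH_LEVEL (so callers may be at <= DISPATCH_LEVEL), while the paged
 * pool's guarded mutex can only be acquired at <= APC_LEVEL.
 */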

VOID
NTAPI
ExpGetPoolTagInfoTarget(IN PKDPC Dpc,
                        IN PVOID DeferredContext,
                        IN PVOID SystemArgument1,
                        IN PVOID SystemArgument2)
{
    PPOOL_DPC_CONTEXT Context = DeferredContext;
    UNREFERENCED_PARAMETER(Dpc);
    ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

    //
    // Make sure we win the race, and if we did, copy the data atomically
    //
    if (KeSignalCallDpcSynchronize(SystemArgument2))
    {
        RtlCopyMemory(Context->PoolTrackTable,
                      PoolTrackTable,
                      Context->PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));

        //
        // This is here because ReactOS does not yet support expansion
        //
        ASSERT(Context->PoolTrackTableSizeExpansion == 0);
    }

    //
    // Regardless of whether we won or not, we must now synchronize and then
    // decrement the barrier since this is one more processor that has completed
    // the callback.
    //
    KeSignalCallDpcSynchronize(SystemArgument2);
    KeSignalCallDpcDone(SystemArgument1);
}

NTSTATUS
NTAPI
ExGetPoolTagInfo(IN PSYSTEM_POOLTAG_INFORMATION SystemInformation,
                 IN ULONG SystemInformationLength,
                 IN OUT PULONG ReturnLength OPTIONAL)
{
    ULONG TableSize, CurrentLength;
    ULONG EntryCount;
    NTSTATUS Status = STATUS_SUCCESS;
    PSYSTEM_POOLTAG TagEntry;
    PPOOL_TRACKER_TABLE Buffer, TrackerEntry;
    POOL_DPC_CONTEXT Context;
    ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);

    //
    // Keep track of how much data the caller's buffer must hold
    //
    CurrentLength = FIELD_OFFSET(SYSTEM_POOLTAG_INFORMATION, TagInfo);

    //
    // Initialize the caller's buffer
    //
    TagEntry = &SystemInformation->TagInfo[0];
    SystemInformation->Count = 0;

    //
    // Capture the number of entries, and the total size needed to make a copy
    // of the table
    //
    EntryCount = (ULONG)PoolTrackTableSize;
    TableSize = EntryCount * sizeof(POOL_TRACKER_TABLE);

    //
    // Allocate the "Generic DPC" temporary buffer
    //
    Buffer = ExAllocatePoolWithTag(NonPagedPool, TableSize, 'ofnI');
    if (!Buffer) return STATUS_INSUFFICIENT_RESOURCES;

    //
    // Do a "Generic DPC" to atomically retrieve the tag and allocation data
    //
    Context.PoolTrackTable = Buffer;
    Context.PoolTrackTableSize = PoolTrackTableSize;
    Context.PoolTrackTableExpansion = NULL;
    Context.PoolTrackTableSizeExpansion = 0;
    KeGenericCallDpc(ExpGetPoolTagInfoTarget, &Context);

    //
    // Now parse the results
    //
    for (TrackerEntry = Buffer; TrackerEntry < (Buffer + EntryCount); TrackerEntry++)
    {
        //
        // If the entry is empty, skip it
        //
        if (!TrackerEntry->Key) continue;

        //
        // Otherwise, add one more entry to the caller's buffer, and ensure that
        // enough space has been allocated in it
        //
        SystemInformation->Count++;
        CurrentLength += sizeof(*TagEntry);
        if (SystemInformationLength < CurrentLength)
        {
            //
            // The caller's buffer is too small, so set a failure code. The
            // caller will know the count, as well as how much space is needed.
            //
            // We do NOT break out of the loop, because we want to keep incrementing
            // the Count as well as CurrentLength so that the caller can know the
            // final numbers
            //
            Status = STATUS_INFO_LENGTH_MISMATCH;
        }
        else
        {
            //
            // Small sanity check that our accounting is working correctly
            //
            ASSERT(TrackerEntry->PagedAllocs >= TrackerEntry->PagedFrees);
            ASSERT(TrackerEntry->NonPagedAllocs >= TrackerEntry->NonPagedFrees);

            //
            // Return the data into the caller's buffer
            //
            TagEntry->TagUlong = TrackerEntry->Key;
            TagEntry->PagedAllocs = TrackerEntry->PagedAllocs;
            TagEntry->PagedFrees = TrackerEntry->PagedFrees;
            TagEntry->PagedUsed = TrackerEntry->PagedBytes;
            TagEntry->NonPagedAllocs = TrackerEntry->NonPagedAllocs;
            TagEntry->NonPagedFrees = TrackerEntry->NonPagedFrees;
            TagEntry->NonPagedUsed = TrackerEntry->NonPagedBytes;
            TagEntry++;
        }
    }

    //
    // Free the "Generic DPC" temporary buffer, return the buffer length and status
    //
    ExFreePoolWithTag(Buffer, 'ofnI');
    if (ReturnLength) *ReturnLength = CurrentLength;
    return Status;
}
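
/*
 * A caller is expected to retry on STATUS_INFO_LENGTH_MISMATCH using the
 * returned length. This routine is typically reached through
 * NtQuerySystemInformation(SystemPoolTagInformation), the same data that
 * tools like poolmon display.
 */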

BOOLEAN
NTAPI
ExpAddTagForBigPages(IN PVOID Va,
                     IN ULONG Key,
                     IN ULONG NumberOfPages,
                     IN POOL_TYPE PoolType)
{
    ULONG Hash, i = 0;
    PVOID OldVa;
    KIRQL OldIrql;
    SIZE_T TableSize;
    PPOOL_TRACKER_BIG_PAGES Entry, EntryEnd, EntryStart;
    ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
    ASSERT(!(PoolType & SESSION_POOL_MASK));

    //
    // As the table is expandable, these values must only be read after acquiring
    // the lock to avoid a torn access during an expansion
    //
    Hash = ExpComputePartialHashForAddress(Va);
    KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
    Hash &= PoolBigPageTableHash;
    TableSize = PoolBigPageTableSize;

    //
    // We loop from the current hash bucket to the end of the table, and then
    // rollover to hash bucket 0 and keep going from there. If we return back
    // to the beginning, then we attempt expansion at the bottom of the loop
    //
    EntryStart = Entry = &PoolBigPageTable[Hash];
    EntryEnd = &PoolBigPageTable[TableSize];
    do
    {
        //
        // Make sure that this is a free entry and attempt to atomically make the
        // entry busy now
        //
        OldVa = Entry->Va;
        if (((ULONG_PTR)OldVa & POOL_BIG_TABLE_ENTRY_FREE) &&
            (InterlockedCompareExchangePointer(&Entry->Va, Va, OldVa) == OldVa))
        {
            //
            // We now own this entry, write down the size and the pool tag
            //
            Entry->Key = Key;
            Entry->NumberOfPages = NumberOfPages;

            //
            // Add one more entry to the count, and see if we're getting within
            // 25% of the table size, at which point we'll do an expansion now
            // to avoid blocking too hard later on.
            //
            // Note that we only do this if it's also been the 16th time that we
            // keep losing the race or that we are not finding a free entry anymore,
            // which implies a massive number of concurrent big pool allocations.
            //
            InterlockedIncrementUL(&ExpPoolBigEntriesInUse);
            if ((i >= 16) && (ExpPoolBigEntriesInUse > (TableSize / 4)))
            {
                DPRINT1("Should attempt expansion since we now have %lu entries\n",
                        ExpPoolBigEntriesInUse);
            }

            //
            // We have our entry, return
            //
            KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
            return TRUE;
        }

        //
        // We don't have our entry yet, so keep trying, making the entry list
        // circular if we reach the last entry. We'll eventually break out of
        // the loop once we've rolled over and returned back to our original
        // hash bucket
        //
        i++;
        if (++Entry >= EntryEnd) Entry = &PoolBigPageTable[0];
    } while (Entry != EntryStart);

    //
    // This means there are no free hash buckets whatsoever, so we would now
    // have to attempt expanding the table
    //
    DPRINT1("Big pool expansion needed, not implemented!\n");
    KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
    return FALSE;
}

ULONG
NTAPI
ExpFindAndRemoveTagBigPages(IN PVOID Va,
                            OUT PULONG_PTR BigPages,
                            IN POOL_TYPE PoolType)
{
    BOOLEAN FirstTry = TRUE;
    SIZE_T TableSize;
    KIRQL OldIrql;
    ULONG PoolTag, Hash;
    PPOOL_TRACKER_BIG_PAGES Entry;
    ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
    ASSERT(!(PoolType & SESSION_POOL_MASK));

    //
    // As the table is expandable, these values must only be read after acquiring
    // the lock to avoid a torn access during an expansion
    //
    Hash = ExpComputePartialHashForAddress(Va);
    KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
    Hash &= PoolBigPageTableHash;
    TableSize = PoolBigPageTableSize;

    //
    // Loop while trying to find this big page allocation
    //
    while (PoolBigPageTable[Hash].Va != Va)
    {
        //
        // Increment the size until we go past the end of the table
        //
        if (++Hash >= TableSize)
        {
            //
            // Is this the second time we've tried?
            //
            if (!FirstTry)
            {
                //
                // This means it was never inserted into the pool table and it
                // received the special "BIG" tag -- return that and return 0
                // so that the code can ask Mm for the page count instead
                //
                KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
                *BigPages = 0;
                return ' GIB';
            }

            //
            // The first time this happens, reset the hash index and try again
            //
            Hash = 0;
            FirstTry = FALSE;
        }
    }

    //
    // Now capture all the information we need from the entry, since after we
    // release the lock, the data can change
    //
    Entry = &PoolBigPageTable[Hash];
    *BigPages = Entry->NumberOfPages;
    PoolTag = Entry->Key;

    //
    // Set the free bit, and decrement the number of allocations. Finally, release
    // the lock and return the tag that was located
    //
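    //
    // (Incrementing works as "set bit 0" here because Va is page-aligned, as
    // asserted on entry: the low bit is guaranteed clear, so adding 1 turns on
    // exactly the POOL_BIG_TABLE_ENTRY_FREE bit without damaging the pointer.)
    //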
    InterlockedIncrement((PLONG)&Entry->Va);
    InterlockedDecrementUL(&ExpPoolBigEntriesInUse);
    KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
    return PoolTag;
}

VOID
NTAPI
ExQueryPoolUsage(OUT PULONG PagedPoolPages,
                 OUT PULONG NonPagedPoolPages,
                 OUT PULONG PagedPoolAllocs,
                 OUT PULONG PagedPoolFrees,
                 OUT PULONG PagedPoolLookasideHits,
                 OUT PULONG NonPagedPoolAllocs,
                 OUT PULONG NonPagedPoolFrees,
                 OUT PULONG NonPagedPoolLookasideHits)
{
    ULONG i;
    PPOOL_DESCRIPTOR PoolDesc;

    //
    // Assume all failures
    //
    *PagedPoolPages = 0;
    *PagedPoolAllocs = 0;
    *PagedPoolFrees = 0;

    //
    // Tally up the totals for all the paged pools
    //
    for (i = 0; i < ExpNumberOfPagedPools + 1; i++)
    {
        PoolDesc = ExpPagedPoolDescriptor[i];
        *PagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
        *PagedPoolAllocs += PoolDesc->RunningAllocs;
        *PagedPoolFrees += PoolDesc->RunningDeAllocs;
    }

    //
    // The first non-paged pool has a hardcoded well-known descriptor name
    //
    PoolDesc = &NonPagedPoolDescriptor;
    *NonPagedPoolPages = PoolDesc->TotalPages + PoolDesc->TotalBigPages;
    *NonPagedPoolAllocs = PoolDesc->RunningAllocs;
    *NonPagedPoolFrees = PoolDesc->RunningDeAllocs;

    //
    // If the system has more than one non-paged pool, copy the other descriptor
    // totals as well
    //
#if 0
    if (ExpNumberOfNonPagedPools > 1)
    {
        for (i = 0; i < ExpNumberOfNonPagedPools; i++)
        {
            PoolDesc = ExpNonPagedPoolDescriptor[i];
            *NonPagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
            *NonPagedPoolAllocs += PoolDesc->RunningAllocs;
            *NonPagedPoolFrees += PoolDesc->RunningDeAllocs;
        }
    }
#endif

    //
    // FIXME: Not yet supported
    //
    *NonPagedPoolLookasideHits += 0;
    *PagedPoolLookasideHits += 0;
}

/* PUBLIC FUNCTIONS ***********************************************************/

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
                      IN SIZE_T NumberOfBytes,
                      IN ULONG Tag)
{
    PPOOL_DESCRIPTOR PoolDesc;
    PLIST_ENTRY ListHead;
    PPOOL_HEADER Entry, NextEntry, FragmentEntry;
    KIRQL OldIrql;
    USHORT BlockSize, i;
    ULONG OriginalType;
    PKPRCB Prcb = KeGetCurrentPrcb();
    PGENERAL_LOOKASIDE LookasideList;

    //
    // Some sanity checks
    //
    ASSERT(Tag != 0);
    ASSERT(Tag != ' GIB');
    ASSERT(NumberOfBytes != 0);
    ExpCheckPoolIrqlLevel(PoolType, NumberOfBytes, NULL);

    //
    // Not supported in ReactOS
    //
    ASSERT(!(PoolType & SESSION_POOL_MASK));

    //
    // Check if verifier or special pool is enabled
    //
    if (ExpPoolFlags & (POOL_FLAG_VERIFIER | POOL_FLAG_SPECIAL_POOL))
    {
        //
        // For verifier, we should call the verification routine
        //
        if (ExpPoolFlags & POOL_FLAG_VERIFIER)
        {
            DPRINT1("Driver Verifier is not yet supported\n");
        }

        //
        // For special pool, we check if this is a suitable allocation and do
        // the special allocation if needed
        //
        if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
        {
            //
            // Check if this is a special pool allocation
            //
            if (MmUseSpecialPool(NumberOfBytes, Tag))
            {
                //
                // Try to allocate using special pool
                //
                Entry = MmAllocateSpecialPool(NumberOfBytes, Tag, PoolType, 2);
                if (Entry) return Entry;
            }
        }
    }

    //
    // Get the pool type and its corresponding vector for this request
    //
    OriginalType = PoolType;
    PoolType = PoolType & BASE_POOL_TYPE_MASK;
    PoolDesc = PoolVector[PoolType];
    ASSERT(PoolDesc != NULL);

    //
    // Check if this is a big page allocation
    //
    if (NumberOfBytes > POOL_MAX_ALLOC)
    {
        //
        // Allocate pages for it
        //
        Entry = MiAllocatePoolPages(OriginalType, NumberOfBytes);
        if (!Entry)
        {
            //
            // Must succeed pool is deprecated, but still supported. These allocation
            // failures must cause an immediate bugcheck
            //
            if (OriginalType & MUST_SUCCEED_POOL_MASK)
            {
                KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                             NumberOfBytes,
                             NonPagedPoolDescriptor.TotalPages,
                             NonPagedPoolDescriptor.TotalBigPages,
                             0);
            }

            //
            // Internal debugging
            //
            ExPoolFailures++;

            //
            // This flag requests printing failures, and can also further specify
            // breaking on failures
            //
            if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
            {
                DPRINT1("EX: ExAllocatePool (%p, 0x%x) returning NULL\n",
                        NumberOfBytes,
                        OriginalType);
                if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
            }

            //
            // Finally, this flag requests an exception, which we are more than
            // happy to raise!
            //
            if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
            {
                ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
            }

            //
            // Return NULL to the caller in all other cases
            //
            return NULL;
        }

        //
        // Increment required counters
        //
        InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
                               (LONG)BYTES_TO_PAGES(NumberOfBytes));
        InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, NumberOfBytes);
        InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);

        //
        // Add a tag for the big page allocation and switch to the generic "BIG"
        // tag if we failed to do so, then insert a tracker for this allocation.
        //
        if (!ExpAddTagForBigPages(Entry,
                                  Tag,
                                  (ULONG)BYTES_TO_PAGES(NumberOfBytes),
                                  OriginalType))
        {
            Tag = ' GIB';
        }
        ExpInsertPoolTracker(Tag, ROUND_TO_PAGES(NumberOfBytes), OriginalType);
        return Entry;
    }

    //
    // Should never request 0 bytes from the pool, but since so many drivers do
    // it, we'll just assume they want 1 byte, based on NT's similar behavior
    //
    if (!NumberOfBytes) NumberOfBytes = 1;

    //
    // A pool allocation is defined by its data, a linked list to connect it to
    // the free list (if necessary), and a pool header to store accounting info.
    // Calculate this size, then convert it into a block size (units of pool
    // headers)
    //
    // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
    // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
    // the direct allocation of pages.
    //
    i = (USHORT)((NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1))
                 / POOL_BLOCK_SIZE);
    ASSERT(i < POOL_LISTS_PER_PAGE);
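
    //
    // Worked example (assuming a 32-bit build, where POOL_BLOCK_SIZE and
    // sizeof(POOL_HEADER) are both 8 bytes): a 25-byte request needs
    // 25 + 8 = 33 bytes including its header, which rounds up to i = 5
    // blocks, i.e. 40 bytes.
    //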

    //
    // Handle lookaside list optimization for both paged and nonpaged pool
    //
    if (i <= NUMBER_POOL_LOOKASIDE_LISTS)
    {
        //
        // Try popping it from the per-CPU lookaside list
        //
        LookasideList = (PoolType == PagedPool) ?
                         Prcb->PPPagedLookasideList[i - 1].P :
                         Prcb->PPNPagedLookasideList[i - 1].P;
        LookasideList->TotalAllocates++;
        Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
        if (!Entry)
        {
            //
            // We failed, try popping it from the global list
            //
            LookasideList = (PoolType == PagedPool) ?
                             Prcb->PPPagedLookasideList[i - 1].L :
                             Prcb->PPNPagedLookasideList[i - 1].L;
            LookasideList->TotalAllocates++;
            Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
        }

        //
        // If we were able to pop it, update the accounting and return the block
        //
        if (Entry)
        {
            LookasideList->AllocateHits++;

            //
            // Get the real entry, write down its pool type, and track it
            //
            Entry--;
            Entry->PoolType = PoolType + 1;
            ExpInsertPoolTracker(Tag,
                                 Entry->BlockSize * POOL_BLOCK_SIZE,
                                 OriginalType);

            //
            // Return the pool allocation
            //
            Entry->PoolTag = Tag;
            (POOL_FREE_BLOCK(Entry))->Flink = NULL;
            (POOL_FREE_BLOCK(Entry))->Blink = NULL;
            return POOL_FREE_BLOCK(Entry);
        }
    }
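
    //
    // (The lookaside path above tries the processor-private list first -- the
    // ".P" pointer in the PRCB pair -- and falls back to the shared
    // system-wide list, ".L", before giving up and going to the descriptor's
    // free lists below.)
    //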

    //
    // Loop in the free lists looking for a block of this size. Start with the
    // list optimized for this kind of size lookup
    //
    ListHead = &PoolDesc->ListHeads[i];
    do
    {
        //
        // Are there any free entries available on this list?
        //
        if (!ExpIsPoolListEmpty(ListHead))
        {
            //
            // Acquire the pool lock now
            //
            OldIrql = ExLockPool(PoolDesc);

            //
            // And make sure the list still has entries
            //
            if (ExpIsPoolListEmpty(ListHead))
            {
                //
                // Someone raced us (and won) before we had a chance to acquire
                // the lock.
                //
                // Try again!
                //
                ExUnlockPool(PoolDesc, OldIrql);
                continue;
            }

            //
            // Remove a free entry from the list
            // Note that due to the way we insert free blocks into multiple lists
            // there is a guarantee that any block on this list will either be
            // of the correct size, or perhaps larger.
            //
            ExpCheckPoolLinks(ListHead);
            Entry = POOL_ENTRY(ExpRemovePoolHeadList(ListHead));
            ExpCheckPoolLinks(ListHead);
            ExpCheckPoolBlocks(Entry);
            ASSERT(Entry->BlockSize >= i);
            ASSERT(Entry->PoolType == 0);

            //
            // Check if this block is larger than what we need. The block could
            // not possibly be smaller, due to the reason explained above (and
            // we would've asserted on a checked build if this was the case).
            //
            if (Entry->BlockSize != i)
            {
                //
                // Is there an entry before this one?
                //
                if (Entry->PreviousSize == 0)
                {
                    //
                    // There isn't anyone before us, so take the next block and
                    // turn it into a fragment that contains the leftover data
                    // that we don't need to satisfy the caller's request
                    //
                    FragmentEntry = POOL_BLOCK(Entry, i);
                    FragmentEntry->BlockSize = Entry->BlockSize - i;

                    //
                    // And make it point back to us
                    //
                    FragmentEntry->PreviousSize = i;

                    //
                    // Now get the block that follows the new fragment and check
                    // if it's still on the same page as us (and not at the end)
                    //
                    NextEntry = POOL_NEXT_BLOCK(FragmentEntry);
                    if (PAGE_ALIGN(NextEntry) != NextEntry)
                    {
                        //
                        // Adjust this next block to point to our newly created
                        // fragment block
                        //
                        NextEntry->PreviousSize = FragmentEntry->BlockSize;
                    }
                }
                else
                {
                    //
                    // There is a free entry before us, which we know is smaller
                    // so we'll make this entry the fragment instead
                    //
                    FragmentEntry = Entry;

                    //
                    // And then we'll remove from it the actual size required.
                    // Now the entry is a leftover free fragment
                    //
                    Entry->BlockSize -= i;

                    //
                    // Now let's go to the next entry after the fragment (which
                    // used to point to our original free entry) and make it
                    // reference the new fragment entry instead.
                    //
                    // This is the entry that will actually end up holding the
                    // allocation!
                    //
                    Entry = POOL_NEXT_BLOCK(Entry);
                    Entry->PreviousSize = FragmentEntry->BlockSize;

                    //
                    // And now let's go to the entry after that one and check if
                    // it's still on the same page, and not at the end
                    //
                    NextEntry = POOL_BLOCK(Entry, i);
                    if (PAGE_ALIGN(NextEntry) != NextEntry)
                    {
                        //
                        // Make it reference the allocation entry
                        //
                        NextEntry->PreviousSize = i;
                    }
                }

                //
                // Now our (allocation) entry is the right size
                //
                Entry->BlockSize = i;

                //
                // And the next entry is now the free fragment which contains
                // the remaining difference between how big the original entry
                // was, and the actual size the caller needs/requested.
                //
                FragmentEntry->PoolType = 0;
                BlockSize = FragmentEntry->BlockSize;

                //
                // Now check if enough free bytes remained for us to have a
                // "full" entry, which contains enough bytes for a linked list
                // and thus can be used for allocations (up to 8 bytes...)
                //
                ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
                if (BlockSize != 1)
                {
                    //
                    // Insert the free entry into the free list for this size
                    //
                    ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
                                          POOL_FREE_BLOCK(FragmentEntry));
                    ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
                }
            }

            //
            // We have found an entry for this allocation, so set the pool type
            // and release the lock since we're done
            //
            Entry->PoolType = PoolType + 1;
            ExpCheckPoolBlocks(Entry);
            ExUnlockPool(PoolDesc, OldIrql);

            //
            // Increment required counters
            //
            InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
            InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);

            //
            // Track this allocation
            //
            ExpInsertPoolTracker(Tag,
                                 Entry->BlockSize * POOL_BLOCK_SIZE,
                                 OriginalType);

            //
            // Return the pool allocation
            //
            Entry->PoolTag = Tag;
            (POOL_FREE_BLOCK(Entry))->Flink = NULL;
            (POOL_FREE_BLOCK(Entry))->Blink = NULL;
            return POOL_FREE_BLOCK(Entry);
        }
    } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);
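
    //
    // Note the search strategy above: we started at the list sized for this
    // request and walked toward larger lists, so the first non-empty list
    // yields a first-fit block that is guaranteed big enough; anything left
    // over was split off and re-inserted as a free fragment.
    //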

    //
    // There were no free entries left, so we have to allocate a new fresh page
    //
    Entry = MiAllocatePoolPages(PoolType, PAGE_SIZE);
    if (!Entry)
    {
        //
        // Must succeed pool is deprecated, but still supported. These allocation
        // failures must cause an immediate bugcheck
        //
        if (OriginalType & MUST_SUCCEED_POOL_MASK)
        {
            KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                         PAGE_SIZE,
                         NonPagedPoolDescriptor.TotalPages,
                         NonPagedPoolDescriptor.TotalBigPages,
                         0);
        }

        //
        // Internal debugging
        //
        ExPoolFailures++;

        //
        // This flag requests printing failures, and can also further specify
        // breaking on failures
        //
        if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
        {
            DPRINT1("EX: ExAllocatePool (%p, 0x%x) returning NULL\n",
                    NumberOfBytes,
                    OriginalType);
            if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
        }

        //
        // Finally, this flag requests an exception, which we are more than
        // happy to raise!
        //
        if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
        {
            ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
        }

        //
        // Return NULL to the caller in all other cases
        //
        return NULL;
    }

    //
    // Setup the entry data
    //
    Entry->Ulong1 = 0;
    Entry->BlockSize = i;
    Entry->PoolType = PoolType + 1;

    //
    // This page will have two entries -- one for the allocation (which we just
    // created above), and one for the remaining free bytes, which we're about
    // to create now. The free bytes are the whole page minus what was allocated
    // and then converted into units of block headers.
    //
    BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i;
    FragmentEntry = POOL_BLOCK(Entry, i);
    FragmentEntry->Ulong1 = 0;
    FragmentEntry->BlockSize = BlockSize;
    FragmentEntry->PreviousSize = i;

    //
    // Increment required counters
    //
    InterlockedIncrement((PLONG)&PoolDesc->TotalPages);
    InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);

    //
    // Now check if enough free bytes remained for us to have a "full" entry,
    // which contains enough bytes for a linked list and thus can be used for
    // allocations (up to 8 bytes...)
    //
    if (FragmentEntry->BlockSize != 1)
    {
        //
        // Excellent -- acquire the pool lock
        //
        OldIrql = ExLockPool(PoolDesc);

        //
        // And insert the free entry into the free list for this block size
        //
        ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
        ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
                              POOL_FREE_BLOCK(FragmentEntry));
        ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));

        //
        // Release the pool lock
        //
        ExpCheckPoolBlocks(Entry);
        ExUnlockPool(PoolDesc, OldIrql);
    }
    else
    {
        //
        // Simply do a sanity check
        //
        ExpCheckPoolBlocks(Entry);
    }

    //
    // Increment performance counters and track this allocation
    //
    InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
    ExpInsertPoolTracker(Tag,
                         Entry->BlockSize * POOL_BLOCK_SIZE,
                         PoolType);

    //
    // And return the pool allocation
    //
    ExpCheckPoolBlocks(Entry);
    Entry->PoolTag = Tag;
    return POOL_FREE_BLOCK(Entry);
}
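
/*
 * Illustrative caller usage (the FOO type and 'xOoF' tag are hypothetical):
 *
 *     PFOO Foo = ExAllocatePoolWithTag(NonPagedPool, sizeof(FOO), 'xOoF');
 *     if (!Foo) return STATUS_INSUFFICIENT_RESOURCES;
 *     ...
 *     ExFreePoolWithTag(Foo, 'xOoF');
 *
 * Passing the same tag to the free routine lets the tag checks in
 * ExFreePoolWithTag below catch mismatched allocate/free pairs.
 */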

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePool(POOL_TYPE PoolType,
               SIZE_T NumberOfBytes)
{
    //
    // Use a default tag of "None"
    //
    return ExAllocatePoolWithTag(PoolType, NumberOfBytes, TAG_NONE);
}
2049
2050 /*
2051 * @implemented
2052 */
2053 VOID
2054 NTAPI
2055 ExFreePoolWithTag(IN PVOID P,
2056 IN ULONG TagToFree)
2057 {
2058 PPOOL_HEADER Entry, NextEntry;
2059 USHORT BlockSize;
2060 KIRQL OldIrql;
2061 POOL_TYPE PoolType;
2062 PPOOL_DESCRIPTOR PoolDesc;
2063 ULONG Tag;
2064 BOOLEAN Combined = FALSE;
2065 PFN_NUMBER PageCount, RealPageCount;
2066 PKPRCB Prcb = KeGetCurrentPrcb();
2067 PGENERAL_LOOKASIDE LookasideList;
2068
2069 //
2070 // Check if any of the debug flags are enabled
2071 //
2072 if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
2073 POOL_FLAG_CHECK_WORKERS |
2074 POOL_FLAG_CHECK_RESOURCES |
2075 POOL_FLAG_VERIFIER |
2076 POOL_FLAG_CHECK_DEADLOCK |
2077 POOL_FLAG_SPECIAL_POOL))
2078 {
2079 //
2080 // Check if special pool is enabled
2081 //
2082 if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
2083 {
2084 //
2085 // Check if it was allocated from a special pool
2086 //
2087 if (MmIsSpecialPoolAddress(P))
2088 {
2089 //
2090 // Was deadlock verification also enabled? We can do some extra
2091 // checks at this point
2092 //
2093 if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
2094 {
2095 DPRINT1("Verifier not yet supported\n");
2096 }
2097
2098 //
2099 // It is, so handle it via special pool free routine
2100 //
2101 MmFreeSpecialPool(P);
2102 return;
2103 }
2104 }
2105
2106 //
2107 // For non-big page allocations, we'll do a bunch of checks in here
2108 //
2109 if (PAGE_ALIGN(P) != P)
2110 {
2111 //
2112 // Get the entry for this pool allocation
2113 // The pointer math here may look wrong or confusing, but it is quite right
2114 //
2115 Entry = P;
2116 Entry--;
2117
2118 //
2119 // Get the pool type
2120 //
2121 PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2122
2123 //
2124 // FIXME: Many other debugging checks go here
2125 //
2126 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2127 }
2128 }
2129
2130 //
2131 // Check if this is a big page allocation
2132 //
2133 if (PAGE_ALIGN(P) == P)
2134 {
2135 //
2136 // We need to find the tag for it, so first we need to find out what
2137 // kind of allocation this was (paged or nonpaged), then we can go
2138 // ahead and try finding the tag for it. Remember to get rid of the
2139 // PROTECTED_POOL tag if it's found.
2140 //
2141 // Note that if at insertion time, we failed to add the tag for a big
2142 // pool allocation, we used a special tag called 'BIG' to identify the
2143 // allocation, and we may get this tag back. In this scenario, we must
2144 // manually get the size of the allocation by actually counting through
2145 // the PFN database.
2146 //
2147 PoolType = MmDeterminePoolType(P);
2148 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2149 Tag = ExpFindAndRemoveTagBigPages(P, &PageCount, PoolType);
2150 if (!Tag)
2151 {
2152 DPRINT1("We do not know the size of this allocation. This is not yet supported\n");
2153 ASSERT(Tag == ' GIB');
2154 PageCount = 1; // We are going to lie! This might screw up accounting?
2155 }
2156 else if (Tag & PROTECTED_POOL)
2157 {
2158 Tag &= ~PROTECTED_POOL;
2159 }
2160
2161 //
2162 // Check block tag
2163 //
2164 if (TagToFree && TagToFree != Tag)
2165 {
2166 DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
2167 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
2168 }
2169
2170 //
2171 // We have our tag and our page count, so we can go ahead and remove this
2172 // tracker now
2173 //
2174 ExpRemovePoolTracker(Tag, PageCount << PAGE_SHIFT, PoolType);
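// (Big-page trackers account in bytes, so on a system with 4 KB pages a
// 3-page allocation removes 3 << 12 = 0x3000 bytes from the tag's entry.)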
2175
2176 //
2177 // Check if any of the debug flags are enabled
2178 //
2179 if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
2180 POOL_FLAG_CHECK_WORKERS |
2181 POOL_FLAG_CHECK_RESOURCES |
2182 POOL_FLAG_CHECK_DEADLOCK))
2183 {
2184 //
2185 // Was deadlock verification also enabled? We can do some extra
2186 // checks at this point
2187 //
2188 if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
2189 {
2190 DPRINT1("Verifier not yet supported\n");
2191 }
2192
2193 //
2194 // FIXME: Many debugging checks go here
2195 //
2196 }
2197
2198 //
2199 // Update counters
2200 //
2201 PoolDesc = PoolVector[PoolType];
2202 InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
2203 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes,
2204 -(LONG_PTR)(PageCount << PAGE_SHIFT));
2205
2206 //
2207 // Do the real free now and update the last counter with the big page count
2208 //
2209 RealPageCount = MiFreePoolPages(P);
2210 ASSERT(RealPageCount == PageCount);
2211 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
2212 -(LONG)RealPageCount);
2213 return;
2214 }
2215
2216 //
2217 // Get the entry for this pool allocation
2218 // The pointer math here may look wrong or confusing, but it is quite right
2219 //
2220 Entry = P;
2221 Entry--;
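// (Entry is typed as PPOOL_HEADER, so the decrement steps back exactly
// sizeof(POOL_HEADER) bytes: the header always immediately precedes the
// pointer that was handed out to the caller.)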
2222
2223 //
2224 // Get the size of the entry, and its pool type, then load the descriptor
2225 // for this pool type
2226 //
2227 BlockSize = Entry->BlockSize;
2228 PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2229 PoolDesc = PoolVector[PoolType];
2230
2231 //
2232 // Make sure that the IRQL makes sense
2233 //
2234 ExpCheckPoolIrqlLevel(PoolType, 0, P);
2235
2236 //
2237 // Get the pool tag and get rid of the PROTECTED_POOL flag
2238 //
2239 Tag = Entry->PoolTag;
2240 if (Tag & PROTECTED_POOL) Tag &= ~PROTECTED_POOL;
2241
2242 //
2243 // Check block tag
2244 //
2245 if (TagToFree && TagToFree != Tag)
2246 {
2247 DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
2248 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
2249 }
2250
2251 //
2252 // Track the removal of this allocation
2253 //
2254 ExpRemovePoolTracker(Tag,
2255 BlockSize * POOL_BLOCK_SIZE,
2256 Entry->PoolType - 1);
2257
2258 //
2259 // Is this allocation small enough to have come from a lookaside list?
2260 //
2261 if (BlockSize <= NUMBER_POOL_LOOKASIDE_LISTS)
2262 {
2263 //
2264 // Try pushing it into the per-CPU lookaside list
2265 //
2266 LookasideList = (PoolType == PagedPool) ?
2267 Prcb->PPPagedLookasideList[BlockSize - 1].P :
2268 Prcb->PPNPagedLookasideList[BlockSize - 1].P;
2269 LookasideList->TotalFrees++;
2270 if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2271 {
2272 LookasideList->FreeHits++;
2273 InterlockedPushEntrySList(&LookasideList->ListHead, P);
2274 return;
2275 }
2276
2277 //
2278 // We failed, try to push it into the global lookaside list
2279 //
2280 LookasideList = (PoolType == PagedPool) ?
2281 Prcb->PPPagedLookasideList[BlockSize - 1].L :
2282 Prcb->PPNPagedLookasideList[BlockSize - 1].L;
2283 LookasideList->TotalFrees++;
2284 if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2285 {
2286 LookasideList->FreeHits++;
2287 InterlockedPushEntrySList(&LookasideList->ListHead, P);
2288 return;
2289 }
2290 }
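//
// (Illustrative note: BlockSize counts POOL_BLOCK_SIZE units, so on a
// 32-bit build a 64-byte allocation occupies 8 + 64 = 72 bytes, i.e.
// 9 units, and would be pushed onto lookaside index 8. The per-CPU list
// is tried first to avoid contention; the global list is the fallback
// before falling through to the free lists under the pool lock.)
//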
2291
2292 //
2293 // Get the pointer to the next entry
2294 //
2295 NextEntry = POOL_BLOCK(Entry, BlockSize);
2296
2297 //
2298 // Update performance counters
2299 //
2300 InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
2301 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, -(LONG_PTR)(BlockSize * POOL_BLOCK_SIZE));
2302
2303 //
2304 // Acquire the pool lock
2305 //
2306 OldIrql = ExLockPool(PoolDesc);
2307
2308 //
2309 // Check if the next allocation is at the end of the page
2310 //
2311 ExpCheckPoolBlocks(Entry);
2312 if (PAGE_ALIGN(NextEntry) != NextEntry)
2313 {
2314 //
2315 // We may be able to combine the block if it's free
2316 //
2317 if (NextEntry->PoolType == 0)
2318 {
2319 //
2320 // The next block is free, so we'll do a combine
2321 //
2322 Combined = TRUE;
2323
2324 //
2325 // Make sure there's actual data in the block -- anything smaller
2326 // than this means we only have the header, so there's no linked list
2327 // for us to remove
2328 //
2329 if ((NextEntry->BlockSize != 1))
2330 {
2331 //
2332 // The block is at least big enough to have a linked list, so go
2333 // ahead and remove it
2334 //
2335 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
2336 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
2337 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
2338 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
2339 }
2340
2341 //
2342 // Our entry is now combined with the next entry
2343 //
2344 Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
2345 }
2346 }
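//
// Worked example (illustrative): if Entry spans 4 units and the free
// neighbor spans 6, Entry now spans 10 units; the neighbor's header
// simply becomes interior bytes of the enlarged free block.
//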
2347
2348 //
2349 // Now check if there was a previous entry on the same page as us
2350 //
2351 if (Entry->PreviousSize)
2352 {
2353 //
2354 // Great, grab that entry and check if it's free
2355 //
2356 NextEntry = POOL_PREV_BLOCK(Entry);
2357 if (NextEntry->PoolType == 0)
2358 {
2359 //
2360 // It is, so we can do a combine
2361 //
2362 Combined = TRUE;
2363
2364 //
2365 // Make sure there's actual data in the block -- anything smaller
2366 // than this means we only have the header so there's no linked list
2367 // for us to remove
2368 //
2369 if ((NextEntry->BlockSize != 1))
2370 {
2371 //
2372 // The block is at least big enough to have a linked list, so go
2373 // ahead and remove it
2374 //
2375 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
2376 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
2377 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
2378 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
2379 }
2380
2381 //
2382 // Combine our original block (which might've already been combined
2383 // with the next block), into the previous block
2384 //
2385 NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;
2386
2387 //
2388 // And now we'll work with the previous block instead
2389 //
2390 Entry = NextEntry;
2391 }
2392 }
2393
2394 //
2395 // By now, it may have been possible for our combined blocks to actually
2396 // have made up a full page (if there were only 2-3 allocations on the
2397 // page, they could've all been combined).
2398 //
2399 if ((PAGE_ALIGN(Entry) == Entry) &&
2400 (PAGE_ALIGN(POOL_NEXT_BLOCK(Entry)) == POOL_NEXT_BLOCK(Entry)))
2401 {
2402 //
2403 // In this case, release the pool lock, update the performance counter,
2404 // and free the page
2405 //
2406 ExUnlockPool(PoolDesc, OldIrql);
2407 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalPages, -1);
2408 MiFreePoolPages(Entry);
2409 return;
2410 }
2411
2412 //
2413 // Otherwise, we now have a free block (or a combination of 2 or 3)
2414 //
2415 Entry->PoolType = 0;
2416 BlockSize = Entry->BlockSize;
2417 ASSERT(BlockSize != 1);
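// (A one-unit free block cannot reach this point: every allocation spans
// at least a header unit plus one data unit, and the one-unit fragments
// that page splits can create are never linked -- they are only ever
// absorbed by the combines above.)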
2418
2419 //
2420 // Check if we actually did combine it with anyone
2421 //
2422 if (Combined)
2423 {
2424 //
2425 // Get the first combined block (either our original to begin with, or
2426 // the one after the original, depending on whether we combined with the previous)
2427 //
2428 NextEntry = POOL_NEXT_BLOCK(Entry);
2429
2430 //
2431 // As long as the next block isn't on a page boundary, have it point
2432 // back to us
2433 //
2434 if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
2435 }
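//
// (PreviousSize is the backward link of the implicit in-page block list:
// each block records its predecessor's size so POOL_PREV_BLOCK can walk
// backwards. After a merge, the successor must therefore be re-pointed
// at the new, larger block -- unless it starts a fresh page.)
//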
2436
2437 //
2438 // Insert this new free block, and release the pool lock
2439 //
2440 ExpInsertPoolHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
2441 ExpCheckPoolLinks(POOL_FREE_BLOCK(Entry));
2442 ExUnlockPool(PoolDesc, OldIrql);
2443 }
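//
// Usage sketch (illustrative only, with a made-up 'tseT' tag): passing the
// allocation's tag lets the pool catch mismatched frees with
// BAD_POOL_CALLER 0x0A, while a tag of 0 skips the check entirely:
//
//     Buffer = ExAllocatePoolWithTag(NonPagedPool, 128, 'tseT');
//     ...
//     ExFreePoolWithTag(Buffer, 'tseT');
//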
2444
2445 /*
2446 * @implemented
2447 */
2448 VOID
2449 NTAPI
2450 ExFreePool(PVOID P)
2451 {
2452 //
2453 // Just free without checking for the tag
2454 //
2455 ExFreePoolWithTag(P, 0);
2456 }
2457
2458 /*
2459 * @unimplemented
2460 */
2461 SIZE_T
2462 NTAPI
2463 ExQueryPoolBlockSize(IN PVOID PoolBlock,
2464 OUT PBOOLEAN QuotaCharged)
2465 {
2466 //
2467 // Not implemented
2468 //
2469 UNIMPLEMENTED;
2470 return 0;
2471 }
2472
2473 /*
2474 * @implemented
2475 */
2477 PVOID
2478 NTAPI
2479 ExAllocatePoolWithQuota(IN POOL_TYPE PoolType,
2480 IN SIZE_T NumberOfBytes)
2481 {
2482 //
2483 // Allocate the pool
2484 //
2485 return ExAllocatePoolWithQuotaTag(PoolType, NumberOfBytes, TAG_NONE);
2486 }
2487
2488 /*
2489 * @unimplemented
2490 */
2491 PVOID
2492 NTAPI
2493 ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType,
2494 IN SIZE_T NumberOfBytes,
2495 IN ULONG Tag,
2496 IN EX_POOL_PRIORITY Priority)
2497 {
2498 //
2499 // Priority is not yet honored -- fall back to a plain tagged allocation
2500 //
2501 UNIMPLEMENTED;
2502 return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2503 }
2504
2505 /*
2506 * @unimplemented
2507 */
2508 PVOID
2509 NTAPI
2510 ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType,
2511 IN SIZE_T NumberOfBytes,
2512 IN ULONG Tag)
2513 {
2514 //
2515 // Quota is not yet charged -- fall back to a plain tagged allocation
2516 //
2517 UNIMPLEMENTED;
2518 return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2519 }
2520
2521 #if DBG && KDBG
2522
2523 BOOLEAN
2524 ExpKdbgExtPool(
2525 ULONG Argc,
2526 PCHAR Argv[])
2527 {
2528 ULONG_PTR Address = 0, Flags = 0;
2529 PVOID PoolPage;
2530 PPOOL_HEADER Entry;
2531 BOOLEAN ThisOne;
2532 PULONG Data;
2533
2534 if (Argc > 1)
2535 {
2536 /* Get address */
2537 if (!KdbpGetHexNumber(Argv[1], &Address))
2538 {
2539 KdbpPrint("Invalid parameter: %s\n", Argv[0]);
2540 return TRUE;
2541 }
2542 }
2543
2544 if (Argc > 2)
2545 {
2546 /* Get flags */
2547 if (!KdbpGetHexNumber(Argv[2], &Flags))
2548 {
2549 KdbpPrint("Invalid parameter: %s\n", Argv[2]);
2550 return TRUE;
2551 }
2552 }
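/*
 * (Illustrative note: invoked from KDBG as "<command> <address> [flags]";
 * the command name itself is registered elsewhere. With bit 0 set, only
 * the line for the entry containing the address is printed, while 32 bytes
 * of data are dumped for each entry walked; bit 31 suppresses the state
 * and tag columns.)
 */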
2553
2554 /* Check if we got an address */
2555 if (Address != 0)
2556 {
2557 /* Get the base page */
2558 PoolPage = PAGE_ALIGN(Address);
2559 }
2560 else
2561 {
2562 KdbpPrint("Heap is unimplemented\n");
2563 return TRUE;
2564 }
2565
2566 /* No paging support! */
2567 if (!MmIsAddressValid(PoolPage))
2568 {
2569 KdbpPrint("Address not accessible!\n");
2570 return TRUE;
2571 }
2572
2573 /* Get pool type */
2574 if ((Address >= (ULONG_PTR)MmPagedPoolStart) && (Address <= (ULONG_PTR)MmPagedPoolEnd))
2575 KdbpPrint("Allocation is from PagedPool region\n");
2576 else if ((Address >= (ULONG_PTR)MmNonPagedPoolStart) && (Address <= (ULONG_PTR)MmNonPagedPoolEnd))
2577 KdbpPrint("Allocation is from NonPagedPool region\n");
2578 else
2579 {
2580 KdbpPrint("Address 0x%p is not within any pool!\n", (PVOID)Address);
2581 return TRUE;
2582 }
2583
2584 /* Loop all entries of that page */
2585 Entry = PoolPage;
2586 do
2587 {
2588 /* Check if the address is within that entry */
2589 ThisOne = ((Address >= (ULONG_PTR)Entry) &&
2590 (Address < (ULONG_PTR)(Entry + Entry->BlockSize)));
2591
2592 if (!(Flags & 1) || ThisOne)
2593 {
2594 /* Print the line */
2595 KdbpPrint("%c%p size: %4d previous size: %4d %s %.4s\n",
2596 ThisOne ? '*' : ' ', Entry, Entry->BlockSize, Entry->PreviousSize,
2597 (Flags & 0x80000000) ? "" : (Entry->PoolType ? "(Allocated)" : "(Free) "),
2598 (Flags & 0x80000000) ? "" : (PCHAR)&Entry->PoolTag);
2599 }
2600
2601 if (Flags & 1)
2602 {
2603 Data = (PULONG)(Entry + 1);
2604 KdbpPrint(" %p %08lx %08lx %08lx %08lx\n"
2605 " %p %08lx %08lx %08lx %08lx\n",
2606 &Data[0], Data[0], Data[1], Data[2], Data[3],
2607 &Data[4], Data[4], Data[5], Data[6], Data[7]);
2608 }
2609
2610 /* Go to next entry */
2611 Entry = POOL_BLOCK(Entry, Entry->BlockSize);
2612 }
2613 while ((Entry->BlockSize != 0) && ((ULONG_PTR)Entry < (ULONG_PTR)PoolPage + PAGE_SIZE));
2614
2615 return TRUE;
2616 }
2617
2618 #endif // DBG && KDBG
2619
2620 /* EOF */